repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/core/panelnd.py | 14 | 4605 | """ Factory methods to create N-D panels """
import warnings
from pandas.compat import zip
import pandas.compat as compat
def create_nd_panel_factory(klass_name, orders, slices, slicer, aliases=None,
                            stat_axis=2, info_axis=0, ns=None):
    """ manufacture a n-d class:

    DEPRECATED. Panelnd is deprecated and will be removed in a future version.
    The recommended way to represent these types of n-dimensional data are with
    the `xarray package <http://xarray.pydata.org/en/stable/>`__.
    Pandas provides a `.to_xarray()` method to automate this conversion.

    Parameters
    ----------
    klass_name : the klass name
    orders : the names of the axes in order (highest to lowest)
    slices : a dictionary that defines how the axes map to the slice axis
    slicer : the class representing a slice of this panel
    aliases : a dictionary defining aliases for various axes
        default = { major : major_axis, minor : minor_axis }
    stat_axis : the default statistic axis default = 2
    info_axis : the info axis
    ns : optional dict of extra attributes to place on the new class

    Returns
    -------
    a class object representing this panel

    Raises
    ------
    Exception
        If ``slicer`` is a name that cannot be resolved on the ``pandas``
        namespace.
    """
    # if slicer is a name, get the object
    if isinstance(slicer, compat.string_types):
        import pandas
        try:
            slicer = getattr(pandas, slicer)
        except AttributeError:
            # BUGFIX: this was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; getattr can only raise
            # AttributeError here.
            raise Exception("cannot create this slicer [%s]" % slicer)

    # build the klass
    ns = {} if not ns else ns
    klass = type(klass_name, (slicer, ), ns)

    # setup the axes
    klass._setup_axes(axes=orders, info_axis=info_axis, stat_axis=stat_axis,
                      aliases=aliases, slicers=slices)
    klass._constructor_sliced = slicer

    # define the methods ####
    def __init__(self, *args, **kwargs):
        # deprecation GH13564
        warnings.warn("\n{klass} is deprecated and will be removed in a "
                      "future version.\nThe recommended way to represent "
                      "these types of n-dimensional data are with the\n"
                      "`xarray package "
                      "<http://xarray.pydata.org/en/stable/>`__.\n"
                      "Pandas provides a `.to_xarray()` method to help "
                      "automate this conversion.\n".format(
                          klass=self.__class__.__name__),
                      FutureWarning, stacklevel=2)
        # NOTE(review): ``kwargs.get('data')`` is truth-tested here, which
        # raises ValueError for a multi-element ndarray passed as data= —
        # presumably callers always pass data positionally; confirm before
        # changing.
        if not (kwargs.get('data') or len(args)):
            raise Exception("must supply at least a data argument to [%s]" %
                            klass_name)
        if 'copy' not in kwargs:
            kwargs['copy'] = False
        if 'dtype' not in kwargs:
            kwargs['dtype'] = None
        self._init_data(*args, **kwargs)
    klass.__init__ = __init__

    def _get_plane_axes_index(self, axis):
        """ return the sliced index for this object """
        # TODO: axis_name is not used, remove?
        axis_name = self._get_axis_name(axis)  # noqa
        index = self._AXIS_ORDERS.index(axis)
        # every axis except ``axis`` itself, in order
        planes = []
        if index:
            planes.extend(self._AXIS_ORDERS[0:index])
        if index != self._AXIS_LEN:
            planes.extend(self._AXIS_ORDERS[index + 1:])
        return planes
    klass._get_plane_axes_index = _get_plane_axes_index

    def _combine(self, other, func, axis=0):
        # element-wise combine with another panel of the same klass;
        # anything else is delegated to the slicer base class
        if isinstance(other, klass):
            return self._combine_with_constructor(other, func)
        return super(klass, self)._combine(other, func, axis=axis)
    klass._combine = _combine

    def _combine_with_constructor(self, other, func):
        # combine labels to form new axes
        new_axes = []
        for a in self._AXIS_ORDERS:
            new_axes.append(getattr(self, a).union(getattr(other, a)))

        # reindex: could check that everything's the same size, but forget it
        d = dict([(a, ax) for a, ax in zip(self._AXIS_ORDERS, new_axes)])
        d['copy'] = False
        this = self.reindex(**d)
        other = other.reindex(**d)
        result_values = func(this.values, other.values)
        return self._constructor(result_values, **d)
    klass._combine_with_constructor = _combine_with_constructor

    # set as NonImplemented operations which we don't support
    # (all names deliberately share the one stub below; ``func`` does not
    # close over ``f``, so late binding is not an issue here)
    for f in ['to_frame', 'to_excel', 'to_sparse', 'groupby', 'join', 'filter',
              'dropna', 'shift']:
        def func(self, *args, **kwargs):
            raise NotImplementedError("this operation is not supported")
        setattr(klass, f, func)

    # add the aggregate operations
    klass._add_aggregate_operations()
    klass._add_numeric_operations()
    return klass
| mit |
1013553207/django | django/db/models/options.py | 149 | 36502 | from __future__ import unicode_literals
import warnings
from bisect import bisect
from collections import OrderedDict, defaultdict
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db import connections
from django.db.models.fields import AutoField
from django.db.models.fields.proxy import OrderWrt
from django.db.models.fields.related import ManyToManyField
from django.utils import six
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import (
force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.functional import cached_property
from django.utils.lru_cache import lru_cache
from django.utils.text import camel_case_to_spaces
from django.utils.translation import override, string_concat
# Sentinel passed as include_parents to _get_fields(): only walk parents
# that share this model's concrete model (i.e. the proxy chain).
PROXY_PARENTS = object()

# Shared empty result for models with no reverse relations.
EMPTY_RELATION_TREE = tuple()

# Warning text attached to the ImmutableList results returned by the
# field-introspection API; %s is filled with the accessor name.
IMMUTABLE_WARNING = (
    "The return type of '%s' should never be mutated. If you want to manipulate this list "
    "for your own use, make a copy first."
)

# Attribute names a model's ``class Meta`` is allowed to define; anything
# else raises TypeError in Options.contribute_to_class().
DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
                 'unique_together', 'permissions', 'get_latest_by',
                 'order_with_respect_to', 'app_label', 'db_tablespace',
                 'abstract', 'managed', 'proxy', 'swappable', 'auto_created',
                 'index_together', 'apps', 'default_permissions',
                 'select_on_save', 'default_related_name',
                 'required_db_features', 'required_db_vendor')
class raise_deprecation(object):
    """Decorator emitting a RemovedInDjango110Warning when the wrapped
    function is called, pointing the caller at ``suggested_alternative``."""

    def __init__(self, suggested_alternative):
        self.suggested_alternative = suggested_alternative

    def __call__(self, fn):
        def wrapper(*args, **kwargs):
            message = (
                "'%s is an unofficial API that has been deprecated. "
                "You may be able to replace it with '%s'"
                % (fn.__name__, self.suggested_alternative)
            )
            # stacklevel=2 attributes the warning to the caller of fn.
            warnings.warn(message, RemovedInDjango110Warning, stacklevel=2)
            return fn(*args, **kwargs)
        return wrapper
def normalize_together(option_together):
    """
    option_together can be either a tuple of tuples, or a single
    tuple of two strings. Normalize it to a tuple of tuples, so that
    calling code can uniformly expect that.
    """
    try:
        if not option_together:
            return ()
        if not isinstance(option_together, (tuple, list)):
            raise TypeError
        first = next(iter(option_together))
        if isinstance(first, (tuple, list)):
            # Already a collection of groups: coerce each group to a tuple.
            return tuple(tuple(group) for group in option_together)
        # A single flat group, e.g. ('a', 'b'): wrap it in an outer tuple.
        return (tuple(option_together),)
    except TypeError:
        # If the value of option_together isn't valid, return it
        # verbatim; this will be picked up by the check framework later.
        return option_together
def make_immutable_fields_list(name, data):
    """Wrap *data* in an ImmutableList whose mutation warning names the
    accessor *name* that produced it."""
    warning = IMMUTABLE_WARNING % name
    return ImmutableList(data, warning=warning)
@python_2_unicode_compatible
class Options(object):
    """Model metadata container, exposed on every model as ``Model._meta``.

    Holds the values declared in the model's ``class Meta`` plus the
    bookkeeping Django needs for field introspection: local field lists,
    parent links, cached forward/reverse field maps and the relation tree.
    """

    # Cached properties cleared by _expire_cache(): FORWARD covers fields
    # defined on this model, REVERSE covers relations pointing at it.
    FORWARD_PROPERTIES = ('fields', 'many_to_many', 'concrete_fields',
                          'local_concrete_fields', '_forward_fields_map')
    REVERSE_PROPERTIES = ('related_objects', 'fields_map', '_relation_tree')

    def __init__(self, meta, app_label=None):
        """Set every option to its default; the user-declared ``class Meta``
        values are applied later in contribute_to_class()."""
        self._get_fields_cache = {}
        self.proxied_children = []
        self.local_fields = []
        self.local_many_to_many = []
        self.virtual_fields = []
        self.model_name = None
        self.verbose_name = None
        self.verbose_name_plural = None
        self.db_table = ''
        self.ordering = []
        self._ordering_clash = False
        self.unique_together = []
        self.index_together = []
        self.select_on_save = False
        self.default_permissions = ('add', 'change', 'delete')
        self.permissions = []
        self.object_name = None
        self.app_label = app_label
        self.get_latest_by = None
        self.order_with_respect_to = None
        self.db_tablespace = settings.DEFAULT_TABLESPACE
        self.required_db_features = []
        self.required_db_vendor = None
        self.meta = meta
        self.pk = None
        self.has_auto_field = False
        self.auto_field = None
        self.abstract = False
        self.managed = True
        self.proxy = False
        # For any class that is a proxy (including automatically created
        # classes for deferred object loading), proxy_for_model tells us
        # which class this model is proxying. Note that proxy_for_model
        # can create a chain of proxy models. For non-proxy models, the
        # variable is always None.
        self.proxy_for_model = None
        # For any non-abstract class, the concrete class is the model
        # in the end of the proxy_for_model chain. In particular, for
        # concrete models, the concrete_model is always the class itself.
        self.concrete_model = None
        self.swappable = None
        self.parents = OrderedDict()
        self.auto_created = False
        # To handle various inheritance situations, we need to track where
        # managers came from (concrete or abstract base classes). `managers`
        # keeps a list of 3-tuples of the form:
        # (creation_counter, instance, abstract(=True))
        self.managers = []
        # List of all lookups defined in ForeignKey 'limit_choices_to' options
        # from *other* models. Needed for some admin checks. Internal use only.
        self.related_fkey_lookups = []
        # A custom app registry to use, if you're making a separate model set.
        self.apps = apps
        self.default_related_name = None

    @lru_cache(maxsize=None)
    def _map_model(self, link):
        # This helper function is used to allow backwards compatibility with
        # the previous API. No future methods should use this function.
        # It maps a field to (field, model or related_model,) depending on the
        # field type.
        model = link.model._meta.concrete_model
        if model is self.model:
            model = None
        return link, model

    @lru_cache(maxsize=None)
    def _map_model_details(self, link):
        # This helper function is used to allow backwards compatibility with
        # the previous API. No future methods should use this function.
        # This function maps a field to a tuple of:
        #  (field, model or related_model, direct, is_m2m) depending on the
        # field type.
        direct = not link.auto_created or link.concrete
        model = link.model._meta.concrete_model
        if model is self.model:
            model = None
        m2m = link.is_relation and link.many_to_many
        return link, model, direct, m2m

    @property
    def label(self):
        # e.g. 'auth.User'
        return '%s.%s' % (self.app_label, self.object_name)

    @property
    def label_lower(self):
        # e.g. 'auth.user'
        return '%s.%s' % (self.app_label, self.model_name)

    @property
    def app_config(self):
        # Don't go through get_app_config to avoid triggering imports.
        return self.apps.app_configs.get(self.app_label)

    @property
    def installed(self):
        return self.app_config is not None

    @property
    def abstract_managers(self):
        # Managers inherited from abstract base classes.
        return [
            (counter, instance.name, instance) for counter, instance, abstract
            in self.managers if abstract
        ]

    @property
    def concrete_managers(self):
        # Managers declared on concrete classes.
        return [
            (counter, instance.name, instance) for counter, instance, abstract
            in self.managers if not abstract
        ]

    def contribute_to_class(self, cls, name):
        """Attach this Options instance to the model class *cls* and apply
        the user-declared ``class Meta`` attributes over the defaults."""
        from django.db import connection
        from django.db.backends.utils import truncate_name
        cls._meta = self
        self.model = cls
        # First, construct the default values for these options.
        self.object_name = cls.__name__
        self.model_name = self.object_name.lower()
        self.verbose_name = camel_case_to_spaces(self.object_name)
        # Store the original user-defined values for each option,
        # for use when serializing the model definition
        self.original_attrs = {}
        # Next, apply any overridden values from 'class Meta'.
        if self.meta:
            meta_attrs = self.meta.__dict__.copy()
            for name in self.meta.__dict__:
                # Ignore any private attributes that Django doesn't care about.
                # NOTE: We can't modify a dictionary's contents while looping
                # over it, so we loop over the *original* dictionary instead.
                if name.startswith('_'):
                    del meta_attrs[name]
            for attr_name in DEFAULT_NAMES:
                if attr_name in meta_attrs:
                    setattr(self, attr_name, meta_attrs.pop(attr_name))
                    self.original_attrs[attr_name] = getattr(self, attr_name)
                elif hasattr(self.meta, attr_name):
                    setattr(self, attr_name, getattr(self.meta, attr_name))
                    self.original_attrs[attr_name] = getattr(self, attr_name)
            self.unique_together = normalize_together(self.unique_together)
            self.index_together = normalize_together(self.index_together)
            # verbose_name_plural is a special case because it uses a 's'
            # by default.
            if self.verbose_name_plural is None:
                self.verbose_name_plural = string_concat(self.verbose_name, 's')
            # order_with_respect_and ordering are mutually exclusive.
            self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
            # Any leftover attributes must be invalid.
            if meta_attrs != {}:
                raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
        else:
            self.verbose_name_plural = string_concat(self.verbose_name, 's')
        del self.meta
        # If the db_table wasn't provided, use the app_label + model_name.
        if not self.db_table:
            self.db_table = "%s_%s" % (self.app_label, self.model_name)
            self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())

    def _prepare(self, model):
        """Finalize the model once all fields are known: resolve
        order_with_respect_to and ensure a primary key exists."""
        if self.order_with_respect_to:
            # The app registry will not be ready at this point, so we cannot
            # use get_field().
            query = self.order_with_respect_to
            try:
                self.order_with_respect_to = next(
                    f for f in self._get_fields(reverse=False)
                    if f.name == query or f.attname == query
                )
            except StopIteration:
                raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, query))
            self.ordering = ('_order',)
            if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields):
                model.add_to_class('_order', OrderWrt())
        else:
            self.order_with_respect_to = None
        if self.pk is None:
            if self.parents:
                # Promote the first parent link in lieu of adding yet another
                # field.
                field = next(six.itervalues(self.parents))
                # Look for a local field with the same name as the
                # first parent link. If a local field has already been
                # created, use it instead of promoting the parent
                already_created = [fld for fld in self.local_fields if fld.name == field.name]
                if already_created:
                    field = already_created[0]
                field.primary_key = True
                self.setup_pk(field)
            else:
                auto = AutoField(verbose_name='ID', primary_key=True,
                                 auto_created=True)
                model.add_to_class('id', auto)

    def add_field(self, field, virtual=False):
        """Register *field* on this model and expire the affected caches."""
        # Insert the given field in the order in which it was created, using
        # the "creation_counter" attribute of the field.
        # Move many-to-many related fields from self.fields into
        # self.many_to_many.
        if virtual:
            self.virtual_fields.append(field)
        elif field.is_relation and field.many_to_many:
            self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
        else:
            self.local_fields.insert(bisect(self.local_fields, field), field)
            self.setup_pk(field)
        # If the field being added is a relation to another known field,
        # expire the cache on this field and the forward cache on the field
        # being referenced, because there will be new relationships in the
        # cache. Otherwise, expire the cache of references *to* this field.
        # The mechanism for getting at the related model is slightly odd -
        # ideally, we'd just ask for field.related_model. However, related_model
        # is a cached property, and all the models haven't been loaded yet, so
        # we need to make sure we don't cache a string reference.
        if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model:
            try:
                field.remote_field.model._meta._expire_cache(forward=False)
            except AttributeError:
                pass
            self._expire_cache()
        else:
            self._expire_cache(reverse=False)

    def setup_pk(self, field):
        # The first field declared with primary_key=True wins; it is also
        # excluded from serialization.
        if not self.pk and field.primary_key:
            self.pk = field
            field.serialize = False

    def setup_proxy(self, target):
        """
        Does the internal setup so that the current model is a proxy for
        "target".
        """
        self.pk = target._meta.pk
        self.proxy_for_model = target
        self.db_table = target._meta.db_table

    def __repr__(self):
        return '<Options for %s>' % self.object_name

    def __str__(self):
        return "%s.%s" % (smart_text(self.app_label), smart_text(self.model_name))

    def can_migrate(self, connection):
        """
        Return True if the model can/should be migrated on the `connection`.
        `connection` can be either a real connection or a connection alias.
        """
        if self.proxy or self.swapped or not self.managed:
            return False
        if isinstance(connection, six.string_types):
            connection = connections[connection]
        if self.required_db_vendor:
            return self.required_db_vendor == connection.vendor
        if self.required_db_features:
            return all(getattr(connection.features, feat, False)
                       for feat in self.required_db_features)
        return True

    @property
    def verbose_name_raw(self):
        """
        There are a few places where the untranslated verbose name is needed
        (so that we get the same value regardless of currently active
        locale).
        """
        with override(None):
            return force_text(self.verbose_name)

    @property
    def swapped(self):
        """
        Has this model been swapped out for another? If so, return the model
        name of the replacement; otherwise, return None.
        For historical reasons, model name lookups using get_model() are
        case insensitive, so we make sure we are case insensitive here.
        """
        if self.swappable:
            swapped_for = getattr(settings, self.swappable, None)
            if swapped_for:
                try:
                    swapped_label, swapped_object = swapped_for.split('.')
                except ValueError:
                    # setting not in the format app_label.model_name
                    # raising ImproperlyConfigured here causes problems with
                    # test cleanup code - instead it is raised in get_user_model
                    # or as part of validation.
                    return swapped_for
                if '%s.%s' % (swapped_label, swapped_object.lower()) != self.label_lower:
                    return swapped_for
        return None

    @cached_property
    def fields(self):
        """
        Returns a list of all forward fields on the model and its parents,
        excluding ManyToManyFields.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        # For legacy reasons, the fields property should only contain forward
        # fields that are not virtual or with a m2m cardinality. Therefore we
        # pass these three filters as filters to the generator.
        # The third lambda is a longwinded way of checking f.related_model - we don't
        # use that property directly because related_model is a cached property,
        # and all the models may not have been loaded yet; we don't want to cache
        # the string reference to the related_model.
        is_not_an_m2m_field = lambda f: not (f.is_relation and f.many_to_many)
        is_not_a_generic_relation = lambda f: not (f.is_relation and f.one_to_many)
        is_not_a_generic_foreign_key = lambda f: not (
            f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model)
        )
        return make_immutable_fields_list(
            "fields",
            (f for f in self._get_fields(reverse=False) if
             is_not_an_m2m_field(f) and is_not_a_generic_relation(f)
             and is_not_a_generic_foreign_key(f))
        )

    @cached_property
    def concrete_fields(self):
        """
        Returns a list of all concrete fields on the model and its parents.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        return make_immutable_fields_list(
            "concrete_fields", (f for f in self.fields if f.concrete)
        )

    @cached_property
    def local_concrete_fields(self):
        """
        Returns a list of all concrete fields on the model.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        return make_immutable_fields_list(
            "local_concrete_fields", (f for f in self.local_fields if f.concrete)
        )

    @raise_deprecation(suggested_alternative="get_fields()")
    def get_fields_with_model(self):
        return [self._map_model(f) for f in self.get_fields()]

    @raise_deprecation(suggested_alternative="get_fields()")
    def get_concrete_fields_with_model(self):
        return [self._map_model(f) for f in self.concrete_fields]

    @cached_property
    def many_to_many(self):
        """
        Returns a list of all many to many fields on the model and its parents.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this list.
        """
        return make_immutable_fields_list(
            "many_to_many",
            (f for f in self._get_fields(reverse=False)
             if f.is_relation and f.many_to_many)
        )

    @cached_property
    def related_objects(self):
        """
        Returns all related objects pointing to the current model. The related
        objects can come from a one-to-one, one-to-many, or many-to-many field
        relation type.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
        return make_immutable_fields_list(
            "related_objects",
            (obj for obj in all_related_fields
             if not obj.hidden or obj.field.many_to_many)
        )

    @raise_deprecation(suggested_alternative="get_fields()")
    def get_m2m_with_model(self):
        return [self._map_model(f) for f in self.many_to_many]

    @cached_property
    def _forward_fields_map(self):
        # name (and attname, when available) -> forward field.
        res = {}
        fields = self._get_fields(reverse=False)
        for field in fields:
            res[field.name] = field
            # Due to the way Django's internals work, get_field() should also
            # be able to fetch a field by attname. In the case of a concrete
            # field with relation, includes the *_id name too
            try:
                res[field.attname] = field
            except AttributeError:
                pass
        return res

    @cached_property
    def fields_map(self):
        # name (and attname, when available) -> reverse field, hidden included.
        res = {}
        fields = self._get_fields(forward=False, include_hidden=True)
        for field in fields:
            res[field.name] = field
            # Due to the way Django's internals work, get_field() should also
            # be able to fetch a field by attname. In the case of a concrete
            # field with relation, includes the *_id name too
            try:
                res[field.attname] = field
            except AttributeError:
                pass
        return res

    def get_field(self, field_name, many_to_many=None):
        """
        Returns a field instance given a field name. The field can be either a
        forward or reverse field, unless many_to_many is specified; if it is,
        only forward fields will be returned.
        The many_to_many argument exists for backwards compatibility reasons;
        it has been deprecated and will be removed in Django 1.10.
        """
        m2m_in_kwargs = many_to_many is not None
        if m2m_in_kwargs:
            # Always throw a warning if many_to_many is used regardless of
            # whether it alters the return type or not.
            warnings.warn(
                "The 'many_to_many' argument on get_field() is deprecated; "
                "use a filter on field.many_to_many instead.",
                RemovedInDjango110Warning
            )
        try:
            # In order to avoid premature loading of the relation tree
            # (expensive) we prefer checking if the field is a forward field.
            field = self._forward_fields_map[field_name]
            if many_to_many is False and field.many_to_many:
                raise FieldDoesNotExist(
                    '%s has no field named %r' % (self.object_name, field_name)
                )
            return field
        except KeyError:
            # If the app registry is not ready, reverse fields are
            # unavailable, therefore we throw a FieldDoesNotExist exception.
            if not self.apps.models_ready:
                raise FieldDoesNotExist(
                    "%s has no field named %r. The app cache isn't ready yet, "
                    "so if this is an auto-created related field, it won't "
                    "be available yet." % (self.object_name, field_name)
                )
        try:
            if m2m_in_kwargs:
                # Previous API does not allow searching reverse fields.
                raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, field_name))
            # Retrieve field instance by name from cached or just-computed
            # field map.
            return self.fields_map[field_name]
        except KeyError:
            raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, field_name))

    @raise_deprecation(suggested_alternative="get_field()")
    def get_field_by_name(self, name):
        return self._map_model_details(self.get_field(name))

    @raise_deprecation(suggested_alternative="get_fields()")
    def get_all_field_names(self):
        names = set()
        fields = self.get_fields()
        for field in fields:
            # For backwards compatibility GenericForeignKey should not be
            # included in the results.
            if field.is_relation and field.many_to_one and field.related_model is None:
                continue
            # Relations to child proxy models should not be included.
            if (field.model != self.model and
                    field.model._meta.concrete_model == self.concrete_model):
                continue
            names.add(field.name)
            if hasattr(field, 'attname'):
                names.add(field.attname)
        return list(names)

    @raise_deprecation(suggested_alternative="get_fields()")
    def get_all_related_objects(self, local_only=False, include_hidden=False,
                                include_proxy_eq=False):
        include_parents = True if local_only is False else PROXY_PARENTS
        fields = self._get_fields(
            forward=False, reverse=True,
            include_parents=include_parents,
            include_hidden=include_hidden,
        )
        fields = (obj for obj in fields if not isinstance(obj.field, ManyToManyField))
        if include_proxy_eq:
            children = chain.from_iterable(c._relation_tree
                                           for c in self.concrete_model._meta.proxied_children
                                           if c is not self)
            relations = (f.remote_field for f in children
                         if include_hidden or not f.remote_field.field.remote_field.is_hidden())
            fields = chain(fields, relations)
        return list(fields)

    @raise_deprecation(suggested_alternative="get_fields()")
    def get_all_related_objects_with_model(self, local_only=False, include_hidden=False,
                                           include_proxy_eq=False):
        return [
            self._map_model(f) for f in self.get_all_related_objects(
                local_only=local_only,
                include_hidden=include_hidden,
                include_proxy_eq=include_proxy_eq,
            )
        ]

    @raise_deprecation(suggested_alternative="get_fields()")
    def get_all_related_many_to_many_objects(self, local_only=False):
        include_parents = True if local_only is not True else PROXY_PARENTS
        fields = self._get_fields(
            forward=False, reverse=True,
            include_parents=include_parents, include_hidden=True
        )
        return [obj for obj in fields if isinstance(obj.field, ManyToManyField)]

    @raise_deprecation(suggested_alternative="get_fields()")
    def get_all_related_m2m_objects_with_model(self):
        fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
        return [self._map_model(obj) for obj in fields if isinstance(obj.field, ManyToManyField)]

    def get_base_chain(self, model):
        """
        Return a list of parent classes leading to `model` (ordered from
        closest to most distant ancestor). This has to handle the case where
        `model` is a grandparent or even more distant relation.
        """
        if not self.parents:
            return []
        if model in self.parents:
            return [model]
        for parent in self.parents:
            res = parent._meta.get_base_chain(model)
            if res:
                res.insert(0, parent)
                return res
        return []

    def get_parent_list(self):
        """
        Returns all the ancestors of this model as a list ordered by MRO.
        Useful for determining if something is an ancestor, regardless of lineage.
        """
        result = OrderedSet(self.parents)
        for parent in self.parents:
            for ancestor in parent._meta.get_parent_list():
                result.add(ancestor)
        return list(result)

    def get_ancestor_link(self, ancestor):
        """
        Returns the field on the current model which points to the given
        "ancestor". This is possible an indirect link (a pointer to a parent
        model, which points, eventually, to the ancestor). Used when
        constructing table joins for model inheritance.
        Returns None if the model isn't an ancestor of this one.
        """
        if ancestor in self.parents:
            return self.parents[ancestor]
        for parent in self.parents:
            # Tries to get a link field from the immediate parent
            parent_link = parent._meta.get_ancestor_link(ancestor)
            if parent_link:
                # In case of a proxied model, the first link
                # of the chain to the ancestor is that parent
                # links
                return self.parents[parent] or parent_link

    def _populate_directed_relation_graph(self):
        """
        This method is used by each model to find its reverse objects. As this
        method is very expensive and is accessed frequently (it looks up every
        field in a model, in every app), it is computed on first access and then
        is set as a property on every model.
        """
        related_objects_graph = defaultdict(list)
        all_models = self.apps.get_models(include_auto_created=True)
        for model in all_models:
            # Abstract model's fields are copied to child models, hence we will
            # see the fields from the child models.
            if model._meta.abstract:
                continue
            fields_with_relations = (
                f for f in model._meta._get_fields(reverse=False, include_parents=False)
                if f.is_relation and f.related_model is not None
            )
            for f in fields_with_relations:
                if not isinstance(f.remote_field.model, six.string_types):
                    related_objects_graph[f.remote_field.model._meta].append(f)
        for model in all_models:
            # Set the relation_tree using the internal __dict__. In this way
            # we avoid calling the cached property. In attribute lookup,
            # __dict__ takes precedence over a data descriptor (such as
            # @cached_property). This means that the _meta._relation_tree is
            # only called if related_objects is not in __dict__.
            related_objects = related_objects_graph[model._meta]
            model._meta.__dict__['_relation_tree'] = related_objects
        # It seems it is possible that self is not in all_models, so guard
        # against that with default for get().
        return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)

    @cached_property
    def _relation_tree(self):
        return self._populate_directed_relation_graph()

    def _expire_cache(self, forward=True, reverse=True):
        # This method is usually called by apps.cache_clear(), when the
        # registry is finalized, or when a new field is added.
        properties_to_expire = []
        if forward:
            properties_to_expire.extend(self.FORWARD_PROPERTIES)
        if reverse and not self.abstract:
            properties_to_expire.extend(self.REVERSE_PROPERTIES)
        for cache_key in properties_to_expire:
            try:
                delattr(self, cache_key)
            except AttributeError:
                pass
        self._get_fields_cache = {}

    def get_fields(self, include_parents=True, include_hidden=False):
        """
        Returns a list of fields associated to the model. By default, includes
        forward and reverse fields, fields derived from inheritance, but not
        hidden fields. The returned fields can be changed using the parameters:
        - include_parents: include fields derived from inheritance
        - include_hidden:  include fields that have a related_name that
                           starts with a "+"
        """
        if include_parents is False:
            include_parents = PROXY_PARENTS
        return self._get_fields(include_parents=include_parents, include_hidden=include_hidden)

    def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
                    seen_models=None):
        """
        Internal helper function to return fields of the model.
        * If forward=True, then fields defined on this model are returned.
        * If reverse=True, then relations pointing to this model are returned.
        * If include_hidden=True, then fields with is_hidden=True are returned.
        * The include_parents argument toggles if fields from parent models
          should be included. It has three values: True, False, and
          PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
          fields defined for the current model or any of its parents in the
          parent chain to the model's concrete model.
        """
        if include_parents not in (True, False, PROXY_PARENTS):
            raise TypeError("Invalid argument for include_parents: %s" % (include_parents,))
        # This helper function is used to allow recursion in ``get_fields()``
        # implementation and to provide a fast way for Django's internals to
        # access specific subsets of fields.
        # We must keep track of which models we have already seen. Otherwise we
        # could include the same field multiple times from different models.
        topmost_call = False
        if seen_models is None:
            seen_models = set()
            topmost_call = True
        seen_models.add(self.model)
        # Creates a cache key composed of all arguments
        cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)
        try:
            # In order to avoid list manipulation. Always return a shallow copy
            # of the results.
            return self._get_fields_cache[cache_key]
        except KeyError:
            pass
        fields = []
        # Recursively call _get_fields() on each parent, with the same
        # options provided in this call.
        if include_parents is not False:
            for parent in self.parents:
                # In diamond inheritance it is possible that we see the same
                # model from two different routes. In that case, avoid adding
                # fields from the same parent again.
                if parent in seen_models:
                    continue
                if (parent._meta.concrete_model != self.concrete_model and
                        include_parents == PROXY_PARENTS):
                    continue
                for obj in parent._meta._get_fields(
                        forward=forward, reverse=reverse, include_parents=include_parents,
                        include_hidden=include_hidden, seen_models=seen_models):
                    if hasattr(obj, 'parent_link') and obj.parent_link:
                        continue
                    fields.append(obj)
        if reverse:
            # Tree is computed once and cached until the app cache is expired.
            # It is composed of a list of fields pointing to the current model
            # from other models.
            all_fields = self._relation_tree
            for field in all_fields:
                # If hidden fields should be included or the relation is not
                # intentionally hidden, add to the fields dict.
                if include_hidden or not field.remote_field.hidden:
                    fields.append(field.remote_field)
        if forward:
            fields.extend(
                field for field in chain(self.local_fields, self.local_many_to_many)
            )
            # Virtual fields are recopied to each child model, and they get a
            # different model as field.model in each child. Hence we have to
            # add the virtual fields separately from the topmost call. If we
            # did this recursively similar to local_fields, we would get field
            # instances with field.model != self.model.
            if topmost_call:
                fields.extend(
                    f for f in self.virtual_fields
                )
        # In order to avoid list manipulation. Always
        # return a shallow copy of the results
        fields = make_immutable_fields_list("get_fields()", fields)
        # Store result into cache for later access
        self._get_fields_cache[cache_key] = fields
        return fields
| bsd-3-clause |
MungoRae/home-assistant | homeassistant/components/ecobee.py | 1 | 3654 | """
Support for Ecobee.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/ecobee/
"""
import logging
import os
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import discovery
from homeassistant.const import CONF_API_KEY
from homeassistant.util import Throttle
REQUIREMENTS = ['python-ecobee-api==0.0.7']
# Maps component name -> configurator request id while user authorization
# is pending.
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
CONF_HOLD_TEMP = 'hold_temp'
DOMAIN = 'ecobee'
ECOBEE_CONFIG_FILE = 'ecobee.conf'
# Minimum interval between polls of the Ecobee cloud API (see EcobeeData).
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=180)
# Module-level EcobeeData singleton, assigned in setup().
NETWORK = None
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Optional(CONF_API_KEY): cv.string,
        vol.Optional(CONF_HOLD_TEMP, default=False): cv.boolean
    })
}, extra=vol.ALLOW_EXTRA)
def request_configuration(network, hass, config):
    """Request configuration steps from the user."""
    configurator = hass.components.configurator
    if 'ecobee' in _CONFIGURING:
        # A request card is already open; surface the failure on it instead
        # of creating a second one.
        configurator.notify_errors(
            _CONFIGURING['ecobee'], "Failed to register, please try again.")
        return

    # pylint: disable=unused-argument
    def ecobee_configuration_callback(callback_data):
        """Handle configuration callbacks."""
        # The user confirmed the PIN on ecobee.com: exchange it for tokens,
        # refresh state, then continue component setup.
        network.request_tokens()
        network.update()
        setup_ecobee(hass, network, config)

    _CONFIGURING['ecobee'] = configurator.request_config(
        "Ecobee", ecobee_configuration_callback,
        description=(
            'Please authorize this app at https://www.ecobee.com/consumer'
            'portal/index.html with pin code: ' + network.pin),
        description_image="/static/images/config_ecobee_thermostat.png",
        submit_caption="I have authorized the app."
    )
def setup_ecobee(hass, network, config):
    """Set up the Ecobee thermostat."""
    # If ecobee has a PIN then it needs to be configured.
    if network.pin is not None:
        request_configuration(network, hass, config)
        return

    if 'ecobee' in _CONFIGURING:
        # Authorization finished: dismiss the pending configurator card.
        configurator = hass.components.configurator
        configurator.request_done(_CONFIGURING.pop('ecobee'))

    # Forward the hold_temp option to the climate platform only; sensor
    # platforms take no options.
    hold_temp = config[DOMAIN].get(CONF_HOLD_TEMP)
    discovery.load_platform(
        hass, 'climate', DOMAIN, {'hold_temp': hold_temp}, config)
    discovery.load_platform(hass, 'sensor', DOMAIN, {}, config)
    discovery.load_platform(hass, 'binary_sensor', DOMAIN, {}, config)
class EcobeeData(object):
    """Get the latest data and update the states."""

    def __init__(self, config_file):
        """Init the Ecobee data object."""
        from pyecobee import Ecobee
        self.ecobee = Ecobee(config_file)

    # Throttled so the cloud API is polled at most once per
    # MIN_TIME_BETWEEN_UPDATES, no matter how many entities call update().
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data from pyecobee."""
        self.ecobee.update()
        _LOGGER.info("Ecobee data updated successfully")
def setup(hass, config):
    """Set up the Ecobee.

    Will automatically load thermostat and sensor components to support
    devices discovered on the network.

    Returns True on success (including when an authorization flow is
    already in progress).
    """
    # pylint: disable=global-statement, import-error
    global NETWORK

    if 'ecobee' in _CONFIGURING:
        # A configuration flow is already pending; report success so Home
        # Assistant does not treat the in-progress setup as a failure
        # (the previous bare `return` yielded a falsy None).
        return True

    from pyecobee import config_from_file

    # Create ecobee.conf if it doesn't exist
    if not os.path.isfile(hass.config.path(ECOBEE_CONFIG_FILE)):
        jsonconfig = {"API_KEY": config[DOMAIN].get(CONF_API_KEY)}
        config_from_file(hass.config.path(ECOBEE_CONFIG_FILE), jsonconfig)

    NETWORK = EcobeeData(hass.config.path(ECOBEE_CONFIG_FILE))

    setup_ecobee(hass, NETWORK.ecobee, config)

    return True
| apache-2.0 |
wolfskaempf/ga_statistics | lib/python2.7/site-packages/django/template/utils.py | 41 | 4662 | import os
import warnings
from collections import Counter, OrderedDict
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import lru_cache
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
class InvalidTemplateEngineError(ImproperlyConfigured):
    # Raised by EngineHandler.__getitem__ for an unknown engine alias.
    pass
class EngineHandler(object):
    """Lazy registry of configured template engines, keyed by NAME alias."""

    def __init__(self, templates=None):
        """
        templates is an optional list of template engine definitions
        (structured like settings.TEMPLATES).
        """
        self._templates = templates
        self._engines = {}

    @cached_property
    def templates(self):
        """Return an OrderedDict mapping engine NAME -> normalized config."""
        if self._templates is None:
            self._templates = settings.TEMPLATES

        if not self._templates:
            warnings.warn(
                "You haven't defined a TEMPLATES setting. You must do so "
                "before upgrading to Django 2.0. Otherwise Django will be "
                "unable to load templates.", RemovedInDjango20Warning)
            self._templates = [
                {
                    'BACKEND': 'django.template.backends.django.DjangoTemplates',
                    'DIRS': settings.TEMPLATE_DIRS,
                    'OPTIONS': {
                        'allowed_include_roots': settings.ALLOWED_INCLUDE_ROOTS,
                        'context_processors': settings.TEMPLATE_CONTEXT_PROCESSORS,
                        'debug': settings.TEMPLATE_DEBUG,
                        'loaders': settings.TEMPLATE_LOADERS,
                        'string_if_invalid': settings.TEMPLATE_STRING_IF_INVALID,
                    },
                },
            ]

        templates = OrderedDict()
        backend_names = []
        for tpl in self._templates:
            tpl = tpl.copy()
            try:
                # This will raise an exception if 'BACKEND' doesn't exist or
                # isn't a string containing at least one dot.
                default_name = tpl['BACKEND'].rsplit('.', 2)[-2]
            except Exception:
                invalid_backend = tpl.get('BACKEND', '<not defined>')
                raise ImproperlyConfigured(
                    "Invalid BACKEND for a template engine: {}. Check "
                    "your TEMPLATES setting.".format(invalid_backend))

            tpl.setdefault('NAME', default_name)
            tpl.setdefault('DIRS', [])
            tpl.setdefault('APP_DIRS', False)
            tpl.setdefault('OPTIONS', {})

            templates[tpl['NAME']] = tpl
            # Collect names *before* deduplication by the dict assignment
            # above. The previous Counter(list(templates)) counted the keys
            # of the dict, which are unique by construction, so duplicate
            # aliases could never be detected (bug fix).
            backend_names.append(tpl['NAME'])

        counts = Counter(backend_names)
        duplicates = [alias for alias, count in counts.most_common() if count > 1]
        if duplicates:
            raise ImproperlyConfigured(
                "Template engine aliases aren't unique, duplicates: {}. "
                "Set a unique NAME for each engine in settings.TEMPLATES."
                .format(", ".join(duplicates)))

        return templates

    def __getitem__(self, alias):
        try:
            return self._engines[alias]
        except KeyError:
            try:
                params = self.templates[alias]
            except KeyError:
                raise InvalidTemplateEngineError(
                    "Could not find config for '{}' "
                    "in settings.TEMPLATES".format(alias))

            # If importing or initializing the backend raises an exception,
            # self._engines[alias] isn't set and this code may get executed
            # again, so we must preserve the original params. See #24265.
            params = params.copy()
            backend = params.pop('BACKEND')
            engine_cls = import_string(backend)
            engine = engine_cls(params)

            self._engines[alias] = engine
            return engine

    def __iter__(self):
        return iter(self.templates)

    def all(self):
        return [self[alias] for alias in self]
@lru_cache.lru_cache()
def get_app_template_dirs(dirname):
    """
    Return an iterable of paths of directories to load app templates from.

    dirname is the name of the subdirectory containing templates inside
    installed applications.
    """
    candidates = (
        os.path.join(app_config.path, dirname)
        for app_config in apps.get_app_configs()
        if app_config.path
    )
    # Immutable return value because it will be cached and shared by callers.
    return tuple(upath(path) for path in candidates if os.path.isdir(path))
| mit |
sfischer13/python-arpa | tests/test_model_base.py | 1 | 2958 | import arpa
from arpa.models.base import ARPAModel
from arpa.models.simple import ARPAModelSimple
import pytest
from test_arpa import PARSERS
from test_arpa import TEST_ARPA
def test_manual_log_p_unk():
    # Unknown words fall back to the <unk> unigram log-probability.
    lm = arpa.loadf(TEST_ARPA)[0]
    assert lm.log_p('UnladenSwallow') == -1.995635


def test_manual_p():
    lm = arpa.loadf(TEST_ARPA)[0]
    assert round(lm.p('<s>'), 4) == 0


def test_manual_contains():
    # Containment is defined for single words only; tuples and multi-word
    # strings raise ValueError.
    lm = arpa.loadf(TEST_ARPA)[0]
    assert 'foo' in lm
    with pytest.raises(ValueError):
        assert ('foo', ) in lm
    with pytest.raises(ValueError):
        assert 'a little' in lm
    with pytest.raises(ValueError):
        assert ('a', 'little') in lm


def test_new_model_contains_not():
    # An empty model contains nothing; invalid inputs still raise.
    lm = ARPAModelSimple()
    assert 'foo' not in lm
    with pytest.raises(ValueError):
        assert ('foo', ) not in lm
    with pytest.raises(ValueError):
        assert 'a little' not in lm
    with pytest.raises(ValueError):
        assert ('a', 'little') not in lm
def test_new_model_counts():
    # A freshly created model has no n-gram count entries.
    lm = ARPAModelSimple()
    assert lm.counts() == []


def test_new_model_len():
    lm = ARPAModelSimple()
    assert len(lm) == 0


def test_log_p_raw():
    # Raw lookup performs no <unk> fallback, so a missing word raises.
    lm = ARPAModelSimple()
    with pytest.raises(KeyError):
        lm.log_p_raw('UnladenSwallow')


def test_log_p_empty_string():
    lm = ARPAModelSimple()
    with pytest.raises(ValueError):
        lm.log_p('')


def test_log_p_empty_tuple():
    lm = ARPAModelSimple()
    with pytest.raises(ValueError):
        lm.log_p(tuple())


def test_log_p_int():
    # Only strings and tuples of words are valid queries.
    lm = ARPAModelSimple()
    with pytest.raises(ValueError):
        lm.log_p(1)


def test_log_s_int():
    lm = ARPAModelSimple()
    with pytest.raises(ValueError):
        lm.log_s(1)
def test_input_equality():
    # A string query and its tokenized tuple form must resolve identically,
    # both when the model is empty (KeyError) and when it is loaded.
    lm = ARPAModelSimple()
    with pytest.raises(KeyError):
        assert lm.p('foo') == lm.p(('foo', ))
    with pytest.raises(KeyError):
        assert lm.p('xxx') == lm.p(('xxx', ))
    with pytest.raises(KeyError):
        assert lm.p('a little') == lm.p(('a', 'little'))
    with pytest.raises(KeyError):
        assert lm.p('xxx little') == lm.p(('xxx', 'little'))
    lm = arpa.loadf(TEST_ARPA)[0]
    assert lm.p('foo') == lm.p(('foo', ))
    assert lm.p('xxx') == lm.p(('xxx', ))
    assert lm.p('a little') == lm.p(('a', 'little'))
    assert lm.p('xxx little') == lm.p(('xxx', 'little'))


def test_check_input_list():
    result = ARPAModel._check_input(['foo', 'bar'])
    assert isinstance(result, tuple)


def test_check_input_string_word():
    # A single word becomes a 1-tuple.
    result = ARPAModel._check_input('foo')
    assert isinstance(result, tuple) and len(result) == 1


def test_check_input_string_words():
    # A multi-word string is tokenized into a tuple of words.
    result = ARPAModel._check_input('foo bar')
    assert isinstance(result, tuple) and len(result) == 2


def test_new_model_order():
    # Order is undefined for an empty model; all parsers agree on 5 for the
    # test file.
    lm = ARPAModelSimple()
    assert lm.order() is None
    for p in PARSERS:
        lm = arpa.loadf(TEST_ARPA, parser=p)[0]
        assert lm.order() == 5
| mit |
cgstudiomap/cgstudiomap | main/parts/odoo/addons/purchase_double_validation/__openerp__.py | 260 | 1920 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Double Validation on Purchases',
'version' : '1.1',
'category': 'Purchase Management',
'depends' : ['base','purchase'],
'author' : 'OpenERP SA',
'description': """
Double-validation for purchases exceeding minimum amount.
=========================================================
This module modifies the purchase workflow in order to validate purchases that
exceeds minimum amount set by configuration wizard.
""",
'website': 'https://www.odoo.com/page/purchase',
'data': [
'purchase_double_validation_workflow.xml',
'purchase_double_validation_installer.xml',
'purchase_double_validation_view.xml',
],
'test': [
'test/purchase_double_validation_demo.yml',
'test/purchase_double_validation_test.yml'
],
'demo': [],
'installable': True,
'auto_install': False
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
alshedivat/tensorflow | tensorflow/contrib/batching/python/ops/batch_ops.py | 15 | 7839 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for automatic batching and unbatching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_batch_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_batch_ops import *
# pylint: enable=wildcard-import
@ops.RegisterGradient("Batch")
def _BatchGrad(op, *out_grads):  # pylint: disable=invalid-name
  """Gradient for batch op.

  Each batched input gradient is recovered by unbatching the corresponding
  output gradient using the batch_index and id outputs of the Batch op.
  """
  return [
      gen_batch_ops.unbatch(
          out_grads[i],
          op.outputs[-2],
          op.outputs[-1],
          timeout_micros=op.get_attr("grad_timeout_micros"),
          shared_name="batch_gradient_{}_{}".format(op.name, i))
      for i in range(len(op.inputs))
  ]
@ops.RegisterGradient("Unbatch")
def _UnbatchGrad(op, grad):  # pylint: disable=invalid-name
  """Gradient for unbatch op: re-batch grad; index/id inputs get None."""
  return [
      gen_batch_ops.unbatch_grad(
          op.inputs[0],
          op.inputs[1],
          grad,
          op.inputs[2],
          shared_name="unbatch_gradient_{}".format(op.name)), None, None
  ]
def batch_function(num_batch_threads,
                   max_batch_size,
                   batch_timeout_micros,
                   allowed_batch_sizes=None,
                   max_enqueued_batches=10):
  """Batches the computation done by the decorated function.

  So, for example, in the following code

  ```python
  @batch_function(1, 2, 3)
  def layer(a):
    return tf.matmul(a, a)

  b = layer(w)
  ```

  if more than one session.run call is simultaneously trying to compute `b`
  the values of `w` will be gathered, non-deterministically concatenated
  along the first axis, and only one thread will run the computation. See the
  documentation of the `Batch` op for more details.

  Assumes that all arguments of the decorated function are Tensors which will
  be batched along their first dimension.

  SparseTensor is not supported. The return value of the decorated function
  must be a Tensor or a list/tuple of Tensors.

  Args:
    num_batch_threads: Number of scheduling threads for processing batches
     of work. Determines the number of batches processed in parallel.
    max_batch_size: Batch sizes will never be bigger than this.
    batch_timeout_micros: Maximum number of microseconds to wait before
     outputting an incomplete batch.
    allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
     does nothing. Otherwise, supplies a list of batch sizes, causing the op
     to pad batches up to one of those sizes. The entries must increase
     monotonically, and the final entry must equal max_batch_size.
    max_enqueued_batches: The maximum depth of the batch queue. Defaults to 10.

  Returns:
    The decorated function will return the unbatched computation output Tensors.
  """
  def decorator(fn):  # pylint: disable=missing-docstring
    def decorated(*args):  # pylint: disable=missing-docstring
      types = [arg.dtype for arg in args]

      # Wrap the user function as a Defun so the BatchFunction op can invoke
      # it once per assembled batch.
      @function.Defun(*types)
      def computation(*computation_args):
        return fn(*computation_args)

      with ops.name_scope("batch") as name:
        for a in args:
          if not isinstance(a, ops.Tensor):
            raise ValueError("All arguments to functions decorated with "
                             "`batch_function`  are supposed to be Tensors; "
                             "found %s" % repr(a))
        # Tensors closed over by `fn` are passed separately so the op can
        # feed them through unbatched.
        return gen_batch_ops.batch_function(
            num_batch_threads=num_batch_threads,
            max_batch_size=max_batch_size,
            batch_timeout_micros=batch_timeout_micros,
            allowed_batch_sizes=allowed_batch_sizes,
            max_enqueued_batches=max_enqueued_batches,
            shared_name=name,
            f=computation,
            in_tensors=list(args),
            captured_tensors=computation.captured_inputs,
            Tout=[o.type for o in computation.definition.signature.output_arg])

    return decorated

  return decorator
def batch_function_v1(num_batch_threads,
                      max_batch_size,
                      batch_timeout_micros,
                      allowed_batch_sizes=None,
                      grad_timeout_micros=60 * 1000 * 1000,
                      unbatch_timeout_micros=60 * 1000 * 1000,
                      max_enqueued_batches=10):
  """Batches the computation done by the decorated function.

  This is the older version of batch_function(). Please use the former instead
  of this.

  Args:
    num_batch_threads: Number of scheduling threads for processing batches
     of work. Determines the number of batches processed in parallel.
    max_batch_size: Batch sizes will never be bigger than this.
    batch_timeout_micros: Maximum number of microseconds to wait before
     outputting an incomplete batch.
    allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
     does nothing. Otherwise, supplies a list of batch sizes, causing the op
     to pad batches up to one of those sizes. The entries must increase
     monotonically, and the final entry must equal max_batch_size.
    grad_timeout_micros: The timeout to use for the gradient. See the
     documentation of the unbatch op for more details. Defaults to 60s.
    unbatch_timeout_micros: The timeout to use for unbatching. See the
     documentation of the unbatch op for more details. Defaults to 60s.
    max_enqueued_batches: The maximum depth of the batch queue. Defaults to 10.

  Returns:
    The decorated function will return the unbatched computation output Tensors.
  """
  def decorator(f):  # pylint: disable=missing-docstring
    def decorated(*args):
      with ops.name_scope("batch") as name:
        for a in args:
          if not isinstance(a, ops.Tensor):
            raise ValueError("All arguments to functions decorated with "
                             "`batch_function` are supposed to be Tensors; "
                             "found %s" % repr(a))
        # Batch the inputs explicitly, run f on the batch, then unbatch
        # each output (unlike batch_function, which uses one fused op).
        batched_tensors, batch_index, id_t = gen_batch_ops.batch(
            args,
            num_batch_threads=num_batch_threads,
            max_batch_size=max_batch_size,
            batch_timeout_micros=batch_timeout_micros,
            max_enqueued_batches=max_enqueued_batches,
            allowed_batch_sizes=allowed_batch_sizes,
            grad_timeout_micros=grad_timeout_micros,
            shared_name=name)
        outputs = f(*batched_tensors)
        if isinstance(outputs, ops.Tensor):
          outputs_list = [outputs]
        else:
          outputs_list = outputs
        with ops.name_scope("unbatch") as unbatch_name:
          unbatched = [
              gen_batch_ops.unbatch(t, batch_index, id_t,
                                    timeout_micros=unbatch_timeout_micros,
                                    shared_name=unbatch_name + "/" + t.name)
              for t in outputs_list]
        if isinstance(outputs, ops.Tensor):
          return unbatched[0]
        return unbatched

    return decorated

  return decorator
| apache-2.0 |
mahak/cinder | cinder/tests/unit/backup/fake_google_client2.py | 2 | 3730 | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2016 Vedams Inc.
# Copyright (C) 2016 Google Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tempfile
class FakeGoogleObjectInsertExecute(object):
    """Fake result of an objects().insert() request."""

    # Fixed base64 md5 digest returned for every fake upload.
    _RESPONSE = {u'md5Hash': u'Z2NzY2luZGVybWQ1'}

    def execute(self, *args, **kwargs):
        return dict(self._RESPONSE)
class FakeGoogleObjectListExecute(object):
    """Fake of an objects().list() request over a tempdir-backed bucket."""

    def __init__(self, *args, **kwargs):
        self.bucket_name = kwargs['bucket']
        self.prefix = kwargs['prefix']

    def execute(self, *args, **kwargs):
        """Return a GCS-style listing of objects matching the prefix."""
        bucket_dir = os.path.join(tempfile.gettempdir(), self.bucket_name)
        # GCS prefix listing matches from the *start* of the object name.
        # The previous try/except around str.index matched the prefix
        # anywhere in the name and its bare `except Exception: pass`
        # silently swallowed every error.
        fake_body = [{'name': name}
                     for name in os.listdir(bucket_dir)
                     if name.startswith(self.prefix)]
        return {'items': fake_body}
class FakeGoogleBucketListExecute(object):
    """Fake result of a buckets().list() request."""

    def execute(self, *args, **kwargs):
        bucket_names = (u'gcscinderbucket', u'gcsbucket')
        return {u'items': [{u'name': name} for name in bucket_names]}
class FakeGoogleBucketInsertExecute(object):
    # Bucket creation is a no-op for the fake backend.
    def execute(self, *args, **kwargs):
        pass


class FakeMediaObject(object):
    # Lightweight handle identifying an object within a fake bucket;
    # consumed by FakeGoogleMediaIoBaseDownload.
    def __init__(self, *args, **kwargs):
        self.bucket_name = kwargs['bucket']
        self.object_name = kwargs['object']
class FakeGoogleObject(object):
    """Fake objects() resource writing uploads into a tempdir-backed bucket."""

    def insert(self, *args, **kwargs):
        """Persist the media body to disk and return a fake insert result."""
        object_path = (tempfile.gettempdir() + '/' + kwargs['bucket'] + '/' +
                       kwargs['name'])
        # Rewind and copy the upload buffer. A previous standalone
        # `_fd.getvalue()` call here read the entire buffer and discarded
        # the result (dead code) -- removed.
        with open(object_path, 'wb') as object_file:
            kwargs['media_body']._fd.seek(0)
            object_file.write(kwargs['media_body']._fd.read())
        return FakeGoogleObjectInsertExecute()

    def get_media(self, *args, **kwargs):
        return FakeMediaObject(*args, **kwargs)

    def list(self, *args, **kwargs):
        return FakeGoogleObjectListExecute(*args, **kwargs)
class FakeGoogleBucket(object):
    # Fake buckets() resource.
    def list(self, *args, **kwargs):
        return FakeGoogleBucketListExecute()

    def insert(self, *args, **kwargs):
        return FakeGoogleBucketInsertExecute()


class FakeGoogleDiscovery(object):
    """Logs calls instead of executing."""
    def __init__(self, *args, **kwargs):
        pass

    @classmethod
    def Build(cls, *args, **kargs):
        return FakeDiscoveryBuild()


class FakeDiscoveryBuild(object):
    """Logging calls instead of executing."""
    # Mirrors the googleapiclient service object: objects()/buckets()
    # return per-resource request builders.
    def __init__(self, *args, **kwargs):
        pass

    def objects(self):
        return FakeGoogleObject()

    def buckets(self):
        return FakeGoogleBucket()


class FakeGoogleCredentials(object):
    # Stand-in for service credentials; all operations are no-ops.
    def __init__(self, *args, **kwargs):
        pass

    @classmethod
    def from_stream(cls, *args, **kwargs):
        pass
class FakeGoogleMediaIoBaseDownload(object):
    """Fake downloader: copies the backing file into *fh* at construction."""

    def __init__(self, fh, req, chunksize=None):
        source = (tempfile.gettempdir() + '/' + req.bucket_name + '/' +
                  req.object_name)
        with open(source, 'rb') as object_file:
            fh.write(object_file.read())

    def next_chunk(self, **kwargs):
        # Report a completed download of 100 bytes in a single chunk.
        return (100, True)
landryb/QGIS | python/plugins/db_manager/db_plugins/spatialite/data_model.py | 30 | 2332 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from ..data_model import TableDataModel, SqlResultModel
class SLTableDataModel(TableDataModel):
    # Table data model for SpatiaLite layers: the whole result set is
    # fetched eagerly in the constructor (no incremental fetching).

    def __init__(self, table, parent=None):
        TableDataModel.__init__(self, table, parent)

        fields_txt = u", ".join(self.fields)
        table_txt = self.db.quoteId((self.table.schemaName(), self.table.name))

        # run query and get results
        sql = u"SELECT %s FROM %s" % (fields_txt, table_txt)
        c = self.db._get_cursor()
        self.db._execute(c, sql)

        self.resdata = self.db._fetchall(c)
        c.close()
        del c

        self.fetchedFrom = 0
        self.fetchedCount = len(self.resdata)

    def _sanitizeTableField(self, field):
        # get fields, ignore geometry columns
        dataType = field.dataType.upper()
        # Strip MULTI/25D/COLLECTION qualifiers to recognize the base
        # geometry type name.
        if dataType[:5] == "MULTI":
            dataType = dataType[5:]
        if dataType[-3:] == "25D":
            dataType = dataType[:-3]
        if dataType[-10:] == "COLLECTION":
            dataType = dataType[:-10]
        if dataType in ["POINT", "LINESTRING", "POLYGON", "GEOMETRY"]:
            # Select the geometry type name instead of the binary value.
            return u'GeometryType(%s)' % self.db.quoteId(field.name)
        return self.db.quoteId(field.name)

    def rowCount(self, index=None):
        # All rows were fetched up front, so the count is fixed.
        return self.fetchedCount
class SLSqlResultModel(SqlResultModel):
    # The generic SQL result model needs no SpatiaLite-specific behavior.
    pass
| gpl-2.0 |
AmperificSuperKANG/lge-kernel-d802 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that transparently creates nested dicts on access."""
    return defaultdict(autodict)

# Per-event, per-field registries populated by the define_* callbacks
# emitted from the trace metadata.
flag_fields = autodict()
symbolic_fields = autodict()

def define_flag_field(event_name, field_name, delim):
    flag_fields[event_name][field_name]['delim'] = delim

def define_flag_value(event_name, field_name, value, field_str):
    flag_fields[event_name][field_name]['values'][value] = field_str

def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass

def define_symbolic_value(event_name, field_name, value, field_str):
    symbolic_fields[event_name][field_name]['values'][value] = field_str

def flag_str(event_name, field_name, value):
    """Render a flag-typed field value as delim-joined symbolic names."""
    string = ""

    if flag_fields[event_name][field_name]:
        print_delim = 0
        # sorted() replaces the Python 2-only "keys = d.keys(); keys.sort()"
        # pair so the script also runs under Python 3 bindings; the
        # iteration order is identical.
        keys = sorted(flag_fields[event_name][field_name]['values'].keys())
        for idx in keys:
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                value &= ~idx

    return string

def symbol_str(event_name, field_name, value):
    """Map a symbolic field value to its defined name ("" if undefined)."""
    string = ""

    if symbolic_fields[event_name][field_name]:
        keys = sorted(symbolic_fields[event_name][field_name]['values'].keys())
        for idx in keys:
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break

    return string
# Bit values of the common flags field of a trace record.
trace_flags = {
    0x00: "NONE",
    0x01: "IRQS_OFF",
    0x02: "IRQS_NOSUPPORT",
    0x04: "NEED_RESCHED",
    0x08: "HARDIRQ",
    0x10: "SOFTIRQ",
}

def trace_flag_str(value):
    """Render the trace flag bits of *value* as 'A | B | ...'."""
    string = ""
    print_delim = 0

    # Iterate in sorted order so the rendered string is deterministic
    # (plain dict key order was arbitrary on older Pythons).
    for idx in sorted(trace_flags):
        if not value and not idx:
            string += "NONE"
            break

        if idx and (value & idx) == idx:
            if print_delim:
                string += " | "
            string += trace_flags[idx]
            print_delim = 1
            value &= ~idx

    return string
def taskState(state):
    """Map a scheduler state value to its letter code ('Unknown' if unmapped)."""
    states = {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD"
    }
    # dict.get with a default replaces the explicit membership test.
    return states.get(state, "Unknown")
class EventHeaders:
    """Common per-event header fields of a perf trace record."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Full event timestamp in nanoseconds."""
        nsecs_per_sec = 10 ** 9
        return self.nsecs + self.secs * nsecs_per_sec

    def ts_format(self):
        """Timestamp formatted as 'seconds.microseconds'."""
        usecs = int(self.nsecs / 1000)
        return "%d.%d" % (self.secs, usecs)
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/Django-1.9-py2.7.egg/django/http/multipartparser.py | 332 | 24331 | """
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
from __future__ import unicode_literals
import base64
import binascii
import cgi
import sys
from django.conf import settings
from django.core.exceptions import SuspiciousMultipartForm
from django.core.files.uploadhandler import (
SkipFile, StopFutureHandlers, StopUpload,
)
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import unquote
from django.utils.text import unescape_entities
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
    # Raised for malformed multipart input: bad Content-Type, boundary,
    # Content-Length or base64-encoded chunk.
    pass


class InputStreamExhausted(Exception):
    """
    No more reads are allowed from this device.
    """
    pass
# Item types yielded by the multipart Parser.
RAW = "raw"
FILE = "file"
FIELD = "field"

# base64.b64decode raises TypeError on Python 2 but binascii.Error on
# Python 3 for malformed input.
_BASE64_DECODE_ERROR = TypeError if six.PY2 else binascii.Error
class MultiPartParser(object):
"""
A rfc2388 multipart/form-data parser.
``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks
and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
:upload_handlers:
A list of UploadHandler instances that perform operations on the uploaded
data.
:encoding:
The encoding with which to treat the incoming data.
"""
#
# Content-Type should contain multipart and the boundary information.
#
content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type.encode('ascii'))
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
# Content-Length should contain the length of the body we are about
# to receive.
try:
content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
except (ValueError, TypeError):
content_length = 0
if content_length < 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
if isinstance(boundary, six.text_type):
boundary = boundary.encode('ascii')
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2 ** 31 - 4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Returns a tuple containing the POST and FILES dictionary, respectively.
"""
# We have to import QueryDict down here to avoid a circular import.
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
# HTTP spec says that Content-Length >= 0 is valid
# handling content-length == 0 before continuing
if self._content_length == 0:
return QueryDict('', encoding=self._encoding), MultiValueDict()
# See if any of the handlers take care of the parsing.
# This allows overriding everything if need be.
for handler in handlers:
result = handler.handle_raw_input(self._input_data,
self._meta,
self._content_length,
self._boundary,
encoding)
# Check to see if it was handled
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict('', mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
old_field_name = None
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
if transfer_encoding is not None:
transfer_encoding = transfer_encoding[0].strip()
field_name = force_text(field_name, encoding, errors='replace')
if item_type == FIELD:
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read()
try:
data = base64.b64decode(raw_data)
except _BASE64_DECODE_ERROR:
data = raw_data
else:
data = field_stream.read()
self._post.appendlist(field_name,
force_text(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get('filename')
if not file_name:
continue
file_name = force_text(file_name, encoding, errors='replace')
file_name = self.IE_sanitize(unescape_entities(file_name))
content_type, content_type_extra = meta_data.get('content-type', ('', {}))
content_type = content_type.strip()
charset = content_type_extra.get('charset')
try:
content_length = int(meta_data.get('content-length')[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
try:
for handler in handlers:
try:
handler.new_file(field_name, file_name,
content_type, content_length,
charset, content_type_extra)
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == 'base64':
# We only special-case base64 transfer encoding
# We should always decode base64 chunks by multiple of 4,
# ignoring whitespace.
stripped_chunk = b"".join(chunk.split())
remaining = len(stripped_chunk) % 4
while remaining != 0:
over_chunk = field_stream.read(4 - remaining)
stripped_chunk += b"".join(over_chunk.split())
remaining = len(stripped_chunk) % 4
try:
chunk = base64.b64decode(stripped_chunk)
except Exception as e:
# Since this is only a chunk, any error is an unfixable error.
msg = "Could not decode base64 data: %r" % e
six.reraise(MultiPartParserError, MultiPartParserError(msg), sys.exc_info()[2])
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk,
counters[i])
counters[i] += chunk_length
if chunk is None:
# If the chunk received by the handler is None, then don't continue.
break
except SkipFile:
self._close_files()
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
# If this is neither a FIELD or a FILE, just exhaust the stream.
exhaust(stream)
except StopUpload as e:
self._close_files()
if not e.connection_reset:
exhaust(self._input_data)
else:
# Make sure that the request data is all fed
exhaust(self._input_data)
# Signal that the upload has completed.
for handler in handlers:
retval = handler.upload_complete()
if retval:
break
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
    """
    Signal every upload handler that the current file has finished arriving.

    Each handler is asked (in order) for a completed file object via
    ``file_complete(bytes_received)``; the first handler that returns one
    wins, and the file is stored in ``self._files`` under the decoded
    original field name.
    """
    for index, upload_handler in enumerate(self._upload_handlers):
        completed = upload_handler.file_complete(counters[index])
        if not completed:
            continue
        # A handler produced the final file object -- record it and stop.
        field = force_text(old_field_name, self._encoding, errors='replace')
        self._files.appendlist(field, completed)
        break
def IE_sanitize(self, filename):
    """Strip an Internet Explorer style full path down to the bare filename."""
    if not filename:
        # Preserve falsy inputs ('' or None) unchanged.
        return filename
    # IE sends "C:\dir\file.ext"; keep only what follows the last backslash.
    return filename[filename.rfind("\\") + 1:].strip()
def _close_files(self):
# Free up all file handles.
# FIXME: this currently assumes that upload handlers store the file as 'file'
# We should document that... (Maybe add handler.free_file to complement new_file)
for handler in self._upload_handlers:
if hasattr(handler, 'file'):
handler.file.close()
class LazyStream(six.Iterator):
    """
    The LazyStream wrapper allows one to get and "unget" bytes from a stream.

    Given a producer object (an iterator that yields bytestrings), the
    LazyStream object will support iteration, reading, and keeping a "look-back"
    variable in case you need to "unget" some bytes.
    """
    def __init__(self, producer, length=None):
        """
        Every LazyStream must have a producer when instantiated.

        A producer is an iterable that returns a string each time it
        is called.
        """
        self._producer = producer
        # NOTE(review): _empty is assigned but never read within this class.
        self._empty = False
        # Bytes pushed back via unget(); served before pulling from producer.
        self._leftover = b''
        self.length = length
        # Absolute offset of the next byte to be produced (rewound by unget()).
        self.position = 0
        self._remaining = length
        # Sizes of recent unget() calls, used to detect a stuck parser
        # (see _update_unget_history).
        self._unget_history = []

    def tell(self):
        # Current absolute position in the stream (see unget() for rewinds).
        return self.position

    def read(self, size=None):
        """Read up to ``size`` bytes; with no size, read until exhaustion."""
        def parts():
            # With no explicit size, fall back to the remaining budget
            # (None means "unbounded").
            remaining = self._remaining if size is None else size
            # do the whole thing in one shot if no limit was provided.
            if remaining is None:
                yield b''.join(self)
                return
            # otherwise do some bookkeeping to return exactly enough
            # of the stream and stashing any extra content we get from
            # the producer
            while remaining != 0:
                assert remaining > 0, 'remaining bytes to read should never go negative'
                try:
                    chunk = next(self)
                except StopIteration:
                    return
                else:
                    # Emit only what was asked for; push the surplus back.
                    emitting = chunk[:remaining]
                    self.unget(chunk[remaining:])
                    remaining -= len(emitting)
                    yield emitting
        out = b''.join(parts())
        return out

    def __next__(self):
        """
        Used when the exact number of bytes to read is unimportant.

        This procedure just returns whatever chunk is conveniently returned
        from the iterator instead. Useful to avoid unnecessary bookkeeping if
        performance is an issue.
        """
        if self._leftover:
            # Serve previously unget() data first.
            output = self._leftover
            self._leftover = b''
        else:
            output = next(self._producer)
            # Fresh data from the producer resets the livelock detector.
            self._unget_history = []
        self.position += len(output)
        return output

    def close(self):
        """
        Used to invalidate/disable this lazy stream.

        Replaces the producer with an empty list. Any leftover bytes that have
        already been read will still be reported upon read() and/or next().
        """
        self._producer = []

    def __iter__(self):
        return self

    def unget(self, bytes):
        """
        Places bytes back onto the front of the lazy stream.

        Future calls to read() will return those bytes first. The
        stream position and thus tell() will be rewound.
        """
        # NOTE(review): parameter shadows the ``bytes`` builtin; kept for
        # signature compatibility.
        if not bytes:
            return
        self._update_unget_history(len(bytes))
        self.position -= len(bytes)
        self._leftover = b''.join([bytes, self._leftover])

    def _update_unget_history(self, num_bytes):
        """
        Updates the unget history as a sanity check to see if we've pushed
        back the same number of bytes in one chunk. If we keep ungetting the
        same number of bytes many times (here, 50), we're mostly likely in an
        infinite loop of some sort. This is usually caused by a
        maliciously-malformed MIME request.
        """
        # Keep only the 50 most recent unget sizes.
        self._unget_history = [num_bytes] + self._unget_history[:49]
        number_equal = len([current_number for current_number in self._unget_history
                            if current_number == num_bytes])

        # More than 40 identical pushbacks out of the last 50: assume livelock.
        if number_equal > 40:
            raise SuspiciousMultipartForm(
                "The multipart parser got stuck, which shouldn't happen with"
                " normal uploaded files. Check for malicious upload activity;"
                " if there is none, report this to the Django developers."
            )
class ChunkIter(six.Iterator):
    """
    Iterate over a file-like object in fixed-size chunks.

    Wraps ``flo`` (anything with a ``read(size)`` method) and yields the
    result of successive read calls until the stream is exhausted.
    """

    def __init__(self, flo, chunk_size=64 * 1024):
        self.flo = flo
        self.chunk_size = chunk_size

    def __iter__(self):
        return self

    def __next__(self):
        try:
            chunk = self.flo.read(self.chunk_size)
        except InputStreamExhausted:
            raise StopIteration()
        if not chunk:
            # An empty read means end-of-stream.
            raise StopIteration()
        return chunk
class InterBoundaryIter(six.Iterator):
    """
    A producer that iterates over multipart boundaries.

    Each call to next() wraps the portion of the stream up to the next
    boundary in its own LazyStream.
    """

    def __init__(self, stream, boundary):
        self._stream = stream
        self._boundary = boundary

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return LazyStream(BoundaryIter(self._stream, self._boundary))
        except InputStreamExhausted:
            # No more parts left in the underlying stream.
            raise StopIteration()
class BoundaryIter(six.Iterator):
    """
    A Producer that is sensitive to boundaries.

    Will happily yield bytes until a boundary is found. Will yield the bytes
    before the boundary, throw away the boundary bytes themselves, and push the
    post-boundary bytes back on the stream.

    The future calls to next() after locating the boundary will raise a
    StopIteration exception.
    """

    def __init__(self, stream, boundary):
        self._stream = stream
        self._boundary = boundary
        self._done = False
        # rollback an additional six bytes because the format is like
        # this: CRLF<boundary>[--CRLF]
        self._rollback = len(boundary) + 6

        # Try to use mx fast string search if available. Otherwise
        # use Python find. Wrap the latter for consistency.
        unused_char = self._stream.read(1)
        if not unused_char:
            # Completely empty stream: there is no part here at all.
            raise InputStreamExhausted()
        self._stream.unget(unused_char)

    def __iter__(self):
        return self

    def __next__(self):
        if self._done:
            raise StopIteration()

        stream = self._stream
        rollback = self._rollback

        bytes_read = 0
        chunks = []
        # Accumulate more than `rollback` bytes so a boundary straddling a
        # chunk edge cannot be missed.
        for bytes in stream:
            bytes_read += len(bytes)
            chunks.append(bytes)
            if bytes_read > rollback:
                break
            if not bytes:
                break
        else:
            # Producer exhausted: whatever we hold is the final block.
            self._done = True

        if not chunks:
            raise StopIteration()

        chunk = b''.join(chunks)
        boundary = self._find_boundary(chunk, len(chunk) < self._rollback)

        if boundary:
            end, next = boundary
            # Push everything after the boundary back for the next part.
            stream.unget(chunk[next:])
            self._done = True
            return chunk[:end]
        else:
            # make sure we don't treat a partial boundary (and
            # its separators) as data
            if not chunk[:-rollback]:  # and len(chunk) >= (len(self._boundary) + 6):
                # There's nothing left, we should just return and mark as done.
                self._done = True
                return chunk
            else:
                # Hold back the last `rollback` bytes in case they contain
                # the start of the next boundary.
                stream.unget(chunk[-rollback:])
                return chunk[:-rollback]

    def _find_boundary(self, data, eof=False):
        """
        Finds a multipart boundary in data.

        Should no boundary exist in the data None is returned instead. Otherwise
        a tuple containing the indices of the following are returned:

         * the end of current encapsulation
         * the start of the next encapsulation

        NOTE(review): the ``eof`` argument is currently unused.
        """
        index = data.find(self._boundary)
        if index < 0:
            return None
        else:
            end = index
            next = index + len(self._boundary)
            # backup over CRLF
            last = max(0, end - 1)
            if data[last:last + 1] == b'\n':
                end -= 1
            last = max(0, end - 1)
            if data[last:last + 1] == b'\r':
                end -= 1
            return end, next
def exhaust(stream_or_iterable):
    """
    Completely exhausts an iterator or stream.

    Raise a MultiPartParserError if the argument is not a stream or an iterable.
    """
    iterator = None
    try:
        iterator = iter(stream_or_iterable)
    except TypeError:
        # Not directly iterable: assume a stream and chunk it ourselves.
        iterator = ChunkIter(stream_or_iterable, 16384)

    if iterator is None:
        raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')

    # Drain everything; the contents are deliberately discarded.
    for _unused in iterator:
        pass
def parse_boundary_stream(stream, max_header_size):
    """
    Parses one and exactly one stream that encapsulates a boundary.

    Returns a ``(type, headers, stream)`` triple where ``type`` is RAW,
    FIELD or FILE depending on the part's Content-Disposition header.
    """
    # Stream at beginning of header, look for end of header
    # and parse it if found. The header must fit within one
    # chunk.
    chunk = stream.read(max_header_size)

    # 'find' returns the top of these four bytes, so we'll
    # need to munch them later to prevent them from polluting
    # the payload.
    header_end = chunk.find(b'\r\n\r\n')

    def _parse_header(line):
        # Split "Name: value" after delegating parameter parsing.
        main_value_pair, params = parse_header(line)
        try:
            name, value = main_value_pair.split(':', 1)
        except ValueError:
            raise ValueError("Invalid header: %r" % line)
        return name, (value, params)

    if header_end == -1:
        # we find no header, so we just mark this fact and pass on
        # the stream verbatim
        stream.unget(chunk)
        return (RAW, {}, stream)

    header = chunk[:header_end]

    # here we place any excess chunk back onto the stream, as
    # well as throwing away the CRLFCRLF bytes from above.
    stream.unget(chunk[header_end + 4:])

    TYPE = RAW
    outdict = {}

    # Eliminate blank lines
    for line in header.split(b'\r\n'):
        # This terminology ("main value" and "dictionary of
        # parameters") is from the Python docs.
        try:
            name, (value, params) = _parse_header(line)
        except ValueError:
            continue

        if name == 'content-disposition':
            TYPE = FIELD
            if params.get('filename'):
                TYPE = FILE

        outdict[name] = value, params

    if TYPE == RAW:
        # NOTE(review): the tail of ``chunk`` was already unget() above, so
        # pushing the whole chunk here re-queues those trailing bytes a
        # second time -- verify this duplication is intended for RAW parts.
        stream.unget(chunk)

    return (TYPE, outdict, stream)
class Parser(object):
    """Iterate over the parts of a multipart stream.

    Splits ``stream`` on ``--boundary`` markers and yields the parsed
    ``(type, headers, substream)`` triple for every part.
    """

    def __init__(self, stream, boundary):
        self._stream = stream
        self._separator = b'--' + boundary

    def __iter__(self):
        for sub_stream in InterBoundaryIter(self._stream, self._separator):
            # Parse each part's headers (at most 1024 bytes of them).
            yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
    """ Parse the header into a key-value.

    Input (line): bytes, output: unicode for key/name, bytes for value which
    will be decoded later
    """
    plist = _parse_header_params(b';' + line)
    # First chunk is the header's main value, e.g. b'form-data'.
    key = plist.pop(0).lower().decode('ascii')
    pdict = {}
    for p in plist:
        i = p.find(b'=')
        if i >= 0:
            has_encoding = False
            name = p[:i].strip().lower().decode('ascii')
            if name.endswith('*'):
                # Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext")
                # http://tools.ietf.org/html/rfc2231#section-4
                name = name[:-1]
                if p.count(b"'") == 2:
                    has_encoding = True
            value = p[i + 1:].strip()
            if has_encoding:
                # value is "<charset>'<lang>'<percent-encoded-data>".
                encoding, lang, value = value.split(b"'")
                if six.PY3:
                    value = unquote(value.decode(), encoding=encoding.decode())
                else:
                    value = unquote(value).decode(encoding)
            # Strip surrounding double quotes and unescape \\ and \".
            # NOTE(review): after the RFC 2231 branch above, ``value`` is a
            # text string, so this bytes comparison is False and extended
            # values keep their quotes -- confirm this is intended.
            if len(value) >= 2 and value[:1] == value[-1:] == b'"':
                value = value[1:-1]
                value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
            pdict[name] = value
    return key, pdict
def _parse_header_params(s):
plist = []
while s[:1] == b';':
s = s[1:]
end = s.find(b';')
while end > 0 and s.count(b'"', 0, end) % 2:
end = s.find(b';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
| artistic-2.0 |
kaiweifan/horizon | openstack_dashboard/dashboards/project/instances/forms.py | 9 | 3902 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse # noqa
from django.template.defaultfilters import filesizeformat # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from django.views.decorators.debug import sensitive_variables # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import fields
from horizon.utils import validators
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images_and_snapshots import utils
def _image_choice_title(img):
    """Label an image choice as ``<display name> (<human-readable size>)``."""
    size = filesizeformat(img.bytes)
    return '%s (%s)' % (img.display_name, size)
class RebuildInstanceForm(forms.SelfHandlingForm):
    """Self-handling form that rebuilds a Nova server from a chosen image,
    optionally setting a new rebuild password."""

    # Target server id, carried through the form as a hidden field.
    instance_id = forms.CharField(widget=forms.HiddenInput())

    # Image to rebuild from; choices are populated in __init__.
    image = forms.ChoiceField(label=_("Select Image"),
                              widget=fields.SelectWidget(attrs={'class': 'image-selector'},
                                                         data_attrs=('size', 'display-name'),
                                                         transform=_image_choice_title))
    password = forms.RegexField(label=_("Rebuild Password"),
                                required=False,
                                widget=forms.PasswordInput(render_value=False),
                                regex=validators.password_validator(),
                                error_messages={'invalid': validators.password_validator_msg()})
    confirm_password = forms.CharField(label=_("Confirm Rebuild Password"),
                                       required=False,
                                       widget=forms.PasswordInput(render_value=False))

    def __init__(self, request, *args, **kwargs):
        """Populate the image choices with images visible to the tenant."""
        super(RebuildInstanceForm, self).__init__(request, *args, **kwargs)
        instance_id = kwargs.get('initial', {}).get('instance_id')
        self.fields['instance_id'].initial = instance_id

        images = utils.get_available_images(request, request.user.tenant_id)
        choices = [(image.id, image.name) for image in images]
        # Always insert a leading placeholder entry.
        if choices:
            choices.insert(0, ("", _("Select Image")))
        else:
            choices.insert(0, ("", _("No images available.")))
        self.fields['image'].choices = choices

    def clean(self):
        # Reject mismatched password/confirmation pairs.
        cleaned_data = super(RebuildInstanceForm, self).clean()

        if 'password' in cleaned_data:
            passwd = cleaned_data.get('password')
            confirm = cleaned_data.get('confirm_password')
            if passwd is not None and confirm is not None:
                if passwd != confirm:
                    raise forms.ValidationError(_("Passwords do not match."))
        return cleaned_data

    # We have to protect the entire "data" dict because it contains the
    # password and confirm_password strings.
    @sensitive_variables('data', 'password')
    def handle(self, request, data):
        """Issue the Nova rebuild call and report success or failure."""
        instance = data.get('instance_id')
        image = data.get('image')
        # Empty string means "no password"; normalise it to None.
        password = data.get('password') or None

        try:
            api.nova.server_rebuild(request, instance, image, password)
            messages.success(request, _('Rebuilding instance %s.') % instance)
        except Exception:
            redirect = reverse('horizon:project:instances:index')
            exceptions.handle(request, _("Unable to rebuild instance."),
                              redirect=redirect)
        return True
| apache-2.0 |
wrouesnel/ansible | lib/ansible/modules/network/ios/ios_static_route.py | 12 | 7396 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_static_route
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage static IP routes on Cisco IOS network devices
description:
- This module provides declarative management of static
IP routes on Cisco IOS network devices.
notes:
- Tested against IOS 15.6
requirements:
- Python >= 3.3 or C(ipaddress) python package
options:
prefix:
description:
- Network prefix of the static route.
mask:
description:
- Network prefix mask of the static route.
next_hop:
description:
- Next hop IP of the static route.
admin_distance:
description:
- Admin distance of the static route.
default: 1
aggregate:
description: List of static route definitions.
state:
description:
- State of the static route configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: ios
"""
EXAMPLES = """
- name: configure static route
ios_static_route:
prefix: 192.168.2.0
mask: 255.255.255.0
next_hop: 10.0.0.1
- name: remove configuration
ios_static_route:
prefix: 192.168.2.0
mask: 255.255.255.0
next_hop: 10.0.0.1
state: absent
- name: Add static route aggregates
ios_static_route:
aggregate:
- { prefix: 172.16.32.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
- { prefix: 172.16.33.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
- name: Add static route aggregates
ios_static_route:
aggregate:
- { prefix: 172.16.32.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
- { prefix: 172.16.33.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- ip route 192.168.2.0 255.255.255.0 10.0.0.1
"""
from copy import deepcopy
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import exec_command
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.ios.ios import load_config, run_commands
from ansible.module_utils.network.ios.ios import ios_argument_spec, check_args
import re
try:
from ipaddress import ip_network
HAS_IPADDRESS = True
except ImportError:
HAS_IPADDRESS = False
def map_obj_to_commands(updates, module):
    """Build the IOS config lines needed to move ``have`` towards ``want``.

    ``updates`` is a ``(want, have)`` pair of lists of route dicts.
    ``module`` is accepted for signature compatibility but unused here.
    Note: each ``want`` entry has its 'state' key removed as a side
    effect so the remainder can be compared against ``have`` entries.
    """
    want, have = updates
    commands = []

    for desired in want:
        state = desired.pop('state')

        if state == 'absent' and desired in have:
            commands.append('no ip route %s %s %s'
                            % (desired['prefix'], desired['mask'],
                               desired['next_hop']))
        elif state == 'present' and desired not in have:
            commands.append('ip route %s %s %s %s'
                            % (desired['prefix'], desired['mask'],
                               desired['next_hop'], desired['admin_distance']))

    return commands
def map_config_to_obj(module):
    """Return the static routes currently configured on the device.

    Runs ``show ip static route`` and parses each manually-configured
    ('M' code) entry into a dict with ``prefix``, ``mask``, ``next_hop``
    and ``admin_distance`` keys (all strings, matching the shape produced
    by map_params_to_obj() so the two can be compared directly).
    """
    obj = []

    rc, out, err = exec_command(module, 'show ip static route')
    match = re.search(r'.*Static local RIB for default\s*(.*)$', out, re.DOTALL)

    if match and match.group(1):
        for r in match.group(1).splitlines():
            splitted_line = r.split()

            # Skip blank lines and anything that is not a manually
            # configured ('M') route.
            if not splitted_line or splitted_line[0] != 'M':
                continue

            cidr = ip_network(to_text(splitted_line[1]))
            prefix = str(cidr.network_address)
            mask = str(cidr.netmask)
            next_hop = splitted_line[4]
            # The third column looks like '[<distance>/<metric>]'.
            # BUG FIX: the original used ``splitted_line[2][1]``, which only
            # takes one character and therefore truncates multi-digit
            # distances (e.g. '[200/0]' -> '2').
            admin_distance = splitted_line[2].strip('[]').split('/')[0]

            obj.append({'prefix': prefix, 'mask': mask,
                        'next_hop': next_hop,
                        'admin_distance': admin_distance})

    return obj
def map_params_to_obj(module, required_together=None):
    """Normalise the module parameters into a list of route dicts.

    With an ``aggregate`` parameter, each list item is filled in from the
    top-level defaults and validated; otherwise a single route dict is
    built from the top-level prefix/mask/next_hop parameters.
    ``admin_distance`` is always rendered as a string.
    """
    params = module.params
    aggregate = params.get('aggregate')

    if not aggregate:
        # Single-route invocation: build one dict from top-level params.
        return [{
            'prefix': params['prefix'].strip(),
            'mask': params['mask'].strip(),
            'next_hop': params['next_hop'].strip(),
            'admin_distance': str(params['admin_distance']),
            'state': params['state'],
        }]

    obj = []
    for item in aggregate:
        # Fall back to the top-level parameter for any key the aggregate
        # entry leaves unset.
        for key in item:
            if item.get(key) is None:
                item[key] = params[key]

        module._check_required_together(required_together, item)
        route = item.copy()
        route['admin_distance'] = str(params['admin_distance'])
        obj.append(route)

    return obj
def main():
    """ main entry point for module execution
    """
    # Per-route options; also accepted at top level as defaults for aggregate.
    element_spec = dict(
        prefix=dict(type='str'),
        mask=dict(type='str'),
        next_hop=dict(type='str'),
        admin_distance=dict(default=1, type='int'),
        state=dict(default='present', choices=['present', 'absent'])
    )

    aggregate_spec = deepcopy(element_spec)
    aggregate_spec['prefix'] = dict(required=True)

    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)

    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec),
    )

    argument_spec.update(element_spec)
    argument_spec.update(ios_argument_spec)

    # 'aggregate' and 'prefix' are alternative, mutually exclusive entry points.
    required_one_of = [['aggregate', 'prefix']]
    required_together = [['prefix', 'mask', 'next_hop']]
    mutually_exclusive = [['aggregate', 'prefix']]

    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=required_one_of,
                           required_together=required_together,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)

    # Parsing device output requires ip_network (py3 stdlib / py2 backport).
    if not HAS_IPADDRESS:
        module.fail_json(msg="ipaddress python package is required")

    warnings = list()
    check_args(module, warnings)

    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings

    # Desired state from parameters, current state from the device,
    # then the delta expressed as IOS config commands.
    want = map_params_to_obj(module, required_together=required_together)
    have = map_config_to_obj(module)

    commands = map_obj_to_commands((want, have), module)
    result['commands'] = commands

    if commands:
        # Only push config when not in check mode; report changed either way.
        if not module.check_mode:
            load_config(module, commands)
        result['changed'] = True

    module.exit_json(**result)
# Run only when executed directly (as Ansible does), not on import.
if __name__ == '__main__':
    main()
| gpl-3.0 |
t0mm0/youtube-dl | youtube_dl/extractor/teachertube.py | 148 | 4651 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
qualities,
determine_ext,
)
class TeacherTubeIE(InfoExtractor):
    """Extractor for individual teachertube.com video/audio pages."""

    IE_NAME = 'teachertube'
    IE_DESC = 'teachertube.com videos'

    _VALID_URL = r'https?://(?:www\.)?teachertube\.com/(viewVideo\.php\?video_id=|music\.php\?music_id=|video/(?:[\da-z-]+-)?|audio/)(?P<id>\d+)'

    _TESTS = [{
        'url': 'http://www.teachertube.com/viewVideo.php?video_id=339997',
        'md5': 'f9434ef992fd65936d72999951ee254c',
        'info_dict': {
            'id': '339997',
            'ext': 'mp4',
            'title': 'Measures of dispersion from a frequency table',
            'description': 'Measures of dispersion from a frequency table',
            'thumbnail': 're:http://.*\.jpg',
        },
    }, {
        'url': 'http://www.teachertube.com/viewVideo.php?video_id=340064',
        'md5': '0d625ec6bc9bf50f70170942ad580676',
        'info_dict': {
            'id': '340064',
            'ext': 'mp4',
            'title': 'How to Make Paper Dolls _ Paper Art Projects',
            'description': 'Learn how to make paper dolls in this simple',
            'thumbnail': 're:http://.*\.jpg',
        },
    }, {
        'url': 'http://www.teachertube.com/music.php?music_id=8805',
        'md5': '01e8352006c65757caf7b961f6050e21',
        'info_dict': {
            'id': '8805',
            'ext': 'mp3',
            'title': 'PER ASPERA AD ASTRA',
            'description': 'RADIJSKA EMISIJA ZRAKOPLOVNE TEHNI?KE ?KOLE P',
        },
    }, {
        'url': 'http://www.teachertube.com/video/intro-video-schleicher-297790',
        'md5': '9c79fbb2dd7154823996fc28d4a26998',
        'info_dict': {
            'id': '297790',
            'ext': 'mp4',
            'title': 'Intro Video - Schleicher',
            'description': 'Intro Video - Why to flip, how flipping will',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._html_search_meta('title', webpage, 'title', fatal=True)
        # Drop the site suffix TeacherTube appends to its page titles.
        TITLE_SUFFIX = ' - TeacherTube'
        if title.endswith(TITLE_SUFFIX):
            title = title[:-len(TITLE_SUFFIX)].strip()

        description = self._html_search_meta('description', webpage, 'description')
        if description:
            description = description.strip()

        # Preference order for format sorting: mp4 > flv > mp3.
        quality = qualities(['mp3', 'flv', 'mp4'])

        # Media URLs appear in several different page layouts; collect all
        # three known patterns and de-duplicate below.
        media_urls = re.findall(r'data-contenturl="([^"]+)"', webpage)
        media_urls.extend(re.findall(r'var\s+filePath\s*=\s*"([^"]+)"', webpage))
        media_urls.extend(re.findall(r'\'file\'\s*:\s*["\']([^"\']+)["\'],', webpage))

        formats = [
            {
                'url': media_url,
                'quality': quality(determine_ext(media_url))
            } for media_url in set(media_urls)
        ]

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': self._html_search_regex(r'\'image\'\s*:\s*["\']([^"\']+)["\']', webpage, 'thumbnail'),
            'formats': formats,
            'description': description,
        }
class TeacherTubeUserIE(InfoExtractor):
    """Playlist extractor for teachertube.com user profiles and collections."""

    IE_NAME = 'teachertube:user:collection'
    IE_DESC = 'teachertube.com user and collection videos'

    _VALID_URL = r'https?://(?:www\.)?teachertube\.com/(user/profile|collection)/(?P<user>[0-9a-zA-Z]+)/?'

    # Matches the duration div followed by the media link in listing pages.
    _MEDIA_RE = r'''(?sx)
        class="?sidebar_thumb_time"?>[0-9:]+</div>
        \s*
        <a\s+href="(https?://(?:www\.)?teachertube\.com/(?:video|audio)/[^"]+)"
    '''
    _TEST = {
        'url': 'http://www.teachertube.com/user/profile/rbhagwati2',
        'info_dict': {
            'id': 'rbhagwati2'
        },
        'playlist_mincount': 179,
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user_id = mobj.group('user')

        urls = []
        webpage = self._download_webpage(url, user_id)
        urls.extend(re.findall(self._MEDIA_RE, webpage))

        # Collect the AJAX pagination page numbers, dropping the last entry.
        # NOTE(review): confirm the [:-1] is intentional (e.g. a duplicate
        # "last page" link) and not an off-by-one that skips a page.
        pages = re.findall(r'/ajax-user/user-videos/%s\?page=([0-9]+)' % user_id, webpage)[:-1]

        for p in pages:
            more = 'http://www.teachertube.com/ajax-user/user-videos/%s?page=%s' % (user_id, p)
            webpage = self._download_webpage(more, user_id, 'Downloading page %s/%s' % (p, len(pages)))

            video_urls = re.findall(self._MEDIA_RE, webpage)
            urls.extend(video_urls)

        entries = [self.url_result(vurl, 'TeacherTube') for vurl in urls]
        return self.playlist_result(entries, user_id)
| unlicense |
hrh5775/LibraryManager | PythonTestClient/LibraryManagerTestClient/venv/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py | 395 | 25647 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
from . import tarfile
try:
import bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format",
"get_unpack_formats", "register_unpack_format",
"unregister_unpack_format", "unpack_archive", "ignore_patterns"]
class Error(EnvironmentError):
    """Base error for copy/move operations; copytree() raises it with a
    list of (src, dst, reason) tuples as its argument."""
class SpecialFileError(EnvironmentError):
    """Raised when trying to do a kind of operation (e.g. copying) which is
    not supported on a special file (e.g. a named pipe).

    See copyfile(), which raises this for FIFOs."""
class ExecError(EnvironmentError):
    # NOTE(review): not raised anywhere in this chunk; kept for API compat.
    """Raised when a command could not be executed"""
class ReadError(EnvironmentError):
    # NOTE(review): not raised anywhere in this chunk; used by archive helpers.
    """Raised when an archive cannot be read"""
class RegistryError(Exception):
    """Raised when a registry operation with the archiving
    and unpacking registries fails"""
    # NOTE(review): plain Exception (not EnvironmentError) by design --
    # registry misuse is a programming error, not an OS failure.
# WindowsError only exists on Windows; define a None placeholder elsewhere
# so later code can guard with "WindowsError is not None" before any
# isinstance() check (see copytree()).
try:
    WindowsError
except NameError:
    WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
    """copy data from file-like object fsrc to file-like object fdst"""
    while True:
        chunk = fsrc.read(length)
        if not chunk:
            # Empty read: source exhausted.
            return
        fdst.write(chunk)
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst):
    """Copy data from src to dst"""
    if _samefile(src, dst):
        raise Error("`%s` and `%s` are the same file" % (src, dst))

    # Refuse to operate on named pipes, where a plain open() could block.
    for path in [src, dst]:
        try:
            st = os.stat(path)
        except OSError:
            # File most likely does not exist; nothing to inspect.
            continue
        # XXX What about other special files? (sockets, devices...)
        if stat.S_ISFIFO(st.st_mode):
            raise SpecialFileError("`%s` is a named pipe" % path)

    with open(src, 'rb') as fsrc:
        with open(dst, 'wb') as fdst:
            copyfileobj(fsrc, fdst)
def copymode(src, dst):
    """Copy mode bits from src to dst"""
    if not hasattr(os, 'chmod'):
        # Platform cannot change modes; silently do nothing, as before.
        return
    mode = stat.S_IMODE(os.stat(src).st_mode)
    os.chmod(dst, mode)
def copystat(src, dst):
    """Copy all stat info (mode bits, atime, mtime, flags) from src to dst"""
    st = os.stat(src)
    if hasattr(os, 'utime'):
        # Preserve access and modification times.
        os.utime(dst, (st.st_atime, st.st_mtime))
    if hasattr(os, 'chmod'):
        # Preserve permission bits.
        os.chmod(dst, stat.S_IMODE(st.st_mode))
    if hasattr(os, 'chflags') and hasattr(st, 'st_flags'):
        # Preserve BSD file flags where the platform supports them;
        # ignore "operation not supported" from the target filesystem.
        try:
            os.chflags(dst, st.st_flags)
        except OSError as why:
            unsupported = (hasattr(errno, 'EOPNOTSUPP') and
                           why.errno == errno.EOPNOTSUPP)
            if not unsupported:
                raise
def copy(src, dst):
    """Copy data and mode bits ("cp src dst").

    The destination may be a directory.
    """
    target = dst
    if os.path.isdir(target):
        # Copy into the directory under the source's own basename.
        target = os.path.join(target, os.path.basename(src))
    copyfile(src, target)
    copymode(src, target)
def copy2(src, dst):
    """Copy data and all stat info ("cp -p src dst").

    The destination may be a directory.
    """
    target = dst
    if os.path.isdir(target):
        # Copy into the directory under the source's own basename.
        target = os.path.join(target, os.path.basename(src))
    copyfile(src, target)
    copystat(src, target)
def ignore_patterns(*patterns):
    """Function that can be used as copytree() ignore parameter.

    Patterns is a sequence of glob-style patterns
    that are used to exclude files"""
    def _ignore_patterns(path, names):
        # Union of every name matching any of the glob patterns.
        matched = set()
        for pattern in patterns:
            matched.update(fnmatch.filter(names, pattern))
        return matched
    return _ignore_patterns
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
             ignore_dangling_symlinks=False):
    """Recursively copy a directory tree.

    The destination directory must not already exist.
    If exception(s) occur, an Error is raised with a list of reasons.

    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied. If the file pointed by the symlink doesn't
    exist, an exception will be added in the list of errors raised in
    an Error exception at the end of the copy process.

    You can set the optional ignore_dangling_symlinks flag to true if you
    want to silence this exception. Notice that this has no effect on
    platforms that don't support os.symlink.

    The optional ignore argument is a callable. If given, it
    is called with the `src` parameter, which is the directory
    being visited by copytree(), and `names` which is the list of
    `src` contents, as returned by os.listdir():

        callable(src, names) -> ignored_names

    Since copytree() is called recursively, the callable will be
    called once for each directory that is copied. It returns a
    list of names relative to the `src` directory that should
    not be copied.

    The optional copy_function argument is a callable that will be used
    to copy each file. It will be called with the source path and the
    destination path as arguments. By default, copy2() is used, but any
    function that supports the same signature (like copy()) can be used.
    """
    names = os.listdir(src)
    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()

    os.makedirs(dst)
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if os.path.islink(srcname):
                linkto = os.readlink(srcname)
                if symlinks:
                    os.symlink(linkto, dstname)
                else:
                    # ignore dangling symlink if the flag is on
                    if not os.path.exists(linkto) and ignore_dangling_symlinks:
                        continue
                    # otherwise let the copy occurs. copy2 will raise an error
                    copy_function(srcname, dstname)
            elif os.path.isdir(srcname):
                copytree(srcname, dstname, symlinks, ignore, copy_function)
            else:
                # Will raise a SpecialFileError for unsupported file types
                copy_function(srcname, dstname)
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except Error as err:
            errors.extend(err.args[0])
        except EnvironmentError as why:
            errors.append((srcname, dstname, str(why)))
    try:
        copystat(src, dst)
    except OSError as why:
        if WindowsError is not None and isinstance(why, WindowsError):
            # Copying file access times may fail on Windows
            pass
        else:
            # BUG FIX: was ``errors.extend((src, dst, str(why)))``, which
            # spliced three bare items into the list instead of appending
            # one (src, dst, reason) tuple like every other error above.
            errors.append((src, dst, str(why)))
    if errors:
        raise Error(errors)
def rmtree(path, ignore_errors=False, onerror=None):
    """Recursively delete a directory tree.

    If ignore_errors is set, errors are ignored; otherwise, if onerror
    is set, it is called to handle the error with arguments (func,
    path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
    path is the argument to that function that caused it to fail; and
    exc_info is a tuple returned by sys.exc_info().  If ignore_errors
    is false and onerror is None, an exception is raised.
    """
    if ignore_errors:
        # Swallow every error silently.
        def onerror(*args):
            pass
    elif onerror is None:
        # Re-raise the active exception.
        def onerror(*args):
            raise
    try:
        if os.path.islink(path):
            # symlinks to directories are forbidden, see bug #1669
            raise OSError("Cannot call rmtree on a symbolic link")
    except OSError:
        onerror(os.path.islink, path, sys.exc_info())
        # can't continue even if onerror hook returns
        return
    names = []
    try:
        names = os.listdir(path)
    except os.error:
        onerror(os.listdir, path, sys.exc_info())
    for name in names:
        fullname = os.path.join(path, name)
        try:
            mode = os.lstat(fullname).st_mode
        except os.error:
            # Treat unstat-able entries as plain files below.
            mode = 0
        if stat.S_ISDIR(mode):
            # Recurse into subdirectories before removing this level.
            rmtree(fullname, ignore_errors, onerror)
        else:
            try:
                os.remove(fullname)
            except os.error:
                onerror(os.remove, fullname, sys.exc_info())
    try:
        os.rmdir(path)
    except os.error:
        onerror(os.rmdir, path, sys.exc_info())
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
return os.path.basename(path.rstrip(os.path.sep))
def move(src, dst):
    """Recursively move a file or directory to another location. This is
    similar to the Unix "mv" command.
    If the destination is a directory or a symlink to a directory, the source
    is moved inside the directory. The destination path must not already
    exist.
    If the destination already exists but is not a directory, it may be
    overwritten depending on os.rename() semantics.
    If the destination is on our current filesystem, then rename() is used.
    Otherwise, src is copied to the destination and then removed.
    A lot more could be done here... A look at a mv.c shows a lot of
    the issues this implementation glosses over.
    """
    real_dst = dst
    if os.path.isdir(dst):
        if _samefile(src, dst):
            # We might be on a case insensitive filesystem,
            # perform the rename anyway.
            os.rename(src, dst)
            return
        # Moving *into* an existing directory: append the source's last
        # path component to form the real destination.
        real_dst = os.path.join(dst, _basename(src))
        if os.path.exists(real_dst):
            raise Error("Destination path '%s' already exists" % real_dst)
    try:
        os.rename(src, real_dst)
    except OSError:
        # rename() failed (typically a cross-device move); fall back to
        # copy-then-delete.
        if os.path.isdir(src):
            if _destinsrc(src, dst):
                raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
            copytree(src, real_dst, symlinks=True)
            rmtree(src)
        else:
            copy2(src, real_dst)
            os.unlink(src)
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
                  owner=None, group=None, logger=None):
    """Create a (possibly compressed) tar file from all the files under
    'base_dir'.
    'compress' must be "gzip" (the default), "bzip2", or None.
    'owner' and 'group' can be used to define an owner and a group for the
    archive that is being built. If not provided, the current owner and group
    will be used.
    The output tar file will be named 'base_name' + ".tar", possibly plus
    the appropriate compression extension (".gz", or ".bz2").
    Returns the output filename.
    """
    # Map the 'compress' keyword onto tarfile mode suffixes and the
    # filename extension to append.
    tar_compression = {'gzip': 'gz', None: ''}
    compress_ext = {'gzip': '.gz'}
    if _BZ2_SUPPORTED:
        tar_compression['bzip2'] = 'bz2'
        compress_ext['bzip2'] = '.bz2'
    # flags for compression program, each element of list will be an argument
    if compress is not None and compress not in compress_ext:
        raise ValueError("bad value for 'compress', or compression format not "
                         "supported : {0}".format(compress))
    archive_name = base_name + '.tar' + compress_ext.get(compress, '')
    archive_dir = os.path.dirname(archive_name)
    # Create the directory that will hold the archive, if necessary
    # (skipped under dry_run, but still logged).
    if not os.path.exists(archive_dir):
        if logger is not None:
            logger.info("creating %s", archive_dir)
        if not dry_run:
            os.makedirs(archive_dir)
    # creating the tarball
    if logger is not None:
        logger.info('Creating tar archive')
    uid = _get_uid(owner)
    gid = _get_gid(group)
    def _set_uid_gid(tarinfo):
        # tarfile filter: stamp the requested owner/group onto each member.
        if gid is not None:
            tarinfo.gid = gid
            tarinfo.gname = group
        if uid is not None:
            tarinfo.uid = uid
            tarinfo.uname = owner
        return tarinfo
    if not dry_run:
        # 'w|gz' / 'w|bz2' / 'w|' open a stream-mode tarfile for writing.
        tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
        try:
            tar.add(base_dir, filter=_set_uid_gid)
        finally:
            tar.close()
    return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
    """Create *zip_filename* from *base_dir* by spawning the external
    InfoZIP 'zip' utility.

    Fallback used by _make_zipfile when the zipfile module cannot be
    imported.  Raises ExecError when the external tool fails or is absent.
    """
    # XXX see if we want to keep an external call here
    if verbose:
        zipoptions = "-r"
    else:
        zipoptions = "-rq"
    from distutils.errors import DistutilsExecError
    from distutils.spawn import spawn
    try:
        spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
    except DistutilsExecError:
        # XXX really should distinguish between "couldn't find
        # external 'zip' command" and "zip failed".
        # Bug fix: the '%' interpolation belongs on the message string.  The
        # original applied it to the already-constructed exception object
        # (`raise ExecError(...) % zip_filename`), which raised a TypeError
        # instead of the intended ExecError.
        raise ExecError("unable to create zip file '%s': "
                        "could neither import the 'zipfile' module nor "
                        "find a standalone zip utility" % zip_filename)
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# If zipfile module is not available, try spawning an external 'zip'
# command.
try:
import zipfile
except ImportError:
zipfile = None
if zipfile is None:
_call_external_zip(base_dir, zip_filename, verbose, dry_run)
else:
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zip.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
zip.close()
return zip_filename
# Registry of built-in archive formats:
#   name -> (creator callable, [(kwarg, value), ...], description)
_ARCHIVE_FORMATS = {
    'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
    'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
    'zip': (_make_zipfile, [], "ZIP file"),
    }
# 'bztar' is registered only when the bz2 module is importable.  The
# original registered it unconditionally as well, which made
# make_archive('bztar', ...) on bz2-less builds fail late inside
# _make_tarball with a confusing "bad value for 'compress'" ValueError
# instead of a clean "unknown archive format" error.
if _BZ2_SUPPORTED:
    _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
                                "bzip2'ed tar-file")
def get_archive_formats():
    """Returns a list of supported formats for archiving and unarchiving.

    Each element of the returned sequence is a tuple (name, description),
    sorted by name.
    """
    return sorted((name, registry[2])
                  for name, registry in _ARCHIVE_FORMATS.items())
def register_archive_format(name, function, extra_args=None, description=''):
    """Registers an archive format.
    name is the name of the format. function is the callable that will be
    used to create archives. If provided, extra_args is a sequence of
    (name, value) tuples that will be passed as arguments to the callable.
    description can be provided to describe the format, and will be returned
    by the get_archive_formats() function.

    Raises TypeError when function is not callable or extra_args is not a
    sequence of 2-element (arg_name, value) pairs.
    """
    if extra_args is None:
        extra_args = []
    # Bug fix: isinstance(..., collections.Callable) breaks on Python 3.10+
    # where the abc aliases were removed from the collections namespace;
    # the callable() builtin works on every supported version.
    if not callable(function):
        raise TypeError('The %s object is not callable' % function)
    if not isinstance(extra_args, (tuple, list)):
        raise TypeError('extra_args needs to be a sequence')
    for element in extra_args:
        if not isinstance(element, (tuple, list)) or len(element) != 2:
            raise TypeError('extra_args elements are : (arg_name, value)')
    _ARCHIVE_FORMATS[name] = (function, extra_args, description)
def unregister_archive_format(name):
    """Remove *name* from the archive-format registry (KeyError if absent)."""
    _ARCHIVE_FORMATS.pop(name)
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
                 dry_run=0, owner=None, group=None, logger=None):
    """Create an archive file (eg. zip or tar).
    'base_name' is the name of the file to create, minus any format-specific
    extension; 'format' is the archive format: one of "zip", "tar", "bztar"
    or "gztar".
    'root_dir' is a directory that will be the root directory of the
    archive; ie. we typically chdir into 'root_dir' before creating the
    archive. 'base_dir' is the directory where we start archiving from;
    ie. 'base_dir' will be the common prefix of all files and
    directories in the archive. 'root_dir' and 'base_dir' both default
    to the current directory. Returns the name of the archive file.
    'owner' and 'group' are used when creating a tar archive. By default,
    uses the current owner and group.
    """
    # NOTE: os.chdir changes the working directory of the whole process,
    # so this function is not safe to call concurrently from threads that
    # depend on the cwd.
    save_cwd = os.getcwd()
    if root_dir is not None:
        if logger is not None:
            logger.debug("changing into '%s'", root_dir)
        # Resolve base_name before the chdir so a relative base_name stays
        # anchored to the caller's cwd, not root_dir.
        base_name = os.path.abspath(base_name)
        if not dry_run:
            os.chdir(root_dir)
    if base_dir is None:
        base_dir = os.curdir
    kwargs = {'dry_run': dry_run, 'logger': logger}
    try:
        format_info = _ARCHIVE_FORMATS[format]
    except KeyError:
        raise ValueError("unknown archive format '%s'" % format)
    func = format_info[0]
    # Apply the format's preset keyword arguments (e.g. compress='gzip').
    for arg, val in format_info[1]:
        kwargs[arg] = val
    if format != 'zip':
        kwargs['owner'] = owner
        kwargs['group'] = group
    try:
        filename = func(base_name, base_dir, **kwargs)
    finally:
        # Always restore the original working directory.
        if root_dir is not None:
            if logger is not None:
                logger.debug("changing back to '%s'", save_cwd)
            os.chdir(save_cwd)
    return filename
def get_unpack_formats():
    """Returns a list of supported formats for unpacking.

    Each element of the returned sequence is a tuple
    (name, extensions, description), sorted by name.
    """
    return sorted((name, info[0], info[3])
                  for name, info in _UNPACK_FORMATS.items())
def _check_unpack_options(extensions, function, extra_args):
    """Checks what gets registered as an unpacker.

    Raises RegistryError if any extension is already claimed by another
    registered format, and TypeError if *function* is not callable.
    """
    # first make sure no other unpacker is registered for this extension
    existing_extensions = {}
    for name, info in _UNPACK_FORMATS.items():
        for ext in info[0]:
            existing_extensions[ext] = name
    for extension in extensions:
        if extension in existing_extensions:
            msg = '%s is already registered for "%s"'
            raise RegistryError(msg % (extension,
                                       existing_extensions[extension]))
    # Bug fix: isinstance(..., collections.Callable) breaks on Python 3.10+
    # (the abc aliases were removed from collections); callable() is the
    # portable check.
    if not callable(function):
        raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
                           description=''):
    """Registers an unpack format.

    `name` names the format and `extensions` lists the filename extensions
    it claims.  `function` is the callable invoked to unpack archives; it
    must raise a ReadError when handed an archive it cannot handle.
    `extra_args`, when given, is a sequence of (name, value) pairs passed
    as keyword arguments to the callable.  `description` is the text
    returned by get_unpack_formats() for this entry.
    """
    extra_args = [] if extra_args is None else extra_args
    _check_unpack_options(extensions, function, extra_args)
    _UNPACK_FORMATS[name] = (extensions, function, extra_args, description)
def unregister_unpack_format(name):
    """Removes the pack format from the registry (KeyError if absent)."""
    _UNPACK_FORMATS.pop(name)
def _ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _unpack_zipfile(filename, extract_dir):
    """Unpack zip `filename` to `extract_dir`.

    Skips absolute members and members containing '..' so a hostile
    archive cannot write outside `extract_dir`.
    """
    try:
        import zipfile
    except ImportError:
        raise ReadError('zlib not supported, cannot unpack this archive.')
    if not zipfile.is_zipfile(filename):
        raise ReadError("%s is not a zip file" % filename)
    zf = zipfile.ZipFile(filename)
    try:
        for info in zf.infolist():
            name = info.filename
            # don't extract absolute paths or ones with .. in them
            if name.startswith('/') or '..' in name:
                continue
            target = os.path.join(extract_dir, *name.split('/'))
            if not target:
                continue
            # Make sure the enclosing directory exists (inlined helper).
            parent = os.path.dirname(target)
            if not os.path.isdir(parent):
                os.makedirs(parent)
            if not name.endswith('/'):
                # Regular file member: read first, then write, so that a
                # failed read does not leave an empty file behind.
                data = zf.read(name)
                with open(target, 'wb') as out:
                    out.write(data)
                del data
    finally:
        zf.close()
def _unpack_tarfile(filename, extract_dir):
"""Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise ReadError(
"%s is not a compressed or uncompressed tar file" % filename)
try:
tarobj.extractall(extract_dir)
finally:
tarobj.close()
# Registry of built-in unpack formats:
#   name -> (list of filename extensions, unpack callable,
#            [(kwarg, value), ...], description)
_UNPACK_FORMATS = {
    'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
    'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
    'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
    }
# NOTE(review): '.bz2' claims *every* .bz2 file, including non-tar ones;
# later CPython registers '.tar.bz2'/'.tbz2' instead -- confirm whether
# narrowing this here would break existing callers.
if _BZ2_SUPPORTED:
    _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
                                "bzip2'ed tar-file")
def _find_unpack_format(filename):
    """Return the name of the registered unpack format whose extension
    matches *filename*, or None when no registered extension matches."""
    for name, info in _UNPACK_FORMATS.items():
        if any(filename.endswith(ext) for ext in info[0]):
            return name
    return None
def unpack_archive(filename, extract_dir=None, format=None):
    """Unpack an archive.
    `filename` is the name of the archive.
    `extract_dir` is the name of the target directory, where the archive
    is unpacked. If not provided, the current working directory is used.
    `format` is the archive format: one of "zip", "tar", or "gztar". Or any
    other registered format. If not provided, unpack_archive will use the
    filename extension and see if an unpacker was registered for that
    extension.
    In case none is found, a ValueError is raised.
    """
    if extract_dir is None:
        extract_dir = os.getcwd()
    if format is not None:
        # Explicit format: dispatch straight from the registry.
        try:
            format_info = _UNPACK_FORMATS[format]
        except KeyError:
            raise ValueError("Unknown unpack format '{0}'".format(format))
        func = format_info[1]
        func(filename, extract_dir, **dict(format_info[2]))
    else:
        # we need to look at the registered unpackers supported extensions
        format = _find_unpack_format(filename)
        if format is None:
            # No extension matched -> ReadError (not ValueError) here.
            raise ReadError("Unknown archive format '{0}'".format(filename))
        func = _UNPACK_FORMATS[format][1]
        kwargs = dict(_UNPACK_FORMATS[format][2])
        func(filename, extract_dir, **kwargs)
| gpl-3.0 |
cattleprod/samsung-kernel-gt-i9100 | external/webkit/WebKitTools/BuildSlaveSupport/build.webkit.org-config/webkit/steps.py | 3 | 10156 | from webkit.basesteps import ShellCommand, SVN, Test, Compile, UploadCommand
from buildbot.status.builder import SUCCESS, FAILURE, WARNINGS
class CheckOutSource(SVN):
    """Buildbot step that checks out WebKit trunk from the public SVN repo.

    Uses "update" mode so an existing working copy is updated in place.
    """
    baseURL = "http://svn.webkit.org/repository/webkit/"
    mode = "update"
    def __init__(self, *args, **kwargs):
        SVN.__init__(self, baseURL=self.baseURL, defaultBranch="trunk", mode=self.mode, *args, **kwargs)
class SetConfiguration(ShellCommand):
    """Runs set-webkit-configuration with the 'configuration' kwarg
    (e.g. "release") turned into a --<configuration> flag."""
    command = ["perl", "./WebKitTools/Scripts/set-webkit-configuration"]
    def __init__(self, *args, **kwargs):
        configuration = kwargs.pop('configuration')
        # List concatenation creates an instance-level command, leaving the
        # shared class attribute untouched.
        self.command = self.command + ['--' + configuration]
        self.name = "set-configuration-%s" % (configuration, )
        self.description = ["set configuration %s" % (configuration, )]
        self.descriptionDone = ["set configuration %s" % (configuration, )]
        ShellCommand.__init__(self, *args, **kwargs)
class LayoutTest(Test):
    """Runs the WebKit layout-test suite and fails on any reported
    incorrect layout, crash or timeout, or on a non-zero exit status."""
    name = "layout-test"
    description = ["layout-tests running"]
    descriptionDone = ["layout-tests"]
    command = ["perl", "./WebKitTools/Scripts/run-webkit-tests", "--no-launch-safari", "--no-new-test-results", "--no-sample-on-timeout", "--results-directory", "layout-test-results"]
    def commandComplete(self, cmd):
        Test.commandComplete(self, cmd)
        logText = cmd.logs['stdio'].getText()
        # Collect the stdout lines that report layout mismatches, crashed
        # test cases or timed-out test cases.
        incorrectLayoutLines = [line for line in logText.splitlines() if line.find('had incorrect layout') >= 0 or (line.find('test case') >= 0 and (line.find(' crashed') >= 0 or line.find(' timed out') >= 0))]
        self.incorrectLayoutLines = incorrectLayoutLines
    def evaluateCommand(self, cmd):
        if self.incorrectLayoutLines or cmd.rc != 0:
            return FAILURE
        return SUCCESS
    def getText(self, cmd, results):
        return self.getText2(cmd, results)
    def getText2(self, cmd, results):
        # On failure, show the offending log lines instead of the step name.
        if results != SUCCESS and self.incorrectLayoutLines:
            return self.incorrectLayoutLines
        return [self.name]
class JavaScriptCoreTest(Test):
    """Runs the JavaScriptCore test suite; fails on any reported
    regressions or a non-zero exit status."""
    name = "jscore-test"
    description = ["jscore-tests running"]
    descriptionDone = ["jscore-tests"]
    command = ["perl", "./WebKitTools/Scripts/run-javascriptcore-tests"]
    logfiles = {'results': 'JavaScriptCore/tests/mozilla/actual.html'}
    def commandComplete(self, cmd):
        Test.commandComplete(self, cmd)
        logText = cmd.logs['stdio'].getText()
        # Find the "... regression(s) found." summary line; keep it only
        # when the leading count is non-zero.
        statusLines = [line for line in logText.splitlines() if line.find('regression') >= 0 and line.find(' found.') >= 0]
        if statusLines and statusLines[0].split()[0] != '0':
            self.regressionLine = statusLines[0]
        else:
            self.regressionLine = None
    def evaluateCommand(self, cmd):
        if self.regressionLine:
            return FAILURE
        if cmd.rc != 0:
            return FAILURE
        return SUCCESS
    def getText(self, cmd, results):
        return self.getText2(cmd, results)
    def getText2(self, cmd, results):
        # Surface the regression summary next to the step name on failure.
        if results != SUCCESS and self.regressionLine:
            return [self.name, self.regressionLine]
        return [self.name]
class PixelLayoutTest(LayoutTest):
    """Layout tests with pixel comparison enabled (tolerance 0.1)."""
    name = "pixel-layout-test"
    description = ["pixel-layout-tests running"]
    descriptionDone = ["pixel-layout-tests"]
    command = LayoutTest.command + ["--pixel", "--tolerance", "0.1"]
class LeakTest(LayoutTest):
    """Layout tests run with --leaks; aggregates leak reports into the
    step's failure text."""
    command = ["perl", "./WebKitTools/Scripts/run-webkit-tests", "--no-launch-safari", "--no-sample-on-timeout", "--leaks", "--results-directory", "layout-test-results"]
    def commandComplete(self, cmd):
        LayoutTest.commandComplete(self, cmd)
        logText = cmd.logs['stdio'].getText()
        lines = logText.splitlines()
        leakLines = [line for line in lines if line.find('total leaks found!') >= 0]
        leakLines += [line for line in lines if line.find('LEAK: ') >= 0]
        # Drop the first whitespace-separated token of each matched line,
        # keeping "<count> <description>" -- assumes the count is the second
        # token of the log line; TODO confirm against run-webkit-tests output.
        leakLines = [' '.join(x.split()[1:]) for x in leakLines]
        leakSummary = {}
        for line in leakLines:
            count, key = line.split(' ', 1)
            if key.find('total leaks found!') >= 0:
                key = 'allocations found by "leaks" tool'
            # Sum the counts per distinct leak description.
            leakSummary[key] = leakSummary.get(key, 0) + int(count)
        leakSummaryLines = []
        for key in sorted(leakSummary.keys()):
            leakSummaryLines.append('%s %s' % (leakSummary[key], key))
        # Report leak summaries through the same channel as layout failures
        # so they appear in the step text (and fail the step).
        self.incorrectLayoutLines += leakSummaryLines
class UploadLayoutResults(UploadCommand, ShellCommand):
    """Rsyncs layout-test results and any DumpRenderTree crash logs to the
    results host, removing the local copies on success."""
    name = "upload-results"
    description = ["uploading results"]
    descriptionDone = ["uploaded-results"]
    command = "echo Disabled for now"
    def __init__(self, *args, **kwargs):
        ShellCommand.__init__(self, *args, **kwargs)
    def setBuild(self, build):
        ShellCommand.setBuild(self, build)
        self.initializeForUpload()
        # The real shell command can only be assembled once the build is
        # known, because the remote upload path depends on it.
        self.command = '''
        if [[ -d layout-test-results ]]; then \
        find layout-test-results -type d -print0 | xargs -0 chmod ug+rx; \
        find layout-test-results -type f -print0 | xargs -0 chmod ug+r; \
        rsync -rlvzP --rsync-path=/home/buildresults/bin/rsync layout-test-results/ %s && rm -rf layout-test-results; \
        fi; \
        CRASH_LOG=~/Library/Logs/CrashReporter/DumpRenderTree*.crash*; \
        if [[ -f $(ls -1 $CRASH_LOG | head -n 1 ) ]]; then \
        chmod ug+r $CRASH_LOG; \
        rsync -rlvzP --rsync-path=/home/buildresults/bin/rsync $CRASH_LOG %s && rm -rf $CRASH_LOG; \
        fi; ''' % (self.getRemotePath(), self.getRemotePath())
        self.addFactoryArguments(command=self.command)
class CompileWebKit(Compile):
    """Compiles WebKit via build-webkit for the given 'configuration'."""
    # WEBKITSUPPORTLIBRARIESZIPDIR presumably points the Windows build at
    # its support libraries (TODO confirm); MFLAGS is cleared so parent
    # make flags do not leak into sub-builds.
    command = ["perl", "./WebKitTools/Scripts/build-webkit"]
    env = {'WEBKITSUPPORTLIBRARIESZIPDIR': 'C:\\cygwin\\home\\buildbot', 'MFLAGS':''}
    def __init__(self, *args, **kwargs):
        configuration = kwargs.pop('configuration')
        self.name = "compile-" + configuration
        self.description = ["compiling " + configuration]
        self.descriptionDone = ["compiled " + configuration]
        Compile.__init__(self, env=self.env, *args, **kwargs)
class CleanWebKit(CompileWebKit):
    """Same as CompileWebKit but runs the build script with --clean."""
    command = CompileWebKit.command + ['--clean']
    description = ['cleaning']
    descriptionDone = ['cleaned']
class CompileWebKitNoSVG(CompileWebKit):
    """Builds WebKit with SVG disabled, from a freshly removed build tree."""
    command = 'rm -rf WebKitBuild && perl ./WebKitTools/Scripts/build-webkit --no-svg'
class CompileWebKitGtk(CompileWebKit):
    """Builds the GTK+ port (using qmake-qt4 as the qmake binary)."""
    command = ['perl', './WebKitTools/Scripts/build-webkit', '--gtk', '--qmake=qmake-qt4']
class CleanWebKitGtk(CompileWebKitGtk):
    """GTK+ variant of the clean step (--clean appended)."""
    command = CompileWebKitGtk.command + ['--clean']
    description = ['cleaning']
    descriptionDone = ['cleaned']
class CompileWebKitWx(CompileWebKit):
    """Builds the wxWidgets port."""
    command = ['perl', './WebKitTools/Scripts/build-webkit', '--wx']
class CleanWebKitWx(CompileWebKitWx):
    """wxWidgets variant of the clean step (--clean appended)."""
    command = CompileWebKitWx.command + ['--clean']
    description = ['cleaning']
    descriptionDone = ['cleaned']
class CompileWebKitWindows(UploadCommand, CompileWebKit):
    """Windows build step that also rsyncs the generated build logs
    (WebKitBuild/.../ *.htm) to the results host, preserving the build's
    exit status."""
    def setBuild(self, build):
        CompileWebKit.setBuild(self, build)
        self.initializeForUpload()
        # Assembled here because the remote path depends on the build.
        self.command = '''\
        ./WebKitTools/Scripts/build-webkit; \
        RESULT=$?
        for log in $(find WebKitBuild/*/*/*/*.htm); do \
        chmod ug+r $log; \
        REMOTE_NAME=$(echo $log | sed -e 's|WebKitBuild/obj/||' -e 's|/Release/|-|' -e 's|/Debug/|-|'); \
        rsync -rlvzP --rsync-path="/home/buildresults/bin/rsync" $log %s/$REMOTE_NAME && rm $log; \
        done; \
        exit $RESULT;''' % (self.getRemotePath(), )
        self.addFactoryArguments(command=self.command)
class LayoutTestWindows(LayoutTest):
    """Layout tests on Windows; points WEBKIT_TESTFONTS at the slave's
    test-font checkout."""
    env = {'WEBKIT_TESTFONTS': 'C:\\cygwin\\home\\buildbot\\WebKitTestFonts'}
    def __init__(self, *args, **kwargs):
        return LayoutTest.__init__(self, env=self.env, *args, **kwargs)
class JavaScriptCoreTestGtk(JavaScriptCoreTest):
    """JavaScriptCore tests for the GTK+ port."""
    command = JavaScriptCoreTest.command + ['--gtk']
class JavaScriptCoreTestWx(JavaScriptCoreTest):
    """JavaScriptCore tests for the wxWidgets port."""
    command = JavaScriptCoreTest.command + ['--wx']
class LayoutTestQt(LayoutTest):
    """Layout tests for the Qt port."""
    command = LayoutTest.command + ['--qt']
class InstallWin32Dependencies(ShellCommand):
    """Fetches/updates the auxiliary libraries the Windows build needs."""
    description = ["installing Windows dependencies"]
    descriptionDone = ["installed Windows dependencies"]
    command = ["perl", "./WebKitTools/Scripts/update-webkit-auxiliary-libs"]
# class UploadDiskImage(UploadCommand, ShellCommand):
# description = ["uploading disk image"]
# descriptionDone = ["uploaded disk image"]
# name = "upload-disk-image"
# def __init__(self, *args, **kwargs):
# UploadCommand.__init__(self, *args, **kwargs)
# self.command = 'umask 002 && ./WebKitTools/BuildSlaveSupport/build-launcher-app && ./WebKitTools/BuildSlaveSupport/build-launcher-dmg --upload-to-host %s' % (self.getRemotePath(), )
# ShellCommand.__init__(self, *args, **kwargs)
class GenerateCoverageData(Compile):
    """Runs the generate-coverage-data script (a Compile step so its
    output is treated like build output)."""
    command = ["perl", "./WebKitTools/Scripts/generate-coverage-data"]
    description = ["generating coverage data"]
    descriptionDone = ["generated coverage data"]
class UploadCoverageData(UploadCommand, ShellCommand):
    """Rsyncs the generated HTML coverage report to the results host and
    removes the local copy on success."""
    name = "upload-coverage-data"
    description = ["uploading coverage data"]
    descriptionDone = ["uploaded-coverage-data"]
    command = "echo Disabled for now"
    def __init__(self, *args, **kwargs):
        ShellCommand.__init__(self, *args, **kwargs)
    def setBuild(self, build):
        ShellCommand.setBuild(self, build)
        self.initializeForUpload()
        # Assembled here because the remote path depends on the build.
        self.command = '''\
        if [[ -d WebKitBuild/Coverage/html ]]; then \
        find WebKitBuild/Coverage/html -type d -print0 | xargs -0 chmod ug+rx; \
        find WebKitBuild/Coverage/html -type f -print0 | xargs -0 chmod ug+r; \
        rsync -rlvzP --rsync-path="/home/buildresults/bin/rsync" WebKitBuild/Coverage/html/ %s && rm -rf WebKitBuild/Coverage/html; \
        fi;''' % (self.getRemotePath(), )
        self.addFactoryArguments(command=self.command)
    def getURLPath(self):
        """URL path associated with the uploaded coverage data."""
        return "/results/code-coverage/"
| gpl-2.0 |
compiteing/flask-ponypermission | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/poolmanager.py | 678 | 9406 | import logging
try: # Python 3
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .exceptions import LocationValueError, MaxRetryError
from .request import RequestMethods
from .util.url import parse_url
from .util.retry import Retry
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
# Maps a URL scheme to the ConnectionPool subclass used for that scheme.
pool_classes_by_scheme = {
    'http': HTTPConnectionPool,
    'https': HTTPSConnectionPool,
}
log = logging.getLogger(__name__)
# Connection-pool kwargs that only apply to HTTPS pools; they are stripped
# before constructing plain HTTP pools (see PoolManager._new_pool).
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
                'ssl_version')
class PoolManager(RequestMethods):
    """
    Allows for arbitrary requests while transparently keeping track of
    necessary connection pools for you.
    :param num_pools:
        Number of connection pools to cache before discarding the least
        recently used pool.
    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    :param \**connection_pool_kw:
        Additional parameters are used to create fresh
        :class:`urllib3.connectionpool.ConnectionPool` instances.
    Example::
        >>> manager = PoolManager(num_pools=2)
        >>> r = manager.request('GET', 'http://google.com/')
        >>> r = manager.request('GET', 'http://google.com/mail')
        >>> r = manager.request('GET', 'http://yahoo.com/')
        >>> len(manager.pools)
        2
    """
    # Overridden by ProxyManager; checked in urlopen().
    proxy = None
    def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
        RequestMethods.__init__(self, headers)
        self.connection_pool_kw = connection_pool_kw
        # LRU container of pools; evicted pools are closed via dispose_func.
        self.pools = RecentlyUsedContainer(num_pools,
                                           dispose_func=lambda p: p.close())
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.clear()
        # Return False to re-raise any potential exceptions
        return False
    def _new_pool(self, scheme, host, port):
        """
        Create a new :class:`ConnectionPool` based on host, port and scheme.
        This method is used to actually create the connection pools handed out
        by :meth:`connection_from_url` and companion methods. It is intended
        to be overridden for customization.
        """
        pool_cls = pool_classes_by_scheme[scheme]
        kwargs = self.connection_pool_kw
        if scheme == 'http':
            # SSL-only keywords are meaningless for HTTP pools; strip them
            # from a copy so the shared kwargs are not mutated.
            kwargs = self.connection_pool_kw.copy()
            for kw in SSL_KEYWORDS:
                kwargs.pop(kw, None)
        return pool_cls(host, port, **kwargs)
    def clear(self):
        """
        Empty our store of pools and direct them all to close.
        This will not affect in-flight connections, but they will not be
        re-used after completion.
        """
        self.pools.clear()
    def connection_from_host(self, host, port=None, scheme='http'):
        """
        Get a :class:`ConnectionPool` based on the host, port, and scheme.
        If ``port`` isn't given, it will be derived from the ``scheme`` using
        ``urllib3.connectionpool.port_by_scheme``.
        """
        if not host:
            raise LocationValueError("No host specified.")
        scheme = scheme or 'http'
        port = port or port_by_scheme.get(scheme, 80)
        # One pool per (scheme, host, port) triple.
        pool_key = (scheme, host, port)
        with self.pools.lock:
            # If the scheme, host, or port doesn't match existing open
            # connections, open a new ConnectionPool.
            pool = self.pools.get(pool_key)
            if pool:
                return pool
            # Make a fresh ConnectionPool of the desired type
            pool = self._new_pool(scheme, host, port)
            self.pools[pool_key] = pool
        return pool
    def connection_from_url(self, url):
        """
        Similar to :func:`urllib3.connectionpool.connection_from_url` but
        doesn't pass any additional parameters to the
        :class:`urllib3.connectionpool.ConnectionPool` constructor.
        Additional parameters are taken from the :class:`.PoolManager`
        constructor.
        """
        u = parse_url(url)
        return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
    def urlopen(self, method, url, redirect=True, **kw):
        """
        Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
        with custom cross-host redirect logic and only sends the request-uri
        portion of the ``url``.
        The given ``url`` parameter must be absolute, such that an appropriate
        :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
        """
        u = parse_url(url)
        conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
        # Redirects are handled here (possibly cross-host), so disable the
        # pool-level redirect following and same-host assertion.
        kw['assert_same_host'] = False
        kw['redirect'] = False
        if 'headers' not in kw:
            kw['headers'] = self.headers
        if self.proxy is not None and u.scheme == "http":
            # Proxied plain-HTTP requests need the full URL on the request
            # line; direct requests send only the request-uri.
            response = conn.urlopen(method, url, **kw)
        else:
            response = conn.urlopen(method, u.request_uri, **kw)
        redirect_location = redirect and response.get_redirect_location()
        if not redirect_location:
            return response
        # Support relative URLs for redirecting.
        redirect_location = urljoin(url, redirect_location)
        # RFC 7231, Section 6.4.4
        if response.status == 303:
            method = 'GET'
        retries = kw.get('retries')
        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect)
        try:
            retries = retries.increment(method, url, response=response, _pool=conn)
        except MaxRetryError:
            # Redirect budget exhausted: either raise or return the last
            # (non-followed) response, per the Retry configuration.
            if retries.raise_on_redirect:
                raise
            return response
        kw['retries'] = retries
        kw['redirect'] = redirect
        log.info("Redirecting %s -> %s" % (url, redirect_location))
        # Recurse to follow the redirect with the decremented retry budget.
        return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
    """
    Behaves just like :class:`PoolManager`, but sends all requests through
    the defined proxy, using the CONNECT method for HTTPS URLs.
    :param proxy_url:
        The URL of the proxy to be used.
    :param proxy_headers:
        A dictionary contaning headers that will be sent to the proxy. In case
        of HTTP they are being sent with each request, while in the
        HTTPS/CONNECT case they are sent only once. Could be used for proxy
        authentication.
    Example:
        >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
        >>> r1 = proxy.request('GET', 'http://google.com/')
        >>> r2 = proxy.request('GET', 'http://httpbin.org/')
        >>> len(proxy.pools)
        1
        >>> r3 = proxy.request('GET', 'https://httpbin.org/')
        >>> r4 = proxy.request('GET', 'https://twitter.com/')
        >>> len(proxy.pools)
        3
    """
    def __init__(self, proxy_url, num_pools=10, headers=None,
                 proxy_headers=None, **connection_pool_kw):
        # Accept an existing pool object in place of a URL string.
        if isinstance(proxy_url, HTTPConnectionPool):
            proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
                                        proxy_url.port)
        proxy = parse_url(proxy_url)
        if not proxy.port:
            # Default the port from the proxy's scheme (e.g. 80 for http).
            port = port_by_scheme.get(proxy.scheme, 80)
            proxy = proxy._replace(port=port)
        assert proxy.scheme in ("http", "https"), \
            'Not supported proxy scheme %s' % proxy.scheme
        self.proxy = proxy
        self.proxy_headers = proxy_headers or {}
        # Hand the proxy settings down to every pool this manager creates.
        connection_pool_kw['_proxy'] = self.proxy
        connection_pool_kw['_proxy_headers'] = self.proxy_headers
        super(ProxyManager, self).__init__(
            num_pools, headers, **connection_pool_kw)
    def connection_from_host(self, host, port=None, scheme='http'):
        if scheme == "https":
            # HTTPS tunnels via CONNECT: pool keyed on the target host.
            return super(ProxyManager, self).connection_from_host(
                host, port, scheme)
        # Plain HTTP goes straight to the proxy: one shared pool.
        return super(ProxyManager, self).connection_from_host(
            self.proxy.host, self.proxy.port, self.proxy.scheme)
    def _set_proxy_headers(self, url, headers=None):
        """
        Sets headers needed by proxies: specifically, the Accept and Host
        headers. Only sets headers not provided by the user.
        """
        headers_ = {'Accept': '*/*'}
        netloc = parse_url(url).netloc
        if netloc:
            headers_['Host'] = netloc
        if headers:
            headers_.update(headers)
        return headers_
    def urlopen(self, method, url, redirect=True, **kw):
        "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
        u = parse_url(url)
        if u.scheme == "http":
            # For proxied HTTPS requests, httplib sets the necessary headers
            # on the CONNECT to the proxy. For HTTP, we'll definitely
            # need to set 'Host' at the very least.
            headers = kw.get('headers', self.headers)
            kw['headers'] = self._set_proxy_headers(url, headers)
        return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
def proxy_from_url(url, **kw):
    """Convenience shortcut: build a :class:`ProxyManager` for *url*."""
    return ProxyManager(proxy_url=url, **kw)
| mit |
# Victory banner shown when the player defeats the final boss.
# ("defetad" typo in the original message fixed to "defeated".)
the_end = '''
  _ __ ___ _ _____ __ ___ _ ___
 (__ __) \ | | / \ ___) \ ___) | \ | | | \
 | | | \_/ | | (__ | (__ | |\ \ | | | | |
 | | | _ | | __) | __) | | \ \| | | | |
 | | | / \ | | (___ | (___ | | \ | | | |
 ___| |____/ |___| \_/ )____/ )_| |___\ |_| /___
 You have defeated the evil dwarf! Thank you so much! Here is 500+ gold coins!'''

# Defeat banner shown when the player dies.
the_end_2 = '''
  _ __ ___ _ _____ __ ___ _ ___
 (__ __) \ | | / \ ___) \ ___) | \ | | | \
 | | | \_/ | | (__ | (__ | |\ \ | | | | |
 | | | _ | | __) | __) | | \ \| | | | |
 | | | / \ | | (___ | (___ | | \ | | | |
 ___| |____/ |___| \_/ )____/ )_| |___\ |_| /___
 You are dead... so you can't really see it, shame.'''
mdietrichc2c/OCB | addons/portal_project/tests/test_access_rights.py | 65 | 15194 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.project.tests.test_project_base import TestProjectBase
from openerp.exceptions import AccessError
from openerp.osv.orm import except_orm
from openerp.tools import mute_logger
class TestPortalProjectBase(TestProjectBase):
    """Shared fixture for portal-project access-rights tests.

    Creates one portal user, one public user and one project-manager user,
    a public 'Pigs' project, and six tasks: three unassigned, and one each
    assigned to the employee, portal and public test users.
    """

    def setUp(self):
        super(TestPortalProjectBase, self).setUp()
        cr, uid = self.cr, self.uid

        # Find Portal group
        group_portal_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_portal')
        self.group_portal_id = group_portal_ref and group_portal_ref[1] or False

        # Find Public group
        group_public_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_public')
        self.group_public_id = group_public_ref and group_public_ref[1] or False

        # # Test users to use through the various tests
        self.user_portal_id = self.res_users.create(cr, uid, {
            'name': 'Chell Portal',
            'login': 'chell',
            'alias_name': 'chell',
            'groups_id': [(6, 0, [self.group_portal_id])]
        })
        self.user_public_id = self.res_users.create(cr, uid, {
            'name': 'Donovan Public',
            'login': 'donovan',
            'alias_name': 'donovan',
            'groups_id': [(6, 0, [self.group_public_id])]
        })
        self.user_manager_id = self.res_users.create(cr, uid, {
            'name': 'Eustache Manager',
            'login': 'eustache',
            'alias_name': 'eustache',
            'groups_id': [(6, 0, [self.group_project_manager_id])]
        })

        # Test 'Pigs' project
        # mail_create_nolog avoids creating chatter messages during setup.
        self.project_pigs_id = self.project_project.create(cr, uid, {
            'name': 'Pigs', 'privacy_visibility': 'public'}, {'mail_create_nolog': True})

        # Various test tasks
        # Tasks 1-3 are unassigned; tasks 4-6 are assigned to the employee,
        # portal and public users respectively.
        self.task_1_id = self.project_task.create(cr, uid, {
            'name': 'Test1', 'user_id': False, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True})
        self.task_2_id = self.project_task.create(cr, uid, {
            'name': 'Test2', 'user_id': False, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True})
        self.task_3_id = self.project_task.create(cr, uid, {
            'name': 'Test3', 'user_id': False, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True})
        self.task_4_id = self.project_task.create(cr, uid, {
            'name': 'Test4', 'user_id': self.user_projectuser_id, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True})
        self.task_5_id = self.project_task.create(cr, uid, {
            'name': 'Test5', 'user_id': self.user_portal_id, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True})
        self.task_6_id = self.project_task.create(cr, uid, {
            'name': 'Test6', 'user_id': self.user_public_id, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True})
class TestPortalProject(TestPortalProjectBase):
    # Exercises project/task visibility for each privacy_visibility value
    # ('public', 'portal', 'employees', 'followers') against four profiles:
    # Alfred (employee), Bert (no group), Chell (portal), Donovan (public).

    @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
    def test_00_project_access_rights(self):
        """ Test basic project access rights, for project and portal_project """
        cr, uid, pigs_id = self.cr, self.uid, self.project_pigs_id

        # ----------------------------------------
        # CASE1: public project
        # ----------------------------------------

        # Do: Alfred reads project -> ok (employee ok public)
        self.project_project.read(cr, self.user_projectuser_id, [pigs_id], ['state'])
        # Test: all project tasks visible
        task_ids = self.project_task.search(cr, self.user_projectuser_id, [('project_id', '=', pigs_id)])
        test_task_ids = set([self.task_1_id, self.task_2_id, self.task_3_id, self.task_4_id, self.task_5_id, self.task_6_id])
        self.assertEqual(set(task_ids), test_task_ids,
                         'access rights: project user cannot see all tasks of a public project')
        # Test: all project tasks readable
        self.project_task.read(cr, self.user_projectuser_id, task_ids, ['name'])
        # Test: all project tasks writable
        self.project_task.write(cr, self.user_projectuser_id, task_ids, {'description': 'TestDescription'})

        # Do: Bert reads project -> crash, no group
        self.assertRaises(AccessError, self.project_project.read, cr, self.user_none_id, [pigs_id], ['state'])
        # Test: no project task visible
        self.assertRaises(AccessError, self.project_task.search, cr, self.user_none_id, [('project_id', '=', pigs_id)])
        # Test: no project task readable
        self.assertRaises(AccessError, self.project_task.read, cr, self.user_none_id, task_ids, ['name'])
        # Test: no project task writable
        self.assertRaises(AccessError, self.project_task.write, cr, self.user_none_id, task_ids, {'description': 'TestDescription'})

        # Do: Chell reads project -> ok (portal ok public)
        self.project_project.read(cr, self.user_portal_id, [pigs_id], ['state'])
        # Test: all project tasks visible
        task_ids = self.project_task.search(cr, self.user_portal_id, [('project_id', '=', pigs_id)])
        self.assertEqual(set(task_ids), test_task_ids,
                         'access rights: project user cannot see all tasks of a public project')
        # Test: all project tasks readable
        self.project_task.read(cr, self.user_portal_id, task_ids, ['name'])
        # Test: no project task writable
        self.assertRaises(AccessError, self.project_task.write, cr, self.user_portal_id, task_ids, {'description': 'TestDescription'})

        # Do: Donovan reads project -> ok (public)
        self.project_project.read(cr, self.user_public_id, [pigs_id], ['state'])
        # Test: all project tasks visible
        task_ids = self.project_task.search(cr, self.user_public_id, [('project_id', '=', pigs_id)])
        self.assertEqual(set(task_ids), test_task_ids,
                         'access rights: public user cannot see all tasks of a public project')
        # Test: all project tasks readable
        self.project_task.read(cr, self.user_public_id, task_ids, ['name'])
        # Test: no project task writable
        self.assertRaises(AccessError, self.project_task.write, cr, self.user_public_id, task_ids, {'description': 'TestDescription'})

        # ----------------------------------------
        # CASE2: portal project
        # ----------------------------------------
        # invalidate_cache so the visibility change is seen by later reads.
        self.project_project.write(cr, uid, [pigs_id], {'privacy_visibility': 'portal'})
        self.project_project.invalidate_cache(cr, uid)

        # Do: Alfred reads project -> ok (employee ok public)
        self.project_project.read(cr, self.user_projectuser_id, [pigs_id], ['state'])
        # Test: all project tasks visible
        task_ids = self.project_task.search(cr, self.user_projectuser_id, [('project_id', '=', pigs_id)])
        self.assertEqual(set(task_ids), test_task_ids,
                         'access rights: project user cannot see all tasks of a portal project')

        # Do: Bert reads project -> crash, no group
        self.assertRaises(AccessError, self.project_project.read, cr, self.user_none_id, [pigs_id], ['state'])
        # Test: no project task searchable
        self.assertRaises(AccessError, self.project_task.search, cr, self.user_none_id, [('project_id', '=', pigs_id)])

        # Data: task follower
        self.project_task.message_subscribe_users(cr, self.user_projectuser_id, [self.task_1_id, self.task_3_id], [self.user_portal_id])

        # Do: Chell reads project -> ok (portal ok public)
        self.project_project.read(cr, self.user_portal_id, [pigs_id], ['state'])
        # Test: only followed project tasks visible + assigned
        task_ids = self.project_task.search(cr, self.user_portal_id, [('project_id', '=', pigs_id)])
        test_task_ids = set([self.task_1_id, self.task_3_id, self.task_5_id])
        self.assertEqual(set(task_ids), test_task_ids,
                         'access rights: portal user should see the followed tasks of a portal project')

        # Do: Donovan reads project -> ko (public ko portal)
        self.assertRaises(except_orm, self.project_project.read, cr, self.user_public_id, [pigs_id], ['state'])
        # Test: no project task visible
        task_ids = self.project_task.search(cr, self.user_public_id, [('project_id', '=', pigs_id)])
        self.assertFalse(task_ids, 'access rights: public user should not see tasks of a portal project')

        # Data: task follower cleaning
        self.project_task.message_unsubscribe_users(cr, self.user_projectuser_id, [self.task_1_id, self.task_3_id], [self.user_portal_id])

        # ----------------------------------------
        # CASE3: employee project
        # ----------------------------------------
        self.project_project.write(cr, uid, [pigs_id], {'privacy_visibility': 'employees'})
        self.project_project.invalidate_cache(cr, uid)

        # Do: Alfred reads project -> ok (employee ok employee)
        self.project_project.read(cr, self.user_projectuser_id, [pigs_id], ['state'])
        # Test: all project tasks visible
        task_ids = self.project_task.search(cr, self.user_projectuser_id, [('project_id', '=', pigs_id)])
        test_task_ids = set([self.task_1_id, self.task_2_id, self.task_3_id, self.task_4_id, self.task_5_id, self.task_6_id])
        self.assertEqual(set(task_ids), test_task_ids,
                         'access rights: project user cannot see all tasks of an employees project')

        # Do: Bert reads project -> crash, no group
        self.assertRaises(AccessError, self.project_project.read, cr, self.user_none_id, [pigs_id], ['state'])

        # Do: Chell reads project -> ko (portal ko employee)
        self.assertRaises(except_orm, self.project_project.read, cr, self.user_portal_id, [pigs_id], ['state'])
        # Test: no project task visible + assigned
        task_ids = self.project_task.search(cr, self.user_portal_id, [('project_id', '=', pigs_id)])
        self.assertFalse(task_ids, 'access rights: portal user should not see tasks of an employees project, even if assigned')

        # Do: Donovan reads project -> ko (public ko employee)
        self.assertRaises(except_orm, self.project_project.read, cr, self.user_public_id, [pigs_id], ['state'])
        # Test: no project task visible
        task_ids = self.project_task.search(cr, self.user_public_id, [('project_id', '=', pigs_id)])
        self.assertFalse(task_ids, 'access rights: public user should not see tasks of an employees project')

        # ----------------------------------------
        # CASE4: followers project
        # ----------------------------------------
        self.project_project.write(cr, uid, [pigs_id], {'privacy_visibility': 'followers'})
        self.project_project.invalidate_cache(cr, uid)

        # Do: Alfred reads project -> ko (employee ko followers)
        self.assertRaises(except_orm, self.project_project.read, cr, self.user_projectuser_id, [pigs_id], ['state'])
        # Test: no project task visible (assigned task remains visible)
        task_ids = self.project_task.search(cr, self.user_projectuser_id, [('project_id', '=', pigs_id)])
        test_task_ids = set([self.task_4_id])
        self.assertEqual(set(task_ids), test_task_ids,
                         'access rights: employee user should not see tasks of a not-followed followers project, only assigned')

        # Do: Bert reads project -> crash, no group
        self.assertRaises(AccessError, self.project_project.read, cr, self.user_none_id, [pigs_id], ['state'])

        # Do: Chell reads project -> ko (portal ko employee)
        self.assertRaises(except_orm, self.project_project.read, cr, self.user_portal_id, [pigs_id], ['state'])
        # Test: no project task visible (assigned task remains visible)
        task_ids = self.project_task.search(cr, self.user_portal_id, [('project_id', '=', pigs_id)])
        test_task_ids = set([self.task_5_id])
        self.assertEqual(set(task_ids), test_task_ids,
                         'access rights: portal user should not see tasks of a not-followed followers project, only assigned')

        # Do: Donovan reads project -> ko (public ko employee)
        self.assertRaises(except_orm, self.project_project.read, cr, self.user_public_id, [pigs_id], ['state'])
        # Test: no project task visible
        task_ids = self.project_task.search(cr, self.user_public_id, [('project_id', '=', pigs_id)])
        self.assertFalse(task_ids, 'access rights: public user should not see tasks of a followers project')

        # Data: subscribe Alfred, Chell and Donovan as follower
        self.project_project.message_subscribe_users(cr, uid, [pigs_id], [self.user_projectuser_id, self.user_portal_id, self.user_public_id])
        self.project_task.message_subscribe_users(cr, self.user_manager_id, [self.task_1_id, self.task_3_id], [self.user_portal_id, self.user_projectuser_id])

        # Do: Alfred reads project -> ok (follower ok followers)
        self.project_project.read(cr, self.user_projectuser_id, [pigs_id], ['state'])
        # Test: followed + assigned tasks visible
        task_ids = self.project_task.search(cr, self.user_projectuser_id, [('project_id', '=', pigs_id)])
        test_task_ids = set([self.task_1_id, self.task_3_id, self.task_4_id])
        self.assertEqual(set(task_ids), test_task_ids,
                         'access rights: employee user should not see followed + assigned tasks of a follower project')

        # Do: Chell reads project -> ok (follower ok follower)
        self.project_project.read(cr, self.user_portal_id, [pigs_id], ['state'])
        # Test: followed + assigned tasks visible
        task_ids = self.project_task.search(cr, self.user_portal_id, [('project_id', '=', pigs_id)])
        test_task_ids = set([self.task_1_id, self.task_3_id, self.task_5_id])
        self.assertEqual(set(task_ids), test_task_ids,
                         'access rights: employee user should not see followed + assigned tasks of a follower project')

        # Do: Donovan reads project -> ko (public ko follower even if follower)
        self.assertRaises(except_orm, self.project_project.read, cr, self.user_public_id, [pigs_id], ['state'])
| agpl-3.0 |
lordtangent/arsenalsuite | cpp/apps/freezer/afplugins/sent_email.py | 11 | 1394 |
from blur.Stone import *
from blur.Classes import *
from blur.Freezer import *
from PyQt4.QtCore import *
from PyQt4.QtSql import *
import traceback, os
import subprocess
class ToDeleteViewerPlugin(JobViewerPlugin):
    """Freezer job-viewer plugin that toggles the "email sent" marker.

    The marker is bit 0x1 of the job's toggle flags; every toggle is
    recorded in the job history with the current host and user.
    """

    # Bit in Job.toggleFlags() that marks "an email has been sent".
    EMAIL_SENT_FLAG = 0x00000001

    def __init__(self):
        JobViewerPlugin.__init__(self)

    def name(self):
        return QString("Indicate that an email has been sent regarding the job")

    def icon(self):
        return QString("images/emailed.png")

    def _log_history(self, job, message):
        # Record who changed the flag, from which host, and why.
        jh = JobHistory()
        jh.setHost(Host.currentHost())
        jh.setUser(User.currentUser())
        jh.setJob(job)
        jh.setMessage(message)
        jh.commit()

    def view(self, jobList):
        """Flip the email-sent flag on every selected job and log it.

        Both branches of the original performed the same XOR + commit and
        duplicated the history-record code; that is factored out here,
        with behavior (including log messages) unchanged.
        """
        for job in jobList:
            was_set = bool(job.toggleFlags() & self.EMAIL_SENT_FLAG)
            # XOR flips the bit regardless of its current state.
            job.setToggleFlags(job.toggleFlags() ^ self.EMAIL_SENT_FLAG)
            job.commit()
            if was_set:
                self._log_history(job, "Removed 'email sent' flag")
            else:
                self._log_history(job, "Email sent toggled")

JobViewerFactory.registerPlugin(ToDeleteViewerPlugin())
| gpl-2.0 |
abimannans/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
BIT-SYS/gem5-spm-module | src/cpu/kvm/KvmVM.py | 57 | 2399 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
class KvmVM(SimObject):
    # gem5 SimObject describing a KVM virtual machine; backed by the C++
    # implementation in cpu/kvm/vm.hh.
    type = 'KvmVM'
    cxx_header = "cpu/kvm/vm.hh"
    # Owning system, resolved from the object hierarchy by default.
    system = Param.System(Parent.any, "system object")
    # Address ranges whose MMIO accesses are coalesced; empty by default.
    coalescedMMIO = VectorParam.AddrRange([], "memory ranges for coalesced MMIO")
| bsd-3-clause |
MatthewWilkes/django-oscar | src/oscar/apps/dashboard/users/forms.py | 27 | 1400 | from django import forms
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
from oscar.core.loading import get_model
from oscar.core.compat import get_user_model
User = get_user_model()
ProductAlert = get_model('customer', 'ProductAlert')
class UserSearchForm(forms.Form):
    # Dashboard filter form for looking up users by email and/or name;
    # both fields are optional.
    email = forms.CharField(required=False, label=_("Email"))
    name = forms.CharField(
        required=False, label=pgettext_lazy(u"User's name", u"Name"))
class ProductAlertUpdateForm(forms.ModelForm):
    """Edit form for a ProductAlert's status.

    When the alert belongs to a registered user, the first status choice
    ('unconfirmed') is removed, since only anonymous (email-only) alerts
    go through confirmation.
    """

    def __init__(self, *args, **kwargs):
        super(ProductAlertUpdateForm, self).__init__(*args, **kwargs)
        alert = kwargs['instance']
        if alert.user:
            # Remove 'unconfirmed' from list of available choices when editing
            # an alert for a real user. Rebuild the list instead of
            # `del choices[0]` so we never mutate a choices object that may
            # be shared with the model field / other form instances.
            self.fields['status'].choices = \
                list(self.fields['status'].choices)[1:]

    class Meta:
        model = ProductAlert
        fields = [
            'status',
        ]
class ProductAlertSearchForm(forms.Form):
    # Dashboard filter form for product alerts. Prepends an empty
    # "any status" option to the model's status choices.
    STATUS_CHOICES = (
        ('', '------------'),
    ) + ProductAlert.STATUS_CHOICES

    status = forms.ChoiceField(required=False, choices=STATUS_CHOICES,
                               label=_('Status'))
    name = forms.CharField(required=False, label=_('Name'))
    email = forms.EmailField(required=False, label=_('Email'))
| bsd-3-clause |
SIFTeam/enigma2 | lib/python/Components/Sources/FrontendInfo.py | 35 | 1642 | from enigma import iPlayableService
from Source import Source
from Components.PerServiceDisplay import PerServiceBase
class FrontendInfo(Source, PerServiceBase):
    """Source exposing tuner (frontend) information to the GUI.

    Frontend data is obtained from one of three places, checked in this
    order: an explicit ``frontend_source`` callback, a ``service_source``
    callback, or the navigation core's current service.

    Fixes applied: removed commented-out pdb debugging code and renamed a
    local variable that shadowed the builtin ``dict``. Behavior unchanged.
    """

    def __init__(self, service_source=None, frontend_source=None, navcore=None):
        self.navcore = None
        Source.__init__(self)
        if navcore:
            # Track tune/stop events so the displayed info stays current.
            PerServiceBase.__init__(self, navcore,
                {
                    iPlayableService.evTunedIn: self.updateFrontendData,
                    iPlayableService.evEnd: self.serviceEnd
                })
        self.service_source = service_source
        self.frontend_source = frontend_source
        self.updateFrontendData()

    def serviceEnd(self):
        # Service stopped: clear cached tuner info and notify listeners.
        self.slot_number = self.frontend_type = None
        self.changed((self.CHANGED_CLEAR, ))

    def updateFrontendData(self):
        """Refresh slot_number/frontend_type from the current data source."""
        data = self.getFrontendData()
        if not data:
            self.slot_number = self.frontend_type = None
        else:
            self.slot_number = data.get("tuner_number")
            self.frontend_type = data.get("tuner_type")
        self.changed((self.CHANGED_ALL, ))

    def getFrontendData(self):
        """Return a dict of frontend data, or None/empty when unavailable."""
        if self.frontend_source:
            frontend = self.frontend_source()
            # Renamed from 'dict' to avoid shadowing the builtin.
            data = {}
            if frontend:
                frontend.getFrontendData(data)
            return data
        elif self.service_source:
            service = self.navcore and self.service_source()
            feinfo = service and service.frontendInfo()
            return feinfo and feinfo.getFrontendData()
        elif self.navcore:
            service = self.navcore.getCurrentService()
            feinfo = service and service.frontendInfo()
            return feinfo and feinfo.getFrontendData()
        else:
            return None

    def destroy(self):
        # Mirror the original teardown: only detach PerServiceBase when
        # neither explicit source was supplied.
        if not self.frontend_source and not self.service_source:
            PerServiceBase.destroy(self)
        Source.destroy(self)
| gpl-2.0 |
bmockler/MOSFiT | mosfit/modules/arrays/diagonal.py | 5 | 6962 | """Definitions for the `Diagonal` class."""
from math import isnan
import numpy as np
from mosfit.modules.arrays.array import Array
from mosfit.utils import flux_density_unit
# Important: Only define one ``Module`` class per file.
class Diagonal(Array):
    """Calculate the diagonal/residuals for a model kernel.

    ``process`` returns the squared observational errors (matrix diagonal)
    and the |model - observation| residuals for the observed epochs;
    ``preprocess`` normalizes the heterogeneous error-bar inputs first.
    """

    # Floor for covariance terms. NOTE(review): not referenced in this
    # class body — presumably used by subclasses or callers; confirm.
    MIN_COV_TERM = 1.0e-30

    def __init__(self, **kwargs):
        """Initialize module."""
        super(Diagonal, self).__init__(**kwargs)
        self._observation_types = np.array([])

    def process(self, **kwargs):
        """Process module.

        Returns a dict with 'kdiagonal' (squared errors) and 'kresiduals'
        (absolute model-data residuals), both restricted to observed epochs.
        """
        self.preprocess(**kwargs)
        # Copy so slicing/masking below cannot mutate the caller's array.
        self._model_observations = np.copy(kwargs['model_observations'])
        self._model_observations = self._model_observations[self._observed]
        ret = {}

        allowed_otypes = ['countrate', 'magnitude', 'fluxdensity', 'magcount']

        if np.any([x not in allowed_otypes for x in self._o_types]):
            print([x for x in self._o_types if x not in allowed_otypes])
            raise ValueError('Unrecognized observation type.')

        # Calculate (model - obs) residuals.
        # For upper limits (u True) the residual is only counted when the
        # model violates the limit; the direction of the inequality differs
        # between magnitudes/countrates (x < limit) and flux densities
        # (x > limit) — magnitudes are inverted brightness.
        residuals = np.array([
            (abs(x - ct) if (not u and ct is not None) or (
                not isnan(x) and ct is not None and x < ct) else 0.0)
            if (t == 'countrate' or t == 'magcount') else
            ((abs(x - y) if (not u and y is not None) or (
                not isnan(x) and y is not None and x < y) else 0.0)
             if t == 'magnitude' else
             ((abs(x - fd) if (not u and fd is not None) or (
                 not isnan(x) and fd is not None and x > fd) else 0.0)
              if t == 'fluxdensity' else None))
            for x, y, ct, fd, u, t in zip(
                self._model_observations, self._mags, self._cts, self._fds,
                self._upper_limits, self._o_types)
        ])

        if np.any(residuals == None):  # noqa: E711
            raise ValueError('Null residual.')

        # Observational errors to be put in diagonal of error matrix.
        # Pick the lower error bar when the model lies above the datum and
        # the upper error bar otherwise (asymmetric error bars).
        diag = [
            ((ctel if (ct is not None and x > ct) else cteu))
            if (t == 'countrate' or t == 'magcount') else
            ((el if (y is None or x > y) else eu))
            if t == 'magnitude' else
            ((fdel if (fd is not None and x > fd) else fdeu))
            if t == 'fluxdensity' else None
            for x, y, eu, el, fd, fdeu, fdel, ct, ctel, cteu, t in zip(
                self._model_observations, self._mags,
                self._e_u_mags, self._e_l_mags, self._fds, self._e_u_fds,
                self._e_l_fds, self._cts, self._e_l_cts, self._e_u_cts,
                self._o_types)
        ]

        diag = [0.0 if x is None else x for x in diag]
        # Square to form variances for the covariance-matrix diagonal.
        diag = np.array(diag) ** 2

        if np.any(diag == None):  # noqa: E711
            raise ValueError('Null error.')

        ret['kdiagonal'] = diag
        ret['kresiduals'] = residuals

        return ret

    def preprocess(self, **kwargs):
        """Construct arrays of observations based on data keys.

        Skipped when the observation types are unchanged and preprocessing
        already ran (``_preprocessed`` is presumably initialized by the
        parent ``Array`` class — confirm).
        """
        otypes = np.array(kwargs.get('observation_types', []))
        if np.array_equiv(
                otypes, self._observation_types) and self._preprocessed:
            return
        self._observation_types = otypes
        self._mags = np.array(kwargs.get('magnitudes', []))
        self._fds = np.array(kwargs.get('fluxdensities', []))
        self._cts = np.array(kwargs.get('countrates', []))
        self._e_u_mags = kwargs.get('e_upper_magnitudes', [])
        self._e_l_mags = kwargs.get('e_lower_magnitudes', [])
        self._e_mags = kwargs.get('e_magnitudes', [])
        self._e_u_fds = kwargs.get('e_upper_fluxdensities', [])
        self._e_l_fds = kwargs.get('e_lower_fluxdensities', [])
        self._e_fds = kwargs.get('e_fluxdensities', [])
        self._u_fds = kwargs.get('u_fluxdensities', [])
        self._e_u_cts = kwargs.get('e_upper_countrates', [])
        self._e_l_cts = kwargs.get('e_lower_countrates', [])
        self._e_cts = kwargs.get('e_countrates', [])
        self._u_cts = kwargs.get('u_countrates', [])
        self._upper_limits = np.array(kwargs.get('upperlimits', []),
                                      dtype=bool)
        self._observed = np.array(kwargs.get('observed', []), dtype=bool)

        self._o_types = self._observation_types[self._observed]

        # Magnitudes first
        # Note: Upper limits (censored data) currently treated as a
        # half-Gaussian, this is very approximate and can be improved upon.
        # Fill missing error bars: prefer the one-sided value, then the
        # symmetric value, then the configured defaults.
        self._e_u_mags = [
            kwargs['default_upper_limit_error']
            if (e is None and eu is None and self._upper_limits[i]) else
            (kwargs['default_no_error_bar_error']
             if (e is None and eu is None) else (e if eu is None else eu))
            for i, (e, eu) in enumerate(zip(self._e_mags, self._e_u_mags))
        ]
        self._e_l_mags = [
            kwargs['default_upper_limit_error']
            if (e is None and el is None and self._upper_limits[i]) else
            (kwargs['default_no_error_bar_error']
             if (e is None and el is None) else (e if el is None else el))
            for i, (e, el) in enumerate(zip(self._e_mags, self._e_l_mags))
        ]

        # Ignore upperlimits for countrate if magnitude is present.
        self._upper_limits[self._observation_types[
            self._observed] == 'magcount'] = False
        # Missing countrate errors fall back to the countrate value itself.
        self._e_u_cts = [
            c if (e is None and eu is None) else
            e if eu is None else eu
            for i, (c, e, eu) in enumerate(zip(
                self._cts, self._e_cts, self._e_u_cts))
        ]
        self._e_l_cts = [
            c if (e is None and el is None) else
            e if el is None else el
            for i, (c, e, el) in enumerate(zip(
                self._cts, self._e_cts, self._e_l_cts))
        ]

        # Now flux densities
        self._e_u_fds = [
            v if (e is None and eu is None and self._upper_limits[i]) else
            (v if (e is None and eu is None) else (e if eu is None else eu))
            for i, (e, eu, v) in enumerate(
                zip(self._e_fds, self._e_u_fds, self._fds))
        ]
        self._e_l_fds = [
            0.0 if self._upper_limits[i] else (
                v if (e is None and el is None) else (e if el is None else el))
            for i, (e, el, v) in enumerate(
                zip(self._e_fds, self._e_l_fds, self._fds))
        ]
        # Convert flux densities (and their errors) to a common unit.
        self._fds = np.array([
            x / flux_density_unit(y) if x is not None else None
            for x, y in zip(self._fds, self._u_fds)
        ])
        self._e_u_fds = [
            x / flux_density_unit(y) if x is not None else None
            for x, y in zip(self._e_u_fds, self._u_fds)
        ]
        self._e_l_fds = [
            x / flux_density_unit(y) if x is not None else None
            for x, y in zip(self._e_l_fds, self._u_fds)
        ]

        self._preprocessed = True
| mit |
shlomif/PySolFC | pysollib/games/mahjongg/mahjongg3.py | 2 | 24825 | #!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------##
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------##
from pysollib.games.mahjongg.mahjongg import r
# test
# r(5991, "AAA 1", ncards=4, layout="0daa")
# r(5992, "AAA 2", ncards=8, layout="0daadca")
# r(5993, "AAA 3", ncards=20, layout="0daaCabdacKbbdca" +
# "Ccbdcc")
# r(5994, "AAA 4", ncards=20, layout="0daaDabdacdcaDcb" +
# "dcc")
# ************************************************************************
# * game definitions
# ************************************************************************
r(5401, "Taipei", layout="0aagabbabdabjabl" +
"hccacfachhckadba" +
"ddhdehdghdiadjad" +
"lhecaefoegaehhek" +
"afcifehfgvfgifia" +
"fkagahgcageaggog" +
"gagihgkagmhhaahc" +
"ohehhfvhfhhhvhho" +
"hiahkhhmaiahidai" +
"eaigoigCigaiihij" +
"aimhjbajcojehjfv" +
"jfJjghjhvjhojiaj" +
"khjlakahkdakeakg" +
"okgCkgQkgakihkja" +
"kmhlbalcolehlfvl" +
"fJlghlhvlholialk" +
"hllamahmdameamgo" +
"mgCmgamihmjammhn" +
"aanconehnfvnfhnh" +
"vnhoniankhnmaoah" +
"ocaoeaogoogaoiho" +
"kaomapcipehpgvpg" +
"ipiapkhqcaqfoqga" +
"qhhqkarbardhrehr" +
"ghriarjarlhscasf" +
"ashhskatbatdatja" +
"tlaug")
r(5402, "Hare", layout="0aacaamacabccace" +
"ackbcmacobeacecb" +
"eebekcembeoofavf" +
"cofeofkvfmofobga" +
"cgcbgebgkcgmbgoa" +
"iabicbiebikbimai" +
"oakcakebkhakkakm" +
"amebmgbmiamkbogo" +
"ohboicqfcqhcqjas" +
"ejsfasgjshasijsj" +
"askCtgCtibuddufd" +
"uhdujbulovdCvgCv" +
"iovlbwddwfdwhdwj" +
"bwlcyfcyhcyjbAhb" +
"Ch")
r(5403, "Horse", layout="0bafbahbajbcdbch" +
"bclaedbefbehbeja" +
"elagfaghagjaifhi" +
"gaihhiiaijakfhkg" +
"akhhkiakjbmecmgc" +
"mibmkcodcofcohco" +
"jcolcqdcqfvqgcqh" +
"vqicqjcqlbsbcsfv" +
"sgcshvsicsjbsnot" +
"botnbubcudcufvug" +
"cuhvuicujculbunb" +
"wbcwdcwfcwhcwjcw" +
"lbwnbycayfbyhayj" +
"bymaAbaAnaCaaCo")
r(5404, "Rat", layout="0aaabacoadbaeaag" +
"bcacccccebcgvddo" +
"dgbeacecceebegag" +
"abgcbggagmbicbie" +
"aigaimckeckgckia" +
"kmblcblkcmevmfcm" +
"gvmhcmibmmamobnc" +
"Cngbnkhnocoevofc" +
"ogvohcoibomaoobp" +
"cbpkcqecqgcqiaqm" +
"bscbseasgasmauab" +
"ucbugaumbwacwccw" +
"ebwgvxdoxgbyacyc" +
"cyebygaAabAcoAdb" +
"AeaAg")
r(5405, "Tiger", layout="0baabacbambaobca" +
"bccbcmbcobebaegh" +
"ehaeibenbgbbggbg" +
"ibgnaibbidcifcih" +
"dijbilainakdhkea" +
"kfokfhkgakhpkhhk" +
"iakjokjhkkaklbme" +
"pmfbmgomhbmiomjb" +
"mkboeoofbogoohbo" +
"ipojbokbqeoqfbqg" +
"pqhbqioqjbqkbsdd" +
"sfcshcsjbslbubbu" +
"dbuhbulbunbwbbwi" +
"bwnbybbygbynbAbb" +
"AibAnbCbbCgbCn")
r(5406, "Ram", layout="0aacaaeaagaaihbe" +
"hbghbibccaceocea" +
"cgaciociackadaod" +
"chdehdihdkheabec" +
"aeepeeaeioeiaeka" +
"faofchfehfihfkhg" +
"abgcageogeaggagi" +
"ogiagkahahhehhgh" +
"hibicaieaigaiibk" +
"cblgbmcbmeamione" +
"hniankanmcocboev" +
"oebogaoiooihokho" +
"mbooopehpiapkapm" +
"bqcbqeaqibrgbscb" +
"ucaueaugauiavahv" +
"ehvghvihwabwcawe" +
"oweawgawiowiawka" +
"xaoxchxehxihxkhy" +
"abycayepyeayioyi" +
"aykazaozchzehzih" +
"zkbAcaAeoAeaAgaA" +
"ioAiaAkhBehBghBi" +
"aCcaCeaCgaCi")
r(5407, "Wedges", layout="0aagbaicakdamaca" +
"acibckccmbeaaeca" +
"ekbemcgabgcageag" +
"mdiacicbieaigeka" +
"dkcckebkgakiakoh" +
"lofmaemcdmecmgbm" +
"iamkammamoomohno" +
"eoaeocdoecogaoia" +
"oodqadqccqeaqgcs" +
"acscaseasmbuaauc" +
"aukbumawaawibwkc" +
"wmaygbyicykdym")
r(5408, "Monkey", layout="0aaahabaacoachad" +
"aaeaakbcaaceackh" +
"clacmocmhcnacood" +
"abeabeoofoagahgb" +
"agcaghbgobicbigb" +
"iiaimhinaioojgbk" +
"cdkebkgvkgdkibkk" +
"bkmolgdmebmgvmgd" +
"miongdoebogvogdo" +
"iaokholaomaooopg" +
"hpobqcdqebqgvqgd" +
"qiaqooqoorghroas" +
"ahsbascbsgasmaso" +
"auaaughuhauiawih" +
"wjawkowkhwlawmby" +
"maAchAdaAeoAehAf" +
"vAfaAgoAgCAghAhv" +
"AhaAioAiCAihAjvA" +
"jaAkoAkhAlaAmaCa" +
"hCbaCc")
r(5409, "Rooster", layout="0aaaaagabchcccce" +
"ccgadcvdfadiceec" +
"egaeohfoageagoog" +
"ohhoaiehifaigaim" +
"aiohjmbkeokfbkgo" +
"khbkiakkakmamccm" +
"evmfcmgvmhcmiamk" +
"anahncCnghoaaoco" +
"occoevofcogvohco" +
"iapaopahpchqaaqc" +
"oqcbqeoqfbqgvqgo" +
"qhbqiaqkaqmaraor" +
"ahrchrmhsaascbsg" +
"oshbsiaskasmasoa" +
"taotahtohuaaufhu" +
"gauhauoavabweowf" +
"bwgowhbwivxgayab" +
"ycoydbyeoyfbygoy" +
"hbyihzaaAaaAeaAj" +
"hAkaAlhBaaCaaCeh" +
"CfaCgaCl")
r(5410, "Dog", layout="0aaeaaghbehbgacc" +
"aceoceacgocgacia" +
"ckhdchdehdghdihd" +
"kaecoecaeeaegaei" +
"aekhfcagcaichida" +
"ieoiehifaigvjebk" +
"ackcckeckgbkibkk" +
"vlcoliolkbmacmcc" +
"mgbmibmkamoonavn" +
"conkboacoccoecog" +
"bokaomaooopavpco" +
"pkbqacqccqgbqibq" +
"kvrcoriorkbsacsc" +
"csecsgbsibskvtea" +
"uchudaueouehufau" +
"gawchxcaycoycaye" +
"aygayiaykhzchzeh" +
"zghzihzkaAcaAeoA" +
"eaAgoAgaAiaAkhBe" +
"hBgaCeaCg")
r(5411, "Snake", layout="0bagbaiobhbcgbci" +
"bdebecbegbfebgcb" +
"habicbiicikcimbj" +
"avjlbkcbkebkgbki" +
"ckkckmakooleolgo" +
"livllhlobmcbmebm" +
"gbmicmkcmmamoomo" +
"vnlhnocokcomaooo" +
"oovplhpobqcbqebq" +
"gbqicqkcqmaqoore" +
"orgorivrlbscbseb" +
"sgbsicskcsmbtabu" +
"cbvabwcbwebwgbwi" +
"bwkbycbyebygbyib" +
"ykbAjaCj")
r(5412, "Boar", layout="0aacaaehafaagoag" +
"hahaaiaakhbchbka" +
"ccoccaciackockac" +
"mhdchdkhdmaecaee" +
"aekoekaemoemhfkh" +
"fmagiagkogkagmhh" +
"kaiiaikakcbkgbki" +
"akmolgolibmcbmeb" +
"mgbmibmkbmmoncon" +
"epngpnionkonmano" +
"aoabocvocboevoeb" +
"ogboibokvokbomvo" +
"mhooopcopeppgppi" +
"opkopmapobqcbqeb" +
"qgbqibqkbqmorgor" +
"iascbsgbsiasmaui" +
"aukhvkawiawkowka" +
"wmhxkhxmaycayeay" +
"koykaymoymhzchzk" +
"hzmaAcoAcaAiaAko" +
"AkaAmhBchBkaCcaC" +
"ehCfaCgoCghChaCi" +
"aCk")
r(5413, "Ox", layout="0aahabeabkbcgoch" +
"bciaeaaecbegbeia" +
"emaeohfbhfnagaag" +
"cagebggbgiagkagm" +
"agoaicbiebigbiib" +
"ikaimakcbkeckgck" +
"ibkkakmbmecmgcmi" +
"bmkaodioeaofjoga" +
"ohjoiaojiokaolcq" +
"edqgdqicqkcsedsg" +
"dsicskaucbuecugc" +
"uibukaumawcbwecw" +
"gcwibwkawmayaayc" +
"ayebygbyiaykayma" +
"yohzbhznaAaaAcaA" +
"haAmaAo")
r(5414, "Bridge 2", layout="0daadacdaedagdai" +
"dakdamdaocccccec" +
"cgccicckccmbeebe" +
"gbeibekaggagiaih" +
"hjhakhokhhlhvlha" +
"mfamhomhCmhhnhvn" +
"hJnhanjaofaohooh" +
"Cohhphvphaqhoqhh" +
"rhashaugauibwebw" +
"gbwibwkcyccyecyg" +
"cyicykcymdAadAcd" +
"AedAgdAidAkdAmdA" +
"o")
# r(5501, "Big X", layout="0aacaamhbchbmacb" +
# "occacdaclocmacnh" +
# "dbhddhdlhdnaeaae" +
# "coedaeeaekoelaem" +
# "aeohfchfehfkhfma" +
# "gbagdogeagfagjog" +
# "kaglagnhhdhhfhhj" +
# "hhlaicaieoifaiga" +
# "iioijaikaimhjehj" +
# "ghjihjkakdakfokg" +
# "akhokiakjaklhlfh" +
# "lhhljameamgomgam" +
# "iomiamkhnfhnhhnj" +
# "aofoofaohoohaojo" +
# "ojhpfhphhpjaqeaq" +
# "goqgaqioqiaqkhrf" +
# "hrhhrjasdasfosga" +
# "shosiasjaslhteht" +
# "ghtihtkaucaueouf" +
# "augauioujaukaumh" +
# "vdhvfhvjhvlawbaw" +
# "doweawfawjowkawl" +
# "awnhxchxehxkhxma" +
# "yaaycoydayeaykoy" +
# "laymayohzbhzdhzl" +
# "hznaAboAcaAdaAlo" +
# "AmaAnhBchBmaCcaC" +
# "m")
# r(5502, "Axis", layout="0bafcahbajbbdvbh" +
# "bblcchCchbdcvdhb" +
# "dmcehCehbfbvfhbf" +
# "ncghahaahohiahio" +
# "ajabjhajohkabkfb" +
# "kjhkoalabldbllal" +
# "ohmacmhhmoanaanc" +
# "vnhanmanoiobcohi" +
# "onapbwphapniqbcq" +
# "hiqnaraarcvrharm" +
# "arohsacshhsoatab" +
# "tdbtlatohuabufbu" +
# "jhuoavabvhavohwa" +
# "hwoaxaaxocyhbzbv" +
# "zhbzncAhCAhbBcvB" +
# "hbBmcChCChbDdvDh" +
# "bDlbEfcEhbEj")
# r(5503, "Cobweb", layout="0aacaafhagaahoah" +
# "haiaajaamacbhcca" +
# "cdaclhcmacnadfhd" +
# "gadhodhhdiadjaea" +
# "aeohfaafcafeafha" +
# "fkafmhfoagaogaag" +
# "oogohhaahcahhahm" +
# "hhoaiaoiaaiooioh" +
# "jaajdajhajlhjoak" +
# "aakoalealhalkama" +
# "amoancanfhnganhh" +
# "nianjanmaoahoboo" +
# "gooihonaooopbapc" +
# "bpgvpgbpivpiapmo" +
# "pnaqahqboqgoqihq" +
# "naqoarcarfhrgarh" +
# "hriarjarmasaasoa" +
# "teathatkauaauohv" +
# "aavdavhavlhvoawa" +
# "owaawoowohxaaxca" +
# "xhaxmhxoayaoyaay" +
# "ooyohzaazcazeazh" +
# "azkazmhzoaAaaAoa" +
# "BfhBgaBhoBhhBiaB" +
# "jaCbhCcaCdaClhCm" +
# "aCnaEcaEfhEgaEho" +
# "EhhEiaEjaEm")
# r(5504, "Pyramids", layout="0aaaaacaakaamhbb" +
# "abeabgabihblacaa" +
# "ccackacmhdbadead" +
# "gadihdlaeaaecaek" +
# "aemaffhfgafhahba" +
# "heahiahlhibhiehi" +
# "ihilajbojbajdoje" +
# "ajfajhojiajjajlo" +
# "jlhkbvkbhkevkehk" +
# "ghkivkihklvklalb" +
# "olbClbaldoleClea" +
# "lfolgalholiClial" +
# "jallollCllhmbvmb" +
# "hmevmehmgvmghmiv" +
# "mihmlvmlanbonbCn" +
# "bandoneCneanfong" +
# "anhoniCnianjanlo" +
# "nlCnlhobvobhoevo" +
# "ehoghoivoiholvol" +
# "apbopbapdopeapfa" +
# "phopiapjaploplhq" +
# "bhqehqihqlarbare" +
# "ariarlatfhtgatha" +
# "uaaucaukaumhvbav" +
# "eavgavihvlawaawc" +
# "awkawmhxbaxeaxga" +
# "xihxlayaaycaykay" +
# "m")
# r(5505, "Wicker", layout="0bafbakbbcbbhbbm" +
# "bcebcjbdbbdgbdlb" +
# "edbeibenbfabffbf" +
# "kbgcbghbgmbhebhj" +
# "bibbigbilbjdbjib" +
# "jnbkabkfbkkblcbl" +
# "hblmbmebmjbnbbng" +
# "bnlbodboibonbpab" +
# "pfbpkbqcbqhbqmbr" +
# "ebrjbsbbsgbslbtd" +
# "btibtnbuabufbukb" +
# "vcbvhbvmbwebwjbx" +
# "bbxgbxlbydbyibyn" +
# "bzfbzkbAh")
r(5801, "Faro", name="Double Mahjongg Faro", ncards=288, layout="0aaaha" +
"baachadaae" +
"oaehafaagiahaaih" +
"ajaakoakhalaamha" +
"naaoobcvbhobmacb" +
"hccvccacdacgichC" +
"chaciaclhcmvcmac" +
"nodcCdcvdhodmCdm" +
"aebhecvecaedheea" +
"efcehCehaejhekae" +
"lhemvemaenofcvfh" +
"ofmbgcagfhggagho" +
"ghhgiagjbgmahaah" +
"ohiahioajapjaajc" +
"cjebjhcjkajmajop" +
"johkahkcokhhkmhk" +
"oalaalcqlcalfhlg" +
"alhvlhhlialjalmq" +
"lmalohmcomhCmhhm" +
"manbqncandhneanf" +
"bnhvnhanjhnkanlq" +
"nmannhocooeoohoo" +
"khomapcppcCpdbpe" +
"vpebphwphbpkvpkC" +
"plapmppmhqcoqeoq" +
"hoqkhqmarbqrcard" +
"hrearfbrhvrharjh" +
"rkarlqrmarnhscos" +
"hCshhsmataatcqtc" +
"atfhtgathvthhtia" +
"tjatmqtmatohuahu" +
"couhhumhuoavapva" +
"avccvebvhcvkavma" +
"vopvohwahwoaxaax" +
"obycayfhygayhoyh" +
"hyiayjbymozcvzho" +
"zmaAbhAcvAcaAdhA" +
"eaAfcAhCAhaAjhAk" +
"aAlhAmvAmaAnoBcC" +
"BcvBhoBmCBmaCbhC" +
"cvCcaCdaCgiChCCh" +
"aCiaClhCmvCmaCno" +
"DcvDhoDmaEahEbaE" +
"chEdaEeoEehEfaEg" +
"iEhaEihEjaEkoEkh" +
"ElaEmhEnaEo")
# r(5802, "Big Square", name="Double Mahjongg Big Square", ncards=288,
# layout="0daadacdaedagdai" +
# "dakdcadccdcedcgd" +
# "cidckdeadecdeede" +
# "gdeidekdgadgcdge" +
# "dggdgidgkdiadicd" +
# "iedigdiidikdkadk" +
# "cdkedkgdkidkkdma" +
# "dmcdmedmgdmidmkd" +
# "oadocdoedogdoido" +
# "kdqadqcdqedqgdqi" +
# "dqkdsadscdsedsgd" +
# "sidskduaducduedu" +
# "gduidukdwadwcdwe" +
# "dwgdwidwk")
r(5803, "Two Squares", name="Double Mahjongg Two Squares", ncards=288,
layout="0daadacdaedagdai" +
"dakdcadccdcedcgd" +
"cidckdeadecdeede" +
"gdeidekdgadgcdge" +
"dggdgidgkdiadicd" +
"iedigdiidikdkadk" +
"cdkedkgdkidkkdoa" +
"docdoedogdoidokd" +
"qadqcdqedqgdqidq" +
"kdsadscdsedsgdsi" +
"dskduaducduedugd" +
"uidukdwadwcdwedw" +
"gdwidwkdyadycdye" +
"dygdyidyk")
# r(5804, "Rows", name="Double Mahjongg Rows", ncards=288,
# layout="0daadacCaddaeCaf" +
# "dagCahdaidakdcad" +
# "ckeeadeceeeeegde" +
# "ieekegaegkeiadic" +
# "eieeigdiieikekae" +
# "kkemadmcemeemgdm" +
# "iemkeoaeokeqadqc" +
# "eqeeqgdqieqkesae" +
# "skeuaduceueeugdu" +
# "ieukewaewkeyadyc" +
# "eyeeygdyieykdAad" +
# "AkdCadCcCCddCeCC" +
# "fdCgCChdCidCk")
r(5805, "Twin Picks", name="Double Mahjongg Twin Picks", ncards=288,
layout="0aacaaeaagaaiaak" +
"aamhbdhbfhbhhbjh" +
"blacaaccaceoceac" +
"gocgaciociackock" +
"acmacohdbhddhdfv" +
"dfhdhvdhhdjvdjhd" +
"lhdnaeaaecoecaee" +
"oeeaegoegCegaeio" +
"eiCeiaekoekaemoe" +
"maeohfbhfdvfdhff" +
"vffhfhvfhhfjvfjh" +
"flvflhfnagaagcog" +
"cageogeCgeaggogg" +
"CggagiogiCgiagko" +
"gkCgkagmogmagohh" +
"bhhdvhdhhfvhfhhh" +
"vhhhhjvhjhhlvhlh" +
"hnaiaaicoicaieoi" +
"eaigoigCigaiioii" +
"Ciiaikoikaimoima" +
"iohjbhjdhjfvjfhj" +
"hvjhhjjvjjhjlhjn" +
"akaakcakeokeakgo" +
"kgakiokiakkokkak" +
"makohldhlfhlhhlj" +
"hllamcameamgamia" +
"mkammapaapcapeap" +
"gapiapkapmapoasc" +
"aseasgasiaskasmh" +
"tdhtfhthhtjhtlau" +
"aaucaueoueaugoug" +
"auiouiaukoukauma" +
"uohvbhvdhvfvvfhv" +
"hvvhhvjvvjhvlhvn" +
"awaawcowcaweowea" +
"wgowgCwgawiowiCw" +
"iawkowkawmowmawo" +
"hxbhxdvxdhxfvxfh" +
"xhvxhhxjvxjhxlvx" +
"lhxnayaaycoycaye" +
"oyeCyeaygoygCyga" +
"yioyiCyiaykoykCy" +
"kaymoymayohzbhzd" +
"vzdhzfvzfhzhvzhh" +
"zjvzjhzlvzlhznaA" +
"aaAcoAcaAeoAeaAg" +
"oAgCAgaAioAiCAia" +
"AkoAkaAmoAmaAohB" +
"bhBdhBfvBfhBhvBh" +
"hBjvBjhBlhBnaCaa" +
"CcaCeoCeaCgoCgaC" +
"ioCiaCkoCkaCmaCo" +
"hDdhDfhDhhDjhDla" +
"EcaEeaEgaEiaEkaE" +
"m")
r(5806, "Roost", name="Double Mahjongg Roost", ncards=288,
layout="0aaahabaacoachad" +
"vadaaeoaehafvafa" +
"agoaghahvahaaioa" +
"ihajaakaamaaoCbf" +
"hblhbnacbhccacdo" +
"cdhcevceacfocfhc" +
"gvcgachochhciacj" +
"aclocmacnhdkhdma" +
"eiaekoelaemaeoaf" +
"aafcafehfjhflvfl" +
"hfnhgchgeaghagjo" +
"gkaglCglogmagnah" +
"bohcahdoheahfhhi" +
"hhkvhlhhmhibhidv" +
"iehifaiioijaikoi" +
"laimajaajcojdaje" +
"Cjeojfajghjjvjkh" +
"jlajohkcvkdhkevk" +
"fhkgakjokkaklalb" +
"olcaldolealfClfo" +
"lgalhhlkblnhmbhm" +
"dvmehmfvmghmhamk" +
"omnanaancondaneo" +
"nfangCngonhanian" +
"mhnnanohochoevof" +
"hogvohhoiapbapdo" +
"peapfopgaphCphop" +
"iapjhpkaploplhpm" +
"apnhqchqevqfhqgv" +
"qhhqiaraarcordar" +
"eorfargCrgorhari" +
"armhrnarohsbhsdv" +
"sehsfvsghshaskos" +
"natbotcatdoteatf" +
"Ctfotgathhtkbtnh" +
"ucvudhuevufhugau" +
"joukaulavaavcovd" +
"aveCveovfavghvjv" +
"vkhvlavohwbhwdvw" +
"ehwfawiowjawkowl" +
"awmaxboxcaxdoxea" +
"xfhxihxkvxlhxmhy" +
"chyeayhayjoykayl" +
"Cyloymaynazaazca" +
"zehzjhzlvzlhznaA" +
"iaAkoAlaAmaAohBk" +
"hBmaCbhCcaCdoCdh" +
"CevCeaCfoCfhCgvC" +
"gaChoChhCiaCjaCl" +
"oCmaCnCDfhDlhDna" +
"EahEbaEcoEchEdvE" +
"daEeoEehEfvEfaEg" +
"oEghEhvEhaEioEih" +
"EjaEkaEmaEo")
r(5807, "Castle", name="Double Mahjongg Big Castle", ncards=288,
layout="0eaadacdaeeageai" +
"dakdameaodcadcoc" +
"ddvdecdfvdgcdhCd" +
"hvdicdjvdkcdldea" +
"deoafdaflcgacgoa" +
"hdahlciacioajdaj" +
"lckahkdhklckoald" +
"elfblheljallcmah" +
"mdhmlcmoandbnfbn" +
"janleoahodoofooj" +
"holeooapdbpfvpfb" +
"pjvpjapleqahqdoq" +
"foqjhqleqoardbrf" +
"brjarlcsahsdhslc" +
"soatdetfbthetjat" +
"lcuahudhulcuoavd" +
"avlcwacwoaxdaxlc" +
"yacyoazdazldAadA" +
"ocBdvBecBfvBgcBh" +
"CBhvBicBjvBkcBld" +
"CadCoeEadEcdEeeE" +
"geEidEkdEmeEo")
r(5808, "Eight Squares", name="Double Mahjongg Eight Squares", ncards=288,
layout="0daadacdaedahdaj" +
"daldcadccdcedchd" +
"cjdcldeadecdeede" +
"hdejdeldhadhcdhe" +
"dhhdhjdhldjadjcd" +
"jedjhdjjdjldladl" +
"cdledlhdljdlldoa" +
"docdoedohdojdold" +
"qadqcdqedqhdqjdq" +
"ldsadscdsedshdsj" +
"dsldvadvcdvedvhd" +
"vjdvldxadxcdxedx" +
"hdxjdxldzadzcdze" +
"dzhdzjdzl")
r(5809, "Big Flying Dragon", name="Double Mahjongg Big Flying Dragon",
ncards=288, layout="0aajacaaciackacs" +
"aeaaegaeihejaeka" +
"emaesagaageaggbg" +
"ibgkagmagoagsaia" +
"aicaiebigbiibikb" +
"imaioaiqaisakabk" +
"cbkebkgbkibkkbkm" +
"bkobkqaksbmabmcc" +
"mecmgcmicmkcmmcm" +
"obmqbmsboaboccoe" +
"dogdoidokdomcoob" +
"oqbosbqabqccqedq" +
"geqieqkdqmcqobqq" +
"bqsJrjbsabsccsed" +
"sgesieskdsmcsobs" +
"qbssbuabuccuedug" +
"duidukdumcuobuqb" +
"usbwabwccwecwgcw" +
"icwkcwmcwobwqbws" +
"ayabycbyebygbyib" +
"ykbymbyobyqaysaA" +
"aaAcaAebAgbAibAk" +
"bAmaAoaAqaAsaCaa" +
"CeaCgbCibCkaCmaC" +
"oaCsaEaaEgaEihEj" +
"aEkaEmaEsaGaaGia" +
"GkaGsaIaaIjaIsaK" +
"j")
r(5810, "Sphere", name="Double Mahjongg Sphere", ncards=288,
layout="0aajaalaanabhhbk" +
"hbmabpacfhciacjo" +
"ckaclocmacnhcoac" +
"raddhdgadhodivdk" +
"hdlvdmodoadphdqa" +
"dtaefoegveihejae" +
"koekaemoemhenveo" +
"oeqaerafchfdhffh" +
"fhafiafohfphfrhf" +
"tafuageogeaggpgg" +
"pgihgjpgkbglpgmh" +
"gnpgoagqpgqagsog" +
"sahbhhchhfhhhahj" +
"ahnhhphhrhhuahva" +
"idoidvieaifoigai" +
"hoiihijoikbiloim" +
"hinoioaipoiqairv" +
"isaitoitajahjbhj" +
"dhjfhjhvjlhjphjr" +
"hjthjvajwakcokcv" +
"kdakeokeakgokgak" +
"iokiakkokkakmokm" +
"akookoakqokqakso" +
"ksvktakuokualahl" +
"bhldhlfvlfhlhvlh" +
"hljvljhllvllhlnv" +
"lnhlpvlphlrvlrhl" +
"thlvalwamcomcvmd" +
"ameomeamgomgamio" +
"miamkomkammommam" +
"oomoamqomqamsoms" +
"vmtamuomuanahnbh" +
"ndhnfhnhvnlhnphn" +
"rhnthnvanwaodood" +
"voeaofoogaohooih" +
"ojookboloomhonoo" +
"oaopooqaorvosaot" +
"ootapbhpchpfhpha" +
"pjapnhpphprhpuap" +
"vaqeoqeaqgpqgpqi" +
"hqjpqkbqlpqmhqnp" +
"qoaqqpqqaqsoqsar" +
"chrdhrfhrhariaro" +
"hrphrrhrtaruasfo" +
"sgvsihsjaskoskas" +
"mosmhsnvsoosqasr" +
"atdhtgathotivtkh" +
"tlvtmotoatphtqat" +
"taufhuiaujoukaul" +
"oumaunhuoauravhh" +
"vkhvmavpawjawlaw" +
"n")
# ----------------------------------------------------------------------
r(5901, "Happy New Year", name="Half Mahjongg Happy New Year", ncards=72,
layout="0aafaajaanaceaci" +
"acmbedbehaelofdo" +
"fhhflbgdbghagloh" +
"dohhaibbidaighih" +
"aiiailhimainojma" +
"kaakeckhakjbkmbk" +
"oolmambbmdamghmh" +
"amiamlhmmamnondo" +
"nhbodbohaolopdop" +
"hhplbqdbqhaqlase" +
"asiasmaufaujaun")
# r(5902, "K 2", name="Half Mahjongg K 2", ncards=72,
# layout="0aagabcabehbfobg" +
# "hbhabiabkacgvcga" +
# "dbidgadlaegvegbf" +
# "aifgbfmaggbhaihg" +
# "bhmaigbjahjgbjma" +
# "kgokgblahlgblmam" +
# "gbnaingbnmaogbpa" +
# "ipgbpmaqgvqgarbi" +
# "rgarlasgvsgatcat" +
# "ehtfotghthatiatk" +
# "aug")
# r(5903, "Abstract", name="Half Mahjongg Abstract", ncards=72,
# layout="0aaaaagabcabebdd" +
# "adgadioedhehafch" +
# "fdafeafhagahhaah" +
# "dahgaiahjaojbbjc" +
# "ajfakaalcamfamha" +
# "nbhncandhngaogbo" +
# "iapdhqdaqiarcord" +
# "arehrihsdasgasia" +
# "tdauaaufhvbavcav" +
# "iawaawehxeaxiayc" +
# "ayebyghzdaAdaAha" +
# "BbaBfhCfaCiaDcaD" +
# "eaDghDhaEaaEi")
r(5904, "Smile", name="Half Mahjongg Smile", ncards=72,
layout="0bagoahbaibbebbk" +
"bccbcmbebbenaffb" +
"fjbgahgfbgoahfbh" +
"kbiabiobjlbkabko" +
"bllbmabmoanfbnkb" +
"oahofbooapfbpjbq" +
"bbqnbscbsmbtebtk" +
"bugouhbui")
r(5905, "Wall", name="Half Mahjongg Wall", ncards=72,
layout="0eaabacbaebagbai" +
"bakbameaoacaacoa" +
"eaaeoagaagoaiaai" +
"oakaakoamaamoaoa" +
"aooaqaaqoasaasoa" +
"uaauoawaawoayaay" +
"oaAaaAoaCaaCoeEa" +
"bEcbEebEgbEibEkb" +
"EmeEo")
# ----------------------------------------------------------------------
# r(5601, "Skomoroh 1", ncards=28, layout="0aacaaeaaghbdhbf" +
# "acaacdoceacfacih" +
# "ddhdfaebaeeoeeae" +
# "hhfdhffagaagdoge" +
# "agfagihhdhhfaica" +
# "ieaig")
# r(5602, "Skomoroh 2", ncards=116, layout="0aaeaaghahaaiaak" +
# "abaaboacfbchacja" +
# "daadoaeghehaeiaf" +
# "aafocghahaahcahf" +
# "vhhahjahmahohidc" +
# "ihhilajaajdajfwj" +
# "hajjajlajohkdhkg" +
# "akhokhhkihklalaa" +
# "lcalewlhalkalmal" +
# "ohmfamgimhamihmj" +
# "anaancanewnhanka" +
# "nmanohodhogaohoo" +
# "hhoiholapaapdapf" +
# "wphapjaplapohqdc" +
# "qhhqlaraarcarfvr" +
# "harjarmarocshata" +
# "atoaughuhauiavaa" +
# "voawfbwhawjaxaax" +
# "oayeayghyhayiayk")
# r(5603, "Skomoroh 3", ncards=132, layout="0aachadaaeoaeXae" +
# "hafyafaagoagXagh" +
# "ahaaiabaabkhcahc" +
# "kadaadeadgadkhea" +
# "hefhekafaafeafga" +
# "fkhgahgfhgkahaah" +
# "eahgahkhiahifhik" +
# "ajaajeajgajkhkah" +
# "kfhkkalaalealgal" +
# "khmahmfhmkanaane" +
# "onfangankhofXofa" +
# "pbapdapfspfaphap" +
# "jhqfXqfaraareorf" +
# "argarkhsahsfhska" +
# "taateatgatkhuahu" +
# "fhukavaaveavgavk" +
# "hwahwfhwkaxaaxea" +
# "xgaxkhyahyfhykaz" +
# "aazeazgazkhAahAf" +
# "hAkaBaaBeaBgaBkh" +
# "CahCkaDaaDkaEchE" +
# "daEeoEeXEehEfyEf" +
# "aEgoEgXEghEhaEi")
# r(5604, "Skomoroh 4", ncards=52, layout="0aajaalaanabhabp" +
# "acfacnacraddadla" +
# "dtaejafcafuagiah" +
# "bbhoahvaiiajaajw" +
# "akjalaalwamkamma" +
# "naanwaonapaapwaq" +
# "oarbbriarvasoatc" +
# "atuaunavdavlavta" +
# "wfawjawraxhaxpay" +
# "jaylayn")
# r(5605, "Skomoroh 5", ncards=208, layout="0aahaajaalaanaap" +
# "hbihbkoblhbmhboa" +
# "ccaceacgaciackac" +
# "macoacqacsacuaec" +
# "aeuagdagjaglagna" +
# "gthhkhhmaieaijai" +
# "loilainaishjkhjm" +
# "akfakjakloklakna" +
# "krhlkhlmameamgam" +
# "jamlomlamnamqams" +
# "anchndhnkhnmhnta" +
# "nuaoeaohaojaoloo" +
# "laonaopaosapchpd" +
# "hpkhpmhptapuaqea" +
# "qhaqjaqlaqnaqpaq" +
# "saraarchrdhrtaru" +
# "arwaseasgasiaska" +
# "smasoasqassataht" +
# "batchtdhtfithitj" +
# "itlitnitphtrhtta" +
# "tuhtvatwaueaugau" +
# "iaukaumauoauqaus" +
# "avaavchvdhvtavua" +
# "vwaweawhawjawlaw" +
# "nawpawsaxchxdhxk" +
# "hxmhxtaxuayeayha" +
# "yjayloylaynaypay" +
# "sazchzdhzkhzmhzt" +
# "azuaAeaAgaAjaAlo" +
# "AlaAnaAqaAshBkhB" +
# "maCfaCjaCloClaCn" +
# "aCrhDkhDmaEeaEja" +
# "EloElaEnaEshFkhF" +
# "maGdaGjaGlaGnaGt" +
# "aIcaIuaKcaKeaKga" +
# "KiaKkaKmaKoaKqaK" +
# "saKuhLihLkoLlhLm" +
# "hLoaMhaMjaMlaMna" +
# "Mp")
# r(5606, "Skomoroh 6", layout="0aadaafaahaajaal" +
# "aanaapadaaddadfa" +
# "dhadjadladnadpad" +
# "sheehegheihekhem" +
# "heoafaafdaffoffa" +
# "fhofhafjofjaflof" +
# "lafnofnafpafshge" +
# "hggvgghgivgihgkv" +
# "gkhgmvgmhgoahaCh" +
# "hChjChlahsaidaif" +
# "oifaihoihJiiaijo" +
# "ijJikailoilainoi" +
# "naipajahjehjgvjg" +
# "CjhhjivjihjkvjkC" +
# "jlhjmvjmhjoajsak" +
# "dakfokfakhokhJki" +
# "akjokjJkkaklokla" +
# "knoknakpalaClhCl" +
# "jCllalshmehmgvmg" +
# "hmivmihmkvmkhmmv" +
# "mmhmoanaandanfon" +
# "fanhonhanjonjanl" +
# "onlannonnanpansh" +
# "oehoghoihokhomho" +
# "oapaapdapfaphapj" +
# "aplapnappapsasda" +
# "sfashasjaslasnas" +
# "p")
# r(5607, "Skomoroh 7", ncards=56, layout="0aabaadaafaahaaj" +
# "aapaaraatablabwa" +
# "daadmadwafaafnaf" +
# "wahaahnahwajfajh" +
# "ajmajwakdakjalbd" +
# "llalvamnamtanaan" +
# "kanpanrapaapjapw" +
# "araarjarwataatka" +
# "twavaavlawdawfaw" +
# "hawnawpawrawtawv")
| gpl-3.0 |
MPBAUnofficial/cmsplugin_image_gallery | cmsplugin_image_gallery/models.py | 1 | 4086 | import threading
from cms.models import CMSPlugin
from django.db import models
from django.utils.translation import ugettext_lazy as _
from inline_ordering.models import Orderable
from filer.fields.image import FilerImageField
from django.core.exceptions import ValidationError
import utils
# TEMPLATE_CHOICES is computed once at import time via template autodiscovery.
# It is stashed on a threading.local first, then read straight back into a
# plain module-level name.  NOTE(review): because the value is read back
# immediately at import, the thread-local wrapper appears to have no lasting
# effect — confirm whether it is still needed.
localdata = threading.local()
localdata.TEMPLATE_CHOICES = utils.autodiscover_templates()
TEMPLATE_CHOICES = localdata.TEMPLATE_CHOICES
class GalleryPlugin(CMSPlugin):
    """CMS plugin instance holding a renderable gallery of ``Image`` rows."""
    def copy_relations(self, oldinstance):
        # django CMS does not copy related objects automatically when a
        # plugin instance is copied (e.g. on page publish/copy), so clone
        # every Image attached to the old instance onto this one by hand.
        for img in oldinstance.image_set.all():
            new_img = Image()
            new_img.gallery=self
            new_img.inline_ordering_position = img.inline_ordering_position
            new_img.src = img.src
            new_img.image_url = img.image_url
            new_img.title = img.title
            new_img.alt = img.alt
            new_img.save()
    # Template used to render the gallery.  The field is hidden from the
    # admin form when autodiscovery found only one candidate template.
    template = models.CharField(max_length=255,
                                choices=TEMPLATE_CHOICES,
                                default='cmsplugin_gallery/gallery.html',
                                editable=len(TEMPLATE_CHOICES) > 1)
    def __unicode__(self):
        # Human-readable label shown in the CMS plugin/structure list.
        return _(u'%(count)d image(s) in gallery') % {'count': self.image_set.count()}
class Image(Orderable):
    """One gallery image, sourced either from django-filer or an external URL.

    Ordering within the gallery comes from the inherited Orderable field
    ``inline_ordering_position`` (verbose name patched after the class body,
    see module trailer).
    """
    def get_media_path(self, filename):
        # Delegate media-path resolution to the first page that uses the
        # gallery's placeholder.  NOTE(review): raises IndexError when the
        # placeholder is not attached to any page — confirm callers
        # guarantee at least one page exists.
        pages = self.gallery.placeholder.page_set.all()
        return pages[0].get_media_path(filename)
    gallery = models.ForeignKey(
        GalleryPlugin,
        verbose_name=_("gallery")
    )
    # Primary image source (django-filer).  Optional because image_url can
    # be used instead; clean() enforces that at least one of the two is set.
    src = FilerImageField(
        null=True,
        blank=True,
        verbose_name=_("image")
    )
    # NOTE(review): URLField(verify_exists=...) was deprecated in Django 1.4
    # and removed in 1.5 — this model targets an older Django release.
    image_url = models.URLField(
        _("alternative image url"),
        verify_exists=True,
        null=True,
        blank=True,
        default=None
    )
    link_url = models.URLField(
        _("link url"),
        verify_exists=True,
        null=True,
        blank=True,
        default=None,
        help_text=_("url used when user click on the image")
    )
    # Cached dimensions; not editable because they are expected to be filled
    # in programmatically rather than by the admin user.
    src_height = models.PositiveSmallIntegerField(
        _("image height"),
        editable=False,
        null=True
    )
    src_width = models.PositiveSmallIntegerField(
        _("image width"),
        editable=False,
        null=True
    )
    title = models.CharField(
        _("title"),
        max_length=255,
        blank=True
    )
    alt = models.CharField(
        _("alt text"),
        max_length=80,
        blank=True
    )
    def clean(self):
        # Model-level validation: an image must come from somewhere —
        # either the filer field or the alternative URL.
        if not self.src and not self.image_url:
            raise ValidationError(_("Image not specified, use image or alternative url to specify the image source"))
    def __unicode__(self):
        # Best available human-readable label, falling back to the pk.
        return self.title or self.alt or str(self.pk)
# Declaring a ``class Meta`` directly on Image breaks the ordering field
# inherited from Orderable (reason unknown to the original author), so the
# meta options are patched onto Image._meta after class creation instead.
Image._meta.get_field('inline_ordering_position').verbose_name = _("Inline ordering position")
Image._meta.verbose_name = _("Image")
Image._meta.verbose_name_plural = _("Images")
| bsd-2-clause |
jswanljung/iris | tools/generate_std_names.py | 12 | 4673 | # (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
A script to convert the standard names information from the provided XML
file into a Python dictionary format.
Takes two arguments: the first is the XML file to process and the second
is the name of the file to write the Python dictionary file into.
By default, Iris will use the source XML file:
etc/cf-standard-name-table.xml
as obtained from:
http://cf-pcmdi.llnl.gov/documents/cf-standard-names
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import argparse
import pprint
import xml.etree.ElementTree as ET
STD_VALUES_FILE_TEMPLATE = '''
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
This file contains a dictionary of standard value names that are mapped
to another dictionary of other standard name attributes. Currently only
the `canonical_unit` exists in these attribute dictionaries.
This file is automatically generated. Do not edit this file by hand.
The file will be generated during a standard build/installation:
python setup.py build
python setup.py install
Also, the file can be re-generated in the source distribution via:
python setup.py std_names
Or for more control (e.g. to use an alternative XML file) via:
python tools/generate_std_names.py XML_FILE MODULE_FILE
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
STD_NAMES = '''.lstrip()
def process_name_table(tree, element_name, *child_elements):
    """
    Yield one single-entry dict per matching element.

    Each yielded dict maps the element's ``id`` attribute to a dict of the
    requested child element names and their text content (``None`` when a
    child is absent or empty), e.g. ``{'air_temperature':
    {'canonical_units': 'K'}}``.
    """
    for node in tree.iterfind(element_name):
        details = {}
        for child_name in child_elements:
            child = node.find(child_name)
            details[child_name] = None if child is None else child.text
        yield {node.get("id"): details}
def to_dict(infile, outfile):
    """
    Parse the CF standard-name XML from *infile* and write the generated
    Python module source (header template + pretty-printed dict) to
    *outfile*.
    """
    tree = ET.parse(infile)
    # Canonical entries: {standard_name: {'canonical_units': units}}.
    values = {}
    for entry in process_name_table(tree, 'entry', 'canonical_units'):
        values.update(entry)
    # Aliases: {alias_name: {'entry_id': canonical_name}}.
    aliases = {}
    for alias in process_name_table(tree, 'alias', 'entry_id'):
        aliases.update(alias)
    # Resolve each alias to the canonical units of the entry it points at.
    for alias_name, details in six.iteritems(aliases):
        target = values.get(details['entry_id'])
        values[alias_name] = {'canonical_units': target.get('canonical_units')}
    outfile.write(STD_VALUES_FILE_TEMPLATE + pprint.pformat(values))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Create Python code from CF standard name XML.')
parser.add_argument('input', metavar='INPUT',
help='Path to CF standard name XML')
parser.add_argument('output', metavar='OUTPUT',
help='Path to resulting Python code')
args = parser.parse_args()
if six.PY2:
encoding = {}
else:
encoding = {'encoding': 'utf-8'}
with open(args.input, 'r', **encoding) as in_fh:
with open(args.output, 'w', **encoding) as out_fh:
to_dict(in_fh, out_fh)
| lgpl-3.0 |
Emotroid-Team/emotion_tw_caf_kernel | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
# wxPython is a hard requirement for this GUI; fail early with a clear
# message if it is missing.
try:
	import wx
except ImportError:
	# Use the parenthesised raise form, which is valid on both Python 2
	# and Python 3 — the old "raise Exc, msg" statement is a SyntaxError
	# under Python 3.
	raise ImportError("You need to install the wxpython lib for this script")
class RootFrame(wx.Frame):
	"""Main perf-sched GUI window: a scrollable, zoomable timeline.

	Each scheduler "rectangle" row is drawn at a fixed height; the x axis
	is time in microseconds, mapped to pixels via ``zoom``.  Drawing is
	delegated back to ``sched_tracer.fill_zone`` during paint events.

	NOTE(review): this is Python 2 era code — several divisions below rely
	on Python 2 integer-division semantics for int operands.
	"""
	# Vertical pixel offset of the first rectangle row.
	Y_OFFSET = 100
	# Height of one rectangle row and the gap between rows (pixels).
	RECT_HEIGHT = 100
	RECT_SPACE = 50
	# Height of the event-marker strip drawn on top of a rectangle.
	EVENT_MARKING_WIDTH = 5
	def __init__(self, sched_tracer, title, parent = None, id = -1):
		wx.Frame.__init__(self, parent, id, title)
		# Leave a small margin around the usable screen area.
		(self.screen_width, self.screen_height) = wx.GetDisplaySize()
		self.screen_width -= 10
		self.screen_height -= 10
		self.zoom = 0.5
		self.scroll_scale = 20
		self.sched_tracer = sched_tracer
		self.sched_tracer.set_root_win(self)
		# Trace time interval (microseconds) covered by this window.
		(self.ts_start, self.ts_end) = sched_tracer.interval()
		self.update_width_virtual()
		self.nr_rects = sched_tracer.nr_rectangles() + 1
		self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
		# whole window panel
		self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
		# scrollable container
		self.scroll = wx.ScrolledWindow(self.panel)
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
		self.scroll.EnableScrolling(True, True)
		self.scroll.SetFocus()
		# scrollable drawing area
		self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
		self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
		self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
		self.scroll.Fit()
		self.Fit()
		self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
		# Summary StaticText widget, created lazily by update_summary().
		self.txt = None
		self.Show(True)
	def us_to_px(self, val):
		# Microseconds -> pixels at the current zoom level.
		return val / (10 ** 3) * self.zoom
	def px_to_us(self, val):
		# Pixels -> microseconds at the current zoom level.
		return (val / self.zoom) * (10 ** 3)
	def scroll_start(self):
		# Current scroll origin in pixels (view start is in scroll units).
		(x, y) = self.scroll.GetViewStart()
		return (x * self.scroll_scale, y * self.scroll_scale)
	def scroll_start_us(self):
		# Current horizontal scroll origin expressed in microseconds.
		(x, y) = self.scroll_start()
		return self.px_to_us(x)
	def paint_rectangle_zone(self, nr, color, top_color, start, end):
		# Draw one timeline rectangle for row *nr* spanning [start, end]
		# (absolute timestamps); an optional top_color strip marks events.
		offset_px = self.us_to_px(start - self.ts_start)
		width_px = self.us_to_px(end - self.ts_start)
		offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
		width_py = RootFrame.RECT_HEIGHT
		dc = self.dc
		if top_color is not None:
			(r, g, b) = top_color
			top_color = wx.Colour(r, g, b)
			brush = wx.Brush(top_color, wx.SOLID)
			dc.SetBrush(brush)
			dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
			# Shrink the main rectangle so it sits below the marker strip.
			width_py -= RootFrame.EVENT_MARKING_WIDTH
			offset_py += RootFrame.EVENT_MARKING_WIDTH
		(r ,g, b) = color
		color = wx.Colour(r, g, b)
		brush = wx.Brush(color, wx.SOLID)
		dc.SetBrush(brush)
		dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
	def update_rectangles(self, dc, start, end):
		# Ask the tracer to repaint the visible zone (relative offsets are
		# converted to absolute timestamps first).
		start += self.ts_start
		end += self.ts_start
		self.sched_tracer.fill_zone(start, end)
	def on_paint(self, event):
		# Repaint only the currently visible slice of the timeline.
		dc = wx.PaintDC(self.scroll_panel)
		self.dc = dc
		width = min(self.width_virtual, self.screen_width)
		(x, y) = self.scroll_start()
		start = self.px_to_us(x)
		end = self.px_to_us(x + width)
		self.update_rectangles(dc, start, end)
	def rect_from_ypixel(self, y):
		# Map a y pixel coordinate to a rectangle row index, or -1 when
		# the click falls outside any row (in a gap or out of range).
		y -= RootFrame.Y_OFFSET
		rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
			return -1
		return rect
	def update_summary(self, txt):
		# Replace the summary label below the timeline with new text.
		if self.txt:
			self.txt.Destroy()
		self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
	def on_mouse_down(self, event):
		# Forward clicks inside a rectangle row to the tracer, translated
		# to (row, absolute timestamp).
		(x, y) = event.GetPositionTuple()
		rect = self.rect_from_ypixel(y)
		if rect == -1:
			return
		t = self.px_to_us(x) + self.ts_start
		self.sched_tracer.mouse_down(rect, t)
	def update_width_virtual(self):
		# Total virtual canvas width in pixels for the full trace interval.
		self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
	def __zoom(self, x):
		# Re-apply scrollbars after a zoom change, keeping timestamp *x*
		# (microseconds) anchored at the left edge of the view.
		self.update_width_virtual()
		(xpos, ypos) = self.scroll.GetViewStart()
		xpos = self.us_to_px(x) / self.scroll_scale
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
		self.Refresh()
	def zoom_in(self):
		x = self.scroll_start_us()
		self.zoom *= 2
		self.__zoom(x)
	def zoom_out(self):
		x = self.scroll_start_us()
		self.zoom /= 2
		self.__zoom(x)
	def on_key_press(self, event):
		# '+'/'-' zoom; arrow keys scroll one scroll unit at a time.
		key = event.GetRawKeyCode()
		if key == ord("+"):
			self.zoom_in()
			return
		if key == ord("-"):
			self.zoom_out()
			return
		key = event.GetKeyCode()
		(x, y) = self.scroll.GetViewStart()
		if key == wx.WXK_RIGHT:
			self.scroll.Scroll(x + 1, y)
		elif key == wx.WXK_LEFT:
			self.scroll.Scroll(x - 1, y)
		elif key == wx.WXK_DOWN:
			self.scroll.Scroll(x, y + 1)
		elif key == wx.WXK_UP:
			self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
skg-net/ansible | test/runner/shippable.py | 117 | 2708 | #!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
"""Verify the current Shippable run has the required number of jobs."""
from __future__ import absolute_import, print_function
# noinspection PyCompatibility
import argparse
import errno
import json
import os
import sys
from lib.http import (
HttpClient,
)
from lib.util import (
display,
ApplicationError,
ApplicationWarning,
MissingEnvironmentVariable,
)
try:
import argcomplete
except ImportError:
argcomplete = None
def main():
    """Main program function.

    Exit codes: 0 warning only, 1 application error, 2 interrupted,
    3 broken pipe on output.
    """
    try:
        args = parse_args()
        display.verbosity = args.verbosity
        display.color = args.color
        # The Shippable run id is injected into the environment by CI.
        try:
            run_id = os.environ['SHIPPABLE_BUILD_ID']
        except KeyError as ex:
            raise MissingEnvironmentVariable(ex.args[0])
        client = HttpClient(args)
        response = client.get('https://api.shippable.com/jobs?runIds=%s' % run_id)
        jobs = response.json()
        # An API error payload is a dict/str instead of the expected list.
        if not isinstance(jobs, list):
            raise ApplicationError(json.dumps(jobs, indent=4, sort_keys=True))
        # "Rebuild with SSH" re-runs only a single job; the full job matrix
        # is required for CI results to be meaningful.
        if len(jobs) == 1:
            raise ApplicationError('Shippable run %s has only one job. Did you use the "Rebuild with SSH" option?' % run_id)
    except ApplicationWarning as ex:
        # Warnings are reported but do not fail the run.
        display.warning(str(ex))
        exit(0)
    except ApplicationError as ex:
        display.error(str(ex))
        exit(1)
    except KeyboardInterrupt:
        exit(2)
    except IOError as ex:
        # A broken pipe (e.g. output piped into ``head``) is not an error.
        if ex.errno == errno.EPIPE:
            exit(3)
        raise
def parse_args():
    """Build the argument parser, parse argv, and normalize --color.

    Returns the parsed namespace with ``color`` converted to a boolean.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--explain',
                        action='store_true',
                        help='explain commands that would be executed')
    parser.add_argument('-v', '--verbose',
                        dest='verbosity',
                        default=0,
                        action='count',
                        help='display more output')
    parser.add_argument('--color',
                        metavar='COLOR',
                        nargs='?',
                        const='yes',
                        default='auto',
                        choices=('yes', 'no', 'auto'),
                        help='generate color output: %(choices)s')
    # Shell tab-completion is optional; argcomplete is None when the
    # package is not installed.
    if argcomplete:
        argcomplete.autocomplete(parser)
    args = parser.parse_args()
    # Normalize --color into a boolean: an explicit yes/no wins; otherwise
    # colorize under Shippable CI, or when stdout is a terminal.
    explicit = {'yes': True, 'no': False}
    if args.color in explicit:
        args.color = explicit[args.color]
    elif 'SHIPPABLE' in os.environ:
        args.color = True
    else:
        args.color = sys.stdout.isatty()
    return args
# Script entry point: delegate to main() when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
WSDC-NITWarangal/django | tests/utils_tests/test_text.py | 243 | 9471 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.test import SimpleTestCase
from django.utils import six, text
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils.translation import override
# Lazily-evaluated text factory, used to verify that the text utilities
# accept lazy strings as well as plain ones (see test_smart_split).
lazystr = lazy(force_text, six.text_type)
# True on "wide" (UCS-4) builds, where an astral-plane character such as
# U+1F4A9 is a single code unit; narrow Python 2 builds report length 2.
IS_WIDE_BUILD = (len('\U0001F4A9') == 1)
class TestUtilsText(SimpleTestCase):
    def test_get_text_list(self):
        """get_text_list joins items with a localized conjunction."""
        self.assertEqual(text.get_text_list(['a', 'b', 'c', 'd']), 'a, b, c or d')
        self.assertEqual(text.get_text_list(['a', 'b', 'c'], 'and'), 'a, b and c')
        self.assertEqual(text.get_text_list(['a', 'b'], 'and'), 'a and b')
        self.assertEqual(text.get_text_list(['a']), 'a')
        self.assertEqual(text.get_text_list([]), '')
        # Under an RTL locale the default conjunction is translated.
        with override('ar'):
            self.assertEqual(text.get_text_list(['a', 'b', 'c']), "a، b أو c")
    def test_smart_split(self):
        """smart_split tokenizes on whitespace but keeps quoted runs intact."""
        testdata = [
            ('This is "a person" test.',
             ['This', 'is', '"a person"', 'test.']),
            ('This is "a person\'s" test.',
             ['This', 'is', '"a person\'s"', 'test.']),
            ('This is "a person\\"s" test.',
             ['This', 'is', '"a person\\"s"', 'test.']),
            ('"a \'one',
             ['"a', "'one"]),
            ('all friends\' tests',
             ['all', 'friends\'', 'tests']),
            ('url search_page words="something else"',
             ['url', 'search_page', 'words="something else"']),
            ("url search_page words='something else'",
             ['url', 'search_page', "words='something else'"]),
            ('url search_page words "something else"',
             ['url', 'search_page', 'words', '"something else"']),
            ('url search_page words-"something else"',
             ['url', 'search_page', 'words-"something else"']),
            ('url search_page words=hello',
             ['url', 'search_page', 'words=hello']),
            ('url search_page words="something else',
             ['url', 'search_page', 'words="something', 'else']),
            ("cut:','|cut:' '",
             ["cut:','|cut:' '"]),
            (lazystr("a b c d"), # Test for #20231
             ['a', 'b', 'c', 'd']),
        ]
        for test, expected in testdata:
            self.assertEqual(list(text.smart_split(test)), expected)
def test_truncate_chars(self):
truncator = text.Truncator(
'The quick brown fox jumped over the lazy dog.'
)
self.assertEqual('The quick brown fox jumped over the lazy dog.',
truncator.chars(100)),
self.assertEqual('The quick brown fox ...',
truncator.chars(23)),
self.assertEqual('The quick brown fo.....',
truncator.chars(23, '.....')),
# Ensure that we normalize our unicode data first
nfc = text.Truncator('o\xfco\xfco\xfco\xfc')
nfd = text.Truncator('ou\u0308ou\u0308ou\u0308ou\u0308')
self.assertEqual('oüoüoüoü', nfc.chars(8))
self.assertEqual('oüoüoüoü', nfd.chars(8))
self.assertEqual('oü...', nfc.chars(5))
self.assertEqual('oü...', nfd.chars(5))
# Ensure the final length is calculated correctly when there are
# combining characters with no precomposed form, and that combining
# characters are not split up.
truncator = text.Truncator('-B\u030AB\u030A----8')
self.assertEqual('-B\u030A...', truncator.chars(5))
self.assertEqual('-B\u030AB\u030A-...', truncator.chars(7))
self.assertEqual('-B\u030AB\u030A----8', truncator.chars(8))
# Ensure the length of the end text is correctly calculated when it
# contains combining characters with no precomposed form.
truncator = text.Truncator('-----')
self.assertEqual('---B\u030A', truncator.chars(4, 'B\u030A'))
self.assertEqual('-----', truncator.chars(5, 'B\u030A'))
# Make a best effort to shorten to the desired length, but requesting
# a length shorter than the ellipsis shouldn't break
self.assertEqual('...', text.Truncator('asdf').chars(1))
def test_truncate_words(self):
truncator = text.Truncator('The quick brown fox jumped over the lazy '
'dog.')
self.assertEqual('The quick brown fox jumped over the lazy dog.',
truncator.words(10))
self.assertEqual('The quick brown fox...', truncator.words(4))
self.assertEqual('The quick brown fox[snip]',
truncator.words(4, '[snip]'))
def test_truncate_html_words(self):
truncator = text.Truncator('<p id="par"><strong><em>The quick brown fox'
' jumped over the lazy dog.</em></strong></p>')
self.assertEqual('<p id="par"><strong><em>The quick brown fox jumped over'
' the lazy dog.</em></strong></p>', truncator.words(10, html=True))
self.assertEqual('<p id="par"><strong><em>The quick brown fox...</em>'
'</strong></p>', truncator.words(4, html=True))
self.assertEqual('<p id="par"><strong><em>The quick brown fox....</em>'
'</strong></p>', truncator.words(4, '....', html=True))
self.assertEqual('<p id="par"><strong><em>The quick brown fox</em>'
'</strong></p>', truncator.words(4, '', html=True))
# Test with new line inside tag
truncator = text.Truncator('<p>The quick <a href="xyz.html"\n'
'id="mylink">brown fox</a> jumped over the lazy dog.</p>')
self.assertEqual('<p>The quick <a href="xyz.html"\n'
'id="mylink">brown...</a></p>', truncator.words(3, '...', html=True))
# Test self-closing tags
truncator = text.Truncator('<br/>The <hr />quick brown fox jumped over'
' the lazy dog.')
self.assertEqual('<br/>The <hr />quick brown...',
truncator.words(3, '...', html=True))
truncator = text.Truncator('<br>The <hr/>quick <em>brown fox</em> '
'jumped over the lazy dog.')
self.assertEqual('<br>The <hr/>quick <em>brown...</em>',
truncator.words(3, '...', html=True))
# Test html entities
truncator = text.Truncator('<i>Buenos días!'
' ¿Cómo está?</i>')
self.assertEqual('<i>Buenos días! ¿Cómo...</i>',
truncator.words(3, '...', html=True))
truncator = text.Truncator('<p>I <3 python, what about you?</p>')
self.assertEqual('<p>I <3 python...</p>',
truncator.words(3, '...', html=True))
def test_wrap(self):
digits = '1234 67 9'
self.assertEqual(text.wrap(digits, 100), '1234 67 9')
self.assertEqual(text.wrap(digits, 9), '1234 67 9')
self.assertEqual(text.wrap(digits, 8), '1234 67\n9')
self.assertEqual(text.wrap('short\na long line', 7),
'short\na long\nline')
self.assertEqual(text.wrap('do-not-break-long-words please? ok', 8),
'do-not-break-long-words\nplease?\nok')
long_word = 'l%sng' % ('o' * 20)
self.assertEqual(text.wrap(long_word, 20), long_word)
self.assertEqual(text.wrap('a %s word' % long_word, 10),
'a\n%s\nword' % long_word)
def test_normalize_newlines(self):
self.assertEqual(text.normalize_newlines("abc\ndef\rghi\r\n"),
"abc\ndef\nghi\n")
self.assertEqual(text.normalize_newlines("\n\r\r\n\r"), "\n\n\n\n")
self.assertEqual(text.normalize_newlines("abcdefghi"), "abcdefghi")
self.assertEqual(text.normalize_newlines(""), "")
def test_normalize_newlines_bytes(self):
"""normalize_newlines should be able to handle bytes too"""
normalized = text.normalize_newlines(b"abc\ndef\rghi\r\n")
self.assertEqual(normalized, "abc\ndef\nghi\n")
self.assertIsInstance(normalized, six.text_type)
def test_slugify(self):
items = (
# given - expected - unicode?
('Hello, World!', 'hello-world', False),
('spam & eggs', 'spam-eggs', False),
('spam & ıçüş', 'spam-ıçüş', True),
('foo ıç bar', 'foo-ıç-bar', True),
(' foo ıç bar', 'foo-ıç-bar', True),
('你好', '你好', True),
)
for value, output, is_unicode in items:
self.assertEqual(text.slugify(value, allow_unicode=is_unicode), output)
def test_unescape_entities(self):
items = [
('', ''),
('foo', 'foo'),
('&', '&'),
('&', '&'),
('&', '&'),
('foo & bar', 'foo & bar'),
('foo & bar', 'foo & bar'),
]
for value, output in items:
self.assertEqual(text.unescape_entities(value), output)
def test_get_valid_filename(self):
filename = "^&'@{}[],$=!-#()%+~_123.txt"
self.assertEqual(text.get_valid_filename(filename), "-_123.txt")
def test_compress_sequence(self):
data = [{'key': i} for i in range(10)]
seq = list(json.JSONEncoder().iterencode(data))
seq = [s.encode('utf-8') for s in seq]
actual_length = len(b''.join(seq))
out = text.compress_sequence(seq)
compressed_length = len(b''.join(out))
self.assertTrue(compressed_length < actual_length)
| bsd-3-clause |
eyzhou123/python-games | tetris.py | 1 | 10241 | #tetris.py
from Tkinter import *
import random
def tetrisMousePressed(canvas, event):
    """Mouse input is unused by the game; a click simply repaints the scene."""
    tetrisRedrawAll(canvas)
def tetrisKeyPressed(canvas, event):
    """Handle keyboard input: 'r' restarts, arrow keys steer the piece."""
    if event.keysym == "r":
        tetrisInit(canvas)
    if not canvas.data.isTetrisGameOver:
        # Arrow keys map to (drow, dcol) shifts; Up rotates instead.
        shifts = {
            "Left": (0, -1),
            "Right": (0, +1),
            "Down": (+1, 0),
        }
        if event.keysym == "Up":
            rotateFallingPiece(canvas)
        elif event.keysym in shifts:
            drow, dcol = shifts[event.keysym]
            moveFallingPiece(canvas, drow, dcol)
    tetrisRedrawAll(canvas)
def tetrisTimerFired(canvas):
    """Advance the game by one tick, repaint, and reschedule itself.

    Each tick drops the falling piece one row.  When it can no longer fall,
    it is locked into the board, full rows are cleared, a new piece spawns,
    and the game ends if the new piece has no room.
    """
    if not canvas.data.isTetrisGameOver:
        # Bug fix: the old code tested the drop with moveFallingPiece() and
        # then called it AGAIN on success, making pieces fall two rows per
        # tick.  moveFallingPiece() both performs the move and reports it.
        if not moveFallingPiece(canvas, +1, 0):
            placeFallingPiece(canvas)
            newFallingPiece(canvas)
            removeFullRows(canvas)
            if not fallingPieceIsLegal(canvas):
                tetrisGameOver(canvas)
        tetrisRedrawAll(canvas)
    delay = 350  # milliseconds
    def f():
        tetrisTimerFired(canvas)
    canvas.after(delay, f)  # pause, then call timerFired again
def tetrisGameOver(canvas):
    """Mark the current game as finished; redraw code shows the banner."""
    setattr(canvas.data, "isTetrisGameOver", True)
def tetrisRedrawAll(canvas):
    """Repaint everything: board, falling piece, score, game-over banner."""
    canvas.delete(ALL)
    drawTetrisGame(canvas)
    drawTetrisScore(canvas)
    if canvas.data.isTetrisGameOver:
        centerX = canvas.data.width / 2
        centerY = canvas.data.height / 2
        canvas.create_text(centerX, centerY, text="Game Over!",
                           font=("Helvetica", 32, "bold"))
def loadTetrisBoard(canvas):
    """Create a fresh rows x cols board filled with the empty colour.

    Each row is a distinct list so cells can be mutated independently.
    Uses range (not xrange) so the module also runs under Python 3.
    """
    (rows, cols) = (canvas.data.rows, canvas.data.cols)
    canvas.data.tetrisBoard = [[canvas.data.emptyColor] * cols
                               for _ in range(rows)]
def drawTetrisGame(canvas):
    """Paint the orange background, then the locked board, then the piece."""
    canvas.create_rectangle(0, 0, canvas.data.width, canvas.data.height,
                            fill="orange")
    drawTetrisBoard(canvas)
    drawFallingPiece(canvas)
def drawTetrisBoard(canvas):
    """Draw every cell of the locked board in its stored colour."""
    board = canvas.data.tetrisBoard
    for row, cells in enumerate(board):
        for col, color in enumerate(cells):
            drawTetrisCell(canvas, row, col, color)
def drawTetrisCell(canvas, row, col, color):
    """Draw one board cell: a black square with a 1px-inset coloured fill."""
    margin = canvas.data.margin
    size = canvas.data.cellSize
    left = margin + col * size
    top = margin + row * size
    right = left + size
    bottom = top + size
    canvas.create_rectangle(left, top, right, bottom,
                            fill="black")
    # The inner rectangle is shrunk by one pixel so the black shows as a
    # thin outline around every cell.
    canvas.create_rectangle(left + 1, top + 1, right - 1, bottom - 1,
                            fill=color)
def drawFallingPiece(canvas):
    """Paint the falling piece's filled cells on top of the board."""
    data = canvas.data
    # Keep the cached dimensions in sync with the current piece, as the
    # original did (other functions read these attributes).
    data.fallingPieceRows = len(data.fallingPiece)
    data.fallingPieceCols = len(data.fallingPiece[0])
    topRow = data.fallingPieceRow
    leftCol = data.fallingPieceCol
    for drow in xrange(data.fallingPieceRows):
        for dcol in xrange(data.fallingPieceCols):
            if data.fallingPiece[drow][dcol]:
                drawTetrisCell(canvas, topRow + drow, leftCol + dcol,
                               data.fallingPieceColor)
def newFallingPiece(canvas):
    """Pick a random piece and centre it at the top of the board."""
    i = random.randint(0, len(canvas.data.tetrisPieces) - 1)
    canvas.data.fallingPiece = canvas.data.tetrisPieces[i]
    canvas.data.fallingPieceColor = canvas.data.tetrisPieceColors[i]
    canvas.data.fallingPieceRow = 0
    # Bug fix: the width used to be whatever the PREVIOUS piece left in
    # canvas.data.fallingPieceWidth, mis-centring any piece of a different
    # width.  Compute it from the piece just chosen.  Floor division (//)
    # keeps integer behaviour on Python 3 as well.
    canvas.data.fallingPieceWidth = len(canvas.data.fallingPiece[0])
    canvas.data.fallingPieceCol = (canvas.data.cols // 2 -
                                   canvas.data.fallingPieceWidth // 2)
def moveFallingPiece(canvas, drow, dcol):
    """Shift the falling piece by (drow, dcol); undo and report failure
    if the new position is illegal.  Returns True on success."""
    data = canvas.data
    data.fallingPieceRow += drow
    data.fallingPieceCol += dcol
    if fallingPieceIsLegal(canvas):
        return True
    # Illegal: revert the tentative move.
    data.fallingPieceRow -= drow
    data.fallingPieceCol -= dcol
    return False
def rotateFallingPiece(canvas):
    """Rotate the falling piece 90 degrees about its centre cell; the
    rotation is undone if the rotated piece would be illegal."""
    data = canvas.data
    # Snapshot everything the rotation mutates so it can be rolled back.
    saved = (data.fallingPiece, data.fallingPieceRow, data.fallingPieceCol,
             data.fallingPieceRows, data.fallingPieceCols)
    oldCenterRow, oldCenterCol = fallingPieceCenter(canvas)
    # A 90-degree turn swaps the bounding box's dimensions.
    data.fallingPieceRows, data.fallingPieceCols = (
        data.fallingPieceCols, data.fallingPieceRows)
    newCenterRow, newCenterCol = fallingPieceCenter(canvas)
    # Shift the anchor so the piece keeps the same centre cell.
    data.fallingPieceRow += oldCenterRow - newCenterRow
    data.fallingPieceCol += oldCenterCol - newCenterCol
    rotated = []
    for row in xrange(data.fallingPieceRows):
        rotated.append([data.fallingPiece[data.fallingPieceCols - 1 - col][row]
                        for col in xrange(data.fallingPieceCols)])
    data.fallingPiece = rotated
    if not fallingPieceIsLegal(canvas):
        # Restore the snapshot (fallingPieceIsLegal also rewrote Rows/Cols).
        (data.fallingPiece, data.fallingPieceRow, data.fallingPieceCol,
         data.fallingPieceRows, data.fallingPieceCols) = saved
def fallingPieceCenter(canvas):
    """Return the (row, col) board position of the piece's centre cell.

    Floor division keeps the result an int on Python 3 too (Python 2's
    int / int already floored).
    """
    centerRow = canvas.data.fallingPieceRow + canvas.data.fallingPieceRows // 2
    centerCol = canvas.data.fallingPieceCol + canvas.data.fallingPieceCols // 2
    return (centerRow, centerCol)
def fallingPieceIsLegal(canvas):
    """Return True if every filled cell of the falling piece lies on the
    board and over an empty board cell.

    Side effect (relied on by callers): refreshes the cached
    fallingPieceRows/Cols from the current piece.  Uses range instead of
    xrange for Python 2/3 portability.
    """
    tetrisBoard = canvas.data.tetrisBoard
    canvas.data.fallingPieceRows = len(canvas.data.fallingPiece)
    canvas.data.fallingPieceCols = len(canvas.data.fallingPiece[0])
    for drow in range(canvas.data.fallingPieceRows):
        for dcol in range(canvas.data.fallingPieceCols):
            if not canvas.data.fallingPiece[drow][dcol]:
                continue
            row = canvas.data.fallingPieceRow + drow
            col = canvas.data.fallingPieceCol + dcol
            # Off the board, or overlapping a locked cell -> illegal.
            if not (0 <= row < canvas.data.rows and
                    0 <= col < canvas.data.cols):
                return False
            if tetrisBoard[row][col] != canvas.data.emptyColor:
                return False
    return True
def placeFallingPiece(canvas):
    """Lock the falling piece's filled cells into the board.

    Also refreshes the cached fallingPieceRows/Cols, matching the original
    behaviour.  Uses range for Python 2/3 portability.
    """
    tetrisBoard = canvas.data.tetrisBoard
    canvas.data.fallingPieceRows = len(canvas.data.fallingPiece)
    canvas.data.fallingPieceCols = len(canvas.data.fallingPiece[0])
    for drow in range(canvas.data.fallingPieceRows):
        for dcol in range(canvas.data.fallingPieceCols):
            if canvas.data.fallingPiece[drow][dcol]:
                row = canvas.data.fallingPieceRow + drow
                col = canvas.data.fallingPieceCol + dcol
                tetrisBoard[row][col] = canvas.data.fallingPieceColor
def removeFullRows(canvas):
    """Clear completed rows, compact the rest downward, and award score.

    Rows are scanned bottom-up; incomplete rows are copied toward the
    bottom, rows vacated at the top are reset to the empty colour, and the
    score grows quadratically with the number of rows cleared at once.
    """
    tetrisBoard = canvas.data.tetrisBoard
    fullRows = 0
    newRow = canvas.data.rows - 1
    for oldRow in range(canvas.data.rows - 1, -1, -1):
        if canvas.data.emptyColor in tetrisBoard[oldRow]:
            # Incomplete row: keep it, compacted downwards.
            for col in range(canvas.data.cols):
                tetrisBoard[newRow][col] = tetrisBoard[oldRow][col]
            newRow -= 1
        else:
            fullRows += 1
    # Bug fix: the vacated rows at the top used to keep stale copies of old
    # rows, so cleared lines never actually disappeared from the top of the
    # board.  Blank them out explicitly.
    for row in range(newRow + 1):
        for col in range(canvas.data.cols):
            tetrisBoard[row][col] = canvas.data.emptyColor
    canvas.data.score += fullRows ** 2
def drawTetrisScore(canvas):
    """Show the current score in the top-left margin of the window."""
    scoreText = "Score: " + str(canvas.data.score)
    canvas.create_text(canvas.data.cellSize, canvas.data.cellSize / 2,
                       text=scoreText, anchor=W,
                       font=("Helvetica", 16, "bold"))
def tetrisInit(canvas):
    """Reset all game state: board, piece shapes, first piece, and score."""
    data = canvas.data
    data.emptyColor = "blue"
    loadTetrisBoard(canvas)
    # The seven standard tetromino shapes as boolean matrices.
    data.iPiece = [[True, True, True, True]]
    data.jPiece = [[True, False, False],
                   [True, True, True]]
    data.lPiece = [[False, False, True],
                   [True, True, True]]
    data.oPiece = [[True, True],
                   [True, True]]
    data.sPiece = [[False, True, True],
                   [True, True, False]]
    data.tPiece = [[False, True, False],
                   [True, True, True]]
    data.zPiece = [[True, True, False],
                   [False, True, True]]
    data.tetrisPieces = [data.iPiece, data.jPiece, data.lPiece, data.oPiece,
                         data.sPiece, data.tPiece, data.zPiece]
    data.tetrisPieceColors = ["red", "yellow", "magenta", "pink", "cyan",
                              "green", "orange"]
    # Pick the opening piece; index() maps it back to its colour because
    # every shape in the list is distinct.
    data.fallingPiece = data.tetrisPieces[
        random.randint(0, len(data.tetrisPieces) - 1)]
    data.fallingPieceColor = data.tetrisPieceColors[
        data.tetrisPieces.index(data.fallingPiece)]
    data.fallingPieceRow = 0
    data.fallingPieceWidth = len(data.fallingPiece[0])
    data.fallingPieceCol = (data.cols / 2 -
                            data.fallingPieceWidth / 2)
    data.fallingPieceRows = len(data.fallingPiece)
    data.fallingPieceCols = len(data.fallingPiece[0])
    data.isTetrisGameOver = False
    data.score = 0
    tetrisRedrawAll(canvas)
def tetrisRun(rows, cols):
    """Create the Tk window, wire up input events, and start the game loop."""
    root = Tk()
    margin = 30
    cellSize = 30
    canvasWidth = 2 * margin + cols * cellSize
    canvasHeight = 2 * margin + rows * cellSize
    canvas = Canvas(root, width=canvasWidth, height=canvasHeight)
    canvas.pack()
    root.resizable(width=0, height=0)
    # All mutable game state hangs off canvas.data, a bare attribute bag.
    class Struct:
        pass
    canvas.data = Struct()
    canvas.data.margin = margin
    canvas.data.cellSize = cellSize
    canvas.data.rows = rows
    canvas.data.cols = cols
    canvas.data.width = canvasWidth
    canvas.data.height = canvasHeight
    tetrisInit(canvas)
    root.bind("<Button-1>", lambda event: tetrisMousePressed(canvas, event))
    root.bind("<Key>", lambda event: tetrisKeyPressed(canvas, event))
    tetrisTimerFired(canvas)
    root.mainloop()  # blocks until the window is closed
if __name__ == "__main__":
    # Start a standard 15x10 game only when executed as a script, so the
    # module can also be imported without popping up a window.
    tetrisRun(15, 10)
| mit |
KeserOner/where-artists-share | was/artists/models.py | 1 | 1280 | from django.contrib.auth.models import User
from django.db import models
from django.dispatch.dispatcher import receiver
class Artists(models.Model):
    """Artist profile attached one-to-one to a Django auth ``User``."""

    user = models.OneToOneField(User, on_delete=models.CASCADE)
    artist_image = models.ImageField(
        verbose_name="Artist's profile image",
        null=True,
        blank=True,
        unique=True,
        upload_to="artist_image/",
    )
    artist_banner = models.ImageField(
        verbose_name="Artist's banner",
        unique=True,
        null=True,
        blank=True,
        upload_to="artist_banner/",
    )
    # Typo fix: the verbose_name previously read "Artist's biografy".
    artist_bio = models.TextField(max_length=500, verbose_name="Artist's biography")
    artist_signature = models.CharField(
        max_length=70, verbose_name="Artist's signature"
    )
    # Self-referential follow link.  NOTE(review): a ForeignKey lets each
    # artist follow at most ONE other artist; a ManyToManyField may have
    # been intended, but changing it would require a schema migration.
    artist_followed = models.ForeignKey(
        "self",
        on_delete=models.CASCADE,
        related_name="artists_followed",
        blank=True,
        null=True,
    )

    def __str__(self):
        return "Profil de %s" % self.user.username
@receiver(models.signals.pre_delete, sender=Artists)
def delete_images(sender, instance, **kwargs):
    """Remove the artist's stored image files just before the row is deleted."""
    for image_field in (instance.artist_image, instance.artist_banner):
        if image_field:
            image_field.delete(False)  # False: do not re-save the model
| mit |
alex-quiterio/pychess | lib/pychess/Players/Engine.py | 22 | 4571 | from __future__ import absolute_import
from gi.repository import GObject
from threading import Thread
from pychess.compat import urlopen, urlencode
from pychess.System import fident
from pychess.System.Log import log
from pychess.Utils.Offer import Offer
from pychess.Utils.const import ARTIFICIAL, CHAT_ACTION
from .Player import Player
class Engine (Player):
    """Abstract base class for artificial (engine) players.

    Concrete engine wrappers subclass this and implement the methods that
    raise NotImplementedError below.
    """

    __type__ = ARTIFICIAL

    ''' Argument is a vector of analysis lines.
        The first element is the pv list of moves. The second is a score
        relative to the engine. If no score is known, the value can be None,
        but not 0, which is a draw. '''
    __gsignals__ = {
        'analyze': (GObject.SignalFlags.RUN_FIRST, None, (object,))
    }

    def __init__(self, md5=None):
        Player.__init__(self)
        self.md5 = md5
        self.currentAnalysis = []
        # Cache the latest 'analyze' emission so getAnalysis() can return it
        # synchronously; self_ is the emitting object (this engine).
        def on_analysis(self_, analysis):
            self.currentAnalysis = analysis
        self.connect('analyze', on_analysis)

    #===========================================================================
    #    Offer handling
    #===========================================================================

    def offer (self, offer):
        raise NotImplementedError

    def offerDeclined (self, offer):
        pass #Ignore

    def offerWithdrawn (self, offer):
        pass #Ignore

    def offerError (self, offer, error):
        pass #Ignore

    #===========================================================================
    #    General Engine Options
    #===========================================================================

    def setOptionAnalyzing (self, mode):
        self.mode = mode

    def setOptionInitialBoard (self, model):
        """ If the game starts at a board other than FEN_START, it should be
            sent here. We sends a gamemodel, so the engine can load the entire
            list of moves, if any """
        pass # Optional

    def setOptionVariant (self, variant):
        """ Inform the engine of any special variant. If the engine doesn't
            understand the variant, this will raise an error. """
        raise NotImplementedError

    def setOptionTime (self, secs, gain):
        """ Seconds is the initial clock of the game.
            Gain is the amount of seconds a player gets after each move.
            If the engine doesn't support playing with time, this will fail."""
        raise NotImplementedError

    def setOptionStrength (self, strength):
        """ Strength is a number [1,8] inclusive. Higher is better. """
        # NOTE(review): this assigns the attribute and then still raises --
        # subclasses are evidently expected to override it entirely.
        self.strength = strength
        raise NotImplementedError

    #===========================================================================
    #    Engine specific methods
    #===========================================================================

    def canAnalyze (self):
        raise NotImplementedError

    def maxAnalysisLines (self):
        raise NotImplementedError

    def requestMultiPV (self, setting):
        """Set the number of analysis lines the engine will give, if possible.
        If setting is too high, the engine's maximum will be used.
        The setting will last until the next call to requestMultiPV.
        Return value: the setting used.
        """
        raise NotImplementedError

    def getAnalysis (self):
        """ Returns a list of moves, or None if there haven't yet been made an
            analysis """
        return self.currentAnalysis

    #===========================================================================
    #    General chat handling
    #===========================================================================

    def putMessage (self, message):
        # Answer chat messages asynchronously via the Pandorabots web API;
        # the extracted reply is re-emitted as a CHAT_ACTION offer.
        def answer (message):
            try:
                data = urlopen("http://www.pandorabots.com/pandora/talk?botid=8d034368fe360895",
                               urlencode({"message":message, "botcust2":"x"}).encode("utf-8")).read().decode('utf-8')
            except IOError as e:
                # NOTE(review): assumes self.defname has been set by the
                # subclass before chat arrives -- confirm.
                log.warning("Couldn't answer message from online bot: '%s'" % e,
                            extra={"task":self.defname})
                return
            ss = "<b>DMPGirl:</b>"
            es = "<br>"
            # Slice out the reply between the marker and the following <br>.
            # Rebinding 'answer' shadows the function name; harmless, since
            # the function is not called again afterwards.
            answer = data[data.find(ss)+len(ss) : data.find(es,data.find(ss))]
            self.emit("offer", Offer(CHAT_ACTION, answer))
        t = Thread(target=answer, name=fident(answer), args=(message,))
        t.daemon = True
        t.start()
| gpl-3.0 |
bashburn/openshift-ansible | bin/openshift_ansible/awsutil.py | 2 | 6898 | # vim: expandtab:tabstop=4:shiftwidth=4
"""This module comprises Aws specific utility functions."""
import os
import re
# Buildbot does not have multi_inventory installed
#pylint: disable=no-name-in-module
from openshift_ansible import multi_inventory
class ArgumentError(Exception):
    """This class is raised when improper arguments are passed."""

    def __init__(self, message):
        """Initialize an ArgumentError.

        Keyword arguments:
        message -- the exact error message being raised
        """
        # Bug fix: the message is now forwarded to Exception.__init__ so
        # str(err) and normal traceback formatting show it; the .message
        # attribute is kept for existing callers.
        super(ArgumentError, self).__init__(message)
        self.message = message
class AwsUtil(object):
    """This class contains the AWS utility functions."""
    # NOTE: this module is Python 2 only (print statements, iteritems,
    # has_key); inventory data comes from the multi_inventory script.

    def __init__(self, host_type_aliases=None):
        """Initialize the AWS utility class.

        Keyword arguments:
        host_type_aliases -- a list of aliases to common host-types (e.g. ex-node)
        """
        host_type_aliases = host_type_aliases or {}
        self.host_type_aliases = host_type_aliases
        # Directory containing this file; kept for callers that need it.
        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
        self.setup_host_type_alias_lookup()

    def setup_host_type_alias_lookup(self):
        """Sets up the alias to host-type lookup table."""
        # Inverts {host_type: [aliases]} into {alias: host_type}.
        self.alias_lookup = {}
        for key, values in self.host_type_aliases.iteritems():
            for value in values:
                self.alias_lookup[value] = key

    @staticmethod
    def get_inventory(args=None, cached=False):
        """Calls the inventory script and returns a dictionary containing the inventory."

        Keyword arguments:
        args -- optional arguments to pass to the inventory script
        """
        minv = multi_inventory.MultiInventory(args)
        # cached=True reads the previously stored result instead of
        # re-querying AWS.
        if cached:
            minv.get_inventory_from_cache()
        else:
            minv.run()
        return minv.result

    def get_environments(self):
        """Searches for env tags in the inventory and returns all of the envs found."""
        pattern = re.compile(r'^tag_environment_(.*)')
        envs = []
        inv = self.get_inventory()
        for key in inv.keys():
            matched = pattern.match(key)
            if matched:
                envs.append(matched.group(1))
        envs.sort()
        return envs

    def get_host_types(self):
        """Searches for host-type tags in the inventory and returns all host-types found."""
        pattern = re.compile(r'^tag_host-type_(.*)')
        host_types = []
        inv = self.get_inventory()
        for key in inv.keys():
            matched = pattern.match(key)
            if matched:
                host_types.append(matched.group(1))
        host_types.sort()
        return host_types

    def get_security_groups(self):
        """Searches for security_groups in the inventory and returns all SGs found."""
        pattern = re.compile(r'^security_group_(.*)')
        groups = []
        inv = self.get_inventory()
        for key in inv.keys():
            matched = pattern.match(key)
            if matched:
                groups.append(matched.group(1))
        groups.sort()
        return groups

    def build_host_dict_by_env(self, args=None):
        """Searches the inventory for hosts in an env and returns their hostvars."""
        args = args or []
        inv = self.get_inventory(args)
        inst_by_env = {}
        for _, host in inv['_meta']['hostvars'].items():
            # If you don't have an environment tag, we're going to ignore you
            if 'ec2_tag_environment' not in host:
                continue
            if host['ec2_tag_environment'] not in inst_by_env:
                inst_by_env[host['ec2_tag_environment']] = {}
            # Key each host by "Name:instance-id" so names need not be unique.
            host_id = "%s:%s" % (host['ec2_tag_Name'], host['ec2_id'])
            inst_by_env[host['ec2_tag_environment']][host_id] = host
        return inst_by_env

    def print_host_types(self):
        """Gets the list of host types and aliases and outputs them in columns."""
        host_types = self.get_host_types()
        ht_format_str = "%35s"
        alias_format_str = "%-20s"
        combined_format_str = ht_format_str + "    " + alias_format_str
        print
        print combined_format_str % ('Host Types', 'Aliases')
        print combined_format_str % ('----------', '-------')
        for host_type in host_types:
            aliases = []
            if host_type in self.host_type_aliases:
                aliases = self.host_type_aliases[host_type]
                print combined_format_str % (host_type, ", ".join(aliases))
            else:
                print ht_format_str % host_type
        print

    def resolve_host_type(self, host_type):
        """Converts a host-type alias into a host-type.

        Keyword arguments:
        host_type -- The alias or host_type to look up.

        Example (depends on aliases defined in config file):
            host_type = ex-node
            returns: openshift-node
        """
        # has_key is Python-2-only; unknown aliases fall through unchanged.
        if self.alias_lookup.has_key(host_type):
            return self.alias_lookup[host_type]
        return host_type

    @staticmethod
    def gen_env_tag(env):
        """Generate the environment tag
        """
        return "tag_environment_%s" % env

    def gen_host_type_tag(self, host_type):
        """Generate the host type tag
        """
        host_type = self.resolve_host_type(host_type)
        return "tag_host-type_%s" % host_type

    def get_host_list(self, host_type=None, envs=None, version=None, cached=False):
        """Get the list of hosts from the inventory using host-type and environment
        """
        retval = set([])
        envs = envs or []
        inv = self.get_inventory(cached=cached)
        # We prefer to deal with a list of environments
        if issubclass(type(envs), basestring):
            if envs == 'all':
                envs = self.get_environments()
            else:
                envs = [envs]
        if host_type and envs:
            # Both host type and environment were specified
            for env in envs:
                retval.update(inv.get('tag_environment_%s' % env, []))
            retval.intersection_update(inv.get(self.gen_host_type_tag(host_type), []))
        elif envs and not host_type:
            # Just environment was specified
            for env in envs:
                env_tag = AwsUtil.gen_env_tag(env)
                if env_tag in inv.keys():
                    retval.update(inv.get(env_tag, []))
        elif host_type and not envs:
            # Just host-type was specified
            host_type_tag = self.gen_host_type_tag(host_type)
            if host_type_tag in inv.keys():
                retval.update(inv.get(host_type_tag, []))
        # If version is specified then return only hosts in that version
        if version:
            retval.intersection_update(inv.get('oo_version_%s' % version, []))
        return retval
| apache-2.0 |
zeha/pdns | regression-tests.recursor-dnssec/test_API.py | 4 | 1947 | import os
import requests
from recursortests import RecursorTest
class APIRecursorTest(RecursorTest):
    """Shared scaffolding for the recursor web-API tests: starts only the
    responders and the recursor itself, skipping the auth-server setup."""

    @classmethod
    def setUpClass(cls):

        # we don't need all the auth stuff
        cls.setUpSockets()
        cls.startResponders()

        confdir = os.path.join('configs', cls._confdir)
        cls.createConfigDir(confdir)

        cls.generateRecursorConfig(confdir)
        cls.startRecursor(confdir, cls._recursorPort)

    @classmethod
    def tearDownClass(cls):
        cls.tearDownRecursor()
class APIAllowedRecursorTest(APIRecursorTest):
    """The API must answer when the client IP is in webserver-allow-from."""
    _confdir = 'API'
    _wsPort = 8042
    _wsTimeout = 2
    _wsPassword = 'secretpassword'
    _apiKey = 'secretapikey'

    _config_template = """
webserver=yes
webserver-port=%d
webserver-address=127.0.0.1
webserver-password=%s
webserver-allow-from=127.0.0.1
api-key=%s
""" % (_wsPort, _wsPassword, _apiKey)

    def testAPI(self):
        # A statistics request from an allowed address should return 200
        # with a non-empty JSON body.
        headers = {'x-api-key': self._apiKey}
        url = 'http://127.0.0.1:' + str(self._wsPort) + '/api/v1/servers/localhost/statistics'
        r = requests.get(url, headers=headers, timeout=self._wsTimeout)
        self.assertTrue(r)
        self.assertEqual(r.status_code, 200)
        self.assertTrue(r.json())
class APIDeniedRecursorTest(APIRecursorTest):
    """The API must refuse connections from IPs outside webserver-allow-from
    (only 192.0.2.1 is allowed; the test client connects from 127.0.0.1)."""
    _confdir = 'API'
    _wsPort = 8042
    _wsTimeout = 2
    _wsPassword = 'secretpassword'
    _apiKey = 'secretapikey'

    _config_template = """
webserver=yes
webserver-port=%d
webserver-address=127.0.0.1
webserver-password=%s
webserver-allow-from=192.0.2.1
api-key=%s
""" % (_wsPort, _wsPassword, _apiKey)

    def testAPI(self):
        headers = {'x-api-key': self._apiKey}
        url = 'http://127.0.0.1:' + str(self._wsPort) + '/api/v1/servers/localhost/statistics'
        # assertRaises replaces the old try/except-pass + assertTrue(False)
        # pattern, which reported a bare failure with no useful message.
        with self.assertRaises(requests.exceptions.ConnectionError):
            requests.get(url, headers=headers, timeout=self._wsTimeout)
| gpl-2.0 |
StefanRijnhart/odoo | addons/anonymization/__openerp__.py | 52 | 2233 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Database Anonymization',
'version': '1.0',
'category': 'Tools',
'description': """
This module allows you to anonymize a database.
===============================================
This module allows you to keep your data confidential for a given database.
This process is useful, if you want to use the migration process and protect
your own or your customer’s confidential data. The principle is that you run
an anonymization tool which will hide your confidential data(they are replaced
by ‘XXX’ characters). Then you can send the anonymized database to the migration
team. Once you get back your migrated database, you restore it and reverse the
anonymization process to recover your previous data.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['base'],
'demo': ['anonymization_demo.xml'],
'data': [
'ir.model.fields.anonymization.csv',
'security/ir.model.access.csv',
'anonymization_view.xml',
],
'installable': True,
'auto_install': False,
'images': ['images/anonymization1.jpeg','images/anonymization2.jpeg','images/anonymization3.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jamesblunt/edx-platform | pavelib/utils/test/bokchoy_utils.py | 54 | 4800 | """
Helper functions for bok_choy test tasks
"""
import sys
import os
import time
import httplib
import subprocess
from paver.easy import sh
from pavelib.utils.envs import Env
from pavelib.utils.process import run_background_process
try:
from pygments.console import colorize
except ImportError:
colorize = lambda color, text: text # pylint: disable=invalid-name
__test__ = False # do not collect
def start_servers(default_store):
    """
    Start the servers we will run tests on, returns PIDs for servers.
    """
    # NOTE(review): despite the docstring, nothing is returned; the servers
    # are launched as background processes whose output goes to per-service
    # log files.  (Python 2 module: note the print statement below.)

    def start_server(cmd, logfile, cwd=None):
        """
        Starts a single server.
        """
        print cmd, logfile
        run_background_process(cmd, out_log=logfile, err_log=logfile, cwd=cwd)

    # Django services (lms/cms) run under coverage with the bok_choy settings.
    for service, info in Env.BOK_CHOY_SERVERS.iteritems():
        address = "0.0.0.0:{}".format(info['port'])
        cmd = (
            "DEFAULT_STORE={default_store} "
            "coverage run --rcfile={coveragerc} -m "
            "manage {service} --settings bok_choy runserver "
            "{address} --traceback --noreload".format(
                default_store=default_store,
                coveragerc=Env.BOK_CHOY_COVERAGERC,
                service=service,
                address=address,
            )
        )
        start_server(cmd, info['log'])

    # Stub services (fake external dependencies) run from the stub directory.
    for service, info in Env.BOK_CHOY_STUBS.iteritems():
        cmd = (
            "python -m stubs.start {service} {port} "
            "{config}".format(
                service=service,
                port=info['port'],
                config=info.get('config', ''),
            )
        )
        start_server(cmd, info['log'], cwd=Env.BOK_CHOY_STUB_DIR)
def wait_for_server(server, port):
    """
    Wait for a server to respond with status 200.

    Polls http://server:port/ up to 20 times, one second apart.
    Returns True as soon as a 200 is seen, False after the attempts
    are exhausted.
    """
    print(
        "Checking server {server} on port {port}".format(
            server=server,
            port=port,
        )
    )
    attempts = 0
    while attempts < 20:
        connection = None
        try:
            connection = httplib.HTTPConnection(server, port, timeout=10)
            connection.request('GET', '/')
            response = connection.getresponse()
            if int(response.status) == 200:
                return True
        # Bug fix: a bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; the server simply not being up yet raises ordinary
        # Exceptions (connection refused / timeout), so catch only those.
        except Exception:
            pass
        finally:
            # Bug fix: connections were never closed, leaking a socket per
            # failed attempt.
            if connection is not None:
                connection.close()
        attempts += 1
        time.sleep(1)
    return False
def wait_for_test_servers():
    """
    Wait until we get a successful response from the servers or time out
    """
    for service, info in Env.BOK_CHOY_SERVERS.iteritems():
        if not wait_for_server("0.0.0.0", info['port']):
            message = colorize(
                "red",
                "Could not contact {} test server".format(service)
            )
            print(message)
            sys.exit(1)
def is_mongo_running():
    """
    Returns True if mongo is running, False otherwise.
    """
    # The mongo shell connects to the service and exits after --eval; when
    # the service is unreachable the marker never appears on stdout.
    output = os.popen('mongo --eval "print(\'running\')"').read()
    # Bug fix: the old `output and "running" in output` returned the empty
    # string (not False) when there was no output; always return a bool.
    return "running" in output
def is_memcache_running():
    """
    Returns True if memcache is running, False otherwise.
    """
    # Setting a key only succeeds when the memcache service is reachable,
    # so the set() result doubles as a liveness probe.
    probe_result = Env.BOK_CHOY_CACHE.set('test', 'test')
    return probe_result
def is_mysql_running():
    """
    Returns True if mysql is running, False otherwise.
    """
    # mysqld may not be daemonized, so look for the process itself; pgrep
    # prints matching PIDs, which are discarded into /dev/null.
    with open(os.devnull, 'w') as devnull:
        status = subprocess.call("pgrep mysqld", stdout=devnull, shell=True)
    return status == 0
def clear_mongo():
    """
    Clears mongo database.

    Drops the configured bok-choy mongo database via the mongo shell,
    discarding the shell's stdout.
    """
    sh(
        "mongo {} --eval 'db.dropDatabase()' > /dev/null".format(
            Env.BOK_CHOY_MONGO_DATABASE,
        )
    )
def check_mongo():
    """
    Check that mongo is running; print a red error and exit 1 otherwise.
    """
    if is_mongo_running():
        return
    print(colorize('red', "Mongo is not running locally."))
    sys.exit(1)
def check_memcache():
    """
    Check that memcache is running; print a red error and exit 1 otherwise.
    """
    if is_memcache_running():
        return
    print(colorize('red', "Memcache is not running locally."))
    sys.exit(1)
def check_mysql():
    """
    Check that mysql is running; print a red error and exit 1 otherwise.
    """
    if is_mysql_running():
        return
    print(colorize('red', "MySQL is not running locally."))
    sys.exit(1)
def check_services():
    """
    Check that all required services are running.

    Each individual check prints a red error message and exits the
    process with status 1 if its service is unavailable.
    """
    check_mongo()
    check_memcache()
    check_mysql()
| agpl-3.0 |
pwoodworth/intellij-community | python/lib/Lib/encodings/tis_620.py | 593 | 12556 | """ Python Character Mapping Codec tis_620 generated from 'python-mappings/TIS-620.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless TIS-620 codec backed by the charmap tables below."""

    def encode(self, input, errors='strict'):
        # Returns (encoded_bytes, length_consumed).
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        # Returns (decoded_text, length_consumed).
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap codecs need no state between calls."""

    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap codecs need no state between calls."""

    def decode(self, input, final=False):
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    # Stream writer: inherits encode() from Codec.
    pass
class StreamReader(Codec, codecs.StreamReader):
    # Stream reader: inherits decode() from Codec.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo used to register the 'tis-620' codec."""
    # Codec is stateless, so one shared instance can supply both the
    # encode and decode entry points.
    codec = Codec()
    return codecs.CodecInfo(
        name='tis-620',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
# A 256-character string in which position i holds the Unicode character
# for TIS-620 byte i.  The mapping is three contiguous ranges:
#   0x00-0x9F : identity (ASCII plus C0/C1 controls)
#   0xA0      : unassigned -> U+FFFE
#   0xA1-0xDA : Thai block, U+0E01 (KO KAI) .. U+0E3A (PHINTHU)
#   0xDB-0xDE : unassigned -> U+FFFE
#   0xDF-0xFB : Thai block, U+0E3F (BAHT SIGN) .. U+0E5B (KHOMUT)
#   0xFC-0xFF : unassigned -> U+FFFE
# u'%c' % codepoint produces the character on both Python 2 and Python 3.
decoding_table = u''.join([
    u'%c' % (byte if byte < 0xA0
             else 0x0E01 + byte - 0xA1 if 0xA1 <= byte <= 0xDA
             else 0x0E3F + byte - 0xDF if 0xDF <= byte <= 0xFB
             else 0xFFFE)
    for byte in range(256)
])
### Encoding table
# Inverse mapping (char -> byte) derived from the decoding table above.
encoding_table = codecs.charmap_build(decoding_table)
| apache-2.0 |
zsoltdudas/lis-tempest | tempest/api/volume/admin/test_volume_types_extra_specs_negative.py | 5 | 5903 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.volume import base
from tempest.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest import test
class ExtraSpecsNegativeV2Test(base.BaseVolumeAdminTest):
    """Negative tests for the v2 volume-type extra-specs admin API.

    Each test issues a malformed or out-of-range request and asserts
    that the API rejects it with BadRequest or NotFound.  A single
    volume type with one extra spec is shared by all tests.
    """

    @classmethod
    def resource_setup(cls):
        super(ExtraSpecsNegativeV2Test, cls).resource_setup()
        vol_type_name = data_utils.rand_name('Volume-type')
        cls.extra_specs = {"spec1": "val1"}
        cls.volume_type = cls.volume_types_client.create_volume_type(
            name=vol_type_name,
            extra_specs=cls.extra_specs)['volume_type']

    @classmethod
    def resource_cleanup(cls):
        cls.volume_types_client.delete_volume_type(cls.volume_type['id'])
        super(ExtraSpecsNegativeV2Test, cls).resource_cleanup()

    @test.idempotent_id('08961d20-5cbb-4910-ac0f-89ad6dbb2da1')
    def test_update_no_body(self):
        # Should not update volume type extra specs with no body.
        extra_spec = {"spec1": "val2"}
        self.assertRaises(
            lib_exc.BadRequest,
            self.volume_types_client.update_volume_type_extra_specs,
            # list(d)[0] works on both Python 2 and 3; d.keys()[0]
            # raises TypeError on Python 3 (dict_keys is not indexable).
            self.volume_type['id'], list(extra_spec)[0], None)

    @test.idempotent_id('25e5a0ee-89b3-4c53-8310-236f76c75365')
    def test_update_nonexistent_extra_spec_id(self):
        # Should not update volume type extra specs with nonexistent id.
        extra_spec = {"spec1": "val2"}
        self.assertRaises(
            lib_exc.BadRequest,
            self.volume_types_client.update_volume_type_extra_specs,
            self.volume_type['id'], str(uuid.uuid4()),
            extra_spec)

    @test.idempotent_id('9bf7a657-b011-4aec-866d-81c496fbe5c8')
    def test_update_none_extra_spec_id(self):
        # Should not update volume type extra specs with none id.
        extra_spec = {"spec1": "val2"}
        self.assertRaises(
            lib_exc.BadRequest,
            self.volume_types_client.update_volume_type_extra_specs,
            self.volume_type['id'], None, extra_spec)

    @test.idempotent_id('a77dfda2-9100-448e-9076-ed1711f4bdfc')
    def test_update_multiple_extra_spec(self):
        # Should not update volume type extra specs with multiple specs as
        # body.
        extra_spec = {"spec1": "val2", "spec2": "val1"}
        self.assertRaises(
            lib_exc.BadRequest,
            self.volume_types_client.update_volume_type_extra_specs,
            self.volume_type['id'], list(extra_spec)[0],
            extra_spec)

    @test.idempotent_id('49d5472c-a53d-4eab-a4d3-450c4db1c545')
    def test_create_nonexistent_type_id(self):
        # Should not create volume type extra spec for nonexistent volume
        # type id.
        extra_specs = {"spec2": "val1"}
        self.assertRaises(
            lib_exc.NotFound,
            self.volume_types_client.create_volume_type_extra_specs,
            str(uuid.uuid4()), extra_specs)

    @test.idempotent_id('c821bdc8-43a4-4bf4-86c8-82f3858d5f7d')
    def test_create_none_body(self):
        # Should not create volume type extra spec for none POST body.
        self.assertRaises(
            lib_exc.BadRequest,
            self.volume_types_client.create_volume_type_extra_specs,
            self.volume_type['id'], None)

    @test.idempotent_id('bc772c71-1ed4-4716-b945-8b5ed0f15e87')
    def test_create_invalid_body(self):
        # Should not create volume type extra spec for invalid POST body.
        self.assertRaises(
            lib_exc.BadRequest,
            self.volume_types_client.create_volume_type_extra_specs,
            self.volume_type['id'], extra_specs=['invalid'])

    @test.idempotent_id('031cda8b-7d23-4246-8bf6-bbe73fd67074')
    def test_delete_nonexistent_volume_type_id(self):
        # Should not delete volume type extra spec for nonexistent
        # type id.
        extra_specs = {"spec1": "val1"}
        self.assertRaises(
            lib_exc.NotFound,
            self.volume_types_client.delete_volume_type_extra_specs,
            str(uuid.uuid4()), list(extra_specs)[0])

    @test.idempotent_id('dee5cf0c-cdd6-4353-b70c-e847050d71fb')
    def test_list_nonexistent_volume_type_id(self):
        # Should not list volume type extra spec for nonexistent type id.
        self.assertRaises(
            lib_exc.NotFound,
            self.volume_types_client.list_volume_types_extra_specs,
            str(uuid.uuid4()))

    @test.idempotent_id('9f402cbd-1838-4eb4-9554-126a6b1908c9')
    def test_get_nonexistent_volume_type_id(self):
        # Should not get volume type extra spec for nonexistent type id.
        extra_specs = {"spec1": "val1"}
        self.assertRaises(
            lib_exc.NotFound,
            self.volume_types_client.show_volume_type_extra_specs,
            str(uuid.uuid4()), list(extra_specs)[0])

    @test.idempotent_id('c881797d-12ff-4f1a-b09d-9f6212159753')
    def test_get_nonexistent_extra_spec_id(self):
        # Should not get volume type extra spec for nonexistent extra spec
        # id.
        self.assertRaises(
            lib_exc.NotFound,
            self.volume_types_client.show_volume_type_extra_specs,
            self.volume_type['id'], str(uuid.uuid4()))
class ExtraSpecsNegativeV1Test(ExtraSpecsNegativeV2Test):
    # Re-run the whole v2 negative suite against the v1 API.
    _api_version = 1
| apache-2.0 |
dennybaa/st2 | st2common/st2common/rbac/migrations.py | 3 | 2094 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mongoengine import NotUniqueError
from st2common.rbac.types import SystemRole
from st2common.persistence.rbac import Role
from st2common.models.db.rbac import RoleDB
from st2common.exceptions.db import StackStormDBObjectConflictError
__all__ = [
'run_all',
'insert_system_roles',
'delete_mistyped_role'
]
def run_all():
    """Run every RBAC data migration, in order."""
    # NOTE: insert_system_roles() already ends by calling
    # delete_mistyped_role(), so the cleanup runs twice; it is idempotent.
    insert_system_roles()
    delete_mistyped_role()
def insert_system_roles():
    """
    Migration which inserts the default system roles.

    Roles that already exist in the database are silently skipped.
    """
    system_roles = SystemRole.get_valid_values()
    for role_name in system_roles:
        description = role_name
        role_db = RoleDB(name=role_name, description=description, system=True)
        try:
            Role.insert(role_db, log_not_unique_error_as_debug=True)
        except (StackStormDBObjectConflictError, NotUniqueError):
            # Role already exists - nothing to do.
            pass
    delete_mistyped_role()
def delete_mistyped_role():
    """
    Delete " system_admin" role which was fat fingered.

    Best-effort cleanup: a missing role or any database error is
    silently ignored.
    """
    # Note: Space is significant here since we want to remove a bad role
    role_name = ' system_admin'
    assert(role_name.startswith(' '))
    try:
        role_db = Role.get_by_name(role_name)
    # Narrowed from a bare "except:", which would also swallow
    # SystemExit / KeyboardInterrupt.
    except Exception:
        return
    if not role_db:
        return
    try:
        Role.delete(role_db)
    except Exception:
        return
| apache-2.0 |
Memeo/samba-unovero | python/samba/tests/messaging.py | 28 | 2313 | # -*- coding: utf-8 -*-
#
# Unix SMB/CIFS implementation.
# Copyright © Jelmer Vernooij <jelmer@samba.org> 2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.messaging."""
import samba
from samba.messaging import Messaging
from samba.tests import TestCase
from samba.dcerpc.server_id import server_id
class MessagingTests(TestCase):
    """Tests for the samba.messaging IRPC wrapper (Python 2 syntax)."""

    def get_context(self, *args, **kwargs):
        # Always construct Messaging with the test loadparm context.
        kwargs['lp_ctx'] = samba.tests.env_loadparm()
        return Messaging(*args, **kwargs)

    def test_register(self):
        x = self.get_context()

        def callback():
            pass

        # register() returns the message type we must use to deregister.
        msg_type = x.register(callback)
        x.deregister(callback, msg_type)

    def test_all_servers(self):
        x = self.get_context()
        self.assertTrue(isinstance(x.irpc_all_servers(), list))

    def test_by_name(self):
        x = self.get_context()
        # Every known server name should resolve to a list of server ids.
        for name in x.irpc_all_servers():
            self.assertTrue(isinstance(x.irpc_servers_byname(name.name), list))

    def test_assign_server_id(self):
        x = self.get_context()
        self.assertTrue(isinstance(x.server_id, server_id))

    def test_ping_speed(self):
        # Server context registered under explicit id (0, 1).
        server_ctx = self.get_context((0, 1))

        def ping_callback(src, data):
            # Echo the payload back to the sender.
            server_ctx.send(src, data)

        def exit_callback():
            print "received exit"

        msg_ping = server_ctx.register(ping_callback)
        msg_exit = server_ctx.register(exit_callback)

        def pong_callback():
            print "received pong"

        # Client context under id (0, 2) sends pings to the server.
        client_ctx = self.get_context((0, 2))
        msg_pong = client_ctx.register(pong_callback)
        client_ctx.send((0, 1), msg_ping, "testing")
        client_ctx.send((0, 1), msg_ping, "")
| gpl-3.0 |
ArthurGarnier/SickRage | lib/libpasteurize/fixes/fix_unpacking.py | 60 | 5954 | u"""
Fixer for:
(a,)* *b (,c)* [,] = s
for (a,)* *b (,c)* [,] in d: ...
"""
from lib2to3 import fixer_base
from itertools import count
from lib2to3.fixer_util import (Assign, Comma, Call, Newline, Name,
Number, token, syms, Node, Leaf)
from libfuturize.fixer_util import indentation, suitify, commatize
# from libfuturize.fixer_util import Assign, Comma, Call, Newline, Name, Number, indentation, suitify, commatize, token, syms, Node, Leaf
def assignment_source(num_pre, num_post, LISTNAME, ITERNAME):
    u"""
    Accepts num_pre and num_post, which are counts of values
    before and after the starg (not including the starg)
    Returns a source fit for Assign() from fixer_util

    The produced expression has the shape
        LISTNAME[:pre] + [LISTNAME[pre:-post]] + LISTNAME[-post:]
    with the first/last terms omitted when the respective count is zero.
    """
    children = []
    pre = unicode(num_pre)
    post = unicode(num_post)
    # This code builds the assignment source from lib2to3 tree primitives.
    # It's not very readable, but it seems like the most correct way to do it.
    if num_pre > 0:
        # LISTNAME[:pre]
        pre_part = Node(syms.power, [Name(LISTNAME), Node(syms.trailer, [Leaf(token.LSQB, u"["), Node(syms.subscript, [Leaf(token.COLON, u":"), Number(pre)]), Leaf(token.RSQB, u"]")])])
        children.append(pre_part)
        children.append(Leaf(token.PLUS, u"+", prefix=u" "))
    # [LISTNAME[pre:-post]] - the starred target always receives a list.
    main_part = Node(syms.power, [Leaf(token.LSQB, u"[", prefix=u" "), Name(LISTNAME), Node(syms.trailer, [Leaf(token.LSQB, u"["), Node(syms.subscript, [Number(pre) if num_pre > 0 else Leaf(1, u""), Leaf(token.COLON, u":"), Node(syms.factor, [Leaf(token.MINUS, u"-"), Number(post)]) if num_post > 0 else Leaf(1, u"")]), Leaf(token.RSQB, u"]"), Leaf(token.RSQB, u"]")])])
    children.append(main_part)
    if num_post > 0:
        children.append(Leaf(token.PLUS, u"+", prefix=u" "))
        # LISTNAME[-post:]
        post_part = Node(syms.power, [Name(LISTNAME, prefix=u" "), Node(syms.trailer, [Leaf(token.LSQB, u"["), Node(syms.subscript, [Node(syms.factor, [Leaf(token.MINUS, u"-"), Number(post)]), Leaf(token.COLON, u":")]), Leaf(token.RSQB, u"]")])])
        children.append(post_part)
    source = Node(syms.arith_expr, children)
    return source
class FixUnpacking(fixer_base.BaseFix):
    u"""Backport Python 3 extended iterable unpacking (PEP 3132) to
    Python 2 by rewriting starred assignment targets into list slicing.
    Handles both explicit assignment and the implicit for-loop target."""

    PATTERN = u"""
        expl=expr_stmt< testlist_star_expr<
            pre=(any ',')*
            star_expr< '*' name=NAME >
            post=(',' any)* [','] > '=' source=any > |
        impl=for_stmt< 'for' lst=exprlist<
            pre=(any ',')*
            star_expr< '*' name=NAME >
            post=(',' any)* [','] > 'in' it=any ':' suite=any>"""

    def fix_explicit_context(self, node, results):
        # "a, *b, c = source" -> two statements: materialize the source as
        # a list, then assign by slicing.
        pre, name, post, source = (results.get(n) for n in (u"pre", u"name", u"post", u"source"))
        pre = [n.clone() for n in pre if n.type == token.NAME]
        name.prefix = u" "
        post = [n.clone() for n in post if n.type == token.NAME]
        target = [n.clone() for n in commatize(pre + [name.clone()] + post)]
        # to make the special-case fix for "*z, = ..." correct with the least
        # amount of modification, make the left-side into a guaranteed tuple
        target.append(Comma())
        source.prefix = u""
        setup_line = Assign(Name(self.LISTNAME), Call(Name(u"list"), [source.clone()]))
        power_line = Assign(target, assignment_source(len(pre), len(post), self.LISTNAME, self.ITERNAME))
        return setup_line, power_line

    def fix_implicit_context(self, node, results):
        u"""
        Only example of the implicit context is
        a for loop, so only fix that.
        """
        pre, name, post, it = (results.get(n) for n in (u"pre", u"name", u"post", u"it"))
        pre = [n.clone() for n in pre if n.type == token.NAME]
        name.prefix = u" "
        post = [n.clone() for n in post if n.type == token.NAME]
        target = [n.clone() for n in commatize(pre + [name.clone()] + post)]
        # to make the special-case fix for "*z, = ..." correct with the least
        # amount of modification, make the left-side into a guaranteed tuple
        target.append(Comma())
        source = it.clone()
        source.prefix = u""
        # Inside the loop body the iteration variable (ITERNAME) is
        # re-materialized as a list before the slicing assignment.
        setup_line = Assign(Name(self.LISTNAME), Call(Name(u"list"), [Name(self.ITERNAME)]))
        power_line = Assign(target, assignment_source(len(pre), len(post), self.LISTNAME, self.ITERNAME))
        return setup_line, power_line

    def transform(self, node, results):
        u"""
        a,b,c,d,e,f,*g,h,i = range(100) changes to
        _3to2list = list(range(100))
        a,b,c,d,e,f,g,h,i, = _3to2list[:6] + [_3to2list[6:-2]] + _3to2list[-2:]

        and

        for a,b,*c,d,e in iter_of_iters: do_stuff changes to
        for _3to2iter in iter_of_iters:
            _3to2list = list(_3to2iter)
            a,b,c,d,e, = _3to2list[:2] + [_3to2list[2:-2]] + _3to2list[-2:]
            do_stuff
        """
        self.LISTNAME = self.new_name(u"_3to2list")
        self.ITERNAME = self.new_name(u"_3to2iter")
        expl, impl = results.get(u"expl"), results.get(u"impl")
        if expl is not None:
            # Explicit assignment: insert the two generated statements in
            # place of the original one.
            setup_line, power_line = self.fix_explicit_context(node, results)
            setup_line.prefix = expl.prefix
            power_line.prefix = indentation(expl.parent)
            setup_line.append_child(Newline())
            parent = node.parent
            i = node.remove()
            parent.insert_child(i, power_line)
            parent.insert_child(i, setup_line)
        elif impl is not None:
            # For-loop: replace the starred target with ITERNAME and
            # prepend the unpacking statements to the loop body.
            setup_line, power_line = self.fix_implicit_context(node, results)
            suitify(node)
            suite = [k for k in node.children if k.type == syms.suite][0]
            setup_line.prefix = u""
            power_line.prefix = suite.children[1].value
            suite.children[2].prefix = indentation(suite.children[2])
            suite.insert_child(2, Newline())
            suite.insert_child(2, power_line)
            suite.insert_child(2, Newline())
            suite.insert_child(2, setup_line)
            results.get(u"lst").replace(Name(self.ITERNAME, prefix=u" "))
| gpl-3.0 |
vibhu0009/android_kernel_cyanogen_msm8916 | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
    # Called once by perf before any events are handled (Python 2 script).
    print "trace_begin"
    pass
def trace_end():
    # Called once by perf after the last event; dump unhandled-event counts.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
                       common_secs, common_nsecs, common_pid, common_comm,
                       vec):
    # Handler for the irq:softirq_entry tracepoint.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)
    print_uncommon(context)
    # symbol_str() renders the numeric softirq vector as its symbolic name.
    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
                  common_secs, common_nsecs, common_pid, common_comm,
                  call_site, ptr, bytes_req, bytes_alloc,
                  gfp_flags):
    # Handler for the kmem:kmalloc tracepoint.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)
    print_uncommon(context)
    # flag_str() renders the numeric gfp_flags bitmask as flag names.
    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
         flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events with no dedicated handler.  'unhandled' is an autodict,
    # so the first access yields a nested autodict; += then raises
    # TypeError, which is used to initialise the counter to 1.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # One fixed-width line of the fields common to every event.
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # These fields must be fetched from perf via the context object.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
           common_lock_depth(context))
def print_unhandled():
    # Print a "event / count" table of events without handlers, if any.
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",
    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                            "-----------"),
    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
peterbraden/tensorflow | tensorflow/contrib/learn/python/learn/datasets/text_datasets.py | 7 | 1734 | """Text datasets."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
import numpy as np
from tensorflow.python.platform import gfile
from tensorflow.contrib.learn.python.learn.datasets import base
DBPEDIA_URL = 'https://googledrive.com/host/0Bz8a_Dbh9Qhbfll6bVpmNUtUcFdjYmF2SEpmZUZUcVNiMUw1TWN6RDV3a0JHT3kxLVhVR2M/dbpedia_csv.tar.gz'
def get_dbpedia(data_dir):
    """Load the DBpedia classification dataset, downloading it if needed.

    Args:
        data_dir: directory in which to look for (and extract) the
            dbpedia_csv train/test files.

    Returns:
        A base.Datasets namedtuple with 'train' and 'test' populated and
        'validation' set to None.
    """
    train_path = os.path.join(data_dir, 'dbpedia_csv/train.csv')
    test_path = os.path.join(data_dir, 'dbpedia_csv/test.csv')
    if not (gfile.Exists(train_path) and gfile.Exists(test_path)):
        archive_path = base.maybe_download('dbpedia_csv.tar.gz', data_dir, DBPEDIA_URL)
        # Context manager closes the archive handle (the original leaked it).
        with tarfile.open(archive_path, 'r:*') as tfile:
            tfile.extractall(data_dir)
    # First CSV column is the integer class label.
    train = base.load_csv(train_path, np.int32, 0, has_header=False)
    test = base.load_csv(test_path, np.int32, 0, has_header=False)
    datasets = base.Datasets(train=train, validation=None, test=test)
    return datasets
def load_dbpedia():
    """Load DBpedia into the default 'dbpedia_data' directory."""
    return get_dbpedia('dbpedia_data')
| apache-2.0 |
sllong/googlemock | test/gmock_leak_test.py | 779 | 4384 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests that leaked mock objects can be caught be Google Mock."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gmock_test_utils
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_leak_test_')
TEST_WITH_EXPECT_CALL = [PROGRAM_PATH, '--gtest_filter=*ExpectCall*']
TEST_WITH_ON_CALL = [PROGRAM_PATH, '--gtest_filter=*OnCall*']
TEST_MULTIPLE_LEAKS = [PROGRAM_PATH, '--gtest_filter=*MultipleLeaked*']
environ = gmock_test_utils.environ
SetEnvVar = gmock_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gmock_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
class GMockLeakTest(gmock_test_utils.TestCase):
  """Exercises Google Mock's leaked-mock checker via a child test binary.

  Each test launches the gmock_leak_test_ program with a gtest filter and
  inspects its exit code: non-zero means the leak checker reported leaked
  mock objects and failed the run.
  """

  def testCatchesLeakedMockByDefault(self):
    # The leak checker is on by default, so a leaked mock must fail the run.
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL,
                                    env=environ).exit_code)
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_ON_CALL,
                                    env=environ).exit_code)

  def testDoesNotCatchLeakedMockWhenDisabled(self):
    # assertEqual replaces the deprecated assertEquals alias
    # (the alias was removed in Python 3.12).
    self.assertEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
                                    ['--gmock_catch_leaked_mocks=0'],
                                    env=environ).exit_code)
    self.assertEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
                                    ['--gmock_catch_leaked_mocks=0'],
                                    env=environ).exit_code)

  def testCatchesLeakedMockWhenEnabled(self):
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
                                    ['--gmock_catch_leaked_mocks'],
                                    env=environ).exit_code)
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
                                    ['--gmock_catch_leaked_mocks'],
                                    env=environ).exit_code)

  def testCatchesLeakedMockWhenEnabledWithExplictFlagValue(self):
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
                                    ['--gmock_catch_leaked_mocks=1'],
                                    env=environ).exit_code)

  def testCatchesMultipleLeakedMocks(self):
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_MULTIPLE_LEAKS +
                                    ['--gmock_catch_leaked_mocks'],
                                    env=environ).exit_code)
if __name__ == '__main__':
  # Delegates to the gtest-aware test runner supplied by gmock_test_utils.
  gmock_test_utils.Main()
| bsd-3-clause |
andyzsf/django-cms | cms/south_migrations/0030_limit_visibility_in_menu_step3of3.py | 1680 | 20032 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
# Resolve the active user model in a way that works across Django versions:
# get_user_model() (custom user model support) only exists from Django 1.5 on.
try:
    from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
    from django.contrib.auth.models import User
else:
    User = get_user_model()
# South ORM labels for the (possibly custom) user model, e.g. 'auth.User'.
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.model_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Intentional no-op: the schema changes for limit_visibility_in_menu
        # were applied in the earlier steps of this 3-step migration; this
        # step only carries the updated frozen ORM state below.
        pass
    def backwards(self, orm):
        # Intentional no-op: nothing to reverse (see forwards()).
        pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'})
},
'auth.permission': {
'Meta': {
'ordering': "('content_type__app_label', 'content_type__model', 'codename')",
'unique_together': "(('content_type', 'codename'),)",
'object_name': 'Permission'},
'codename': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['contenttypes.ContentType']"}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [],
{'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Group']", 'symmetrical': 'False',
'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'password': (
'django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': (
'django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.CMSPlugin']", 'null': 'True',
'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': "orm['sites.Site']",
'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')",
'object_name': 'Page'},
'changed_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'created_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'limit_visibility_in_menu': (
'django.db.models.fields.SmallIntegerField', [],
{'default': 'None', 'null': 'True', 'db_index': 'True',
'blank': 'True'}),
'login_required': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '80',
'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'children'",
'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['cms.Placeholder']",
'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'published': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'publisher_public': (
'django.db.models.fields.related.OneToOneField', [],
{'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True',
'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '40', 'null': 'True',
'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'template': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_descendants': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_page': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')",
'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [],
{'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [],
{'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': (
'django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_users'",
'to': "orm['%s']" % user_orm_label}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['%s']" % user_orm_label, 'unique': 'True',
'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_usergroups'",
'to': "orm['%s']" % user_orm_label}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['auth.Group']", 'unique': 'True',
'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': (
'django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)",
'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '200',
'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': (
'django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': (
'django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"},
'app_label': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site',
'db_table': "'django_site'"},
'domain': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
| bsd-3-clause |
profxj/xastropy | xastropy/PH136/experiments/hrdiagram.py | 7 | 19512 | """Python module for the HR diagram experiment of PH136
Run on 2014 Apr 30 data from the Nickel
"""
# Module for the HR Diagram experiment
import numpy as np
import pdb
import glob
from astropy.io import fits
#import ds9
####################################
# CLASSES
###########
class Landolt_data:
    """Catalog photometry for a single Landolt standard star.

    Holds the V magnitude and the standard color indices; mAB() combines
    them into an apparent magnitude for a given filter.
    """
    # NOTE: the original __slots__ listed hyphenated names ('B-V', ...),
    # which are not valid identifiers and never matched the attributes
    # actually assigned in __init__ (inert on the old-style Python 2
    # class, but a TypeError on Python 3).  The list now mirrors __init__.
    __slots__ = ['Name', 'RA', 'DEC', 'V', 'BV', 'UB', 'VR', 'RI', 'VI',
                 'n', 'm']

    def __init__(self, Name, RA, DEC, V, BV, UB, VR, RI, VI, n=0, m=0):
        self.Name = Name  # Landolt identifier (Field_star)
        self.RA = RA      # Sexagesimal RA string
        self.DEC = DEC    # Sexagesimal DEC string
        self.V = V        # V magnitude
        self.BV = BV      # B-V color
        self.UB = UB      # U-B color
        self.VR = VR      # V-R color
        self.RI = RI      # R-I color
        self.VI = VI      # V-I color
        self.n = n
        self.m = m

    # Push back the apparent magnitude
    def mAB(self, Filter):
        """Return the apparent magnitude in Filter ('U','B','V','R','I').

        Raises
        ------
        ValueError : if Filter is not one of the five supported bands.
        """
        allf = ['U', 'B', 'V', 'R', 'I']
        allmAB = [self.UB + self.BV + self.V,
                  self.BV + self.V,
                  self.V,
                  self.V - self.VR,
                  self.V - self.VI]
        try:
            idx = allf.index(Filter)
        except ValueError:
            # Bug fix: formerly dropped into pdb.set_trace() and then hit
            # a NameError on the undefined idx; raise a clear error instead.
            raise ValueError('Unknown filter: %s' % Filter)
        return allmAB[idx]
#
class standard_star:
"""A simple class for a standard star and its analysis"""
__slots__ = ['Name', 'RA', 'DEC', 'xpix', 'ypix', 'Filter', 'mI', 'sigmI', 'mAB']
def __init__(self, Name, xpix=0., ypix=0., Filter='', mI=0., sigmI=0., mAB=0.):
self.Name = Name # Landolt name (Field_star)
self.xpix = xpix # Detector coordinates
self.ypix = ypix # Detector coordinates
self.mI = mI
self.sigmI = sigmI
self.Filter = Filter
self.mAB = mAB
#self.sigmAB = sigmAB
#self.RA = RA
#self.DEC = DEC
def __str__(self): # Return info
lin= 'Standard Star: {} at pixel ({:.1f},{:.1f})\n'.format(self.Name,
self.xpix,
self.ypix)
lin+='Instrument magnitude: {:.2f}, {:.2f}'.format(self.mI,self.sigmI)
lin+=' for filter {}'.format(self.Filter)
if self.mAB > 0.:
lin+=' Landolt magnitude = {:.2f}'.format(self.mAB)
return lin
# Center the star on an image
def centroid(self,img,win_xy=None, No_Recenter=None, mask=None,
weight=None, Silent=None):
# Region to center about
if win_xy != None:
win_xy = (20, 20)
# Cut image
shpe = img.shape
i0 = np.max( [int(self.xpix-win_xy[0]), 0] )
i1 = np.min( [int(self.xpix+win_xy[0]), shpe[0]] )
i2 = np.max( [int(self.ypix-win_xy[1]), 0] )
i3 = np.min( [int(self.ypix+win_xy[1]), shpe[1]] )
tmp_img = img[i0:i1, i2:i3]
# Centroid
import xastropy.phot.ian_phot as iph
reload(iph)
#pdb.set_trace()
x0,y0 = iph.centroid(tmp_img,mask=mask, w=weight)
# Offset based on windowing
x0 = x0 + i0
y0 = y0 + i2
if Silent == None:
print 'Original position: ({:.1f},{:.1f})'.format(self.xpix,self.ypix)
print 'New position: ({:.1f},{:.1f})'.format(x0,y0)
# Save?
if No_Recenter == None:
self.xpix = x0
self.ypix = y0
# Turn into a FITS table
####################################
# Generate a simple ASCII log from the data
def mk_log(file_path=None,outfil=None):
# Output file
if outfil == None:
outfil = 'simple.log'
fpt = open(outfil,'w')
# Data path
if file_path == None:
file_path = 'Raw/'
files = glob.glob(file_path+'/*.fits.gz')
files.sort(key=len) # Sort by file length
lin = 'Index| File | Descript | Type | RA | DEC | Exp | Filter\n'
fpt.write(lin)
for ff in files:
print ff
# Open the file and grab the header + data
hdulist=fits.open(ff)
head = hdulist[0].header
dat = hdulist[0].data
hdulist.close
# Generate the line
lin = str(files.index(ff)).ljust(3, ' ')+'|'
lin += str(ff).ljust(20,' ')+'|'
lin += str(head['OBJECT']).ljust(15, ' ')+'|'
lin += str(head['OBSTYPE']).ljust(7, ' ')+'|'
lin += str(head['RA']).strip()+'|'
lin += (str(head['DEC']).strip()).rjust(11,'+')+'|'
lin += ' {:6.1f}'.format(head['EXPTIME'])+'|'
lin += ' '+str(head['FILTNAM']).ljust(2, ' ')
lin += '\n'
# Write
fpt.write(lin)
# Close the file
fpt.close()
####################################
# Generate the Bias frame and also Estimate+Record Read Noise
def mk_bias(file_list=None,file_path=None,outfil=None):
# There is no overscan region (yes there is!)
# So, we need a bias image
import xastropy.PH136.experiments.hrdiagram as hrd
from astropy.io.fits import getdata
from astropy.io.fits import Column
from astropy.io import fits
from astropy.stats import sigma_clip
# Defaults
if file_path == None:
file_path = 'Raw/'
if outfil == None:
outfil = 'Bias.fits'
# Files
bias_fil = None
if file_list == None:
# Generate them ourself
biasfrm = 2 + np.arange(10)
bias_fil = hrd.mk_file_list(biasfrm, file_path=file_path)
# Read Noise
arr,head = getdata(str(bias_fil[0]),0,header=True)
clip_arr = sigma_clip(arr, 2.5, None)
rnoise = np.std(clip_arr,dtype=np.float64)
print 'Read Noise = ', rnoise, ' counts'
#pdb.set_trace()
#dpt = ds9.ds9()
#dpt.set_np2arr(arr)
# Stack the frames
img = hrd.stack_img(bias_fil)
# Write
head.update('RNOISE', rnoise, 'READ NOISE')
fits.writeto(outfil, img, head, clobber=True)
return
####################################
# Generate the Sky flats for each Filter
def mk_skyflats(file_list=None,file_path=None, bias_fil=None):
    """Create normalized sky-flat frames for the B, V and R filters.

    For each filter, the hard-coded raw frame numbers are bias-subtracted,
    median-stacked with per-frame normalization, trimmed/flipped, and
    written to Sky_<filter>.fits.

    Parameters
    ----------
    file_list : unused (frame numbers are hard-coded below)
    file_path : unused
    bias_fil : str, optional
        Bias frame to subtract (default 'Bias.fits').
    """
    import xastropy.PH136.experiments.hrdiagram as hrd
    from astropy.io.fits import getdata
    # Raw frame numbers of the sky exposures per filter
    B_sky = 32 + np.arange(5)
    V_sky = 37 + np.arange(5)
    R_sky = 43 + np.arange(5)
    all_sky = [B_sky, V_sky, R_sky]
    # Outfiles
    outfil = ['Sky_B.fits', 'Sky_V.fits', 'Sky_R.fits']
    filters = ['B','V','R']
    # Bias
    if bias_fil == None:
        bias_fil = 'Bias.fits'
    bias_img,bias_head = getdata(bias_fil,0,header=True)
    # Loop on Filters
    for ff in filters:
        # Index
        idx = filters.index(ff)
        # Generate file names
        files= hrd.mk_file_list(all_sky[idx])
        # Median-stack with per-frame median normalization
        img = hrd.stack_img(files, bias_img=bias_img, norm=True)
        # Trim
        trim_img = hrd.trimflip_img(img)
        # Replace zero pixels with 1 so later division by the flat
        # (see proc_m67/proc_sa104) cannot divide by zero
        zro = np.where( trim_img == 0.)
        trim_img[zro] = 1.
        # Write
        print 'Sky Flats: Writing ', outfil[idx]
        fits.writeto(outfil[idx], trim_img, clobber=True)
    print 'Sky Flats: All done'
    return
####################################
# Stack a given set of images
######
def stack_img(file_list, outfil=None, norm=False, bias_img=None):
from astropy.io.fits import getdata
# file_list is a List of filenames
all_arr = []
for ff in file_list:
print 'Reading ', ff
arr,head = getdata(ff,0,header=True)
# Bias subtract?
if bias_img != None:
if arr.shape != bias_img.shape:
raise NameError('stack_img: Bad shapes!')
arr = arr - bias_img
# Normalize (for flats)
if norm:
norm_val = np.median(arr)
else:
norm_val = 1.
arr = arr/norm_val
# Save
all_arr.append(arr)
# Median Stack
all_arr = np.array(all_arr)
final_arr = np.median(all_arr,0)
# Write
if outfil != None:
print 'Writing stacked frame to ', outfil
fits.writeto(outfil, final_arr, clobber=True)
return final_arr
####################################
# Generate a list of filenames from the frame list
def mk_file_list(frames, file_path=None):
    """Build raw-frame filenames ('d<frame>.fits.gz') under file_path.

    Parameters
    ----------
    frames : iterable of int
        Frame numbers.
    file_path : str, optional
        Directory prefix (default 'Raw/').

    Returns
    -------
    list of str : one filename per frame number.
    """
    if file_path is None:
        file_path = 'Raw/'
    return [file_path + 'd' + str(frame) + '.fits.gz' for frame in frames]
####################################
# Trim and flip the images down (remove overscan, etc.)
def trimflip_img(img):
    """Trim the frame and flip it vertically.

    Drops the bottom row and the overscan columns (index >= 1000), then
    reverses the row order.
    """
    trimmed = img[1:, :1000]
    return np.flipud(trimmed)
####################################
# Process M67 images
def proc_m67(file_path=None,outdir=None, bias_fil=None):
    """Reduce all raw M67 frames to calibrated science images.

    Frames within 1 degree of M67 are selected from simple.log; each is
    bias-subtracted, trimmed/flipped, flat-fielded by the matching
    Sky_<filter>.fits frame, divided by its exposure time, and written to
    outdir as M67_C<pointing>_t<exp>_<filter>.fits.

    Parameters
    ----------
    file_path : str, optional -- raw-data directory (default 'Raw/')
    outdir : str, optional -- output directory (default 'Science/')
    bias_fil : str, optional -- bias frame (default 'Bias.fits')
    """
    from astropy.coordinates import ICRS
    from astropy.io import ascii
    from astropy import units as u
    from astropy.io.fits import getdata
    import xastropy.PH136.experiments.hrdiagram as hrd
    # Defaults
    if file_path == None:
        file_path = 'Raw/'
    if outdir == None:
        outdir = 'Science/'
    # Bias frame
    if bias_fil == None:
        bias_fil = 'Bias.fits'
    bias_img,bias_head = getdata(bias_fil,0,header=True)
    # Read the observing log written by mk_log()
    data = ascii.read('simple.log',delimiter='|')
    nfil = len(data)
    all_coord = ICRS(ra=data['RA'], dec=data['DEC'], unit=(u.hour,u.degree))
    # M67 cluster center
    m67_rac = '08:54:24'
    m67_decc = '+11:49:00'
    m67_c = ICRS(m67_rac, m67_decc, unit=(u.hour,u.degree))
    # Select all frames pointed at M67
    sep = (m67_c.separation(all_coord)).degree
    im67, = np.where( sep < 1. ) # 1 degree
    m67 = data[im67]
    # The 5 mosaic pointings used for the cluster
    m67_ra = ['08:52:02.2', '08:52:15.3', '08:51:49.9', '08:51:50.0', '08:52:16.2']
    m67_dec = ['+11:52:41.0', '+11:55:51.0', '+11:55:53.0', '+11:49:38.0', '+11:49:40.0']
    m67_pointings = ICRS(ra=m67_ra, dec=m67_dec, unit=(u.hour, u.degree))
    # Filters
    all_filt=np.array(m67['Filter'])
    filters,ifilt = np.unique(all_filt,return_index=True)
    # Loop on filters
    all_fil = []
    for ff in filters:
        # Load Sky frame
        skyfil = 'Sky_'+ff+'.fits'
        sky_img,head = getdata(skyfil,0,header=True)
        # Images
        idx = np.where(m67['Filter'] == ff)
        # Loop on images
        for kk in np.concatenate(idx,axis=0):
            # Read
            img,head = getdata(m67[kk]['File'],0,header=True)
            # Bias subtract
            img = img - bias_img
            # Trim
            timg = hrd.trimflip_img(img)
            # Flat field
            timg = timg / sky_img
            # Normalize by exposure
            timg = timg / m67[kk]['Exp']
            # Assign the frame to the nearest of the 5 pointings
            coord = ICRS(head['RA'], head['DEC'], unit=(u.hour,u.degree))
            sep = (coord.separation(m67_pointings)).degree
            ipos = np.argmin(sep)
            outfil = outdir+'M67_C'+str(ipos)+'_t'+str(int(m67[kk]['Exp']))+'_'+ff+'.fits'
            # Skip if an image with this name was already written
            flg_skip = 0
            mt = [i for i in range(len(all_fil)) if all_fil[i] == outfil]
            if len(mt) > 0:
                print 'Duplicate image', outfil
                print 'Skipping...'
                continue
            all_fil.append(outfil)
            # Write
            print 'Writing ', outfil
            fits.writeto(outfil, timg, clobber=True)
    return
####################################
# Process SA 104 images
def proc_sa104(file_path=None,outdir=None, bias_fil=None):
    """Reduce the raw SA 104 standard-star frames.

    Frames within 1 degree of the SA 104 field are selected from
    simple.log; each is bias-subtracted, trimmed/flipped, flat-fielded by
    the matching Sky_<filter>.fits frame, divided by its exposure time,
    and written to outdir as SA104_t<exp>_<filter>.fits.

    Parameters
    ----------
    file_path : str, optional -- raw-data directory (default 'Raw/')
    outdir : str, optional -- output directory (default 'Std/')
    bias_fil : str, optional -- bias frame (default 'Bias.fits')
    """
    from astropy.coordinates import ICRS
    from astropy.io import ascii
    from astropy import units as u
    from astropy.io.fits import getdata
    import xastropy.PH136.experiments.hrdiagram as hrd
    # Defaults
    if file_path == None:
        file_path = 'Raw/'
    if outdir == None:
        outdir = 'Std/'
    # Bias frame
    if bias_fil == None:
        bias_fil = 'Bias.fits'
    bias_img,bias_head = getdata(bias_fil,0,header=True)
    # Read the observing log written by mk_log()
    data = ascii.read('simple.log',delimiter='|')
    nfil = len(data)
    all_coord = ICRS(ra=data['RA'], dec=data['DEC'], unit=(u.hour,u.degree))
    # SA 104 field center (comment previously mislabeled this as M67)
    sa104_rac = '12:43:44.3'
    sa104_decc = '-00:29:40.0'
    sa104_c = ICRS(sa104_rac, sa104_decc, unit=(u.hour,u.degree))
    # Find all SA 104
    sep = (sa104_c.separation(all_coord)).degree
    isa104, = np.where( sep < 1. ) # 1 degree
    sa104 = data[isa104]
    # Filters
    all_filt=np.array(sa104['Filter'])
    filters,ifilt = np.unique(all_filt,return_index=True)
    # Loop on filters
    all_fil = []
    for ff in filters:
        # Load Sky frame
        skyfil = 'Sky_'+ff+'.fits'
        sky_img,head = getdata(skyfil,0,header=True)
        # Images
        idx = np.where(sa104['Filter'] == ff)
        # Loop on images
        for kk in np.concatenate(idx,axis=0):
            # Read
            img,head = getdata(sa104[kk]['File'],0,header=True)
            # Bias subtract
            img = img - bias_img
            # Trim
            timg = hrd.trimflip_img(img)
            # Flat field
            timg = timg / sky_img
            # Normalize by exposure
            timg = timg / sa104[kk]['Exp']
            # Filename
            outfil = outdir+'SA104_t'+str(int(sa104[kk]['Exp']))+'_'+ff+'.fits'
            # Skip if an image with this name was already written
            flg_skip = 0
            mt = [i for i in range(len(all_fil)) if all_fil[i] == outfil]
            if len(mt) > 0:
                print 'Duplicate image', outfil
                print 'Skipping...'
                continue
            all_fil.append(outfil)
            # Write
            print 'Writing ', outfil
            fits.writeto(outfil, timg, head, clobber=True)
    return
####################################
# Perform photometry on SA104 and calculate ZP
def phot_sa104(outfil=None):
    """Measure aperture photometry of the SA 104 standards and derive
    per-filter photometric zero points.

    For each Std/SA104*.fits image, the selected Landolt stars are
    re-centroided, aperture photometry is performed, and the zero point
    ZP = m_Landolt - m_instrumental is sigma-clipped and averaged per
    filter.  Results are written to a FITS table (outfil) and the raw
    standard_star objects are pickled to Std/Raw_SA104.pkl.

    Parameters
    ----------
    outfil : str, optional
        Output FITS table of zero points (default 'Std/ZP_SA104.fits').
    """
    import xastropy.PH136.experiments.hrdiagram as hrd
    from astropy.io.fits import getdata
    import xastropy.phot.ian_phot as iph
    from astropy.stats import sigma_clip
    reload(iph)
    # Outfil
    if outfil == None:
        outfil = 'Std/ZP_SA104.fits'
    # SA 104 stars (470, 350, 461) with Landolt catalog photometry
    sa104 = [ hrd.Landolt_data('104_470', '12:43:22.314', '-00:29:52.83', 14.310, 0.732,
                               0.101, 0.295, 0.356, 0.649),
              hrd.Landolt_data('104_350', '12:43:14.204', '-00:33:20.54', 13.634, 0.673,
                               0.165, 0.383, 0.353, 0.736),
              hrd.Landolt_data('104_461', '12:43:06.031', '-00:32:18.01', 9.705, 0.476,
                               -0.035, 0.288, 0.289, 0.579)]
    # Set (approximate) pixel values in image
    sa104_xypix = np.array( ((58.1, 976.2),
                             (308.0, 427.),
                             (645.9, 583.)))
    gdstar = [1,2] # First one is too close to the edge (I think)
    # Aperture radii in pixels; Nickel binned 2x2 plate scale
    arcpix = 0.368 # arcsec/pix
    aper = (np.array( (7., 15., 25.) )/arcpix).tolist()
    # Grab files
    std_files = glob.glob('Std/SA104*fits')
    std_stars = []
    afilt = []
    # Loop on images
    for ff in std_files:
        # Read
        img,head = getdata(ff,0,header=True)
        filt = str(head['FILTNAM']).strip()
        afilt.append(filt)
        # Loop on stars
        for ii in gdstar:
            # Construct a standard_star with its catalog magnitude in filt
            std_star = hrd.standard_star(sa104[ii].Name,
                                         sa104_xypix[ii,0], sa104_xypix[ii,1],
                                         Filter=filt, mAB=sa104[ii].mAB(filt))
            #pdb.set_trace()
            # Refine the pixel position
            std_star.centroid(img,win_xy=(20,20))
            # Aperture photometry
            iphot = iph.aperphot(ff, pos=[std_star.xpix,std_star.ypix], dap=aper)
            # Instrumental magnitude and its error
            std_star.mI = -2.5*np.log10(iphot.phot)
            std_star.sigmI = 2.5 * iphot.ephot / iphot.phot / np.log(10.)
            # Add to lists
            std_stars.append(std_star)
    # Calculate the zero point for each filter observed
    all_filt=np.array(afilt)
    filters,ifilt = np.unique(all_filt,return_index=True)
    nfilt = len(filters)
    mZP = np.zeros(nfilt)
    sig_mZP = np.zeros(nfilt)
    # Loop on filter
    idx = -1
    for ff in filters:
        zp = []
        idx = idx + 1
        # Collect ZP = catalog - instrumental for every obs in this filter
        for std in std_stars:
            if std.Filter == ff:
                zp.append( std.mAB-std.mI )
        if len(zp) == 0:
            pdb.set_trace()
        # Sigma-clip then average
        clipzp = sigma_clip(zp,2.5,None)
        mZP[idx] = np.mean(clipzp)
        sig_mZP[idx] = np.std(clipzp)
    # Save as FITS table
    # NOTE(review): fits.new_table is deprecated in modern astropy
    # (BinTableHDU.from_columns is the replacement); left as-is here.
    c1 = fits.Column(name='Filter',format='1A',array=filters)
    c2 = fits.Column(name='ZP',format='E',array=mZP)
    c3 = fits.Column(name='sig_ZP',format='E',array=sig_mZP)
    tbhdu = fits.new_table([c1, c2, c3])
    tbhdu.writeto(outfil, clobber=True)
    print 'phot_sa104: Wrote ', outfil
    #pdb.set_trace()
    # Also pickle the raw standard_star objects
    import pickle
    rawfil = 'Std/Raw_SA104.pkl'
    f = open(rawfil,'wb')
    pickle.dump(std_stars,f)
    f.close()
    #
    print 'phot_sa104: All done!'
    return
####################################
# Process M67 images
def sex_m67():
    """Run SExtractor on every reduced M67 image.

    Invokes the external 'sex' binary with Sex/m67_config.sex on each
    Science/M67*.fits frame, then moves the resulting catalog (m67.dat)
    and check image (check.fits) into Sex/ with per-image names.
    """
    from subprocess import Popen, PIPE
    # Get the list
    m67_files = glob.glob('Science/M67*.fits')
    # Loop away on M67 images
    for ff in m67_files:
        # Run SExtractor
        p = Popen(['sex', ff, '-c', 'Sex/m67_config.sex']).wait() #, stdout=PIPE,stderr=PIPE)
        # Per-image output names; ff[8:] strips the 'Science/' prefix
        newdat = 'Sex/'+'sex_'+ff[8:-5]+'.dat'
        newseg = 'Sex/'+'seg_'+ff[8:]
        print 'Writing: ', newdat, newseg
        # Move the fixed-name SExtractor outputs into place
        p2 = Popen(['mv', 'm67.dat', newdat]).wait()#, stdout=PIPE,stderr=PIPE)
        p3 = Popen(['mv', 'check.fits', newseg]).wait()#, stdout=PIPE,stderr=PIPE)
        #pdb.set_trace()
    print 'sex_m67: All Done'
    return
####################################
# Generate the M67 catalog
def cat_m67(outfil=None,ZP_fil=None):
from astropy.table import Table, Column, vstack
# ZP file
if ZP_fil == None:
ZP_fil = 'Std/ZP_SA104.fits'
zp_dat = Table.read(ZP_fil,format='fits')
# Sex files
m67_sex_files = glob.glob('Sex/sex_M67*.dat')
# Loop
flg=0
for ff in m67_sex_files:
# Read Table
#pdb.set_trace()
dat = Table.read(ff,format='ascii.sextractor')
ndat = len(dat)
# Alter by ZeroPoint
filt = ff[-5]
idx = np.where( zp_dat['Filter'] == 'B')[0]
ZPval = float( zp_dat['ZP'][idx] )
dat['MAG_BEST'] += ZPval
pdb.set_trace()
# Filter column
afilt = ndat * [filt]
fcolm = Column(name='FILTER',data=afilt)
# Field column
field = [ff[12:14]]
afield = ndat * field
fldcolm = Column(name='FIELD',data=afield)
dat.add_columns([fcolm,fldcolm])
if flg == 0:
all_dat = dat
flg = 1
else:
all_dat = vstack( [all_dat, dat] )
#pdb.set_trace()
# Write
if outfil == None:
outfil='M67_catalog.fits'
#pdb.set_trace()
#hdu=fits.new_table(all_dat)
all_dat.write(outfil, format='fits',overwrite=True)#, clobber=True)
print 'cat_m67: Wrote ', outfil, ' with ', len(all_dat), ' entires'
return
| bsd-3-clause |
fieldhawker/upm | examples/python/using_carrays.py | 18 | 1354 | #!/usr/bin/env python
# Author: Brendan Le Foll <brendan.le.foll@intel.com>
# Copyright (c) 2014 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
import pyupm_mic

# Instantiate a microphone on analog input 1.
mic = pyupm_mic.Microphone(1)

# Sample buffer for the readings.
# Careful: this is an uninitialized C array with no bounds checking --
# it must hold at least as many elements as the sample count below.
samples = pyupm_mic.uint16Array(3)

# Collect 3 samples, 100 microseconds apart, into the buffer.
mic.getSampledWindow(100, 3, samples)
| mit |
subutai/nupic.research | nupic/research/frameworks/pytorch/lr_scheduler.py | 3 | 7197 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
#
import copy
from bisect import bisect
from torch.optim.lr_scheduler import OneCycleLR, _LRScheduler
class ComposedLRScheduler(_LRScheduler):
    """
    Learning scheduler composed of different LR schedulers and optimizer
    parameters to be effective once the number of epochs reaches the specified
    epoch milestone. Similar to :class:`torch.optim.lr_scheduler.MultiStepLR`
    but instead of just updating the LR at the epoch milestone it replaces the
    LR Scheduler and update other optimizer parameters.

    For example::

        # Use "OneCycleLR" for the first 35 epochs and "StepLR" for the rest
        lr_scheduler = ComposedLRScheduler(schedulers={
            0: dict(
                lr_scheduler_class=torch.optim.lr_scheduler.OneCycleLR,
                lr_scheduler_args=dict(
                    max_lr=6.0,
                    div_factor=6,  # initial_lr = 1.0
                    final_div_factor=4000,  # min_lr = 0.00025
                    pct_start=4.0 / 35.0,
                    epochs=35,
                    steps_per_epoch=len(train_loader),
                    anneal_strategy="linear",
                    max_momentum=0.01,
                    cycle_momentum=False,
                ),
                optimizer_args=dict(
                    lr=0.1,
                    weight_decay=0.0001,
                    momentum=0.0,
                    nesterov=False,
                ),
            ),
            35: dict(
                lr_scheduler_class=torch.optim.lr_scheduler.StepLR,
                lr_scheduler_args=dict(
                    gamma=0.1,
                    step_size=10,
                ),
                optimizer_args=dict(
                    lr=0.1,
                    weight_decay=1e-04,
                    momentum=0.9,
                    dampening=0,
                    nesterov=True
                ),
            ),
        })

    :param optimizer:
        Wrapped optimizer
    :type optimizer: torch.optim.optimizer.Optimizer
    :param schedulers:
        dict mapping epoch milestones to LRScheduler and Optimizer parameters
        with the following fields:
        - "optimizer_args": Optimizer arguments to override
        - "lr_scheduler_class": LR Scheduler class
        - "lr_scheduler_args": LR Scheduler class constructor args in addition
          to optimizer
    :type schedulers: dict[int, dict]
    :param steps_per_epoch: Number of batches/steps per epoch. Must be specified
        when the LR is updated on every batch. Default 1
    :type steps_per_epoch: int
    :param last_epoch:
        The index of the last step. Default: -1.
    :type last_epoch: int
    """

    def __init__(self, optimizer, schedulers, steps_per_epoch=1, last_epoch=-1):
        self.schedulers = schedulers
        self.steps_per_epoch = steps_per_epoch
        self.lr_scheduler = None
        self.active_milestone = None
        # Sorted ascending so bisect() in step() can locate the active entry.
        self.milestones = sorted(self.schedulers.keys())
        assert len(self.milestones) > 0
        # Note: _LRScheduler.__init__ calls self.step() once, which activates
        # the first milestone's scheduler and optimizer parameters.
        super().__init__(optimizer=optimizer, last_epoch=last_epoch)

    def step(self):
        """
        Step should be called after every batch update if OneCycleLR is one of
        the mapped LR Schedulers. Make sure to specify "steps_per_epoch" when
        constructing this scheduler in that case.
        """
        # Get milestone for current step.  `last_epoch` is really a step
        # counter here; epochs/batches are derived from steps_per_epoch.
        current_step = self.last_epoch + 1
        current_epoch = current_step // self.steps_per_epoch
        current_batch = current_step % self.steps_per_epoch
        current_milestone = self.milestones[bisect(self.milestones, current_epoch) - 1]

        # Update LR scheduler and optimizer once the milestone changes
        if self.active_milestone != current_milestone:
            self.active_milestone = current_milestone
            self._update_optimizer()
            self._update_lr_scheduler()
        elif isinstance(self.lr_scheduler, OneCycleLR):
            # Step every batch
            self.lr_scheduler.step()
        elif current_batch == 0:
            # Step once per epoch
            if self.lr_scheduler is not None:
                self.lr_scheduler.step()
        self.last_epoch += 1

    def get_lr(self):
        # Delegate to whichever scheduler is currently active.
        return self.lr_scheduler.get_lr()

    def get_last_lr(self):
        return self.lr_scheduler.get_last_lr()

    def _update_optimizer(self):
        # Apply the active milestone's optimizer overrides on top of the
        # optimizer's construction-time defaults.
        params = self.schedulers[self.active_milestone]
        # Re-initialize optimizer using the default values
        args = copy.deepcopy(self.optimizer.defaults)
        # Override parameters for this milestone
        args.update(params.get("optimizer_args", {}))
        # Update parameters for all parameter groups
        for group in self.optimizer.param_groups:
            group.update(args)

    def _update_lr_scheduler(self):
        # Replace the wrapped LR scheduler with the active milestone's, if any.
        params = self.schedulers[self.active_milestone]
        lr_scheduler_class = params.get("lr_scheduler_class", None)
        if lr_scheduler_class is not None:
            lr_scheduler_args = params.get("lr_scheduler_args", None)
            for group in self.optimizer.param_groups:
                # reset initial_lr for new scheduler
                group.pop("initial_lr", None)
            self.lr_scheduler = lr_scheduler_class(self.optimizer, **lr_scheduler_args)
class LinearLRScheduler(_LRScheduler):
    """
    Linearly ramps up the learning rate from min_lr to max_lr. Useful for test
    scenarios such as the LR-range test (https://arxiv.org/pdf/1803.09820.pdf)

    :param optimizer: wrapped optimizer
    :param min_lr: starting learning rate
    :param max_lr: ending learning rate
    :param epochs: number of epochs in training
    :param steps_per_epoch: number of optimizer steps in each epoch
    :param last_epoch: index of the last step. Default: -1
    """

    def __init__(
        self, optimizer, min_lr, max_lr, epochs, steps_per_epoch, last_epoch=-1
    ):
        self.min_lr = min_lr
        self.max_lr = max_lr
        self.total_steps = epochs * steps_per_epoch
        super().__init__(optimizer, last_epoch=last_epoch)

    @property
    def current_step(self):
        # _LRScheduler increments last_epoch on every step(); in this
        # scheduler each "epoch" tick is really one optimizer step.
        return self.last_epoch

    def get_lr(self):
        """
        Return a linear interpolation between min_lr and max_lr given the
        current and total number of steps.
        """
        if self.total_steps <= 1:
            # A single-step schedule has no ramp. Previously this divided by
            # zero (total_steps - 1 == 0); return the final LR directly.
            return [self.max_lr]
        lr_slope = (self.max_lr - self.min_lr) / (self.total_steps - 1)
        lr_delta = lr_slope * self.current_step
        return [self.min_lr + lr_delta]
| agpl-3.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/django/conf/locale/hu/formats.py | 115 | 1117 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y. F j.'
TIME_FORMAT = 'G.i'
DATETIME_FORMAT = 'Y. F j. G.i'
YEAR_MONTH_FORMAT = 'Y. F'
MONTH_DAY_FORMAT = 'F j.'
SHORT_DATE_FORMAT = 'Y.m.d.'
SHORT_DATETIME_FORMAT = 'Y.m.d. G.i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Input formats are tried in order; the first one that parses wins.
DATE_INPUT_FORMATS = (
    '%Y.%m.%d.',  # '2006.10.25.'
)
TIME_INPUT_FORMATS = (
    '%H.%M.%S',  # '14.30.59'
    '%H.%M',  # '14.30'
)
DATETIME_INPUT_FORMATS = (
    '%Y.%m.%d. %H.%M.%S',  # '2006.10.25. 14.30.59'
    '%Y.%m.%d. %H.%M.%S.%f',  # '2006.10.25. 14.30.59.000200'
    '%Y.%m.%d. %H.%M',  # '2006.10.25. 14.30'
    '%Y.%m.%d.',  # '2006.10.25.'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '  # Non-breaking space
NUMBER_GROUPING = 3
| mit |
karan1276/servo | tests/wpt/web-platform-tests/tools/pytest/testing/test_genscript.py | 194 | 1689 | import pytest
import sys
@pytest.fixture(scope="module")
def standalone(request):
    # Module-scoped: generate the standalone script once and reuse it for
    # every test in this module.
    return Standalone(request)
class Standalone:
    """Wraps a py.test standalone script produced by --genscript."""

    def __init__(self, request):
        self.testdir = request.getfuncargvalue("testdir")
        name = "mypytest"
        outcome = self.testdir.runpytest("--genscript=%s" % name)
        assert outcome.ret == 0
        self.script = self.testdir.tmpdir.join(name)
        assert self.script.check()

    def run(self, anypython, testdir, *args):
        # Execute the generated script under the given python interpreter.
        return testdir._run(anypython, self.script, *args)
def test_gen(testdir, anypython, standalone):
    # Exercise the generated standalone script end to end.
    # NOTE(review): per the skip message below, a script generated from
    # python2.7 presumably cannot target older interpreters -- confirm.
    if sys.version_info >= (2,7):
        result = testdir._run(anypython, "-c",
                              "import sys;print (sys.version_info >=(2,7))")
        assert result.ret == 0
        if result.stdout.str() == "False":
            pytest.skip("genscript called from python2.7 cannot work "
                        "earlier python versions")
    result = standalone.run(anypython, testdir, '--version')
    if result.ret == 2:
        # Known failure mode: the generated script requires setuptools.
        result.stderr.fnmatch_lines(["*ERROR: setuptools not installed*"])
    elif result.ret == 0:
        # Script imported itself correctly; verify it can also run a
        # (deliberately failing) test file.
        result.stderr.fnmatch_lines([
            "*imported from*mypytest*"
        ])
        p = testdir.makepyfile("def test_func(): assert 0")
        result = standalone.run(anypython, testdir, p)
        assert result.ret != 0
    else:
        pytest.fail("Unexpected return code")
def test_freeze_includes():
    """
    Smoke test for freeze_includes(), to ensure that it works across all
    supported python versions.
    """
    frozen = pytest.freeze_includes()
    assert len(frozen) > 1
    assert '_pytest.genscript' in frozen
| mpl-2.0 |
manasapte/pants | src/python/pants/util/rwbuf.py | 18 | 2259 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import threading
from six import StringIO
class _RWBuf(object):
"""An unbounded read-write buffer.
Can be used as a file-like object for reading and writing.
Subclasses implement write functionality."""
def __init__(self, io):
self._lock = threading.Lock()
self._io = io
self._readpos = 0
def read(self, size=-1):
with self._lock:
self._io.seek(self._readpos)
ret = self._io.read() if size == -1 else self._io.read(size)
self._readpos = self._io.tell()
return ret
def read_from(self, pos, size=-1):
with self._lock:
self._io.seek(pos)
return self._io.read() if size == -1 else self._io.read(size)
def write(self, s):
with self._lock:
self.do_write(str(s))
self._io.flush()
def flush(self):
with self._lock:
self._io.flush()
def close(self):
self._io.close()
def do_write(self, s):
raise NotImplementedError
class InMemoryRWBuf(_RWBuf):
  """An unbounded read-write buffer entirely in memory.

  Can be used as a file-like object for reading and writing. Note that it
  can't be used in situations that require a real file (e.g., redirecting
  stdout/stderr of subprocess.Popen())."""

  def __init__(self):
    super(InMemoryRWBuf, self).__init__(StringIO())
    self._writepos = 0

  def do_write(self, s):
    # Reads and writes share one stream, so restore the write cursor before
    # writing and remember where it ended up afterwards.
    self._io.seek(self._writepos)
    self._io.write(s)
    self._writepos = self._io.tell()
class FileBackedRWBuf(_RWBuf):
  """An unbounded read-write buffer backed by a file.

  Can be used as a file-like object for reading and writing the underlying
  file. Has a fileno, so you can redirect stdout/stderr of subprocess.Popen()
  etc. to this object. This is useful when you want to poll the output of
  long-running subprocesses in a separate thread."""

  def __init__(self, backing_file):
    # 'a+' keeps existing content, appends writes, and still allows reads.
    super(FileBackedRWBuf, self).__init__(open(backing_file, 'a+'))
    self.fileno = self._io.fileno

  def do_write(self, s):
    self._io.write(s)
| apache-2.0 |
ntonjeta/iidea-Docker | examples/sobel/src/boost_1_63_0/tools/build/test/link.py | 26 | 10779 | #!/usr/bin/python
# Copyright 2014-2015 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Tests the link-directory rule used to create the
# common boost/ directory in the new git layout.
import BoostBuild
def ignore_config(t):
    """These files are created by the configuration logic in link.jam
    They may or may not exist, depending on the system."""
    for path in ("bin/symlink/test-hardlink",
                 "bin/test-hardlink-source",
                 "bin/test-symlink",
                 "bin/test-symlink-source"):
        t.ignore(path)
def test_basic():
    """Test creation of a single link"""
    t = BoostBuild.Tester()
    t.write("jamroot.jam", """\
import link ;
link-directory dir1-link : src/dir1/include : <location>. ;
""")
    t.write("src/dir1/include/file1.h", "file1")
    t.run_build_system()
    # The linked header must appear in the target directory with its content.
    t.expect_addition("include/file1.h")
    t.expect_content("include/file1.h", "file1")
    ignore_config(t)
    t.expect_nothing_more()
    t.cleanup()
def test_merge_two():
    """Test merging two directories"""
    t = BoostBuild.Tester()
    t.write("jamroot.jam", """\
import link ;
link-directory dir1-link : src/dir1/include : <location>. ;
link-directory dir2-link : src/dir2/include : <location>. ;
""")
    t.write("src/dir1/include/file1.h", "file1")
    t.write("src/dir2/include/file2.h", "file2")
    t.run_build_system()
    # Both sources must end up merged under the single include/ directory.
    t.expect_addition("include/file1.h")
    t.expect_content("include/file1.h", "file1")
    t.expect_addition("include/file2.h")
    t.expect_content("include/file2.h", "file2")
    ignore_config(t)
    t.expect_nothing_more()
    t.cleanup()
def test_merge_existing(group1, group2):
    """Test adding a link when a different symlink already exists"""
    t = BoostBuild.Tester()
    t.write("jamroot.jam", """\
import link ;
link-directory dir1-link : src/dir1/include : <location>. ;
link-directory dir2-link : src/dir2/include : <location>. ;
""")
    t.write("src/dir1/include/file1.h", "file1")
    t.write("src/dir2/include/file2.h", "file2")
    # First run: build only the targets named in group1.
    t.run_build_system(group1)
    if "dir1-link" in group1:
        t.expect_addition("include/file1.h")
        t.expect_content("include/file1.h", "file1")
    if "dir2-link" in group1:
        t.expect_addition("include/file2.h")
        t.expect_content("include/file2.h", "file2")
    ignore_config(t)
    t.expect_nothing_more()
    # Second run: new targets must be linked in, existing ones kept intact.
    t.run_build_system(group2)
    if "dir1-link" in group2:
        if "dir1-link" not in group1:
            t.expect_addition("include/file1.h")
            t.expect_content("include/file1.h", "file1")
        else:
            t.ignore_removal("include/file1.h")
    if "dir2-link" in group2:
        if "dir2-link" not in group1:
            t.expect_addition("include/file2.h")
            t.expect_content("include/file2.h", "file2")
        else:
            t.ignore_removal("include/file2.h")
    ignore_config(t)
    t.expect_nothing_more()
    t.cleanup()
def test_merge_existing_all():
    # Exercise every interesting ordering of building one link before the
    # other, and of adding one link to an already-built one.
    test_merge_existing(["dir1-link"], ["dir2-link"])
    test_merge_existing(["dir2-link"], ["dir1-link"])
    test_merge_existing(["dir1-link"], ["dir1-link", "dir2-link"])
    test_merge_existing(["dir2-link"], ["dir1-link", "dir2-link"])
def test_merge_recursive():
    "Test merging several directories including common prefixes"
    t = BoostBuild.Tester()
    t.write("jamroot.jam", """\
import link ;
link-directory dir1-link : src/dir1/include : <location>. ;
link-directory dir2-link : src/dir2/include : <location>. ;
link-directory dir3-link : src/dir3/include : <location>. ;
""")
    # dir2 and dir3 both contribute to the shared nested/ subdirectory.
    t.write("src/dir1/include/file1.h", "file1")
    t.write("src/dir2/include/file2.h", "file2")
    t.write("src/dir2/include/nested/file3.h", "file3")
    t.write("src/dir3/include/nested/file4.h", "file4")
    t.run_build_system()
    t.expect_addition("include/file1.h")
    t.expect_content("include/file1.h", "file1")
    t.expect_addition("include/file2.h")
    t.expect_content("include/file2.h", "file2")
    t.expect_addition("include/nested/file3.h")
    t.expect_content("include/nested/file3.h", "file3")
    t.expect_addition("include/nested/file4.h")
    t.expect_content("include/nested/file4.h", "file4")
    ignore_config(t)
    t.expect_nothing_more()
    t.cleanup()
def test_merge_recursive_existing(group1, group2):
    "Test merging several directories including common prefixes."
    t = BoostBuild.Tester()
    t.write("jamroot.jam", """\
import link ;
link-directory dir1-link : src/dir1/include : <location>. ;
link-directory dir2-link : src/dir2/include : <location>. ;
link-directory dir3-link : src/dir3/include : <location>. ;
link-directory dir4-link : src/dir4/include : <location>. ;
link-directory dir5-link : src/dir5/include : <location>. ;
""")
    t.write("src/dir1/include/file1.h", "file1")
    t.write("src/dir2/include/nested/file2.h", "file2")
    t.write("src/dir3/include/nested/file3.h", "file3")
    t.write("src/dir4/include/nested/xxx/yyy/file4.h", "file4")
    t.write("src/dir5/include/nested/xxx/yyy/file5.h", "file5")
    t.run_build_system(group1)
    # Second run with debug output; we only require that it completes with
    # some subset of the links present, hence ignore_addition.
    t.run_build_system(group2 + ["-d+12"])
    t.ignore_addition("include/file1.h")
    t.ignore_addition("include/nested/file2.h")
    t.ignore_addition("include/nested/file3.h")
    t.ignore_addition("include/nested/xxx/yyy/file4.h")
    t.ignore_addition("include/nested/xxx/yyy/file5.h")
    ignore_config(t)
    t.expect_nothing_more()
    t.cleanup()
def test_merge_recursive_existing_all():
    # These should create a link
    test_merge_recursive_existing(["dir2-link"], ["dir2-link", "dir1-link"])
    test_merge_recursive_existing(["dir2-link"], ["dir1-link", "dir2-link"])
    # These should create a directory
    test_merge_recursive_existing(["dir2-link"], ["dir2-link", "dir3-link"])
    test_merge_recursive_existing(["dir2-link"], ["dir3-link", "dir2-link"])
    # It should work even if we have to create many intermediate subdirectories
    test_merge_recursive_existing(["dir4-link"], ["dir4-link", "dir5-link"])
    test_merge_recursive_existing(["dir4-link"], ["dir5-link", "dir4-link"])
def test_include_scan():
    """Make sure that the #include scanner finds the headers"""
    t = BoostBuild.Tester()
    t.write("jamroot.jam", """\
import link ;
link-directory dir1-link : src/dir1/include : <location>. ;
link-directory dir2-link : src/dir2/include : <location>. ;
obj test : test.cpp :
  <include>include
  <implicit-dependency>dir1-link
  <implicit-dependency>dir2-link ;
""")
    # file1.h includes file2.h across the two linked source directories.
    t.write("src/dir1/include/file1.h", "#include <file2.h>\n")
    t.write("src/dir2/include/file2.h", "int f();\n")
    t.write("test.cpp", """\
#include <file1.h>
int main() { f(); }
""");
    t.run_build_system(["test"])
    t.expect_addition("bin/$toolset/debug/test.obj")
    # A second run must be a no-op (dependencies fully tracked).
    t.run_build_system()
    t.expect_nothing_more()
    t.cleanup()
def test_include_scan_merge_existing():
    """Make sure that files are replaced if needed when merging in
    a new directory"""
    t = BoostBuild.Tester()
    t.write("jamroot.jam", """\
import link ;
link-directory dir1-link : src/dir1/include : <location>. ;
link-directory dir2-link : src/dir2/include : <location>. ;
obj test : test.cpp :
  <include>include
  <implicit-dependency>dir1-link
  <implicit-dependency>dir2-link ;
""")
    t.write("src/dir1/include/file1.h", "int f();")
    t.write("src/dir2/include/file2.h", "#include <file1.h>")
    t.write("test.cpp", """\
#include <file2.h>
int main() { f(); }
""")
    # Build only dir2-link first, then the compile must pull in dir1-link.
    t.run_build_system(["dir2-link"])
    t.run_build_system(["test"])
    t.expect_addition("include/file1.h")
    t.expect_addition("bin/$toolset/debug/test.obj")
    t.expect_nothing_more()
    t.cleanup()
def test_update_file_link(params1, params2):
    """Tests the behavior of updates when changing the link mode.
    The link needs to be updated iff the original was a copy."""
    t = BoostBuild.Tester()
    t.write("jamroot.jam", """\
import link ;
import project ;
import property-set ;
import modules ;
if --no-symlinks in [ modules.peek : ARGV ]
{
    modules.poke link : .can-symlink : false ;
}
if --no-hardlinks in [ modules.peek : ARGV ]
{
    modules.poke link : .can-hardlink : false ;
}
.project = [ project.current ] ;
.has-files = [ glob include/file1.h ] ;
rule can-link ( properties * ) {
    if ( ! [ link.can-symlink $(.project) : [ property-set.empty ] ] ) &&
        ( ! [ link.can-hardlink $(.project) : [ property-set.empty ] ] )
    {
        ECHO links unsupported ;
    }
}
# Use two directories so that we link to individual files.
link-directory dir1-link : src/dir1/include : <location>. ;
link-directory dir2-link : src/dir2/include : <location>. ;
alias check-linking : : <conditional>@can-link ;
""")
    t.write("src/dir1/include/file1.h", "file1")
    t.write("src/dir2/include/file2.h", "file2")
    t.run_build_system(params1)
    ignore_config(t)
    t.expect_addition("include/file1.h")
    t.expect_addition("include/file2.h")
    t.expect_nothing_more()
    # The jamfile ECHOes "links unsupported" when neither symlinks nor
    # hardlinks can be used, i.e. when the files were copied.
    using_links = "links unsupported" not in t.stdout()
    t.touch("src/dir1/include/file1.h")
    t.run_build_system(params2)
    # A copy must be refreshed after the source is touched; a real link
    # tracks the source automatically.
    if not using_links: t.expect_touch("include/file1.h")
    ignore_config(t)
    t.expect_nothing_more()
    t.cleanup()
def test_update_file_link_all():
    """Test all nine possible combinations of two runs."""
    modes = [[], ["--no-symlinks"], ["--no-symlinks", "--no-hardlinks"]]
    for first in modes:
        for second in modes:
            test_update_file_link(first, second)
def test_error_duplicate():
    """Test that linking a single file from
    multiple sources causes a hard error."""
    t = BoostBuild.Tester()
    t.write("jamroot.jam", """\
import link ;
link-directory dir1-link : src/dir1/include : <location>. ;
link-directory dir2-link : src/dir2/include : <location>. ;
""")
    # Same relative header path in both source directories -> conflict.
    t.write("src/dir1/include/file1.h", "file1")
    t.write("src/dir2/include/file1.h", "file2")
    t.run_build_system(status=1)
    t.expect_output_lines(
        ["error: Cannot create link include/file1.h to src/dir2/include/file1.h.",
         "error: Link previously defined to another file, src/dir1/include/file1.h."])
    t.cleanup()
# The file doubles as its own driver: run the whole suite on import/execute.
test_basic()
test_merge_two()
test_merge_existing_all()
test_merge_recursive()
test_merge_recursive_existing_all()
test_include_scan()
test_include_scan_merge_existing()
test_update_file_link_all()
test_error_duplicate()
| agpl-3.0 |
silly-wacky-3-town-toon/SOURCE-COD | Panda3D-1.10.0/python/Lib/sndhdr.py | 257 | 5973 | """Routines to help recognizing sound files.
Function whathdr() recognizes various types of sound file headers.
It understands almost all headers that SOX can decode.
The return tuple contains the following items, in this order:
- file type (as SOX understands it)
- sampling rate (0 if unknown or hard to decode)
- number of channels (0 if unknown or hard to decode)
- number of frames in the file (-1 if unknown or hard to decode)
- number of bits/sample, or 'U' for U-LAW, or 'A' for A-LAW
If the file doesn't have a recognizable type, it returns None.
If the file can't be opened, IOError is raised.
To compute the total time, divide the number of frames by the
sampling rate (a frame contains a sample for each channel).
Function what() calls whathdr(). (It used to also use some
heuristics for raw data, but this doesn't work very well.)
Finally, the function test() is a simple main program that calls
what() for all files mentioned on the argument list. For directory
arguments it calls what() for all files in that directory. Default
argument is "." (testing all files in the current directory). The
option -r tells it to recurse down directories found inside
explicitly given directories.
"""
# The file structure is top-down except that the test program and its
# subroutine come last.
__all__ = ["what","whathdr"]
def what(filename):
    """Guess the type of a sound file"""
    # Header recognition is all we do; raw-data heuristics were dropped.
    return whathdr(filename)
def whathdr(filename):
    """Recognize sound headers.

    Returns the (type, rate, nchannels, nframes, bits) tuple produced by the
    first matching probe in `tests`, or None if no probe matches.
    Raises IOError if the file can't be opened.
    """
    f = open(filename, 'rb')
    try:
        h = f.read(512)
        for tf in tests:
            res = tf(h, f)
            if res:
                return res
        return None
    finally:
        # Previously the file object leaked; close it on every path.
        f.close()
#-----------------------------------#
# Subroutines per sound header type #
#-----------------------------------#
# Registry of header probes; whathdr() tries them in registration order.
tests = []

def test_aifc(h, f):
    import aifc
    # IFF 'FORM' container holding either AIFF-C or plain AIFF audio.
    if h[:4] != 'FORM':
        return None
    if h[8:12] == 'AIFC':
        fmt = 'aifc'
    elif h[8:12] == 'AIFF':
        fmt = 'aiff'
    else:
        return None
    # Let the aifc module parse the full header from the start of the file.
    f.seek(0)
    try:
        a = aifc.openfp(f, 'r')
    except (EOFError, aifc.Error):
        return None
    return (fmt, a.getframerate(), a.getnchannels(), \
            a.getnframes(), 8*a.getsampwidth())
tests.append(test_aifc)
def test_au(h, f):
    # Sun/NeXT audio: '.snd' magic is big-endian, the byte-swapped magics
    # ('\0ds.' / 'dns.') indicate little-endian fields.
    # NOTE(review): rebinding `f` here shadows the file-object parameter;
    # it is the integer-decoding function from this point on.
    if h[:4] == '.snd':
        f = get_long_be
    elif h[:4] in ('\0ds.', 'dns.'):
        f = get_long_le
    else:
        return None
    type = 'au'
    hdr_size = f(h[4:8])
    data_size = f(h[8:12])
    encoding = f(h[12:16])
    rate = f(h[16:20])
    nchannels = f(h[20:24])
    sample_size = 1 # default
    if encoding == 1:
        sample_bits = 'U'
    elif encoding == 2:
        sample_bits = 8
    elif encoding == 3:
        sample_bits = 16
        sample_size = 2
    else:
        sample_bits = '?'
    frame_size = sample_size * nchannels
    return type, rate, nchannels, data_size//frame_size, sample_bits
tests.append(test_au)
def test_hcom(h, f):
    """Recognize Macintosh HCOM files ('FSSD' marker plus 'HCOM' tag)."""
    if h[65:69] != 'FSSD' or h[128:132] != 'HCOM':
        return None
    # Sample rate is 22050 Hz divided by the stored divisor (offset 128+16).
    divisor = get_long_be(h[144:148])
    return 'hcom', 22050 // divisor, 1, -1, 8
tests.append(test_hcom)
def test_voc(h, f):
    # Creative Labs VOC: fixed 20-byte magic ending in \032 (Ctrl-Z).
    if h[:20] != 'Creative Voice File\032':
        return None
    # Offset of the first sound-data block; only trusted if it points into
    # the 512-byte header we read and at a type-1 (sound data) block.
    sbseek = get_short_le(h[20:22])
    rate = 0
    if 0 <= sbseek < 500 and h[sbseek] == '\1':
        ratecode = ord(h[sbseek+4])
        rate = int(1000000.0 / (256 - ratecode))
    return 'voc', rate, 1, -1, 8
tests.append(test_voc)
def test_wav(h, f):
    # 'RIFF' <len> 'WAVE' 'fmt ' <len>
    if h[:4] != 'RIFF' or h[8:12] != 'WAVE' or h[12:16] != 'fmt ':
        return None
    # Format tag is read but never validated (compressed WAVs pass too).
    style = get_short_le(h[20:22])
    nchannels = get_short_le(h[22:24])
    rate = get_long_le(h[24:28])
    sample_bits = get_short_le(h[34:36])
    return 'wav', rate, nchannels, -1, sample_bits
tests.append(test_wav)
def test_8svx(h, f):
if h[:4] != 'FORM' or h[8:12] != '8SVX':
return None
# Should decode it to get #channels -- assume always 1
return '8svx', 0, 1, 0, 8
# Register the 8SVX probe.
tests.append(test_8svx)
def test_sndt(h, f):
    # SNDT format: 'SOUND' magic, sample count and rate in the header.
    # Returns None implicitly when the magic doesn't match.
    if h[:5] == 'SOUND':
        nsamples = get_long_le(h[8:12])
        rate = get_short_le(h[20:22])
        return 'sndt', rate, 1, nsamples, 8
tests.append(test_sndt)
def test_sndr(h, f):
    # SNDR has no magic; two zero bytes plus a plausible rate is the best
    # heuristic available, so this probe is registered last.
    if h[:2] == '\0\0':
        rate = get_short_le(h[2:4])
        if 4000 <= rate <= 25000:
            return 'sndr', rate, 1, -1, 8
tests.append(test_sndr)
#---------------------------------------------#
# Subroutines to extract numbers from strings #
#---------------------------------------------#
def get_long_be(s):
    # 32-bit big-endian unsigned int from four 8-bit characters.
    b0, b1, b2, b3 = ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
    return (b0 << 24) | (b1 << 16) | (b2 << 8) | b3
def get_long_le(s):
    # 32-bit little-endian unsigned int from four 8-bit characters.
    b0, b1, b2, b3 = ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
    return (b3 << 24) | (b2 << 16) | (b1 << 8) | b0
def get_short_be(s):
    # 16-bit big-endian unsigned int from two 8-bit characters.
    return (ord(s[0]) << 8) + ord(s[1])
def get_short_le(s):
    # 16-bit little-endian unsigned int from two 8-bit characters.
    return (ord(s[1]) << 8) + ord(s[0])
#--------------------#
# Small test program #
#--------------------#
def test():
    # Command-line driver: `sndhdr.py [-r] [file-or-dir ...]`.
    # Python 2 source (print statements below).
    import sys
    recursive = 0
    if sys.argv[1:] and sys.argv[1] == '-r':
        del sys.argv[1:2]
        recursive = 1
    try:
        if sys.argv[1:]:
            testall(sys.argv[1:], recursive, 1)
        else:
            # Default: test every file in the current directory.
            testall(['.'], recursive, 1)
    except KeyboardInterrupt:
        sys.stderr.write('\n[Interrupted]\n')
        sys.exit(1)
def testall(list, recursive, toplevel):
    # Run what() on each path; descend into directories only when asked
    # (always at the top level).  `list` shadows the builtin -- historical.
    import sys
    import os
    for filename in list:
        if os.path.isdir(filename):
            print filename + '/:',
            if recursive or toplevel:
                print 'recursing down:'
                import glob
                names = glob.glob(os.path.join(filename, '*'))
                testall(names, recursive, 0)
            else:
                print '*** directory (use -r) ***'
        else:
            print filename + ':',
            sys.stdout.flush()
            try:
                print what(filename)
            except IOError:
                print '*** not found ***'
# Allow running the recognizer directly from the command line.
if __name__ == '__main__':
    test()
| apache-2.0 |
SuYiling/chrome_depot_tools | git_cache.py | 8 | 23656 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A git command for managing a local cache of git repositories."""
from __future__ import print_function
import errno
import logging
import optparse
import os
import re
import tempfile
import threading
import time
import subprocess
import sys
import urlparse
import zipfile
from download_from_google_storage import Gsutil
import gclient_utils
import subcommand
# Analogous to gc.autopacklimit git config.
GC_AUTOPACKLIMIT = 50

# Marker emitted/recognized when a cache directory is found to be broken.
GIT_CACHE_CORRUPT_MESSAGE = 'WARNING: The Git cache is corrupt.'
# WindowsError only exists on Windows; define a stand-in elsewhere so code
# can always write `except WinErr:`.
try:
  # pylint: disable=E0602
  WinErr = WindowsError
except NameError:
  class WinErr(Exception):
    pass
class LockError(Exception):
  """Raised when a cache lockfile cannot be acquired, released or removed."""
  pass

class RefsHeadsFailedToFetch(Exception):
  """Raised when fetching refs/heads from the remote fails."""
  pass
class Lockfile(object):
  """Class to represent a cross-platform process-specific lockfile."""

  def __init__(self, path):
    self.path = os.path.abspath(path)
    self.lockfile = self.path + ".lock"
    self.pid = os.getpid()

  def _read_pid(self):
    """Read the pid stored in the lockfile.

    Note: This method is potentially racy. By the time it returns the lockfile
    may have been unlocked, removed, or stolen by some other process.
    """
    try:
      with open(self.lockfile, 'r') as f:
        pid = int(f.readline().strip())
    except (IOError, ValueError):
      # Missing/unreadable file or non-numeric content both mean "no owner".
      pid = None
    return pid

  def _make_lockfile(self):
    """Safely creates a lockfile containing the current pid."""
    # O_CREAT|O_EXCL makes creation atomic: it fails if the file exists.
    open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    fd = os.open(self.lockfile, open_flags, 0o644)
    f = os.fdopen(fd, 'w')
    print(self.pid, file=f)
    f.close()

  def _remove_lockfile(self):
    """Delete the lockfile. Complains (implicitly) if it doesn't exist.

    See gclient_utils.py:rmtree docstring for more explanation on the
    windows case.
    """
    if sys.platform == 'win32':
      lockfile = os.path.normcase(self.lockfile)
      # Retry a few times: deletion can fail transiently on Windows.
      for _ in xrange(3):
        exitcode = subprocess.call(['cmd.exe', '/c',
                                    'del', '/f', '/q', lockfile])
        if exitcode == 0:
          return
        time.sleep(3)
      raise LockError('Failed to remove lock: %s' % lockfile)
    else:
      os.remove(self.lockfile)

  def lock(self):
    """Acquire the lock.

    Note: This is a NON-BLOCKING FAIL-FAST operation.
    Do. Or do not. There is no try.
    """
    try:
      self._make_lockfile()
    except OSError as e:
      if e.errno == errno.EEXIST:
        raise LockError("%s is already locked" % self.path)
      else:
        raise LockError("Failed to create %s (err %s)" % (self.path, e.errno))

  def unlock(self):
    """Release the lock."""
    try:
      if not self.is_locked():
        raise LockError("%s is not locked" % self.path)
      if not self.i_am_locking():
        raise LockError("%s is locked, but not by me" % self.path)
      self._remove_lockfile()
    except WinErr:
      # Windows is unreliable when it comes to file locking. YMMV.
      pass

  def break_lock(self):
    """Remove the lock, even if it was created by someone else."""
    try:
      self._remove_lockfile()
      return True
    except OSError as exc:
      if exc.errno == errno.ENOENT:
        # Nothing to break.
        return False
      else:
        raise

  def is_locked(self):
    """Test if the file is locked by anyone.

    Note: This method is potentially racy. By the time it returns the lockfile
    may have been unlocked, removed, or stolen by some other process.
    """
    return os.path.exists(self.lockfile)

  def i_am_locking(self):
    """Test if the file is locked by this process."""
    return self.is_locked() and self.pid == self._read_pid()
class Mirror(object):
git_exe = 'git.bat' if sys.platform.startswith('win') else 'git'
gsutil_exe = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'gsutil.py')
cachepath_lock = threading.Lock()
  def __init__(self, url, refs=None, print_func=None):
    """Create a mirror for `url`, optionally restricted to extra `refs`.

    If `print_func` is given, output goes through it (file kwargs dropped);
    otherwise the builtin print is used.
    """
    self.url = url
    self.refs = refs or []
    self.basedir = self.UrlToCacheDir(url)
    self.mirror_path = os.path.join(self.GetCachePath(), self.basedir)
    if print_func:
      self.print = self.print_without_file
      self.print_func = print_func
    else:
      self.print = print
  def print_without_file(self, message, **kwargs):
    # Adapter used when a print_func was supplied: discard file/stream
    # kwargs and forward only the message text.
    self.print_func(message)
@property
def bootstrap_bucket(self):
if 'chrome-internal' in self.url:
return 'chrome-git-cache'
else:
return 'chromium-git-cache'
  @classmethod
  def FromPath(cls, path):
    """Build a Mirror for the repo that cache directory `path` mirrors."""
    return cls(cls.CacheDirToUrl(path))
@staticmethod
def UrlToCacheDir(url):
"""Convert a git url to a normalized form for the cache dir path."""
parsed = urlparse.urlparse(url)
norm_url = parsed.netloc + parsed.path
if norm_url.endswith('.git'):
norm_url = norm_url[:-len('.git')]
return norm_url.replace('-', '--').replace('/', '-').lower()
@staticmethod
def CacheDirToUrl(path):
"""Convert a cache dir path to its corresponding url."""
netpath = re.sub(r'\b-\b', '/', os.path.basename(path)).replace('--', '-')
return 'https://%s' % netpath
@classmethod
def SetCachePath(cls, cachepath):
with cls.cachepath_lock:
setattr(cls, 'cachepath', cachepath)
  @classmethod
  def GetCachePath(cls):
    """Return the cache root, reading the global `cache.cachepath` git
    configuration on first use and memoizing it on the class."""
    with cls.cachepath_lock:
      if not hasattr(cls, 'cachepath'):
        try:
          cachepath = subprocess.check_output(
              [cls.git_exe, 'config', '--global', 'cache.cachepath']).strip()
        except subprocess.CalledProcessError:
          cachepath = None
        if not cachepath:
          # Empty output and a failed git call are treated identically.
          raise RuntimeError(
              'No global cache.cachepath git configuration found.')
        setattr(cls, 'cachepath', cachepath)
      return getattr(cls, 'cachepath')
  def RunGit(self, cmd, **kwargs):
    """Run git in a subprocess."""
    # Kwargs are forwarded to gclient_utils.CheckCallAndFilter; defaults are
    # applied only when the caller did not supply a value.
    cwd = kwargs.setdefault('cwd', self.mirror_path)
    kwargs.setdefault('print_stdout', False)
    kwargs.setdefault('filter_fn', self.print)
    env = kwargs.get('env') or kwargs.setdefault('env', os.environ.copy())
    # Make git/ssh fail fast instead of prompting for credentials.
    env.setdefault('GIT_ASKPASS', 'true')
    env.setdefault('SSH_ASKPASS', 'true')
    self.print('running "git %s" in "%s"' % (' '.join(cmd), cwd))
    gclient_utils.CheckCallAndFilter([self.git_exe] + cmd, **kwargs)
def config(self, cwd=None):
  """Write the git configuration this mirror relies on into *cwd*.

  Args:
    cwd: repository to configure; defaults to the mirror checkout.
  """
  if cwd is None:
    cwd = self.mirror_path
  # Don't run git-gc in a daemon.  Bad things can happen if it gets killed.
  self.RunGit(['config', 'gc.autodetach', '0'], cwd=cwd)
  # Don't combine pack files into one big pack file.  It's really slow for
  # repositories, and there's no way to track progress and make sure it's
  # not stuck.
  self.RunGit(['config', 'gc.autopacklimit', '0'], cwd=cwd)
  # Allocate more RAM for cache-ing delta chains, for better performance
  # of "Resolving deltas".
  self.RunGit(['config', 'core.deltaBaseCacheLimit',
               gclient_utils.DefaultDeltaBaseCacheLimit()], cwd=cwd)
  self.RunGit(['config', 'remote.origin.url', self.url], cwd=cwd)
  self.RunGit(['config', '--replace-all', 'remote.origin.fetch',
               '+refs/heads/*:refs/heads/*', r'\+refs/heads/\*:.*'], cwd=cwd)
  for ref in self.refs:
    ref = ref.lstrip('+').rstrip('/')
    if ref.startswith('refs/'):
      refspec = '+%s:%s' % (ref, ref)
      regex = r'\+%s:.*' % ref.replace('*', r'\*')
    else:
      refspec = '+refs/%s/*:refs/%s/*' % (ref, ref)
      # Bug fix: the dedup regex used to target '+refs/heads/<ref>:...',
      # which can never match the refspec written above ('+refs/<ref>/*:...'),
      # so --replace-all appended a duplicate entry on every call.  Match
      # exactly what we write.
      regex = r'\+refs/%s/\*:.*' % ref.replace('*', r'\*')
    self.RunGit(
        ['config', '--replace-all', 'remote.origin.fetch', refspec, regex],
        cwd=cwd)
def bootstrap_repo(self, directory):
  """Bootstrap the repo from Google Storage if possible.

  More apt-ly named bootstrap_repo_from_cloud_if_possible_else_do_nothing().

  Returns True iff *directory* was populated from the latest cloud zip.
  """
  # Decide how to unpack: 7z on Windows, unzip elsewhere, with a fallback
  # to Python's zipfile when no suitable tool is available.
  python_fallback = False
  if (sys.platform.startswith('win') and
      not gclient_utils.FindExecutable('7z')):
    python_fallback = True
  elif sys.platform.startswith('darwin'):
    # The OSX version of unzip doesn't support zip64.
    python_fallback = True
  elif not gclient_utils.FindExecutable('unzip'):
    python_fallback = True
  gs_folder = 'gs://%s/%s' % (self.bootstrap_bucket, self.basedir)
  gsutil = Gsutil(self.gsutil_exe, boto_path=None)
  # Get the most recent version of the zipfile.
  _, ls_out, _ = gsutil.check_call('ls', gs_folder)
  ls_out_sorted = sorted(ls_out.splitlines())
  if not ls_out_sorted:
    # This repo is not on Google Storage.
    return False
  latest_checkout = ls_out_sorted[-1]
  # Download zip file to a temporary directory.
  try:
    # NOTE(review): if mkdtemp itself raises, 'tempdir' is unbound in the
    # finally clause below (NameError) -- worth hardening.
    tempdir = tempfile.mkdtemp(prefix='_cache_tmp', dir=self.GetCachePath())
    self.print('Downloading %s' % latest_checkout)
    code = gsutil.call('cp', latest_checkout, tempdir)
    if code:
      return False
    filename = os.path.join(tempdir, latest_checkout.split('/')[-1])
    # Unpack the file with 7z on Windows, unzip on linux, or fallback.
    if not python_fallback:
      if sys.platform.startswith('win'):
        cmd = ['7z', 'x', '-o%s' % directory, '-tzip', filename]
      else:
        cmd = ['unzip', filename, '-d', directory]
      retcode = subprocess.call(cmd)
    else:
      try:
        with zipfile.ZipFile(filename, 'r') as f:
          f.printdir()
          f.extractall(directory)
      except Exception as e:
        self.print('Encountered error: %s' % str(e), file=sys.stderr)
        retcode = 1
      else:
        retcode = 0
  finally:
    # Clean up the downloaded zipfile.
    gclient_utils.rm_file_or_tree(tempdir)
  if retcode:
    self.print(
        'Extracting bootstrap zipfile %s failed.\n'
        'Resuming normal operations.' % filename)
    return False
  return True
def exists(self):
  """Return True when the mirror checkout looks initialized on disk.

  A bare git repository always carries a 'config' file at its root, so its
  presence is used as the existence marker.
  """
  config_path = os.path.join(self.mirror_path, 'config')
  return os.path.isfile(config_path)
def _ensure_bootstrapped(self, depth, bootstrap, force=False):
  """Prepare a fresh bootstrapped checkout in a temp dir when needed.

  Returns the temp dir path when a new checkout was staged there (the
  caller is responsible for moving it into place), or None when the
  existing cache should be used as-is.
  """
  tempdir = None
  config_file = os.path.join(self.mirror_path, 'config')
  pack_dir = os.path.join(self.mirror_path, 'objects', 'pack')
  pack_files = []
  if os.path.isdir(pack_dir):
    pack_files = [f for f in os.listdir(pack_dir) if f.endswith('.pack')]
  # Re-bootstrap when forced, when no cache exists yet, or when pack files
  # have piled up past the limit (gc.autopacklimit is disabled in config()).
  should_bootstrap = (force or
                      not os.path.exists(config_file) or
                      len(pack_files) > GC_AUTOPACKLIMIT)
  if should_bootstrap:
    tempdir = tempfile.mkdtemp(
        prefix='_cache_tmp', suffix=self.basedir, dir=self.GetCachePath())
    # Shallow caches are never bootstrapped from the cloud bundle.
    bootstrapped = not depth and bootstrap and self.bootstrap_repo(tempdir)
    if bootstrapped:
      # Bootstrap succeeded; delete previous cache, if any.
      gclient_utils.rmtree(self.mirror_path)
    elif not os.path.exists(config_file):
      # Bootstrap failed, no previous cache; start with a bare git dir.
      self.RunGit(['init', '--bare'], cwd=tempdir)
    else:
      # Bootstrap failed, previous cache exists; warn and continue.
      logging.warn(
          'Git cache has a lot of pack files (%d). Tried to re-bootstrap '
          'but failed. Continuing with non-optimized repository.'
          % len(pack_files))
      gclient_utils.rmtree(tempdir)
      tempdir = None
  else:
    if depth and os.path.exists(os.path.join(self.mirror_path, 'shallow')):
      logging.warn(
          'Shallow fetch requested, but repo cache already exists.')
  return tempdir
def _fetch(self, rundir, verbose, depth):
  """Fetch every configured refspec into *rundir*.

  Raises RefsHeadsFailedToFetch when the primary heads refspec fails;
  failures of any other refspec are only logged.
  """
  self.config(rundir)
  v = []
  d = []
  if verbose:
    v = ['-v', '--progress']
  if depth:
    d = ['--depth', str(depth)]
  fetch_cmd = ['fetch'] + v + d + ['origin']
  # Fetch each refspec individually so one broken ref doesn't abort the rest.
  fetch_specs = subprocess.check_output(
      [self.git_exe, 'config', '--get-all', 'remote.origin.fetch'],
      cwd=rundir).strip().splitlines()
  for spec in fetch_specs:
    try:
      self.print('Fetching %s' % spec)
      self.RunGit(fetch_cmd + [spec], cwd=rundir, retry=True)
    except subprocess.CalledProcessError:
      if spec == '+refs/heads/*:refs/heads/*':
        raise RefsHeadsFailedToFetch
      logging.warn('Fetch of %s failed' % spec)
def populate(self, depth=None, shallow=False, bootstrap=False,
             verbose=False, ignore_lock=False):
  """Bring the mirror up to date, optionally bootstrapping from GS.

  Args:
    depth: if set, perform a shallow fetch of that many commits.
    shallow: shorthand for depth=10000 when no depth is given.
    bootstrap: allow seeding a fresh cache from the cloud zip bundle.
    verbose: pass -v/--progress through to git fetch.
    ignore_lock: skip taking the per-repo lockfile.
  """
  assert self.GetCachePath()
  if shallow and not depth:
    depth = 10000
  gclient_utils.safe_makedirs(self.GetCachePath())
  lockfile = Lockfile(self.mirror_path)
  if not ignore_lock:
    lockfile.lock()
  tempdir = None
  try:
    tempdir = self._ensure_bootstrapped(depth, bootstrap)
    rundir = tempdir or self.mirror_path
    self._fetch(rundir, verbose, depth)
  except RefsHeadsFailedToFetch:
    # This is a major failure, we need to clean and force a bootstrap.
    gclient_utils.rmtree(rundir)
    self.print(GIT_CACHE_CORRUPT_MESSAGE)
    tempdir = self._ensure_bootstrapped(depth, bootstrap, force=True)
    assert tempdir
    self._fetch(tempdir or self.mirror_path, verbose, depth)
  finally:
    # If we fetched into a staging dir, atomically swap it into place.
    if tempdir:
      try:
        if os.path.exists(self.mirror_path):
          gclient_utils.rmtree(self.mirror_path)
        os.rename(tempdir, self.mirror_path)
      except OSError as e:
        # This is somehow racy on Windows.
        # Catching OSError because WindowsError isn't portable and
        # pylint complains.
        self.print('Error moving %s to %s: %s' % (tempdir, self.mirror_path,
                                                  str(e)))
    if not ignore_lock:
      lockfile.unlock()
def update_bootstrap(self, prune=False):
  """Zip the mirror and upload it to Google Storage for bootstrapping.

  Args:
    prune: when True, delete all other zip bundles for this repo.
  """
  # The files are named <git number>.zip
  # NOTE(review): 'git number' is a depot_tools-provided subcommand.
  gen_number = subprocess.check_output(
      [self.git_exe, 'number', 'master'], cwd=self.mirror_path).strip()
  # Run Garbage Collect to compress packfile.
  self.RunGit(['gc', '--prune=all'])
  # Creating a temp file and then deleting it ensures we can use this name.
  _, tmp_zipfile = tempfile.mkstemp(suffix='.zip')
  os.remove(tmp_zipfile)
  subprocess.call(['zip', '-r', tmp_zipfile, '.'], cwd=self.mirror_path)
  gsutil = Gsutil(path=self.gsutil_exe, boto_path=None)
  gs_folder = 'gs://%s/%s' % (self.bootstrap_bucket, self.basedir)
  dest_name = '%s/%s.zip' % (gs_folder, gen_number)
  gsutil.call('cp', tmp_zipfile, dest_name)
  os.remove(tmp_zipfile)
  # Remove all other files in the same directory.
  if prune:
    _, ls_out, _ = gsutil.check_call('ls', gs_folder)
    for filename in ls_out.splitlines():
      if filename == dest_name:
        continue
      gsutil.call('rm', filename)
@staticmethod
def DeleteTmpPackFiles(path):
  """Delete temporary pack files left behind by interrupted git processes.

  Scans <path>/objects/pack for '.tmp-*' and 'tmp_pack_*' entries; missing
  pack directories and undeletable files are tolerated.
  """
  pack_dir = os.path.join(path, 'objects', 'pack')
  if not os.path.isdir(pack_dir):
    return
  for name in os.listdir(pack_dir):
    if not (name.startswith('.tmp-') or name.startswith('tmp_pack_')):
      continue
    victim = os.path.join(pack_dir, name)
    try:
      os.remove(victim)
      logging.warn('Deleted stale temporary pack file %s' % victim)
    except OSError:
      logging.warn('Unable to delete temporary pack file %s' % victim)
@classmethod
def BreakLocks(cls, path):
  """Remove stale lockfiles and temp pack files under *path*.

  Returns True if any lock was actually broken.
  """
  did_unlock = False
  lf = Lockfile(path)
  if lf.break_lock():
    did_unlock = True
  # Look for lock files that might have been left behind by an interrupted
  # git process.
  lf = os.path.join(path, 'config.lock')
  if os.path.exists(lf):
    os.remove(lf)
    did_unlock = True
  cls.DeleteTmpPackFiles(path)
  return did_unlock
def unlock(self):
  """Break stale locks on this mirror; returns True if any was broken."""
  return self.BreakLocks(self.mirror_path)
@classmethod
def UnlockAll(cls):
  """Break locks on every cached repo; returns the list of unlocked dirs."""
  cachepath = cls.GetCachePath()
  if not cachepath:
    return
  dirlist = os.listdir(cachepath)
  repo_dirs = set([os.path.join(cachepath, path) for path in dirlist
                   if os.path.isdir(os.path.join(cachepath, path))])
  for dirent in dirlist:
    if dirent.startswith('_cache_tmp') or dirent.startswith('tmp'):
      # Leftover staging directories from interrupted populate() runs.
      gclient_utils.rm_file_or_tree(os.path.join(cachepath, dirent))
    elif (dirent.endswith('.lock') and
          os.path.isfile(os.path.join(cachepath, dirent))):
      # A stray .lock file implies its repo dir needs unlocking even when
      # the directory itself was not listed above.
      repo_dirs.add(os.path.join(cachepath, dirent[:-5]))
  unlocked_repos = []
  for repo_dir in repo_dirs:
    if cls.BreakLocks(repo_dir):
      unlocked_repos.append(repo_dir)
  return unlocked_repos
@subcommand.usage('[url of repo to check for caching]')
def CMDexists(parser, args):
  """Check to see if there already is a cache of the given repo."""
  _, args = parser.parse_args(args)
  if not len(args) == 1:
    parser.error('git cache exists only takes exactly one repo url.')
  url = args[0]
  mirror = Mirror(url)
  if mirror.exists():
    # Print the path so scripts can consume it; exit status 0 == exists.
    print(mirror.mirror_path)
    return 0
  return 1
@subcommand.usage('[url of repo to create a bootstrap zip file]')
def CMDupdate_bootstrap(parser, args):
  """Create and uploads a bootstrap tarball."""
  # Lets just assert we can't do this on Windows.
  if sys.platform.startswith('win'):
    print('Sorry, update bootstrap will not work on Windows.', file=sys.stderr)
    return 1
  parser.add_option('--prune', action='store_true',
                    help='Prune all other cached zipballs of the same repo.')
  # First, we need to ensure the cache is populated.
  populate_args = args[:]
  populate_args.append('--no-bootstrap')
  CMDpopulate(parser, populate_args)
  # Get the repo directory.
  options, args = parser.parse_args(args)
  url = args[0]
  mirror = Mirror(url)
  mirror.update_bootstrap(options.prune)
  return 0
@subcommand.usage('[url of repo to add to or update in cache]')
def CMDpopulate(parser, args):
  """Ensure that the cache has all up-to-date objects for the given repo."""
  parser.add_option('--depth', type='int',
                    help='Only cache DEPTH commits of history')
  parser.add_option('--shallow', '-s', action='store_true',
                    help='Only cache 10000 commits of history')
  parser.add_option('--ref', action='append',
                    help='Specify additional refs to be fetched')
  parser.add_option('--no_bootstrap', '--no-bootstrap',
                    action='store_true',
                    help='Don\'t bootstrap from Google Storage')
  parser.add_option('--ignore_locks', '--ignore-locks',
                    action='store_true',
                    help='Don\'t try to lock repository')
  options, args = parser.parse_args(args)
  if not len(args) == 1:
    parser.error('git cache populate only takes exactly one repo url.')
  url = args[0]
  mirror = Mirror(url, refs=options.ref)
  # Translate CLI options into Mirror.populate keyword arguments.
  kwargs = {
      'verbose': options.verbose,
      'shallow': options.shallow,
      'bootstrap': not options.no_bootstrap,
      'ignore_lock': options.ignore_locks,
  }
  if options.depth:
    kwargs['depth'] = options.depth
  mirror.populate(**kwargs)
@subcommand.usage('Fetch new commits into cache and current checkout')
def CMDfetch(parser, args):
  """Update mirror, and fetch in cwd."""
  parser.add_option('--all', action='store_true', help='Fetch all remotes')
  options, args = parser.parse_args(args)
  # Figure out which remotes to fetch. This mimics the behavior of regular
  # 'git fetch'. Note that in the case of "stacked" or "pipelined" branches,
  # this will NOT try to traverse up the branching structure to find the
  # ultimate remote to update.
  remotes = []
  if options.all:
    assert not args, 'fatal: fetch --all does not take a repository argument'
    remotes = subprocess.check_output([Mirror.git_exe, 'remote']).splitlines()
  elif args:
    remotes = args
  else:
    current_branch = subprocess.check_output(
        [Mirror.git_exe, 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
    if current_branch != 'HEAD':
      upstream = subprocess.check_output(
          [Mirror.git_exe, 'config', 'branch.%s.remote' % current_branch]
          ).strip()
      if upstream and upstream != '.':
        remotes = [upstream]
  if not remotes:
    remotes = ['origin']
  cachepath = Mirror.GetCachePath()
  git_dir = os.path.abspath(subprocess.check_output(
      [Mirror.git_exe, 'rev-parse', '--git-dir']))
  # NOTE(review): abspath is applied twice; the second call is a no-op.
  git_dir = os.path.abspath(git_dir)
  # If we are inside a cache checkout itself, just refresh the mirror.
  if git_dir.startswith(cachepath):
    mirror = Mirror.FromPath(git_dir)
    mirror.populate()
    return 0
  for remote in remotes:
    remote_url = subprocess.check_output(
        [Mirror.git_exe, 'config', 'remote.%s.url' % remote]).strip()
    if remote_url.startswith(cachepath):
      mirror = Mirror.FromPath(remote_url)
      # Silence the mirror's chatter; the plain message below is enough.
      mirror.print = lambda *args: None
      print('Updating git cache...')
      mirror.populate()
    subprocess.check_call([Mirror.git_exe, 'fetch', remote])
  return 0
@subcommand.usage('[url of repo to unlock, or -a|--all]')
def CMDunlock(parser, args):
  """Unlock one or all repos if their lock files are still around."""
  parser.add_option('--force', '-f', action='store_true',
                    help='Actually perform the action')
  parser.add_option('--all', '-a', action='store_true',
                    help='Unlock all repository caches')
  options, args = parser.parse_args(args)
  if len(args) > 1 or (len(args) == 0 and not options.all):
    parser.error('git cache unlock takes exactly one repo url, or --all')
  if not options.force:
    cachepath = Mirror.GetCachePath()
    # Bug fix: isfile() was previously called on the bare directory entry
    # name (relative to cwd, never the cache), so the list was always
    # empty.  Join with the cache path before testing.
    lockfiles = [os.path.join(cachepath, path)
                 for path in os.listdir(cachepath)
                 if path.endswith('.lock') and
                 os.path.isfile(os.path.join(cachepath, path))]
    # Bug fix: the message literals used to be the *separator* argument of
    # ', '.join(lockfiles) via implicit string concatenation, mangling the
    # error text.  Concatenate the joined list explicitly instead.
    parser.error('git cache unlock requires -f|--force to do anything. '
                 'Refusing to unlock the following repo caches: '
                 + ', '.join(lockfiles))
  unlocked_repos = []
  if options.all:
    unlocked_repos.extend(Mirror.UnlockAll())
  else:
    m = Mirror(args[0])
    if m.unlock():
      unlocked_repos.append(m.mirror_path)
  if unlocked_repos:
    logging.info('Broke locks on these caches:\n  %s' % '\n  '.join(
        unlocked_repos))
class OptionParser(optparse.OptionParser):
  """Wrapper class for OptionParser to handle global options."""

  def __init__(self, *args, **kwargs):
    optparse.OptionParser.__init__(self, *args, prog='git cache', **kwargs)
    self.add_option('-c', '--cache-dir',
                    help='Path to the directory containing the cache')
    self.add_option('-v', '--verbose', action='count', default=1,
                    help='Increase verbosity (can be passed multiple times)')
    self.add_option('-q', '--quiet', action='store_true',
                    help='Suppress all extraneous output')

  def parse_args(self, args=None, values=None):
    """Parse args, then apply the global -q/-v/-c options as side effects."""
    options, args = optparse.OptionParser.parse_args(self, args, values)
    if options.quiet:
      options.verbose = 0
    # Map the verbosity count onto a logging level, capped at DEBUG.
    levels = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
    logging.basicConfig(level=levels[min(options.verbose, len(levels) - 1)])
    try:
      global_cache_dir = Mirror.GetCachePath()
    except RuntimeError:
      global_cache_dir = None
    if options.cache_dir:
      if global_cache_dir and (
          os.path.abspath(options.cache_dir) !=
          os.path.abspath(global_cache_dir)):
        logging.warn('Overriding globally-configured cache directory.')
      Mirror.SetCachePath(options.cache_dir)
    return options, args
def main(argv):
  """Dispatch to the CMD* subcommand named by argv."""
  dispatcher = subcommand.CommandDispatcher(__name__)
  return dispatcher.execute(OptionParser(), argv)
if __name__ == '__main__':
  try:
    sys.exit(main(sys.argv[1:]))
  except KeyboardInterrupt:
    # Exit quietly with status 1 on Ctrl-C instead of dumping a traceback.
    sys.stderr.write('interrupted\n')
    sys.exit(1)
| bsd-3-clause |
thurt/arangodb | 3rdParty/V8-4.3.61/test/webkit/testcfg.py | 24 | 6502 | # Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import itertools
import os
import re
from testrunner.local import testsuite
from testrunner.objects import testcase
# '// Flags: ...' comment lines that add extra d8 flags to a test.
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
# '// Files: ...' comment lines listing extra files to load before the test.
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
# Marker requiring the test's own filename to be exposed via TEST_FILE_NAME.
SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")

# TODO (machenbach): Share commonalities with mjstest.
class WebkitTestSuite(testsuite.TestSuite):
  """Test suite for V8's imported WebKit JavaScript tests.

  Discovers .js files under the suite root, assembles the d8 command line
  from in-file '// Flags:' / '// Files:' annotations, and judges failure by
  comparing actual output against the checked-in *-expected.txt files.
  """

  def __init__(self, name, root):
    super(WebkitTestSuite, self).__init__(name, root)

  def ListTests(self, context):
    """Walk the suite root and create one TestCase per .js file."""
    tests = []
    for dirname, dirs, files in os.walk(self.root):
      # Prune hidden directories and shared 'resources' helpers in-place so
      # os.walk does not descend into them.
      for dotted in [x for x in dirs if x.startswith('.')]:
        dirs.remove(dotted)
      if 'resources' in dirs:
        dirs.remove('resources')
      # Sort so discovery (and therefore test order) is deterministic.
      dirs.sort()
      files.sort()
      for filename in files:
        if filename.endswith(".js"):
          testname = os.path.join(dirname[len(self.root) + 1:], filename[:-3])
          test = testcase.TestCase(self, testname)
          tests.append(test)
    return tests

  def GetFlagsForTestCase(self, testcase, context):
    """Build the d8 argument list for *testcase* from its source annotations."""
    source = self.GetSourceForTest(testcase)
    flags = [] + context.mode_flags
    flags_match = re.findall(FLAGS_PATTERN, source)
    for match in flags_match:
      flags += match.strip().split()
    files_list = []  # List of file names to append to command arguments.
    files_match = FILES_PATTERN.search(source);
    # Accept several lines of 'Files:'.
    while True:
      if files_match:
        files_list += files_match.group(1).strip().split()
        files_match = FILES_PATTERN.search(source, files_match.end())
      else:
        break
    # Extra files are given relative to two levels above the suite root.
    files = [ os.path.normpath(os.path.join(self.root, '..', '..', f))
              for f in files_list ]
    testfilename = os.path.join(self.root, testcase.path + self.suffix())
    if SELF_SCRIPT_PATTERN.search(source):
      env = ["-e", "TEST_FILE_NAME=\"%s\"" % testfilename.replace("\\", "\\\\")]
      files = env + files
    # Wrap the test between the standalone pre/post harness scripts.
    files.append(os.path.join(self.root, "resources/standalone-pre.js"))
    files.append(testfilename)
    files.append(os.path.join(self.root, "resources/standalone-post.js"))
    flags += files
    if context.isolates:
      flags.append("--isolate")
      flags += files
    return testcase.flags + flags

  def GetSourceForTest(self, testcase):
    """Return the raw source text of *testcase*."""
    filename = os.path.join(self.root, testcase.path + self.suffix())
    with open(filename) as f:
      return f.read()

  # TODO(machenbach): Share with test/message/testcfg.py
  def _IgnoreLine(self, string):
    """Ignore empty lines, valgrind output and Android output."""
    if not string: return True
    return (string.startswith("==") or string.startswith("**") or
            string.startswith("ANDROID") or
            # These five patterns appear in normal Native Client output.
            string.startswith("DEBUG MODE ENABLED") or
            string.startswith("tools/nacl-run.py") or
            string.find("BYPASSING ALL ACL CHECKS") > 0 or
            string.find("Native Client module will be loaded") > 0 or
            string.find("NaClHostDescOpen:") > 0 or
            # FIXME(machenbach): The test driver shouldn't try to use slow
            # asserts if they weren't compiled. This fails in optdebug=2.
            string == "Warning: unknown flag --enable-slow-asserts." or
            string == "Try --help for options")

  def IsFailureOutput(self, output, testpath):
    """Compare actual stdout against the expected file, ignoring noise lines."""
    if super(WebkitTestSuite, self).IsFailureOutput(output, testpath):
      return True
    file_name = os.path.join(self.root, testpath) + "-expected.txt"
    # NOTE(review): 'file' is the Python 2 builtin; this module predates py3.
    with file(file_name, "r") as expected:
      expected_lines = expected.readlines()

    def ExpIterator():
      # Expected lines, skipping comments and blanks.
      for line in expected_lines:
        if line.startswith("#") or not line.strip(): continue
        yield line.strip()

    def ActIterator(lines):
      # Actual lines, skipping everything _IgnoreLine classifies as noise.
      for line in lines:
        if self._IgnoreLine(line.strip()): continue
        yield line.strip()

    def ActBlockIterator():
      """Iterates over blocks of actual output lines."""
      lines = output.stdout.splitlines()
      start_index = 0
      found_eqeq = False
      for index, line in enumerate(lines):
        # If a stress test separator is found:
        if line.startswith("=="):
          # Iterate over all lines before a separator except the first.
          if not found_eqeq:
            found_eqeq = True
          else:
            yield ActIterator(lines[start_index:index])
          # The next block of output lines starts after the separator.
          start_index = index + 1
      # Iterate over complete output if no separator was found.
      if not found_eqeq:
        yield ActIterator(lines)

    # Every stress block must match the expectations line-for-line.
    for act_iterator in ActBlockIterator():
      for (expected, actual) in itertools.izip_longest(
          ExpIterator(), act_iterator, fillvalue=''):
        if expected != actual:
          return True
      return False
def GetSuite(name, root):
  """Entry point used by the test runner to instantiate this suite."""
  return WebkitTestSuite(name, root)
| apache-2.0 |
alee156/clviz | prototype/connectivity.py | 2 | 8155 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import matplotlib
from matplotlib import pyplot as plt
import numpy as np
from numpy import linalg as LA
import cv2
import math
import plotly
from plotly.graph_objs import *
from plotly.offline import download_plotlyjs, init_notebook_mode, iplot
from plotly import tools
import time
import collections as col
from collections import OrderedDict
import ast
from ndreg import *
import ndio.remote.neurodata as neurodata
import nibabel as nib
import networkx as nx
import re
import pandas as pd
import requests
import json
import seaborn as sns
import csv, gc
from sklearn.manifold import spectral_embedding as se
import scipy.sparse as sp
plotly.offline.init_notebook_mode()
def spec_clust(graphx, num_components):
    """
    Function for doing the spectral embedding.

    :param graphx: networkx graph to embed
    :param num_components: number of embedding dimensions to keep
    :return: array of embedded node coordinates, row-aligned with
        graphx.nodes()
    """
    adj_mat = nx.adjacency_matrix(graphx)
    # drop_first=False keeps the trivial first eigenvector in the output.
    result = se(adj_mat, n_components=num_components, drop_first=False)
    return result
def add_to_dict(d, region, index):
    """Append *index* to the list at d[region], creating it on first use.

    Mutates *d* in place and also returns it for convenience.
    """
    d.setdefault(region, []).append(index)
    return d
def get_adj_mat(regions_path):
    """Load a CSV of points into a SparseMatrix keyed by (x, y, z).

    NOTE(review): assumes columns 0-2 are coordinates and column 4 is the
    region label -- confirm against the upstream file format.
    """
    points = np.genfromtxt(regions_path, delimiter=',')
    x_dim = np.max(points[:, 0])
    y_dim = np.max(points[:, 1])
    z_dim = np.max(points[:, 2])
    am = SparseMatrix(x_dim, y_dim, z_dim)
    for point in points:
        am.add(tuple(point[0:3]), point[4])
    return am
def get_dict_real(g, se_result, regions_path):
    """Group embedded points by brain region.

    :param g: networkx graph whose node 'attr' holds a coordinate string
    :param se_result: spectral embedding, row-aligned with g.nodes()
    :param regions_path: CSV of points with region labels in column 4
    :return: tuple of (region -> embedded points,
                       region -> original points,
                       region -> {node name: embedded point})
    """
    nodes = g.nodes()
    points = np.genfromtxt(regions_path, delimiter=',')
    orig_dict = OrderedDict()
    d = {}
    sparse_mat = get_adj_mat(regions_path)
    for index, node in enumerate(nodes):
        s = g.node[node]['attr']
        point = ast.literal_eval(s)
        region = sparse_mat.get(tuple(point))
        # NOTE(review): region is -1 when the point is missing from the
        # regions file; such points all end up grouped under key -1.
        add_to_dict(d, region, index)
    for point in points:
        region = point[4]
        add_to_dict(orig_dict, region, point[0:3])
    se_regions_nodes = {}
    se_regions = {}
    for key, value in d.iteritems():
        index_list = value
        nodes_arr = np.array(nodes)
        pt_names = nodes_arr[index_list]
        se_pts = se_result[index_list]
        nodes_to_se = dict(zip(pt_names, se_pts))  # maps from node names to embedded point coordinates
        se_regions_nodes[key] = nodes_to_se
        se_regions[key] = se_pts
    return se_regions, orig_dict, se_regions_nodes
def create_connectivity_graph(orig_avg_dict, se_avg_dict, max_dist=0.02):
    """Build a complete region graph weighted by embedded-average distance.

    :param orig_avg_dict: region -> average original-space coordinate
    :param se_avg_dict: region -> average embedded coordinate
    :param max_dist: cap applied to every edge weight
    :return: networkx Graph with one node per region
    """
    g = nx.Graph()
    for key, avg in se_avg_dict.iteritems():
        for key2, avg2 in se_avg_dict.iteritems():
            avg_np = np.array(avg)
            avg2_np = np.array(avg2)
            diff = np.linalg.norm(avg_np - avg2_np)
            # Clamp large distances so outliers don't dominate the layout.
            diff = max_dist if diff > max_dist else diff
            g.add_edge(key, key2, weight=diff)
    # Setting the coordinate attribute for each region node to the average
    # of that region.
    for key, avg in orig_avg_dict.iteritems():
        g.node[key]['attr'] = avg
    return g
def get_connectivity_hard(eig_dict, orig_dict=None, max_dist=0.02):
    """Compute nearest-neighbor connectivity between regions.

    Uses create_connectivity_graph.

    :param eig_dict: region -> embedded points (rows)
    :param orig_dict: optional region -> original-space points; when given,
        the same nearest-neighbor computation is also done in original space
    :param max_dist: edge-weight cap forwarded to create_connectivity_graph
    :return: con_dict mapping region -> [closest region, distance]; when
        orig_dict is given, returns (con_dict, orig_con_dict, graph)
    """
    eigenvector_index = 1  # the second smallest eigenvector
    avg_dict = {}
    orig_avg_dict = OrderedDict()
    # dict that maps from region to most connected region
    con_dict = OrderedDict()
    orig_con_dict = OrderedDict()
    if orig_dict != None:
        # Getting the original averages.
        for key, region in orig_dict.iteritems():
            tmp_x = []
            tmp_y = []
            y_vals = []
            for j in range(len(region)):
                y_vals.append(region[j])
            y_vals = np.array(y_vals)
            x_avg = np.mean(y_vals[:, 0])
            y_avg = np.mean(y_vals[:, 1])
            z_avg = np.mean(y_vals[:, 2])
            orig_avg_dict[key] = [x_avg, y_avg, z_avg]
        # Getting connectivity for original points: for each region, find
        # the other region whose centroid is closest.
        for key, avg in orig_avg_dict.iteritems():
            min_key = ''
            min_diff = float('inf')
            for key2, avg2 in orig_avg_dict.iteritems():
                if key2 == key:
                    continue
                avg_np = np.array(avg)
                avg2_np = np.array(avg2)
                diff = np.linalg.norm(avg_np - avg2_np)
                if diff < min_diff:
                    min_diff = diff
                    min_key = key2
            orig_con_dict[float(key)] = [float(min_key), min_diff]
    # Getting the average first eigenvector components for each region.
    for key, region in eig_dict.iteritems():
        y_vals = []
        for j in range(len(region)):
            y_vals.append(region[j])
        y_vals = np.array(y_vals)
        x_avg = np.mean(y_vals[:, 0])
        y_avg = np.mean(y_vals[:, 1])
        z_avg = np.mean(y_vals[:, 2])
        avg_dict[key] = [x_avg, y_avg, z_avg]
    # Computing connectivity between regions using the distance between
    # averages in the embedded space.
    for key, avg in avg_dict.iteritems():
        min_key = ''
        min_diff = float('inf')
        for key2, avg2 in avg_dict.iteritems():
            if key2 == key:
                continue
            avg_np = np.array(avg)
            avg2_np = np.array(avg2)
            diff = np.linalg.norm(avg_np - avg2_np)
            if diff < min_diff:
                min_diff = diff
                min_key = key2
        con_dict[float(key)] = [float(min_key), min_diff]
    con_dict = OrderedDict(sorted(con_dict.items()))
    orig_con_dict = OrderedDict(sorted(orig_con_dict.items()))
    g = create_connectivity_graph(orig_avg_dict, avg_dict, max_dist)
    if orig_dict == None:
        return con_dict
    else:
        return con_dict, orig_con_dict, g
class SparseMatrix:
    """Sparse 3-D point map from (x, y, z) coordinate tuples to values.

    Backed by a plain dict, so storage is proportional to the number of
    points actually added rather than to x * y * z.
    """

    def __init__(self, x, y, z):
        """Create an empty matrix with the given nominal dimensions.

        Bug fix: the dimensions were previously bound to local variables
        and silently discarded; they are now kept as attributes.
        """
        self.x_dim = x
        self.y_dim = y
        self.z_dim = z
        # coordinate tuple -> stored value
        self._vector = {}

    def add(self, index, value):
        """Store *value* at coordinate tuple *index* (overwrites)."""
        self._vector[index] = value

    def get(self, index):
        """Return the value at *index*, or -1 when the entry is absent."""
        # The matrix is sparse, so a missing key means "no data" (-1).
        if index in self._vector:
            return self._vector[index]
        return -1

    def get_sparse_matrix(self):
        """Return the underlying {coordinate: value} dict."""
        return self._vector

    def __str__(self):
        return str(self._vector)
def plot_con_mat(con_adj_mat, output_path=None, show=False):
    """Render a region-connectivity adjacency matrix as a plotly heatmap.

    :param con_adj_mat: square matrix of region-to-region weights
    :param output_path: if given, write a standalone HTML plot there
    :param show: if True, display inline (requires notebook mode)
    :return: the plotly Figure
    """
    title = 'Connectivity Heatmap'
    data = [
        Heatmap(
            z = con_adj_mat,
        )
    ]
    layout = Layout(
        title = title,
        xaxis=dict(title='region'),
        yaxis=dict(title='region')
    )
    fig = Figure(data=data, layout=layout)
    if show:
        iplot(fig)
    if output_path != None:
        plotly.offline.plot(fig, filename=output_path)
    return fig
ar7z1/ansible | test/units/vars/test_module_response_deepcopy.py | 118 | 1473 | # -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.vars.clean import module_response_deepcopy
import pytest
def test_module_response_deepcopy_basic():
    """Scalars copy to an equal value."""
    x = 42
    y = module_response_deepcopy(x)
    assert y == x


def test_module_response_deepcopy_atomic():
    """Atomic/immutable values are returned as the same object (no copy)."""
    tests = [None, 42, 2**100, 3.14, True, False, 1j,
             "hello", u"hello\u1234"]
    for x in tests:
        assert module_response_deepcopy(x) is x


def test_module_response_deepcopy_list():
    """Lists are deep-copied: equal contents, distinct objects."""
    x = [[1, 2], 3]
    y = module_response_deepcopy(x)
    assert y == x
    assert x is not y
    assert x[0] is not y[0]


def test_module_response_deepcopy_empty_tuple():
    """The empty tuple needs no copy and is returned as-is."""
    x = ()
    y = module_response_deepcopy(x)
    assert x is y


@pytest.mark.skip(reason='No current support for this situation')
def test_module_response_deepcopy_tuple():
    """Tuples containing mutables should be deep-copied (unsupported today)."""
    x = ([1, 2], 3)
    y = module_response_deepcopy(x)
    assert y == x
    assert x is not y
    assert x[0] is not y[0]


def test_module_response_deepcopy_tuple_of_immutables():
    """Tuples holding only immutables need no copy and are returned as-is."""
    x = ((1, 2), 3)
    y = module_response_deepcopy(x)
    assert x is y


def test_module_response_deepcopy_dict():
    """Dicts are deep-copied: equal contents, distinct objects."""
    x = {"foo": [1, 2], "bar": 3}
    y = module_response_deepcopy(x)
    assert y == x
    assert x is not y
    assert x["foo"] is not y["foo"]
| gpl-3.0 |
sanketloke/scikit-learn | sklearn/linear_model/tests/test_omp.py | 272 | 7752 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
# Shared module-level fixture: a sparse-coded signal with known ground-truth
# coefficients (gamma), reused by every test below.
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
                                       n_nonzero_coefs, random_state=0)
# Precomputed Gram matrix and X^T y for the *_gram code paths.
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
    """Coefficient shapes follow the dimensionality of y."""
    assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
                 (n_features,))
    assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
                 (n_features, 3))


def test_correct_shapes_gram():
    """The Gram variant mirrors the shape behaviour of orthogonal_mp."""
    assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
                 (n_features,))
    assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
                 (n_features, 3))


def test_n_nonzero_coefs():
    """The solution never uses more atoms than requested."""
    assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
                                               n_nonzero_coefs=5)) <= 5)
    assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
                                               precompute=True)) <= 5)


def test_tol():
    """Tolerance-based stopping bounds the squared reconstruction error."""
    tol = 0.5
    gamma = orthogonal_mp(X, y[:, 0], tol=tol)
    gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
    assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
    assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)


def test_with_without_gram():
    """Precomputing the Gram matrix does not change the solution."""
    assert_array_almost_equal(
        orthogonal_mp(X, y, n_nonzero_coefs=5),
        orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))


def test_with_without_gram_tol():
    """Gram precomputation also agrees under tolerance-based stopping."""
    assert_array_almost_equal(
        orthogonal_mp(X, y, tol=1.),
        orthogonal_mp(X, y, tol=1., precompute=True))


def test_unreachable_accuracy():
    """tol=0 saturates at n_features atoms (warning on the Gram path)."""
    assert_array_almost_equal(
        orthogonal_mp(X, y, tol=0),
        orthogonal_mp(X, y, n_nonzero_coefs=n_features))
    assert_array_almost_equal(
        assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
                     precompute=True),
        orthogonal_mp(X, y, precompute=True,
                      n_nonzero_coefs=n_features))


def test_bad_input():
    """Negative tolerance and out-of-range coefficient counts raise."""
    assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
    assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
    assert_raises(ValueError, orthogonal_mp, X, y,
                  n_nonzero_coefs=n_features + 1)
    assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
    assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
    assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
                  n_nonzero_coefs=n_features + 1)


def test_perfect_signal_recovery():
    """OMP recovers the exact support and coefficients of the true signal."""
    idx, = gamma[:, 0].nonzero()
    gamma_rec = orthogonal_mp(X, y[:, 0], 5)
    gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
    assert_array_equal(idx, np.flatnonzero(gamma_rec))
    assert_array_equal(idx, np.flatnonzero(gamma_gram))
    assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
    assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
    """Shapes, intercepts and sparsity of the fitted OMP estimator."""
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)

    # Default settings: intercept is fitted, so it has a per-target shape.
    for target, coef_shape, icpt_shape, max_nnz in (
            (y[:, 0], (n_features,), (), n_nonzero_coefs),
            (y, (n_targets, n_features), (n_targets,),
             n_targets * n_nonzero_coefs)):
        omp.fit(X, target)
        assert_equal(omp.coef_.shape, coef_shape)
        assert_equal(omp.intercept_.shape, icpt_shape)
        assert_true(np.count_nonzero(omp.coef_) <= max_nnz)

    # Without intercept fitting / normalization the intercept is exactly 0.
    omp.set_params(fit_intercept=False, normalize=False)
    for target, coef_shape, max_nnz in (
            (y[:, 0], (n_features,), n_nonzero_coefs),
            (y, (n_targets, n_features), n_targets * n_nonzero_coefs)):
        omp.fit(X, target)
        assert_equal(omp.coef_.shape, coef_shape)
        assert_equal(omp.intercept_, 0)
        assert_true(np.count_nonzero(omp.coef_) <= max_nnz)
def test_identical_regressors():
    """Duplicated design columns should trigger a RuntimeWarning."""
    X_dup = X.copy()
    X_dup[:, 1] = X_dup[:, 0]
    true_coef = np.zeros(n_features)
    true_coef[0] = true_coef[1] = 1.
    target = np.dot(X_dup, true_coef)
    assert_warns(RuntimeWarning, orthogonal_mp, X_dup, target, 2)
def test_swapped_regressors():
    """Column swapping inside the solver must not corrupt the support."""
    true_coef = np.zeros(n_features)
    # X[:, 21] should be selected first, then X[:, 0] selected second,
    # which will take X[:, 21]'s place in case the algorithm does
    # column swapping for optimization (which is the case at the moment)
    true_coef[21] = 1.0
    true_coef[0] = 0.5
    new_y = np.dot(X, true_coef)
    new_Xy = np.dot(X.T, new_y)
    for recovered in (orthogonal_mp(X, new_y, 2),
                      orthogonal_mp_gram(G, new_Xy, 2)):
        assert_array_equal(np.flatnonzero(recovered), [0, 21])
def test_no_atoms():
    """An all-zero target must yield an all-zero solution."""
    y_empty = np.zeros_like(y)
    Xy_empty = np.dot(X.T, y_empty)
    for first_arg, second_arg in ((X, y_empty), (G, Xy_empty)):
        solution = ignore_warnings(orthogonal_mp)(first_arg, second_arg, 1)
        assert_equal(np.all(solution == 0), True)
def test_omp_path():
    """The last entry of the coefficient path equals the final solution."""
    for solver, first_arg, second_arg in ((orthogonal_mp, X, y),
                                          (orthogonal_mp_gram, G, Xy)):
        path = solver(first_arg, second_arg, n_nonzero_coefs=5,
                      return_path=True)
        last = solver(first_arg, second_arg, n_nonzero_coefs=5,
                      return_path=False)
        assert_equal(path.shape, (n_features, n_targets, 5))
        assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
    """return_path also works together with precompute=True."""
    path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
                         precompute=True)
    last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
                         precompute=True)
    assert_equal(path.shape, (n_features, n_targets, 5))
    assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
    """Cross-validated OMP finds the true sparsity and matches plain OMP."""
    target = y[:, 0]
    true_coef = gamma[:, 0]
    ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
                                        max_iter=10, cv=5)
    ompcv.fit(X, target)
    assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
    assert_array_almost_equal(ompcv.coef_, true_coef)

    # A plain OMP fit with the CV-selected sparsity must agree.
    omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
                                    n_nonzero_coefs=ompcv.n_nonzero_coefs_)
    omp.fit(X, target)
    assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
    """With n_nonzero_coefs == n_features OMP degenerates to least squares."""
    # Use small simple data; it's a sanity check but OMP can stop early
    rng = check_random_state(0)
    n_samples, n_features = 10, 8
    n_targets = 3
    design = rng.randn(n_samples, n_features)
    targets = rng.randn(n_samples, n_targets)
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
    lstsq = LinearRegression()
    omp.fit(design, targets)
    lstsq.fit(design, targets)
    assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
sushifant/namebench | tools/add_linkcount_and_version_to_csv.py | 174 | 1351 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add link count and version to csv"""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import csv
import check_nameserver_popularity
import sys
# Flat script body: for each nameserver IP (first CSV column), insert the
# number of web links referencing it before the last column and append the
# server's reported version string, writing the result to output.csv.
reader = csv.reader(open(sys.argv[1]))
writer = csv.writer(open('output.csv', 'w'))
# NOTE(review): sys.path is extended after the imports at the top of the
# file succeeded; the modules below live one directory up -- presumably this
# script is run from the tools/ directory. Confirm.
sys.path.append('..')
#sys.path.append('/Users/tstromberg/namebench')
import third_party
from libnamebench import addr_util
from libnamebench import nameserver
for row in reader:
  ip = row[0]
  ns = nameserver.NameServer(ip)
  # Short timeouts: we only need the version answer, not full health checks.
  ns.timeout = 0.5
  ns.health_timeout = 0.5
  try:
    link_count = len(check_nameserver_popularity.GetUrls(ip))
  except:
    # Best-effort: a failed popularity lookup leaves the column blank.
    link_count = ''
  row.insert(-1, link_count)
  row.append(ns.version or '')
  print "%s: %s" % (ip, ns.version)
  writer.writerow(row)
| apache-2.0 |
dailab/RIOT | dist/tools/headerguards/headerguards.py | 17 | 2289 | #!/usr/bin/env python3
import os, sys
import difflib
#from string import maketrans
from io import BytesIO, TextIOWrapper
# Translation table mapping path separators and punctuation ("/", "-", ".")
# to underscores; used to turn a file path into a guard macro name.
_in = "/-."
_out = "___"
transtab = str.maketrans(_in, _out)
def path_to_guardname(filepath):
    """Return the header-guard macro name for *filepath*.

    The path is upper-cased, with "/", "-" and "." replaced by "_".
    A guard starting with "_" is reserved in C, so such names are
    prefixed with "PRIV" instead.

    The translation table is built inline (equivalent to the module-level
    ``transtab``) so the function is self-contained and independently
    testable; behavior is unchanged.
    """
    res = filepath.upper().translate(str.maketrans("/-.", "___"))
    if res.startswith("_"):
        res = "PRIV" + res
    return res
def get_guard_name(filepath):
    """Compute the guard macro for *filepath*.

    The guard is derived from the path relative to the first "include"
    directory component; when no such component exists, only the final
    path component (the file name) is used.
    """
    parts = filepath.split(os.sep)
    try:
        start = parts.index("include") + 1
    except ValueError:
        start = len(parts) - 1
    return path_to_guardname(os.path.join(*parts[start:]))
def fix_headerguard(filename):
    """Check and rewrite the include guard of *filename*.

    Prints a unified diff (original -> fixed guard names) to stdout when a
    complete #ifndef / #define / #endif guard is found.  Prints a complaint
    to stderr and returns False when the guard is missing or broken;
    otherwise returns None.
    """
    supposed = get_guard_name(filename)
    with open(filename, "r",encoding='utf-8', errors='ignore') as f:
        inlines = f.readlines()
    # Rewrite into an in-memory buffer so we can diff against the original.
    tmp = TextIOWrapper(BytesIO(), encoding="utf-8", errors="ignore")
    tmp.seek(0)
    # guard_found is a small state machine:
    #   0 = looking for "#ifndef", 1 = expecting the matching "#define",
    #   2 = inside the guard body, 3 = closing "#endif" rewritten.
    guard_found = 0
    guard_name = ""
    ifstack = 0  # depth of nested #if* blocks inside the guard
    for n, line in enumerate(inlines):
        if guard_found == 0:
            if line.startswith("#ifndef"):
                guard_found += 1
                guard_name = line[8:].rstrip()
                line = "#ifndef %s\n" % (supposed)
        elif guard_found == 1:
            if line.startswith("#define") and line[8:].rstrip() == guard_name:
                line = "#define %s\n" % (supposed)
                guard_found += 1
            else:
                # An #ifndef not immediately followed by its matching
                # #define is not a header guard -- give up.
                break
        elif guard_found == 2:
            if line.startswith("#if"):
                ifstack += 1
            elif line.startswith("#endif"):
                if ifstack > 0:
                    ifstack -= 1
                else:
                    # Outermost #endif closes the guard itself.
                    guard_found += 1
                    line = "#endif /* %s */\n" % supposed
        tmp.write(line)
    tmp.seek(0)
    if guard_found == 3:
        for line in difflib.unified_diff(inlines, tmp.readlines(), "%s" % filename, "%s" % filename):
            sys.stdout.write(line)
    else:
        print("%s: no / broken header guard" % filename, file=sys.stderr)
        return False
if __name__=="__main__":
    # Process every file named on the command line; exit non-zero when any
    # of them had a missing or broken guard.
    results = [fix_headerguard(path) for path in sys.argv[1:]]
    if False in results:
        sys.exit(1)
| lgpl-2.1 |
VirtueSecurity/aws-extender | BappModules/boto/services/sonofmmm.py | 170 | 3498 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from boto.services.service import Service
from boto.services.message import ServiceMessage
import os
import mimetypes
class SonOfMMM(Service):
    """Media-transcoding boto Service.

    Reads file keys from the input SQS queue, runs ffmpeg on each input
    file, and hands back the transcoded output (uploaded by the Service
    machinery to the configured output bucket).
    """

    def __init__(self, config_file=None):
        super(SonOfMMM, self).__init__(config_file)
        # Per-instance log file; uploaded to the output bucket on shutdown.
        self.log_file = '%s.log' % self.instance_id
        self.log_path = os.path.join(self.working_dir, self.log_file)
        boto.set_file_logger(self.name, self.log_path)
        # ffmpeg command template: either taken verbatim from config
        # ('ffmpeg_args'), or a default "-y" (overwrite) "-i" (input) form.
        if self.sd.has_option('ffmpeg_args'):
            self.command = '/usr/local/bin/ffmpeg ' + self.sd.get('ffmpeg_args')
        else:
            self.command = '/usr/local/bin/ffmpeg -y -i %s %s'
        self.output_mimetype = self.sd.get('output_mimetype')
        # Output extension comes from config or is derived from the MIME type.
        if self.sd.has_option('output_ext'):
            self.output_ext = self.sd.get('output_ext')
        else:
            self.output_ext = mimetypes.guess_extension(self.output_mimetype)
        self.output_bucket = self.sd.get_obj('output_bucket')
        self.input_bucket = self.sd.get_obj('input_bucket')
        # check to see if there are any messages queue
        # if not, create messages for all files in input_bucket
        m = self.input_queue.read(1)
        if not m:
            self.queue_files()

    def queue_files(self):
        """Enqueue one ServiceMessage per key found in the input bucket."""
        boto.log.info('Queueing files from %s' % self.input_bucket.name)
        for key in self.input_bucket:
            boto.log.info('Queueing %s' % key.name)
            m = ServiceMessage()
            if self.output_bucket:
                d = {'OutputBucket' : self.output_bucket.name}
            else:
                d = None
            m.for_key(key, d)
            self.input_queue.write(m)

    def process_file(self, in_file_name, msg):
        """Transcode one downloaded file with ffmpeg.

        Returns a list of (output_path, mimetype) tuples on success, or an
        empty list when ffmpeg exits with a non-zero status.
        """
        base, ext = os.path.splitext(in_file_name)
        out_file_name = os.path.join(self.working_dir,
                                     base+self.output_ext)
        command = self.command % (in_file_name, out_file_name)
        boto.log.info('running:\n%s' % command)
        status = self.run(command)
        if status == 0:
            return [(out_file_name, self.output_mimetype)]
        else:
            return []

    def shutdown(self):
        """Upload this instance's log to the output bucket, then shut down."""
        if os.path.isfile(self.log_path):
            if self.output_bucket:
                key = self.output_bucket.new_key(self.log_file)
                key.set_contents_from_filename(self.log_path)
        super(SonOfMMM, self).shutdown()
| mit |
tiagochiavericosta/edx-platform | common/test/acceptance/pages/lms/dashboard.py | 41 | 6349 | # -*- coding: utf-8 -*-
"""
Student dashboard page.
"""
from bok_choy.page_object import PageObject
from . import BASE_URL
class DashboardPage(PageObject):
    """
    Student dashboard, where the student can view
    courses she/he has registered for.
    """
    def __init__(self, browser):
        """Initialize the page.

        Arguments:
            browser (Browser): The browser instance.
        """
        super(DashboardPage, self).__init__(browser)
        # NOTE(review): this assigns a local that is never used -- the
        # page object's `url` attribute/property is not set here. Confirm
        # whether `self.url = ...` was intended.
        url = "{base}/dashboard".format(base=BASE_URL)

    def is_browser_on_page(self):
        # The dashboard is identified by its "my courses" section.
        return self.q(css='section.my-courses').present

    @property
    def current_courses_text(self):
        """
        This is the title label for the section of the student dashboard that
        shows all the courses that the student is enrolled in.
        The string displayed is defined in lms/templates/dashboard.html.
        """
        text_items = self.q(css='section#my-courses').text
        if len(text_items) > 0:
            return text_items[0]
        else:
            return ""

    @property
    def available_courses(self):
        """
        Return list of the names of available courses (e.g. "999 edX Demonstration Course")
        """
        def _get_course_name(el):
            return el.text

        return self.q(css='h3.course-title > a').map(_get_course_name).results

    @property
    def banner_text(self):
        """
        Return the text of the banner on top of the page, or None if
        the banner is not present.
        """
        message = self.q(css='div.wrapper-msg')
        if message.present:
            return message.text[0]
        return None

    def get_enrollment_mode(self, course_name):
        """Get the enrollment mode for a given course on the dashboard.

        Arguments:
            course_name (str): The name of the course whose mode should be retrieved.

        Returns:
            String, indicating the enrollment mode for the course corresponding to
            the provided course name.

        Raises:
            Exception, if no course with the provided name is found on the dashboard.
        """
        # Filter elements by course name, only returning the relevant course item
        course_listing = self.q(css=".course").filter(lambda el: course_name in el.text).results

        if course_listing:
            # There should only be one course listing for the provided course name.
            # Since 'ENABLE_VERIFIED_CERTIFICATES' is true in the Bok Choy settings, we
            # can expect two classes to be present on <article> elements, one being 'course'
            # and the other being the enrollment mode.
            enrollment_mode = course_listing[0].get_attribute('class').split('course ')[1]
        else:
            raise Exception("No course named {} was found on the dashboard".format(course_name))

        return enrollment_mode

    def upgrade_enrollment(self, course_name, upgrade_page):
        """Interact with the upgrade button for the course with the provided name.

        Arguments:
            course_name (str): The name of the course whose mode should be checked.
            upgrade_page (PageObject): The page to wait on after clicking the upgrade button. Importing
                the definition of PaymentAndVerificationFlow results in a circular dependency.

        Raises:
            Exception, if no enrollment corresponding to the provided course name appears
                on the dashboard.
        """
        # Filter elements by course name, only returning the relevant course item
        course_listing = self.q(css=".course").filter(lambda el: course_name in el.text).results

        if course_listing:
            # There should only be one course listing corresponding to the provided course name.
            el = course_listing[0]

            # Click the upgrade button
            el.find_element_by_css_selector('#upgrade-to-verified').click()

            upgrade_page.wait_for_page()
        else:
            raise Exception("No enrollment for {} is visible on the dashboard.".format(course_name))

    def view_course(self, course_id):
        """
        Go to the course with `course_id` (e.g. edx/Open_DemoX/edx_demo_course)
        """
        link_css = self._link_css(course_id)

        if link_css is not None:
            self.q(css=link_css).first.click()
        else:
            msg = "No links found for course {0}".format(course_id)
            self.warning(msg)

    def _link_css(self, course_id):
        """
        Return a CSS selector for the link to the course with `course_id`,
        or None if no enrolled course matches.
        """
        # Get the link hrefs for all courses
        all_links = self.q(css='a.enter-course').map(lambda el: el.get_attribute('href')).results

        # Search for the first link that matches the course id
        link_index = None
        for index in range(len(all_links)):
            if course_id in all_links[index]:
                link_index = index
                break

        if link_index is not None:
            # CSS :nth-of-type is 1-based, hence the +1.
            return "a.enter-course:nth-of-type({0})".format(link_index + 1)
        else:
            return None

    def pre_requisite_message_displayed(self):
        """
        Verify if pre-requisite course messages are being displayed.
        """
        return self.q(css='li.prerequisites > .tip').visible

    def get_course_listings(self):
        """Retrieve the list of course DOM elements"""
        return self.q(css='ul.listing-courses')

    def get_course_social_sharing_widget(self, widget_name):
        """ Retrieves the specified social sharing widget by its classification """
        return self.q(css='a.action-{}'.format(widget_name))

    def click_username_dropdown(self):
        """
        Click username dropdown.
        """
        self.q(css='.dropdown').first.click()

    @property
    def username_dropdown_link_text(self):
        """
        Return list username dropdown links.
        """
        return self.q(css='.dropdown-menu li a').text

    def click_my_profile_link(self):
        """
        Click on `Profile` link.
        """
        # NOTE(review): relies on the dropdown link order (index 1 ==
        # Profile) -- confirm against the dashboard template.
        self.q(css='.dropdown-menu li a').nth(1).click()

    def click_account_settings_link(self):
        """
        Click on `Account` link.
        """
        self.q(css='.dropdown-menu li a').nth(2).click()
| agpl-3.0 |
maartenq/ansible | lib/ansible/modules/network/cloudengine/ce_facts.py | 65 | 11122 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_facts
version_added: "2.4"
author: "wangdezhuang (@CloudEngine-Ansible)"
short_description: Gets facts about HUAWEI CloudEngine switches.
description:
- Collects facts from CloudEngine devices running the CloudEngine
operating system. Fact collection is supported over Cli
transport. This module prepends all of the base network fact keys
with C(ansible_net_<fact>). The facts module will always collect a
base set of facts from the device and can enable or disable
collection of additional facts.
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a
list of values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: '!config'
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
- name: CloudEngine facts test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Gather_subset is all"
ce_facts:
gather_subset: all
provider: "{{ cli }}"
- name: "Collect only the config facts"
ce_facts:
gather_subset: config
provider: "{{ cli }}"
- name: "Do not collect hardware facts"
ce_facts:
gather_subset: "!hardware"
provider: "{{ cli }}"
"""
RETURN = """
gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
BIOS Version:
description: The BIOS version running on the remote device
returned: always
type: str
Board Type:
description: The board type of the remote device
returned: always
type: str
CPLD1 Version:
description: The CPLD1 Version running the remote device
returned: always
type: str
CPLD2 Version:
description: The CPLD2 Version running the remote device
returned: always
type: str
MAB Version:
description: The MAB Version running the remote device
returned: always
type: str
PCB Version:
description: The PCB Version running the remote device
returned: always
type: str
hostname:
description: The hostname of the remote device
returned: always
type: str
# hardware
FAN:
description: The fan state on the device
returned: when hardware is configured
type: str
PWR:
description: The power state on the device
returned: when hardware is configured
type: str
filesystems:
description: The filesystems on the device
returned: when hardware is configured
type: str
flash_free:
description: The flash free space on the device
returned: when hardware is configured
type: str
flash_total:
description: The flash total space on the device
returned: when hardware is configured
type: str
memory_free:
description: The memory free space on the remote device
returned: when hardware is configured
type: str
memory_total:
description: The memory total space on the remote device
returned: when hardware is configured
type: str
# config
config:
description: The current system configuration on the device
returned: when config is configured
type: str
# interfaces
all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
"""
import re
from ansible.module_utils.network.cloudengine.ce import run_commands
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
class FactsBase(object):
    """Common base for fact collectors.

    Subclasses override COMMANDS with the CLI commands to run; populate()
    executes them and stores the raw output in ``self.responses`` for the
    subclass to parse into ``self.facts``.
    """

    COMMANDS = frozenset()

    def __init__(self, module):
        self.module = module
        self.responses = None
        self.facts = {}

    def populate(self):
        """Run this collector's commands and record their raw output."""
        self.responses = run_commands(self.module, list(self.COMMANDS))
class Default(FactsBase):
    """Collect always-gathered facts: component versions and the hostname."""

    COMMANDS = [
        'display version',
        'display current-configuration | include sysname'
    ]

    def populate(self):
        """ Populate method """
        super(Default, self).populate()

        data = self.responses[0]
        if data:
            version = data.split("\n")
            # Skip the banner and record "<name> <part>" -> version value.
            # NOTE(review): the fixed offsets (lines 12+, columns 1, 2, 4)
            # assume one specific 'display version' layout -- confirm this
            # holds across device models / software releases.
            tmp_version = version[11:]
            for item in tmp_version:
                tmp_item = item.split()
                tmp_key = tmp_item[1] + " " + tmp_item[2]
                self.facts[tmp_key] = tmp_item[4]

        data = self.responses[1]
        if data:
            # Extract the hostname from the "sysname <name>" config line.
            tmp_value = re.findall(r'sysname (.*)', data)
            self.facts['hostname'] = tmp_value[0]
class Config(FactsBase):
    """Collect the running system configuration as a list of lines."""

    COMMANDS = [
        'display current-configuration configuration system'
    ]

    def populate(self):
        """Store the system configuration under the 'config' fact."""
        super(Config, self).populate()
        output = self.responses[0]
        if output:
            self.facts['config'] = output.split("\n")
class Hardware(FactsBase):
    """Collect filesystem, flash, memory and per-board hardware facts."""

    COMMANDS = [
        'dir',
        'display memory',
        'display device'
    ]

    def populate(self):
        """ Populate method """
        super(Hardware, self).populate()

        data = self.responses[0]
        if data:
            # 'dir' output contains "Directory of <fs>/" and a summary line
            # of the form "<total> total (<free> free)"; commas are stripped.
            self.facts['filesystems'] = re.findall(r'^Directory of (.*)/', data)[0]
            self.facts['flash_total'] = re.findall(r'(.*) total', data)[0].replace(",", "")
            self.facts['flash_free'] = re.findall(r'total \((.*) free\)', data)[0].replace(",", "")

        data = self.responses[1]
        if data:
            memory_total = re.findall(r'Total Memory Used: (.*) Kbytes', data)[0]
            use_percent = re.findall(r'Memory Using Percentage: (.*)%', data)[0]
            # NOTE(review): this relies on Python 2 integer division; under
            # Python 3 the result is a float string. Also "Total Memory
            # Used" feeding 'memory_total' looks suspicious -- confirm the
            # CLI field semantics.
            memory_free = str(int(memory_total) - int(memory_total) * int(use_percent) / 100)
            self.facts['memory_total'] = memory_total + " Kb"
            self.facts['memory_free'] = memory_free + " Kb"

        data = self.responses[2]
        if data:
            # 'display device': board rows come with either 7 or 8 columns;
            # map the slot/board identifier to its state column.
            device_info = data.split("\n")
            tmp_device_info = device_info[4:-1]
            for item in tmp_device_info:
                tmp_item = item.split()
                if len(tmp_item) == 8:
                    self.facts[tmp_item[2]] = tmp_item[6]
                elif len(tmp_item) == 7:
                    self.facts[tmp_item[0]] = tmp_item[5]
class Interfaces(FactsBase):
    """Collect interface states, IPv4 addresses and LLDP neighbors."""

    COMMANDS = [
        'display interface brief',
        'display ip interface brief',
        'display lldp neighbor brief'
    ]

    def populate(self):
        """ Populate method"""
        interface_dict = dict()
        ipv4_addr_dict = dict()
        neighbors_dict = dict()
        super(Interfaces, self).populate()

        data = self.responses[0]
        if data:
            # NOTE(review): fixed offsets (lines 13+ here, 12+ below, 3+ for
            # LLDP) assume specific CLI output layouts -- confirm per release.
            interface_info = data.split("\n")
            tmp_interface = interface_info[12:]
            for item in tmp_interface:
                tmp_item = item.split()
                # interface name -> physical state column
                interface_dict[tmp_item[0]] = tmp_item[1]
            self.facts['interfaces'] = interface_dict

        data = self.responses[1]
        if data:
            ipv4_addr = data.split("\n")
            tmp_ipv4 = ipv4_addr[11:]
            for item in tmp_ipv4:
                tmp_item = item.split()
                # interface name -> IPv4 address column
                ipv4_addr_dict[tmp_item[0]] = tmp_item[1]
            self.facts['all_ipv4_addresses'] = ipv4_addr_dict

        data = self.responses[2]
        if data:
            neighbors = data.split("\n")
            tmp_neighbors = neighbors[2:]
            for item in tmp_neighbors:
                tmp_item = item.split()
                # local port -> neighbor system name column
                neighbors_dict[tmp_item[0]] = tmp_item[3]
            self.facts['neighbors'] = neighbors_dict
# Map each gather_subset name to its collector class; VALID_SUBSETS is the
# set of names accepted by the module's gather_subset option.
FACT_SUBSETS = dict(
    default=Default,
    hardware=Hardware,
    interfaces=Interfaces,
    config=Config,
)

VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
    """Entry point: resolve the requested subsets, collect facts, exit."""
    spec = dict(
        gather_subset=dict(default=['!config'], type='list')
    )

    spec.update(ce_argument_spec)

    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)

    gather_subset = module.params['gather_subset']

    # Split the requested names into include/exclude sets: plain names are
    # included, "!name" excludes, and "all"/"!all" expand to every subset.
    runable_subsets = set()
    exclude_subsets = set()

    for subset in gather_subset:
        if subset == 'all':
            runable_subsets.update(VALID_SUBSETS)
            continue

        if subset.startswith('!'):
            subset = subset[1:]
            if subset == 'all':
                exclude_subsets.update(VALID_SUBSETS)
                continue
            exclude = True
        else:
            exclude = False

        if subset not in VALID_SUBSETS:
            module.fail_json(msg='Bad subset')

        if exclude:
            exclude_subsets.add(subset)
        else:
            runable_subsets.add(subset)

    # When only exclusions were given, start from the full set; the
    # 'default' facts are always collected regardless.
    if not runable_subsets:
        runable_subsets.update(VALID_SUBSETS)

    runable_subsets.difference_update(exclude_subsets)
    runable_subsets.add('default')

    facts = dict()
    facts['gather_subset'] = list(runable_subsets)

    # Instantiate one collector per selected subset and merge their facts.
    instances = list()
    for key in runable_subsets:
        instances.append(FACT_SUBSETS[key](module))

    for inst in instances:
        inst.populate()
        facts.update(inst.facts)

    ansible_facts = dict()
    for key, value in iteritems(facts):
        # this is to maintain capability with nxos_facts 2.1
        if key.startswith('_'):
            ansible_facts[key[1:]] = value
        else:
            ansible_facts[key] = value

    module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
# Run the module when executed directly by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
zinid/mrim | src/protocol.py | 1 | 19357 | from mmptypes import *
import utils
import UserDict
import cStringIO
import socket
import struct
import email
from email.Utils import parsedate
# Build a value -> name map of all MRIM_CS_WP_REQUEST_PARAM* constants
# (brought into module scope by "from mmptypes import *"), plus the
# name -> value inverse map.
wp_request = {}
wp_request_reversed = {}
for k,v in [(key, locals()[key]) for key in locals().keys() if key.startswith('MRIM_CS_WP_REQUEST_PARAM')]:
    wp_request[v] = k
for k,v in wp_request.items():
    wp_request_reversed[v] = k
# Drop the loop variables so they don't leak into the module namespace.
del k,v

# All MESSAGE_FLAG_* constant values collected into one tuple.
message_flags = tuple([v for k,v in locals().items() if k.startswith('MESSAGE_FLAG')])
class MMPParsingError(Exception):
    """Raised when an MMP packet header or body cannot be packed/unpacked.

    Carries the human-readable error text and the offending packet data.
    """

    def __init__(self, text, packet):
        self.args = (text, packet)
        self.text = text
        self.packet = packet

    def __str__(self):
        # Only the message text, not the (possibly binary) packet.
        return self.text
class MMPHeader(UserDict.UserDict):
    """Dict-like MMP (Mail.Ru IM protocol) packet header.

    Built either from keyword fields (typ/dlen/seq/...) or parsed from a
    raw packed binary header; str() returns the packed binary form.
    """

    def __init__(self,typ=0,dlen=0,seq=0,fromip='0.0.0.0',fromport='0',header=''):
        UserDict.UserDict.__init__(self)
        self.header = header
        self.typ = typ
        # Wire layout: 5 uint32 (magic, proto, seq, msg, dlen), two 4-byte
        # fields (source IP and "fromport", both converted via inet_aton),
        # then 16 reserved bytes.
        self.frmt = '5I4s4s16B'
        if not self.header:
            self['magic'] = CS_MAGIC
            self['proto'] = PROTO_VERSION
            self['seq'] = seq
            self['msg'] = typ
            self['from'] = fromip
            # NOTE(review): the port travels through inet_aton/inet_ntoa as
            # if it were an IPv4 address -- looks like a protocol quirk of
            # MMP; confirm against the protocol spec.
            self['fromport'] = fromport
            self['dlen'] = dlen
            self['reserved'] = tuple([0 for i in range(16)])
        else:
            # Parse the raw binary header; re-raise struct errors as the
            # package-level parsing error with the offending bytes attached.
            try:
                unpacked_header = struct.unpack(self.frmt, self.header)
            except struct.error:
                raise MMPParsingError("Can't unpack header", self.header)
            self['magic'] = unpacked_header[0]
            self['proto'] = unpacked_header[1]
            self['seq'] = unpacked_header[2]
            self['msg'] = unpacked_header[3]
            self['dlen'] = unpacked_header[4]
            self['from'] = socket.inet_ntoa(unpacked_header[5])
            self['fromport'] = socket.inet_ntoa(unpacked_header[6])
            self['reserved'] = unpacked_header[7:]

    def __str__(self):
        # Pack from the dict fields, unless this header was built from raw
        # bytes -- then the original binary form is returned verbatim.
        if not self.header:
            try:
                new_header = struct.pack(
                    self.frmt,
                    self['magic'],
                    self['proto'],
                    self['seq'],
                    self['msg'],
                    self['dlen'],
                    socket.inet_aton(self['from']),
                    socket.inet_aton(self['fromport']),
                    *self['reserved']
                )
            except (struct.error, KeyError):
                raise MMPParsingError("Can't pack header", self)
            return new_header
        else:
            return self.header
class MMPBody(UserDict.UserDict):
def __init__(self, typ=0, dict={}, body=''):
UserDict.UserDict.__init__(self)
self.dict = dict
self.body = body
self.typ = typ
if self.body:
self.io = cStringIO.StringIO(body)
self.str2dict(body)
elif self.dict:
self.io = cStringIO.StringIO()
self.update(dict)
def __str__(self):
if self.body:
return self.body
elif self.dict:
return self.dict2str(self.dict)
else:
return ''
def str2dict(self, body):
try:
return self._str2dict(body)
except struct.error:
raise MMPParsingError("Can't unpack body", body)
def dict2str(self, dict):
try:
return self._dict2str(dict)
except (struct.error, KeyError):
raise MMPParsingError("Can't pack body", dict)
def _str2dict(self, body):
if self.typ == MRIM_CS_HELLO_ACK:
self['ping_period'] = self._read_ul()
elif self.typ == MRIM_CS_LOGIN_REJ:
self['reason'] = self._read_lps()
elif self.typ == MRIM_CS_MESSAGE:
self['flags'] = self._read_ul()
self['to'] = self._read_lps()
self['message'] = self._read_lps()
self['rtf-message'] = self.readl_lps()
elif self.typ == MRIM_CS_MESSAGE_ACK:
self['msg_id'] = self._read_ul()
self['flags'] = self._read_ul()
self['from'] = self._read_lps()
self['message'] = self._read_lps()
try:
self['rtf-message'] = self._read_lps()
except struct.error:
self['rtf-message'] = ' '
elif self.typ == MRIM_CS_MESSAGE_RECV:
self['from'] = self._read_lps()
self['msg_id'] = self._read_ul()
elif self.typ == MRIM_CS_MESSAGE_STATUS:
self['status'] = self._read_ul()
elif self.typ == MRIM_CS_USER_STATUS:
self['status'] = self._read_ul()
self['user'] = self._read_lps()
elif self.typ == MRIM_CS_LOGOUT:
self['reason'] = self._read_ul()
elif self.typ == MRIM_CS_CONNECTION_PARAMS:
self['ping_period'] = self._read_ul()
elif self.typ == MRIM_CS_ADD_CONTACT:
self['flags'] = self._read_ul()
self['group_id'] = self._read_ul()
self['email'] = self._read_lps()
self['name'] = self._read_lps()
self['phones'] = self._read_ul()
self['text'] = self._read_lps()
elif self.typ == MRIM_CS_ADD_CONTACT_ACK:
self['status'] = self._read_ul()
current_position = self.io.tell()
next_char = self.io.read(1)
if next_char:
self.io.seek(current_position)
self['contact_id'] = self._read_ul()
else:
return
elif self.typ == MRIM_CS_MODIFY_CONTACT:
self['id'] = self._read_ul()
self['flags'] = self._read_ul()
self['group_id'] = self._read_ul()
self['contact'] = self._read_lps()
self['name'] = self._read_lps()
self['phones'] = self._read_lps()
elif self.typ == MRIM_CS_MODIFY_CONTACT_ACK:
self['status'] = self._read_ul()
elif self.typ == MRIM_CS_OFFLINE_MESSAGE_ACK:
self['uidl'] = self._read_uidl()
self['message'] = self._read_lps()
elif self.typ == MRIM_CS_DELETE_OFFLINE_MESSAGE:
self['uidl'] = self._read_uidl()
elif self.typ == MRIM_CS_AUTHORIZE:
self['user'] = self._read_lps()
elif self.typ == MRIM_CS_AUTHORIZE_ACK:
self['user'] = self._read_lps()
elif self.typ == MRIM_CS_CHANGE_STATUS:
self['status'] = self._read_ul()
elif self.typ == MRIM_CS_GET_MPOP_SESSION_ACK:
self['status'] = self._read_ul()
self['session'] = self._read_lps()
elif self.typ == MRIM_CS_WP_REQUEST:
current_position = self.io.tell()
while 1:
next_char = self.io.read(1)
if next_char:
self.io.seek(current_position)
field = self._read_ul()
self[field] = self._read_lps()
current_position = self.io.tell()
else:
break
elif self.typ == MRIM_CS_ANKETA_INFO:
self['status'] = self._read_ul()
self['fields_num'] = self._read_ul()
self['max_rows'] = self._read_ul()
self['server_time'] = self._read_ul()
self['fields'] = [self._read_lps() for i in range(self['fields_num'])]
self['values'] = []
current_position = self.io.tell()
while 1:
next_char = self.io.read(1)
if next_char:
self.io.seek(current_position)
self['values'].append(tuple([self._read_lps() for i in range(self['fields_num'])]))
current_position = self.io.tell()
else:
break
elif self.typ == MRIM_CS_MAILBOX_STATUS:
self['count'] = self._read_ul()
self['sender'] = self._read_lps()
self['subject'] = self._read_lps()
self['unix_time'] = self._read_ul()
self['key'] = self._read_ul()
elif self.typ == MRIM_CS_MAILBOX_STATUS_OLD:
self['status'] = self._read_ul()
elif self.typ == MRIM_CS_CONTACT_LIST2:
self['status'] = self._read_ul()
if self['status'] == GET_CONTACTS_OK:
self['groups_number'] = self._read_ul()
self['groups_mask'] = self._read_lps()
self['contacts_mask'] = self._read_lps()
self['groups'] = [
self._read_masked_field(self['groups_mask']) \
for i in range(self['groups_number'])
]
self['contacts'] = []
while 1:
current_position = self.io.tell()
next_char = self.io.read(1)
if next_char:
self.io.seek(current_position)
self['contacts'].append(
self._read_masked_field(self['contacts_mask'])
)
else:
break
else:
self['groups_number'] = 0
self['groups_mask'] = self['contacts_mask'] = ''
self['groups'] = self['contacts'] = []
elif self.typ == MRIM_CS_LOGIN2:
self['login'] = self._read_lps()
self['password'] = self._read_lps()
self['status'] = self._read_ul()
self['user_agent'] = self._read_lps()
elif self.typ == MRIM_CS_SMS:
self['UNKNOWN'] = self._read_ul()
self['number'] = self._read_lps()
self['text'] = self._read_lps()
elif self.typ == MRIM_CS_SMS_ACK:
self['status'] = self._read_ul()
elif self.typ == MRIM_CS_USER_INFO:
current_position = self.io.tell()
while 1:
next_char = self.io.read(1)
if next_char:
self.io.seek(current_position)
field = self._read_lps()
if field == 'MESSAGES.TOTAL':
self['total'] = int(self._read_lps())
elif field == 'MESSAGES.UNREAD':
self['unread'] = int(self._read_lps())
elif field == 'MRIM.NICKNAME':
self['nickname'] = self._read_lps()
else:
self[field] = self._read_lps()
current_position = self.io.tell()
else:
break
def _dict2str(self, dict):
    """Serialize `dict` into this packet type's MRIM wire format.

    Writes each field in the order the protocol requires, using
    unsigned 32-bit ints (`_write_ul`), length-prefixed strings
    (`_write_lps`) and fixed 8-byte UIDLs (`_write_uidl`).
    Unknown packet types yield an empty body.
    NOTE: the parameter name shadows the builtin `dict`; kept for
    caller compatibility.
    """
    self.io = cStringIO.StringIO()
    if self.typ == MRIM_CS_HELLO_ACK:
        self._write_ul(dict['ping_period'])
    elif self.typ == MRIM_CS_LOGIN_REJ:
        self._write_lps(dict['reason'])
    elif self.typ == MRIM_CS_MESSAGE:
        self._write_ul(dict['flags'])
        self._write_lps(dict['to'])
        self._write_lps(dict['message'])
        self._write_lps(dict['rtf-message'])
    elif self.typ == MRIM_CS_MESSAGE_ACK:
        self._write_ul(dict['msg_id'])
        self._write_ul(dict['flags'])
        self._write_lps(dict['from'])
        self._write_lps(dict['message'])
        self._write_lps(dict['rtf-message'])
    elif self.typ == MRIM_CS_MESSAGE_RECV:
        self._write_lps(dict['from'])
        self._write_ul(dict['msg_id'])
    elif self.typ == MRIM_CS_MESSAGE_STATUS:
        self._write_ul(dict['status'])
    elif self.typ == MRIM_CS_USER_STATUS:
        self._write_ul(dict['status'])
        self._write_lps(dict['user'])
    elif self.typ == MRIM_CS_LOGOUT:
        self._write_ul(dict['reason'])
    elif self.typ == MRIM_CS_CONNECTION_PARAMS:
        self._write_ul(dict['ping_period'])
    elif self.typ == MRIM_CS_ADD_CONTACT:
        self._write_ul(dict['flags'])
        self._write_ul(dict['group_id'])
        self._write_lps(dict['email'])
        self._write_lps(dict['name'])
        self._write_lps(dict['phones'])
        self._write_lps(dict['text'])
    elif self.typ == MRIM_CS_ADD_CONTACT_ACK:
        self._write_ul(dict['status'])
        self._write_ul(dict['contact_id'])
    elif self.typ == MRIM_CS_MODIFY_CONTACT:
        self._write_ul(dict['id'])
        self._write_ul(dict['flags'])
        self._write_ul(dict['group_id'])
        self._write_lps(dict['contact'])
        self._write_lps(dict['name'])
        self._write_lps(dict['phones'])
    elif self.typ == MRIM_CS_MODIFY_CONTACT_ACK:
        self._write_ul(dict['status'])
    elif self.typ == MRIM_CS_OFFLINE_MESSAGE_ACK:
        self._write_uidl(dict['uidl'])
        self._write_lps(dict['message'])
    elif self.typ == MRIM_CS_DELETE_OFFLINE_MESSAGE:
        self._write_uidl(dict['uidl'])
    elif self.typ == MRIM_CS_AUTHORIZE:
        self._write_lps(dict['user'])
    elif self.typ == MRIM_CS_AUTHORIZE_ACK:
        self._write_lps(dict['user'])
    elif self.typ == MRIM_CS_CHANGE_STATUS:
        self._write_ul(dict['status'])
    elif self.typ == MRIM_CS_GET_MPOP_SESSION_ACK:
        self._write_ul(dict['status'])
        self._write_lps(dict['session'])
    elif self.typ == MRIM_CS_WP_REQUEST:
        # Write every search parameter except the ONLINE flag first,
        # then the ONLINE parameter last, if present.
        for k,v in [(p,s) for p,s in dict.items() if p != MRIM_CS_WP_REQUEST_PARAM_ONLINE]:
            self._write_ul(k)
            self._write_lps(v)
        if dict.has_key(MRIM_CS_WP_REQUEST_PARAM_ONLINE):
            self._write_ul(MRIM_CS_WP_REQUEST_PARAM_ONLINE)
            self._write_lps(dict[MRIM_CS_WP_REQUEST_PARAM_ONLINE])
    elif self.typ == MRIM_CS_ANKETA_INFO:
        self._write_ul(dict['status'])
        self._write_ul(dict['fields_num'])
        self._write_ul(dict['max_rows'])
        self._write_ul(dict['server_time'])
        # Column names first, then the flat list of row values.
        for field in dict['fields']:
            self._write_lps(field)
        for value in dict['values']:
            self._write_lps(value)
    elif self.typ == MRIM_CS_MAILBOX_STATUS:
        self._write_ul(dict['status'])
    elif self.typ == MRIM_CS_LOGIN2:
        self._write_lps(dict['login'])
        self._write_lps(dict['password'])
        self._write_ul(dict['status'])
        self._write_lps(dict['user_agent'])
    elif self.typ == MRIM_CS_SMS:
        self._write_ul(dict['UNKNOWN'])
        self._write_lps(dict['number'])
        self._write_lps(dict['text'])
    self.io.seek(0)
    return self.io.read()
def _read_ul(self):
    """Read an unsigned 32-bit integer (native byte order) from the stream."""
    raw = self.io.read(4)
    (value,) = struct.unpack('I', raw)
    return value
def _read_lps(self):
    """Read a length-prefixed string: a UL32 byte count, then that many bytes."""
    length = self._read_ul()
    return self.io.read(length)
def _read_uidl(self):
    """Read a UIDL: a fixed 8-byte offline-message identifier."""
    data = self.io.read(8)
    return data
def _write_ul(self, ul):
    """Write `ul` as an unsigned 32-bit integer (native byte order)."""
    packed = struct.pack('I', ul)
    self.io.write(packed)
def _write_lps(self, lps):
    """Write `lps` as a length-prefixed string (UL32 count + raw bytes)."""
    self._write_ul(len(lps))
    self.io.write(lps)
def _write_uidl(self, uidl):
    """Write the first 8 bytes of `uidl` (fixed-size UIDL field)."""
    truncated = uidl[:8]
    self.io.write(truncated)
def _read_masked_field(self, mask):
    """Read one record described by `mask`.

    Each character of `mask` selects a reader: 'u' -> unsigned 32-bit
    int, 's' -> length-prefixed string.  Other characters are skipped.
    Returns the values as a tuple, in mask order.
    """
    readers = {'u': self._read_ul, 's': self._read_lps}
    values = []
    for symbol in mask:
        reader = readers.get(symbol)
        if reader is not None:
            values.append(reader())
    return tuple(values)
class MMPPacket:
    """A complete MRIM protocol packet: an MMPHeader plus an MMPBody.

    Built either by parsing raw bytes (`packet`) or from a packet type
    plus a dictionary of body fields.
    """
    def __init__(self,typ=0,seq=0,fromip='0.0.0.0',fromport='0',dict=None,packet=''):
        # `dict` shadows the builtin; the name is kept so keyword callers
        # still work.  Default is None to avoid the shared mutable-default
        # pitfall.
        if dict is None:
            dict = {}
        self.header = ''
        self.body = ''
        self.typ = typ
        if packet:
            # Parse mode: the first 44 bytes are the header; check magic.
            raw_header = packet[:44]
            try:
                magic = struct.unpack('I', raw_header[:4])[0]
            except (struct.error, TypeError):
                # Short or malformed header data: treat as a bad magic.
                magic = 0
            if magic == CS_MAGIC:
                self.header = MMPHeader(header=raw_header)
                if self.header:
                    self.typ = self.header['msg']
                    dlen = self.header['dlen']
                    self.body = MMPBody(typ=self.typ,body=packet[44:44+dlen])
        else:
            # Build mode: serialize the body first to learn its length.
            self.body = MMPBody(self.typ,dict)
            dlen = len(self.body.__str__())
            self.header = MMPHeader(self.typ,dlen,seq,fromip,fromport)
            self.setHeaderAttr('seq', utils.seq())
    def __str__(self):
        """Full wire representation: header bytes followed by body bytes."""
        return self.header.__str__() + self.body.__str__()
    def getRawVersion(self):
        """Raw packed protocol version from the header."""
        return self.header['proto']
    def getVersion(self):
        """Protocol version as a 'major.minor' string."""
        p = self.getRawVersion()
        return '%s.%s' % (utils.get_proto_major(p), utils.get_proto_minor(p))
    def getType(self):
        """Packet type (the header's 'msg' field)."""
        return self.header['msg']
    def getHeader(self):
        return self.header
    def getBody(self):
        return self.body
    def getBodyAttr(self, attr):
        return self.body[attr]
    def getHeaderAttr(self, attr):
        return self.header[attr]
    def setHeaderAttr(self, attr, val):
        self.header[attr] = val
    def setBodyAttr(self, attr, val):
        # Rebuild the body from its updated dict and refresh the header's
        # declared body length so the serialized packet stays consistent.
        self.body[attr] = val
        self.body = MMPBody(self.getType(),dict=self.body)
        self.setHeaderAttr('dlen', len(self.body.__str__()))
    def setIp(self, ip):
        self.setHeaderAttr('from', ip)
    def setPort(self, port):
        self.setHeaderAttr('fromport', port)
    def setType(self, new_typ):
        # Bug fix: was `self.setHeaderAttr['msg'] = new_typ`, which
        # subscripts the bound method and raises TypeError at runtime.
        self.setHeaderAttr('msg', new_typ)
    def setId(self, _id):
        self.setHeaderAttr('seq', _id)
    def getId(self):
        return self.getHeaderAttr('seq')
    def setMsgId(self, msg_id):
        self.setBodyAttr('msg_id', msg_id)
    def getMsgId(self):
        # Returns None implicitly when the body has no 'msg_id' field.
        # (`in` replaces the Python-2-only dict.has_key().)
        if 'msg_id' in self.getBody():
            return self.getBodyAttr('msg_id')
class Message(MMPPacket):
    """Convenience wrapper for MRIM_CS_MESSAGE packets.

    Either builds a fresh outgoing message from `to`/`body`/`flags`, or
    re-wraps an already-parsed packet passed as `payload`.
    """
    def __init__(self,to='',body=' ',flags=(),payload=None):
        # `flags` defaults to an immutable tuple (was a shared mutable
        # list default); any iterable of flag constants is accepted.
        if not payload:
            d = {}
            # Combine the flag bits, masked to the user-settable set.
            d['flags'] = sum(flags) & MESSAGE_USERFLAGS_MASK
            d['to'] = to
            d['message'] = body
            if MESSAGE_FLAG_RTF in flags:
                d['rtf-message'] = utils.pack_rtf(body)
            else:
                d['rtf-message'] = ' '
            MMPPacket.__init__(self,typ=MRIM_CS_MESSAGE,dict=d)
            self.setHeaderAttr('seq', utils.seq())
        else:
            MMPPacket.__init__(self,typ=payload.getType(),dict=payload.getBody())
    def getTo(self):
        """Recipient address (the 'to' body field)."""
        return self.getBodyAttr('to')
    def getFrom(self):
        """Sender address (the 'from' body field)."""
        return self.getBodyAttr('from')
    def getBodyPayload(self):
        """Message text decoded via utils.win2str."""
        return utils.win2str(self.getBodyAttr('message'))
    def getFlags(self):
        """Return the individual flag bits set on this message."""
        flag_code = self.getBodyAttr('flags')
        return [flag_code & f for f in message_flags if flag_code & f]
    def hasFlag(self, flag):
        """True if `flag` is set on this message."""
        return flag in self.getFlags()
class OfflineMessage(UserDict.UserDict):
    """Parse an RFC822-style offline-message blob fetched from the server.

    Exposes the parsed fields dict-style ('from', 'date', 'subject',
    'flags', 'version', 'message', 'rtf-message').
    """
    def __init__(self, data):
        UserDict.UserDict.__init__(self)
        self.msg = email.message_from_string(data)
        self.boundary = self.msg['Boundary']
        # Payload parts are separated by the MIME-style closing boundary:
        # part 0 is plain text, part 1 is the RTF variant.
        self.payload = self.msg.get_payload().split('--%s--' % self.boundary)
        self['from'] = self.msg['From']
        self['date'] = parsedate(self.msg['Date'])
        self['subject'] = self.msg['Subject']
        # Security/bug fix: parse the hex flag value with int(..., 16)
        # instead of eval() on server-supplied text.
        self['flags'] = int(self.msg['X-MRIM-Flags'], 16)
        self['version'] = self.msg['Version']
        self['message'] = utils.win2str(self.payload[0].strip())
        self['rtf-message'] = self.payload[1].strip()
    def buildMessage(self):
        """Re-wrap this offline message as a regular incoming Message."""
        d = {
            'msg_id':0,
            'flags':self['flags'],
            'from':self['from'],
            'message':self.payload[0].strip(),
            'rtf-message':self['rtf-message']
        }
        m = MMPPacket(typ=MRIM_CS_MESSAGE_ACK,dict=d)
        return Message(payload=m)
    def getUTCTime(self):
        """The message date converted from Moscow time to UTC."""
        return utils.msk2utc(self['date'])
class Anketa(MMPPacket):
    """Parsed MRIM anketa (profile search) response packet."""
    def __init__(self, data):
        MMPPacket.__init__(self,packet=data)
    def getStatus(self):
        """Search status code from the response body."""
        return self.getBodyAttr('status')
    def getFields(self):
        """Column names of the result table."""
        return self.getBodyAttr('fields')
    def getVCards(self):
        """Return one dict per result row, mapping field name to text.

        Values are decoded with utils.win2str.
        """
        fields = self.getFields()
        column_count = self.getBodyAttr('fields_num')
        vcards = []
        for row in self.getBodyAttr('values'):
            vcards.append(dict(
                (fields[n], utils.win2str(row[n]))
                for n in range(column_count)
            ))
        return vcards
class ContactList:
    """In-memory view of a user's MRIM contact list.

    Built empty or from a MRIM_CS_CONTACT_LIST2 packet.  Contacts are
    keyed by e-mail address, or by phone number for SMS-only entries;
    `cids` maps that key to the numeric contact id (ids start at 20, in
    packet order).
    """
    def __init__(self, packet=None):
        self.cids = {}
        self.users = {}
        # Bug fix: this was `self.group = {}` (typo), which left
        # `self.groups` undefined whenever no packet was supplied.
        self.groups = {}
        if packet:
            self.packet = packet
            self.users = self.getUsers()
            self.groups = self.getGroups()
            i = 0
            for u in self.packet.getBodyAttr('contacts'):
                _id = 20+i
                if (u[0] & CONTACT_FLAG_SMS):
                    # SMS-only contacts are keyed by phone (field 6).
                    self.cids[u[6]] = _id
                else:
                    self.cids[u[2]] = _id
                i += 1
    def getGroups(self):
        """Map group id -> {'name': decoded group name} from the packet."""
        d = {}
        for g in self.packet.getBodyAttr('groups'):
            d[g[0]] = {'name':utils.win2str(g[1])}
        return d
    def getUsers(self):
        """Map contact key (email or phone) -> contact attribute dict."""
        d = {}
        for u in self.packet.getBodyAttr('contacts'):
            contact = {
                'flags':u[0],
                'group':u[1],
                'nick':utils.win2str(u[3]),
                'server_flags':u[4],
                'status':u[5],
                'phones':u[6]
            }
            if (u[0] & CONTACT_FLAG_SMS):
                d[u[6]] = contact
            else:
                d[u[2]] = contact
        return d
    def getEmails(self):
        """All contact keys (emails / SMS numbers)."""
        return self.users.keys()
    def getUserFlags(self, mail):
        return self.users[mail]['flags']
    def isValidUser(self, mail):
        """True unless the contact is ignored, removed, or SMS-only."""
        return not (self.isIgnoredUser(mail) or self.isRemovedUser(mail) or self.isSMSNumber(mail))
    def isIgnoredUser(self, mail):
        flags = self.getUserFlags(mail)
        return bool(flags & CONTACT_FLAG_IGNORE)
    def isRemovedUser(self, mail):
        flags = self.getUserFlags(mail)
        return bool(flags & CONTACT_FLAG_REMOVED)
    def isSMSNumber(self, phone):
        # Anything that is not a valid e-mail is treated as an SMS number.
        return not utils.is_valid_email(phone)
    def getUserId(self, mail):
        return self.cids[mail]
    def setUserId(self, mail, _id):
        self.cids[mail] = _id
    def getUserStatus(self, mail):
        # Non-email keys (SMS numbers) always report status 1.
        status = 1
        if utils.is_valid_email(mail):
            status = self.users[mail]['status']
        return status
    def setUserStatus(self, mail, status):
        self.users[mail]['status'] = status
    def getAuthFlag(self, mail):
        return self.users[mail]['server_flags']
    def setAuthFlag(self, mail, flag):
        self.users[mail]['server_flags'] = flag
    def isAuthorized(self, mail):
        # Bit 0 of server_flags set means "awaiting authorization".
        return not bool(self.getAuthFlag(mail) & 0x1)
    def getUserGroup(self, mail):
        return self.users[mail]['group']
    def setUserGroup(self, mail, gid):
        self.users[mail]['group'] = gid
    def getUserNick(self, mail):
        return self.users[mail]['nick']
    def setUserNick(self, mail, nick):
        self.users[mail]['nick'] = nick
    def delUser(self, mail):
        return self.users.pop(mail)
    def delGroup(self, gid):
        return self.groups.pop(gid)
    def getGroupName(self, gid):
        # NOTE(review): for packet-built lists this returns the stored
        # {'name': ...} dict, while setGroupName stores the bare name --
        # pre-existing inconsistency, preserved for caller compatibility.
        name = 'unknown'
        try:
            name = self.groups[gid]
        except KeyError:
            pass
        return name
    def setGroupName(self, gid, name):
        self.groups[gid] = name
    def getGroupMembers(self, gid):
        """All contact keys whose 'group' field equals `gid`."""
        members = []
        for u in self.users:
            if self.getUserGroup(u) == gid:
                members.append(u)
        return members
    def getPhones(self, mail):
        """Contact's phone numbers as a list (empty when none stored)."""
        phones = self.users[mail]['phones']
        if phones:
            return phones.split(',')
        else:
            return []
    def setPhones(self, mail, phones):
        # The protocol stores at most three comma-separated numbers.
        self.users[mail]['phones'] = ','.join(phones[:3])
| gpl-3.0 |
shubhdev/edx-platform | lms/djangoapps/courseware/features/lti.py | 49 | 14242 | # pylint: disable=missing-docstring
import datetime
import os
import pytz
from django.conf import settings
from mock import patch
from pytz import UTC
from splinter.exceptions import ElementDoesNotExist
from selenium.common.exceptions import NoAlertPresentException
from nose.tools import assert_true, assert_equal, assert_in, assert_is_none
from lettuce import world, step
from courseware.tests.factories import InstructorFactory, BetaTesterFactory
from courseware.access import has_access
from student.tests.factories import UserFactory
from common import visit_scenario_item
TEST_COURSE_NAME = "test_course_a"
@step('I view the LTI and error is shown$')
def lti_is_not_rendered(_step):
    """Verify the LTI failed to render: an error message is shown and
    neither the iframe nor the new-window link is on the page."""
    assert world.is_css_present('.error_message', wait_time=0)
    for selector in ('iframe', '.link_lti_new_window'):
        # Neither the inline frame nor the launch link may be present.
        assert not world.is_css_present(selector, wait_time=0)
def check_lti_iframe_content(text):
    """Assert that the LTI provider iframe displays exactly `text`.

    Consistency fix: reuse the shared get_lti_frame_name() helper
    instead of duplicating the 'ltiFrame-<location>' construction.
    """
    with world.browser.get_iframe(get_lti_frame_name()) as iframe:
        # The iframe context lacks the helpers from terrain/ui_helpers.py,
        # so use splinter's raw API directly.
        assert iframe.is_element_present_by_css('.result', wait_time=0)
        result = world.retry_on_exception(
            lambda: iframe.find_by_css('.result')[0].text,
            max_attempts=5
        )
        assert text == result
@step('I view the LTI and it is rendered in (.*)$')
def lti_is_rendered(_step, rendered_in):
    """Assert the LTI rendered either inline ('iframe') or as a link
    that opens a popup ('new page'), with the expected tool content."""
    if rendered_in.strip() == 'iframe':
        world.wait_for_present('iframe')
        assert world.is_css_present('iframe', wait_time=2)
        assert not world.is_css_present('.link_lti_new_window', wait_time=0)
        assert not world.is_css_present('.error_message', wait_time=0)
        # iframe is visible
        assert world.css_visible('iframe')
        check_lti_iframe_content("This is LTI tool. Success.")
    elif rendered_in.strip() == 'new page':
        assert not world.is_css_present('iframe', wait_time=2)
        assert world.is_css_present('.link_lti_new_window', wait_time=0)
        assert not world.is_css_present('.error_message', wait_time=0)
        click_and_check_lti_popup()
    else:  # incorrect rendered_in parameter
        assert False
@step('I view the permission alert$')
def view_lti_permission_alert(_step):
    """Click the LTI launch link and verify a permission alert appears
    without a new window having been opened."""
    # Only the launch link should be rendered -- no iframe, no error.
    assert not world.is_css_present('iframe', wait_time=2)
    assert world.is_css_present('.link_lti_new_window', wait_time=0)
    assert not world.is_css_present('.error_message', wait_time=0)
    world.css_find('.link_lti_new_window').first.click()
    assert world.browser.get_alert() is not None
    # The alert blocks the popup: still exactly one window.
    assert len(world.browser.windows) == 1
def check_no_alert():
    """
    Make sure the alert has gone away.

    Note that the splinter documentation indicates that get_alert should
    return None if no alert is present; in practice a
    NoAlertPresentException can be raised instead, which also counts as
    "no alert".
    """
    try:
        alert = world.browser.get_alert()
    except NoAlertPresentException:
        return
    assert_is_none(alert)
@step('I accept the permission alert and view the LTI$')
def accept_lti_permission_alert(_step):
    """Accept the pending permission alert and verify the LTI popup
    window opens and shows the tool content."""
    parent_window = world.browser.current_window  # Save the parent window
    # To start with you should only have one window/tab
    assert len(world.browser.windows) == 1
    alert = world.browser.get_alert()
    alert.accept()
    check_no_alert()
    # Give it a few seconds for the LTI window to appear
    world.wait_for(
        lambda _: len(world.browser.windows) == 2,
        timeout=5,
        timeout_msg="Timed out waiting for the LTI window to appear."
    )
    # Verify the LTI window
    check_lti_popup(parent_window)
@step('I reject the permission alert and do not view the LTI$')
def reject_lti_permission_alert(_step):
    """Dismiss the permission alert and verify no LTI window opened."""
    world.browser.get_alert().dismiss()
    check_no_alert()
    assert len(world.browser.windows) == 1
@step('I view the LTI but incorrect_signature warning is rendered$')
def incorrect_lti_is_rendered(_step):
    """The component renders inline, but the provider rejects the OAuth
    signature and shows a warning instead of the tool content."""
    assert world.is_css_present('iframe', wait_time=2)
    for absent in ('.link_lti_new_window', '.error_message'):
        assert not world.is_css_present(absent, wait_time=0)
    # inside iframe test content is presented
    check_lti_iframe_content("Wrong LTI signature")
@step('the course has correct LTI credentials with registered (.*)$')
def set_correct_lti_passport(_step, user='Instructor'):
    """Create the test course with a valid LTI passport and register `user`."""
    passports = ["correct_lti_id:test_client_key:test_client_secret"]
    i_am_registered_for_the_course(
        TEST_COURSE_NAME, {'lti_passports': passports}, user)
@step('the course has incorrect LTI credentials$')
def set_incorrect_lti_passport(_step):
    """Create the test course with an LTI passport whose secret is wrong."""
    passports = ["test_lti_id:test_client_key:incorrect_lti_secret_key"]
    i_am_registered_for_the_course(
        TEST_COURSE_NAME, {'lti_passports': passports})
@step('the course has an LTI component with (.*) fields(?:\:)?$')  # , new_page is(.*), graded is(.*)
def add_correct_lti_to_course(_step, fields):
    """Add an LTI component to the scenario course and open it.

    `fields` selects the component configuration: 'correct',
    'incorrect_lti_id', or 'no_launch_url'.  Extra metadata may be
    supplied through the scenario's hash table.
    """
    category = 'lti'
    metadata = {
        'lti_id': 'correct_lti_id',
        'launch_url': 'http://127.0.0.1:{}/correct_lti_endpoint'.format(settings.LTI_PORT),
    }
    if fields.strip() == 'incorrect_lti_id':  # incorrect fields
        metadata.update({
            'lti_id': 'incorrect_lti_id'
        })
    elif fields.strip() == 'correct':  # correct fields
        pass
    elif fields.strip() == 'no_launch_url':
        metadata.update({
            'launch_url': u''
        })
    else:  # incorrect parameter
        assert False
    # Scenario tables override/extend the default metadata.
    if _step.hashes:
        metadata.update(_step.hashes[0])
    world.scenario_dict['LTI'] = world.ItemFactory.create(
        parent_location=world.scenario_dict['SECTION'].location,
        category=category,
        display_name='LTI',
        metadata=metadata,
    )
    setattr(world.scenario_dict['LTI'], 'TEST_BASE_PATH', '{host}:{port}'.format(
        host=world.browser.host,
        port=world.browser.port,
    ))
    visit_scenario_item('LTI')
def create_course_for_lti(course, metadata):
    """Create a fresh test course (with chapter and graded section) to
    host LTI components.

    The course is graded with a single 'Homework' assignment type so
    LTI grading scenarios have a gradebook column to check.
    """
    # First clear the modulestore so we don't try to recreate
    # the same course twice
    # This also ensures that the necessary templates are loaded
    world.clear_courses()
    weight = 0.1
    grading_policy = {
        "GRADER": [
            {
                "type": "Homework",
                "min_count": 1,
                "drop_count": 0,
                "short_label": "HW",
                "weight": weight
            },
        ]
    }
    # Create the course
    # We always use the same org and display name,
    # but vary the course identifier (e.g. 600x or 191x)
    world.scenario_dict['COURSE'] = world.CourseFactory.create(
        org='edx',
        number=course,
        display_name='Test Course',
        metadata=metadata,
        grading_policy=grading_policy,
    )
    # Add a section to the course to contain problems
    world.scenario_dict['CHAPTER'] = world.ItemFactory.create(
        parent_location=world.scenario_dict['COURSE'].location,
        category='chapter',
        display_name='Test Chapter',
    )
    world.scenario_dict['SECTION'] = world.ItemFactory.create(
        parent_location=world.scenario_dict['CHAPTER'].location,
        category='sequential',
        display_name='Test Section',
        metadata={'graded': True, 'format': 'Homework'})
@patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
def i_am_registered_for_the_course(coursenum, metadata, user='Instructor'):
    """Create the course, then enroll and log in as `user`.

    'BetaTester' gets a course that starts in the future (with
    days_early_for_beta access); any other value gets an Instructor on
    a course that started in 1970.
    """
    # Create user
    if user == 'BetaTester':
        # Create the course
        now = datetime.datetime.now(pytz.UTC)
        tomorrow = now + datetime.timedelta(days=5)
        metadata.update({'days_early_for_beta': 5, 'start': tomorrow})
        create_course_for_lti(coursenum, metadata)
        course_descriptor = world.scenario_dict['COURSE']
        # create beta tester
        user = BetaTesterFactory(course_key=course_descriptor.id)
        normal_student = UserFactory()
        instructor = InstructorFactory(course_key=course_descriptor.id)
        # Sanity-check access: only the beta tester and instructor may
        # load the not-yet-started course.
        assert not has_access(normal_student, 'load', course_descriptor)
        assert has_access(user, 'load', course_descriptor)
        assert has_access(instructor, 'load', course_descriptor)
    else:
        metadata.update({'start': datetime.datetime(1970, 1, 1, tzinfo=UTC)})
        create_course_for_lti(coursenum, metadata)
        course_descriptor = world.scenario_dict['COURSE']
        user = InstructorFactory(course_key=course_descriptor.id)
    # Enroll the user in the course and log them in
    if has_access(user, 'load', course_descriptor):
        world.enroll_user(user, course_descriptor.id)
    world.log_in(username=user.username, password='test')
def check_lti_popup(parent_window):
    """Verify the LTI popup window's title, URL, and tool content, then
    close it and return focus to `parent_window`."""
    # You should now have 2 browser windows open, the original courseware and the LTI
    windows = world.browser.windows
    assert_equal(len(windows), 2)
    # For verification, iterate through the window titles and make sure that
    # both are there.
    tabs = []
    expected_tabs = [u'LTI | Test Section | {0} Courseware | edX'.format(TEST_COURSE_NAME), u'TEST TITLE']
    for window in windows:
        world.browser.switch_to_window(window)
        tabs.append(world.browser.title)
    assert_equal(tabs, expected_tabs)  # pylint: disable=no-value-for-parameter
    # Now verify the contents of the LTI window (which is the 2nd window/tab)
    # Note: The LTI opens in a new browser window, but Selenium sticks with the
    # current window until you explicitly switch to the context of the new one.
    world.browser.switch_to_window(windows[1])
    url = world.browser.url
    basename = os.path.basename(url)
    pathname = os.path.splitext(basename)[0]
    assert_equal(pathname, u'correct_lti_endpoint')
    result = world.css_find('.result').first.text
    assert_equal(result, u'This is LTI tool. Success.')
    world.browser.driver.close()  # Close the pop-up window
    world.browser.switch_to_window(parent_window)  # Switch to the main window again
def click_and_check_lti_popup():
    """Open the LTI tool in a popup via the launch link and validate it."""
    main_window = world.browser.current_window  # remember where we came from
    world.css_find('.link_lti_new_window').first.click()
    check_lti_popup(main_window)
@step('visit the LTI component')
def visit_lti_component(_step):
    """Navigate the browser to the LTI component created for the scenario."""
    visit_scenario_item('LTI')
@step('I see LTI component (.*) with text "([^"]*)"$')
def see_elem_text(_step, elem, text):
    """Check that the named LTI sub-element displays exactly `text`."""
    selectors = {
        'progress': '.problem-progress',
        'feedback': '.problem-feedback',
        'module title': '.problem-header',
        'button': '.link_lti_new_window',
        'description': '.lti-description',
    }
    assert_in(elem, selectors)
    assert_true(world.css_has_text(selectors[elem], text))
@step('I see text "([^"]*)"$')
def check_progress(_step, text):
    """Assert that `text` appears anywhere on the current page."""
    assert world.browser.is_text_present(text)
@step('I see graph with total progress "([^"]*)"$')
def see_graph(_step, progress):
    """Find the grade-detail graph node showing the given total progress."""
    xpath = '//div[@id="{parent}"]//div[text()="{progress}"]'.format(
        parent='grade-detail-graph',
        progress=progress,
    )
    assert world.browser.find_by_xpath(xpath)
@step('I see in the gradebook table that "([^"]*)" is "([^"]*)"$')
def see_value_in_the_gradebook(_step, label, text):
    """Assert the gradebook column headed `label` contains `text`.

    Bug fix: previously, when `label` matched no header, the check
    silently fell back to column 0; a missing column now fails loudly.
    """
    table_selector = '.grade-table'
    index = None
    table_headers = world.css_find('{0} thead th'.format(table_selector))
    for i, element in enumerate(table_headers):
        if element.text.strip() == label:
            index = i
            break
    assert_true(index is not None,
                'column "{0}" not found in the gradebook'.format(label))
    assert_true(world.css_has_text('{0} tbody td'.format(table_selector), text, index=index))
@step('I submit answer to LTI (.*) question$')
def click_grade(_step, version):
    """Submit a grade via the LTI provider page (v1 XML or v2 REST).

    Consistency fix: reuse the shared get_lti_frame_name() helper
    instead of duplicating the 'ltiFrame-<location>' construction.
    """
    version_map = {
        '1': {'selector': 'submit-button', 'expected_text': 'LTI consumer (edX) responded with XML content'},
        '2': {'selector': 'submit-lti2-button', 'expected_text': 'LTI consumer (edX) responded with HTTP 200'},
    }
    assert_in(version, version_map)
    with world.browser.get_iframe(get_lti_frame_name()) as iframe:
        iframe.find_by_name(version_map[version]['selector']).first.click()
        assert iframe.is_text_present(version_map[version]['expected_text'])
@step('LTI provider deletes my grade and feedback$')
def click_delete_button(_step):
    """Press the LTI v2 delete button inside the provider iframe."""
    frame_name = get_lti_frame_name()
    with world.browser.get_iframe(frame_name) as iframe:
        iframe.find_by_name('submit-lti2-delete-button').first.click()
def get_lti_frame_name():
    """Return the id of the iframe hosting the scenario's LTI component."""
    location_id = world.scenario_dict['LTI'].location.html_id()
    return 'ltiFrame-{}'.format(location_id)
@step('I see in iframe that LTI role is (.*)$')
def check_role(_step, role):
    """Verify the role string displayed inside the LTI provider iframe.

    Consistency fix: reuse the shared get_lti_frame_name() helper
    instead of duplicating the 'ltiFrame-<location>' construction.
    """
    world.wait_for_present('iframe')
    expected_role = 'Role: ' + role
    with world.browser.get_iframe(get_lti_frame_name()) as iframe:
        actual_role = world.retry_on_exception(
            lambda: iframe.find_by_tag('h5').first.value,
            max_attempts=5,
            ignored_exceptions=ElementDoesNotExist
        )
        assert_equal(expected_role, actual_role)
@step('I switch to (.*)$')
def switch_view(_step, view):
    """Switch the staff/student preview selector to `view`, if needed."""
    current = world.css_find('#action-preview-select').first.value
    if current != view:
        world.browser.select("select", view)
        world.wait_for_ajax_complete()
        assert_equal(world.css_find('#action-preview-select').first.value, view)
@step("in the LTI component I do not see (.*)$")
def check_lti_component_no_elem(_step, text):
selector_map = {
'a launch button': '.link_lti_new_window',
'an provider iframe': '.ltiLaunchFrame',
'feedback': '.problem-feedback',
'progress': '.problem-progress',
}
assert_in(text, selector_map)
assert_true(world.is_css_not_present(selector_map[text]))
| agpl-3.0 |
hughbe/swift | tools/SourceKit/bindings/python/sourcekitd/capi.py | 60 | 17369 | # capi.py - sourcekitd Python Bindings -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from ctypes import (
CFUNCTYPE,
POINTER,
Structure,
addressof,
c_bool,
c_char_p,
c_int,
c_int64,
c_size_t,
c_uint64,
c_void_p,
cdll,
py_object,
string_at,
)
# ctypes doesn't implicitly convert c_void_p to the appropriate wrapper
# object. This is a problem, because it means that from_parameter will see an
# integer and pass the wrong value on platforms where int != void*. Work around
# this by marshalling object arguments as void**.
c_object_p = POINTER(c_void_p)

# Registry of CFUNCTYPE callback types keyed by name; populated near the
# bottom of this module and used by the *_apply_f iteration APIs.
callbacks = {}
# Structures and Utility Classes
class CachedProperty(object):
    """Decorator that lazily computes and caches a property value.

    On first access the wrapped function runs and its result is stored on
    the instance under the same attribute name, shadowing this (non-data)
    descriptor; subsequent accesses read the plain attribute directly.
    """

    def __init__(self, wrapped):
        self.wrapped = wrapped
        try:
            self.__doc__ = wrapped.__doc__
        except AttributeError:
            pass

    def __get__(self, instance, instance_type=None):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        result = self.wrapped(instance)
        setattr(instance, self.wrapped.__name__, result)
        return result
class Object(object):
    """Wrapper over a sourcekitd request object (sourcekitd_object_t).

    Converts native Python values (Object, int/bool, str, UIdent, dict,
    list/tuple) into the corresponding sourcekitd request objects and
    manages their retain/release lifetime.
    """

    def __init__(self, obj):
        import sys
        # Bug fix: the old `long = int` assignment made `long` a local
        # name for the whole function, so on Python 2 (where the branch
        # never ran) the later isinstance check raised UnboundLocalError.
        # Build the tuple per-version without assigning to `long`.
        if sys.version_info[0] >= 3:
            integral_types = (int, bool)
        else:
            integral_types = (int, long, bool)  # noqa: F821 -- py2 only
        if isinstance(obj, Object):
            self._obj = conf.lib.sourcekitd_request_retain(obj)
        elif isinstance(obj, integral_types):
            self._obj = conf.lib.sourcekitd_request_int64_create(obj)
        elif isinstance(obj, str):
            self._obj = conf.lib.sourcekitd_request_string_create(obj)
        elif isinstance(obj, UIdent):
            self._obj = conf.lib.sourcekitd_request_uid_create(obj)
        elif isinstance(obj, dict):
            self._obj = conf.lib.sourcekitd_request_dictionary_create(
                POINTER(c_void_p)(), POINTER(c_void_p)(), 0)
            # Set _as_parameter_ early: the set_value calls below pass
            # `self` before __init__ has finished.
            self._as_parameter_ = self._obj
            # Bug fix: items() works on both Python 2 and 3; the old
            # iteritems() broke on Python 3 despite the version check.
            for k, v in obj.items():
                conf.lib.sourcekitd_request_dictionary_set_value(
                    self,
                    UIdent(k), Object(v))
        elif isinstance(obj, (list, tuple)):
            self._obj = conf.lib.sourcekitd_request_array_create(
                POINTER(c_void_p)(), 0)
            self._as_parameter_ = self._obj
            for v in obj:
                conf.lib.sourcekitd_request_array_set_value(
                    self, -1, Object(v))
        else:
            raise ValueError("wrong init parameter (%s)" % type(obj))
        self._as_parameter_ = self._obj

    def from_param(self):
        return self._as_parameter_

    def __del__(self):
        # Guard with getattr: __init__ may have raised before _obj was
        # assigned, and __del__ must not raise AttributeError then.
        if getattr(self, '_obj', None):
            conf.lib.sourcekitd_request_release(self)

    def __repr__(self):
        ptr = conf.lib.sourcekitd_request_description_copy(self)
        s = string_at(ptr)
        conf.free(ptr)
        return s
class Response(object):
    """Wrapper over a sourcekitd response (sourcekitd_response_t)."""

    def __init__(self, obj):
        if isinstance(obj, c_object_p):
            self._obj = self._as_parameter_ = obj
        else:
            raise ValueError("wrong init parameter (%s)" % type(obj))

    def get_payload(self):
        """Return the response's top-level variant value."""
        return conf.lib.sourcekitd_response_get_value(self)

    def from_param(self):
        return self._as_parameter_

    def __del__(self):
        # Guard with getattr: __init__ may have raised before _obj was
        # assigned, and __del__ must not raise AttributeError then.
        if getattr(self, '_obj', None):
            conf.lib.sourcekitd_response_dispose(self)

    def __repr__(self):
        ptr = conf.lib.sourcekitd_response_description_copy(self)
        s = string_at(ptr)
        conf.free(ptr)
        return s
class UIdent(object):
    """Wrapper over a sourcekitd UID (sourcekitd_uid_t).

    UIDs are interned by sourcekitd, so equality and hashing are defined
    by the underlying pointer value.
    """

    def __init__(self, obj):
        if isinstance(obj, c_object_p):
            self._obj = obj
        elif isinstance(obj, UIdent):
            self._obj = obj._obj
        elif isinstance(obj, str):
            self._obj = conf.lib.sourcekitd_uid_get_from_cstr(obj)
        else:
            raise ValueError("wrong init parameter (%s)" % type(obj))
        self._as_parameter_ = self._obj

    def __str__(self):
        return conf.lib.sourcekitd_uid_get_string_ptr(self)

    def from_param(self):
        return self._as_parameter_

    def __repr__(self):
        return "UIdent('%s')" % self.__str__()

    def _ptr(self):
        # The raw address of the pointed-to UID, used as identity.
        return addressof(self._obj.contents)

    def __eq__(self, other):
        # `other` may be anything UIdent's constructor accepts (str, etc.).
        return self._ptr() == UIdent(other)._ptr()

    def __ne__(self, other):
        return self._ptr() != UIdent(other)._ptr()

    def __hash__(self):
        return hash(self._ptr())
class ErrorKind(object):
    """Describes the kind of a sourcekitd error response."""

    # Interned kind objects, indexed by integer value.
    _kinds = []
    _name_map = None

    def __init__(self, value):
        # Grow the registry so `value` is a valid index, then intern.
        while value >= len(ErrorKind._kinds):
            ErrorKind._kinds.append(None)
        if ErrorKind._kinds[value] is not None:
            raise ValueError('ErrorKind already loaded')
        self.value = value
        ErrorKind._kinds[value] = self
        # Invalidate the cached reverse-name map.
        ErrorKind._name_map = None

    def from_param(self):
        return self.value

    @property
    def name(self):
        """Get the enumeration name of this error kind."""
        if self._name_map is None:
            mapping = {}
            for attr, member in ErrorKind.__dict__.items():
                if isinstance(member, ErrorKind):
                    mapping[member] = attr
            self._name_map = mapping
        return self._name_map[self]

    @staticmethod
    def from_id(id):
        if id >= len(ErrorKind._kinds) or ErrorKind._kinds[id] is None:
            raise ValueError('Unknown type kind {}'.format(id))
        return ErrorKind._kinds[id]

    def __repr__(self):
        return 'ErrorKind.%s' % (self.name,)
# The known error kinds, registered by integer value.
ErrorKind.CONNECTION_INTERRUPTED = ErrorKind(1)
ErrorKind.REQUEST_INVALID = ErrorKind(2)
ErrorKind.REQUEST_FAILED = ErrorKind(3)
ErrorKind.REQUEST_CANCELLED = ErrorKind(4)
class Variant(Structure):
    """A sourcekitd_variant_t value: three 64-bit words, interpreted
    through conf.lib accessor functions."""
    _fields_ = [
        ("data", c_uint64 * 3)]

    def to_python_object(self):
        """Convert this variant to the equivalent native Python value."""
        kind = conf.lib.sourcekitd_variant_get_type(self)
        if kind == VariantType.NULL:
            return None
        if kind == VariantType.DICTIONARY:
            return self.to_python_dictionary()
        if kind == VariantType.ARRAY:
            return self.to_python_array()
        if kind == VariantType.INT64:
            return conf.lib.sourcekitd_variant_int64_get_value(self)
        if kind == VariantType.STRING:
            return conf.lib.sourcekitd_variant_string_get_ptr(self)
        if kind == VariantType.UID:
            return UIdent(conf.lib.sourcekitd_variant_uid_get_value(self))
        assert(kind == VariantType.BOOL)
        return conf.lib.sourcekitd_variant_bool_get_value(self)

    def to_python_array(self):
        """Materialize an ARRAY variant as a Python list."""
        items = []

        def collect(index, value, out):
            out.append(value.to_python_object())
            return 1  # non-zero: keep iterating

        conf.lib.sourcekitd_variant_array_apply_f(
            self, callbacks['array_applier'](collect), items)
        return items

    def to_python_dictionary(self):
        """Materialize a DICTIONARY variant as a dict keyed by UID string."""
        result = {}

        def collect(key, value, out):
            out[str(UIdent(key))] = value.to_python_object()
            return 1  # non-zero: keep iterating

        conf.lib.sourcekitd_variant_dictionary_apply_f(
            self, callbacks['dictionary_applier'](collect), result)
        return result
class VariantType(object):
    """Describes the dynamic type of a sourcekitd variant."""

    # Interned type objects, indexed by integer value.
    _kinds = []
    _name_map = None

    def __init__(self, value):
        # Grow the registry so `value` is a valid index, then intern.
        while value >= len(VariantType._kinds):
            VariantType._kinds.append(None)
        if VariantType._kinds[value] is not None:
            raise ValueError('VariantType already loaded')
        self.value = value
        VariantType._kinds[value] = self
        # Invalidate the cached reverse-name map.
        VariantType._name_map = None

    def from_param(self):
        return self.value

    @property
    def name(self):
        """Get the enumeration name of this variant type."""
        if self._name_map is None:
            mapping = {}
            for attr, member in VariantType.__dict__.items():
                if isinstance(member, VariantType):
                    mapping[member] = attr
            self._name_map = mapping
        return self._name_map[self]

    @staticmethod
    def from_id(id):
        if id >= len(VariantType._kinds) or VariantType._kinds[id] is None:
            raise ValueError('Unknown type kind {}'.format(id))
        return VariantType._kinds[id]

    def __repr__(self):
        return 'VariantType.%s' % (self.name,)
# The singleton variant kind instances, attached to the class with their
# fixed integer ids (used by to_python_object's type dispatch above).
VariantType.NULL = VariantType(0)
VariantType.DICTIONARY = VariantType(1)
VariantType.ARRAY = VariantType(2)
VariantType.INT64 = VariantType(3)
VariantType.STRING = VariantType(4)
VariantType.UID = VariantType(5)
VariantType.BOOL = VariantType(6)
# Now comes the plumbing to hook up the C library.
# Register callback types in common container.
# Array applier signature: (index, element, context) -> int; returning 1
# continues the iteration (see to_python_array).
callbacks['array_applier'] = CFUNCTYPE(c_int, c_size_t, Variant, py_object)
# Dictionary applier signature: (key uid, element, context) -> int; returning
# 1 continues the iteration (see to_python_dictionary).
callbacks['dictionary_applier'] = CFUNCTYPE(
    c_int, c_object_p, Variant, py_object)
# Functions strictly alphabetical order.
functionList = [
("sourcekitd_cancel_request",
[c_void_p]),
("sourcekitd_initialize",
None),
("sourcekitd_request_array_create",
[POINTER(c_object_p), c_size_t],
c_object_p),
("sourcekitd_request_array_set_int64",
[Object, c_size_t, c_int64]),
("sourcekitd_request_array_set_string",
[Object, c_size_t, c_char_p]),
("sourcekitd_request_array_set_stringbuf",
[Object, c_size_t, c_char_p, c_size_t]),
("sourcekitd_request_array_set_uid",
[Object, c_size_t, UIdent]),
("sourcekitd_request_array_set_value",
[Object, c_size_t, Object]),
("sourcekitd_request_create_from_yaml",
[c_char_p, POINTER(c_char_p)],
c_object_p),
("sourcekitd_request_description_copy",
[Object],
c_void_p),
("sourcekitd_request_description_dump",
[Object]),
("sourcekitd_request_dictionary_create",
[POINTER(c_object_p), POINTER(c_object_p), c_size_t],
c_object_p),
("sourcekitd_request_dictionary_set_int64",
[Object, UIdent, c_int64]),
("sourcekitd_request_dictionary_set_string",
[Object, UIdent, c_char_p]),
("sourcekitd_request_dictionary_set_stringbuf",
[Object, UIdent, c_char_p, c_size_t]),
("sourcekitd_request_dictionary_set_uid",
[Object, UIdent, UIdent]),
("sourcekitd_request_dictionary_set_value",
[Object, UIdent, Object]),
("sourcekitd_request_int64_create",
[c_int64],
c_object_p),
("sourcekitd_request_retain",
[Object],
c_object_p),
("sourcekitd_request_release",
[Object]),
("sourcekitd_request_string_create",
[c_char_p],
c_object_p),
("sourcekitd_request_uid_create",
[UIdent],
c_object_p),
("sourcekitd_response_description_copy",
[Response],
c_char_p),
("sourcekitd_response_description_dump",
[Response]),
("sourcekitd_response_description_dump_filedesc",
[Response, c_int]),
("sourcekitd_response_dispose",
[Response]),
("sourcekitd_response_error_get_description",
[Response],
c_char_p),
("sourcekitd_response_error_get_kind",
[Response],
ErrorKind.from_id),
("sourcekitd_response_get_value",
[Response],
Variant),
("sourcekitd_response_is_error",
[Response],
c_bool),
("sourcekitd_send_request_sync",
[Object],
c_object_p),
("sourcekitd_shutdown",
None),
("sourcekitd_uid_get_from_buf",
[c_char_p, c_size_t],
c_object_p),
("sourcekitd_uid_get_from_cstr",
[c_char_p],
c_object_p),
("sourcekitd_uid_get_length",
[UIdent],
c_size_t),
("sourcekitd_uid_get_string_ptr",
[UIdent],
c_char_p),
("sourcekitd_variant_array_apply_f",
[Variant, callbacks['array_applier'], py_object],
c_bool),
("sourcekitd_variant_array_get_bool",
[Variant, c_size_t],
c_bool),
("sourcekitd_variant_array_get_count",
[Variant],
c_size_t),
("sourcekitd_variant_array_get_int64",
[Variant, c_size_t],
c_int64),
("sourcekitd_variant_array_get_string",
[Variant, c_size_t],
c_char_p),
("sourcekitd_variant_array_get_uid",
[Variant, c_size_t],
c_object_p),
("sourcekitd_variant_array_get_value",
[Variant, c_size_t],
Variant),
("sourcekitd_variant_bool_get_value",
[Variant],
c_bool),
("sourcekitd_variant_dictionary_apply_f",
[Variant, callbacks['dictionary_applier'], py_object],
c_bool),
("sourcekitd_variant_dictionary_get_bool",
[Variant, UIdent],
c_bool),
("sourcekitd_variant_dictionary_get_int64",
[Variant, UIdent],
c_int64),
("sourcekitd_variant_dictionary_get_string",
[Variant, UIdent],
c_char_p),
("sourcekitd_variant_dictionary_get_value",
[Variant, UIdent],
Variant),
("sourcekitd_variant_dictionary_get_uid",
[Variant, UIdent],
c_object_p),
("sourcekitd_variant_get_type",
[Variant],
VariantType.from_id),
("sourcekitd_variant_string_get_length",
[Variant],
c_size_t),
("sourcekitd_variant_string_get_ptr",
[Variant],
c_char_p),
("sourcekitd_variant_int64_get_value",
[Variant],
c_int64),
("sourcekitd_variant_uid_get_value",
[Variant],
c_object_p),
]
class LibsourcekitdError(Exception):
    """Raised when the sourcekitd shared library cannot be loaded or is
    missing an expected entry point."""

    def __init__(self, message):
        # Forward to Exception so that `e.args`, pickling and the default
        # traceback rendering all carry the message too (the original only
        # stored it on `self.m`, leaving `args` empty).
        Exception.__init__(self, message)
        self.m = message

    def __str__(self):
        return self.m
def register_function(lib, item, ignore_errors):
    """Attach the ctypes prototype described by `item` to `lib`.

    `item` is a tuple of `(name,)` plus optional argtypes, restype and
    errcheck entries, in that order.
    """
    name = item[0]
    # A function may not exist, if these bindings are used with an older or
    # incompatible version of sourcekitd.
    try:
        func = getattr(lib, name)
    except AttributeError as e:
        if ignore_errors:
            return
        raise LibsourcekitdError(
            str(e) + ". Please ensure that your Python bindings are "
            "compatible with your sourcekitd version.")

    count = len(item)
    if count >= 2:
        func.argtypes = item[1]
    if count >= 3:
        func.restype = item[2]
    if count == 4:
        func.errcheck = item[3]
def register_functions(lib, ignore_errors):
    """Register function prototypes with a sourcekitd library instance.

    This must be called as part of library instantiation so Python knows how
    to call out to the shared library.
    """
    # Use an explicit loop: under Python 3 `map()` is lazy, so the previous
    # `map(register, functionList)` would never actually register anything.
    for item in functionList:
        register_function(lib, item, ignore_errors)
class Config(object):
    """Global configuration for locating and loading sourcekitd.

    The library location may be customized via `set_library_path` /
    `set_library_file`, but only before the first access of `conf.lib`.
    """
    library_path = None   # directory to search for the library
    library_file = None   # exact library path; takes precedence over library_path
    loaded = False        # set once `lib` has been resolved

    @staticmethod
    def set_library_path(path):
        """Set the path in which to search for sourcekitd"""
        if Config.loaded:
            # (message fixed: previously read "before before")
            raise Exception("library path must be set before using "
                            "any other functionalities in sourcekitd.")

        Config.library_path = path

    @staticmethod
    def set_library_file(filename):
        """Set the exact location of sourcekitd"""
        if Config.loaded:
            # (message fixed: previously read "before before")
            raise Exception("library file must be set before using "
                            "any other functionalities in sourcekitd.")

        Config.library_file = filename

    @CachedProperty
    def lib(self):
        """The ctypes handle to sourcekitd, loaded and registered once."""
        lib = self.get_sourcekitd_library()
        register_functions(lib, False)
        Config.loaded = True
        return lib

    @CachedProperty
    def free(self):
        """libc's free(), prototyped to take a single void pointer."""
        free = cdll.LoadLibrary('libc.dylib').free
        free.argtypes = [c_void_p]
        return free

    def get_filename(self):
        """Return the platform-appropriate file name/path of the library."""
        if Config.library_file:
            return Config.library_file

        import platform
        name = platform.system()

        if name == 'Darwin':
            # The XPC service cannot run via the bindings due to permissions
            # issue.
            # fname = 'sourcekitd.framework/sourcekitd'
            fname = 'libsourcekitdInProc.dylib'
        elif name == 'Windows':
            fname = 'sourcekitd.dll'
        else:
            fname = 'sourcekitd.so'

        if Config.library_path:
            fname = Config.library_path + '/' + fname

        return fname

    def get_sourcekitd_library(self):
        """Load the sourcekitd shared library, raising LibsourcekitdError
        with a configuration hint if it cannot be found."""
        try:
            library = cdll.LoadLibrary(self.get_filename())
        except OSError as e:
            msg = str(e) + ". To provide a path to sourcekitd use " \
                "Config.set_library_path() or " \
                "Config.set_library_file()."
            raise LibsourcekitdError(msg)

        return library
# The single shared configuration object. Touching `conf.lib` here loads the
# shared library and registers all prototypes, so importing this module
# initializes sourcekitd immediately.
conf = Config()
conf.lib.sourcekitd_initialize()

# Public API of this module.
__all__ = [
    'Config',
    'Object',
    'Response',
    'UIdent',
    'ErrorKind',
    'Variant',
    'VariantType'
]
| apache-2.0 |
gw280/skia | tools/test_pictures.py | 1 | 6084 | '''
Compares the rendererings of serialized SkPictures to expected images.
Launch with --help to see more information.
Copyright 2012 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
# common Python modules
import os
import optparse
import sys
import shutil
import tempfile
USAGE_STRING = 'Usage: %s input... expectedDir'
# User-visible help text (typo fixed: "directoriers" -> "directories").
HELP_STRING = '''
Compares the renderings of serialized SkPicture files and directories specified
by input with the images in expectedDir. Note, files in directories are
expected to end with .skp.
'''
def RunCommand(command):
    """Run a command in a subshell.

    @param command the command as a single string
    """
    # Single-argument print(...) is valid in both Python 2 and Python 3;
    # the bare `print 'x'` statement used before is a SyntaxError on py3.
    print('running command [%s]...' % command)
    # NOTE(review): os.system passes `command` through the shell unescaped;
    # callers must not feed it untrusted input.
    os.system(command)
def FindPathToProgram(program):
    """Return path to an existing program binary, or raise an exception if we
    cannot find one.

    @param program the name of the program that is being looked for
    """
    trunk = os.path.abspath(
        os.path.join(os.path.dirname(__file__), os.pardir))
    # Search order matters: bare name (Release then Debug) before the
    # Windows-style '.exe' variants.
    candidates = []
    for suffix in ('', '.exe'):
        for config in ('Release', 'Debug'):
            candidates.append(
                os.path.join(trunk, 'out', config, program + suffix))
    for candidate in candidates:
        if os.path.isfile(candidate):
            return candidate
    raise Exception('cannot find %s in paths %s; maybe you need to '
                    'build %s?' % (program, candidates, program))
def RenderImages(inputs, render_dir, options):
    """Renders the serialized SkPictures.

    Uses the render_pictures program to do the rendering.

    @param inputs the location(s) to read the serialized SkPictures
    @param render_dir the location to write out the rendered images
    """
    parts = [FindPathToProgram('render_pictures'),
             " ".join(inputs),
             render_dir]
    if options.mode is not None:
        parts.append('--mode %s' % ' '.join(options.mode))
    if options.device is not None:
        parts.append('--device %s' % options.device)
    RunCommand(' '.join(parts))
def DiffImages(expected_dir, comparison_dir, diff_dir):
    """Diffs the rendered SkPicture images with the baseline images.

    Uses the skdiff program to do the diffing.

    @param expected_dir the location of the baseline images.
    @param comparison_dir the location of the images to compare with the
           baseline
    @param diff_dir the location to write out the diff results
    """
    skdiff_path = FindPathToProgram('skdiff')
    command = ' '.join([skdiff_path, expected_dir, comparison_dir, diff_dir,
                        '--noprintdirs'])
    RunCommand(command)
def Cleanup(options, render_dir, diff_dir):
    """Deletes any temporary folders and files created.

    @param options The OptionParser object that parsed if render_dir or
           diff_dir was set
    @param render_dir the directory where the rendered images were written
    @param diff_dir the directory where the diff results were written
    """
    # Only remove a directory if the user did NOT supply it explicitly
    # (an unset option means we created a throw-away temp directory).
    for user_supplied, directory in ((options.render_dir, render_dir),
                                     (options.diff_dir, diff_dir)):
        if not user_supplied and os.path.isdir(directory):
            shutil.rmtree(directory)
def ModeParse(option, opt_str, value, parser):
    """Parses the --mode option of the commandline.

    The --mode option will either take in three parameters (if tile or
    pow2tile) or a single parameter (otherwise). The extra parameters are
    consumed from parser.rargs and the final list is stored on
    parser.values under the option's dest.
    """
    result = [value]
    if value in ("tile", "pow2tile"):
        if len(parser.rargs) < 2:
            # (typo fixed: previously "mising")
            first = "width" if value == "tile" else "minWidth"
            raise optparse.OptionValueError(
                "--mode %s missing %s and/or height parameters"
                % (value, first))
        result.extend(parser.rargs[:2])
        del parser.rargs[:2]
    setattr(parser.values, option.dest, result)
def Main(args):
    """Allow other scripts to call this script with fake command-line args.

    @param args the full argv-style argument list (args[0] is the program
           name, which is why the positional slice below starts at 1)
    """
    parser = optparse.OptionParser(USAGE_STRING % '%prog' + HELP_STRING)
    parser.add_option('--render_dir', dest='render_dir',
                      help=("specify the location to output the rendered "
                            "files. Default is a temp directory."))
    parser.add_option('--diff_dir', dest='diff_dir',
                      help=("specify the location to output the diff files."
                            " Default is a temp directory."))
    parser.add_option('--mode', dest='mode', type='string',
                      action="callback", callback=ModeParse,
                      help=("specify how rendering is to be done."))
    parser.add_option('--device', dest='device',
                      help=("specify the device to render to."))
    options, arguments = parser.parse_args(args)

    if len(arguments) < 3:
        # (typo fixed: previously "ouput")
        print("Expected at least one input and one output folder.")
        parser.print_help()
        sys.exit(-1)

    inputs = arguments[1:-1]
    expected_dir = arguments[-1]

    # Fall back to throw-away temp directories when no explicit output
    # locations were given; Cleanup() removes only those afterwards.
    if options.render_dir:
        render_dir = options.render_dir
    else:
        render_dir = tempfile.mkdtemp()
    if options.diff_dir:
        diff_dir = options.diff_dir
    else:
        diff_dir = tempfile.mkdtemp()

    try:
        RenderImages(inputs, render_dir, options)
        DiffImages(expected_dir, render_dir, diff_dir)
    finally:
        Cleanup(options, render_dir, diff_dir)
# Command-line entry point; forwards the raw argv (including the program
# name) to Main(), which slices accordingly.
if __name__ == '__main__':
    Main(sys.argv)
| bsd-3-clause |
mammique/django | django/contrib/gis/geos/geometry.py | 103 | 25896 | """
This module contains the 'base' GEOSGeometry object -- all GEOS Geometries
inherit from this object.
"""
from __future__ import unicode_literals
# Python, ctypes and types dependencies.
from ctypes import addressof, byref, c_double
from django.contrib.gis import memoryview
# super-class for mutable list behavior
from django.contrib.gis.geos.mutable_list import ListMixin
# GEOS-related dependencies.
from django.contrib.gis.geos.base import GEOSBase, gdal
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOS_PREPARE
from django.contrib.gis.geos.mutable_list import ListMixin
# All other functions in this module come from the ctypes
# prototypes module -- which handles all interaction with
# the underlying GEOS library.
from django.contrib.gis.geos import prototypes as capi
# These functions provide access to a thread-local instance
# of their corresponding GEOS I/O class.
from django.contrib.gis.geos.prototypes.io import wkt_r, wkt_w, wkb_r, wkb_w, ewkb_w
# For recognizing geometry input.
from django.contrib.gis.geometry.regex import hex_regex, wkt_regex, json_regex
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class GEOSGeometry(GEOSBase, ListMixin):
"A class that, generally, encapsulates a GEOS geometry."
# Raise GEOSIndexError instead of plain IndexError
# (see ticket #4740 and GEOSIndexError docstring)
_IndexError = GEOSIndexError
ptr_type = GEOM_PTR
#### Python 'magic' routines ####
def __init__(self, geo_input, srid=None):
"""
The base constructor for GEOS geometry objects, and may take the
following inputs:
* strings:
- WKT
- HEXEWKB (a PostGIS-specific canonical form)
- GeoJSON (requires GDAL)
* buffer:
- WKB
The `srid` keyword is used to specify the Source Reference Identifier
(SRID) number for this Geometry. If not set, the SRID will be None.
"""
if isinstance(geo_input, bytes):
geo_input = force_text(geo_input)
if isinstance(geo_input, six.string_types):
wkt_m = wkt_regex.match(geo_input)
if wkt_m:
# Handling WKT input.
if wkt_m.group('srid'): srid = int(wkt_m.group('srid'))
g = wkt_r().read(force_bytes(wkt_m.group('wkt')))
elif hex_regex.match(geo_input):
# Handling HEXEWKB input.
g = wkb_r().read(force_bytes(geo_input))
elif gdal.HAS_GDAL and json_regex.match(geo_input):
# Handling GeoJSON input.
g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb)
else:
raise ValueError('String or unicode input unrecognized as WKT EWKT, and HEXEWKB.')
elif isinstance(geo_input, GEOM_PTR):
# When the input is a pointer to a geomtry (GEOM_PTR).
g = geo_input
elif isinstance(geo_input, memoryview):
# When the input is a buffer (WKB).
g = wkb_r().read(geo_input)
elif isinstance(geo_input, GEOSGeometry):
g = capi.geom_clone(geo_input.ptr)
else:
# Invalid geometry type.
raise TypeError('Improper geometry input type: %s' % str(type(geo_input)))
if bool(g):
# Setting the pointer object with a valid pointer.
self.ptr = g
else:
raise GEOSException('Could not initialize GEOS Geometry with given input.')
# Post-initialization setup.
self._post_init(srid)
def _post_init(self, srid):
"Helper routine for performing post-initialization setup."
# Setting the SRID, if given.
if srid and isinstance(srid, int): self.srid = srid
# Setting the class type (e.g., Point, Polygon, etc.)
self.__class__ = GEOS_CLASSES[self.geom_typeid]
# Setting the coordinate sequence for the geometry (will be None on
# geometries that do not have coordinate sequences)
self._set_cs()
def __del__(self):
"""
Destroys this Geometry; in other words, frees the memory used by the
GEOS C++ object.
"""
if self._ptr: capi.destroy_geom(self._ptr)
def __copy__(self):
"""
Returns a clone because the copy of a GEOSGeometry may contain an
invalid pointer location if the original is garbage collected.
"""
return self.clone()
def __deepcopy__(self, memodict):
"""
The `deepcopy` routine is used by the `Node` class of django.utils.tree;
thus, the protocol routine needs to be implemented to return correct
copies (clones) of these GEOS objects, which use C pointers.
"""
return self.clone()
def __str__(self):
"WKT is used for the string representation."
return self.wkt
def __repr__(self):
"Short-hand representation because WKT may be very large."
return '<%s object at %s>' % (self.geom_type, hex(addressof(self.ptr)))
# Pickling support
def __getstate__(self):
# The pickled state is simply a tuple of the WKB (in string form)
# and the SRID.
return bytes(self.wkb), self.srid
def __setstate__(self, state):
# Instantiating from the tuple state that was pickled.
wkb, srid = state
ptr = wkb_r().read(memoryview(wkb))
if not ptr: raise GEOSException('Invalid Geometry loaded from pickled state.')
self.ptr = ptr
self._post_init(srid)
# Comparison operators
def __eq__(self, other):
"""
Equivalence testing, a Geometry may be compared with another Geometry
or a WKT representation.
"""
if isinstance(other, six.string_types):
return self.wkt == other
elif isinstance(other, GEOSGeometry):
return self.equals_exact(other)
else:
return False
def __ne__(self, other):
"The not equals operator."
return not (self == other)
### Geometry set-like operations ###
# Thanks to Sean Gillies for inspiration:
# http://lists.gispython.org/pipermail/community/2007-July/001034.html
# g = g1 | g2
def __or__(self, other):
"Returns the union of this Geometry and the other."
return self.union(other)
# g = g1 & g2
def __and__(self, other):
"Returns the intersection of this Geometry and the other."
return self.intersection(other)
# g = g1 - g2
def __sub__(self, other):
"Return the difference this Geometry and the other."
return self.difference(other)
# g = g1 ^ g2
def __xor__(self, other):
"Return the symmetric difference of this Geometry and the other."
return self.sym_difference(other)
#### Coordinate Sequence Routines ####
@property
def has_cs(self):
"Returns True if this Geometry has a coordinate sequence, False if not."
# Only these geometries are allowed to have coordinate sequences.
if isinstance(self, (Point, LineString, LinearRing)):
return True
else:
return False
def _set_cs(self):
"Sets the coordinate sequence for this Geometry."
if self.has_cs:
self._cs = GEOSCoordSeq(capi.get_cs(self.ptr), self.hasz)
else:
self._cs = None
@property
def coord_seq(self):
"Returns a clone of the coordinate sequence for this Geometry."
if self.has_cs:
return self._cs.clone()
#### Geometry Info ####
@property
def geom_type(self):
"Returns a string representing the Geometry type, e.g. 'Polygon'"
return capi.geos_type(self.ptr).decode()
@property
def geom_typeid(self):
"Returns an integer representing the Geometry type."
return capi.geos_typeid(self.ptr)
@property
def num_geom(self):
"Returns the number of geometries in the Geometry."
return capi.get_num_geoms(self.ptr)
@property
def num_coords(self):
"Returns the number of coordinates in the Geometry."
return capi.get_num_coords(self.ptr)
@property
def num_points(self):
"Returns the number points, or coordinates, in the Geometry."
return self.num_coords
@property
def dims(self):
"Returns the dimension of this Geometry (0=point, 1=line, 2=surface)."
return capi.get_dims(self.ptr)
def normalize(self):
"Converts this Geometry to normal form (or canonical form)."
return capi.geos_normalize(self.ptr)
#### Unary predicates ####
@property
def empty(self):
"""
Returns a boolean indicating whether the set of points in this Geometry
are empty.
"""
return capi.geos_isempty(self.ptr)
@property
def hasz(self):
"Returns whether the geometry has a 3D dimension."
return capi.geos_hasz(self.ptr)
@property
def ring(self):
"Returns whether or not the geometry is a ring."
return capi.geos_isring(self.ptr)
@property
def simple(self):
"Returns false if the Geometry not simple."
return capi.geos_issimple(self.ptr)
@property
def valid(self):
"This property tests the validity of this Geometry."
return capi.geos_isvalid(self.ptr)
@property
def valid_reason(self):
"""
Returns a string containing the reason for any invalidity.
"""
if not GEOS_PREPARE:
raise GEOSException('Upgrade GEOS to 3.1 to get validity reason.')
return capi.geos_isvalidreason(self.ptr).decode()
#### Binary predicates. ####
def contains(self, other):
"Returns true if other.within(this) returns true."
return capi.geos_contains(self.ptr, other.ptr)
def crosses(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*T****** (for a point and a curve,a point and an area or a line and
an area) 0******** (for two curves).
"""
return capi.geos_crosses(self.ptr, other.ptr)
def disjoint(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is FF*FF****.
"""
return capi.geos_disjoint(self.ptr, other.ptr)
def equals(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*F**FFF*.
"""
return capi.geos_equals(self.ptr, other.ptr)
def equals_exact(self, other, tolerance=0):
"""
Returns true if the two Geometries are exactly equal, up to a
specified tolerance.
"""
return capi.geos_equalsexact(self.ptr, other.ptr, float(tolerance))
def intersects(self, other):
"Returns true if disjoint returns false."
return capi.geos_intersects(self.ptr, other.ptr)
def overlaps(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*T***T** (for two points or two surfaces) 1*T***T** (for two curves).
"""
return capi.geos_overlaps(self.ptr, other.ptr)
def relate_pattern(self, other, pattern):
"""
Returns true if the elements in the DE-9IM intersection matrix for the
two Geometries match the elements in pattern.
"""
if not isinstance(pattern, six.string_types) or len(pattern) > 9:
raise GEOSException('invalid intersection matrix pattern')
return capi.geos_relatepattern(self.ptr, other.ptr, force_bytes(pattern))
def touches(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is FT*******, F**T***** or F***T****.
"""
return capi.geos_touches(self.ptr, other.ptr)
def within(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*F**F***.
"""
return capi.geos_within(self.ptr, other.ptr)
#### SRID Routines ####
def get_srid(self):
"Gets the SRID for the geometry, returns None if no SRID is set."
s = capi.geos_get_srid(self.ptr)
if s == 0: return None
else: return s
def set_srid(self, srid):
"Sets the SRID for the geometry."
capi.geos_set_srid(self.ptr, srid)
srid = property(get_srid, set_srid)
#### Output Routines ####
@property
def ewkt(self):
"""
Returns the EWKT (WKT + SRID) of the Geometry. Note that Z values
are *not* included in this representation because GEOS does not yet
support serializing them.
"""
if self.get_srid(): return 'SRID=%s;%s' % (self.srid, self.wkt)
else: return self.wkt
@property
def wkt(self):
"Returns the WKT (Well-Known Text) representation of this Geometry."
return wkt_w().write(self).decode()
@property
def hex(self):
"""
Returns the WKB of this Geometry in hexadecimal form. Please note
that the SRID is not included in this representation because it is not
a part of the OGC specification (use the `hexewkb` property instead).
"""
# A possible faster, all-python, implementation:
# str(self.wkb).encode('hex')
return wkb_w(self.hasz and 3 or 2).write_hex(self)
@property
def hexewkb(self):
"""
Returns the EWKB of this Geometry in hexadecimal form. This is an
extension of the WKB specification that includes SRID value that are
a part of this geometry.
"""
if self.hasz and not GEOS_PREPARE:
# See: http://trac.osgeo.org/geos/ticket/216
raise GEOSException('Upgrade GEOS to 3.1 to get valid 3D HEXEWKB.')
return ewkb_w(self.hasz and 3 or 2).write_hex(self)
@property
def json(self):
"""
Returns GeoJSON representation of this Geometry if GDAL is installed.
"""
if gdal.HAS_GDAL:
return self.ogr.json
else:
raise GEOSException('GeoJSON output only supported when GDAL is installed.')
geojson = json
@property
def wkb(self):
"""
Returns the WKB (Well-Known Binary) representation of this Geometry
as a Python buffer. SRID and Z values are not included, use the
`ewkb` property instead.
"""
return wkb_w(self.hasz and 3 or 2).write(self)
@property
def ewkb(self):
"""
Return the EWKB representation of this Geometry as a Python buffer.
This is an extension of the WKB specification that includes any SRID
value that are a part of this geometry.
"""
if self.hasz and not GEOS_PREPARE:
# See: http://trac.osgeo.org/geos/ticket/216
raise GEOSException('Upgrade GEOS to 3.1 to get valid 3D EWKB.')
return ewkb_w(self.hasz and 3 or 2).write(self)
@property
def kml(self):
"Returns the KML representation of this Geometry."
gtype = self.geom_type
return '<%s>%s</%s>' % (gtype, self.coord_seq.kml, gtype)
@property
def prepared(self):
"""
Returns a PreparedGeometry corresponding to this geometry -- it is
optimized for the contains, intersects, and covers operations.
"""
if GEOS_PREPARE:
return PreparedGeometry(self)
else:
raise GEOSException('GEOS 3.1+ required for prepared geometry support.')
#### GDAL-specific output routines ####
@property
def ogr(self):
"Returns the OGR Geometry for this Geometry."
if gdal.HAS_GDAL:
if self.srid:
return gdal.OGRGeometry(self.wkb, self.srid)
else:
return gdal.OGRGeometry(self.wkb)
else:
raise GEOSException('GDAL required to convert to an OGRGeometry.')
@property
def srs(self):
"Returns the OSR SpatialReference for SRID of this Geometry."
if gdal.HAS_GDAL:
if self.srid:
return gdal.SpatialReference(self.srid)
else:
return None
else:
raise GEOSException('GDAL required to return a SpatialReference object.')
@property
def crs(self):
"Alias for `srs` property."
return self.srs
def transform(self, ct, clone=False):
"""
Requires GDAL. Transforms the geometry according to the given
transformation object, which may be an integer SRID, and WKT or
PROJ.4 string. By default, the geometry is transformed in-place and
nothing is returned. However if the `clone` keyword is set, then this
geometry will not be modified and a transformed clone will be returned
instead.
"""
srid = self.srid
if ct == srid:
# short-circuit where source & dest SRIDs match
if clone:
return self.clone()
else:
return
if (srid is None) or (srid < 0):
raise GEOSException("Calling transform() with no SRID set is not supported")
if not gdal.HAS_GDAL:
raise GEOSException("GDAL library is not available to transform() geometry.")
# Creating an OGR Geometry, which is then transformed.
g = self.ogr
g.transform(ct)
# Getting a new GEOS pointer
ptr = wkb_r().read(g.wkb)
if clone:
# User wants a cloned transformed geometry returned.
return GEOSGeometry(ptr, srid=g.srid)
if ptr:
# Reassigning pointer, and performing post-initialization setup
# again due to the reassignment.
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(g.srid)
else:
raise GEOSException('Transformed WKB was invalid.')
#### Topology Routines ####
def _topology(self, gptr):
"Helper routine to return Geometry from the given pointer."
return GEOSGeometry(gptr, srid=self.srid)
@property
def boundary(self):
"Returns the boundary as a newly allocated Geometry object."
return self._topology(capi.geos_boundary(self.ptr))
def buffer(self, width, quadsegs=8):
"""
Returns a geometry that represents all points whose distance from this
Geometry is less than or equal to distance. Calculations are in the
Spatial Reference System of this Geometry. The optional third parameter sets
the number of segment used to approximate a quarter circle (defaults to 8).
(Text from PostGIS documentation at ch. 6.1.3)
"""
return self._topology(capi.geos_buffer(self.ptr, width, quadsegs))
@property
def centroid(self):
"""
The centroid is equal to the centroid of the set of component Geometries
of highest dimension (since the lower-dimension geometries contribute zero
"weight" to the centroid).
"""
return self._topology(capi.geos_centroid(self.ptr))
@property
def convex_hull(self):
"""
Returns the smallest convex Polygon that contains all the points
in the Geometry.
"""
return self._topology(capi.geos_convexhull(self.ptr))
def difference(self, other):
"""
Returns a Geometry representing the points making up this Geometry
that do not make up other.
"""
return self._topology(capi.geos_difference(self.ptr, other.ptr))
@property
def envelope(self):
"Return the envelope for this geometry (a polygon)."
return self._topology(capi.geos_envelope(self.ptr))
def interpolate(self, distance):
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('interpolate only works on LineString and MultiLineString geometries')
if not hasattr(capi, 'geos_interpolate'):
raise NotImplementedError('interpolate requires GEOS 3.2+')
return self._topology(capi.geos_interpolate(self.ptr, distance))
def interpolate_normalized(self, distance):
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('interpolate only works on LineString and MultiLineString geometries')
if not hasattr(capi, 'geos_interpolate_normalized'):
raise NotImplementedError('interpolate_normalized requires GEOS 3.2+')
return self._topology(capi.geos_interpolate_normalized(self.ptr, distance))
def intersection(self, other):
"Returns a Geometry representing the points shared by this Geometry and other."
return self._topology(capi.geos_intersection(self.ptr, other.ptr))
@property
def point_on_surface(self):
"Computes an interior point of this Geometry."
return self._topology(capi.geos_pointonsurface(self.ptr))
def project(self, point):
if not isinstance(point, Point):
raise TypeError('locate_point argument must be a Point')
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('locate_point only works on LineString and MultiLineString geometries')
if not hasattr(capi, 'geos_project'):
raise NotImplementedError('geos_project requires GEOS 3.2+')
return capi.geos_project(self.ptr, point.ptr)
def project_normalized(self, point):
if not isinstance(point, Point):
raise TypeError('locate_point argument must be a Point')
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('locate_point only works on LineString and MultiLineString geometries')
if not hasattr(capi, 'geos_project_normalized'):
raise NotImplementedError('project_normalized requires GEOS 3.2+')
return capi.geos_project_normalized(self.ptr, point.ptr)
def relate(self, other):
"Returns the DE-9IM intersection matrix for this Geometry and the other."
return capi.geos_relate(self.ptr, other.ptr).decode()
def simplify(self, tolerance=0.0, preserve_topology=False):
"""
Returns the Geometry, simplified using the Douglas-Peucker algorithm
to the specified tolerance (higher tolerance => less points). If no
tolerance provided, defaults to 0.
By default, this function does not preserve topology - e.g. polygons can
be split, collapse to lines or disappear holes can be created or
disappear, and lines can cross. By specifying preserve_topology=True,
the result will have the same dimension and number of components as the
input. This is significantly slower.
"""
if preserve_topology:
return self._topology(capi.geos_preservesimplify(self.ptr, tolerance))
else:
return self._topology(capi.geos_simplify(self.ptr, tolerance))
def sym_difference(self, other):
"""
Returns a set combining the points in this Geometry not in other,
and the points in other not in this Geometry.
"""
return self._topology(capi.geos_symdifference(self.ptr, other.ptr))
def union(self, other):
"Returns a Geometry representing all the points in this Geometry and other."
return self._topology(capi.geos_union(self.ptr, other.ptr))
#### Other Routines ####
@property
def area(self):
"Returns the area of the Geometry."
return capi.geos_area(self.ptr, byref(c_double()))
def distance(self, other):
"""
Returns the distance between the closest points on this Geometry
and the other. Units will be in those of the coordinate system of
the Geometry.
"""
if not isinstance(other, GEOSGeometry):
raise TypeError('distance() works only on other GEOS Geometries.')
return capi.geos_distance(self.ptr, other.ptr, byref(c_double()))
@property
def extent(self):
"""
Returns the extent of this geometry as a 4-tuple, consisting of
(xmin, ymin, xmax, ymax).
"""
env = self.envelope
if isinstance(env, Point):
xmin, ymin = env.tuple
xmax, ymax = xmin, ymin
else:
xmin, ymin = env[0][0]
xmax, ymax = env[0][2]
return (xmin, ymin, xmax, ymax)
    @property
    def length(self):
        """
        Returns the length of this Geometry (e.g., 0 for point, or the
        circumference of a Polygon).
        """
        # Same out-parameter pattern as `area`: GEOS writes into the
        # c_double and the wrapper presumably returns it as a float.
        return capi.geos_length(self.ptr, byref(c_double()))
def clone(self):
"Clones this Geometry."
return GEOSGeometry(capi.geom_clone(self.ptr), srid=self.srid)
# Class mapping dictionary. Has to be at the end to avoid import
# conflicts with GEOSGeometry.
from django.contrib.gis.geos.linestring import LineString, LinearRing
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
from django.contrib.gis.geos.collections import GeometryCollection, MultiPoint, MultiLineString, MultiPolygon

# Maps integer geometry type ids (presumably the GEOS geom_typeid
# values -- verify against the capi bindings) to the concrete
# GEOSGeometry subclasses instantiated for them.
GEOS_CLASSES = {0 : Point,
                1 : LineString,
                2 : LinearRing,
                3 : Polygon,
                4 : MultiPoint,
                5 : MultiLineString,
                6 : MultiPolygon,
                7 : GeometryCollection,
                }

# If supported, import the PreparedGeometry class.
if GEOS_PREPARE:
    from django.contrib.gis.geos.prepared import PreparedGeometry
| bsd-3-clause |
cfhamlet/os-urlpattern | src/os_urlpattern/pattern_maker.py | 1 | 4225 | """Pattern clustering procedure APIs.
"""
from .compat import itervalues
from .config import get_default_config
from .definition import BasePattern
from .parse_utils import EMPTY_PARSED_PIECE, ParsedPiece
from .parser import fuzzy_digest, parse
from .pattern_cluster import cluster
from .piece_pattern_node import PiecePatternNode, build_from_parsed_pieces
from .utils import TreeNode, build_tree, dump_tree, pick
class PatternMaker(object):
    """Scaffold that simplifies pattern clustering.

    After URLs are loaded, iterate the sub makers to cluster each one
    individually, or cluster them all at once through :meth:`make`.
    """

    def __init__(self, config=None):
        self._config = config if config is not None else get_default_config()
        self._makers = {}

    @property
    def makers(self):
        """iterable: All sub makers, one per fuzzy digest."""
        return itervalues(self._makers)

    def load(self, url, meta=None):
        """Parse *url* and feed it to the maker for its fuzzy digest.

        Args:
            url (str): The URL to be loaded.
            meta (object, optional): Defaults to None. Meta data merged
                at each cluster; accessible via a clustered node's meta
                property.

        Returns:
            tuple: 2-tuple, (node, is_new).

        Raises:
            ValueError: If *url* does not parse into ParsedPiece objects.
        """
        url_meta, parsed_pieces = parse(url)
        if not isinstance(parsed_pieces[0], ParsedPiece):
            raise ValueError('Invalid URL')
        digest = fuzzy_digest(url_meta, parsed_pieces)
        maker = self._makers.get(digest)
        if maker is None:
            # One sub maker per (url_meta, fuzzy digest) bucket.
            maker = self._makers[digest] = Maker(url_meta, self._config)
        return maker.load(parsed_pieces, meta=meta)

    def make(self, combine=False):
        """Run every sub maker's clustering and yield the results.

        Args:
            combine (bool, optional): Defaults to False. Combine the
                same url_meta clusters into a pattern tree.

        Yields:
            tuple: 2-tuple, (url_meta, clustered) where clustered is the
                root of a clustered tree.
        """
        for sub_maker in self.makers:
            for clustered in sub_maker.make(combine):
                yield sub_maker.url_meta, clustered
class Maker(object):
    """Low-level clustering APIs.

    Intended for clustering inputs that all share one fuzzy digest.
    """

    def __init__(self, url_meta, config=None):
        self._url_meta = url_meta
        self._config = config if config is not None else get_default_config()
        # Root of the piece-pattern tree all loaded pieces hang off.
        self._root = PiecePatternNode((EMPTY_PARSED_PIECE, None))

    @property
    def url_meta(self):
        """URLMeta: The URLMeta object."""
        return self._url_meta

    def load(self, parsed_pieces, meta=None):
        """Insert parsed pieces (and optional meta) into the tree.

        Args:
            parsed_pieces (list): The parsed pieces to be loaded.
            meta (object, optional): Defaults to None. Meta data merged
                at each cluster; accessible via a clustered node's meta
                property.

        Returns:
            tuple: 2-tuple, (node, is_new).
        """
        return build_from_parsed_pieces(self._root,
                                        parsed_pieces,
                                        meta=meta)

    def _cluster(self):
        # Delegate to the clustering engine; yield each clustered root.
        for clustered in cluster(self._config, self._url_meta, self._root):
            yield clustered

    def _combine_clusters(self):
        # Fold every clustered tree into one combined pattern tree.
        combined = TreeNode(BasePattern.EMPTY)
        for clustered in self._cluster():
            picked = pick(dump_tree(clustered))
            pattern_pairs = [(node.pattern, node.pattern)
                             for node in picked[1:]]
            build_tree(combined, pattern_pairs, picked[0].count)
        yield combined

    def make(self, combine=False):
        """Start clustering and yield clustered trees.

        Args:
            combine (bool, optional): Defaults to False. Combine the
                clusters into a pattern tree.

        Yields:
            TreeNode: Root of the clustered tree. With combine=False all
                clustered parsed-piece trees are yielded; otherwise a
                single combined pattern tree.
        """
        if combine:
            return self._combine_clusters()
        return self._cluster()
| mit |
nosix/PyCraft | src/pycraft/service/composite/entity/monster.py | 1 | 1206 | # -*- coding: utf8 -*-
from pycraft.service.const import EntityType
from pycraft.service.primitive.geometry import Size
from .base import MobEntity
from .player import PlayerEntity
class MonsterEntity(MobEntity):
    # Base class for hostile mobs.
    def has_hostile(self, entity):
        # Monsters treat only player entities as hostile targets.
        return isinstance(entity, PlayerEntity)
class Zombie(MonsterEntity):
    TYPE = EntityType.ZOMBIE
    STRENGTH = 10  # presumably hit points -- confirm semantics in MobEntity
    BODY_SIZE = Size(0.6, 0.6, 1.95)  # assumed (width, depth, height) -- verify Size ctor
    VIEW_DISTANCE = 64
    VIEW_ANGLE_H = 60  # horizontal view angle, degrees (assumed)
    VIEW_ANGLE_V = 30  # vertical view angle, degrees (assumed)
class Skeleton(MonsterEntity):
    # NOTE(review): EntityType.SKELTON looks misspelled, but the enum
    # member is defined elsewhere -- renaming must happen there first.
    TYPE = EntityType.SKELTON
    STRENGTH = 10
    BODY_SIZE = Size(0.6, 0.6, 1.8)
    VIEW_DISTANCE = 64
    VIEW_ANGLE_H = 60
    VIEW_ANGLE_V = 30
class Creeper(MonsterEntity):
    # Stat constants consumed by the MobEntity behaviour machinery.
    TYPE = EntityType.CREEPER
    STRENGTH = 10
    BODY_SIZE = Size(0.6, 0.6, 1.8)
    VIEW_DISTANCE = 64
    VIEW_ANGLE_H = 60
    VIEW_ANGLE_V = 30
class Spider(MonsterEntity):
    # Wider, flatter hitbox and shorter sight range than the humanoid mobs.
    TYPE = EntityType.SPIDER
    STRENGTH = 8
    BODY_SIZE = Size(1.4, 1.4, 0.9)
    VIEW_DISTANCE = 32
    def can_climb(self):
        # Spiders can scale vertical surfaces.
        return True
class Enderman(MonsterEntity):
    # No VIEW_DISTANCE override: inherits whatever default the base
    # classes define -- TODO confirm in MobEntity.
    TYPE = EntityType.ENDERMAN
    STRENGTH = 20
    BODY_SIZE = Size(0.6, 0.6, 2.9)
    VIEW_ANGLE_H = 90
    VIEW_ANGLE_V = 10
| lgpl-3.0 |
home-assistant/home-assistant | homeassistant/components/homematic/const.py | 5 | 6149 | """Constants for the homematic component."""
DOMAIN = "homematic"
DISCOVER_SWITCHES = "homematic.switch"
DISCOVER_LIGHTS = "homematic.light"
DISCOVER_SENSORS = "homematic.sensor"
DISCOVER_BINARY_SENSORS = "homematic.binary_sensor"
DISCOVER_COVER = "homematic.cover"
DISCOVER_CLIMATE = "homematic.climate"
DISCOVER_LOCKS = "homematic.locks"
DISCOVER_BATTERY = "homematic.battery"
ATTR_DISCOVER_DEVICES = "devices"
ATTR_PARAM = "param"
ATTR_CHANNEL = "channel"
ATTR_ADDRESS = "address"
ATTR_DEVICE_TYPE = "device_type"
ATTR_VALUE = "value"
ATTR_VALUE_TYPE = "value_type"
ATTR_INTERFACE = "interface"
ATTR_ERRORCODE = "error"
ATTR_MESSAGE = "message"
ATTR_UNIQUE_ID = "unique_id"
ATTR_PARAMSET_KEY = "paramset_key"
ATTR_PARAMSET = "paramset"
ATTR_RX_MODE = "rx_mode"
ATTR_DISCOVERY_TYPE = "discovery_type"
ATTR_LOW_BAT = "LOW_BAT"
ATTR_LOWBAT = "LOWBAT"
EVENT_KEYPRESS = "homematic.keypress"
EVENT_IMPULSE = "homematic.impulse"
EVENT_ERROR = "homematic.error"
SERVICE_VIRTUALKEY = "virtualkey"
SERVICE_RECONNECT = "reconnect"
SERVICE_SET_VARIABLE_VALUE = "set_variable_value"
SERVICE_SET_DEVICE_VALUE = "set_device_value"
SERVICE_SET_INSTALL_MODE = "set_install_mode"
SERVICE_PUT_PARAMSET = "put_paramset"
# Maps each discovery type to the device-class names it covers --
# presumably the class names exported by pyhomematic; verify there.
HM_DEVICE_TYPES = {
    DISCOVER_SWITCHES: [
        "Switch",
        "SwitchPowermeter",
        "IOSwitch",
        "IOSwitchNoInhibit",
        "IPSwitch",
        "RFSiren",
        "IPSwitchPowermeter",
        "HMWIOSwitch",
        "Rain",
        "EcoLogic",
        "IPKeySwitchPowermeter",
        "IPGarage",
        "IPKeySwitch",
        "IPKeySwitchLevel",
        "IPMultiIO",
        "IPWSwitch",
        "IOSwitchWireless",
        "IPWIODevice",
        "IPSwitchBattery",
    ],
    DISCOVER_LIGHTS: [
        "Dimmer",
        "KeyDimmer",
        "IPKeyDimmer",
        "IPDimmer",
        "ColorEffectLight",
        "IPKeySwitchLevel",
        "ColdWarmDimmer",
        "IPWDimmer",
    ],
    DISCOVER_SENSORS: [
        "SwitchPowermeter",
        "Motion",
        "MotionV2",
        "RemoteMotion",
        "MotionIP",
        "ThermostatWall",
        "AreaThermostat",
        "RotaryHandleSensor",
        "WaterSensor",
        "PowermeterGas",
        "LuxSensor",
        "WeatherSensor",
        "WeatherStation",
        "ThermostatWall2",
        "TemperatureDiffSensor",
        "TemperatureSensor",
        "CO2Sensor",
        "IPSwitchPowermeter",
        "HMWIOSwitch",
        "FillingLevel",
        "ValveDrive",
        "EcoLogic",
        "IPThermostatWall",
        "IPSmoke",
        "RFSiren",
        "PresenceIP",
        "IPAreaThermostat",
        "IPWeatherSensor",
        "RotaryHandleSensorIP",
        "IPPassageSensor",
        "IPKeySwitchPowermeter",
        "IPThermostatWall230V",
        "IPWeatherSensorPlus",
        "IPWeatherSensorBasic",
        "IPBrightnessSensor",
        "IPGarage",
        "UniversalSensor",
        "MotionIPV2",
        "IPMultiIO",
        "IPThermostatWall2",
        "IPRemoteMotionV2",
        "HBUNISenWEA",
        "PresenceIPW",
        "IPRainSensor",
        "ValveBox",
        "IPKeyBlind",
        "IPKeyBlindTilt",
        "IPLanRouter",
        "TempModuleSTE2",
    ],
    DISCOVER_CLIMATE: [
        "Thermostat",
        "ThermostatWall",
        "MAXThermostat",
        "ThermostatWall2",
        "MAXWallThermostat",
        "IPThermostat",
        "IPThermostatWall",
        "ThermostatGroup",
        "IPThermostatWall230V",
        "IPThermostatWall2",
    ],
    DISCOVER_BINARY_SENSORS: [
        "ShutterContact",
        "Smoke",
        "SmokeV2",
        "Motion",
        "MotionV2",
        "MotionIP",
        "RemoteMotion",
        "WeatherSensor",
        "TiltSensor",
        "IPShutterContact",
        "HMWIOSwitch",
        "MaxShutterContact",
        "Rain",
        "WiredSensor",
        "PresenceIP",
        "IPWeatherSensor",
        "IPPassageSensor",
        "SmartwareMotion",
        "IPWeatherSensorPlus",
        "MotionIPV2",
        "WaterIP",
        "IPMultiIO",
        "TiltIP",
        "IPShutterContactSabotage",
        "IPContact",
        "IPRemoteMotionV2",
        "IPWInputDevice",
        "IPWMotionDection",
        "IPAlarmSensor",
        "IPRainSensor",
        "IPLanRouter",
    ],
    DISCOVER_COVER: [
        "Blind",
        "KeyBlind",
        "IPKeyBlind",
        "IPKeyBlindTilt",
        "IPGarage",
        "IPKeyBlindMulti",
        "IPWKeyBlindMulti",
    ],
    DISCOVER_LOCKS: ["KeyMatic"],
}
# Nodes skipped during discovery, with per-device-class exceptions below.
HM_IGNORE_DISCOVERY_NODE = ["ACTUAL_TEMPERATURE", "ACTUAL_HUMIDITY"]
HM_IGNORE_DISCOVERY_NODE_EXCEPTIONS = {
    "ACTUAL_TEMPERATURE": [
        "IPAreaThermostat",
        "IPWeatherSensor",
        "IPWeatherSensorPlus",
        "IPWeatherSensorBasic",
        "IPThermostatWall",
        "IPThermostatWall2",
    ]
}

# Maps a Homematic attribute name to [state attribute name, value->label
# map].  An empty map means the raw value is exposed unchanged.
HM_ATTRIBUTE_SUPPORT = {
    "LOWBAT": ["battery", {0: "High", 1: "Low"}],
    "LOW_BAT": ["battery", {0: "High", 1: "Low"}],
    "ERROR": ["error", {0: "No"}],
    "ERROR_SABOTAGE": ["sabotage", {0: "No", 1: "Yes"}],
    "SABOTAGE": ["sabotage", {0: "No", 1: "Yes"}],
    "RSSI_PEER": ["rssi_peer", {}],
    "RSSI_DEVICE": ["rssi_device", {}],
    "VALVE_STATE": ["valve", {}],
    "LEVEL": ["level", {}],
    "BATTERY_STATE": ["battery", {}],
    "CONTROL_MODE": [
        "mode",
        {0: "Auto", 1: "Manual", 2: "Away", 3: "Boost", 4: "Comfort", 5: "Lowering"},
    ],
    "POWER": ["power", {}],
    "CURRENT": ["current", {}],
    "VOLTAGE": ["voltage", {}],
    "OPERATING_VOLTAGE": ["voltage", {}],
    "WORKING": ["working", {0: "No", 1: "Yes"}],
    "STATE_UNCERTAIN": ["state_uncertain", {}],
}

# Key-press and impulse event parameter names forwarded to the event bus.
HM_PRESS_EVENTS = [
    "PRESS_SHORT",
    "PRESS_LONG",
    "PRESS_CONT",
    "PRESS_LONG_RELEASE",
    "PRESS",
]
HM_IMPULSE_EVENTS = ["SEQUENCE_OK"]

# Configuration option values and keys, plus hass.data storage keys.
CONF_RESOLVENAMES_OPTIONS = ["metadata", "json", "xml", False]
DATA_HOMEMATIC = "homematic"
DATA_STORE = "homematic_store"
DATA_CONF = "homematic_conf"
CONF_INTERFACES = "interfaces"
CONF_LOCAL_IP = "local_ip"
CONF_LOCAL_PORT = "local_port"
CONF_CALLBACK_IP = "callback_ip"
CONF_CALLBACK_PORT = "callback_port"
CONF_RESOLVENAMES = "resolvenames"
CONF_JSONPORT = "jsonport"
| apache-2.0 |
leilihh/novaha | nova/tests/api/openstack/compute/plugins/v3/test_migrations.py | 11 | 3995 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from nova.api.openstack.compute.plugins.v3 import migrations
from nova import context
from nova import exception
from nova.objects import base
from nova.objects import migration
from nova.openstack.common.fixture import moxstubout
from nova import test
# Two representative migration records (one finished resize per record)
# used as canned database output throughout the tests below.
fake_migrations = [
    {
        'id': 1234,
        'source_node': 'node1',
        'dest_node': 'node2',
        'source_compute': 'compute1',
        'dest_compute': 'compute2',
        'dest_host': '1.2.3.4',
        'status': 'Done',
        'instance_uuid': 'instance_id_123',
        'old_instance_type_id': 1,
        'new_instance_type_id': 2,
        'created_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
        'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
        'deleted_at': None,
        'deleted': False
    },
    {
        'id': 5678,
        'source_node': 'node10',
        'dest_node': 'node20',
        'source_compute': 'compute10',
        'dest_compute': 'compute20',
        'dest_host': '5.6.7.8',
        'status': 'Done',
        'instance_uuid': 'instance_id_456',
        'old_instance_type_id': 5,
        'new_instance_type_id': 6,
        'created_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
        'updated_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
        'deleted_at': None,
        'deleted': False
    }
]

# Wrap the plain dicts into a MigrationList of Migration objects, the
# type the compute API returns.
migrations_obj = base.obj_make_list(
    'fake-context',
    migration.MigrationList(),
    migration.Migration,
    fake_migrations)
class FakeRequest(object):
    # Minimal request stand-in: admin context plus empty query args.
    environ = {"nova.context": context.get_admin_context()}
    GET = {}
class MigrationsTestCase(test.NoDBTestCase):
    def setUp(self):
        """Run before each test."""
        super(MigrationsTestCase, self).setUp()
        self.controller = migrations.MigrationsController()
        self.context = context.get_admin_context()
        self.req = FakeRequest()
        self.req.environ['nova.context'] = self.context
        mox_fixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = mox_fixture.mox

    def test_index(self):
        # Expected response body: the serialized fake migrations, which
        # must expose 'id' but hide soft-delete bookkeeping columns.
        migrations_in_progress = {
            'migrations': migrations.output(migrations_obj)}
        for mig in migrations_in_progress['migrations']:
            self.assertTrue('id' in mig)
            self.assertTrue('deleted' not in mig)
            self.assertTrue('deleted_at' not in mig)
        filters = {'host': 'host1', 'status': 'migrating',
                   'cell_name': 'ChildCell'}
        self.req.GET = filters
        # Stub the compute API so the controller's filters are verified
        # and the canned migration list is returned.
        self.mox.StubOutWithMock(self.controller.compute_api,
                                 "get_migrations")
        self.controller.compute_api.get_migrations(
            self.context, filters).AndReturn(migrations_obj)
        self.mox.ReplayAll()

        response = self.controller.index(self.req)
        self.assertEqual(migrations_in_progress, response)

    def test_index_needs_authorization(self):
        # A non-admin context must be rejected by the policy check.
        user_context = context.RequestContext(user_id=None,
                                              project_id=None,
                                              is_admin=False,
                                              read_deleted="no",
                                              overwrite=False)
        self.req.environ['nova.context'] = user_context
        self.assertRaises(exception.PolicyNotAuthorized, self.controller.index,
                          self.req)
sun1991/lvsys | lvsys/env_lvsys/Lib/site-packages/click/termui.py | 202 | 21008 | import os
import sys
import struct
from ._compat import raw_input, text_type, string_types, \
isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN
from .utils import echo
from .exceptions import Abort, UsageError
from .types import convert_type
from .globals import resolve_color_default
# The prompt functions to use. The doc tools currently override these
# functions to customize how they work.
visible_prompt_func = raw_input

# Color names ordered by ANSI code offset: index + 30 is the foreground
# code, index + 40 the background code (see style() below).
_ansi_colors = ('black', 'red', 'green', 'yellow', 'blue', 'magenta',
                'cyan', 'white', 'reset')
# SGR "reset all attributes" sequence appended by style() by default.
_ansi_reset_all = '\033[0m'
def hidden_prompt_func(prompt):
    """Read a line without echoing it (used for password input)."""
    # Imported lazily, presumably to keep getpass off the import path
    # until hidden input is actually requested.
    import getpass
    return getpass.getpass(prompt)
def _build_prompt(text, suffix, show_default=False, default=None):
prompt = text
if default is not None and show_default:
prompt = '%s [%s]' % (prompt, default)
return prompt + suffix
def prompt(text, default=None, hide_input=False,
           confirmation_prompt=False, type=None,
           value_proc=None, prompt_suffix=': ',
           show_default=True, err=False):
    """Prompts a user for input. This is a convenience function that can
    be used to prompt a user for input later.

    If the user aborts the input by sending a interrupt signal, this
    function will catch it and raise a :exc:`Abort` exception.

    .. versionadded:: 6.0
       Added unicode support for cmd.exe on Windows.

    .. versionadded:: 4.0
       Added the `err` parameter.

    :param text: the text to show for the prompt.
    :param default: the default value to use if no input happens. If this
                    is not given it will prompt until it's aborted.
    :param hide_input: if this is set to true then the input value will
                       be hidden.
    :param confirmation_prompt: asks for confirmation for the value.
    :param type: the type to use to check the value against.
    :param value_proc: if this parameter is provided it's a function that
                       is invoked instead of the type conversion to
                       convert a value.
    :param prompt_suffix: a suffix that should be added to the prompt.
    :param show_default: shows or hides the default value in the prompt.
    :param err: if set to true the file defaults to ``stderr`` instead of
                ``stdout``, the same as with echo.
    """
    result = None

    def prompt_func(text):
        # Pick the hidden (getpass) or visible (raw_input) reader.
        f = hide_input and hidden_prompt_func or visible_prompt_func
        try:
            # Write the prompt separately so that we get nice
            # coloring through colorama on Windows
            echo(text, nl=False, err=err)
            return f('')
        except (KeyboardInterrupt, EOFError):
            # getpass doesn't print a newline if the user aborts input with ^C.
            # Allegedly this behavior is inherited from getpass(3).
            # A doc bug has been filed at https://bugs.python.org/issue24711
            if hide_input:
                echo(None, err=err)
            raise Abort()

    if value_proc is None:
        value_proc = convert_type(type, default)

    prompt = _build_prompt(text, prompt_suffix, show_default, default)

    # Outer loop: re-prompt until the value converts cleanly and (when
    # requested) the confirmation input matches.
    while 1:
        while 1:
            value = prompt_func(prompt)
            if value:
                break
            # If a default is set and used, then the confirmation
            # prompt is always skipped because that's the only thing
            # that really makes sense.
            elif default is not None:
                return default
        try:
            result = value_proc(value)
        except UsageError as e:
            echo('Error: %s' % e.message, err=err)
            continue
        if not confirmation_prompt:
            return result
        while 1:
            value2 = prompt_func('Repeat for confirmation: ')
            if value2:
                break
        if value == value2:
            return result
        echo('Error: the two entered values do not match', err=err)
def confirm(text, default=False, abort=False, prompt_suffix=': ',
            show_default=True, err=False):
    """Prompts for confirmation (yes/no question).

    If the user aborts the input by sending a interrupt signal this
    function will catch it and raise a :exc:`Abort` exception.

    .. versionadded:: 4.0
       Added the `err` parameter.

    :param text: the question to ask.
    :param default: the default for the prompt.
    :param abort: if this is set to `True` a negative answer aborts the
                  exception by raising :exc:`Abort`.
    :param prompt_suffix: a suffix that should be added to the prompt.
    :param show_default: shows or hides the default value in the prompt.
    :param err: if set to true the file defaults to ``stderr`` instead of
                ``stdout``, the same as with echo.
    """
    # The default answer is shown capitalized: "Y/n" or "y/N".
    prompt = _build_prompt(text, prompt_suffix, show_default,
                           default and 'Y/n' or 'y/N')
    while 1:
        try:
            # Write the prompt separately so that we get nice
            # coloring through colorama on Windows
            echo(prompt, nl=False, err=err)
            value = visible_prompt_func('').lower().strip()
        except (KeyboardInterrupt, EOFError):
            raise Abort()
        # Accept y/yes and n/no (case-insensitive); empty input picks
        # the default; anything else re-prompts.
        if value in ('y', 'yes'):
            rv = True
        elif value in ('n', 'no'):
            rv = False
        elif value == '':
            rv = default
        else:
            echo('Error: invalid input', err=err)
            continue
        break
    if abort and not rv:
        raise Abort()
    return rv
def get_terminal_size():
    """Returns the current size of the terminal as tuple in the form
    ``(width, height)`` in columns and rows.
    """
    # If shutil has get_terminal_size() (Python 3.3 and later) use that
    if sys.version_info >= (3, 3):
        import shutil
        shutil_get_terminal_size = getattr(shutil, 'get_terminal_size', None)
        if shutil_get_terminal_size:
            sz = shutil_get_terminal_size()
            return sz.columns, sz.lines

    # Windows: delegate to the colorama/winterm helper when available.
    if get_winterm_size is not None:
        return get_winterm_size()

    def ioctl_gwinsz(fd):
        # TIOCGWINSZ returns (rows, cols) packed as two shorts.
        try:
            import fcntl
            import termios
            cr = struct.unpack(
                'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        except Exception:
            return
        return cr

    # Try stdin/stdout/stderr first, then the controlling terminal, and
    # finally fall back to the LINES/COLUMNS environment variables.
    cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
    if not cr:
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            try:
                cr = ioctl_gwinsz(fd)
            finally:
                os.close(fd)
        except Exception:
            pass
    if not cr or not cr[0] or not cr[1]:
        cr = (os.environ.get('LINES', 25),
              os.environ.get('COLUMNS', DEFAULT_COLUMNS))

    return int(cr[1]), int(cr[0])
def echo_via_pager(text, color=None):
    """This function takes a text and shows it via an environment specific
    pager on stdout.

    .. versionchanged:: 3.0
       Added the `color` flag.

    :param text: the text to page.
    :param color: controls if the pager supports ANSI colors or not. The
                  default is autodetection.
    """
    color = resolve_color_default(color)
    # Coerce arbitrary objects to text so anything can be paged.
    if not isinstance(text, string_types):
        text = text_type(text)
    from ._termui_impl import pager
    # Ensure the paged output ends with a newline.
    return pager(text + '\n', color)
def progressbar(iterable=None, length=None, label=None, show_eta=True,
                show_percent=None, show_pos=False,
                item_show_func=None, fill_char='#', empty_char='-',
                bar_template='%(label)s [%(bar)s] %(info)s',
                info_sep=' ', width=36, file=None, color=None):
    """This function creates an iterable context manager that can be used
    to iterate over something while showing a progress bar. It will
    either iterate over the `iterable` or `length` items (that are counted
    up). While iteration happens, this function will print a rendered
    progress bar to the given `file` (defaults to stdout) and will attempt
    to calculate remaining time and more. By default, this progress bar
    will not be rendered if the file is not a terminal.

    The context manager creates the progress bar. When the context
    manager is entered the progress bar is already displayed. With every
    iteration over the progress bar, the iterable passed to the bar is
    advanced and the bar is updated. When the context manager exits,
    a newline is printed and the progress bar is finalized on screen.

    No printing must happen or the progress bar will be unintentionally
    destroyed.

    Example usage::

        with progressbar(items) as bar:
            for item in bar:
                do_something_with(item)

    Alternatively, if no iterable is specified, one can manually update the
    progress bar through the `update()` method instead of directly
    iterating over the progress bar. The update method accepts the number
    of steps to increment the bar with::

        with progressbar(length=chunks.total_bytes) as bar:
            for chunk in chunks:
                process_chunk(chunk)
                bar.update(chunks.bytes)

    .. versionadded:: 2.0

    .. versionadded:: 4.0
       Added the `color` parameter. Added a `update` method to the
       progressbar object.

    :param iterable: an iterable to iterate over. If not provided the length
                     is required.
    :param length: the number of items to iterate over. By default the
                   progressbar will attempt to ask the iterator about its
                   length, which might or might not work. If an iterable is
                   also provided this parameter can be used to override the
                   length. If an iterable is not provided the progress bar
                   will iterate over a range of that length.
    :param label: the label to show next to the progress bar.
    :param show_eta: enables or disables the estimated time display. This is
                     automatically disabled if the length cannot be
                     determined.
    :param show_percent: enables or disables the percentage display. The
                         default is `True` if the iterable has a length or
                         `False` if not.
    :param show_pos: enables or disables the absolute position display. The
                     default is `False`.
    :param item_show_func: a function called with the current item which
                           can return a string to show the current item
                           next to the progress bar. Note that the current
                           item can be `None`!
    :param fill_char: the character to use to show the filled part of the
                      progress bar.
    :param empty_char: the character to use to show the non-filled part of
                       the progress bar.
    :param bar_template: the format string to use as template for the bar.
                         The parameters in it are ``label`` for the label,
                         ``bar`` for the progress bar and ``info`` for the
                         info section.
    :param info_sep: the separator between multiple info items (eta etc.)
    :param width: the width of the progress bar in characters, 0 means full
                  terminal width
    :param file: the file to write to. If this is not a terminal then
                 only the label is printed.
    :param color: controls if the terminal supports ANSI colors or not. The
                  default is autodetection. This is only needed if ANSI
                  codes are included anywhere in the progress bar output
                  which is not the case by default.
    """
    # Thin wrapper: all rendering logic lives in _termui_impl.ProgressBar.
    from ._termui_impl import ProgressBar
    color = resolve_color_default(color)
    return ProgressBar(iterable=iterable, length=length, show_eta=show_eta,
                       show_percent=show_percent, show_pos=show_pos,
                       item_show_func=item_show_func, fill_char=fill_char,
                       empty_char=empty_char, bar_template=bar_template,
                       info_sep=info_sep, file=file, label=label,
                       width=width, color=color)
def clear():
    """Clears the terminal screen. This will have the effect of clearing
    the whole visible space of the terminal and moving the cursor to the
    top left. This does not do anything if not connected to a terminal.

    .. versionadded:: 2.0
    """
    if not isatty(sys.stdout):
        return
    # If we're on Windows and we don't have colorama available, then we
    # clear the screen by shelling out. Otherwise we can use an escape
    # sequence.
    if WIN:
        os.system('cls')
    else:
        # \033[2J erases the display, \033[1;1H homes the cursor.
        sys.stdout.write('\033[2J\033[1;1H')
def style(text, fg=None, bg=None, bold=None, dim=None, underline=None,
          blink=None, reverse=None, reset=True):
    """Style *text* with ANSI codes and return the new string.

    By default the returned string is self contained: a reset-all code
    is appended so styles do not carry over.  Pass ``reset=False`` to
    compose styles manually.

    Examples::

        click.echo(click.style('Hello World!', fg='green'))
        click.echo(click.style('ATTENTION!', blink=True))
        click.echo(click.style('Some things', reverse=True, fg='cyan'))

    Supported color names: ``black`` (might be a gray), ``red``,
    ``green``, ``yellow`` (might be an orange), ``blue``, ``magenta``,
    ``cyan``, ``white`` (might be light gray) and ``reset`` (reset the
    color code only).

    .. versionadded:: 2.0

    :param text: the string to style with ansi codes.
    :param fg: if provided this will become the foreground color.
    :param bg: if provided this will become the background color.
    :param bold: if provided this will enable or disable bold mode.
    :param dim: if provided this will enable or disable dim mode.  This is
                badly supported.
    :param underline: if provided this will enable or disable underline.
    :param blink: if provided this will enable or disable blinking.
    :param reverse: if provided this will enable or disable inverse
                    rendering (foreground becomes background and the
                    other way round).
    :param reset: by default a reset-all code is added at the end of the
                  string which means that styles do not carry over.  This
                  can be disabled to compose styles.
    """
    parts = []
    if fg:
        try:
            # Foreground SGR codes are 30-37 (38 for "reset").
            parts.append('\033[%dm' % (_ansi_colors.index(fg) + 30))
        except ValueError:
            raise TypeError('Unknown color %r' % fg)
    if bg:
        try:
            # Background SGR codes are 40-47 (48 for "reset").
            parts.append('\033[%dm' % (_ansi_colors.index(bg) + 40))
        except ValueError:
            raise TypeError('Unknown color %r' % bg)
    # Tri-state flags: None leaves the attribute alone, True emits the
    # "on" code, False the matching "off" code.
    for flag, on_code, off_code in ((bold, 1, 22), (dim, 2, 22),
                                    (underline, 4, 24), (blink, 5, 25),
                                    (reverse, 7, 27)):
        if flag is not None:
            parts.append('\033[%dm' % (on_code if flag else off_code))
    parts.append(text)
    if reset:
        parts.append(_ansi_reset_all)
    return ''.join(parts)
def unstyle(text):
    """Removes ANSI styling information from a string. Usually it's not
    necessary to use this function as Click's echo function will
    automatically remove styling if necessary.

    .. versionadded:: 2.0

    :param text: the text to remove style information from.
    """
    # strip_ansi removes all ANSI escape sequences from the string.
    return strip_ansi(text)
def secho(text, file=None, nl=True, err=False, color=None, **styles):
    """This function combines :func:`echo` and :func:`style` into one
    call. As such the following two calls are the same::

        click.secho('Hello World!', fg='green')
        click.echo(click.style('Hello World!', fg='green'))

    All keyword arguments are forwarded to the underlying functions
    depending on which one they go with.

    .. versionadded:: 2.0
    """
    # **styles goes to style(); the named parameters go to echo().
    return echo(style(text, **styles), file=file, nl=nl, err=err, color=color)
def edit(text=None, editor=None, env=None, require_save=True,
         extension='.txt', filename=None):
    r"""Edits the given text in the defined editor. If an editor is given
    (should be the full path to the executable but the regular operating
    system search path is used for finding the executable) it overrides
    the detected editor. Optionally, some environment variables can be
    used. If the editor is closed without changes, `None` is returned. In
    case a file is edited directly the return value is always `None` and
    `require_save` and `extension` are ignored.

    If the editor cannot be opened a :exc:`UsageError` is raised.

    Note for Windows: to simplify cross-platform usage, the newlines are
    automatically converted from POSIX to Windows and vice versa. As such,
    the message here will have ``\n`` as newline markers.

    :param text: the text to edit.
    :param editor: optionally the editor to use. Defaults to automatic
                   detection.
    :param env: environment variables to forward to the editor.
    :param require_save: if this is true, then not saving in the editor
                         will make the return value become `None`.
    :param extension: the extension to tell the editor about. This defaults
                      to `.txt` but changing this might change syntax
                      highlighting.
    :param filename: if provided it will edit this file instead of the
                     provided text contents. It will not use a temporary
                     file as an indirection in that case.
    """
    from ._termui_impl import Editor
    editor = Editor(editor=editor, env=env, require_save=require_save,
                    extension=extension)
    # Text mode round-trips through a temp file; file mode edits in place.
    if filename is None:
        return editor.edit(text)
    editor.edit_file(filename)
def launch(url, wait=False, locate=False):
    """This function launches the given URL (or filename) in the default
    viewer application for this file type. If this is an executable, it
    might launch the executable in a new session. The return value is
    the exit code of the launched application. Usually, ``0`` indicates
    success.

    Examples::

        click.launch('http://click.pocoo.org/')
        click.launch('/my/downloaded/file', locate=True)

    .. versionadded:: 2.0

    :param url: URL or filename of the thing to launch.
    :param wait: waits for the program to stop.
    :param locate: if this is set to `True` then instead of launching the
                   application associated with the URL it will attempt to
                   launch a file manager with the file located. This
                   might have weird effects if the URL does not point to
                   the filesystem.
    """
    # Platform-specific dispatch lives in _termui_impl.open_url.
    from ._termui_impl import open_url
    return open_url(url, wait=wait, locate=locate)
# If this is provided, getchar() calls into this instead. This is used
# for unittesting purposes.
_getchar = None


def getchar(echo=False):
    """Fetches a single character from the terminal and returns it. This
    will always return a unicode character and under certain rare
    circumstances this might return more than one character. The
    situations which more than one character is returned is when for
    whatever reason multiple characters end up in the terminal buffer or
    standard input was not actually a terminal.

    Note that this will always read from the terminal, even if something
    is piped into the standard input.

    .. versionadded:: 2.0

    :param echo: if set to `True`, the character read will also show up on
                 the terminal. The default is to not show it.
    """
    f = _getchar
    if f is None:
        # Imported lazily; the _getchar test hook above takes precedence
        # when it is set.
        from ._termui_impl import getchar as f
    return f(echo)
def pause(info='Press any key to continue ...', err=False):
    """This command stops execution and waits for the user to press any
    key to continue. This is similar to the Windows batch "pause"
    command. If the program is not run through a terminal, this command
    will instead do nothing.

    .. versionadded:: 2.0

    .. versionadded:: 4.0
       Added the `err` parameter.

    :param info: the info string to print before pausing.
    :param err: if set to message goes to ``stderr`` instead of
                ``stdout``, the same as with echo.
    """
    # Only meaningful when both ends are attached to a real terminal.
    if not isatty(sys.stdin) or not isatty(sys.stdout):
        return
    try:
        if info:
            echo(info, nl=False, err=err)
        try:
            getchar()
        except (KeyboardInterrupt, EOFError):
            pass
    finally:
        # Always terminate the info line, even when interrupted.
        if info:
            echo(err=err)
| mit |
chengdh/openerp-ktv | openerp/addons/mrp_repair/wizard/cancel_repair.py | 9 | 3890 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv,fields
from tools.translate import _
class repair_cancel(osv.osv_memory):
    # Transient wizard that cancels an mrp.repair order after checking its
    # invoicing state; also rewrites its own form view to a confirmation
    # dialog when the order is not yet invoiced.
    _name = 'mrp.repair.cancel'
    _description = 'Cancel Repair'

    def cancel_repair(self, cr, uid, ids, context=None):
        """ Cancels the repair
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        @return: ir.actions dict closing the wizard window
        """
        if context is None:
            context = {}
        # The repair order being cancelled is the active record the wizard
        # was launched from.
        record_id = context and context.get('active_id', False) or False
        # NOTE(review): ``assert`` is stripped under ``python -O``; an
        # explicit raise would be more robust, kept as-is here.
        assert record_id, _('Active ID is not Found')
        repair_order_obj = self.pool.get('mrp.repair')
        # NOTE(review): repair_line_obj is fetched but never used below.
        repair_line_obj = self.pool.get('mrp.repair.line')
        repair_order = repair_order_obj.browse(cr, uid, record_id, context=context)
        # Only orders that are already invoiced, or that will never be
        # invoiced, may be cancelled directly.
        if repair_order.invoiced or repair_order.invoice_method == 'none':
            repair_order_obj.action_cancel(cr, uid, [record_id], context=context)
        else:
            raise osv.except_osv(_('Warning!'),_('Repair order is not invoiced.'))
        return {'type': 'ir.actions.act_window_close'}

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """ Changes the view dynamically
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param context: A standard dictionary
        @return: New arch of view.
        """
        if context is None:
            context = {}
        res = super(repair_cancel, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
        record_id = context and context.get('active_id', False) or False
        active_model = context.get('active_model')
        # Only customize the view when launched from a repair order.
        if not record_id or (active_model and active_model != 'mrp.repair'):
            return res
        repair_order = self.pool.get('mrp.repair').browse(cr, uid, record_id, context=context)
        # For a not-yet-invoiced order, replace the form with a simple
        # yes/no confirmation dialog.
        if not repair_order.invoiced:
            res['arch'] = """ <form string="Cancel Repair" colspan="4">
                            <group col="2" colspan="2">
                                <label string="Do you want to continue?" colspan="4"/>
                                <separator colspan="4"/>
                                <button icon="gtk-stop" special="cancel" string="_No" readonly="0"/>
                                <button name="cancel_repair" string="_Yes" type="object" icon="gtk-ok"/>
                            </group>
                         </form>
                      """
        return res

repair_cancel()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
campagnola/acq4 | acq4/pyqtgraph/widgets/DiffTreeWidget.py | 7 | 5928 | # -*- coding: utf-8 -*-
from ..Qt import QtGui, QtCore
from ..pgcollections import OrderedDict
from .DataTreeWidget import DataTreeWidget
from .. import functions as fn
import types, traceback
import numpy as np
__all__ = ['DiffTreeWidget']
class DiffTreeWidget(QtGui.QWidget):
    """
    Widget for displaying differences between hierarchical python data
    structures (eg, nested dicts, lists, and arrays).

    Two DataTreeWidgets are shown side by side; entries that differ in
    type, description, keys, length, or array content are highlighted
    with a red background in both trees.
    """
    def __init__(self, parent=None, a=None, b=None):
        QtGui.QWidget.__init__(self, parent)
        self.layout = QtGui.QHBoxLayout()
        self.setLayout(self.layout)
        # Left tree displays *a*, right tree displays *b*.
        self.trees = [DataTreeWidget(self), DataTreeWidget(self)]
        for t in self.trees:
            self.layout.addWidget(t)
        if a is not None:
            self.setData(a, b)

    def setData(self, a, b):
        """
        Set the data to be compared in this widget.
        """
        self.data = (a, b)
        self.trees[0].setData(a)
        self.trees[1].setData(b)
        return self.compare(a, b)

    def compare(self, a, b, path=()):
        """
        Recursively compare structure *a* to structure *b*, coloring
        mismatched entries in both trees.

        *path* is the tuple of keys/indexes leading to the current node.
        Currently returns None; highlighting is done as a side effect.
        """
        bad = (255, 200, 200)

        # generate typestr, desc for each object and flag mismatches
        typeA, descA, childsA, _ = self.trees[0].parse(a)
        typeB, descB, childsB, _ = self.trees[1].parse(b)
        if typeA != typeB:
            self.setColor(path, 1, bad)
        if descA != descB:
            self.setColor(path, 2, bad)

        # dict: compare keys, then values where keys match
        if isinstance(a, dict) and isinstance(b, dict):
            keysA = set(a.keys())
            keysB = set(b.keys())
            for key in keysA - keysB:
                self.setColor(path+(key,), 0, bad, tree=0)
            for key in keysB - keysA:
                self.setColor(path+(key,), 0, bad, tree=1)
            for key in keysA & keysB:
                self.compare(a[key], b[key], path+(key,))

        # list/tuple: compare element-by-element; extra elements are flagged
        elif isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
            for i in range(max(len(a), len(b))):
                if len(a) <= i:
                    self.setColor(path+(i,), 0, bad, tree=1)
                elif len(b) <= i:
                    self.setColor(path+(i,), 0, bad, tree=0)
                else:
                    self.compare(a[i], b[i], path+(i,))

        # array: compare elementwise for same shape
        elif isinstance(a, np.ndarray) and isinstance(b, np.ndarray) and a.shape == b.shape:
            tableNodes = [tree.nodes[path].child(0) for tree in self.trees]
            if a.dtype.fields is None and b.dtype.fields is None:
                eq = self.compareArrays(a, b)
                if not np.all(eq):
                    for n in tableNodes:
                        n.setBackground(0, fn.mkBrush(bad))
            else:
                # record arrays: compare each field separately
                if a.dtype == b.dtype:
                    for i, k in enumerate(a.dtype.fields.keys()):
                        eq = self.compareArrays(a[k], b[k])
                        if not np.all(eq):
                            for n in tableNodes:
                                n.setBackground(0, fn.mkBrush(bad))

    def compareArrays(self, a, b):
        """Return a boolean array that is True where *a* and *b* agree.

        NaN (and the value produced by casting NaN to int) is treated as
        equal to NaN.
        """
        intnan = -9223372036854775808  # happens when np.nan is cast to int
        anans = np.isnan(a) | (a == intnan)
        bnans = np.isnan(b) | (b == intnan)
        eq = anans == bnans
        mask = ~anans
        # BUG FIX: np.allclose returns a single scalar, which previously was
        # broadcast over the whole mask (marking all elements equal or all
        # unequal together).  np.isclose compares elementwise as intended.
        eq[mask] = np.isclose(a[mask], b[mask])
        return eq

    def setColor(self, path, column, color, tree=None):
        """Set the background of the node at *path* / *column*.

        If *tree* is None both trees are colored, otherwise only the
        tree with that index (0=left, 1=right).
        """
        brush = fn.mkBrush(color)
        if tree is None:
            trees = self.trees
        else:
            trees = [self.trees[tree]]
        for tree in trees:
            item = tree.nodes[path]
            item.setBackground(column, brush)

    def _compare(self, a, b):
        """
        Strictly compare structure *a* to structure *b*, raising
        AssertionError on the first mismatch.

        BUG FIX: this method previously referenced undefined names
        ``info``/``expect`` and a nonexistent ``compare_results`` method,
        so any call raised NameError.
        """
        # Check test structures are the same
        assert type(a) is type(b)
        if hasattr(a, '__len__'):
            assert len(a) == len(b)

        if isinstance(a, dict):
            for k in a:
                assert k in b
            for k in b:
                assert k in a
                self._compare(a[k], b[k])
        elif isinstance(a, list):
            for i in range(len(a)):
                self._compare(a[i], b[i])
        elif isinstance(a, np.ndarray):
            assert a.shape == b.shape
            assert a.dtype == b.dtype
            if a.dtype.fields is None:
                intnan = -9223372036854775808  # happens when np.nan is cast to int
                anans = np.isnan(a) | (a == intnan)
                bnans = np.isnan(b) | (b == intnan)
                assert np.all(anans == bnans)
                mask = ~anans
                assert np.allclose(a[mask], b[mask])
            else:
                for k in a.dtype.fields.keys():
                    self._compare(a[k], b[k])
        else:
            try:
                assert a == b
            except Exception:
                raise NotImplementedError(
                    "Cannot compare objects of type %s" % type(a))
| mit |
eharney/nova | nova/tests/fake_notifier.py | 29 | 2433 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import anyjson
from oslo import messaging
from nova import rpc
# Module-level record of every notification emitted through FakeNotifier;
# tests inspect this list and clear it with reset().
NOTIFICATIONS = []
def reset():
    """Discard all recorded notifications, keeping the list object itself."""
    NOTIFICATIONS[:] = []
# Lightweight stand-in for a notification message; mirrors the fields tests
# need to assert on.
FakeMessage = collections.namedtuple('Message',
                                     ['publisher_id', 'priority',
                                      'event_type', 'payload'])
class FakeNotifier(object):
    """Test double for ``messaging.Notifier`` that records every emitted
    message in the module-level ``NOTIFICATIONS`` list instead of sending
    it anywhere."""

    def __init__(self, transport, publisher_id, serializer=None):
        self.transport = transport
        self.publisher_id = publisher_id
        self._serializer = serializer or messaging.serializer.NoOpSerializer()

        # Expose one emit method per priority level, e.g. notifier.info(...).
        for level in ('debug', 'info', 'warn', 'error', 'critical'):
            setattr(self, level,
                    functools.partial(self._notify, level.upper()))

    def prepare(self, publisher_id=None):
        """Return a copy of this notifier, optionally rebinding the
        publisher id (mirrors the real Notifier API)."""
        effective_id = self.publisher_id if publisher_id is None else publisher_id
        return self.__class__(self.transport, effective_id,
                              serializer=self._serializer)

    def _notify(self, priority, ctxt, event_type, payload):
        payload = self._serializer.serialize_entity(ctxt, payload)
        # NOTE(sileht): simulate the kombu serializer
        # this permit to raise an exception if something have not
        # been serialized correctly
        anyjson.serialize(payload)
        NOTIFICATIONS.append(
            FakeMessage(self.publisher_id, priority, event_type, payload))
def stub_notifier(stubs):
    """Stub out oslo messaging's Notifier class (and nova's global
    rpc.NOTIFIER, if one exists) with FakeNotifier instances."""
    stubs.Set(messaging, 'Notifier', FakeNotifier)
    if rpc.NOTIFIER:
        serializer = getattr(rpc.NOTIFIER, '_serializer', None)
        fake = FakeNotifier(rpc.NOTIFIER.transport,
                            rpc.NOTIFIER.publisher_id,
                            serializer=serializer)
        stubs.Set(rpc, 'NOTIFIER', fake)
shtouff/django | tests/admin_docs/models.py | 82 | 1592 | """
Models for testing various aspects of the django.contrib.admindocs app
"""
from django.db import models
class Company(models.Model):
    # Minimal model used as a ForeignKey target from Person.
    name = models.CharField(max_length=200)
class Group(models.Model):
    # Minimal model used as the ManyToMany target from Person.
    name = models.CharField(max_length=200)
class Family(models.Model):
    # Minimal model used as a nullable ForeignKey target from Person.
    last_name = models.CharField(max_length=200)
class Person(models.Model):
    """
    Stores information about a person, related to :model:`myapp.Company`.

    **Notes**

    Use ``save_changes()`` when saving this object.

    ``company``

        Field storing :model:`myapp.Company` where the person works.

    (DESCRIPTION)

    .. raw:: html
        :file: admin_docs/evilfile.txt

    .. include:: admin_docs/evilfile.txt
    """
    # NOTE: the docstring above is itself a test fixture -- admindocs tests
    # exercise how it is rendered (including the ``.. raw::`` /
    # ``.. include::`` directives), so its exact text must not change.
    first_name = models.CharField(max_length=200, help_text="The person's first name")
    last_name = models.CharField(max_length=200, help_text="The person's last name")
    company = models.ForeignKey(Company, help_text="place of work")
    family = models.ForeignKey(Family, related_name='+', null=True)
    groups = models.ManyToManyField(Group, help_text="has membership")

    def _get_full_name(self):
        # Internal helper: combines first and last name.
        return "%s %s" % (self.first_name, self.last_name)

    # The stub methods below exist only so admindocs has methods to
    # introspect and list; they intentionally do nothing.
    def add_image(self):
        pass

    def delete_image(self):
        pass

    def save_changes(self):
        pass

    def set_status(self):
        pass

    def get_full_name(self):
        """
        Get the full name of the person
        """
        return self._get_full_name()

    def get_status_count(self):
        return 0

    def get_groups_list(self):
        return []
| bsd-3-clause |
atdaemon/pip | pip/download.py | 2 | 32340 | from __future__ import absolute_import
import cgi
import email.utils
import getpass
import json
import logging
import mimetypes
import os
import platform
import re
import shutil
import sys
import tempfile
try:
import ssl # noqa
HAS_TLS = True
except ImportError:
HAS_TLS = False
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
import pip
from pip.exceptions import InstallationError, HashMismatch
from pip.models import PyPI
from pip.utils import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
ARCHIVE_EXTENSIONS, consume, call_subprocess)
from pip.utils.encoding import auto_decode
from pip.utils.filesystem import check_path_owner
from pip.utils.logging import indent_log
from pip.utils.setuptools_build import SETUPTOOLS_SHIM
from pip.utils.glibc import libc_ver
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
from pip.locations import write_delete_marker_file
from pip.vcs import vcs
from pip._vendor import requests, six
from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
from pip._vendor.requests.utils import get_netrc_auth
from pip._vendor.requests.structures import CaseInsensitiveDict
from pip._vendor.requests.packages import urllib3
from pip._vendor.cachecontrol import CacheControlAdapter
from pip._vendor.cachecontrol.caches import FileCache
from pip._vendor.lockfile import LockError
from pip._vendor.six.moves import xmlrpc_client
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url',
'unpack_http_url', 'unpack_url']
logger = logging.getLogger(__name__)
def user_agent():
    """
    Return a string representing the user agent.

    The result is "pip/<version> <json>", where <json> is a compact
    sorted-keys JSON dump of installer, python implementation, OS/distro,
    CPU, and OpenSSL details.
    """
    data = {
        "installer": {"name": "pip", "version": pip.__version__},
        "python": platform.python_version(),
        "implementation": {
            "name": platform.python_implementation(),
        },
    }

    impl = data["implementation"]
    if impl["name"] == 'PyPy':
        version_info = sys.pypy_version_info
        if version_info.releaselevel == 'final':
            version_info = version_info[:3]
        impl["version"] = ".".join(str(part) for part in version_info)
    elif impl["name"] in ('CPython', 'Jython', 'IronPython'):
        # For Jython/IronPython this is a complete guess.
        impl["version"] = platform.python_version()

    if sys.platform.startswith("linux"):
        from pip._vendor import distro
        distro_infos = {key: value for key, value in
                        zip(["name", "version", "id"],
                            distro.linux_distribution())
                        if value}
        libc = {key: value for key, value in
                zip(["lib", "version"], libc_ver())
                if value}
        if libc:
            distro_infos["libc"] = libc
        if distro_infos:
            data["distro"] = distro_infos

    if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
        data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]}

    if platform.system():
        data.setdefault("system", {})["name"] = platform.system()
    if platform.release():
        data.setdefault("system", {})["release"] = platform.release()
    if platform.machine():
        data["cpu"] = platform.machine()

    # Python 2.6 doesn't have ssl.OPENSSL_VERSION.
    if HAS_TLS and sys.version_info[:2] > (2, 6):
        data["openssl_version"] = ssl.OPENSSL_VERSION

    return "{data[installer][name]}/{data[installer][version]} {json}".format(
        data=data,
        json=json.dumps(data, separators=(",", ":"), sort_keys=True),
    )
class MultiDomainBasicAuth(AuthBase):
    """requests auth handler that remembers basic-auth credentials per
    netloc (host[:port]) and, on a 401 response, optionally prompts the
    user for new credentials and retries the request once."""

    def __init__(self, prompting=True):
        # prompting: whether handle_401 may interactively ask the user
        # for credentials.
        self.prompting = prompting
        # netloc -> (username, password)
        self.passwords = {}

    def __call__(self, req):
        parsed = urllib_parse.urlparse(req.url)

        # Get the netloc without any embedded credentials
        netloc = parsed.netloc.rsplit("@", 1)[-1]

        # Set the url of the request to the url without any credentials
        req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])

        # Use any stored credentials that we have for this netloc
        username, password = self.passwords.get(netloc, (None, None))

        # Extract credentials embedded in the url if we have none stored
        if username is None:
            username, password = self.parse_credentials(parsed.netloc)

        # Get creds from netrc if we still don't have them
        if username is None and password is None:
            netrc_auth = get_netrc_auth(req.url)
            username, password = netrc_auth if netrc_auth else (None, None)

        if username or password:
            # Store the username and password
            self.passwords[netloc] = (username, password)

            # Send the basic auth with this request
            req = HTTPBasicAuth(username or "", password or "")(req)

        # Attach a hook to handle 401 responses
        req.register_hook("response", self.handle_401)

        return req

    def handle_401(self, resp, **kwargs):
        # We only care about 401 responses, anything else we want to just
        # pass through the actual response
        if resp.status_code != 401:
            return resp

        # We are not able to prompt the user so simply return the response
        if not self.prompting:
            return resp

        parsed = urllib_parse.urlparse(resp.url)

        # Prompt the user for a new username and password
        username = six.moves.input("User for %s: " % parsed.netloc)
        password = getpass.getpass("Password: ")

        # Store the new username and password to use for future requests
        if username or password:
            self.passwords[parsed.netloc] = (username, password)

        # Consume content and release the original connection to allow our new
        # request to reuse the same one.
        resp.content
        resp.raw.release_conn()

        # Add our new username and password to the request
        req = HTTPBasicAuth(username or "", password or "")(resp.request)

        # Send our new request
        new_resp = resp.connection.send(req, **kwargs)
        new_resp.history.append(resp)

        return new_resp

    def parse_credentials(self, netloc):
        """Extract (username, password) embedded in a netloc like
        ``user:pass@host``; returns (None, None) when none are present."""
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                return userinfo.split(":", 1)
            return userinfo, None
        return None, None
class LocalFSAdapter(BaseAdapter):
    """Transport adapter that serves ``file://`` URLs directly from the
    local filesystem, synthesizing a requests Response from os.stat and
    an open file handle (404 when the path does not exist)."""

    def send(self, request, stream=None, timeout=None, verify=None, cert=None,
             proxies=None):
        pathname = url_to_path(request.url)

        resp = Response()
        resp.status_code = 200
        resp.url = request.url

        try:
            stats = os.stat(pathname)
        except OSError as exc:
            # Missing/unreadable file becomes a 404; the OSError is kept
            # as the raw body.
            resp.status_code = 404
            resp.raw = exc
        else:
            modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
            content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
            resp.headers = CaseInsensitiveDict({
                "Content-Type": content_type,
                "Content-Length": stats.st_size,
                "Last-Modified": modified,
            })

            # Wire the open file up as the raw stream so resp.close()
            # releases the file handle.
            resp.raw = open(pathname, "rb")
            resp.close = resp.raw.close

        return resp

    def close(self):
        pass
class SafeFileCache(FileCache):
    """
    A file based cache which is safe to use even when the target directory may
    not be accessible or writable.
    """

    def __init__(self, *args, **kwargs):
        super(SafeFileCache, self).__init__(*args, **kwargs)

        # Check to ensure that the directory containing our cache directory
        # is owned by the user current executing pip. If it does not exist
        # we will check the parent directory until we find one that does exist.
        # If it is not owned by the user executing pip then we will disable
        # the cache and log a warning.
        if not check_path_owner(self.directory):
            logger.warning(
                "The directory '%s' or its parent directory is not owned by "
                "the current user and the cache has been disabled. Please "
                "check the permissions and owner of that directory. If "
                "executing pip with sudo, you may want sudo's -H flag.",
                self.directory,
            )

            # Set our directory to None to disable the Cache
            self.directory = None

    def _suppressed(self, method, *args, **kwargs):
        """Call *method* while silencing cache-access failures.

        When the cache is disabled (directory is None) this is a no-op.
        Errors reading or writing the cache (LockError, OSError, IOError)
        are deliberately swallowed: failing to cache must never break the
        request being processed.  Returns None in every failure case,
        matching the previous per-method behavior.
        """
        if self.directory is None:
            return None
        try:
            return method(*args, **kwargs)
        except (LockError, OSError, IOError):
            return None

    def get(self, *args, **kwargs):
        return self._suppressed(super(SafeFileCache, self).get,
                                *args, **kwargs)

    def set(self, *args, **kwargs):
        return self._suppressed(super(SafeFileCache, self).set,
                                *args, **kwargs)

    def delete(self, *args, **kwargs):
        return self._suppressed(super(SafeFileCache, self).delete,
                                *args, **kwargs)
class InsecureHTTPAdapter(HTTPAdapter):
    """HTTPAdapter that disables TLS certificate verification by forcing
    CERT_NONE on every connection; mounted by PipSession for plain
    ``http://`` URLs and explicitly-trusted insecure hosts."""

    def cert_verify(self, conn, url, verify, cert):
        conn.cert_reqs = 'CERT_NONE'
        conn.ca_certs = None
class PipSession(requests.Session):
    """requests Session preconfigured for pip: pip User-Agent header,
    multi-domain basic auth, retry policy, optional on-disk response cache
    for https origins, ``file://`` support, and non-verifying adapters for
    insecure hosts."""

    # Default request timeout; may be overridden per session, and
    # request() below applies it to every request.
    timeout = None

    def __init__(self, *args, **kwargs):
        retries = kwargs.pop("retries", 0)
        cache = kwargs.pop("cache", None)
        insecure_hosts = kwargs.pop("insecure_hosts", [])

        super(PipSession, self).__init__(*args, **kwargs)

        # Attach our User Agent to the request
        self.headers["User-Agent"] = user_agent()

        # Attach our Authentication handler to the session
        self.auth = MultiDomainBasicAuth()

        # Create our urllib3.Retry instance which will allow us to customize
        # how we handle retries.
        retries = urllib3.Retry(
            # Set the total number of retries that a particular request can
            # have.
            total=retries,

            # A 503 error from PyPI typically means that the Fastly -> Origin
            # connection got interrupted in some way. A 503 error in general
            # is typically considered a transient error so we'll go ahead and
            # retry it.
            status_forcelist=[503],

            # Add a small amount of back off between failed requests in
            # order to prevent hammering the service.
            backoff_factor=0.25,
        )

        # We want to _only_ cache responses on securely fetched origins. We do
        # this because we can't validate the response of an insecurely fetched
        # origin, and we don't want someone to be able to poison the cache and
        # require manual eviction from the cache to fix it.
        if cache:
            secure_adapter = CacheControlAdapter(
                cache=SafeFileCache(cache, use_dir_lock=True),
                max_retries=retries,
            )
        else:
            secure_adapter = HTTPAdapter(max_retries=retries)

        # Our Insecure HTTPAdapter disables HTTPS validation. It does not
        # support caching (see above) so we'll use it for all http:// URLs as
        # well as any https:// host that we've marked as ignoring TLS errors
        # for.
        insecure_adapter = InsecureHTTPAdapter(max_retries=retries)

        self.mount("https://", secure_adapter)
        self.mount("http://", insecure_adapter)

        # Enable file:// urls
        self.mount("file://", LocalFSAdapter())

        # We want to use a non-validating adapter for any requests which are
        # deemed insecure.
        for host in insecure_hosts:
            self.mount("https://{0}/".format(host), insecure_adapter)

    def request(self, method, url, *args, **kwargs):
        # Allow setting a default timeout on a session
        kwargs.setdefault("timeout", self.timeout)

        # Dispatch the actual request
        return super(PipSession, self).request(method, url, *args, **kwargs)
def get_file_content(url, comes_from=None, session=None):
    """Gets the content of a file; it may be a filename, file: URL, or
    http: URL.  Returns (location, content).  Content is unicode.

    :param url: File path or url.
    :param comes_from: Origin description of requirements.
    :param session: Instance of pip.download.PipSession.
    :raises TypeError: if no session is given.
    :raises InstallationError: for a local file referenced from a remote
        requirements file, or an unreadable local file.
    """
    if session is None:
        raise TypeError(
            "get_file_content() missing 1 required keyword argument: 'session'"
        )

    match = _scheme_re.search(url)
    if match:
        scheme = match.group(1).lower()
        # A remote (http) requirements file must not pull in local files.
        if (scheme == 'file' and comes_from and
                comes_from.startswith('http')):
            raise InstallationError(
                'Requirements file %s references URL %s, which is local'
                % (comes_from, url))
        if scheme == 'file':
            # Turn the file: URL into a plain local path, then fall through
            # to the open() below.
            path = url.split(':', 1)[1]
            path = path.replace('\\', '/')
            # Handle Windows drive letters written as e.g. "/C|/path".
            match = _url_slash_drive_re.match(path)
            if match:
                path = match.group(1) + ':' + path.split('|', 1)[1]
            path = urllib_parse.unquote(path)
            if path.startswith('/'):
                # Collapse multiple leading slashes to a single one.
                path = '/' + path.lstrip('/')
            url = path
        else:
            # FIXME: catch some errors
            resp = session.get(url)
            resp.raise_for_status()
            return resp.url, resp.text

    try:
        with open(url, 'rb') as f:
            content = auto_decode(f.read())
    except IOError as exc:
        raise InstallationError(
            'Could not open requirements file: %s' % str(exc)
        )
    return url, content
# Matches an explicit URL scheme prefix we handle in get_file_content().
_scheme_re = re.compile(r'^(http|https|file):', re.I)
# Matches a Windows drive letter written with a pipe, e.g. "/C|/path".
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
    """Return True if *name* looks like a URL with a scheme pip
    understands (http, https, file, ftp, or any registered VCS scheme)."""
    scheme, sep, _rest = name.partition(':')
    if not sep:
        return False
    return scheme.lower() in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
    """
    Convert a file: URL to a local filesystem path.
    """
    assert url.startswith('file:'), (
        "You can only turn file: urls into filenames (not %r)" % url)

    split = urllib_parse.urlsplit(url)

    # A non-empty netloc indicates a UNC path; re-attach the share notation.
    share = '\\\\' + split.netloc if split.netloc else ''

    return urllib_request.url2pathname(share + split.path)
def path_to_url(path):
    """
    Convert a path to a file: URL.  The path will be made absolute and have
    quoted path parts.
    """
    normalized = os.path.normpath(os.path.abspath(path))
    return urllib_parse.urljoin('file:',
                                urllib_request.pathname2url(normalized))
def is_archive_file(name):
    """Return True if `name` is a considered as an archive file."""
    # NOTE(review): splitext comes from pip.utils -- presumably it treats
    # compound extensions like ".tar.gz" as one unit; confirm against that
    # helper.
    ext = splitext(name)[1].lower()
    # Idiom fix: return the membership test directly instead of
    # if/return True/return False.
    return ext in ARCHIVE_EXTENSIONS
def unpack_vcs_link(link, location):
    """Check out the VCS URL *link* into *location* using the backend that
    claims its scheme."""
    _get_used_vcs_backend(link).unpack(location)
def _get_used_vcs_backend(link):
    """Return an instantiated VCS backend whose schemes include *link*'s
    scheme, or None when no registered backend matches."""
    for candidate in vcs.backends:
        if link.scheme in candidate.schemes:
            return candidate(link.url)
    return None
def is_vcs_url(link):
    """Return True when some registered VCS backend claims *link*'s scheme."""
    backend = _get_used_vcs_backend(link)
    return bool(backend)
def is_file_url(link):
    """Return True if *link* uses the ``file:`` scheme (case-insensitive)."""
    url = link.url.lower()
    return url.startswith('file:')
def is_dir_url(link):
    """Return whether a file:// Link points to a directory.

    ``link`` must not have any other scheme but file://. Call is_file_url()
    first.
    """
    return os.path.isdir(url_to_path(link.url_without_fragment))
def _progress_indicator(iterable, *args, **kwargs):
    # No-op progress indicator used when progress display is disabled; the
    # extra arguments mirror the real progress-bar ``iter`` signature used
    # in _download_url and are ignored.
    return iterable
def _download_url(resp, link, content_file, hashes):
    """Stream the response body for *link* into *content_file*, showing
    progress where appropriate and verifying *hashes* (if any) over the
    downloaded chunks as they are written."""
    try:
        total_length = int(resp.headers['content-length'])
    except (ValueError, KeyError, TypeError):
        # Missing or malformed header: treat the length as unknown.
        total_length = 0

    cached_resp = getattr(resp, "from_cache", False)

    # Decide whether to display progress: never for quiet logging or cached
    # responses; otherwise for large (>40kB) or unknown-length downloads.
    if logger.getEffectiveLevel() > logging.INFO:
        show_progress = False
    elif cached_resp:
        show_progress = False
    elif total_length > (40 * 1000):
        show_progress = True
    elif not total_length:
        show_progress = True
    else:
        show_progress = False

    show_url = link.show_url

    def resp_read(chunk_size):
        # Generator over raw (undecoded) body chunks.
        try:
            # Special case for urllib3.
            for chunk in resp.raw.stream(
                    chunk_size,
                    # We use decode_content=False here because we don't
                    # want urllib3 to mess with the raw bytes we get
                    # from the server. If we decompress inside of
                    # urllib3 then we cannot verify the checksum
                    # because the checksum will be of the compressed
                    # file. This breakage will only occur if the
                    # server adds a Content-Encoding header, which
                    # depends on how the server was configured:
                    # - Some servers will notice that the file isn't a
                    #   compressible file and will leave the file alone
                    #   and with an empty Content-Encoding
                    # - Some servers will notice that the file is
                    #   already compressed and will leave the file
                    #   alone and will add a Content-Encoding: gzip
                    #   header
                    # - Some servers won't notice anything at all and
                    #   will take a file that's already been compressed
                    #   and compress it again and set the
                    #   Content-Encoding: gzip header
                    #
                    # By setting this not to decode automatically we
                    # hope to eliminate problems with the second case.
                    decode_content=False):
                yield chunk
        except AttributeError:
            # Standard file-like object.
            while True:
                chunk = resp.raw.read(chunk_size)
                if not chunk:
                    break
                yield chunk

    def written_chunks(chunks):
        # Write each chunk to content_file as it streams past, re-yielding
        # it so the hash checker can consume the same pass.
        for chunk in chunks:
            content_file.write(chunk)
            yield chunk

    progress_indicator = _progress_indicator

    # Hide the full (possibly credential-bearing) URL for PyPI downloads.
    if link.netloc == PyPI.netloc:
        url = show_url
    else:
        url = link.url_without_fragment

    if show_progress:  # We don't show progress on cached responses
        if total_length:
            logger.info("Downloading %s (%s)", url, format_size(total_length))
            progress_indicator = DownloadProgressBar(max=total_length).iter
        else:
            logger.info("Downloading %s", url)
            progress_indicator = DownloadProgressSpinner().iter
    elif cached_resp:
        logger.info("Using cached %s", url)
    else:
        logger.info("Downloading %s", url)

    logger.debug('Downloading from URL %s', link)

    downloaded_chunks = written_chunks(
        progress_indicator(
            resp_read(CONTENT_CHUNK_SIZE),
            CONTENT_CHUNK_SIZE
        )
    )
    # The generator chain above is lazy: one of the two consumers below must
    # drain it for anything to actually be downloaded/written.
    if hashes:
        hashes.check_against_chunks(downloaded_chunks)
    else:
        consume(downloaded_chunks)
def _copy_file(filename, location, link):
    """Copy *filename* into *location* under the link's filename.

    If the destination already exists, interactively ask the user whether
    to ignore, overwrite, back up the old file, or abort.
    """
    copy = True
    download_location = os.path.join(location, link.filename)
    if os.path.exists(download_location):
        # BUG FIX: the prompt previously read "(a)abort"; the accepted key
        # is 'a', so the parenthesized letter must be "(a)bort".
        response = ask_path_exists(
            'The file %s exists. (i)gnore, (w)ipe, (b)ackup, (a)bort' %
            display_path(download_location), ('i', 'w', 'b', 'a'))
        if response == 'i':
            copy = False
        elif response == 'w':
            logger.warning('Deleting %s', display_path(download_location))
            os.remove(download_location)
        elif response == 'b':
            dest_file = backup_dir(download_location)
            logger.warning(
                'Backing up %s to %s',
                display_path(download_location),
                display_path(dest_file),
            )
            shutil.move(download_location, dest_file)
        elif response == 'a':
            sys.exit(-1)
    if copy:
        shutil.copy(filename, download_location)
        logger.info('Saved %s', display_path(download_location))
def unpack_http_url(link, location, download_dir=None,
                    session=None, hashes=None):
    """Fetch the archive behind *link* (reusing a copy already present in
    *download_dir*, when given) and unpack it into *location*.

    :param hashes: optional Hashes the download must match.
    :raises TypeError: if no session is provided.
    """
    if session is None:
        raise TypeError(
            "unpack_http_url() missing 1 required keyword argument: 'session'"
        )

    temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
    try:
        # If a download dir is specified, is the file already downloaded there?
        already_downloaded_path = None
        if download_dir:
            already_downloaded_path = _check_download_dir(link,
                                                          download_dir,
                                                          hashes)

        if already_downloaded_path:
            from_path = already_downloaded_path
            content_type = mimetypes.guess_type(from_path)[0]
        else:
            # let's download to a tmp dir
            from_path, content_type = _download_http_url(link,
                                                         session,
                                                         temp_dir,
                                                         hashes)

        # unpack the archive to the build dir location. even when only
        # downloading archives, they have to be unpacked to parse dependencies
        unpack_file(from_path, location, content_type, link)

        # a download dir is specified; let's copy the archive there
        if download_dir and not already_downloaded_path:
            _copy_file(from_path, download_dir, link)

        if not already_downloaded_path:
            os.unlink(from_path)
    finally:
        # ROBUSTNESS FIX: previously the scratch directory was only removed
        # on success, leaking temp dirs when download or unpack raised.
        rmtree(temp_dir)
def unpack_file_url(link, location, download_dir=None, hashes=None):
    """Unpack link into location.

    If download_dir is provided and link points to a file, make a copy
    of the link file inside download_dir.

    :param hashes: optional Hashes that a file link must match; directory
        links are never hash-checked.
    """
    link_path = url_to_path(link.url_without_fragment)

    # If it's a url to a local directory
    if is_dir_url(link):
        # Replace any previous contents of the build location wholesale.
        if os.path.isdir(location):
            rmtree(location)
        shutil.copytree(link_path, location, symlinks=True)
        if download_dir:
            logger.info('Link is a directory, ignoring download_dir')
        return

    # If --require-hashes is off, `hashes` is either empty, the
    # link's embedded hash, or MissingHashes; it is required to
    # match. If --require-hashes is on, we are satisfied by any
    # hash in `hashes` matching: a URL-based or an option-based
    # one; no internet-sourced hash will be in `hashes`.
    if hashes:
        hashes.check_against_path(link_path)

    # If a download dir is specified, is the file already there and valid?
    already_downloaded_path = None
    if download_dir:
        already_downloaded_path = _check_download_dir(link,
                                                      download_dir,
                                                      hashes)

    if already_downloaded_path:
        from_path = already_downloaded_path
    else:
        from_path = link_path

    content_type = mimetypes.guess_type(from_path)[0]

    # unpack the archive to the build dir location. even when only downloading
    # archives, they have to be unpacked to parse dependencies
    unpack_file(from_path, location, content_type, link)

    # a download dir is specified and not already downloaded
    if download_dir and not already_downloaded_path:
        _copy_file(from_path, download_dir, link)
def _copy_dist_from_dir(link_path, location):
    """Copy distribution files in `link_path` to `location`.

    Invoked when user requests to install a local directory. E.g.:

        pip install .
        pip install ~/dev/git-repos/python-prompt-toolkit
    """
    # Note: This is currently VERY SLOW if you have a lot of data in the
    # directory, because it copies everything with `shutil.copytree`.
    # What it should really do is build an sdist and install that.
    # See https://github.com/pypa/pip/issues/2195

    # Start from an empty build location.
    if os.path.isdir(location):
        rmtree(location)

    # build an sdist by running "python -c <setuptools shim> sdist" inside
    # the source directory, writing the result into `location`
    setup_py = 'setup.py'
    sdist_args = [sys.executable]
    sdist_args.append('-c')
    sdist_args.append(SETUPTOOLS_SHIM % setup_py)
    sdist_args.append('sdist')
    sdist_args += ['--dist-dir', location]
    logger.info('Running setup.py sdist for %s', link_path)

    with indent_log():
        call_subprocess(sdist_args, cwd=link_path, show_stdout=False)

    # unpack sdist into `location`
    # NOTE(review): assumes the dist dir now contains exactly the one
    # archive just built -- os.listdir()[0] is order-dependent otherwise.
    sdist = os.path.join(location, os.listdir(location)[0])
    logger.info('Unpacking sdist %s into %s', sdist, location)
    unpack_file(sdist, location, content_type=None, link=None)
class PipXmlrpcTransport(xmlrpc_client.Transport):
    """Provide a `xmlrpclib.Transport` implementation via a `PipSession`
    object.
    """

    def __init__(self, index_url, session, use_datetime=False):
        xmlrpc_client.Transport.__init__(self, use_datetime)
        # Remember only the scheme of the index URL; the host arrives
        # per-request from the xmlrpc machinery.
        self._scheme = urllib_parse.urlparse(index_url).scheme
        self._session = session

    def request(self, host, handler, request_body, verbose=False):
        url = urllib_parse.urlunparse(
            (self._scheme, host, handler, None, None, None))
        try:
            response = self._session.post(
                url,
                data=request_body,
                headers={'Content-Type': 'text/xml'},
                stream=True,
            )
            response.raise_for_status()
            self.verbose = verbose
            return self.parse_response(response.raw)
        except requests.HTTPError as exc:
            logger.critical(
                "HTTP error %s while getting %s",
                exc.response.status_code, url,
            )
            raise
def unpack_url(link, location, download_dir=None,
               only_download=False, session=None, hashes=None):
    """Unpack link.

    If link is a VCS link:
      if only_download, export into download_dir and ignore location;
      else unpack into location.
    For other types of link:
      - unpack into location
      - if download_dir, copy the file into download_dir
      - if only_download, mark location for deletion

    :param hashes: A Hashes object, one of whose embedded hashes must
        match, or HashMismatch will be raised. If the Hashes is empty, no
        matches are required, and unhashable types of requirements (like
        VCS ones, which would ordinarily raise HashUnsupported) are
        allowed.
    """
    if is_vcs_url(link):
        # Non-editable VCS requirement.
        unpack_vcs_link(link, location)
    elif is_file_url(link):
        # Local file or directory.
        unpack_file_url(link, location, download_dir, hashes=hashes)
    else:
        # Remote HTTP(S) URL; fall back to a fresh session if none given.
        http_session = session if session is not None else PipSession()
        unpack_http_url(
            link,
            location,
            download_dir,
            http_session,
            hashes=hashes
        )
    if only_download:
        write_delete_marker_file(location)
def _download_http_url(link, session, temp_dir, hashes):
    """Download link url into temp_dir using provided session.

    Returns a ``(file_path, content_type)`` tuple for the saved file.
    Raises requests.HTTPError on a non-2xx response (after logging it).
    """
    # Drop any fragment (e.g. "#sha256=...") before requesting.
    target_url = link.url.split('#', 1)[0]
    try:
        resp = session.get(
            target_url,
            # We use Accept-Encoding: identity here because requests
            # defaults to accepting compressed responses. This breaks in
            # a variety of ways depending on how the server is configured.
            # - Some servers will notice that the file isn't a compressible
            #   file and will leave the file alone and with an empty
            #   Content-Encoding
            # - Some servers will notice that the file is already
            #   compressed and will leave the file alone and will add a
            #   Content-Encoding: gzip header
            # - Some servers won't notice anything at all and will take
            #   a file that's already been compressed and compress it again
            #   and set the Content-Encoding: gzip header
            # By setting this to request only the identity encoding We're
            # hoping to eliminate the third case. Hopefully there does not
            # exist a server which when given a file will notice it is
            # already compressed and that you're not asking for a
            # compressed file and will then decompress it before sending
            # because if that's the case I don't think it'll ever be
            # possible to make this work.
            headers={"Accept-Encoding": "identity"},
            stream=True,
        )
        resp.raise_for_status()
    except requests.HTTPError as exc:
        logger.critical(
            "HTTP error %s while getting %s", exc.response.status_code, link,
        )
        raise
    content_type = resp.headers.get('content-type', '')
    filename = link.filename  # fallback
    # Have a look at the Content-Disposition header for a better guess.
    content_disposition = resp.headers.get('content-disposition')
    if content_disposition:
        # Renamed from `type`: the original shadowed the builtin.
        disposition_type, params = cgi.parse_header(content_disposition)
        # We use ``or`` here because we don't want to use an "empty" value
        # from the filename param.
        filename = params.get('filename') or filename
    ext = splitext(filename)[1]
    if not ext:
        # No extension in the name; try to derive one from the MIME type.
        ext = mimetypes.guess_extension(content_type)
        if ext:
            filename += ext
    if not ext and link.url != resp.url:
        # The request was redirected; maybe the final URL has an extension.
        ext = os.path.splitext(resp.url)[1]
        if ext:
            filename += ext
    file_path = os.path.join(temp_dir, filename)
    with open(file_path, 'wb') as content_file:
        _download_url(resp, link, content_file, hashes)
    return file_path, content_type
def _check_download_dir(link, download_dir, hashes):
    """Check download_dir for a previously downloaded file with correct hash.

    Return its path if a matching file is found, else None.
    """
    download_path = os.path.join(download_dir, link.filename)
    if not os.path.exists(download_path):
        return None
    # A candidate file exists; verify it against the expected hashes.
    logger.info('File was already downloaded %s', download_path)
    if hashes:
        try:
            hashes.check_against_path(download_path)
        except HashMismatch:
            # Stale/corrupt download: remove it so it gets re-fetched.
            logger.warning(
                'Previously-downloaded file %s has bad hash. '
                'Re-downloading.',
                download_path
            )
            os.unlink(download_path)
            return None
    return download_path
| mit |
mhbu50/erpnext | erpnext/www/book_appointment/index.py | 2 | 5813 | import frappe
import datetime
import json
import pytz
from frappe import _
# Ordered so the list index matches datetime.weekday() (Monday == 0);
# get_available_slots_between and _get_records rely on that mapping.
WEEKDAYS = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
# NOTE(review): presumably tells the web framework not to cache this
# page's rendered output -- confirm against frappe's www conventions.
no_cache = 1
def get_context(context):
    """Serve the booking page only while scheduling is enabled."""
    is_enabled = frappe.db.get_single_value('Appointment Booking Settings', 'enable_scheduling')
    if not is_enabled:
        # Scheduling switched off: bounce the visitor to a message page.
        frappe.redirect_to_message(
            _("Appointment Scheduling Disabled"),
            _("Appointment Scheduling has been disabled for this site"),
            http_status_code=302, indicator_color="red")
        raise frappe.Redirect
    return context
@frappe.whitelist(allow_guest=True)
def get_appointment_settings():
    """Return the Appointment Booking Settings document, with its linked
    Holiday List name replaced by the full Holiday List document."""
    settings = frappe.get_doc('Appointment Booking Settings')
    holiday_list_name = settings.holiday_list
    settings.holiday_list = frappe.get_doc('Holiday List', holiday_list_name)
    return settings
@frappe.whitelist(allow_guest=True)
def get_timezones():
    """Return the list of all timezone names known to pytz."""
    # pytz is already imported at module level; the function-local
    # re-import in the original was redundant and has been removed.
    return pytz.all_timezones
@frappe.whitelist(allow_guest=True)
def get_appointment_slots(date, timezone):
    """Return the day's slots for `date` ('YYYY-MM-DD' in the guest's
    timezone), each as a dict with the guest-local slot `time` and an
    `availability` flag.
    """
    # Convert query to local timezones
    format_string = '%Y-%m-%d %H:%M:%S'
    query_start_time = datetime.datetime.strptime(date + ' 00:00:00', format_string)
    query_end_time = datetime.datetime.strptime(date + ' 23:59:59', format_string)
    query_start_time = convert_to_system_timezone(timezone, query_start_time)
    query_end_time = convert_to_system_timezone(timezone, query_end_time)
    # "now" expressed in the guest's timezone; used below to mark slots
    # that are already in the past as unavailable.
    now = convert_to_guest_timezone(timezone, datetime.datetime.now())
    # Database queries
    settings = frappe.get_doc('Appointment Booking Settings')
    holiday_list = frappe.get_doc('Holiday List', settings.holiday_list)
    timeslots = get_available_slots_between(query_start_time, query_end_time, settings)
    # Filter and convert timeslots
    converted_timeslots = []
    for timeslot in timeslots:
        converted_timeslot = convert_to_guest_timezone(timezone, timeslot)
        # Check if holiday
        if _is_holiday(converted_timeslot.date(), holiday_list):
            converted_timeslots.append(dict(time=converted_timeslot, availability=False))
            continue
        # Check availability
        if check_availabilty(timeslot, settings) and converted_timeslot >= now:
            converted_timeslots.append(dict(time=converted_timeslot, availability=True))
        else:
            converted_timeslots.append(dict(time=converted_timeslot, availability=False))
    # The system-timezone query window may span two guest-local days, so
    # drop any converted slot that falls outside the requested date.
    date_required = datetime.datetime.strptime(date + ' 00:00:00', format_string).date()
    converted_timeslots = filter_timeslots(date_required, converted_timeslots)
    return converted_timeslots
def get_available_slots_between(query_start_time, query_end_time, settings):
    """Generate every bookable slot start time between the two datetimes."""
    records = _get_records(query_start_time, query_end_time, settings)
    slot_length = datetime.timedelta(
        minutes=settings.appointment_duration)
    timeslots = []
    for record in records:
        # Anchor the record's from/to times on whichever query day the
        # record's weekday matches.
        if record.day_of_week == WEEKDAYS[query_start_time.weekday()]:
            anchor = query_start_time
        else:
            anchor = query_end_time
        slot_start = _deltatime_to_datetime(anchor, record.from_time)
        day_end = _deltatime_to_datetime(anchor, record.to_time)
        # Emit slots as long as a full appointment fits before day_end.
        while slot_start + slot_length <= day_end:
            timeslots.append(slot_start)
            slot_start += slot_length
    return timeslots
@frappe.whitelist(allow_guest=True)
def create_appointment(date, time, tz, contact):
    """Create and insert a new Appointment from the booking form data."""
    format_string = '%Y-%m-%d %H:%M:%S'
    scheduled_time = datetime.datetime.strptime(date + " " + time, format_string)
    # The doctype stores naive datetimes, so strip tzinfo both before the
    # timezone conversion and after it.
    scheduled_time = scheduled_time.replace(tzinfo=None)
    scheduled_time = convert_to_system_timezone(tz, scheduled_time)
    scheduled_time = scheduled_time.replace(tzinfo=None)
    contact = json.loads(contact)
    # Build an Appointment document from the submitted form fields.
    appointment = frappe.new_doc('Appointment')
    appointment.scheduled_time = scheduled_time
    appointment.customer_name = contact.get('name', None)
    appointment.customer_phone_number = contact.get('number', None)
    appointment.customer_skype = contact.get('skype', None)
    appointment.customer_details = contact.get('notes', None)
    appointment.customer_email = contact.get('email', None)
    appointment.status = 'Open'
    appointment.insert()
    return appointment
# Helper Functions
def filter_timeslots(date, timeslots):
    """Return only the timeslot dicts whose 'time' falls on *date*."""
    return [slot for slot in timeslots if slot['time'].date() == date]
def convert_to_guest_timezone(guest_tz, datetimeobject):
    """Interpret a naive datetime as system-local time and convert it to
    the guest's timezone."""
    target = pytz.timezone(guest_tz)
    system_zone = pytz.timezone(frappe.utils.get_time_zone())
    localized = system_zone.localize(datetimeobject)
    return localized.astimezone(target)
def convert_to_system_timezone(guest_tz, datetimeobject):
    """Interpret a naive datetime as guest-local time and convert it to
    the system timezone."""
    source = pytz.timezone(guest_tz)
    localized = source.localize(datetimeobject)
    system_zone = pytz.timezone(frappe.utils.get_time_zone())
    return localized.astimezone(system_zone)
def check_availabilty(timeslot, settings):
    # A slot is free while the number of appointments already booked at
    # exactly this time is below the configured number of agents.
    # NOTE(review): function name carries a typo ("availabilty"); callers
    # in this module use this spelling, so it is kept.
    return frappe.db.count('Appointment', {'scheduled_time': timeslot}) < settings.number_of_agents
def _is_holiday(date, holiday_list):
for holiday in holiday_list.holidays:
if holiday.holiday_date == date:
return True
return False
def _get_records(start_time, end_time, settings):
    """Return availability rows whose weekday matches either query day."""
    valid_days = (WEEKDAYS[start_time.weekday()], WEEKDAYS[end_time.weekday()])
    return [record for record in settings.availability_of_slots
            if record.day_of_week in valid_days]
def _deltatime_to_datetime(date, deltatime):
time = (datetime.datetime.min + deltatime).time()
return datetime.datetime.combine(date.date(), time)
def _datetime_to_deltatime(date_time):
midnight = datetime.datetime.combine(date_time.date(), datetime.time.min)
return (date_time-midnight)
| gpl-3.0 |
lexus24/w16b_test | static/Brython3.1.3-20150514-095342/Lib/xml/sax/handler.py | 925 | 13922 | """
This module contains the core classes of version 2.0 of SAX for Python.
This file provides only default classes with absolutely minimum
functionality, from which drivers and applications can be subclassed.
Many of these classes are empty and are included only as documentation
of the interfaces.
$Id$
"""
# SAX API version implemented by this module.
version = '2.0beta'
#============================================================================
#
# HANDLER INTERFACES
#
#============================================================================
# ===== ERRORHANDLER =====
class ErrorHandler:
    """Basic interface for SAX error handlers.

    Register an object implementing this interface with your XMLReader
    and the parser will call its methods to report all warnings and
    errors.  Three severity levels exist: warnings, (possibly)
    recoverable errors, and unrecoverable errors.  Every method receives
    a SAXParseException as its only parameter."""

    def error(self, exception):
        """Handle a recoverable error (default: re-raise it)."""
        raise exception

    def fatalError(self, exception):
        """Handle a non-recoverable error (default: re-raise it)."""
        raise exception

    def warning(self, exception):
        """Handle a warning (default: print it)."""
        print(exception)
# ===== CONTENTHANDLER =====
class ContentHandler:
    """Interface for receiving logical document content events.
    This is the main callback interface in SAX, and the one most
    important to applications. The order of events in this interface
    mirrors the order of the information in the document."""
    def __init__(self):
        # Locator supplied by the parser via setDocumentLocator();
        # None until the parser provides one.
        self._locator = None
    def setDocumentLocator(self, locator):
        """Called by the parser to give the application a locator for
        locating the origin of document events.
        SAX parsers are strongly encouraged (though not absolutely
        required) to supply a locator: if it does so, it must supply
        the locator to the application by invoking this method before
        invoking any of the other methods in the DocumentHandler
        interface.
        The locator allows the application to determine the end
        position of any document-related event, even if the parser is
        not reporting an error. Typically, the application will use
        this information for reporting its own errors (such as
        character content that does not match an application's
        business rules). The information returned by the locator is
        probably not sufficient for use with a search engine.
        Note that the locator will return correct information only
        during the invocation of the events in this interface. The
        application should not attempt to use it at any other time."""
        self._locator = locator
    def startDocument(self):
        """Receive notification of the beginning of a document.
        The SAX parser will invoke this method only once, before any
        other methods in this interface or in DTDHandler (except for
        setDocumentLocator)."""
    def endDocument(self):
        """Receive notification of the end of a document.
        The SAX parser will invoke this method only once, and it will
        be the last method invoked during the parse. The parser shall
        not invoke this method until it has either abandoned parsing
        (because of an unrecoverable error) or reached the end of
        input."""
    def startPrefixMapping(self, prefix, uri):
        """Begin the scope of a prefix-URI Namespace mapping.
        The information from this event is not necessary for normal
        Namespace processing: the SAX XML reader will automatically
        replace prefixes for element and attribute names when the
        http://xml.org/sax/features/namespaces feature is true (the
        default).
        There are cases, however, when applications need to use
        prefixes in character data or in attribute values, where they
        cannot safely be expanded automatically; the
        start/endPrefixMapping event supplies the information to the
        application to expand prefixes in those contexts itself, if
        necessary.
        Note that start/endPrefixMapping events are not guaranteed to
        be properly nested relative to each-other: all
        startPrefixMapping events will occur before the corresponding
        startElement event, and all endPrefixMapping events will occur
        after the corresponding endElement event, but their order is
        not guaranteed."""
    def endPrefixMapping(self, prefix):
        """End the scope of a prefix-URI mapping.
        See startPrefixMapping for details. This event will always
        occur after the corresponding endElement event, but the order
        of endPrefixMapping events is not otherwise guaranteed."""
    def startElement(self, name, attrs):
        """Signals the start of an element in non-namespace mode.
        The name parameter contains the raw XML 1.0 name of the
        element type as a string and the attrs parameter holds an
        instance of the Attributes class containing the attributes of
        the element."""
    def endElement(self, name):
        """Signals the end of an element in non-namespace mode.
        The name parameter contains the name of the element type, just
        as with the startElement event."""
    def startElementNS(self, name, qname, attrs):
        """Signals the start of an element in namespace mode.
        The name parameter contains the name of the element type as a
        (uri, localname) tuple, the qname parameter the raw XML 1.0
        name used in the source document, and the attrs parameter
        holds an instance of the Attributes class containing the
        attributes of the element.
        The uri part of the name tuple is None for elements which have
        no namespace."""
    def endElementNS(self, name, qname):
        """Signals the end of an element in namespace mode.
        The name parameter contains the name of the element type, just
        as with the startElementNS event."""
    def characters(self, content):
        """Receive notification of character data.
        The Parser will call this method to report each chunk of
        character data. SAX parsers may return all contiguous
        character data in a single chunk, or they may split it into
        several chunks; however, all of the characters in any single
        event must come from the same external entity so that the
        Locator provides useful information."""
    def ignorableWhitespace(self, whitespace):
        """Receive notification of ignorable whitespace in element content.
        Validating Parsers must use this method to report each chunk
        of ignorable whitespace (see the W3C XML 1.0 recommendation,
        section 2.10): non-validating parsers may also use this method
        if they are capable of parsing and using content models.
        SAX parsers may return all contiguous whitespace in a single
        chunk, or they may split it into several chunks; however, all
        of the characters in any single event must come from the same
        external entity, so that the Locator provides useful
        information."""
    def processingInstruction(self, target, data):
        """Receive notification of a processing instruction.
        The Parser will invoke this method once for each processing
        instruction found: note that processing instructions may occur
        before or after the main document element.
        A SAX parser should never report an XML declaration (XML 1.0,
        section 2.8) or a text declaration (XML 1.0, section 4.3.1)
        using this method."""
    def skippedEntity(self, name):
        """Receive notification of a skipped entity.
        The Parser will invoke this method once for each entity
        skipped. Non-validating processors may skip entities if they
        have not seen the declarations (because, for example, the
        entity was declared in an external DTD subset). All processors
        may skip external entities, depending on the values of the
        http://xml.org/sax/features/external-general-entities and the
        http://xml.org/sax/features/external-parameter-entities
        properties."""
# ===== DTDHandler =====
class DTDHandler:
    """Handle DTD events.

    Only those DTD events required for basic parsing (unparsed
    entities and attributes) are specified here.  The default
    implementations do nothing."""

    def notationDecl(self, name, publicId, systemId):
        """Handle a notation declaration event."""

    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        """Handle an unparsed entity declaration event."""
# ===== ENTITYRESOLVER =====
class EntityResolver:
    """Basic interface for resolving entities.

    Register an object implementing this interface with your Parser
    and it will be consulted to resolve all external entities.  Note
    that DefaultHandler implements this interface with the default
    behaviour."""

    def resolveEntity(self, publicId, systemId):
        """Resolve an entity's system identifier.

        Return either a string system identifier to read from, or an
        InputSource to read from.  The default implementation simply
        echoes back the given system identifier."""
        return systemId
#============================================================================
#
# CORE FEATURES
#
#============================================================================
# Each feature below is identified by a URI string; the comments give its
# meaning as a boolean flag and when it may be read or written.
feature_namespaces = "http://xml.org/sax/features/namespaces"
# true: Perform Namespace processing (default).
# false: Optionally do not perform Namespace processing
#        (implies namespace-prefixes).
# access: (parsing) read-only; (not parsing) read/write
feature_namespace_prefixes = "http://xml.org/sax/features/namespace-prefixes"
# true: Report the original prefixed names and attributes used for Namespace
#       declarations.
# false: Do not report attributes used for Namespace declarations, and
#        optionally do not report original prefixed names (default).
# access: (parsing) read-only; (not parsing) read/write
feature_string_interning = "http://xml.org/sax/features/string-interning"
# true: All element names, prefixes, attribute names, Namespace URIs, and
#       local names are interned using the built-in intern function.
# false: Names are not necessarily interned, although they may be (default).
# access: (parsing) read-only; (not parsing) read/write
feature_validation = "http://xml.org/sax/features/validation"
# true: Report all validation errors (implies external-general-entities and
#       external-parameter-entities).
# false: Do not report validation errors.
# access: (parsing) read-only; (not parsing) read/write
feature_external_ges = "http://xml.org/sax/features/external-general-entities"
# true: Include all external general (text) entities.
# false: Do not include external general entities.
# access: (parsing) read-only; (not parsing) read/write
feature_external_pes = "http://xml.org/sax/features/external-parameter-entities"
# true: Include all external parameter entities, including the external
#       DTD subset.
# false: Do not include any external parameter entities, even the external
#        DTD subset.
# access: (parsing) read-only; (not parsing) read/write
# Convenience list of every core feature name defined above.
all_features = [feature_namespaces,
                feature_namespace_prefixes,
                feature_string_interning,
                feature_validation,
                feature_external_ges,
                feature_external_pes]
#============================================================================
#
# CORE PROPERTIES
#
#============================================================================
# Properties, like features, are keyed by URI strings; unlike features
# their values are objects rather than booleans.
property_lexical_handler = "http://xml.org/sax/properties/lexical-handler"
# data type: xml.sax.sax2lib.LexicalHandler
# description: An optional extension handler for lexical events like comments.
# access: read/write
property_declaration_handler = "http://xml.org/sax/properties/declaration-handler"
# data type: xml.sax.sax2lib.DeclHandler
# description: An optional extension handler for DTD-related events other
#              than notations and unparsed entities.
# access: read/write
property_dom_node = "http://xml.org/sax/properties/dom-node"
# data type: org.w3c.dom.Node
# description: When parsing, the current DOM node being visited if this is
#              a DOM iterator; when not parsing, the root DOM node for
#              iteration.
# access: (parsing) read-only; (not parsing) read/write
property_xml_string = "http://xml.org/sax/properties/xml-string"
# data type: String
# description: The literal string of characters that was the source for
#              the current event.
# access: read-only
property_encoding = "http://www.python.org/sax/properties/encoding"
# data type: String
# description: The name of the encoding to assume for input data.
# access: write: set the encoding, e.g. established by a higher-level
#                protocol. May change during parsing (e.g. after
#                processing a META tag)
#         read:  return the current encoding (possibly established through
#                auto-detection.
# initial value: UTF-8
#
property_interning_dict = "http://www.python.org/sax/properties/interning-dict"
# data type: Dictionary
# description: The dictionary used to intern common strings in the document
# access: write: Request that the parser uses a specific dictionary, to
#                allow interning across different documents
#         read:  return the current interning dictionary, or None
#
# Convenience list of every core property name defined above.
all_properties = [property_lexical_handler,
                  property_dom_node,
                  property_declaration_handler,
                  property_xml_string,
                  property_encoding,
                  property_interning_dict]
| agpl-3.0 |
faux123/lge-FR-kernel | arch/ia64/scripts/unwcheck.py | 916 | 1718 | #!/usr/bin/env python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# Fix: the original used Python-2-only `print` statements even though the
# shebang is the unversioned `python`; the parenthesized form below runs
# identically under Python 2 and 3.  Regexes are now raw strings so the
# `\[` escape is not an invalid string escape.
if len(sys.argv) != 2:
    print("Usage: %s FILE" % sys.argv[0])
    sys.exit(2)

# Allow overriding the readelf binary (e.g. a cross-toolchain one).
readelf = os.getenv("READELF", "readelf")

# Matches a function header line: "<name>: [0xSTART-0xEND]".
start_pattern = re.compile(r"<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches a region line carrying its length: "... rlen=N".
rlen_pattern = re.compile(r".*rlen=([0-9]+)")
def check_func(func, slots, rlen_sum):
    """Report an error if a function's slot count differs from the sum of
    its unwind-region lengths.

    Fix: the original used a Python-2-only `print` statement; the
    parenthesized form works under both Python 2 and 3.
    """
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func:
            # Unnamed region: identify it by its address range instead.
            func = "[%#x-%#x]" % (start, end)
        print("ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum))
    return
# Fix: ported Python-2-only constructs to forms valid under both 2 and 3:
# `long(...)` -> `int(...)` (py2 ints auto-promote), `0L` -> `0`,
# integer `/` -> `//` (preserves the py2 floor-division result), and
# `print` statements -> parenthesized calls.
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # New function header: flush the totals of the previous function.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = int(m.group(2), 16)
        end = int(m.group(3), 16)
        # Each 16-byte IA-64 bundle holds 3 instruction slots.
        slots = 3 * (end - start) // 16
        rlen_sum = 0
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += int(m.group(1))
# Flush the totals of the final function in the file.
check_func(func, slots, rlen_sum)

if num_errors == 0:
    print("No errors detected in %u functions." % num_funcs)
else:
    if num_errors > 1:
        err = "errors"
    else:
        err = "error"
    print("%u %s detected in %u functions." % (num_errors, err, num_funcs))
    sys.exit(1)
| gpl-2.0 |
vpodzime/anaconda | tests/pyanaconda_tests/iutil_test.py | 7 | 28119 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vratislav Podzimek <vpodzime@redhat.com>
# Martin Kolman <mkolman@redhat.com>
from pyanaconda import iutil
import unittest
import os
import tempfile
import signal
import shutil
from test_constants import ANACONDA_TEST_DIR
from timer import timer
class UpcaseFirstLetterTests(unittest.TestCase):

    def setUp(self):
        # Make sure the shared test directory exists for file/folder tests.
        if not os.path.exists(ANACONDA_TEST_DIR):
            os.makedirs(ANACONDA_TEST_DIR)

    def tearDown(self):
        # Remove the shared test directory again.
        shutil.rmtree(ANACONDA_TEST_DIR)

    def upcase_first_letter_test(self):
        """Upcasing first letter should work as expected."""
        cases = [
            # already capitalized -> unchanged
            ("Czech RePuBliC", "Czech RePuBliC"),
            # simple single word
            ("czech", "Czech"),
            # only the first letter is upcased
            ("czech republic", "Czech republic"),
            # existing uppercase elsewhere is left alone
            ("czech Republic", "Czech Republic"),
        ]
        for given, expected in cases:
            self.assertEqual(iutil.upcase_first_letter(given), expected)
class RunProgramTests(unittest.TestCase):
def run_program_test(self):
"""Test the _run_program method."""
# correct calling should return rc==0
self.assertEqual(iutil._run_program(['ls'])[0], 0)
# incorrect calling should return rc!=0
self.assertNotEqual(iutil._run_program(['ls', '--asdasd'])[0], 0)
# check if an int is returned for bot success and error
self.assertIsInstance(iutil._run_program(['ls'])[0], int)
self.assertIsInstance(iutil._run_program(['ls', '--asdasd'])[0], int)
# error should raise OSError
with self.assertRaises(OSError):
iutil._run_program(['asdasdadasd'])
def exec_with_redirect_test(self):
"""Test execWithRedirect."""
# correct calling should return rc==0
self.assertEqual(iutil.execWithRedirect('ls', []), 0)
# incorrect calling should return rc!=0
self.assertNotEqual(iutil.execWithRedirect('ls', ['--asdasd']), 0)
def exec_with_capture_test(self):
"""Test execWithCapture."""
# check some output is returned
self.assertGreater(len(iutil.execWithCapture('ls', ['--help'])), 0)
# check no output is returned
self.assertEqual(len(iutil.execWithCapture('true', [])), 0)
def exec_with_capture_no_stderr_test(self):
"""Test execWithCapture with no stderr"""
with tempfile.NamedTemporaryFile() as testscript:
testscript.write("""#!/bin/sh
echo "output"
echo "error" >&2
""")
testscript.flush()
# check that only the output is captured
self.assertEqual(
iutil.execWithCapture("/bin/sh", [testscript.name], filter_stderr=True),
"output\n")
# check that both output and error are captured
self.assertEqual(iutil.execWithCapture("/bin/sh", [testscript.name]),
"output\nerror\n")
def exec_readlines_test(self):
"""Test execReadlines."""
# test no lines are returned
self.assertEqual(list(iutil.execReadlines("true", [])), [])
# test some lines are returned
self.assertGreater(len(list(iutil.execReadlines("ls", ["--help"]))), 0)
# check that it always returns an iterator for both
# if there is some output and if there isn't any
self.assertTrue(hasattr(iutil.execReadlines("ls", ["--help"]), "__iter__"))
self.assertTrue(hasattr(iutil.execReadlines("true", []), "__iter__"))
def exec_readlines_test_normal_output(self):
"""Test the output of execReadlines."""
# Test regular-looking output
with tempfile.NamedTemporaryFile() as testscript:
testscript.write("""#!/bin/sh
echo "one"
echo "two"
echo "three"
exit 0
""")
testscript.flush()
with timer(5):
rl_iterator = iutil.execReadlines("/bin/sh", [testscript.name])
self.assertEqual(rl_iterator.next(), "one")
self.assertEqual(rl_iterator.next(), "two")
self.assertEqual(rl_iterator.next(), "three")
self.assertRaises(StopIteration, rl_iterator.next)
# Test output with no end of line
with tempfile.NamedTemporaryFile() as testscript:
testscript.write("""#!/bin/sh
echo "one"
echo "two"
echo -n "three"
exit 0
""")
testscript.flush()
with timer(5):
rl_iterator = iutil.execReadlines("/bin/sh", [testscript.name])
self.assertEqual(rl_iterator.next(), "one")
self.assertEqual(rl_iterator.next(), "two")
self.assertEqual(rl_iterator.next(), "three")
self.assertRaises(StopIteration, rl_iterator.next)
def exec_readlines_test_exits(self):
"""Test execReadlines in different child exit situations."""
# Tests that exit on signal will raise OSError once output
# has been consumed, otherwise the test will exit normally.
# Test a normal, non-0 exit
with tempfile.NamedTemporaryFile() as testscript:
testscript.write("""#!/bin/sh
echo "one"
echo "two"
echo "three"
exit 1
""")
testscript.flush()
with timer(5):
rl_iterator = iutil.execReadlines("/bin/sh", [testscript.name])
self.assertEqual(rl_iterator.next(), "one")
self.assertEqual(rl_iterator.next(), "two")
self.assertEqual(rl_iterator.next(), "three")
self.assertRaises(OSError, rl_iterator.next)
# Test exit on signal
with tempfile.NamedTemporaryFile() as testscript:
testscript.write("""#!/bin/sh
echo "one"
echo "two"
echo "three"
kill -TERM $$
""")
testscript.flush()
with timer(5):
rl_iterator = iutil.execReadlines("/bin/sh", [testscript.name])
self.assertEqual(rl_iterator.next(), "one")
self.assertEqual(rl_iterator.next(), "two")
self.assertEqual(rl_iterator.next(), "three")
self.assertRaises(OSError, rl_iterator.next)
# Repeat the above two tests, but exit before a final newline
with tempfile.NamedTemporaryFile() as testscript:
testscript.write("""#!/bin/sh
echo "one"
echo "two"
echo -n "three"
exit 1
""")
testscript.flush()
with timer(5):
rl_iterator = iutil.execReadlines("/bin/sh", [testscript.name])
self.assertEqual(rl_iterator.next(), "one")
self.assertEqual(rl_iterator.next(), "two")
self.assertEqual(rl_iterator.next(), "three")
self.assertRaises(OSError, rl_iterator.next)
with tempfile.NamedTemporaryFile() as testscript:
testscript.write("""#!/bin/sh
echo "one"
echo "two"
echo -n "three"
kill -TERM $$
""")
testscript.flush()
with timer(5):
rl_iterator = iutil.execReadlines("/bin/sh", [testscript.name])
self.assertEqual(rl_iterator.next(), "one")
self.assertEqual(rl_iterator.next(), "two")
self.assertEqual(rl_iterator.next(), "three")
self.assertRaises(OSError, rl_iterator.next)
def exec_readlines_test_signals(self):
"""Test execReadlines and signal receipt."""
# ignored signal
old_HUP_handler = signal.signal(signal.SIGHUP, signal.SIG_IGN)
try:
with tempfile.NamedTemporaryFile() as testscript:
testscript.write("""#!/bin/sh
echo "one"
kill -HUP $PPID
echo "two"
echo -n "three"
exit 0
""")
testscript.flush()
with timer(5):
rl_iterator = iutil.execReadlines("/bin/sh", [testscript.name])
self.assertEqual(rl_iterator.next(), "one")
self.assertEqual(rl_iterator.next(), "two")
self.assertEqual(rl_iterator.next(), "three")
self.assertRaises(StopIteration, rl_iterator.next)
finally:
signal.signal(signal.SIGHUP, old_HUP_handler)
# caught signal
def _hup_handler(signum, frame):
pass
old_HUP_handler = signal.signal(signal.SIGHUP, _hup_handler)
try:
with tempfile.NamedTemporaryFile() as testscript:
testscript.write("""#!/bin/sh
echo "one"
kill -HUP $PPID
echo "two"
echo -n "three"
exit 0
""")
testscript.flush()
with timer(5):
rl_iterator = iutil.execReadlines("/bin/sh", [testscript.name])
self.assertEqual(rl_iterator.next(), "one")
self.assertEqual(rl_iterator.next(), "two")
self.assertEqual(rl_iterator.next(), "three")
self.assertRaises(StopIteration, rl_iterator.next)
finally:
signal.signal(signal.SIGHUP, old_HUP_handler)
def start_program_preexec_fn_test(self):
    """Test passing preexec_fn to startProgram."""
    marker_text = "yo wassup man"
    # A temporary file the preexec hook writes to before exec
    with tempfile.NamedTemporaryFile() as testfile:
        def preexec():
            # close_fds has already closed our inherited descriptor, so
            # open a fresh handle to the same path and leave the marker.
            with open(testfile.name, 'w') as testcopy:
                testcopy.write(marker_text)

        with timer(5):
            # Run a do-nothing program with the preexec hook attached
            child = iutil.startProgram(["/bin/true"], preexec_fn=preexec)
            child.communicate()

        # The marker left by the hook must be visible from our handle
        testfile.seek(0, os.SEEK_SET)
        self.assertEqual(testfile.read(), marker_text)
def start_program_stdout_test(self):
    """Test redirecting stdout with startProgram."""
    marker_text = "yo wassup man"
    # The child program will write into this temporary file
    with tempfile.NamedTemporaryFile() as testfile:
        # Hand the child its own handle so it cannot close (and thereby
        # delete) the NamedTemporaryFile itself
        stdout = open(testfile.name, 'w')
        with timer(5):
            echo_proc = iutil.startProgram(["/bin/echo", marker_text], stdout=stdout)
            echo_proc.communicate()

        # The echoed marker must have landed in the file
        testfile.seek(0, os.SEEK_SET)
        self.assertEqual(testfile.read().strip(), marker_text)
def start_program_reset_handlers_test(self):
    """Test the reset_handlers parameter of startProgram."""
    with tempfile.NamedTemporaryFile() as testscript:
        testscript.write("""#!/bin/sh
# Just hang out and do nothing, forever
while true ; do sleep 1 ; done
""")
        testscript.flush()

        # With reset_handlers (the default) the child must not inherit
        # python's SIG_IGN for SIGPIPE, so the signal alone kills it.
        child = iutil.startProgram(["/bin/sh", testscript.name])
        with timer(5):
            child.send_signal(signal.SIGPIPE)
            child.communicate()
        self.assertEqual(child.returncode, -(signal.SIGPIPE))

        # Without reset_handlers, SIGPIPE stays ignored; only the
        # follow-up SIGTERM should be the signal that ends the process.
        child = iutil.startProgram(["/bin/sh", testscript.name], reset_handlers=False)
        with timer(5):
            child.send_signal(signal.SIGPIPE)
            child.terminate()
            child.communicate()
        self.assertEqual(child.returncode, -(signal.SIGTERM))
def exec_readlines_auto_kill_test(self):
    """Test execReadlines with reading only part of the output"""
    with tempfile.NamedTemporaryFile() as testscript:
        testscript.write("""#!/bin/sh
# Output forever
while true; do
echo hey
done
""")
        testscript.flush()

        with timer(5):
            rl_iterator = iutil.execReadlines("/bin/sh", [testscript.name])

            # Save the process context
            proc = rl_iterator._proc

            # Read two lines worth
            self.assertEqual(rl_iterator.next(), "hey")
            self.assertEqual(rl_iterator.next(), "hey")

            # Delete the iterator and wait for the process to be killed
            # NOTE(review): this relies on the iterator's finalizer
            # terminating the child as soon as the last reference goes
            # away, i.e. on CPython refcounting semantics — confirm if
            # this suite ever runs on another interpreter.
            del rl_iterator
            proc.communicate()

            # Check that the process is gone
            self.assertIsNotNone(proc.poll())
def watch_process_test(self):
    """Test watchProcess"""
    def test_still_running():
        # watchProcess should raise ExitError (via its SIGCHLD machinery)
        # once the watched process dies.
        with timer(5):
            # Run something forever so we can kill it
            proc = iutil.startProgram(["/bin/sh", "-c", "while true; do sleep 1; done"])
            iutil.watchProcess(proc, "test1")
            proc.kill()
            # Wait for the SIGCHLD
            signal.pause()
    self.assertRaises(iutil.ExitError, test_still_running)

    # Make sure watchProcess checks that the process has not already exited
    with timer(5):
        proc = iutil.startProgram(["true"])
        proc.communicate()
    self.assertRaises(iutil.ExitError, iutil.watchProcess, proc, "test2")
class MiscTests(unittest.TestCase):
    """Tests for assorted small helpers in iutil (paths, attributes,
    string transformations, NFS URL parsing, console queries)."""

    def get_dir_size_test(self):
        """Test the getDirSize."""
        # dev null should have a size == 0
        self.assertEqual(iutil.getDirSize('/dev/null'), 0)
        # incorrect path should also return 0
        self.assertEqual(iutil.getDirSize('/dev/null/foo'), 0)
        # check if an int is always returned
        self.assertIsInstance(iutil.getDirSize('/dev/null'), int)
        self.assertIsInstance(iutil.getDirSize('/dev/null/foo'), int)
        # TODO: mock some dirs and check if their size is
        # computed correctly

    def mkdir_chain_test(self):
        """Test mkdirChain."""
        # don't fail if directory path already exists
        iutil.mkdirChain('/dev/null')
        iutil.mkdirChain('/')
        iutil.mkdirChain('/tmp')

        # create a path and test it exists
        test_folder = "test_mkdir_chain"
        # mixture of plain and unicode strings, empty and
        # whitespace/punctuation-containing names
        test_paths = [
            "foo",
            "foo/bar/baz",
            u"foo/bar/baz",
            "",
            "čřščščřščř",
            u"čřščščřščř",
            "asdasd asdasd",
            "! spam"
        ]

        # join with the toplevel test folder and the folder for this
        # test
        test_paths = [os.path.join(ANACONDA_TEST_DIR, test_folder, p)
                      for p in test_paths]

        def create_return(path):
            # create the chain, then hand the path back so the existence
            # check below can be written inline
            iutil.mkdirChain(path)
            return path

        # create the folders and check that they exist
        for p in test_paths:
            self.assertTrue(os.path.exists(create_return(p)))

        # try to create them again - all the paths should already exist
        # and the mkdirChain function needs to handle that
        # without a traceback
        for p in test_paths:
            iutil.mkdirChain(p)

    def get_active_console_test(self):
        """Test get_active_console."""
        # at least check if a string is returned
        self.assertIsInstance(iutil.get_active_console(), str)

    def is_console_on_vt_test(self):
        """Test isConsoleOnVirtualTerminal."""
        # at least check if a bool is returned
        self.assertIsInstance(iutil.isConsoleOnVirtualTerminal(), bool)

    def parse_nfs_url_test(self):
        """Test parseNfsUrl."""
        # empty NFS url should return 3 blanks
        self.assertEqual(iutil.parseNfsUrl(""), ("", "", ""))

        # the string is delimited by :, there is one prefix and 3 parts,
        # the prefix is discarded and all parts after the 3rd part
        # are also discarded
        self.assertEqual(iutil.parseNfsUrl("discard:options:host:path"),
                         ("options", "host", "path"))
        self.assertEqual(iutil.parseNfsUrl("discard:options:host:path:foo:bar"),
                         ("options", "host", "path"))
        self.assertEqual(iutil.parseNfsUrl(":options:host:path::"),
                         ("options", "host", "path"))
        self.assertEqual(iutil.parseNfsUrl(":::::"),
                         ("", "", ""))

        # if there is only prefix & 2 parts,
        # the two parts are host and path
        self.assertEqual(iutil.parseNfsUrl("prefix:host:path"),
                         ("", "host", "path"))
        self.assertEqual(iutil.parseNfsUrl(":host:path"),
                         ("", "host", "path"))
        self.assertEqual(iutil.parseNfsUrl("::"),
                         ("", "", ""))

        # if there is only a prefix and single part,
        # the part is the host
        self.assertEqual(iutil.parseNfsUrl("prefix:host"),
                         ("", "host", ""))
        self.assertEqual(iutil.parseNfsUrl(":host"),
                         ("", "host", ""))
        self.assertEqual(iutil.parseNfsUrl(":"),
                         ("", "", ""))

    def vt_activate_test(self):
        """Test vtActivate."""
        # pylint: disable=no-member

        def raise_os_error(*args, **kwargs):
            raise OSError

        # swap execWithRedirect out of vtActivate's module globals so the
        # OSError code path can be exercised without actually running chvt
        _execWithRedirect = iutil.vtActivate.func_globals['execWithRedirect']

        try:
            # chvt does not exist on all platforms
            # and the function needs to correctly survive that
            iutil.vtActivate.func_globals['execWithRedirect'] = raise_os_error
            self.assertEqual(iutil.vtActivate(2), False)
        finally:
            iutil.vtActivate.func_globals['execWithRedirect'] = _execWithRedirect

    def get_deep_attr_test(self):
        """Test getdeepattr."""
        # pylint: disable=attribute-defined-outside-init

        class O(object):
            pass

        a = O()
        a.b = O()
        a.b1 = 1
        a.b.c = 2
        a.b.c1 = "ř"

        self.assertEqual(iutil.getdeepattr(a, "b1"), 1)
        self.assertEqual(iutil.getdeepattr(a, "b.c"), 2)
        self.assertEqual(iutil.getdeepattr(a, "b.c1"), "ř")

        # be consistent with getattr and throw
        # AttributeError if non-existent attribute is requested
        with self.assertRaises(AttributeError):
            iutil.getdeepattr(a, "")
        with self.assertRaises(AttributeError):
            iutil.getdeepattr(a, "b.c.d")

    def set_deep_attr_test(self):
        """Test setdeepattr."""
        # pylint: disable=attribute-defined-outside-init
        # pylint: disable=no-member

        class O(object):
            pass

        a = O()
        a.b = O()
        a.b1 = 1
        a.b.c = O()
        a.b.c1 = "ř"

        # set to a new attribute
        iutil.setdeepattr(a, "b.c.d", True)
        self.assertEqual(a.b.c.d, True)

        # override existing attribute
        iutil.setdeepattr(a, "b.c", 1234)
        self.assertEqual(a.b.c, 1234)

        # "" is actually a valid attribute name
        # that can be only accessed by getattr
        iutil.setdeepattr(a, "", 1234)
        self.assertEqual(getattr(a, ""), 1234)

        iutil.setdeepattr(a, "b.", 123)
        self.assertEqual(iutil.getdeepattr(a, "b."), 123)

        # error should raise AttributeError
        with self.assertRaises(AttributeError):
            iutil.setdeepattr(a, "b.c.d.e.f.g.h", 1234)

    def strip_accents_test(self):
        """Test strip_accents."""
        # string needs to be Unicode,
        # otherwise TypeError is raised
        with self.assertRaises(TypeError):
            iutil.strip_accents("")
        with self.assertRaises(TypeError):
            iutil.strip_accents("abc")
        with self.assertRaises(TypeError):
            iutil.strip_accents("ěščřžýáíé")

        # empty Unicode string
        self.assertEquals(iutil.strip_accents(u""), u"")

        # some Czech accents
        self.assertEquals(iutil.strip_accents(u"ěščřžýáíéúů"), u"escrzyaieuu")
        self.assertEquals(iutil.strip_accents(u"v češtině"), u"v cestine")
        self.assertEquals(iutil.strip_accents(u"měšťánek rozšíří HÁČKY"),
                          u"mestanek rozsiri HACKY")
        self.assertEquals(iutil.strip_accents(u"nejneobhospodařovávatelnějšímu"),
                          u"nejneobhospodarovavatelnejsimu")

        # some German umlauts
        self.assertEquals(iutil.strip_accents(u"Lärmüberhörer"), u"Larmuberhorer")
        self.assertEquals(iutil.strip_accents(u"Heizölrückstoßabdämpfung"),
                          u"Heizolrucksto\xdfabdampfung")

        # some Japanese
        self.assertEquals(iutil.strip_accents(u"日本語"), u"\u65e5\u672c\u8a9e")
        self.assertEquals(iutil.strip_accents(u"アナコンダ"),  # Anaconda
                          u"\u30a2\u30ca\u30b3\u30f3\u30bf")

        # combined
        input_string = u"ASCI měšťánek アナコンダ Heizölrückstoßabdämpfung"
        output_string =u"ASCI mestanek \u30a2\u30ca\u30b3\u30f3\u30bf Heizolrucksto\xdfabdampfung"
        self.assertEquals(iutil.strip_accents(input_string), output_string)

    def cmp_obj_attrs_test(self):
        """Test cmp_obj_attrs."""
        # pylint: disable=attribute-defined-outside-init

        class O(object):
            pass

        a = O()
        a.b = 1
        a.c = 2

        a1 = O()
        a1.b = 1
        a1.c = 2

        b = O()
        b.b = 1
        b.c = 3

        # a class should have its own attributes
        self.assertTrue(iutil.cmp_obj_attrs(a, a, ["b", "c"]))
        self.assertTrue(iutil.cmp_obj_attrs(a1, a1, ["b", "c"]))
        self.assertTrue(iutil.cmp_obj_attrs(b, b, ["b", "c"]))

        # a and a1 should have the same attributes
        self.assertTrue(iutil.cmp_obj_attrs(a, a1, ["b", "c"]))
        self.assertTrue(iutil.cmp_obj_attrs(a1, a, ["b", "c"]))
        self.assertTrue(iutil.cmp_obj_attrs(a1, a, ["c", "b"]))

        # missing attributes are considered a mismatch
        self.assertFalse(iutil.cmp_obj_attrs(a, a1, ["b", "c", "d"]))

        # empty attribute list is not a mismatch
        self.assertTrue(iutil.cmp_obj_attrs(a, b, []))

        # attributes of a and b differ
        self.assertFalse(iutil.cmp_obj_attrs(a, b, ["b", "c"]))
        self.assertFalse(iutil.cmp_obj_attrs(b, a, ["b", "c"]))
        self.assertFalse(iutil.cmp_obj_attrs(b, a, ["c", "b"]))

    def to_ascii_test(self):
        """Test _toASCII."""
        # works with strings only, chokes on Unicode strings
        with self.assertRaises(ValueError):
            iutil._toASCII(u" ")
        with self.assertRaises(ValueError):
            iutil._toASCII(u"ABC")
        with self.assertRaises(ValueError):
            iutil._toASCII(u"Heizölrückstoßabdämpfung")

        # but empty Unicode string is fine :)
        iutil._toASCII(u"")

        # check some conversions
        self.assertEqual(iutil._toASCII(""), "")
        self.assertEqual(iutil._toASCII(" "), " ")
        self.assertEqual(iutil._toASCII("&@`'łŁ!@#$%^&*{}[]$'<>*"),
                         "&@`'\xc5\x82\xc5\x81!@#$%^&*{}[]$'<>*")
        self.assertEqual(iutil._toASCII("ABC"), "ABC")
        self.assertEqual(iutil._toASCII("aBC"), "aBC")
        _out = "Heiz\xc3\xb6lr\xc3\xbccksto\xc3\x9fabd\xc3\xa4mpfung"
        self.assertEqual(iutil._toASCII("Heizölrückstoßabdämpfung"), _out)

    def upper_ascii_test(self):
        """Test upperASCII."""
        self.assertEqual(iutil.upperASCII(""),"")
        self.assertEqual(iutil.upperASCII("a"),"A")
        self.assertEqual(iutil.upperASCII("A"),"A")
        self.assertEqual(iutil.upperASCII("aBc"),"ABC")
        # non-ASCII bytes are passed through unchanged
        self.assertEqual(iutil.upperASCII("_&*'@#$%^aBcžčŘ"),
                         "_&*'@#$%^ABC\xc5\xbe\xc4\x8d\xc5\x98")
        _out = "HEIZ\xc3\xb6LR\xc3\xbcCKSTO\xc3\x9fABD\xc3\xa4MPFUNG"
        self.assertEqual(iutil.upperASCII("Heizölrückstoßabdämpfung"), _out)

    def lower_ascii_test(self):
        """Test lowerASCII."""
        self.assertEqual(iutil.lowerASCII(""),"")
        self.assertEqual(iutil.lowerASCII("A"),"a")
        self.assertEqual(iutil.lowerASCII("a"),"a")
        self.assertEqual(iutil.lowerASCII("aBc"),"abc")
        # non-ASCII bytes are passed through unchanged
        self.assertEqual(iutil.lowerASCII("_&*'@#$%^aBcžčŘ"),
                         "_&*'@#$%^abc\xc5\xbe\xc4\x8d\xc5\x98")
        _out = "heiz\xc3\xb6lr\xc3\xbccksto\xc3\x9fabd\xc3\xa4mpfung"
        self.assertEqual(iutil.lowerASCII("Heizölrückstoßabdämpfung"), _out)

    def have_word_match_test(self):
        """Test have_word_match."""
        self.assertTrue(iutil.have_word_match("word1 word2", "word1 word2 word3"))
        self.assertTrue(iutil.have_word_match("word1 word2", "word2 word1 word3"))
        self.assertTrue(iutil.have_word_match("word2 word1", "word3 word1 word2"))
        self.assertTrue(iutil.have_word_match("word1", "word1 word2"))
        # matching is by substring, not whole words
        self.assertTrue(iutil.have_word_match("word1 word2", "word2word1 word3"))
        self.assertTrue(iutil.have_word_match("word2 word1", "word3 word1word2"))
        self.assertTrue(iutil.have_word_match("word1", "word1word2"))
        self.assertTrue(iutil.have_word_match("", "word1"))

        self.assertFalse(iutil.have_word_match("word3 word1", "word1"))
        self.assertFalse(iutil.have_word_match("word1 word3", "word1 word2"))
        self.assertFalse(iutil.have_word_match("word3 word2", "word1 word2"))
        self.assertFalse(iutil.have_word_match("word1word2", "word1 word2 word3"))
        self.assertFalse(iutil.have_word_match("word1", ""))
        # None on either side never matches
        self.assertFalse(iutil.have_word_match("word1", None))
        self.assertFalse(iutil.have_word_match(None, "word1"))
        self.assertFalse(iutil.have_word_match("", None))
        self.assertFalse(iutil.have_word_match(None, ""))
        self.assertFalse(iutil.have_word_match(None, None))

        # Compare unicode and str and make sure nothing crashes
        self.assertTrue(iutil.have_word_match("fête", u"fête champêtre"))
        self.assertTrue(iutil.have_word_match(u"fête", "fête champêtre"))

    def parent_dir_test(self):
        """Test the parent_dir function"""
        # (input path, expected parent) pairs, including trailing slashes
        # and a ".." component
        dirs = [("", ""), ("/", ""), ("/home/", ""), ("/home/bcl", "/home"), ("home/bcl", "home"),
                ("/home/bcl/", "/home"), ("/home/extra/bcl", "/home/extra"),
                ("/home/extra/bcl/", "/home/extra"), ("/home/extra/../bcl/", "/home")]

        for d, r in dirs:
            self.assertEquals(iutil.parent_dir(d), r)
| gpl-2.0 |
loulich/Couchpotato | libs/tornado/platform/select.py | 79 | 2633 | #!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Select-based IOLoop implementation.
Used as a fallback for systems that don't support epoll or kqueue.
"""
from __future__ import absolute_import, division, print_function, with_statement
import select
from tornado.ioloop import IOLoop, PollIOLoop
class _Select(object):
    """A simple, select()-based IOLoop implementation for non-Linux systems"""
    def __init__(self):
        self.read_fds = set()
        self.write_fds = set()
        self.error_fds = set()
        self.fd_sets = (self.read_fds, self.write_fds, self.error_fds)

    def close(self):
        # Nothing to release: select() holds no kernel object of its own.
        pass

    def register(self, fd, events):
        """Begin watching ``fd`` for the event mask ``events``.

        Raises IOError if the fd is already registered.
        """
        if fd in self.read_fds or fd in self.write_fds or fd in self.error_fds:
            raise IOError("fd %s already registered" % fd)
        if events & IOLoop.READ:
            self.read_fds.add(fd)
        if events & IOLoop.WRITE:
            self.write_fds.add(fd)
        if events & IOLoop.ERROR:
            self.error_fds.add(fd)
            # Closed connections are reported as errors by epoll and kqueue,
            # but as zero-byte reads by select, so when errors are requested
            # we need to listen for both read and error.
            # BUG FIX: this line was commented out, so ERROR-only
            # registrations never saw select()-style closed-connection
            # reports; upstream tornado keeps it active.
            self.read_fds.add(fd)

    def modify(self, fd, events):
        """Change the event mask for an already-registered fd."""
        self.unregister(fd)
        self.register(fd, events)

    def unregister(self, fd):
        """Stop watching ``fd``; a no-op if it was never registered."""
        self.read_fds.discard(fd)
        self.write_fds.discard(fd)
        self.error_fds.discard(fd)

    def poll(self, timeout):
        """Run one select() and return (fd, event-mask) pairs."""
        readable, writeable, errors = select.select(
            self.read_fds, self.write_fds, self.error_fds, timeout)
        events = {}
        # A single fd may appear in more than one result list; OR the
        # event bits together per fd.
        for fd in readable:
            events[fd] = events.get(fd, 0) | IOLoop.READ
        for fd in writeable:
            events[fd] = events.get(fd, 0) | IOLoop.WRITE
        for fd in errors:
            events[fd] = events.get(fd, 0) | IOLoop.ERROR
        return events.items()
class SelectIOLoop(PollIOLoop):
    """IOLoop backed by the portable ``_Select`` poller."""
    def initialize(self, **kwargs):
        poller = _Select()
        super(SelectIOLoop, self).initialize(impl=poller, **kwargs)
| gpl-3.0 |
dattatreya303/zulip | zerver/tests/test_i18n.py | 14 | 4660 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from typing import Any
import django
import mock
from django.test import TestCase
from django.conf import settings
from django.http import HttpResponse
from six.moves.http_cookies import SimpleCookie
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.management.commands import makemessages
class TranslationTestCase(ZulipTestCase):
    """
    Translation strings should change with locale. URLs should be locale
    aware.
    """

    # (language code, expected translation of "Register") pairs shared by
    # the locale-selection tests below; previously this list was
    # copy-pasted into each test.
    TRANSLATED_REGISTER = [
        ('en', u'Register'),
        ('de', u'Registrieren'),
        ('sr', u'Региструј се'),
        ('zh-hans', u'注册'),
    ]

    # e.g. self.client_post(url) if method is "post"
    def fetch(self, method, url, expected_status, **kwargs):
        # type: (str, str, int, **Any) -> HttpResponse
        """Issue a request via the test client and assert its status."""
        response = getattr(self.client, method)(url, **kwargs)
        self.assertEqual(response.status_code, expected_status,
                         msg="Expected %d, received %d for %s to %s" % (
                             expected_status, response.status_code, method, url))
        return response

    def test_accept_language_header(self):
        # type: () -> None
        # Locale chosen via the Accept-Language header.
        for lang, word in self.TRANSLATED_REGISTER:
            response = self.fetch('get', '/integrations/', 200,
                                  HTTP_ACCEPT_LANGUAGE=lang)
            self.assert_in_response(word, response)

    def test_cookie(self):
        # type: () -> None
        # Locale chosen via the Django language cookie.
        for lang, word in self.TRANSLATED_REGISTER:
            # Applying str function to LANGUAGE_COOKIE_NAME to convert unicode
            # into an ascii otherwise SimpleCookie will raise an exception
            self.client.cookies = SimpleCookie({str(settings.LANGUAGE_COOKIE_NAME): lang})

            response = self.fetch('get', '/integrations/', 200)
            self.assert_in_response(word, response)

    def test_i18n_urls(self):
        # type: () -> None
        # Locale chosen via a URL prefix.
        for lang, word in self.TRANSLATED_REGISTER:
            response = self.fetch('get', '/{}/integrations/'.format(lang), 200)
            self.assert_in_response(word, response)
class JsonTranslationTestCase(ZulipTestCase):
    """Error strings from JSON endpoints should go through gettext."""

    @mock.patch('zerver.lib.request._')
    def test_json_error(self, mock_gettext):
        # type: (Any) -> None
        translated = "Some other language '%s'"
        mock_gettext.return_value = translated

        self.login("hamlet@zulip.com")
        result = self.client_post("/json/refer_friend",
                                  HTTP_ACCEPT_LANGUAGE='de')

        self.assert_json_error_contains(result,
                                        translated % 'email',
                                        status_code=400)

    @mock.patch('zerver.views.auth._')
    def test_jsonable_error(self, mock_gettext):
        # type: (Any) -> None
        translated = "Some other language"
        mock_gettext.return_value = translated

        self.login("hamlet@zulip.com")
        result = self.client_get("/de/accounts/login/jwt/")

        self.assert_json_error_contains(result,
                                        translated,
                                        status_code=400)
class FrontendRegexTestCase(TestCase):
    """The makemessages extraction regexes should pull exactly one
    translatable string out of each template/JS construct."""

    def test_regexes(self):
        # type: () -> None
        command = makemessages.Command()

        cases = [
            ('{{#tr context}}english text with __variable__{{/tr}}{{/tr}}',
             'english text with __variable__'),

            ('{{t "english text" }}, "extra"}}',
             'english text'),

            ("{{t 'english text' }}, 'extra'}}",
             'english text'),

            ('i18n.t("english text"), "extra",)',
             'english text'),

            ('i18n.t("english text", context), "extra",)',
             'english text'),

            ("i18n.t('english text'), 'extra',)",
             'english text'),

            ("i18n.t('english text', context), 'extra',)",
             'english text'),
        ]

        for source_text, expected in cases:
            extracted = list(command.extract_strings(source_text).keys())
            # exactly one string, and it is the expected one
            self.assertEqual(extracted, [expected])
| apache-2.0 |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/IPython/nbconvert/preprocessors/svg2pdf.py | 8 | 3527 | """Module containing a preprocessor that converts outputs in the notebook from
one format to another.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import base64
import io
import os
import sys
import subprocess
from IPython.utils.tempdir import TemporaryDirectory
from IPython.utils.traitlets import Unicode
from .convertfigures import ConvertFiguresPreprocessor
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
INKSCAPE_APP = '/Applications/Inkscape.app/Contents/Resources/bin/inkscape'
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class SVG2PDFPreprocessor(ConvertFiguresPreprocessor):
    """
    Converts all of the outputs in a notebook from SVG to PDF.
    """

    def _from_format_default(self):
        # traitlets dynamic default: the input format handled
        return 'svg'

    def _to_format_default(self):
        # traitlets dynamic default: the output mimetype produced
        return 'application/pdf'

    command = Unicode(config=True,
        help="""The command to use for converting SVG to PDF

        This string is a template, which will be formatted with the keys
        to_filename and from_filename.

        The conversion call must read the SVG from {from_filename},
        and write a PDF to {to_filename}.
        """)
        # BUG FIX: the help text previously said "{from_flename}", which
        # does not match the template key actually substituted below.

    def _command_default(self):
        return self.inkscape + \
            ' --without-gui --export-pdf="{to_filename}" "{from_filename}"'

    inkscape = Unicode(config=True, help="The path to Inkscape, if necessary")
    def _inkscape_default(self):
        # Prefer the app-bundle binary on OS X when it exists; otherwise
        # rely on $PATH lookup.
        if sys.platform == "darwin":
            if os.path.isfile(INKSCAPE_APP):
                return INKSCAPE_APP
        return "inkscape"

    def convert_figure(self, data_format, data):
        """
        Convert a single SVG figure to PDF.  Returns converted data.
        """

        #Work in a temporary directory
        with TemporaryDirectory() as tmpdir:

            #Write fig to temp file
            input_filename = os.path.join(tmpdir, 'figure.' + data_format)
            # SVG data is unicode text
            with io.open(input_filename, 'w', encoding='utf8') as f:
                f.write(data)

            #Call conversion application
            output_filename = os.path.join(tmpdir, 'figure.pdf')
            shell = self.command.format(from_filename=input_filename,
                                        to_filename=output_filename)
            subprocess.call(shell, shell=True) #Shell=True okay since input is trusted.

            #Read output from drive
            # return value expects a filename
            if os.path.isfile(output_filename):
                with open(output_filename, 'rb') as f:
                    # PDF is a nb supported binary, data type, so base64 encode.
                    return base64.encodestring(f.read())
            else:
                raise TypeError("Inkscape svg to pdf conversion failed")
| gpl-3.0 |
diofant/diofant | diofant/core/function.py | 1 | 77992 | """
There are three types of functions implemented in Diofant:
1) defined functions (in the sense that they can be evaluated) like
exp or sin; they have a name and a body:
f = exp
2) undefined function which have a name but no body. Undefined
functions can be defined using a Function class as follows:
f = Function('f')
(the result will be a Function instance)
3) anonymous function (or lambda function) which have a body (defined
with dummy variables) but have no name:
f = Lambda(x, exp(x)*x)
f = Lambda((x, y), exp(x)*y)
Examples
========
>>> f(x)
f(x)
>>> print(repr(f(x).func))
Function('f')
>>> f(x).args
(x,)
"""
from __future__ import annotations
import collections
import inspect
import typing
import mpmath
import mpmath.libmp as mlib
from ..utilities import default_sort_key, ordered
from ..utilities.iterables import uniq
from .add import Add
from .assumptions import ManagedProperties
from .basic import Basic
from .cache import cacheit
from .compatibility import as_int, is_sequence, iterable
from .containers import Dict, Tuple
from .decorators import _sympifyit
from .evalf import PrecisionExhausted
from .evaluate import global_evaluate
from .expr import AtomicExpr, Expr
from .logic import fuzzy_and
from .numbers import Float, Integer, Rational, nan
from .operations import LatticeOp
from .rules import Transform
from .singleton import S
from .sympify import sympify
def _coeff_isneg(a):
    """Return True if the leading Number is negative.

    Examples
    ========

    >>> _coeff_isneg(-3*pi)
    True
    >>> _coeff_isneg(Integer(3))
    False
    >>> _coeff_isneg(-oo)
    True
    >>> _coeff_isneg(Symbol('n', negative=True)) # coeff is 1
    False

    """
    head = a.args[0] if (a.is_Mul or a.is_MatMul) else a
    return head.is_Number and head.is_negative
class PoleError(Exception):
    """Raised when an expansion pole is encountered.

    NOTE(review): judging by the name, raised by series-expansion code
    when the expansion point is a pole — confirm against the callers.
    """
class ArgumentIndexError(ValueError):
    """Raised when an invalid operation for positional argument happened.

    Constructed as ``ArgumentIndexError(func, argindex)``: ``args[0]``
    is the function and ``args[1]`` the offending argument index.
    """

    def __str__(self):
        # f-string for consistency with the rest of this module
        return (f'Invalid operation with argument number {self.args[1]} '
                f'for Function {self.args[0]}')
class FunctionClass(ManagedProperties):
    """
    Base class for function classes. FunctionClass is a subclass of type.

    Use Function('<function name>' [ , signature ]) to create
    undefined function classes.
    """

    def __init__(self, *args, **kwargs):
        # Derive the allowed argument count(s) from the signature of the
        # class's eval() method, unless overridden by an explicit nargs.
        assert hasattr(self, 'eval')
        evalargspec = inspect.getfullargspec(self.eval)
        if evalargspec.varargs:
            # eval(cls, *args): any number of arguments is acceptable
            evalargs = None
        else:
            evalargs = len(evalargspec.args) - 1  # subtract 1 for cls
            if evalargspec.defaults:
                # if there are default args then they are optional; the
                # fewest args will occur when all defaults are used and
                # the most when none are used (i.e. all args are given)
                evalargs = tuple(range(evalargs - len(evalargspec.defaults),
                                       evalargs + 1))
        # honor kwarg value or class-defined value before using
        # the number of arguments in the eval function (if present)
        nargs = kwargs.pop('nargs', self.__dict__.get('nargs', evalargs))
        # NOTE(review): args and kwargs are forwarded positionally (not
        # unpacked) — presumably what ManagedProperties/type expects here;
        # confirm before changing.
        super().__init__(args, kwargs)

        # Canonicalize nargs here; change to set in nargs.
        if is_sequence(nargs):
            if not nargs:
                raise ValueError('Incorrectly specified nargs as %s' % str(nargs))
            nargs = tuple(ordered(set(nargs)))
        elif nargs is not None:
            # single int -> one-element tuple (note the trailing comma)
            nargs = as_int(nargs),
        self._nargs = nargs

    @property
    def __signature__(self):
        """
        Allow inspect.signature to give a useful signature for
        Function subclasses.
        """
        # TODO: Look at nargs
        return inspect.signature(self.eval)

    @property
    def nargs(self):
        """Return a set of the allowed number of arguments for the function.

        Examples
        ========

        If the function can take any number of arguments, the set of whole
        numbers is returned:

        >>> Function('f').nargs
        Naturals0()

        If the function was initialized to accept one or more arguments, a
        corresponding set will be returned:

        >>> Function('f', nargs=1).nargs
        {1}
        >>> Function('f', nargs=(2, 1)).nargs
        {1, 2}

        The undefined function, after application, also has the nargs
        attribute; the actual number of arguments is always available by
        checking the ``args`` attribute:

        >>> f(1).nargs
        Naturals0()
        >>> len(f(1).args)
        1
        """
        from ..sets.sets import FiniteSet
        # XXX it would be nice to handle this in __init__ but there are import
        # problems with trying to import FiniteSet there
        return FiniteSet(*self._nargs) if self._nargs else S.Naturals0

    def __repr__(self):
        # Undefined functions print as constructor calls so that
        # eval(repr(...)) round-trips; concrete functions print their name.
        if issubclass(self, AppliedUndef):
            return f'Function({self.__name__!r})'
        else:
            return self.__name__

    def __str__(self):
        return self.__name__
class Application(Expr, metaclass=FunctionClass):
    """
    Base class for applied functions.

    Instances of Application represent the result of applying an application of
    any type to any object.
    """

    is_Function = True

    @cacheit
    def __new__(cls, *args, **options):
        from ..sets.fancysets import Naturals0
        from ..sets.sets import FiniteSet

        args = list(map(sympify, args))
        evaluate = options.pop('evaluate', global_evaluate[0])
        # WildFunction (and anything else like it) may have nargs defined
        # and we throw that value away here
        options.pop('nargs', None)

        if options:
            raise ValueError(f'Unknown options: {options}')

        if evaluate:
            # nan is absorbing: f(..., nan, ...) -> nan
            if nan in args:
                return nan

            evaluated = cls.eval(*args)
            if evaluated is not None:
                return evaluated

        obj = super().__new__(cls, *args, **options)

        # make nargs uniform here
        try:
            # things passing through here:
            #  - functions subclassed from Function (e.g. myfunc(1).nargs)
            #  - functions like cos(1).nargs
            #  - AppliedUndef with given nargs like Function('f', nargs=1)(1).nargs
            # Canonicalize nargs here
            if is_sequence(obj.nargs):
                nargs = tuple(ordered(set(obj.nargs)))
            elif obj.nargs is not None:
                nargs = as_int(obj.nargs),
            else:
                nargs = None
        except AttributeError:
            # things passing through here:
            #  - WildFunction('f').nargs
            #  - AppliedUndef with no nargs like Function('f')(1).nargs
            nargs = obj._nargs  # note the underscore here
        # store as a set-like object on the instance
        obj.nargs = FiniteSet(*nargs) if nargs else Naturals0()
        return obj

    @classmethod
    def eval(cls, *args):
        """
        Returns a canonical form of cls applied to arguments args.

        The eval() method is called when the class cls is about to be
        instantiated and it should return either some simplified instance
        (possible of some other class), or if the class cls should be
        unmodified, return None.
        """
        return

    def _eval_subs(self, old, new):
        # Substituting one function head for another: f -> g turns
        # f(args) into g(args), provided g accepts that many arguments.
        if (old.is_Function and new.is_Function and old == self.func and
                len(self.args) in new.nargs):
            return new(*self.args)
class Function(Application, Expr):
"""Base class for applied mathematical functions.
It also serves as a constructor for undefined function classes.
Examples
========
First example shows how to use Function as a constructor for undefined
function classes:
>>> g = g(x)
>>> f
f
>>> f(x)
f(x)
>>> g
g(x)
>>> f(x).diff(x)
Derivative(f(x), x)
>>> g.diff(x)
Derivative(g(x), x)
In the following example Function is used as a base class for
``MyFunc`` that represents a mathematical function *MyFunc*. Suppose
that it is well known, that *MyFunc(0)* is *1* and *MyFunc* at infinity
goes to *0*, so we want those two simplifications to occur automatically.
Suppose also that *MyFunc(x)* is real exactly when *x* is real. Here is
an implementation that honours those requirements:
>>> class MyFunc(Function):
...
... @classmethod
... def eval(cls, x):
... if x.is_Number:
... if x == 0:
... return Integer(1)
... elif x is oo:
... return Integer(0)
...
... def _eval_is_real(self):
... return self.args[0].is_real
...
>>> MyFunc(0) + sin(0)
1
>>> MyFunc(oo)
0
>>> MyFunc(3.54).evalf() # Not yet implemented for MyFunc.
MyFunc(3.54)
>>> MyFunc(I).is_real
False
In order for ``MyFunc`` to become useful, several other methods would
need to be implemented. See source code of some of the already
implemented functions for more complete examples.
Also, if the function can take more than one argument, then ``nargs``
must be defined, e.g. if ``MyFunc`` can take one or two arguments
then,
>>> class MyFunc(Function):
... nargs = (1, 2)
...
>>>
"""
@property
def _diff_wrt(self):
    """Allow derivatives wrt functions.

    Applied functions are valid differentiation variables, so
    Derivative(expr, f(x)) is permitted.

    Examples
    ========

    >>> f(x)._diff_wrt
    True

    """
    return True
@cacheit
def __new__(cls, *args, **options):
    # Handle calls like Function('f'): construct an undefined function
    # class rather than an applied function instance.
    if cls is Function:
        return UndefinedFunction(*args, **options)

    n = len(args)
    if n not in cls.nargs:
        # XXX: exception message must be in exactly this format to
        # make it work with NumPy's functions like vectorize(). See,
        # for example, https://github.com/numpy/numpy/issues/1697.
        # The ideal solution would be just to attach metadata to
        # the exception and change NumPy to take advantage of this.
        temp = ('%(name)s takes %(qual)s %(args)s '
                'argument%(plural)s (%(given)s given)')
        raise TypeError(temp % {
            'name': cls,
            'qual': 'exactly' if len(cls.nargs) == 1 else 'at least',
            'args': min(cls.nargs),
            'plural': 's'*(min(cls.nargs) != 1),
            'given': n})

    evaluate = options.get('evaluate', global_evaluate[0])
    result = super().__new__(cls, *args, **options)
    if not evaluate or not isinstance(result, cls):
        return result

    # If every argument requests numeric evaluation (min precision > 0),
    # evaluate the result at the largest requested precision.
    pr = max(cls._should_evalf(a) for a in result.args)
    pr2 = min(cls._should_evalf(a) for a in result.args)
    if pr2 > 0:
        return result.evalf(mlib.libmpf.prec_to_dps(pr), strict=False)
    return result
@classmethod
def _should_evalf(cls, arg):
    """
    Decide if the function should automatically evalf().

    By default (in this implementation), this happens if (and only if)
    the ARG is a floating point number: the return value is the Float's
    binary precision, or -1 when no automatic evalf is wanted.

    This function is used by __new__.
    """
    if arg.is_Float:
        return arg._prec
    if not arg.is_Add:
        return -1
    # For a sum, look at the real and imaginary parts separately and
    # take the largest Float precision found (or -1 if there is none).
    re_part, im_part = arg.as_real_imag()
    precisions = [-1]
    precisions.extend(part._prec
                      for part in (re_part, im_part) if part.is_Float)
    return max(precisions)
@classmethod
def class_key(cls):
    """Nice order of classes.

    Well-known elementary functions get small, fixed indices so that they
    sort in a conventional order; undefined functions (arbitrary nargs)
    sort first among the rest, all other functions last.
    """
    from ..sets.fancysets import Naturals0
    funcs = {
        'log': 11,
        'sin': 20,
        'cos': 21,
        'tan': 22,
        'cot': 23,
        'sinh': 30,
        'cosh': 31,
        'tanh': 32,
        'coth': 33,
        'conjugate': 40,
        're': 41,
        'im': 42,
        'arg': 43,
    }
    name = cls.__name__
    if name in funcs:
        index = funcs[name]
    else:
        index = 0 if isinstance(cls.nargs, Naturals0) else 10000
    return 4, index, name
def _eval_evalf(self, prec):
    """Numerically evaluate this application at binary precision ``prec``.

    Tries, in order: the mpmath function of the same (or translated)
    name, then a user-supplied ``_imp_`` implementation.  Returns None
    when neither route works so the caller can leave self unevaluated.
    """
    # Lookup mpmath function based on name
    try:
        if isinstance(self.func, UndefinedFunction):
            # Shouldn't lookup in mpmath but might have ._imp_
            raise AttributeError
        fname = self.func.__name__
        if not hasattr(mpmath, fname):
            from ..utilities.lambdify import MPMATH_TRANSLATIONS
            fname = MPMATH_TRANSLATIONS[fname]
        func = getattr(mpmath, fname)
    except (AttributeError, KeyError):
        # Fall back to a numeric implementation attached via lambdify's
        # implemented_function mechanism, if any.
        try:
            return Float(self._imp_(*[i.evalf(prec) for i in self.args]), prec)
        except (AttributeError, TypeError, ValueError, PrecisionExhausted):
            return
    # Convert all args to mpf or mpc
    # Convert the arguments to *higher* precision than requested for the
    # final result.
    # XXX + 5 is a guess, it is similar to what is used in evalf.py. Should
    # we be more intelligent about it?
    try:
        args = [arg._to_mpmath(prec + 5) for arg in self.args]
    except ValueError:
        # An argument could not be represented as an mpmath number.
        return
    with mpmath.workprec(prec):
        v = func(*args)
    return Expr._from_mpmath(v, prec)
def _eval_derivative(self, s):
    """Differentiate the application wrt ``s`` via the chain rule.

    f(g1, ..., gn).diff(s) == sum_i fdiff(i) * gi.diff(s), skipping
    arguments whose derivative is zero.  When ``fdiff`` cannot produce a
    closed form, the generic ``Function.fdiff`` (an unevaluated
    Derivative/Subs) is used for that argument.
    """
    # f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s)
    terms = []
    # fdiff uses 1-based argument indices; enumerate replaces the
    # original hand-rolled ``i = 0; i += 1`` counter.
    for i, a in enumerate(self.args, start=1):
        da = a.diff(s)
        if da == 0:
            continue
        try:
            df = self.fdiff(i)
        except ArgumentIndexError:
            df = Function.fdiff(self, i)
        terms.append(df * da)
    return Add(*terms)
def _eval_is_commutative(self):
    # An application commutes iff all its arguments do; fuzzy_and gives
    # three-valued logic (True/False/None when undecided).
    flags = (arg.is_commutative for arg in self.args)
    return fuzzy_and(flags)
def as_base_exp(self):
    """Return the 2-tuple (base, exponent): the function itself to the first power."""
    base, exponent = self, Integer(1)
    return base, exponent
def _eval_aseries(self, n, args0, x, logx):
    """
    Compute an asymptotic expansion around args0, in terms of self.args.

    Only used internally by ``_eval_nseries``; derived classes override
    this to implement asymptotic expansions.  The base implementation
    always raises PoleError.
    """
    from ..utilities.misc import filldedent
    message = """
        Asymptotic expansion of %s around %s is
        not implemented.""" % (type(self), args0)
    raise PoleError(filldedent(message))
def _eval_nseries(self, x, n, logx):
    """
    This function does compute series for multivariate functions,
    but the expansion is always in terms of *one* variable.

    Examples
    ========

    >>> atan2(x, y).series(x, n=2)
    atan2(0, y) + x/y + O(x**2)
    >>> atan2(x, y).series(y, n=2)
    -y/x + atan2(x, 0) + O(y**2)

    This function also computes asymptotic expansions, if necessary
    and possible:

    >>> loggamma(1/x)._eval_nseries(x, 0, None)
    -1/x - log(x)/x + log(x)/2 + O(1)
    """
    from ..series import Order
    from ..sets.sets import FiniteSet
    from .symbol import Dummy
    args = self.args
    args0 = [t.limit(x, 0) for t in args]
    # Case 1: some argument blows up as x -> 0 -> asymptotic machinery.
    if any(isinstance(t, Expr) and t.is_finite is False for t in args0):
        from .numbers import oo, zoo
        # XXX could use t.as_leading_term(x) here but it's a little
        # slower
        a = [t.compute_leading_term(x, logx=logx) for t in args]
        a0 = [t.limit(x, 0) for t in a]
        if any(t.has(oo, -oo, zoo, nan) for t in a0):
            return self._eval_aseries(n, args0, x, logx)
        # Careful: the argument goes to oo, but only logarithmically so. We
        # are supposed to do a power series expansion "around the
        # logarithmic term". e.g.
        #      f(1+x+log(x))
        #     -> f(1+logx) + x*f'(1+logx) + O(x**2)
        # where 'logx' is given in the argument
        a = [t._eval_nseries(x, n, logx) for t in args]
        z = [r - r0 for (r, r0) in zip(a, a0)]
        p = [Dummy()]*len(z)
        q = []
        v = None
        # Replace the single x-dependent remainder with a Dummy (v);
        # more than one x-dependent argument is not supported here.
        for ai, zi, pi in zip(a0, z, p):
            if zi.has(x):
                if v is not None:
                    raise NotImplementedError
                q.append(ai + pi)
                v = pi
            else:
                q.append(ai)
        e1 = self.func(*q)
        if v is None:
            return e1
        # Expand in the Dummy, then substitute the remainder back.
        s = e1._eval_nseries(v, n, logx)
        o = s.getO()
        s = s.removeO()
        return s.subs({v: zi}).expand() + Order(o.expr.subs({v: zi}), x)
    # Case 2: multivariate / shifted-argument functions -> generic
    # Taylor algorithm by repeated differentiation.
    if (self.func.nargs is S.Naturals0
            or (self.func.nargs == FiniteSet(1) and args0[0])
            or any(c > 1 for c in self.func.nargs)):
        e = self
        e1 = e.expand()
        if e == e1:
            # for example when e = sin(x+1) or e = sin(cos(x))
            # let's try the general algorithm
            term = e.subs({x: 0})
            if term.is_finite is False:
                raise PoleError(f'Cannot expand {self} around 0')
            series = term
            fact = Integer(1)
            _x = Dummy('x', real=True, positive=True)
            e = e.subs({x: _x})
            for i in range(n - 1):
                i += 1
                fact *= Rational(i)
                e = e.diff(_x)
                subs = e.subs({_x: 0})
                term = subs*(x**i)/fact
                term = term.expand()
                series += term
            return series + Order(x**n, x)
        return e1.nseries(x, n=n, logx=logx)
    # Case 3: single-argument function vanishing at 0 -> use the class's
    # own taylor_term to accumulate terms until order n is reached.
    arg = self.args[0]
    f_series = order = Integer(0)
    i, terms = 0, []
    while order == 0 or i <= n:
        term = self.taylor_term(i, arg, *terms)
        term = term.nseries(x, n=n, logx=logx)
        terms.append(term)
        if term:
            f_series += term
            order = Order(term, x)
        i += 1
    return f_series + order
def fdiff(self, argindex=1):
    """Returns the first derivative of the function.

    ``argindex`` is 1-based.  When the selected argument is a plain
    Symbol not shared with any other argument, an unevaluated Derivative
    is returned directly; otherwise the argument is temporarily replaced
    by a Dummy and the result wrapped in Subs.  Raises
    ArgumentIndexError for an out-of-range index.
    """
    from .symbol import Dummy
    if not (1 <= argindex <= len(self.args)):
        raise ArgumentIndexError(self, argindex)
    if self.args[argindex - 1].is_Symbol:
        # for/else: only take the shortcut if the symbol does not also
        # appear inside any *other* argument.
        for i in range(len(self.args)):
            if i == argindex - 1:
                continue
            # See issue sympy/sympy#8510
            if self.args[argindex - 1] in self.args[i].free_symbols:
                break
        else:
            return Derivative(self, self.args[argindex - 1], evaluate=False)
    # See issue sympy/sympy#4624 and issue sympy/sympy#4719
    # and issue sympy/sympy#5600
    arg_dummy = Dummy(f'xi_{argindex:d}')
    arg_dummy.dummy_index = hash(self.args[argindex - 1])
    new_args = list(self.args)
    new_args[argindex-1] = arg_dummy
    return Subs(Derivative(self.func(*new_args), arg_dummy),
                (arg_dummy, self.args[argindex - 1]))
def _eval_as_leading_term(self, x):
    """Stub that should be overridden by new Functions to return
    the first non-zero term in a series if ever an x-dependent
    argument whose leading term vanishes as x -> 0 might be encountered.

    See, for example, cos._eval_as_leading_term.
    """
    from ..series import Order
    args = [a.as_leading_term(x) for a in self.args]
    o = Order(1, x)
    if any(x in a.free_symbols and o.contains(a) for a in args):
        # Whereas x and any finite number are contained in O(1, x),
        # expressions like 1/x are not. If any arg simplified to a
        # vanishing expression as x -> 0 (like x or x**2, but not
        # 3, 1/x, etc...) then the _eval_as_leading_term is needed
        # to supply the first non-zero term of the series,
        #
        # e.g. expression     leading term
        #      ----------     ------------
        #      cos(1/x)       cos(1/x)
        #      cos(cos(x))    cos(1)
        #      cos(x)         1 <- _eval_as_leading_term needed
        #      sin(x)         x <- _eval_as_leading_term needed
        #
        raise NotImplementedError(
            f'{self.func} has no _eval_as_leading_term routine')
    else:
        return self.func(*args)
class AppliedUndef(Function):
    """
    Base class for expressions that result from applying an undefined
    function to some arguments.
    """

    def __new__(cls, *args, **options):
        # Sympify each argument before delegating to Function.__new__.
        sympified = [sympify(a) for a in args]
        return super().__new__(cls, *sympified, **options)

    def _eval_as_leading_term(self, x):
        # Nothing is known about an undefined function, so it is its own
        # leading term.
        return self
class UndefinedFunction(FunctionClass):
    """The (meta)class of undefined functions."""

    def __new__(cls, name, **kwargs):
        # Each undefined function is a fresh class deriving from
        # AppliedUndef; instances of that class are applications f(x).
        new_cls = type.__new__(cls, name, (AppliedUndef,), kwargs)
        new_cls.__module__ = None
        return new_cls

    def __instancecheck__(self, instance):
        return self in type(instance).__mro__

    def __eq__(self, other):
        # Two undefined functions compare equal when their sort keys
        # (which include the name) agree.
        if not isinstance(other, self.__class__):
            return False
        return self.class_key() == other.class_key()

    def __hash__(self):
        return super().__hash__()
class WildFunction(Function, AtomicExpr):
    """
    A WildFunction function matches any function (with its arguments).

    Examples
    ========

    >>> F = WildFunction('F')
    >>> F.nargs
    Naturals0()
    >>> x.match(F)
    >>> F.match(F)
    {F_: F_}
    >>> f(x).match(F)
    {F_: f(x)}
    >>> cos(x).match(F)
    {F_: cos(x)}
    >>> f(x, y).match(F)
    {F_: f(x, y)}

    To match functions with a given number of arguments, set ``nargs`` to the
    desired value at instantiation:

    >>> F = WildFunction('F', nargs=2)
    >>> F.nargs
    {2}
    >>> f(x).match(F)
    >>> f(x, y).match(F)
    {F_: f(x, y)}

    To match functions with a range of arguments, set ``nargs`` to a tuple
    containing the desired number of arguments, e.g. if ``nargs = (1, 2)``
    then functions with 1 or 2 arguments will be matched.

    >>> F = WildFunction('F', nargs=(1, 2))
    >>> F.nargs
    {1, 2}
    >>> f(x).match(F)
    {F_: f(x)}
    >>> f(x, y).match(F)
    {F_: f(x, y)}
    >>> f(x, y, 1).match(F)
    """

    include: set[typing.Any] = set()

    def __init__(self, name, **assumptions):
        from ..sets.sets import FiniteSet, Set
        self.name = name
        nargs = assumptions.pop('nargs', S.Naturals0)
        if not isinstance(nargs, Set):
            # Canonicalize nargs here.  See also FunctionClass.
            if is_sequence(nargs):
                nargs = tuple(ordered(set(nargs)))
            else:
                nargs = as_int(nargs),
            nargs = FiniteSet(*nargs)
        self.nargs = nargs

    def _matches(self, expr, repl_dict=None):
        """Helper method for match()

        Returns an updated copy of ``repl_dict`` mapping self to ``expr``
        when ``expr`` is a function application with an accepted argument
        count, else None.

        See Also
        ========

        diofant.core.basic.Basic.matches
        """
        if not isinstance(expr, (AppliedUndef, Function)):
            return
        if len(expr.args) not in self.nargs:
            return
        # Fixed: the default used to be a mutable ``{}`` (shared across
        # calls); use None and create a fresh dict per call instead.
        repl_dict = {} if repl_dict is None else repl_dict.copy()
        repl_dict[self] = expr
        return repl_dict
class Derivative(Expr):
    """
    Carries out differentiation of the given expression with respect to symbols.

    expr must define ._eval_derivative(symbol) method that returns
    the differentiation result. This function only needs to consider the
    non-trivial case where expr contains symbol and it should call the diff()
    method internally (not _eval_derivative); Derivative should be the only
    one to call _eval_derivative.

    Simplification of high-order derivatives:

    Because there can be a significant amount of simplification that can be
    done when multiple differentiations are performed, results will be
    automatically simplified in a fairly conservative fashion unless the
    keyword ``simplify`` is set to False.

    >>> e = sqrt((x + 1)**2 + x)
    >>> diff(e, (x, 5), simplify=False).count_ops()
    136
    >>> diff(e, (x, 5)).count_ops()
    30

    Ordering of variables:

    If evaluate is set to True and the expression can not be evaluated, the
    list of differentiation symbols will be sorted, that is, the expression is
    assumed to have continuous derivatives up to the order asked. This sorting
    assumes that derivatives wrt Symbols commute, derivatives wrt non-Symbols
    commute, but Symbol and non-Symbol derivatives don't commute with each
    other.

    Derivative wrt non-Symbols:

    This class also allows derivatives wrt non-Symbols that have _diff_wrt
    set to True, such as Function and Derivative. When a derivative wrt a non-
    Symbol is attempted, the non-Symbol is temporarily converted to a Symbol
    while the differentiation is performed.

    Note that this may seem strange, that Derivative allows things like
    f(g(x)).diff(g(x)), or even f(cos(x)).diff(cos(x)). The motivation for
    allowing this syntax is to make it easier to work with variational calculus
    (i.e., the Euler-Lagrange method). The best way to understand this is that
    the action of derivative with respect to a non-Symbol is defined by the
    above description: the object is substituted for a Symbol and the
    derivative is taken with respect to that. This action is only allowed for
    objects for which this can be done unambiguously, for example Function and
    Derivative objects. Note that this leads to what may appear to be
    mathematically inconsistent results. For example::

        >>> (2*cos(x)).diff(cos(x))
        2
        >>> (2*sqrt(1 - sin(x)**2)).diff(cos(x))
        0

    This appears wrong because in fact 2*cos(x) and 2*sqrt(1 - sin(x)**2) are
    identically equal. However this is the wrong way to think of this. Think
    of it instead as if we have something like this::

        >>> from diofant.abc import s
        >>> def f(u):
        ...     return 2*u
        ...
        >>> def g(u):
        ...     return 2*sqrt(1 - u**2)
        ...
        >>> f(cos(x))
        2*cos(x)
        >>> g(sin(x))
        2*sqrt(-sin(x)**2 + 1)
        >>> f(c).diff(c)
        2
        >>> f(c).diff(c)
        2
        >>> g(s).diff(c)
        0
        >>> g(sin(x)).diff(cos(x))
        0

    Here, the Symbols c and s act just like the functions cos(x) and sin(x),
    respectively. Think of 2*cos(x) as f(c).subs({c: cos(x)}) (or f(c) *at*
    c = cos(x)) and 2*sqrt(1 - sin(x)**2) as g(s).subs({s: sin(x)}) (or g(s) *at*
    s = sin(x)), where f(u) == 2*u and g(u) == 2*sqrt(1 - u**2). Here, we
    define the function first and evaluate it at the function, but we can
    actually unambiguously do this in reverse in Diofant, because
    expr.subs({Function: Symbol}) is well-defined: just structurally replace the
    function everywhere it appears in the expression.

    This is the same notational convenience used in the Euler-Lagrange method
    when one says F(t, f(t), f'(t)).diff(f(t)). What is actually meant is
    that the expression in question is represented by some F(t, u, v) at u =
    f(t) and v = f'(t), and F(t, f(t), f'(t)).diff(f(t)) simply means F(t, u,
    v).diff(u) at u = f(t).

    We do not allow derivatives to be taken with respect to expressions where this
    is not so well defined. For example, we do not allow expr.diff(x*y)
    because there are multiple ways of structurally defining where x*y appears
    in an expression, some of which may surprise the reader (for example, a
    very strict definition would have that (x*y*z).diff(x*y) == 0).

        >>> (x*y*z).diff(x*y)
        Traceback (most recent call last):
        ...
        ValueError: Can't differentiate wrt the variable: x*y, 1

    Note that this definition also fits in nicely with the definition of the
    chain rule. Note how the chain rule in Diofant is defined using unevaluated
    Subs objects::

        >>> f, g = symbols('f g', cls=Function)
        >>> f(2*g(x)).diff(x)
        2*Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), (_xi_1, 2*g(x)))
        >>> f(g(x)).diff(x)
        Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), (_xi_1, g(x)))

    Finally, note that, to be consistent with variational calculus, and to
    ensure that the definition of substituting a Function for a Symbol in an
    expression is well-defined, derivatives of functions are assumed to not be
    related to the function. In other words, we have::

        >>> diff(f(x), x).diff(f(x))
        0

    The same is true for derivatives of different orders::

        >>> diff(f(x), (x, 2)).diff(diff(f(x), (x, 1)))
        0
        >>> diff(f(x), (x, 1)).diff(diff(f(x), (x, 2)))
        0

    Note, any class can allow derivatives to be taken with respect to itself.

    Examples
    ========

    Some basic examples:

    >>> Derivative(x**2, x, evaluate=True)
    2*x
    >>> Derivative(Derivative(f(x, y), x), y)
    Derivative(f(x, y), x, y)
    >>> Derivative(f(x), (x, 3))
    Derivative(f(x), x, x, x)
    >>> Derivative(f(x, y), y, x, evaluate=True)
    Derivative(f(x, y), x, y)

    Now some derivatives wrt functions:

    >>> Derivative(f(x)**2, f(x), evaluate=True)
    2*f(x)
    >>> Derivative(f(g(x)), x, evaluate=True)
    Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), (_xi_1, g(x)))
    """

    is_Derivative = True

    @property
    def _diff_wrt(self):
        """Allow derivatives wrt Derivatives if it contains a function.

        Examples
        ========

        >>> Derivative(f(x), x)._diff_wrt
        True
        >>> Derivative(x**2, x)._diff_wrt
        False
        """
        if self.expr.is_Function:
            return True
        else:
            return False

    def __new__(cls, expr, *args, **assumptions):
        from .symbol import Dummy
        expr = sympify(expr)

        # There are no args, we differentiate wrt all of the free symbols
        # in expr.
        if not args:
            variables = expr.free_symbols
            args = tuple(variables)
            if len(variables) != 1:
                from ..utilities.misc import filldedent
                raise ValueError(filldedent("""
                    The variable(s) of differentiation
                    must be supplied to differentiate %s""" % expr))

        # Standardize the args by sympifying them and making appending a
        # count of 1 if there is only variable: diff(e, x) -> diff(e, (x, 1)).
        args = list(sympify(args))
        for i, a in enumerate(args):
            if not isinstance(a, Tuple):
                args[i] = (a, Integer(1))

        variable_count = []
        all_zero = True
        for v, count in args:
            if not v._diff_wrt:
                from ..utilities.misc import filldedent
                ordinal = 'st' if count == 1 else 'nd' if count == 2 else 'rd' if count == 3 else 'th'
                raise ValueError(filldedent("""
                Can\'t calculate %s%s derivative wrt %s.""" % (count, ordinal, v)))
            if count:
                if all_zero:
                    all_zero = False
                variable_count.append(Tuple(v, count))

        # We make a special case for 0th derivative, because there is no
        # good way to unambiguously print this.
        if all_zero:
            return expr

        # Pop evaluate because it is not really an assumption and we will need
        # to track it carefully below.
        evaluate = assumptions.pop('evaluate', False)

        # Look for a quick exit if there are symbols that don't appear in
        # expression at all. Note, this cannot check non-symbols like
        # functions and Derivatives as those can be created by intermediate
        # derivatives.
        if evaluate:
            symbol_set = {sc[0] for sc in variable_count if sc[0].is_Symbol}
            if symbol_set.difference(expr.free_symbols):
                return Integer(0)

        # We make a generator so as to only generate a variable when necessary.
        # If a high order of derivative is requested and the expr becomes 0
        # after a few differentiations, then we won't need the other variables.
        variablegen = (v for v, count in variable_count for i in range(count))

        # If we can't compute the derivative of expr (but we wanted to) and
        # expr is itself not a Derivative, finish building an unevaluated
        # derivative class by calling Expr.__new__.
        if (not (hasattr(expr, '_eval_derivative') and evaluate) and
                (not isinstance(expr, Derivative))):
            variables = list(variablegen)
            # If we wanted to evaluate, we sort the variables into standard
            # order for later comparisons. This is too aggressive if evaluate
            # is False, so we don't do it in that case.
            if evaluate:
                # TODO: check if assumption of discontinuous derivatives exist
                variables = cls._sort_variables(variables)
            # Here we *don't* need to reinject evaluate into assumptions
            # because we are done with it and it is not an assumption that
            # Expr knows about.
            obj = Expr.__new__(cls, expr, *variables, **assumptions)
            return obj

        # Compute the derivative now by repeatedly calling the
        # _eval_derivative method of expr for each variable. When this method
        # returns None, the derivative couldn't be computed wrt that variable
        # and we save the variable for later.
        unhandled_variables = []

        # Once we encounter a non_symbol that is unhandled, we stop taking
        # derivatives entirely. This is because derivatives wrt functions
        # don't commute with derivatives wrt symbols and we can't safely
        # continue.
        unhandled_non_symbol = False
        nderivs = 0  # how many derivatives were performed
        for v in variablegen:
            is_symbol = v.is_Symbol

            if unhandled_non_symbol:
                obj = None
            else:
                if not is_symbol:
                    # NOTE(review): ``i`` here is the loop variable left
                    # over from the ``enumerate(args)`` loop above (so it
                    # equals len(args) - 1), not a per-variable index.
                    # Only the Dummy's display name depends on it, so this
                    # looks harmless, but confirm before changing.
                    new_v = Dummy(f'xi_{i:d}')
                    new_v.dummy_index = hash(v)
                    expr = expr.xreplace({v: new_v})
                    old_v = v
                    v = new_v
                obj = expr._eval_derivative(v)
                nderivs += 1
                if not is_symbol:
                    if obj is not None:
                        if obj.is_Derivative and not old_v.is_Symbol:
                            # Derivative evaluated at a generic point, i.e.
                            # that is not a symbol.
                            obj = Subs(obj, (v, old_v))
                        else:
                            obj = obj.xreplace({v: old_v})
                    v = old_v

            if obj is None:
                unhandled_variables.append(v)
                if not is_symbol:
                    unhandled_non_symbol = True
            elif obj == 0:
                return Integer(0)
            else:
                expr = obj

        if unhandled_variables:
            unhandled_variables = cls._sort_variables(unhandled_variables)
            expr = Expr.__new__(cls, expr, *unhandled_variables, **assumptions)
        else:
            # We got a Derivative at the end of it all, and we rebuild it by
            # sorting its variables.
            if isinstance(expr, Derivative):
                expr = cls(
                    expr.expr, *cls._sort_variables(expr.variables)
                )

        if nderivs > 1 and assumptions.get('simplify', True):
            from ..simplify.simplify import signsimp
            from .exprtools import factor_terms
            expr = factor_terms(signsimp(expr))
        return expr

    @classmethod
    def _sort_variables(cls, vars):
        """Sort variables, but disallow sorting of non-symbols.

        When taking derivatives, the following rules usually hold:

        * Derivative wrt different symbols commute.
        * Derivative wrt different non-symbols commute.
        * Derivatives wrt symbols and non-symbols don't commute.

        Examples
        ========

        >>> vsort = Derivative._sort_variables

        >>> vsort((x, y, z))
        [x, y, z]

        >>> vsort((h(x), g(x), f(x)))
        [f(x), g(x), h(x)]

        >>> vsort((z, y, x, h(x), g(x), f(x)))
        [x, y, z, f(x), g(x), h(x)]

        >>> vsort((x, f(x), y, f(y)))
        [x, f(x), y, f(y)]

        >>> vsort((y, x, g(x), f(x), z, h(x), y, x))
        [x, y, f(x), g(x), z, h(x), x, y]

        >>> vsort((z, y, f(x), x, f(x), g(x)))
        [y, z, f(x), x, f(x), g(x)]

        >>> vsort((z, y, f(x), x, f(x), g(x), z, z, y, x))
        [y, z, f(x), x, f(x), g(x), x, y, z, z]
        """
        sorted_vars = []
        symbol_part = []
        non_symbol_part = []
        # Sort each maximal run of symbols (resp. non-symbols) separately;
        # runs are flushed whenever the kind of variable changes.
        for v in vars:
            if not v.is_Symbol:
                if len(symbol_part) > 0:
                    sorted_vars.extend(sorted(symbol_part,
                                              key=default_sort_key))
                    symbol_part = []
                non_symbol_part.append(v)
            else:
                if len(non_symbol_part) > 0:
                    sorted_vars.extend(sorted(non_symbol_part,
                                              key=default_sort_key))
                    non_symbol_part = []
                symbol_part.append(v)
        if len(non_symbol_part) > 0:
            sorted_vars.extend(sorted(non_symbol_part,
                                      key=default_sort_key))
        if len(symbol_part) > 0:
            sorted_vars.extend(sorted(symbol_part,
                                      key=default_sort_key))
        return sorted_vars

    def _eval_is_commutative(self):
        return self.expr.is_commutative

    def _eval_derivative(self, v):
        # If the variable s we are diff wrt is not in self.variables, we
        # assume that we might be able to take the derivative.
        if v not in self.variables:
            obj = self.expr.diff(v)
            if obj == 0:
                return Integer(0)
            if isinstance(obj, Derivative):
                return obj.func(obj.expr, *(self.variables + obj.variables))
            # The derivative wrt s could have simplified things such that the
            # derivative wrt things in self.variables can now be done. Thus,
            # we set evaluate=True to see if there are any other derivatives
            # that can be done. The most common case is when obj is a simple
            # number so that the derivative wrt anything else will vanish.
            return self.func(obj, *self.variables, evaluate=True)
        # In this case s was in self.variables so the derivative wrt s has
        # already been attempted and was not computed, either because it
        # couldn't be or evaluate=False originally.
        return self.func(self.expr, *(self.variables + (v, )), evaluate=False)

    def doit(self, **hints):
        """Evaluate objects that are not evaluated by default.

        See Also
        ========

        diofant.core.basic.Basic.doit
        """
        expr = self.expr
        if hints.get('deep', True):
            expr = expr.doit(**hints)
        hints['evaluate'] = True
        return self.func(expr, *self.variables, **hints)

    @_sympifyit('z0', NotImplementedError)
    def doit_numerically(self, z0):
        """
        Evaluate the derivative at z numerically.

        When we can represent derivatives at a point, this should be folded
        into the normal evalf. For now, we need a special method.
        """
        import mpmath
        from .expr import Expr
        if len(self.free_symbols) != 1 or len(self.variables) != 1:
            raise NotImplementedError('partials and higher order derivatives')
        z = list(self.free_symbols)[0]

        def eval(x):
            # Round-trip through Diofant to evaluate the expression at x,
            # then hand the value back to mpmath at its working precision.
            f0 = self.expr.subs({z: Expr._from_mpmath(x, prec=mpmath.mp.prec)})
            f0 = f0.evalf(mlib.libmpf.prec_to_dps(mpmath.mp.prec), strict=False)
            return f0._to_mpmath(mpmath.mp.prec)
        return Expr._from_mpmath(mpmath.diff(eval,
                                             z0._to_mpmath(mpmath.mp.prec)),
                                 mpmath.mp.prec)

    @property
    def expr(self):
        """Return expression."""
        return self.args[0]

    @property
    def variables(self):
        """Return tuple of symbols, wrt derivative is taken."""
        return self.args[1:]

    @property
    def free_symbols(self):
        """Return from the atoms of self those which are free symbols.

        See Also
        ========

        diofant.core.basic.Basic.free_symbols
        """
        return self.expr.free_symbols

    def _eval_subs(self, old, new):
        if old in self.variables and not new._diff_wrt:
            # issue sympy/sympy#4719
            return Subs(self, (old, new))
        # If both are Derivatives with the same expr, check if old is
        # equivalent to self or if old is a subderivative of self.
        if old.is_Derivative and old.expr == self.expr:
            # Check if canonical order of variables is equal.
            old_vars = collections.Counter(old.variables)
            self_vars = collections.Counter(self.variables)
            if old_vars == self_vars:
                return new

            # collections.Counter doesn't have __le__
            def _subset(a, b):
                return all(a[i] <= b[i] for i in a)

            if _subset(old_vars, self_vars):
                # old is a subderivative: substitute it and keep the
                # leftover differentiation variables.
                return Derivative(new, *(self_vars - old_vars).elements())
        return Derivative(*(x._subs(old, new) for x in self.args))

    def _eval_lseries(self, x, logx):
        for term in self.expr.series(x, n=None, logx=logx):
            yield self.func(term, *self.variables)

    def _eval_nseries(self, x, n, logx):
        arg = self.expr.nseries(x, n=n, logx=logx)
        o = arg.getO()
        rv = [self.func(a, *self.variables) for a in Add.make_args(arg.removeO())]
        if o:
            # Differentiation lowers the order term by one power of x.
            rv.append(o/x)
        return Add(*rv)

    def _eval_as_leading_term(self, x):
        return self.func(self.expr.as_leading_term(x), *self.variables)
class Lambda(Expr):
    """
    Lambda(x, expr) represents a lambda function similar to Python's
    'lambda x: expr'. A function of several variables is written as
    Lambda((x, y, ...), expr).

    A simple example:

    >>> f = Lambda(x, x**2)
    >>> f(4)
    16

    For multivariate functions, use:

    >>> f2 = Lambda((x, y, z, t), x + y**z + t**z)
    >>> f2(1, 2, 3, 4)
    73

    A handy shortcut for lots of arguments:

    >>> p = x, y, z
    >>> f = Lambda(p, x + y*z)
    >>> f(*p)
    x + y*z
    """

    is_Function = True

    def __new__(cls, variables, expr):
        from ..sets.sets import FiniteSet
        # Accept a single symbol or any iterable of symbols.
        v = list(variables) if iterable(variables) else [variables]
        for i in v:
            if not getattr(i, 'is_Symbol', False):
                raise TypeError(f'variable is not a symbol: {i}')
        # Lambda(x, x) is the identity function, a known singleton.
        if len(v) == 1 and v[0] == expr:
            return S.IdentityFunction
        obj = Expr.__new__(cls, Tuple(*v), sympify(expr))
        obj.nargs = FiniteSet(len(v))
        return obj

    @property
    def variables(self):
        """The variables used in the internal representation of the function."""
        return self.args[0]

    @property
    def expr(self):
        """The return value of the function."""
        return self.args[1]

    @property
    def free_symbols(self):
        """Return from the atoms of self those which are free symbols.

        See Also
        ========

        diofant.core.basic.Basic.free_symbols
        """
        return self.expr.free_symbols - set(self.variables)

    def __call__(self, *args):
        n = len(args)
        if n not in self.nargs:  # Lambda only ever has 1 value in nargs
            # XXX: exception message must be in exactly this format to
            # make it work with NumPy's functions like vectorize(). See,
            # for example, https://github.com/numpy/numpy/issues/1697.
            # The ideal solution would be just to attach metadata to
            # the exception and change NumPy to take advantage of this.

            # XXX does this apply to Lambda? If not, remove this comment.
            temp = ('%(name)s takes exactly %(args)s '
                    'argument%(plural)s (%(given)s given)')
            raise TypeError(temp % {
                'name': self,
                'args': list(self.nargs)[0],
                'plural': 's'*(list(self.nargs)[0] != 1),
                'given': n})
        return self.expr.xreplace(dict(zip(self.variables, args)))

    def __eq__(self, other):
        if not isinstance(other, Lambda):
            return False
        if self.nargs != other.nargs:
            return False

        # Compare bodies up to renaming of the bound variables.
        selfexpr = self.args[1]
        otherexpr = other.args[1]
        otherexpr = otherexpr.xreplace(dict(zip(other.args[0], self.args[0])))
        return selfexpr == otherexpr

    def __hash__(self):
        return super().__hash__()

    def _hashable_content(self):
        # Hash on the body with bound variables canonically renamed, so
        # alpha-equivalent Lambdas hash (and compare) equal.
        return self.expr.xreplace(self.canonical_variables),
class Subs(Expr):
    """
    Represents unevaluated substitutions of an expression.

    ``Subs`` receives at least 2 arguments: an expression, a pair of old
    and new expression to substitute or several such pairs.

    ``Subs`` objects are generally useful to represent unevaluated derivatives
    calculated at a point.

    The variables may be expressions, but they are subjected to the limitations
    of subs(), so it is usually a good practice to use only symbols for
    variables, since in that case there can be no ambiguity.

    There's no automatic expansion - use the method .doit() to effect all
    possible substitutions of the object and also of objects inside the
    expression.

    When evaluating derivatives at a point that is not a symbol, a Subs object
    is returned. One is also able to calculate derivatives of Subs objects - in
    this case the expression is always expanded (for the unevaluated form, use
    Derivative()).

    Examples
    ========

    >>> e = Subs(f(x).diff(x), (x, y))
    >>> e.subs({y: 0})
    Subs(Derivative(f(x), x), (x, 0))
    >>> e.subs({f: sin}).doit()
    cos(y)

    >>> Subs(f(x)*sin(y) + z, (x, 0), (y, 1))
    Subs(z + f(x)*sin(y), (x, 0), (y, 1))
    >>> _.doit()
    z + f(0)*sin(1)
    """

    def __new__(cls, expr, *args, **assumptions):
        from .symbol import Symbol
        args = sympify(args)
        if len(args) and all(is_sequence(_) and len(_) == 2 for _ in args):
            variables, point = zip(*args)
        else:
            raise ValueError('Subs support two or more arguments')
        if tuple(uniq(variables)) != variables:
            repeated = [v for v in set(variables) if variables.count(v) > 1]
            raise ValueError('cannot substitute expressions %s more than '
                             'once.' % repeated)

        expr = sympify(expr)

        # use symbols with names equal to the point value (with prepended _)
        # to give a variable-independent expression
        pre = '_'
        pts = sorted(set(point), key=default_sort_key)
        from ..printing import StrPrinter

        class CustomStrPrinter(StrPrinter):
            # Include the dummy_index so distinct Dummies with the same
            # name print differently.
            def _print_Dummy(self, expr):
                return str(expr) + str(expr.dummy_index)

        def mystr(expr, **settings):
            p = CustomStrPrinter(settings)
            return p.doprint(expr)
        while 1:
            s_pts = {p: Symbol(pre + mystr(p)) for p in pts}
            reps = [(v, s_pts[p])
                    for v, p in zip(variables, point)]
            # if any underscore-prepended symbol is already a free symbol
            # and is a variable with a different point value, then there
            # is a clash, e.g. _0 clashes in Subs(_0 + _1, (_0, 1), (_1, 0))
            # because the new symbol that would be created is _1 but _1
            # is already mapped to 0 so __0 and __1 are used for the new
            # symbols
            if any(r in expr.free_symbols and
                   r in variables and
                   Symbol(pre + mystr(point[variables.index(r)])) != r
                   for _, r in reps):
                pre += '_'
                continue
            reps  # XXX "peephole" optimization, http://bugs.python.org/issue2506
            break

        obj = Expr.__new__(cls, expr, *sympify(tuple(zip(variables, point))))
        # _expr is the variable-independent canonical form used for
        # equality and hashing.
        obj._expr = expr.subs(reps)
        return obj

    def _eval_is_commutative(self):
        return (self.expr.is_commutative and
                all(p.is_commutative for p in self.point))

    def doit(self, **hints):
        """Evaluate objects that are not evaluated by default.

        See Also
        ========

        diofant.core.basic.Basic.doit
        """
        return self.expr.doit(**hints).subs(list(zip(self.variables, self.point)))

    def evalf(self, dps=15, **options):
        """Evaluate the given formula to an accuracy of dps decimal digits.

        See Also
        ========

        diofant.core.evalf.EvalfMixin.evalf
        """
        return self.doit().evalf(dps, **options)

    #: alias for :meth:`evalf`
    n = evalf

    @property
    def variables(self):
        """The variables to be evaluated."""
        return Tuple(*tuple(zip(*self.args[1:])))[0]

    @property
    def expr(self):
        """The expression on which the substitution operates."""
        return self.args[0]

    @property
    def point(self):
        """The values for which the variables are to be substituted."""
        return Tuple(*tuple(zip(*self.args[1:])))[1]

    @property
    def free_symbols(self):
        """Return from the atoms of self those which are free symbols.

        See Also
        ========

        diofant.core.basic.Basic.free_symbols
        """
        return (self.expr.free_symbols - set(self.variables) |
                set(self.point.free_symbols))

    def __eq__(self, other):
        if not isinstance(other, Subs):
            return False
        # Compare the variable-independent canonical forms.
        return self._expr == other._expr

    def __hash__(self):
        return super().__hash__()

    def _hashable_content(self):
        return self._expr.xreplace(self.canonical_variables),

    def _eval_subs(self, old, new):
        # Substituting for a bound variable has no effect.
        if old in self.variables:
            return self
        if isinstance(old, Subs) and self.point == old.point:
            if self.expr.subs(zip(self.variables, old.variables)) == old.expr:
                return new

    def _eval_derivative(self, s):
        # d/ds Subs(e, (v, p)) = Subs(de/ds, (v, p))
        #                        + sum_v dp/ds * Subs(de/dv, (v, p))
        return Add((self.func(self.expr.diff(s), *self.args[1:]).doit()
                    if s not in self.variables else Integer(0)),
                   *[p.diff(s)*self.func(self.expr.diff(v), *self.args[1:]).doit()
                     for v, p in zip(self.variables, self.point)])
def diff(f, *args, **kwargs):
    """
    Differentiate f with respect to symbols.

    A thin wrapper that unifies ``.diff()`` and the Derivative class; its
    interface mirrors that of integrate().  The same multi-variable
    shortcuts apply as for Derivative, e.g. ``diff(f(x), x, x, x)`` and
    ``diff(f(x), (x, 3))`` both give the third derivative of f(x).

    Pass ``evaluate=False`` for an unevaluated Derivative.  With 0 symbols
    (such as ``diff(f(x), (x, 0))``) the result is the function itself
    (the zeroth derivative), even if evaluate=False.

    Examples
    ========

    >>> diff(sin(x), x)
    cos(x)
    >>> diff(f(x), x, x, x)
    Derivative(f(x), x, x, x)
    >>> diff(f(x), (x, 3))
    Derivative(f(x), x, x, x)
    >>> diff(sin(x)*cos(y), (x, 2), (y, 2))
    sin(x)*cos(y)

    >>> type(diff(sin(x), x))
    cos
    >>> type(diff(sin(x), x, evaluate=False))
    <class 'diofant.core.function.Derivative'>

    >>> diff(sin(x))
    cos(x)

    Note that the ``diff(sin(x))`` syntax (no variables) is meant only for
    convenience in interactive sessions and should be avoided in library
    code; it raises ValueError when the variable is ambiguous.

    References
    ==========

    * https://reference.wolfram.com/legacy/v5_2/Built-inFunctions/AlgebraicComputation/Calculus/D.html

    See Also
    ========

    Derivative
    diofant.geometry.util.idiff: computes the derivative implicitly
    """
    # Unlike Derivative, diff evaluates by default.
    if 'evaluate' not in kwargs:
        kwargs['evaluate'] = True
    return Derivative(f, *args, **kwargs)
def expand(e, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
r"""Expand an expression using methods given as hints.
Hints evaluated unless explicitly set to False are: ``basic``, ``log``,
``multinomial``, ``mul``, ``power_base``, and ``power_exp``. The following
hints are supported but not applied unless set to True: ``complex``,
``func``, and ``trig``. In addition, the following meta-hints are
supported by some or all of the other hints: ``frac``, ``numer``,
``denom``, ``modulus``, and ``force``. ``deep`` is supported by all
hints. Additionally, subclasses of Expr may define their own hints or
meta-hints.
Parameters
==========
basic : boolean, optional
This hint is used for any special
rewriting of an object that should be done automatically (along with
the other hints like ``mul``) when expand is called. This is a catch-all
hint to handle any sort of expansion that may not be described by
the existing hint names.
deep : boolean, optional
If ``deep`` is set to ``True`` (the default), things like arguments of
functions are recursively expanded. Use ``deep=False`` to only expand on
the top level.
mul : boolean, optional
Distributes multiplication over addition (``):
>>> (y*(x + z)).expand(mul=True)
x*y + y*z
multinomial : boolean, optional
Expand (x + y + ...)**n where n is a positive integer.
>>> ((x + y + z)**2).expand(multinomial=True)
x**2 + 2*x*y + 2*x*z + y**2 + 2*y*z + z**2
power_exp : boolean, optional
Expand addition in exponents into multiplied bases.
>>> exp(x + y).expand(power_exp=True)
E**x*E**y
>>> (2**(x + y)).expand(power_exp=True)
2**x*2**y
power_base : boolean, optional
Split powers of multiplied bases.
This only happens by default if assumptions allow, or if the
``force`` meta-hint is used:
>>> ((x*y)**z).expand(power_base=True)
(x*y)**z
>>> ((x*y)**z).expand(power_base=True, force=True)
x**z*y**z
>>> ((2*y)**z).expand(power_base=True)
2**z*y**z
Note that in some cases where this expansion always holds, Diofant performs
it automatically:
>>> (x*y)**2
x**2*y**2
log : boolean, optional
Pull out power of an argument as a coefficient and split logs products
into sums of logs.
Note that these only work if the arguments of the log function have the
proper assumptions--the arguments must be positive and the exponents must
be real--or else the ``force`` hint must be True:
>>> log(x**2*y).expand(log=True)
log(x**2*y)
>>> log(x**2*y).expand(log=True, force=True)
2*log(x) + log(y)
>>> x, y = symbols('x y', positive=True)
>>> log(x**2*y).expand(log=True)
2*log(x) + log(y)
complex : boolean, optional
Split an expression into real and imaginary parts.
>>> x, y = symbols('x y')
>>> (x + y).expand(complex=True)
re(x) + re(y) + I*im(x) + I*im(y)
>>> cos(x).expand(complex=True)
-I*sin(re(x))*sinh(im(x)) + cos(re(x))*cosh(im(x))
Note that this is just a wrapper around ``as_real_imag()``. Most objects
that wish to redefine ``_eval_expand_complex()`` should consider
redefining ``as_real_imag()`` instead.
func : boolean : optional
Expand other functions.
>>> gamma(x + 1).expand(func=True)
x*gamma(x)
trig : boolean, optional
Do trigonometric expansions.
>>> cos(x + y).expand(trig=True)
-sin(x)*sin(y) + cos(x)*cos(y)
>>> sin(2*x).expand(trig=True)
2*sin(x)*cos(x)
Note that the forms of ``sin(n*x)`` and ``cos(n*x)`` in terms of ``sin(x)``
and ``cos(x)`` are not unique, due to the identity `\sin^2(x) + \cos^2(x)
= 1`. The current implementation uses the form obtained from Chebyshev
polynomials, but this may change.
force : boolean, optional
If the ``force`` hint is used, assumptions about variables will be ignored
in making the expansion.
Notes
=====
- You can shut off unwanted methods::
>>> (exp(x + y)*(x + y)).expand()
E**x*E**y*x + E**x*E**y*y
>>> (exp(x + y)*(x + y)).expand(power_exp=False)
E**(x + y)*x + E**(x + y)*y
>>> (exp(x + y)*(x + y)).expand(mul=False)
E**x*E**y*(x + y)
- Use deep=False to only expand on the top level::
>>> exp(x + exp(x + y)).expand()
E**x*E**(E**x*E**y)
>>> exp(x + exp(x + y)).expand(deep=False)
E**(E**(x + y))*E**x
- Hints are applied in an arbitrary, but consistent order (in the current
implementation, they are applied in alphabetical order, except
multinomial comes before mul, but this may change). Because of this,
some hints may prevent expansion by other hints if they are applied
first. For example, ``mul`` may distribute multiplications and prevent
``log`` and ``power_base`` from expanding them. Also, if ``mul`` is
applied before ``multinomial``, the expression might not be fully
distributed. The solution is to use the various ``expand_hint`` helper
functions or to use ``hint=False`` to this function to finely control
which hints are applied. Here are some examples::
>>> x, y, z = symbols('x y z', positive=True)
>>> expand(log(x*(y + z)))
log(x) + log(y + z)
Here, we see that ``log`` was applied before ``mul``. To get the mul
expanded form, either of the following will work::
>>> expand_mul(log(x*(y + z)))
log(x*y + x*z)
>>> expand(log(x*(y + z)), log=False)
log(x*y + x*z)
A similar thing can happen with the ``power_base`` hint::
>>> expand((x*(y + z))**x)
(x*y + x*z)**x
To get the ``power_base`` expanded form, either of the following will
work::
>>> expand((x*(y + z))**x, mul=False)
x**x*(y + z)**x
>>> expand_power_base((x*(y + z))**x)
x**x*(y + z)**x
>>> expand((x + y)*y/x)
y + y**2/x
The parts of a rational expression can be targeted::
>>> expand((x + y)*y/x/(x + 1), frac=True)
(x*y + y**2)/(x**2 + x)
>>> expand((x + y)*y/x/(x + 1), numer=True)
(x*y + y**2)/(x*(x + 1))
>>> expand((x + y)*y/x/(x + 1), denom=True)
y*(x + y)/(x**2 + x)
- The ``modulus`` meta-hint can be used to reduce the coefficients of an
expression post-expansion::
>>> expand((3*x + 1)**2)
9*x**2 + 6*x + 1
>>> expand((3*x + 1)**2, modulus=5)
4*x**2 + x + 1
- Either ``expand()`` the function or ``.expand()`` the method can be
used. Both are equivalent::
>>> expand((x + 1)**2)
x**2 + 2*x + 1
>>> ((x + 1)**2).expand()
x**2 + 2*x + 1
- Objects can define their own expand hints by defining
``_eval_expand_hint()``. The function should take the form::
def _eval_expand_hint(self, **hints):
# Only apply the method to the top-level expression
...
See also the example below. Objects should define ``_eval_expand_hint()``
methods only if ``hint`` applies to that specific object. The generic
``_eval_expand_hint()`` method defined in Expr will handle the no-op case.
Each hint should be responsible for expanding that hint only.
Furthermore, the expansion should be applied to the top-level expression
only. ``expand()`` takes care of the recursion that happens when
``deep=True``.
You should only call ``_eval_expand_hint()`` methods directly if you are
100% sure that the object has the method, as otherwise you are liable to
get unexpected ``AttributeError``'s. Note, again, that you do not need to
recursively apply the hint to args of your object: this is handled
automatically by ``expand()``. ``_eval_expand_hint()`` should
generally not be used at all outside of an ``_eval_expand_hint()`` method.
If you want to apply a specific expansion from within another method, use
the public ``expand()`` function, method, or ``expand_hint()`` functions.
In order for expand to work, objects must be rebuildable by their args,
i.e., ``obj.func(*obj.args) == obj`` must hold.
Expand methods are passed ``**hints`` so that expand hints may use
'metahints'--hints that control how different expand methods are applied.
For example, the ``force=True`` hint described above that causes
``expand(log=True)`` to ignore assumptions is such a metahint. The
``deep`` meta-hint is handled exclusively by ``expand()`` and is not
passed to ``_eval_expand_hint()`` methods.
Note that expansion hints should generally be methods that perform some
kind of 'expansion'. For hints that simply rewrite an expression, use the
.rewrite() API.
Examples
========
>>> class MyClass(Expr):
... def __new__(cls, *args):
... args = sympify(args)
... return Expr.__new__(cls, *args)
...
... def _eval_expand_double(self, **hints):
... # Doubles the args of MyClass.
... # If there more than four args, doubling is not performed,
... # unless force=True is also used (False by default).
... force = hints.pop('force', False)
... if not force and len(self.args) > 4:
... return self
... return self.func(*(self.args + self.args))
...
>>> a = MyClass(1, 2, MyClass(3, 4))
>>> a
MyClass(1, 2, MyClass(3, 4))
>>> a.expand(double=True)
MyClass(1, 2, MyClass(3, 4, 3, 4), 1, 2, MyClass(3, 4, 3, 4))
>>> a.expand(double=True, deep=False)
MyClass(1, 2, MyClass(3, 4), 1, 2, MyClass(3, 4))
>>> b = MyClass(1, 2, 3, 4, 5)
>>> b.expand(double=True)
MyClass(1, 2, 3, 4, 5)
>>> b.expand(double=True, force=True)
MyClass(1, 2, 3, 4, 5, 1, 2, 3, 4, 5)
See Also
========
expand_log, expand_mul, expand_multinomial, expand_complex, expand_trig,
expand_power_base, expand_power_exp, expand_func,
diofant.simplify.hyperexpand.hyperexpand
References
==========
* https://mathworld.wolfram.com/Multiple-AngleFormulas.html
"""
# don't modify this; modify the Expr.expand method
hints['power_base'] = power_base
hints['power_exp'] = power_exp
hints['mul'] = mul
hints['log'] = log
hints['multinomial'] = multinomial
hints['basic'] = basic
return sympify(e).expand(deep=deep, modulus=modulus, **hints)
# This is a special application of two hints
def _mexpand(expr, recursive=False):
# expand multinomials and then expand products; this may not always
# be sufficient to give a fully expanded expression (see
# test_sympyissue_8247_8354 in test_arit)
was = None
while was != expr:
was, expr = expr, expand_mul(expand_multinomial(expr))
if not recursive:
break
return expr
# These are simple wrappers around single hints.
def expand_mul(expr, deep=True):
"""
Wrapper around expand that only uses the mul hint. See the expand
docstring for more information.
Examples
========
>>> x, y = symbols('x y', positive=True)
>>> expand_mul(exp(x+y)*(x+y)*log(x*y**2))
E**(x + y)*x*log(x*y**2) + E**(x + y)*y*log(x*y**2)
"""
return sympify(expr).expand(deep=deep, mul=True, power_exp=False,
power_base=False, basic=False, multinomial=False, log=False)
def expand_multinomial(expr, deep=True):
"""
Wrapper around expand that only uses the multinomial hint. See the expand
docstring for more information.
Examples
========
>>> x, y = symbols('x y', positive=True)
>>> expand_multinomial((x + exp(x + 1))**2)
2*E**(x + 1)*x + E**(2*x + 2) + x**2
"""
return sympify(expr).expand(deep=deep, mul=False, power_exp=False,
power_base=False, basic=False, multinomial=True, log=False)
def expand_log(expr, deep=True, force=False):
"""
Wrapper around expand that only uses the log hint. See the expand
docstring for more information.
Examples
========
>>> x, y = symbols('x y', positive=True)
>>> expand_log(exp(x+y)*(x+y)*log(x*y**2))
E**(x + y)*(x + y)*(log(x) + 2*log(y))
"""
return sympify(expr).expand(deep=deep, log=True, mul=False,
power_exp=False, power_base=False, multinomial=False,
basic=False, force=force)
def expand_func(expr, deep=True):
"""
Wrapper around expand that only uses the func hint. See the expand
docstring for more information.
Examples
========
>>> expand_func(gamma(x + 2))
x*(x + 1)*gamma(x)
"""
return sympify(expr).expand(deep=deep, func=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_trig(expr, deep=True):
"""
Wrapper around expand that only uses the trig hint. See the expand
docstring for more information.
Examples
========
>>> expand_trig(sin(x+y)*(x+y))
(x + y)*(sin(x)*cos(y) + sin(y)*cos(x))
"""
return sympify(expr).expand(deep=deep, trig=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_complex(expr, deep=True):
"""
Wrapper around expand that only uses the complex hint. See the expand
docstring for more information.
Examples
========
>>> expand_complex(exp(z))
E**re(z)*I*sin(im(z)) + E**re(z)*cos(im(z))
>>> expand_complex(sqrt(I))
sqrt(2)/2 + sqrt(2)*I/2
See Also
========
diofant.core.expr.Expr.as_real_imag
"""
return sympify(expr).expand(deep=deep, complex=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_power_base(expr, deep=True, force=False):
"""
Wrapper around expand that only uses the power_base hint.
A wrapper to expand(power_base=True) which separates a power with a base
that is a Mul into a product of powers, without performing any other
expansions, provided that assumptions about the power's base and exponent
allow.
deep=False (default is True) will only apply to the top-level expression.
force=True (default is False) will cause the expansion to ignore
assumptions about the base and exponent. When False, the expansion will
only happen if the base is non-negative or the exponent is an integer.
>>> (x*y)**2
x**2*y**2
>>> (2*x)**y
(2*x)**y
>>> expand_power_base(_)
2**y*x**y
>>> expand_power_base((x*y)**z)
(x*y)**z
>>> expand_power_base((x*y)**z, force=True)
x**z*y**z
>>> expand_power_base(sin((x*y)**z), deep=False)
sin((x*y)**z)
>>> expand_power_base(sin((x*y)**z), force=True)
sin(x**z*y**z)
>>> expand_power_base((2*sin(x))**y + (2*cos(x))**y)
2**y*sin(x)**y + 2**y*cos(x)**y
>>> expand_power_base((2*exp(y))**x)
2**x*(E**y)**x
>>> expand_power_base((2*cos(x))**y)
2**y*cos(x)**y
Notice that sums are left untouched. If this is not the desired behavior,
apply full ``expand()`` to the expression:
>>> expand_power_base(((x+y)*z)**2)
z**2*(x + y)**2
>>> (((x+y)*z)**2).expand()
x**2*z**2 + 2*x*y*z**2 + y**2*z**2
>>> expand_power_base((2*y)**(1+z))
2**(z + 1)*y**(z + 1)
>>> ((2*y)**(1+z)).expand()
2*2**z*y*y**z
See Also
========
expand
"""
return sympify(expr).expand(deep=deep, log=False, mul=False,
power_exp=False, power_base=True, multinomial=False,
basic=False, force=force)
def expand_power_exp(expr, deep=True):
"""
Wrapper around expand that only uses the power_exp hint.
Examples
========
>>> expand_power_exp(x**(y + 2))
x**2*x**y
See Also
========
expand
"""
return sympify(expr).expand(deep=deep, complex=False, basic=False,
log=False, mul=False, power_exp=True, power_base=False, multinomial=False)
def count_ops(expr, visual=False):
"""
Return a representation (integer or expression) of the operations in expr.
If ``visual`` is ``False`` (default) then the sum of the coefficients of the
visual expression will be returned.
If ``visual`` is ``True`` then the number of each type of operation is shown
with the core class types (or their virtual equivalent) multiplied by the
number of times they occur.
If expr is an iterable, the sum of the op counts of the
items will be returned.
Examples
========
Although there isn't a SUB object, minus signs are interpreted as
either negations or subtractions:
>>> (x - y).count_ops(visual=True)
SUB
>>> (-x).count_ops(visual=True)
NEG
Here, there are two Adds and a Pow:
>>> (1 + a + b**2).count_ops(visual=True)
2*ADD + POW
In the following, an Add, Mul, Pow and two functions:
>>> (sin(x)*x + sin(x)**2).count_ops(visual=True)
ADD + MUL + POW + 2*SIN
for a total of 5:
>>> (sin(x)*x + sin(x)**2).count_ops(visual=False)
5
Note that "what you type" is not always what you get. The expression
1/x/y is translated by diofant into 1/(x*y) so it gives a DIV and MUL rather
than two DIVs:
>>> (1/x/y).count_ops(visual=True)
DIV + MUL
The visual option can be used to demonstrate the difference in
operations for expressions in different forms. Here, the Horner
representation is compared with the expanded form of a polynomial:
>>> eq = x*(1 + x*(2 + x*(3 + x)))
>>> count_ops(eq.expand(), visual=True) - count_ops(eq, visual=True)
-MUL + 3*POW
The count_ops function also handles iterables:
>>> count_ops([x, sin(x), None, True, x + 2], visual=False)
2
>>> count_ops([x, sin(x), None, True, x + 2], visual=True)
ADD + SIN
>>> count_ops({x: sin(x), x + 2: y + 1}, visual=True)
2*ADD + SIN
"""
from ..integrals import Integral
from ..logic.boolalg import BooleanFunction
from ..simplify.radsimp import fraction
from .symbol import Symbol
expr = sympify(expr)
if type(expr) is dict:
ops = [count_ops(k, visual=visual) +
count_ops(v, visual=visual) for k, v in expr.items()]
elif iterable(expr):
ops = [count_ops(i, visual=visual) for i in expr]
elif isinstance(expr, Expr):
ops = []
args = [expr]
NEG = Symbol('NEG')
DIV = Symbol('DIV')
SUB = Symbol('SUB')
ADD = Symbol('ADD')
while args:
a = args.pop()
if a.is_Rational:
# -1/3 = NEG + DIV
if a != 1:
if a.numerator < 0:
ops.append(NEG)
if a.denominator != 1:
ops.append(DIV)
# XXX "peephole" optimization, http://bugs.python.org/issue2506
a
continue
elif a.is_Mul:
if _coeff_isneg(a):
ops.append(NEG)
if a.args[0] == -1:
a = a.as_two_terms()[1]
else:
a = -a
n, d = fraction(a)
if n.is_Integer:
ops.append(DIV)
args.append(d)
continue # won't be -Mul but could be Add
elif d != 1:
if not d.is_Integer:
args.append(d)
ops.append(DIV)
args.append(n)
continue # could be -Mul
elif a.is_Add:
aargs = list(a.args)
negs = 0
for i, ai in enumerate(aargs):
if _coeff_isneg(ai):
negs += 1
args.append(-ai)
if i > 0:
ops.append(SUB)
else:
args.append(ai)
if i > 0:
ops.append(ADD)
if negs == len(aargs): # -x - y = NEG + SUB
ops.append(NEG)
elif _coeff_isneg(aargs[0]): # -x + y = SUB, but already recorded ADD
ops.append(SUB - ADD)
# XXX "peephole" optimization, http://bugs.python.org/issue2506
a
continue
elif isinstance(expr, BooleanFunction):
ops = []
for arg in expr.args:
ops.append(count_ops(arg, visual=True))
o = Symbol(expr.func.__name__.upper())
ops.append(o)
continue
if a.is_Pow and a.exp == -1:
ops.append(DIV)
args.append(a.base) # won't be -Mul but could be Add
continue
if (a.is_Mul or
a.is_Pow or
a.is_Function or
isinstance(a, Derivative) or
isinstance(a, Integral)):
o = Symbol(a.func.__name__.upper())
# count the args
if (a.is_Mul or isinstance(a, LatticeOp)):
ops.append(o*(len(a.args) - 1))
else:
ops.append(o)
if not a.is_Symbol:
args.extend(a.args)
elif not isinstance(expr, Basic):
ops = []
else:
ops = []
args = [expr]
while args:
a = args.pop()
if a.args:
o = Symbol(a.func.__name__.upper())
ops.append(o)
args.extend(a.args)
if not ops:
if visual:
return Integer(0)
return 0
ops = Add(*ops)
if visual:
return ops
if ops.is_Number:
return int(ops)
return sum(int((a.args or [1])[0]) for a in Add.make_args(ops))
def nfloat(expr, n=15, exponent=False):
"""Make all Rationals in expr Floats except those in exponents
(unless the exponents flag is set to True).
Examples
========
>>> nfloat(x**4 + x/2 + cos(pi/3) + 1 + sqrt(y))
x**4 + 0.5*x + sqrt(y) + 1.5
>>> nfloat(x**4 + sqrt(y), exponent=True)
x**4.0 + y**0.5
"""
from ..polys.rootoftools import RootOf
from .power import Pow
from .symbol import Dummy
if iterable(expr, exclude=(str,)):
if isinstance(expr, (dict, Dict)):
return type(expr)([(k, nfloat(v, n, exponent)) for k, v in
list(expr.items())])
return type(expr)([nfloat(a, n, exponent) for a in expr])
rv = sympify(expr)
if rv.is_Number:
return Float(rv, n)
elif rv.is_number:
# evalf doesn't always set the precision
rv = rv.evalf(n)
if rv.is_Number:
rv = Float(rv, n)
else:
pass # pure_complex(rv) is likely True
return rv
# watch out for RootOf instances that don't like to have
# their exponents replaced with Dummies and also sometimes have
# problems with evaluating at low precision (issue sympy/sympy#6393)
rv = rv.xreplace({ro: ro.evalf(n) for ro in rv.atoms(RootOf)})
if not exponent:
reps = [(p, Pow(p.base, Dummy())) for p in rv.atoms(Pow)]
rv = rv.xreplace(dict(reps))
rv = rv.evalf(n, strict=False)
if not exponent:
rv = rv.xreplace({d.exp: p.exp for p, d in reps})
else:
# Pow._eval_evalf special cases Integer exponents so if
# exponent is suppose to be handled we have to do so here
rv = rv.xreplace(Transform(
lambda x: Pow(x.base, Float(x.exp, n)),
lambda x: x.is_Pow and x.exp.is_Integer))
return rv.xreplace(Transform(
lambda x: x.func(*nfloat(x.args, n, exponent)),
lambda x: isinstance(x, Function)))
| bsd-3-clause |
ramcn/demo3 | venv/lib/python3.4/site-packages/django/template/loader.py | 48 | 6396 | import warnings
from django.utils.deprecation import RemovedInDjango20Warning
from . import engines
from .backends.django import DjangoTemplates
from .base import Origin, TemplateDoesNotExist
from .engine import (
_context_instance_undefined, _dictionary_undefined, _dirs_undefined,
)
from .loaders import base
class LoaderOrigin(Origin):
def __init__(self, display_name, loader, name, dirs):
super(LoaderOrigin, self).__init__(display_name)
self.loader, self.loadname, self.dirs = loader, name, dirs
def reload(self):
return self.loader(self.loadname, self.dirs)[0]
def get_template(template_name, dirs=_dirs_undefined, using=None):
"""
Loads and returns a template for the given name.
Raises TemplateDoesNotExist if no such template exists.
"""
engines = _engine_list(using)
for engine in engines:
try:
# This is required for deprecating the dirs argument. Simply
# return engine.get_template(template_name) in Django 2.0.
if isinstance(engine, DjangoTemplates):
return engine.get_template(template_name, dirs)
elif dirs is not _dirs_undefined:
warnings.warn(
"Skipping template backend %s because its get_template "
"method doesn't support the dirs argument." % engine.name,
stacklevel=2)
else:
return engine.get_template(template_name)
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(template_name)
def select_template(template_name_list, dirs=_dirs_undefined, using=None):
"""
Loads and returns a template for one of the given names.
Tries names in order and returns the first template found.
Raises TemplateDoesNotExist if no such template exists.
"""
engines = _engine_list(using)
for template_name in template_name_list:
for engine in engines:
try:
# This is required for deprecating the dirs argument. Simply
# use engine.get_template(template_name) in Django 2.0.
if isinstance(engine, DjangoTemplates):
return engine.get_template(template_name, dirs)
elif dirs is not _dirs_undefined:
warnings.warn(
"Skipping template backend %s because its get_template "
"method doesn't support the dirs argument." % engine.name,
stacklevel=2)
else:
return engine.get_template(template_name)
except TemplateDoesNotExist:
pass
if template_name_list:
raise TemplateDoesNotExist(', '.join(template_name_list))
else:
raise TemplateDoesNotExist("No template names provided")
def render_to_string(template_name, context=None,
context_instance=_context_instance_undefined,
dirs=_dirs_undefined,
dictionary=_dictionary_undefined,
request=None, using=None):
"""
Loads a template and renders it with a context. Returns a string.
template_name may be a string or a list of strings.
"""
if (context_instance is _context_instance_undefined
and dirs is _dirs_undefined
and dictionary is _dictionary_undefined):
# No deprecated arguments were passed - use the new code path
if isinstance(template_name, (list, tuple)):
template = select_template(template_name, using=using)
else:
template = get_template(template_name, using=using)
return template.render(context, request)
else:
# Some deprecated arguments were passed - use the legacy code path
for engine in _engine_list(using):
try:
# This is required for deprecating properly arguments specific
# to Django templates. Remove Engine.render_to_string() at the
# same time as this code path in Django 2.0.
if isinstance(engine, DjangoTemplates):
if request is not None:
raise ValueError(
"render_to_string doesn't support the request argument "
"when some deprecated arguments are passed.")
continue
# Hack -- use the internal Engine instance of DjangoTemplates.
return engine.engine.render_to_string(
template_name, context, context_instance, dirs, dictionary)
elif context_instance is not _context_instance_undefined:
warnings.warn(
"Skipping template backend %s because its render_to_string "
"method doesn't support the context_instance argument." %
engine.name, stacklevel=2)
elif dirs is not _dirs_undefined:
warnings.warn(
"Skipping template backend %s because its render_to_string "
"method doesn't support the dirs argument." % engine.name,
stacklevel=2)
elif dictionary is not _dictionary_undefined:
warnings.warn(
"Skipping template backend %s because its render_to_string "
"method doesn't support the dictionary argument." %
engine.name, stacklevel=2)
except TemplateDoesNotExist:
continue
if template_name:
if isinstance(template_name, (list, tuple)):
template_name = ', '.join(template_name)
raise TemplateDoesNotExist(template_name)
else:
raise TemplateDoesNotExist("No template names provided")
def _engine_list(using=None):
return engines.all() if using is None else [engines[using]]
class BaseLoader(base.Loader):
_accepts_engine_in_init = False
def __init__(self, *args, **kwargs):
warnings.warn(
"django.template.loader.BaseLoader was superseded by "
"django.template.loaders.base.Loader.",
RemovedInDjango20Warning, stacklevel=2)
super(BaseLoader, self).__init__(*args, **kwargs)
| mit |
eshasharma/mase | src/old/lib.py | 13 | 4501 | from __future__ import print_function, unicode_literals
from __future__ import absolute_import, division
"""
# Lib: Standard Utilities
Standard imports: used everywhere.
## Code Standards
Narrow code (52 chars, max); use ``i'', not ``self'', set indent to two characters,
In a repo (or course). Markdown comments (which means we can do tricks like auto-generating this
documentation from comments in the file).
Not Python3, but use Python3 headers.
good reseraoiuces for advance people: Norving's infrenqencly asked questions
David Isaacon's Pything tips, tricks, and Hacks.http://www.siafoo.net/article/52
Environemnt that supports matplotlib, scikitlearn. Easy to get there.
Old school: install linux. New school: install virtualbox. Newer school: work online.
To checn if you ahve a suseful envorunment, try the following (isntall pip, matpolotlib, scikitlearn)
Learn Python.
Learn tdd
Attitude to coding. not code byt"set yourself up to et rapid feedback on some issue"
"""
import random, pprint, re, datetime, time,traceback
from contextlib import contextmanager
import pprint,sys
"""
Unit test engine, inspired by Kent Beck.
"""
def ok(*lst):
for one in lst: unittest(one)
return one
class unittest:
tries = fails = 0 # tracks the record so far
@staticmethod
def score():
t = unittest.tries
f = unittest.fails
return "# TRIES= %s FAIL= %s %%PASS = %s%%" % (
t,f,int(round(t*100/(t+f+0.001))))
def __init__(i,test):
unittest.tries += 1
try:
test()
except Exception,e:
unittest.fails += 1
i.report(e,test)
def report(i,e,test):
print(traceback.format_exc())
print(unittest.score(),':',test.__name__, e)
"""
Simple container class (offers simple initialization).
"""
class o:
def __init__(i,**d) : i + d
def __add__(i,d) : i.__dict__.update(d)
def __setitem__(i,k,v) : i.__dict__[k] = v
def __getitem__(i,k) : return i.__dict__[k]
def __repr__(i) : return str(i.items())
def items(i,x=None) :
x = x or i
if isinstance(x,o):
return [(k,i.items(v)) for
k,v in x.__dict__.values()
if not k[0] == "_" ]
else: return x
"""
The settings system.
"""
the = o()
def setting(f):
name = f.__name__
def wrapper(**d):
tmp = f()
tmp + d
the[name] = tmp
return tmp
wrapper()
return wrapper
@setting
def LIB(): return o(
seed = 1,
has = o(decs = 3,
skip="_",
wicked=True),
show = o(indent=2,
width=80)
)
#-------------------------------------------------
r = random.random
any = random.choice
seed = random.seed
isa = isinstance
def lt(x,y): return x < y
def gt(x,y): return x > y
def first(lst): return lst[0]
def last(lst): return lst[-1]
def shuffle(lst):
random.shuffle(lst)
return lst
def ntiles(lst, tiles=[0.1,0.3,0.5,0.7,0.9],
norm=False, f=3):
if norm:
lo,hi = lst[0], lst[-1]
lst= g([(x - lo)/(hi-lo+0.0001) for x in lst],f)
at = lambda x: lst[ int(len(lst)*x) ]
lst = [ at(tile) for tile in tiles ]
return lst
def say(*lst):
sys.stdout.write(', '.join(map(str,lst)))
sys.stdout.flush()
def g(lst,f=3):
return map(lambda x: round(x,f),lst)
#-------------------------------------------------
def show(x, indent=None, width=None):
print(pprint.pformat(has(x),
indent= indent or the.LIB.show.indent,
width = width or the.LIB.show.width))
def cache(f):
name = f.__name__
def wrapper(i):
i._cache = i._cache or {}
key = (name, i.id)
if key in i._cache:
x = i._cache[key]
else:
x = f(i) # sigh, gonna have to call it
i._cache[key] = x # ensure ache holds 'c'
return x
return wrapper
@contextmanager
def duration():
t1 = time.time()
yield
t2 = time.time()
print("\n" + "-" * 72)
print("# Runtime: %.3f secs" % (t2-t1))
def use(x,**y): return (x,y)
@contextmanager
def settings(*usings):
for (using, override) in usings:
using(**override)
yield
for (using,_) in usings:
using()
@contextmanager
def study(what,*usings):
print("\n#" + "-" * 50,
"\n#", what, "\n#",
datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"))
for (using, override) in usings:
using(**override)
seed(the.LIB.seed)
show(the)
with duration():
yield
for (using,_) in usings:
using()
| unlicense |
gangadhar-kadam/mic-erpnext | patches/december_2012/production_cleanup.py | 6 | 1858 | import webnotes
def execute():
delete_doctypes()
rename_module()
cleanup_bom()
rebuild_exploded_bom()
def delete_doctypes():
from webnotes.model import delete_doc
delete_doc("DocType", "Production Control")
delete_doc("DocType", "BOM Control")
def rename_module():
webnotes.reload_doc("core", "doctype", "role")
webnotes.reload_doc("core", "doctype", "page")
webnotes.reload_doc("core", "doctype", "module_def")
if webnotes.conn.exists("Role", "Production User"):
webnotes.rename_doc("Role", "Production User", "Manufacturing User")
if webnotes.conn.exists("Role", "Production Manager"):
webnotes.rename_doc("Role", "Production Manager", "Manufacturing Manager")
if webnotes.conn.exists("Page", "manufacturing-home"):
webnotes.delete_doc("Page", "production-home")
else:
webnotes.rename_doc("Page", "production-home", "manufacturing-home")
if webnotes.conn.exists("Module Def", "Production"):
webnotes.rename_doc("Module Def", "Production", "Manufacturing")
modules_list = webnotes.conn.get_global('modules_list')
if modules_list:
webnotes.conn.set_global("modules_list", modules_list.replace("Production",
"Manufacturing"))
# set end of life to null if "0000-00-00"
webnotes.conn.sql("""update `tabItem` set end_of_life=null where end_of_life='0000-00-00'""")
def rebuild_exploded_bom():
from webnotes.model.code import get_obj
for bom in webnotes.conn.sql("""select name from `tabBOM` where docstatus < 2"""):
get_obj("BOM", bom[0], with_children=1).on_update()
def cleanup_bom():
    """Convert the legacy 'Yes'/'No' is_active flag to 1/0 and default
    with_operations to enabled."""
    # Must run before the doctype reload changes the column semantics.
    webnotes.conn.sql("""UPDATE `tabBOM` SET is_active = 1 where ifnull(is_active, 'No') = 'Yes'""")
    webnotes.conn.sql("""UPDATE `tabBOM` SET is_active = 0 where ifnull(is_active, 'No') = 'No'""")
    webnotes.reload_doc("manufacturing", "doctype", "bom")
    # Existing BOMs keep their operations visible after the schema change.
    webnotes.conn.sql("""update `tabBOM` set with_operations = 1""")
| agpl-3.0 |
zzicewind/nova | nova/virt/hyperv/snapshotops.py | 25 | 5816 | # Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for VM snapshot operations.
"""
import os
from oslo_config import cfg
from oslo_log import log as logging
from nova.compute import task_states
from nova.i18n import _LW
from nova.image import glance
from nova import utils
from nova.virt.hyperv import utilsfactory
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class SnapshotOps(object):
    """Snapshot a Hyper-V instance's root disk and upload it to Glance.

    All host interaction goes through the path/VM/VHD utils obtained from
    ``utilsfactory``.
    """
    def __init__(self):
        self._pathutils = utilsfactory.get_pathutils()
        self._vmutils = utilsfactory.get_vmutils()
        self._vhdutils = utilsfactory.get_vhdutils()

    def _save_glance_image(self, context, image_id, image_vhd_path):
        """Stream the VHD at ``image_vhd_path`` into the Glance image."""
        (glance_image_service,
         image_id) = glance.get_remote_image_service(context, image_id)
        image_metadata = {"is_public": False,
                          "disk_format": "vhd",
                          "container_format": "bare",
                          "properties": {}}
        with self._pathutils.open(image_vhd_path, 'rb') as f:
            glance_image_service.update(context, image_id, image_metadata, f)

    def snapshot(self, context, instance, image_id, update_task_state):
        """Serialized entry point: snapshot ``instance`` into ``image_id``."""
        # While the snapshot operation is not synchronized within the manager,
        # attempting to destroy an instance while it's being snapshoted fails.
        @utils.synchronized(instance.uuid)
        def instance_synchronized_snapshot():
            self._snapshot(context, instance, image_id, update_task_state)

        instance_synchronized_snapshot()

    def _snapshot(self, context, instance, image_id, update_task_state):
        """Create snapshot from a running VM instance."""
        instance_name = instance.name
        LOG.debug("Creating snapshot for instance %s", instance_name)
        snapshot_path = self._vmutils.take_vm_snapshot(instance_name)
        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)

        export_dir = None
        try:
            src_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name)
            LOG.debug("Getting info for VHD %s", src_vhd_path)
            src_base_disk_path = self._vhdutils.get_vhd_parent_path(
                src_vhd_path)

            # Work on copies in the export directory so the running VM's
            # disks are never modified.
            export_dir = self._pathutils.get_export_dir(instance_name)

            dest_vhd_path = os.path.join(export_dir, os.path.basename(
                src_vhd_path))
            LOG.debug('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s',
                      {'src_vhd_path': src_vhd_path,
                       'dest_vhd_path': dest_vhd_path})
            self._pathutils.copyfile(src_vhd_path, dest_vhd_path)

            image_vhd_path = None
            if not src_base_disk_path:
                # Flat disk (no parent): upload the copy directly.
                image_vhd_path = dest_vhd_path
            else:
                # Differencing disk: copy the parent too, re-link the child
                # to the copied parent, then merge child into parent so a
                # single flat VHD can be uploaded.
                basename = os.path.basename(src_base_disk_path)
                dest_base_disk_path = os.path.join(export_dir, basename)
                LOG.debug('Copying base disk %(src_vhd_path)s to '
                          '%(dest_base_disk_path)s',
                          {'src_vhd_path': src_vhd_path,
                           'dest_base_disk_path': dest_base_disk_path})
                self._pathutils.copyfile(src_base_disk_path,
                                         dest_base_disk_path)

                LOG.debug("Reconnecting copied base VHD "
                          "%(dest_base_disk_path)s and diff "
                          "VHD %(dest_vhd_path)s",
                          {'dest_base_disk_path': dest_base_disk_path,
                           'dest_vhd_path': dest_vhd_path})
                self._vhdutils.reconnect_parent_vhd(dest_vhd_path,
                                                    dest_base_disk_path)

                LOG.debug("Merging base disk %(dest_base_disk_path)s and "
                          "diff disk %(dest_vhd_path)s",
                          {'dest_base_disk_path': dest_base_disk_path,
                           'dest_vhd_path': dest_vhd_path})
                self._vhdutils.merge_vhd(dest_vhd_path, dest_base_disk_path)
                image_vhd_path = dest_base_disk_path

            LOG.debug("Updating Glance image %(image_id)s with content from "
                      "merged disk %(image_vhd_path)s",
                      {'image_id': image_id, 'image_vhd_path': image_vhd_path})
            update_task_state(task_state=task_states.IMAGE_UPLOADING,
                              expected_state=task_states.IMAGE_PENDING_UPLOAD)
            self._save_glance_image(context, image_id, image_vhd_path)

            LOG.debug("Snapshot image %(image_id)s updated for VM "
                      "%(instance_name)s",
                      {'image_id': image_id, 'instance_name': instance_name})
        finally:
            # Best-effort cleanup: snapshot removal failure must not mask a
            # successful upload, so it is logged and swallowed.
            try:
                LOG.debug("Removing snapshot %s", image_id)
                self._vmutils.remove_vm_snapshot(snapshot_path)
            except Exception as ex:
                LOG.exception(ex)
                LOG.warning(_LW('Failed to remove snapshot for VM %s'),
                            instance_name)
            if export_dir:
                LOG.debug('Removing directory: %s', export_dir)
                self._pathutils.rmtree(export_dir)
| apache-2.0 |
khkaminska/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 59 | 35368 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
import scipy
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
# Tiny 2-feature dataset shared by the simple binary/multiclass tests.
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)  # CSR copy of X for the sparse-input tests
Y1 = [0, 1, 1]  # binary labels
Y2 = [2, 1, 0]  # three-class labels
iris = load_iris()
# scipy version as a tuple of ints, e.g. (0, 12, 0), for feature gating.
sp_version = tuple([int(s) for s in scipy.__version__.split('.')])
def check_predictions(clf, X, y):
    """Check that the model is able to fit the classification data."""
    labels = np.unique(y)
    fitted = clf.fit(X, y)
    guessed = fitted.predict(X)
    assert_array_equal(fitted.classes_, labels)

    assert_equal(guessed.shape, (len(y),))
    assert_array_equal(guessed, y)

    proba = fitted.predict_proba(X)
    assert_equal(proba.shape, (len(y), labels.shape[0]))
    assert_array_almost_equal(proba.sum(axis=1), np.ones(len(y)))
    assert_array_equal(proba.argmax(axis=1), y)
def test_predict_2_classes():
    # Simple sanity check on a 2 classes dataset: the model must predict
    # the correct result on simple data, dense or sparse.
    for data in (X, X_sp):
        check_predictions(LogisticRegression(random_state=0), data, Y1)
        check_predictions(LogisticRegression(C=100, random_state=0), data, Y1)
        check_predictions(
            LogisticRegression(fit_intercept=False, random_state=0), data, Y1)
def test_error():
    """Bad hyper-parameter values must raise ValueError with a clear message."""
    for bad_C in (-1, "test"):
        assert_raise_message(ValueError, "Penalty term must be positive",
                             LogisticRegression(C=bad_C).fit, X, Y1)

    for LR in [LogisticRegression, LogisticRegressionCV]:
        for bad_tol in (-1, "test"):
            assert_raise_message(
                ValueError, "Tolerance for stopping criteria must be positive",
                LR(tol=bad_tol).fit, X, Y1)
        for bad_iter in (-1, "test"):
            assert_raise_message(
                ValueError, "Maximum number of iteration must be positive",
                LR(max_iter=bad_iter).fit, X, Y1)
def test_predict_3_classes():
    # Same sanity check on the toy three-class problem, dense and sparse.
    for data in (X, X_sp):
        check_predictions(LogisticRegression(C=10), data, Y2)
def test_predict_iris():
    # Test logistic regression with the iris dataset
    n_samples, n_features = iris.data.shape
    target = iris.target_names[iris.target]
    # Test that both multinomial and OvR solvers handle
    # multiclass data correctly and give good accuracy
    # score (>0.95) for the training data.
    for clf in [LogisticRegression(C=len(iris.data)),
                LogisticRegression(C=len(iris.data), solver='lbfgs',
                                   multi_class='multinomial'),
                LogisticRegression(C=len(iris.data), solver='newton-cg',
                                   multi_class='multinomial'),
                LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
                                   multi_class='ovr', random_state=42)]:
        clf.fit(iris.data, target)
        assert_array_equal(np.unique(target), clf.classes_)
        pred = clf.predict(iris.data)
        assert_greater(np.mean(pred == target), .95)
        # Probability rows must sum to one ...
        probabilities = clf.predict_proba(iris.data)
        assert_array_almost_equal(probabilities.sum(axis=1),
                                  np.ones(n_samples))
        # ... and their argmax must agree with the hard predictions.
        pred = iris.target_names[probabilities.argmax(axis=1)]
        assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
    # A negative C must be rejected by both multinomial-capable solvers.
    for solver in ('lbfgs', 'newton-cg'):
        model = LogisticRegression(C=-1, solver=solver,
                                   multi_class='multinomial')
        assert_raises(ValueError, model.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
    """Each solver must reject option combinations it does not support."""
    X, y = iris.data, iris.target
    for LR in [LogisticRegression, LogisticRegressionCV]:
        # Unknown solver name.
        msg = ("Logistic Regression supports only liblinear, newton-cg, lbfgs"
               " and sag solvers, got wrong_name")
        lr = LR(solver="wrong_name")
        assert_raise_message(ValueError, msg, lr.fit, X, y)
        # Unknown multi_class value.
        msg = "multi_class should be either multinomial or ovr, got wrong_name"
        lr = LR(solver='newton-cg', multi_class="wrong_name")
        assert_raise_message(ValueError, msg, lr.fit, X, y)
        # all solvers except 'newton-cg' and 'lbfgs'
        for solver in ['liblinear', 'sag']:
            msg = ("Solver %s does not support a multinomial backend." %
                   solver)
            lr = LR(solver=solver, multi_class='multinomial')
            assert_raise_message(ValueError, msg, lr.fit, X, y)
        # all solvers except 'liblinear'
        for solver in ['newton-cg', 'lbfgs', 'sag']:
            msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
                   solver)
            lr = LR(solver=solver, penalty='l1')
            assert_raise_message(ValueError, msg, lr.fit, X, y)
            msg = ("Solver %s supports only dual=False, got dual=True" %
                   solver)
            lr = LR(solver=solver, dual=True)
            assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
    """Multinomial LR on a binary problem collapses to a single coef row."""
    target = (iris.target > 0).astype(np.intp)
    target = np.array(["setosa", "not-setosa"])[target]

    for solver in ['lbfgs', 'newton-cg']:
        clf = LogisticRegression(solver=solver, multi_class='multinomial')
        clf.fit(iris.data, target)

        # Two classes collapse to one coefficient row and one intercept.
        assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
        assert_equal(clf.intercept_.shape, (1,))
        assert_array_equal(clf.predict(iris.data), target)

        mlr = LogisticRegression(solver=solver, multi_class='multinomial',
                                 fit_intercept=False)
        mlr.fit(iris.data, target)
        # BUG FIX: the original computed ``pred`` from ``clf`` (already fully
        # asserted above), so the intercept-free ``mlr`` model was fitted but
        # never actually checked. Use ``mlr`` here.
        pred = mlr.classes_[np.argmax(mlr.predict_log_proba(iris.data),
                                      axis=1)]
        assert_greater(np.mean(pred == target), .9)
def test_sparsify():
    # Test sparsify and densify members.
    n_samples, n_features = iris.data.shape
    target = iris.target_names[iris.target]
    clf = LogisticRegression(random_state=0).fit(iris.data, target)
    # Naming: pred_<coef form>_<data form>, d=dense / s=sparse.
    pred_d_d = clf.decision_function(iris.data)
    clf.sparsify()
    assert_true(sp.issparse(clf.coef_))
    pred_s_d = clf.decision_function(iris.data)
    sp_data = sp.coo_matrix(iris.data)
    pred_s_s = clf.decision_function(sp_data)
    clf.densify()
    pred_d_s = clf.decision_function(sp_data)
    # All four dense/sparse combinations must give the same scores.
    assert_array_almost_equal(pred_d_d, pred_s_d)
    assert_array_almost_equal(pred_d_d, pred_s_s)
    assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
    # Test that an exception is raised on inconsistent input
    rng = np.random.RandomState(0)
    X_ = rng.random_sample((5, 10))
    y_ = np.ones(X_.shape[0])
    y_[0] = 0
    clf = LogisticRegression(random_state=0)
    # Wrong dimensions for training data
    y_wrong = y_[:-1]
    # NOTE(review): this uses the module-level ``X`` (3 samples), not the
    # local ``X_`` (5 samples). Either mismatches ``y_wrong`` (4 labels) and
    # raises, but the intent was presumably ``X_`` -- TODO confirm.
    assert_raises(ValueError, clf.fit, X, y_wrong)
    # Wrong dimensions for test data
    assert_raises(ValueError, clf.fit(X_, y_).predict,
                  rng.random_sample((3, 12)))
def test_write_parameters():
    # coef_ and intercept_ must be writable in place: zeroing them must
    # zero the decision function.
    model = LogisticRegression(random_state=0)
    model.fit(X, Y1)
    model.coef_[:] = 0
    model.intercept_[:] = 0
    assert_array_almost_equal(model.decision_function(X), 0)
@raises(ValueError)
def test_nan():
    """Fitting on NaN data must raise, not loop forever (regression, #252)."""
    corrupted = np.array(X, dtype=np.float64)
    corrupted[0, 1] = np.nan
    LogisticRegression(random_state=0).fit(corrupted, Y1)
def test_consistency_path():
    # Test that the path algorithm is consistent
    rng = np.random.RandomState(0)
    X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
    y = [1] * 100 + [-1] * 100
    Cs = np.logspace(0, 4, 10)

    f = ignore_warnings
    # can't test with fit_intercept=True since LIBLINEAR
    # penalizes the intercept
    for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
        coefs, Cs, _ = f(logistic_regression_path)(
            X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
            random_state=0)
        # Each point of the path must match a direct fit at the same C.
        for i, C in enumerate(Cs):
            lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
                                    random_state=0)
            lr.fit(X, y)
            lr_coef = lr.coef_.ravel()
            assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
                                      err_msg="with solver = %s" % solver)

    # test for fit_intercept=True
    for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
        Cs = [1e3]
        coefs, Cs, _ = f(logistic_regression_path)(
            X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,
            intercept_scaling=10000., random_state=0)
        lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
                                intercept_scaling=10000., random_state=0)
        lr.fit(X, y)
        # Path output appends the intercept after the coefficients.
        lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
        assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
                                  err_msg="with solver = %s" % solver)
def test_liblinear_dual_random_state():
    # random_state is relevant for liblinear solver only if dual=True
    X, y = make_classification(n_samples=20)
    # Deliberately under-converged (max_iter=1, tiny tol) so the result
    # still depends on the random initialization.
    lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
    lr1.fit(X, y)
    lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
    lr2.fit(X, y)
    lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
    lr3.fit(X, y)
    # same result for same random state
    assert_array_almost_equal(lr1.coef_, lr2.coef_)
    # different results for different random states
    msg = "Arrays are not almost equal to 6 decimals"
    assert_raise_message(AssertionError, msg,
                         assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
    """Analytic gradient must match finite differences, dense and sparse."""
    X_ref, y = make_classification(n_samples=20)
    n_features = X_ref.shape[1]

    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = np.zeros(n_features)

        # First check that our derivation of the grad is correct
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad, approx_grad, decimal=2)

        # Second check that our intercept implementation is good
        # (extra trailing weight is the intercept term).
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(
            w, X, y, alpha=1.
        )
        assert_array_almost_equal(loss, loss_interp)

        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
    """Hessian-vector product must agree with a finite-difference estimate."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 50, 5
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = .1 * np.ones(n_features)

        # First check that _logistic_grad_hess is consistent
        # with _logistic_loss_and_grad
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
        assert_array_almost_equal(grad, grad_2)

        # Now check our hessian along the second direction of the grad
        vector = np.zeros_like(grad)
        vector[1] = 1
        hess_col = hess(vector)

        # Computation of the Hessian is particularly fragile to numerical
        # errors when doing simple finite differences. Here we compute the
        # grad along a path in the direction of the vector and then use a
        # least-square regression to estimate the slope
        e = 1e-3
        d_x = np.linspace(-e, e, 30)
        d_grad = np.array([
            _logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
            for t in d_x
        ])

        d_grad -= d_grad.mean(axis=0)
        approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()

        assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)

    # Second check that our intercept implementation is good
    w = np.zeros(n_features + 1)
    loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
    loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
    grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
    assert_array_almost_equal(loss_interp, loss_interp_2)
    assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
    # test for LogisticRegressionCV object
    n_samples, n_features = 50, 5
    rng = np.random.RandomState(0)
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    # A single-C CV search must reproduce a plain fit at that C.
    lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
                                 solver='liblinear')
    lr_cv.fit(X_ref, y)
    lr = LogisticRegression(C=1., fit_intercept=False)
    lr.fit(X_ref, y)
    assert_array_almost_equal(lr.coef_, lr_cv.coef_)

    assert_array_equal(lr_cv.coef_.shape, (1, n_features))
    assert_array_equal(lr_cv.classes_, [-1, 1])
    assert_equal(len(lr_cv.classes_), 2)

    # Shapes: (n_classes, n_folds, n_Cs, n_features) for the coef paths
    # and (n_classes, n_folds, n_Cs) for the scores.
    coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
    assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
    assert_array_equal(lr_cv.Cs_.shape, (1, ))
    scores = np.asarray(list(lr_cv.scores_.values()))
    assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
    # Dense and CSR inputs must give the same CV fit.
    X_dense, y = make_classification(n_samples=50, n_features=5,
                                     random_state=0)
    X_dense[X_dense < 1.0] = 0.0

    clf_dense = LogisticRegressionCV(fit_intercept=True)
    clf_dense.fit(X_dense, y)

    clf_sparse = LogisticRegressionCV(fit_intercept=True)
    clf_sparse.fit(sp.csr_matrix(X_dense), y)

    assert_array_almost_equal(clf_sparse.coef_, clf_dense.coef_)
    assert_array_almost_equal(clf_sparse.intercept_, clf_dense.intercept_)
    assert_equal(clf_sparse.C_, clf_dense.C_)
def test_intercept_logistic_helper():
    """fit_intercept must equal appending a (penalized) column of ones."""
    n_samples, n_features = 10, 5
    X, y = make_classification(n_samples=n_samples, n_features=n_features,
                               random_state=0)

    # Fit intercept case.
    alpha = 1.
    w = np.ones(n_features + 1)
    grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
    loss_interp = _logistic_loss(w, X, y, alpha)

    # Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e column of one vectors.
    X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
    grad, hess = _logistic_grad_hess(w, X_, y, alpha)
    loss = _logistic_loss(w, X_, y, alpha)

    # In the fit_intercept=False case, the feature vector of ones is
    # penalized. This should be taken care of.
    assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)

    # Check gradient.
    assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
    assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])

    rng = np.random.RandomState(0)
    grad = rng.rand(n_features + 1)
    hess_interp = hess_interp(grad)
    hess = hess(grad)
    assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
    assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
    # Test that OvR and multinomial are correct using the iris dataset.
    train, target = iris.data, iris.target
    n_samples, n_features = train.shape

    # Use pre-defined fold as folds generated for different y
    cv = StratifiedKFold(target, 3)
    clf = LogisticRegressionCV(cv=cv)
    clf.fit(train, target)

    # Merging classes 0 and 1 must not change the scores for class 2.
    clf1 = LogisticRegressionCV(cv=cv)
    target_copy = target.copy()
    target_copy[target_copy == 0] = 1
    clf1.fit(train, target_copy)

    assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
    assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
    assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)

    # Test the shape of various attributes.
    assert_equal(clf.coef_.shape, (3, n_features))
    assert_array_equal(clf.classes_, [0, 1, 2])
    coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
    assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
    assert_equal(clf.Cs_.shape, (10, ))
    scores = np.asarray(list(clf.scores_.values()))
    assert_equal(scores.shape, (3, 3, 10))

    # Test that for the iris data multinomial gives a better accuracy than OvR
    for solver in ['lbfgs', 'newton-cg']:
        clf_multi = LogisticRegressionCV(
            solver=solver, multi_class='multinomial', max_iter=15
            )
        clf_multi.fit(train, target)
        multi_score = clf_multi.score(train, target)
        ovr_score = clf.score(train, target)
        assert_greater(multi_score, ovr_score)

        # Test attributes of LogisticRegressionCV
        assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
        assert_array_equal(clf_multi.classes_, [0, 1, 2])
        coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
        assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
                                                      n_features + 1))
        assert_equal(clf_multi.Cs_.shape, (10, ))
        scores = np.asarray(list(clf_multi.scores_.values()))
        assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
    """All four solvers must converge to (nearly) the same coefficients."""
    X, y = make_classification(n_features=10, n_informative=5, random_state=0)
    ncg = LogisticRegression(solver='newton-cg', fit_intercept=False)
    lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
    lib = LogisticRegression(fit_intercept=False)  # default: liblinear
    sag = LogisticRegression(solver='sag', fit_intercept=False,
                             random_state=42)
    ncg.fit(X, y)
    lbf.fit(X, y)
    sag.fit(X, y)
    lib.fit(X, y)
    # Pairwise agreement to 3 decimals.
    assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=3)
    assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=3)
    assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=3)
    assert_array_almost_equal(sag.coef_, lib.coef_, decimal=3)
    assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=3)
    assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
    """Solver agreement as above, but on a three-class problem."""
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)
    tol = 1e-6
    ncg = LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol)
    lbf = LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol)
    lib = LogisticRegression(fit_intercept=False, tol=tol)
    sag = LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
                             max_iter=1000, random_state=42)
    ncg.fit(X, y)
    lbf.fit(X, y)
    sag.fit(X, y)
    lib.fit(X, y)
    # Pairwise agreement to 4 decimals.
    assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=4)
    assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=4)
    assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=4)
    assert_array_almost_equal(sag.coef_, lib.coef_, decimal=4)
    assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=4)
    assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)

    # Test the liblinear fails when class_weight of type dict is
    # provided, when it is multiclass. However it can handle
    # binary problems.
    clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
                                   solver='liblinear')
    assert_raises(ValueError, clf_lib.fit, X, y)
    y_ = y.copy()
    y_[y == 2] = 1
    clf_lib.fit(X, y_)
    assert_array_equal(clf_lib.classes_, [0, 1])

    # Test for class_weight=balanced
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               random_state=0)
    clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
                                   class_weight='balanced')
    clf_lbf.fit(X, y)
    clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
                                   class_weight='balanced')
    clf_lib.fit(X, y)
    clf_sag = LogisticRegressionCV(solver='sag', fit_intercept=False,
                                   class_weight='balanced', max_iter=2000)
    clf_sag.fit(X, y)
    # All solvers must honour 'balanced' identically.
    assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
    assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
    assert_array_almost_equal(clf_lib.coef_, clf_sag.coef_, decimal=4)
def test_logistic_regression_sample_weights():
    X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
                               n_classes=2, random_state=0)

    for LR in [LogisticRegression, LogisticRegressionCV]:
        # Test that liblinear fails when sample weights are provided
        clf_lib = LR(solver='liblinear')
        assert_raises(ValueError, clf_lib.fit, X, y,
                      sample_weight=np.ones(y.shape[0]))

        # Test that passing sample_weight as ones is the same as
        # not passing them at all (default None)
        clf_sw_none = LR(solver='lbfgs', fit_intercept=False)
        clf_sw_none.fit(X, y)
        clf_sw_ones = LR(solver='lbfgs', fit_intercept=False)
        clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
        assert_array_almost_equal(clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)

        # Test that sample weights work the same with the lbfgs,
        # newton-cg, and 'sag' solvers
        clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False)
        clf_sw_lbfgs.fit(X, y, sample_weight=y + 1)
        clf_sw_n = LR(solver='newton-cg', fit_intercept=False)
        clf_sw_n.fit(X, y, sample_weight=y + 1)
        clf_sw_sag = LR(solver='sag', fit_intercept=False,
                        max_iter=2000, tol=1e-7)
        clf_sw_sag.fit(X, y, sample_weight=y + 1)
        assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
        assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)

        # Test that passing class_weight as [1,2] is the same as
        # passing class weight = [1,1] but adjusting sample weights
        # to be 2 for all instances of class 2
        clf_cw_12 = LR(solver='lbfgs', fit_intercept=False,
                       class_weight={0: 1, 1: 2})
        clf_cw_12.fit(X, y)
        sample_weight = np.ones(y.shape[0])
        sample_weight[y == 1] = 2
        clf_sw_12 = LR(solver='lbfgs', fit_intercept=False)
        clf_sw_12.fit(X, y, sample_weight=sample_weight)
        assert_array_almost_equal(clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
    # With max_iter=2 liblinear cannot converge and must warn.
    X_c, y_c = make_classification(n_samples=20, n_features=20)
    model = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
    assert_warns(ConvergenceWarning, model.fit, X_c, y_c)
    assert_equal(model.n_iter_, 2)
def test_logistic_regression_multinomial():
    # Tests for the multinomial option in logistic regression

    # Some basic attributes of Logistic Regression
    n_samples, n_features, n_classes = 50, 20, 3
    X, y = make_classification(n_samples=n_samples,
                               n_features=n_features,
                               n_informative=10,
                               n_classes=n_classes, random_state=0)
    clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
    clf_int.fit(X, y)
    assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))

    clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
                                  fit_intercept=False)
    clf_wint.fit(X, y)
    assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))

    # Similar tests for newton-cg solver option
    clf_ncg_int = LogisticRegression(solver='newton-cg',
                                     multi_class='multinomial')
    clf_ncg_int.fit(X, y)
    assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))

    clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
                                      multi_class='multinomial')
    clf_ncg_wint.fit(X, y)
    assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))

    # Compare solutions between lbfgs and newton-cg
    assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
    assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
    assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)

    # Test that the path give almost the same results. However since in this
    # case we take the average of the coefs after fitting across all the
    # folds, it need not be exactly the same.
    for solver in ['lbfgs', 'newton-cg']:
        clf_path = LogisticRegressionCV(solver=solver,
                                        multi_class='multinomial', Cs=[1.])
        clf_path.fit(X, y)
        assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
        assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
    """Multinomial Hessian-vector product vs. finite-difference estimate."""
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 100, 5, 3
    X = rng.randn(n_samples, n_features)
    w = rng.rand(n_classes, n_features)
    # One-hot targets chosen so the generating w is a reasonable optimum.
    Y = np.zeros((n_samples, n_classes))
    ind = np.argmax(np.dot(X, w.T), axis=1)
    Y[range(0, n_samples), ind] = 1
    w = w.ravel()
    sample_weights = np.ones(X.shape[0])
    grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
                                         sample_weight=sample_weights)
    # extract first column of hessian matrix
    vec = np.zeros(n_features * n_classes)
    vec[0] = 1
    hess_col = hessp(vec)

    # Estimate hessian using least squares as done in
    # test_logistic_grad_hess
    e = 1e-3
    d_x = np.linspace(-e, e, 30)
    d_grad = np.array([
        _multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
                               sample_weight=sample_weights)[0]
        for t in d_x
    ])
    d_grad -= d_grad.mean(axis=0)
    approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
    assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
    # Liblinear itself predicts the positive class when decision_function
    # values are zero; verify that we do NOT do the same.
    # See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
    # and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
    X_c, y_c = make_classification(n_samples=5, n_features=5)
    model = LogisticRegression(fit_intercept=False)
    model.fit(X_c, y_c)

    # Dummy data such that the decision function becomes zero.
    all_zero = np.zeros((5, 5))
    assert_array_equal(model.predict(all_zero), np.zeros(5))
def test_liblinear_logregcv_sparse():
    # LogisticRegressionCV with solver='liblinear' must accept sparse input.
    X_c, y_c = make_classification(n_samples=10, n_features=5)
    model = LogisticRegressionCV(solver='liblinear')
    model.fit(sparse.csr_matrix(X_c), y_c)
def test_logreg_intercept_scaling():
    # intercept_scaling <= 0 must raise with a helpful message.
    for scaling in (-1, 0):
        model = LogisticRegression(intercept_scaling=scaling)
        expected = ('Intercept scaling is %r but needs to be greater than 0.'
                    ' To disable fitting an intercept,'
                    ' set fit_intercept=False.' % model.intercept_scaling)
        assert_raise_message(ValueError, expected, model.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
    # intercept_scaling must be ignored when fit_intercept is False.
    model = LogisticRegression(fit_intercept=False)
    model.fit(X, Y1)
    assert_equal(model.intercept_, 0.)
def test_logreg_cv_penalty():
    # The final refit must use the same penalty as the CV search: with l1
    # both models should end up with the same sparsity pattern size.
    X_c, y_c = make_classification(n_samples=50, n_features=20,
                                   random_state=0)
    cv_model = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
    cv_model.fit(X_c, y_c)
    plain = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
    plain.fit(X_c, y_c)
    assert_equal(np.count_nonzero(cv_model.coef_),
                 np.count_nonzero(plain.coef_))
def test_logreg_predict_proba_multinomial():
    X, y = make_classification(n_samples=10, n_features=20, random_state=0,
                               n_classes=3, n_informative=10)

    # Predicted probabilites using the true-entropy loss should give a
    # smaller loss than those using the ovr method.
    clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
    clf_multi.fit(X, y)
    clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
    clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
    clf_ovr.fit(X, y)
    clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
    assert_greater(clf_ovr_loss, clf_multi_loss)

    # Predicted probabilites using the soft-max function should give a
    # smaller loss than those using the logistic function.
    clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
    # _predict_proba_lr applies the (wrong here) per-class logistic squash.
    clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
    assert_greater(clf_wrong_loss, clf_multi_loss)
@ignore_warnings
def test_max_iter():
    # Test that the maximum number of iteration is reached
    X, y_bin = iris.data, iris.target.copy()
    y_bin[y_bin == 2] = 0

    solvers = ['newton-cg', 'liblinear', 'sag']
    # old scipy doesn't have maxiter
    if sp_version >= (0, 12):
        solvers.append('lbfgs')

    # tol=1e-15 guarantees no early convergence, so n_iter_ == max_iter.
    for max_iter in range(1, 5):
        for solver in solvers:
            lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
                                    random_state=0, solver=solver)
            lr.fit(X, y_bin)
            assert_equal(lr.n_iter_[0], max_iter)
def test_n_iter():
    """Check that ``n_iter_`` has the expected shape for every solver.

    For the plain estimator the shape is ``(n_classes,)`` ('liblinear'
    reports a single count, hence one "class").  For the CV estimator it
    is ``(n_classes, n_folds, n_Cs)``, collapsing to one class entry for
    binary targets and for the multinomial formulation.
    """
    X, y = iris.data, iris.target
    y_bin = y.copy()
    y_bin[y_bin == 2] = 0

    n_Cs = 4
    n_cv_fold = 2

    for solver in ['newton-cg', 'liblinear', 'sag', 'lbfgs']:
        # OvR case
        n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
        clf = LogisticRegression(tol=1e-2, multi_class='ovr',
                                 solver=solver, C=1.,
                                 random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes,))

        n_classes = np.unique(y).shape[0]
        clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
                                   solver=solver, Cs=n_Cs, cv=n_cv_fold,
                                   random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
        clf.fit(X, y_bin)
        assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))

        # multinomial case
        n_classes = 1
        if solver in ('liblinear', 'sag'):
            # These solvers do not support the multinomial formulation.
            # BUGFIX: this used to be ``break``, which exited the solver
            # loop at 'liblinear' and therefore never exercised 'sag' or
            # 'lbfgs' (OvR or multinomial) at all.
            continue

        clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
                                 solver=solver, C=1.,
                                 random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes,))

        clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
                                   solver=solver, Cs=n_Cs, cv=n_cv_fold,
                                   random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
        clf.fit(X, y_bin)
        assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
@ignore_warnings
def test_warm_start():
    """A 1-iteration refit on the same data should barely move the
    coefficients with warm starting, and move them substantially without.

    liblinear does not support warm starts and is excluded.
    """
    X, y = iris.data, iris.target

    solvers = ['newton-cg', 'sag']
    if sp_version >= (0, 12):  # older scipy lacks the maxiter option
        solvers.append('lbfgs')

    for warm_start in [True, False]:
        for fit_intercept in [True, False]:
            for solver in solvers:
                for multi_class in ['ovr', 'multinomial']:
                    if solver == 'sag' and multi_class == 'multinomial':
                        # sag cannot optimize the multinomial loss.
                        # BUGFIX: this used to be ``break``, which only
                        # behaved correctly because 'multinomial' happens
                        # to be the last entry; ``continue`` states the
                        # intent and stays correct if more modes are added.
                        continue
                    clf = LogisticRegression(tol=1e-4,
                                             multi_class=multi_class,
                                             warm_start=warm_start,
                                             solver=solver,
                                             random_state=42, max_iter=100,
                                             fit_intercept=fit_intercept)
                    clf.fit(X, y)
                    coef_1 = clf.coef_

                    clf.max_iter = 1
                    with ignore_warnings():
                        clf.fit(X, y)
                    cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
                    msg = ("Warm starting issue with %s solver in %s mode "
                           "with fit_intercept=%s and warm_start=%s"
                           % (solver, multi_class, str(fit_intercept),
                              str(warm_start)))
                    if warm_start:
                        assert_greater(2.0, cum_diff, msg)
                    else:
                        assert_greater(cum_diff, 2.0, msg)
| bsd-3-clause |
ishalyminov/memn2n | tf_config.py | 1 | 1480 | import tensorflow as tf
def configure(in_config):
    """Register memn2n hyperparameters from *in_config* as TensorFlow flags.

    Each flag is read from ``in_config`` under the same key as the flag
    name, so a missing key fails fast with ``KeyError``.  Flags are
    registered in the same order as before (floats first, then integers).
    """
    # (flag name, help string) tables replace eleven near-identical
    # DEFINE_* calls; registration order is preserved.
    float_flags = [
        ('learning_rate', 'Learning rate for Adam Optimizer'),
        ('epsilon', 'Epsilon value for Adam Optimizer'),
        ('max_grad_norm', 'Clip gradients to this norm'),
    ]
    int_flags = [
        ('evaluation_interval', 'Evaluate and print results every x epochs'),
        ('batch_size', 'Batch size for training'),
        ('hops', 'Number of hops in the Memory Network'),
        ('epochs', 'Number of epochs to train for'),
        ('embedding_size', 'Embedding size for embedding matrices'),
        ('memory_size', 'Maximum size of memory'),
        ('task_id', 'bAbI task id, 1 <= id <= 6'),
        ('random_state', 'Random state'),
    ]
    for name, help_text in float_flags:
        tf.flags.DEFINE_float(name, in_config[name], help_text)
    for name, help_text in int_flags:
        tf.flags.DEFINE_integer(name, in_config[name], help_text)
hbrunn/OpenUpgrade | addons/website_event/models/event.py | 44 | 5246 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
import re
from openerp.addons.website.models.website import slug
class event(osv.osv):
    """Website-facing extension of ``event.event``.

    Adds publication and SEO metadata fields, an optional dedicated
    website menu (driven by the computed/inverse ``show_menu`` field)
    and Google Maps helpers for the event location.
    """
    _name = 'event.event'
    _inherit = ['event.event', 'website.seo.metadata']

    def _get_new_menu_pages(self, cr, uid, event, context=None):
        """Create the default website pages for *event*.

        Returns a list of ``(menu_label, url)`` tuples, one per page
        created (Introduction and Location).
        """
        context = context or {}
        todo = [
            (_('Introduction'), 'website_event.template_intro'),
            (_('Location'), 'website_event.template_location')
        ]
        web = self.pool.get('website')
        result = []
        for name, path in todo:
            name2 = name + ' ' + event.name
            newpath = web.new_page(cr, uid, name2, path, ispage=False,
                                   context=context)
            url = "/event/" + slug(event) + "/page/" + newpath
            result.append((name, url))
        return result

    def _set_show_menu(self, cr, uid, ids, name, value, arg, context=None):
        """Inverse of ``show_menu``: create the dedicated website menu
        tree when the flag is set, delete it when the flag is cleared."""
        menuobj = self.pool.get('website.menu')
        eventobj = self.pool.get('event.event')
        # fields.function inverse receives a single id; wrap it for browse.
        for event in self.browse(cr, uid, [ids], context=context):
            if event.menu_id and not value:
                menuobj.unlink(cr, uid, [event.menu_id.id], context=context)
            elif value and not event.menu_id:
                root = menuobj.create(cr, uid, {
                    'name': event.name
                }, context=context)
                tocreate = self._get_new_menu_pages(cr, uid, event, context)
                tocreate.append(
                    (_('Register'), '/event/%s/register' % slug(event)))
                sequence = 0
                for name, url in tocreate:
                    menuobj.create(cr, uid, {
                        'name': name,
                        'url': url,
                        'parent_id': root,
                        'sequence': sequence
                    }, context=context)
                    sequence += 1
                eventobj.write(cr, uid, [event.id], {'menu_id': root},
                               context=context)
        return True

    def _get_show_menu(self, cr, uid, ids, field_name, arg, context=None):
        """Getter of ``show_menu``: True when a dedicated menu exists."""
        res = dict.fromkeys(ids, '')
        for event in self.browse(cr, uid, ids, context=context):
            res[event.id] = bool(event.menu_id)
        return res

    def _website_url(self, cr, uid, ids, field_name, arg, context=None):
        """Getter of ``website_url``: the slug-based public event URL."""
        res = dict.fromkeys(ids, '')
        for event in self.browse(cr, uid, ids, context=context):
            res[event.id] = "/event/" + slug(event)
        return res

    def _default_hashtag(self, cr, uid, context=None):
        """Default Twitter hashtag: the company name, lowercased and
        stripped of separators/punctuation."""
        # BUGFIX: the default used to be the mutable literal ``context={}``,
        # which is shared across calls; use None and normalize instead.
        if context is None:
            context = {}
        name = self.pool.get('res.users').browse(
            cr, uid, uid, context=context).company_id.name
        return re.sub("[- \\.\\(\\)\\@\\#\\&]+", "", name).lower()

    _columns = {
        'twitter_hashtag': fields.char('Twitter Hashtag'),
        'website_published': fields.boolean('Visible in Website'),
        # TDE TODO FIXME: when website_mail/mail_thread.py inheritance work -> this field won't be necessary
        'website_message_ids': fields.one2many(
            'mail.message', 'res_id',
            domain=lambda self: [
                '&', ('model', '=', self._name), ('type', '=', 'comment')
            ],
            string='Website Messages',
            help="Website communication history",
        ),
        'website_url': fields.function(_website_url, string="Website url", type="char"),
        'show_menu': fields.function(_get_show_menu, fnct_inv=_set_show_menu, type='boolean', string='Dedicated Menu'),
        'menu_id': fields.many2one('website.menu', 'Event Menu'),
    }

    _defaults = {
        'show_menu': False,
        'twitter_hashtag': _default_hashtag
    }

    def google_map_img(self, cr, uid, ids, zoom=8, width=298, height=298, context=None):
        """Return the static map image URL for the event address, or None.

        Re-reads the address as superuser so public website visitors get
        the map regardless of record access rights.  zoom/width/height are
        kept for API compatibility but are not forwarded here — presumably
        the partner helper applies its own defaults; verify against caller.
        """
        event = self.browse(cr, uid, ids[0], context=context)
        if event.address_id:
            return self.browse(cr, SUPERUSER_ID, ids[0],
                               context=context).address_id.google_map_img()
        return None

    def google_map_link(self, cr, uid, ids, zoom=8, context=None):
        """Return the Google Maps link for the event address, or None."""
        event = self.browse(cr, uid, ids[0], context=context)
        if event.address_id:
            return self.browse(cr, SUPERUSER_ID, ids[0],
                               context=context).address_id.google_map_link()
        return None
| agpl-3.0 |
kaixinjxq/crosswalk-test-suite | webapi/tct-csp-w3c-tests/csp-py/csp_script-src_cross-origin_unsafe-inline.py | 30 | 3230 | def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "script-src " + url1 + " 'unsafe-inline'"
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_script-src_cross-origin_unsafe-inline</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#script-src"/>
<meta name="flags" content=""/>
<meta charset="utf-8"/>
<script src='""" + url1 + """/tests/resources/testharness.js'></script>
<script src='""" + url1 + """/tests/resources/testharnessreport.js'></script>
</head>
<body>
<div id="log"></div>
<script src='""" + url1 + """/tests/csp/support/test81.js'></script>
<script src='""" + url2 + """/tests/csp/support/test83.js'></script>
<script src="support/csp.js"></script>
<script>
test(function() {
assert_true(typeof X != "number", "attribute defined internal");
}, document.title + "_blocked");
test(function() {
assert_true(typeof getVideoURI == "function", "Function getVideoURI is defined");
}, document.title + "_allowed");
test(function() {
assert_true(typeof q == "undefined", "Function getVideoURI is defined");
}, document.title + "_blocked_ext");
</script>
</body>
</html> """
| bsd-3-clause |
Deepakkothandan/ansible | lib/ansible/modules/network/cloudengine/ce_bgp_neighbor_af.py | 27 | 109137 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_bgp_neighbor_af
version_added: "2.4"
short_description: Manages BGP neighbor Address-family configuration on HUAWEI CloudEngine switches.
description:
- Manages BGP neighbor Address-family configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
vrf_name:
description:
- Name of a BGP instance. The name is a case-sensitive string of characters.
The BGP instance can be used only after the corresponding VPN instance is created.
required: true
af_type:
description:
- Address family type of a BGP instance.
required: true
choices: ['ipv4uni', 'ipv4multi', 'ipv4vpn', 'ipv6uni', 'ipv6vpn', 'evpn']
remote_address:
description:
- IPv4 or IPv6 peer connection address.
required: true
advertise_irb:
description:
- If the value is true, advertised IRB routes are distinguished.
If the value is false, advertised IRB routes are not distinguished.
required: false
default: no_use
choices: ['no_use','true','false']
advertise_arp:
description:
- If the value is true, advertised ARP routes are distinguished.
If the value is false, advertised ARP routes are not distinguished.
required: false
default: no_use
choices: ['no_use','true','false']
advertise_remote_nexthop:
description:
- If the value is true, the remote next-hop attribute is advertised to peers.
If the value is false, the remote next-hop attribute is not advertised to any peers.
required: false
default: no_use
choices: ['no_use','true','false']
advertise_community:
description:
- If the value is true, the community attribute is advertised to peers.
If the value is false, the community attribute is not advertised to peers.
required: false
default: no_use
choices: ['no_use','true','false']
advertise_ext_community:
description:
- If the value is true, the extended community attribute is advertised to peers.
If the value is false, the extended community attribute is not advertised to peers.
required: false
default: no_use
choices: ['no_use','true','false']
discard_ext_community:
description:
- If the value is true, the extended community attribute in the peer route information is discarded.
If the value is false, the extended community attribute in the peer route information is not discarded.
required: false
default: no_use
choices: ['no_use','true','false']
allow_as_loop_enable:
description:
- If the value is true, repetitive local AS numbers are allowed.
If the value is false, repetitive local AS numbers are not allowed.
required: false
default: no_use
choices: ['no_use','true','false']
allow_as_loop_limit:
description:
- Set the maximum number of repetitive local AS number.
The value is an integer ranging from 1 to 10.
required: false
default: null
keep_all_routes:
description:
- If the value is true, the system stores all route update messages received from all peers (groups)
after BGP connection setup.
If the value is false, the system stores only BGP update messages that are received from peers
and pass the configured import policy.
required: false
default: no_use
choices: ['no_use','true','false']
nexthop_configure:
description:
- null, The next hop is not changed.
local, The next hop is changed to the local IP address.
invariable, Prevent the device from changing the next hop of each imported IGP route
when advertising it to its BGP peers.
required: false
default: null
choices: ['null', 'local', 'invariable']
preferred_value:
description:
- Assign a preferred value for the routes learned from a specified peer.
The value is an integer ranging from 0 to 65535.
required: false
default: null
public_as_only:
description:
- If the value is true, sent BGP update messages carry only the public AS number but do not carry
private AS numbers.
If the value is false, sent BGP update messages can carry private AS numbers.
required: false
default: no_use
choices: ['no_use','true','false']
public_as_only_force:
description:
- If the value is true, sent BGP update messages carry only the public AS number but do not carry
private AS numbers.
If the value is false, sent BGP update messages can carry private AS numbers.
required: false
default: no_use
choices: ['no_use','true','false']
public_as_only_limited:
description:
- Limited use public as number.
required: false
default: no_use
choices: ['no_use','true','false']
public_as_only_replace:
description:
- Private as replaced by public as number.
required: false
default: no_use
choices: ['no_use','true','false']
public_as_only_skip_peer_as:
description:
- Public as only skip peer as.
required: false
default: no_use
choices: ['no_use','true','false']
route_limit:
description:
- Configure the maximum number of routes that can be accepted from a peer.
The value is an integer ranging from 1 to 4294967295.
required: false
default: null
route_limit_percent:
description:
- Specify the percentage of routes when a router starts to generate an alarm.
The value is an integer ranging from 1 to 100.
required: false
default: null
route_limit_type:
description:
- Noparameter, After the number of received routes exceeds the threshold and the timeout
timer expires,no action.
AlertOnly, An alarm is generated and no additional routes will be accepted if the maximum
number of routes allowed have been received.
IdleForever, The connection that is interrupted is not automatically re-established if the
maximum number of routes allowed have been received.
IdleTimeout, After the number of received routes exceeds the threshold and the timeout timer
expires, the connection that is interrupted is automatically re-established.
required: false
default: null
choices: ['noparameter', 'alertOnly', 'idleForever', 'idleTimeout']
route_limit_idle_timeout:
description:
- Specify the value of the idle-timeout timer to automatically reestablish the connections after
they are cut off when the number of routes exceeds the set threshold.
The value is an integer ranging from 1 to 1200.
required: false
default: null
rt_updt_interval:
description:
- Specify the minimum interval at which Update packets are sent. The value is an integer, in seconds.
The value is an integer ranging from 0 to 600.
required: false
default: null
redirect_ip:
description:
- Redirect ip.
required: false
default: no_use
choices: ['no_use','true','false']
redirect_ip_vaildation:
description:
- Redirect ip validation.
required: false
default: no_use
choices: ['no_use','true','false']
reflect_client:
description:
- If the value is true, the local device functions as the route reflector and a peer functions
as a client of the route reflector.
If the value is false, the route reflector and client functions are not configured.
required: false
default: no_use
choices: ['no_use','true','false']
substitute_as_enable:
description:
- If the value is true, the function to replace a specified peer's AS number in the AS-Path attribute with
the local AS number is enabled.
If the value is false, the function to replace a specified peer's AS number in the AS-Path attribute with
the local AS number is disabled.
required: false
default: no_use
choices: ['no_use','true','false']
import_rt_policy_name:
description:
- Specify the filtering policy applied to the routes learned from a peer.
The value is a string of 1 to 40 characters.
required: false
default: null
export_rt_policy_name:
description:
- Specify the filtering policy applied to the routes to be advertised to a peer.
The value is a string of 1 to 40 characters.
required: false
default: null
import_pref_filt_name:
description:
- Specify the IPv4 filtering policy applied to the routes received from a specified peer.
The value is a string of 1 to 169 characters.
required: false
default: null
export_pref_filt_name:
description:
- Specify the IPv4 filtering policy applied to the routes to be advertised to a specified peer.
The value is a string of 1 to 169 characters.
required: false
default: null
import_as_path_filter:
description:
- Apply an AS_Path-based filtering policy to the routes received from a specified peer.
The value is an integer ranging from 1 to 256.
required: false
default: null
export_as_path_filter:
description:
- Apply an AS_Path-based filtering policy to the routes to be advertised to a specified peer.
The value is an integer ranging from 1 to 256.
required: false
default: null
import_as_path_name_or_num:
description:
- A routing strategy based on the AS path list for routing received by a designated peer.
required: false
default: null
export_as_path_name_or_num:
description:
- Application of a AS path list based filtering policy to the routing of a specified peer.
required: false
default: null
import_acl_name_or_num:
description:
- Apply an IPv4 ACL-based filtering policy to the routes received from a specified peer.
The value is a string of 1 to 32 characters.
required: false
default: null
export_acl_name_or_num:
description:
- Apply an IPv4 ACL-based filtering policy to the routes to be advertised to a specified peer.
The value is a string of 1 to 32 characters.
required: false
default: null
ipprefix_orf_enable:
description:
- If the value is true, the address prefix-based Outbound Route Filter (ORF) capability is
enabled for peers.
If the value is false, the address prefix-based Outbound Route Filter (ORF) capability is
disabled for peers.
required: false
default: no_use
choices: ['no_use','true','false']
is_nonstd_ipprefix_mod:
description:
- If the value is true, Non-standard capability codes are used during capability negotiation.
If the value is false, RFC-defined standard ORF capability codes are used during capability negotiation.
required: false
default: no_use
choices: ['no_use','true','false']
orftype:
description:
- ORF Type.
The value is an integer ranging from 0 to 65535.
required: false
default: null
orf_mode:
description:
- ORF mode.
null, Default value.
receive, ORF for incoming packets.
send, ORF for outgoing packets.
both, ORF for incoming and outgoing packets.
required: false
default: null
choices: ['null', 'receive', 'send', 'both']
soostring:
description:
- Configure the Site-of-Origin (SoO) extended community attribute.
The value is a string of 3 to 21 characters.
required: false
default: null
default_rt_adv_enable:
description:
- If the value is true, the function to advertise default routes to peers is enabled.
If the value is false, the function to advertise default routes to peers is disabled.
required: false
default: no_use
choices: ['no_use','true', 'false']
default_rt_adv_policy:
description:
- Specify the name of a used policy. The value is a string.
The value is a string of 1 to 40 characters.
required: false
default: null
default_rt_match_mode:
description:
- null, Null.
matchall, Advertise the default route if all matching conditions are met.
matchany, Advertise the default route if any matching condition is met.
required: false
default: null
choices: ['null', 'matchall', 'matchany']
add_path_mode:
description:
- null, Null.
receive, Support receiving Add-Path routes.
send, Support sending Add-Path routes.
both, Support receiving and sending Add-Path routes.
required: false
default: null
choices: ['null', 'receive', 'send', 'both']
adv_add_path_num:
description:
- The number of addPath advertise route.
The value is an integer ranging from 2 to 64.
required: false
default: null
origin_as_valid:
description:
- If the value is true, Application results of route announcement.
If the value is false, Routing application results are not notified.
required: false
default: no_use
choices: ['no_use','true', 'false']
vpls_enable:
description:
- If the value is true, vpls enable.
If the value is false, vpls disable.
required: false
default: no_use
choices: ['no_use','true', 'false']
vpls_ad_disable:
description:
- If the value is true, enable vpls-ad.
If the value is false, disable vpls-ad.
required: false
default: no_use
choices: ['no_use','true', 'false']
update_pkt_standard_compatible:
description:
- If the value is true, When the vpnv4 multicast neighbor receives and updates the message,
the message has no label.
If the value is false, When the vpnv4 multicast neighbor receives and updates the message,
the message has label.
required: false
default: no_use
choices: ['no_use','true', 'false']
'''
EXAMPLES = '''
- name: CloudEngine BGP neighbor address family test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config BGP peer Address_Family"
ce_bgp_neighbor_af:
state: present
vrf_name: js
af_type: ipv4uni
remote_address: 192.168.10.10
nexthop_configure: local
provider: "{{ cli }}"
- name: "Undo BGP peer Address_Family"
ce_bgp_neighbor_af:
state: absent
vrf_name: js
af_type: ipv4uni
remote_address: 192.168.10.10
nexthop_configure: local
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"af_type": "ipv4uni", "nexthop_configure": "local",
"remote_address": "192.168.10.10",
"state": "present", "vrf_name": "js"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {"bgp neighbor af": {"af_type": "ipv4uni", "remote_address": "192.168.10.10",
"vrf_name": "js"},
"bgp neighbor af other": {"af_type": "ipv4uni", "nexthop_configure": "null",
"vrf_name": "js"}}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"bgp neighbor af": {"af_type": "ipv4uni", "remote_address": "192.168.10.10",
"vrf_name": "js"},
"bgp neighbor af other": {"af_type": "ipv4uni", "nexthop_configure": "local",
"vrf_name": "js"}}
updates:
description: command sent to the device
returned: always
type: list
sample: ["peer 192.168.10.10 next-hop-local"]
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr
# get bgp peer af
CE_GET_BGP_PEER_AF_HEADER = """
<filter type="subtree">
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpVrfAFs>
<bgpVrfAF>
<afType>%s</afType>
<peerAFs>
<peerAF>
<remoteAddress></remoteAddress>
"""
CE_GET_BGP_PEER_AF_TAIL = """
</peerAF>
</peerAFs>
</bgpVrfAF>
</bgpVrfAFs>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</filter>
"""
# merge bgp peer af
CE_MERGE_BGP_PEER_AF_HEADER = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpVrfAFs>
<bgpVrfAF>
<afType>%s</afType>
<peerAFs>
<peerAF operation="merge">
<remoteAddress>%s</remoteAddress>
"""
CE_MERGE_BGP_PEER_AF_TAIL = """
</peerAF>
</peerAFs>
</bgpVrfAF>
</bgpVrfAFs>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
# create bgp peer af
CE_CREATE_BGP_PEER_AF = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpVrfAFs>
<bgpVrfAF>
<afType>%s</afType>
<peerAFs>
<peerAF operation="create">
<remoteAddress>%s</remoteAddress>
</peerAF>
</peerAFs>
</bgpVrfAF>
</bgpVrfAFs>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
# delete bgp peer af
CE_DELETE_BGP_PEER_AF = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpVrfAFs>
<bgpVrfAF>
<afType>%s</afType>
<peerAFs>
<peerAF operation="delete">
<remoteAddress>%s</remoteAddress>
</peerAF>
</peerAFs>
</bgpVrfAF>
</bgpVrfAFs>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
class BgpNeighborAf(object):
""" Manages BGP neighbor Address-family configuration """
def netconf_get_config(self, **kwargs):
    """Run a NETCONF get with ``conf_str`` and return the reply XML."""
    return get_nc_config(kwargs["module"], kwargs["conf_str"])
def netconf_set_config(self, **kwargs):
    """Push ``conf_str`` to the device via NETCONF and return the reply."""
    return set_nc_config(kwargs["module"], kwargs["conf_str"])
def check_bgp_neighbor_af_args(self, **kwargs):
    """Validate the peer address-family arguments and query the device.

    Checks vrf_name length and remote_address syntax (fail_json on
    error), then reads the current peer AF configuration via NETCONF to
    decide whether a change is required for the requested state.

    Returns a dict holding the values found on the device plus a
    boolean "need_cfg" flag.
    """
    module = kwargs["module"]
    result = dict()
    need_cfg = False

    vrf_name = module.params['vrf_name']
    if vrf_name:
        # CloudEngine limits VRF names to 1..31 characters.
        if len(vrf_name) > 31 or len(vrf_name) == 0:
            module.fail_json(
                msg='Error: The len of vrf_name %s is out of [1 - 31].' % vrf_name)

    state = module.params['state']
    af_type = module.params['af_type']
    remote_address = module.params['remote_address']

    if not check_ip_addr(ipaddr=remote_address):
        module.fail_json(
            msg='Error: The remote_address %s is invalid.' % remote_address)

    # Query the current peer address-family entry for this VRF/AF.
    conf_str = CE_GET_BGP_PEER_AF_HEADER % (
        vrf_name, af_type) + CE_GET_BGP_PEER_AF_TAIL
    recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)

    if state == "present":
        if "<data/>" in recv_xml:
            # Empty reply: the peer AF entry does not exist yet.
            need_cfg = True
        else:
            re_find = re.findall(
                r'.*<remoteAddress>(.*)</remoteAddress>.*', recv_xml)

            if re_find:
                result["remote_address"] = re_find
                result["vrf_name"] = vrf_name
                result["af_type"] = af_type
                # NOTE(review): only the first match is compared — assumes
                # one peer entry per reply; verify for multi-peer replies.
                if re_find[0] != remote_address:
                    need_cfg = True
            else:
                need_cfg = True
    else:
        if "<data/>" in recv_xml:
            # Nothing configured, nothing to delete.
            pass
        else:
            re_find = re.findall(
                r'.*<remoteAddress>(.*)</remoteAddress>.*', recv_xml)

            if re_find:
                result["remote_address"] = re_find
                result["vrf_name"] = vrf_name
                result["af_type"] = af_type
                # The entry exists and matches: deletion is required.
                if re_find[0] == remote_address:
                    need_cfg = True

    result["need_cfg"] = need_cfg
    return result
def check_bgp_neighbor_af_other(self, **kwargs):
""" check_bgp_neighbor_af_other """
module = kwargs["module"]
result = dict()
need_cfg = False
state = module.params['state']
vrf_name = module.params['vrf_name']
af_type = module.params['af_type']
if state == "absent":
result["need_cfg"] = need_cfg
return result
advertise_irb = module.params['advertise_irb']
if advertise_irb != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseIrb></advertiseIrb>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseIrb>(.*)</advertiseIrb>.*', recv_xml)
if re_find:
result["advertise_irb"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_irb:
need_cfg = True
else:
need_cfg = True
advertise_arp = module.params['advertise_arp']
if advertise_arp != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseArp></advertiseArp>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseArp>(.*)</advertiseArp>.*', recv_xml)
if re_find:
result["advertise_arp"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_arp:
need_cfg = True
else:
need_cfg = True
advertise_remote_nexthop = module.params['advertise_remote_nexthop']
if advertise_remote_nexthop != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseRemoteNexthop></advertiseRemoteNexthop>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseRemoteNexthop>(.*)</advertiseRemoteNexthop>.*', recv_xml)
if re_find:
result["advertise_remote_nexthop"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_remote_nexthop:
need_cfg = True
else:
need_cfg = True
advertise_community = module.params['advertise_community']
if advertise_community != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseCommunity></advertiseCommunity>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseCommunity>(.*)</advertiseCommunity>.*', recv_xml)
if re_find:
result["advertise_community"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_community:
need_cfg = True
else:
need_cfg = True
advertise_ext_community = module.params['advertise_ext_community']
if advertise_ext_community != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseExtCommunity></advertiseExtCommunity>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseExtCommunity>(.*)</advertiseExtCommunity>.*', recv_xml)
if re_find:
result["advertise_ext_community"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_ext_community:
need_cfg = True
else:
need_cfg = True
discard_ext_community = module.params['discard_ext_community']
if discard_ext_community != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<discardExtCommunity></discardExtCommunity>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<discardExtCommunity>(.*)</discardExtCommunity>.*', recv_xml)
if re_find:
result["discard_ext_community"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != discard_ext_community:
need_cfg = True
else:
need_cfg = True
allow_as_loop_enable = module.params['allow_as_loop_enable']
if allow_as_loop_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<allowAsLoopEnable></allowAsLoopEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<allowAsLoopEnable>(.*)</allowAsLoopEnable>.*', recv_xml)
if re_find:
result["allow_as_loop_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != allow_as_loop_enable:
need_cfg = True
else:
need_cfg = True
allow_as_loop_limit = module.params['allow_as_loop_limit']
if allow_as_loop_limit:
if int(allow_as_loop_limit) > 10 or int(allow_as_loop_limit) < 1:
module.fail_json(
msg='the value of allow_as_loop_limit %s is out of [1 - 10].' % allow_as_loop_limit)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<allowAsLoopLimit></allowAsLoopLimit>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<allowAsLoopLimit>(.*)</allowAsLoopLimit>.*', recv_xml)
if re_find:
result["allow_as_loop_limit"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != allow_as_loop_limit:
need_cfg = True
else:
need_cfg = True
keep_all_routes = module.params['keep_all_routes']
if keep_all_routes != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<keepAllRoutes></keepAllRoutes>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<keepAllRoutes>(.*)</keepAllRoutes>.*', recv_xml)
if re_find:
result["keep_all_routes"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != keep_all_routes:
need_cfg = True
else:
need_cfg = True
nexthop_configure = module.params['nexthop_configure']
if nexthop_configure:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<nextHopConfigure></nextHopConfigure>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<nextHopConfigure>(.*)</nextHopConfigure>.*', recv_xml)
if re_find:
result["nexthop_configure"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != nexthop_configure:
need_cfg = True
else:
need_cfg = True
preferred_value = module.params['preferred_value']
if preferred_value:
if int(preferred_value) > 65535 or int(preferred_value) < 0:
module.fail_json(
msg='the value of preferred_value %s is out of [0 - 65535].' % preferred_value)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<preferredValue></preferredValue>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<preferredValue>(.*)</preferredValue>.*', recv_xml)
if re_find:
result["preferred_value"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != preferred_value:
need_cfg = True
else:
need_cfg = True
public_as_only = module.params['public_as_only']
if public_as_only != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnly></publicAsOnly>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnly>(.*)</publicAsOnly>.*', recv_xml)
if re_find:
result["public_as_only"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only:
need_cfg = True
else:
need_cfg = True
public_as_only_force = module.params['public_as_only_force']
if public_as_only_force != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnlyForce></publicAsOnlyForce>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnlyForce>(.*)</publicAsOnlyForce>.*', recv_xml)
if re_find:
result["public_as_only_force"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only_force:
need_cfg = True
else:
need_cfg = True
public_as_only_limited = module.params['public_as_only_limited']
if public_as_only_limited != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnlyLimited></publicAsOnlyLimited>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnlyLimited>(.*)</publicAsOnlyLimited>.*', recv_xml)
if re_find:
result["public_as_only_limited"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only_limited:
need_cfg = True
else:
need_cfg = True
public_as_only_replace = module.params['public_as_only_replace']
if public_as_only_replace != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnlyReplace></publicAsOnlyReplace>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnlyReplace>(.*)</publicAsOnlyReplace>.*', recv_xml)
if re_find:
result["public_as_only_replace"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only_replace:
need_cfg = True
else:
need_cfg = True
public_as_only_skip_peer_as = module.params[
'public_as_only_skip_peer_as']
if public_as_only_skip_peer_as != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnlySkipPeerAs></publicAsOnlySkipPeerAs>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnlySkipPeerAs>(.*)</publicAsOnlySkipPeerAs>.*', recv_xml)
if re_find:
result["public_as_only_skip_peer_as"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only_skip_peer_as:
need_cfg = True
else:
need_cfg = True
route_limit = module.params['route_limit']
if route_limit:
if int(route_limit) < 1:
module.fail_json(
msg='the value of route_limit %s is out of [1 - 4294967295].' % route_limit)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<routeLimit></routeLimit>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<routeLimit>(.*)</routeLimit>.*', recv_xml)
if re_find:
result["route_limit"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != route_limit:
need_cfg = True
else:
need_cfg = True
route_limit_percent = module.params['route_limit_percent']
if route_limit_percent:
if int(route_limit_percent) < 1 or int(route_limit_percent) > 100:
module.fail_json(
msg='Error: The value of route_limit_percent %s is out of [1 - 100].' % route_limit_percent)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<routeLimitPercent></routeLimitPercent>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<routeLimitPercent>(.*)</routeLimitPercent>.*', recv_xml)
if re_find:
result["route_limit_percent"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != route_limit_percent:
need_cfg = True
else:
need_cfg = True
route_limit_type = module.params['route_limit_type']
if route_limit_type:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<routeLimitType></routeLimitType>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<routeLimitType>(.*)</routeLimitType>.*', recv_xml)
if re_find:
result["route_limit_type"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != route_limit_type:
need_cfg = True
else:
need_cfg = True
route_limit_idle_timeout = module.params['route_limit_idle_timeout']
if route_limit_idle_timeout:
if int(route_limit_idle_timeout) < 1 or int(route_limit_idle_timeout) > 1200:
module.fail_json(
msg='Error: The value of route_limit_idle_timeout %s is out of '
'[1 - 1200].' % route_limit_idle_timeout)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<routeLimitIdleTimeout></routeLimitPercent>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<routeLimitIdleTimeout>(.*)</routeLimitIdleTimeout>.*', recv_xml)
if re_find:
result["route_limit_idle_timeout"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != route_limit_idle_timeout:
need_cfg = True
else:
need_cfg = True
rt_updt_interval = module.params['rt_updt_interval']
if rt_updt_interval:
if int(rt_updt_interval) < 0 or int(rt_updt_interval) > 600:
module.fail_json(
msg='Error: The value of rt_updt_interval %s is out of [0 - 600].' % rt_updt_interval)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<rtUpdtInterval></rtUpdtInterval>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<rtUpdtInterval>(.*)</rtUpdtInterval>.*', recv_xml)
if re_find:
result["rt_updt_interval"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != rt_updt_interval:
need_cfg = True
else:
need_cfg = True
redirect_ip = module.params['redirect_ip']
if redirect_ip != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<redirectIP></redirectIP>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<redirectIP>(.*)</redirectIP>.*', recv_xml)
if re_find:
result["redirect_ip"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != redirect_ip:
need_cfg = True
else:
need_cfg = True
redirect_ip_vaildation = module.params['redirect_ip_vaildation']
if redirect_ip_vaildation != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<redirectIPVaildation></redirectIPVaildation>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<redirectIPVaildation>(.*)</redirectIPVaildation>.*', recv_xml)
if re_find:
result["redirect_ip_vaildation"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != redirect_ip_vaildation:
need_cfg = True
else:
need_cfg = True
reflect_client = module.params['reflect_client']
if reflect_client != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<reflectClient></reflectClient>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<reflectClient>(.*)</reflectClient>.*', recv_xml)
if re_find:
result["reflect_client"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != reflect_client:
need_cfg = True
else:
need_cfg = True
substitute_as_enable = module.params['substitute_as_enable']
if substitute_as_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<substituteAsEnable></substituteAsEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<substituteAsEnable>(.*)</substituteAsEnable>.*', recv_xml)
if re_find:
result["substitute_as_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != substitute_as_enable:
need_cfg = True
else:
need_cfg = True
import_rt_policy_name = module.params['import_rt_policy_name']
if import_rt_policy_name:
if len(import_rt_policy_name) < 1 or len(import_rt_policy_name) > 40:
module.fail_json(
msg='Error: The len of import_rt_policy_name %s is out of [1 - 40].' % import_rt_policy_name)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importRtPolicyName></importRtPolicyName>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importRtPolicyName>(.*)</importRtPolicyName>.*', recv_xml)
if re_find:
result["import_rt_policy_name"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_rt_policy_name:
need_cfg = True
else:
need_cfg = True
export_rt_policy_name = module.params['export_rt_policy_name']
if export_rt_policy_name:
if len(export_rt_policy_name) < 1 or len(export_rt_policy_name) > 40:
module.fail_json(
msg='Error: The len of export_rt_policy_name %s is out of [1 - 40].' % export_rt_policy_name)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportRtPolicyName></exportRtPolicyName>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportRtPolicyName>(.*)</exportRtPolicyName>.*', recv_xml)
if re_find:
result["export_rt_policy_name"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_rt_policy_name:
need_cfg = True
else:
need_cfg = True
import_pref_filt_name = module.params['import_pref_filt_name']
if import_pref_filt_name:
if len(import_pref_filt_name) < 1 or len(import_pref_filt_name) > 169:
module.fail_json(
msg='Error: The len of import_pref_filt_name %s is out of [1 - 169].' % import_pref_filt_name)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importPrefFiltName></importPrefFiltName>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importPrefFiltName>(.*)</importPrefFiltName>.*', recv_xml)
if re_find:
result["import_pref_filt_name"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_pref_filt_name:
need_cfg = True
else:
need_cfg = True
export_pref_filt_name = module.params['export_pref_filt_name']
if export_pref_filt_name:
if len(export_pref_filt_name) < 1 or len(export_pref_filt_name) > 169:
module.fail_json(
msg='Error: The len of export_pref_filt_name %s is out of [1 - 169].' % export_pref_filt_name)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportPrefFiltName></exportPrefFiltName>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportPrefFiltName>(.*)</exportPrefFiltName>.*', recv_xml)
if re_find:
result["export_pref_filt_name"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_pref_filt_name:
need_cfg = True
else:
need_cfg = True
import_as_path_filter = module.params['import_as_path_filter']
if import_as_path_filter:
if int(import_as_path_filter) < 1 or int(import_as_path_filter) > 256:
module.fail_json(
msg='Error: The value of import_as_path_filter %s is out of [1 - 256].' % import_as_path_filter)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importAsPathFilter></importAsPathFilter>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importAsPathFilter>(.*)</importAsPathFilter>.*', recv_xml)
if re_find:
result["import_as_path_filter"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_as_path_filter:
need_cfg = True
else:
need_cfg = True
export_as_path_filter = module.params['export_as_path_filter']
if export_as_path_filter:
if int(export_as_path_filter) < 1 or int(export_as_path_filter) > 256:
module.fail_json(
msg='Error: The value of export_as_path_filter %s is out of [1 - 256].' % export_as_path_filter)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportAsPathFilter></exportAsPathFilter>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportAsPathFilter>(.*)</exportAsPathFilter>.*', recv_xml)
if re_find:
result["export_as_path_filter"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_as_path_filter:
need_cfg = True
else:
need_cfg = True
import_as_path_name_or_num = module.params[
'import_as_path_name_or_num']
if import_as_path_name_or_num:
if len(import_as_path_name_or_num) < 1 or len(import_as_path_name_or_num) > 51:
module.fail_json(
msg='Error: The len of import_as_path_name_or_num %s is out '
'of [1 - 51].' % import_as_path_name_or_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importAsPathNameOrNum></importAsPathNameOrNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importAsPathNameOrNum>(.*)</importAsPathNameOrNum>.*', recv_xml)
if re_find:
result["import_as_path_name_or_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_as_path_name_or_num:
need_cfg = True
else:
need_cfg = True
export_as_path_name_or_num = module.params[
'export_as_path_name_or_num']
if export_as_path_name_or_num:
if len(export_as_path_name_or_num) < 1 or len(export_as_path_name_or_num) > 51:
module.fail_json(
msg='Error: The len of export_as_path_name_or_num %s is out '
'of [1 - 51].' % export_as_path_name_or_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportAsPathNameOrNum></exportAsPathNameOrNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportAsPathNameOrNum>(.*)</exportAsPathNameOrNum>.*', recv_xml)
if re_find:
result["export_as_path_name_or_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_as_path_name_or_num:
need_cfg = True
else:
need_cfg = True
import_acl_name_or_num = module.params['import_acl_name_or_num']
if import_acl_name_or_num:
if len(import_acl_name_or_num) < 1 or len(import_acl_name_or_num) > 32:
module.fail_json(
msg='Error: The len of import_acl_name_or_num %s is out of [1 - 32].' % import_acl_name_or_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importAclNameOrNum></importAclNameOrNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importAclNameOrNum>(.*)</importAclNameOrNum>.*', recv_xml)
if re_find:
result["import_acl_name_or_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_acl_name_or_num:
need_cfg = True
else:
need_cfg = True
export_acl_name_or_num = module.params['export_acl_name_or_num']
if export_acl_name_or_num:
if len(export_acl_name_or_num) < 1 or len(export_acl_name_or_num) > 32:
module.fail_json(
msg='Error: The len of export_acl_name_or_num %s is out of [1 - 32].' % export_acl_name_or_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportAclNameOrNum></exportAclNameOrNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportAclNameOrNum>(.*)</exportAclNameOrNum>.*', recv_xml)
if re_find:
result["export_acl_name_or_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_acl_name_or_num:
need_cfg = True
else:
need_cfg = True
ipprefix_orf_enable = module.params['ipprefix_orf_enable']
if ipprefix_orf_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<ipprefixOrfEnable></ipprefixOrfEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<ipprefixOrfEnable>(.*)</ipprefixOrfEnable>.*', recv_xml)
if re_find:
result["ipprefix_orf_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != ipprefix_orf_enable:
need_cfg = True
else:
need_cfg = True
is_nonstd_ipprefix_mod = module.params['is_nonstd_ipprefix_mod']
if is_nonstd_ipprefix_mod != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<isNonstdIpprefixMod></isNonstdIpprefixMod>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<isNonstdIpprefixMod>(.*)</isNonstdIpprefixMod>.*', recv_xml)
if re_find:
result["is_nonstd_ipprefix_mod"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != is_nonstd_ipprefix_mod:
need_cfg = True
else:
need_cfg = True
orftype = module.params['orftype']
if orftype:
if int(orftype) < 0 or int(orftype) > 65535:
module.fail_json(
msg='Error: The value of orftype %s is out of [0 - 65535].' % orftype)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<orftype></orftype>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<orftype>(.*)</orftype>.*', recv_xml)
if re_find:
result["orftype"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != orftype:
need_cfg = True
else:
need_cfg = True
orf_mode = module.params['orf_mode']
if orf_mode:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<orfMode></orfMode>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<orfMode>(.*)</orfMode>.*', recv_xml)
if re_find:
result["orf_mode"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != orf_mode:
need_cfg = True
else:
need_cfg = True
soostring = module.params['soostring']
if soostring:
if len(soostring) < 3 or len(soostring) > 21:
module.fail_json(
msg='Error: The len of soostring %s is out of [3 - 21].' % soostring)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<soostring></soostring>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<soostring>(.*)</soostring>.*', recv_xml)
if re_find:
result["soostring"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != soostring:
need_cfg = True
else:
need_cfg = True
default_rt_adv_enable = module.params['default_rt_adv_enable']
if default_rt_adv_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<defaultRtAdvEnable></defaultRtAdvEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<defaultRtAdvEnable>(.*)</defaultRtAdvEnable>.*', recv_xml)
if re_find:
result["default_rt_adv_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != default_rt_adv_enable:
need_cfg = True
else:
need_cfg = True
default_rt_adv_policy = module.params['default_rt_adv_policy']
if default_rt_adv_policy:
if len(default_rt_adv_policy) < 1 or len(default_rt_adv_policy) > 40:
module.fail_json(
msg='Error: The len of default_rt_adv_policy %s is out of [1 - 40].' % default_rt_adv_policy)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<defaultRtAdvPolicy></defaultRtAdvPolicy>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<defaultRtAdvPolicy>(.*)</defaultRtAdvPolicy>.*', recv_xml)
if re_find:
result["default_rt_adv_policy"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != default_rt_adv_policy:
need_cfg = True
else:
need_cfg = True
default_rt_match_mode = module.params['default_rt_match_mode']
if default_rt_match_mode:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<defaultRtMatchMode></defaultRtMatchMode>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<defaultRtMatchMode>(.*)</defaultRtMatchMode>.*', recv_xml)
if re_find:
result["default_rt_match_mode"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != default_rt_match_mode:
need_cfg = True
else:
need_cfg = True
add_path_mode = module.params['add_path_mode']
if add_path_mode:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<addPathMode></addPathMode>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<addPathMode>(.*)</addPathMode>.*', recv_xml)
if re_find:
result["add_path_mode"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != add_path_mode:
need_cfg = True
else:
need_cfg = True
adv_add_path_num = module.params['adv_add_path_num']
if adv_add_path_num:
if int(orftype) < 2 or int(orftype) > 64:
module.fail_json(
msg='Error: The value of adv_add_path_num %s is out of [2 - 64].' % adv_add_path_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advAddPathNum></advAddPathNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advAddPathNum>(.*)</advAddPathNum>.*', recv_xml)
if re_find:
result["adv_add_path_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != adv_add_path_num:
need_cfg = True
else:
need_cfg = True
origin_as_valid = module.params['origin_as_valid']
if origin_as_valid != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<originAsValid></originAsValid>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<originAsValid>(.*)</originAsValid>.*', recv_xml)
if re_find:
result["origin_as_valid"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != origin_as_valid:
need_cfg = True
else:
need_cfg = True
vpls_enable = module.params['vpls_enable']
if vpls_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<vplsEnable></vplsEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<vplsEnable>(.*)</vplsEnable>.*', recv_xml)
if re_find:
result["vpls_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != vpls_enable:
need_cfg = True
else:
need_cfg = True
vpls_ad_disable = module.params['vpls_ad_disable']
if vpls_ad_disable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<vplsAdDisable></vplsAdDisable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<vplsAdDisable>(.*)</vplsAdDisable>.*', recv_xml)
if re_find:
result["vpls_ad_disable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != vpls_ad_disable:
need_cfg = True
else:
need_cfg = True
update_pkt_standard_compatible = module.params[
'update_pkt_standard_compatible']
if update_pkt_standard_compatible != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<updatePktStandardCompatible></updatePktStandardCompatible>" + \
CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<updatePktStandardCompatible>(.*)</updatePktStandardCompatible>.*', recv_xml)
if re_find:
result["update_pkt_standard_compatible"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != update_pkt_standard_compatible:
need_cfg = True
else:
need_cfg = True
result["need_cfg"] = need_cfg
return result
def merge_bgp_peer_af(self, **kwargs):
    """Merge (update) an existing BGP peer address family on the device.

    Sends a NETCONF <merge> request built from vrf_name/af_type/
    remote_address and returns the equivalent CLI command list for the
    module's ``updates`` output.

    Raises (via module.fail_json) when the device does not answer <ok/>.
    """
    module = kwargs["module"]
    vrf_name = module.params['vrf_name']
    af_type = module.params['af_type']
    remote_address = module.params['remote_address']

    conf_str = CE_MERGE_BGP_PEER_AF_HEADER % (
        vrf_name, af_type, remote_address) + CE_MERGE_BGP_PEER_AF_TAIL

    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Merge bgp peer address family failed.')

    cmds = []
    # Bugfix: the old if/elif chain left ``cmd`` unbound for the other
    # af_type choices (ipv4vpn, ipv6vpn, evpn) and raised NameError.
    # For unmapped types we simply omit the address-family view command.
    view_cmd = {"ipv4uni": "ipv4-family unicast",
                "ipv4multi": "ipv4-family multicast",
                "ipv6uni": "ipv6-family unicast"}.get(af_type)
    if view_cmd:
        cmds.append(view_cmd)
    cmds.append("peer %s" % remote_address)

    return cmds
def create_bgp_peer_af(self, **kwargs):
    """Create a BGP peer address family on the device via NETCONF.

    Returns the equivalent CLI command list for the module's ``updates``
    output; fails the module when the device does not answer <ok/>.
    """
    module = kwargs["module"]
    vrf_name = module.params['vrf_name']
    af_type = module.params['af_type']
    remote_address = module.params['remote_address']

    conf_str = CE_CREATE_BGP_PEER_AF % (vrf_name, af_type, remote_address)

    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Create bgp peer address family failed.')

    cmds = []
    # Bugfix: the old if/elif chain left ``cmd`` unbound for the other
    # af_type choices (ipv4vpn, ipv6vpn, evpn) and raised NameError.
    view_cmd = {"ipv4uni": "ipv4-family unicast",
                "ipv4multi": "ipv4-family multicast",
                "ipv6uni": "ipv6-family unicast"}.get(af_type)
    if view_cmd:
        cmds.append(view_cmd)
    cmds.append("peer %s" % remote_address)

    return cmds
def delete_bgp_peer_af(self, **kwargs):
    """Delete a BGP peer address family on the device via NETCONF.

    Returns the equivalent CLI command list (``undo peer ...``) for the
    module's ``updates`` output; fails the module when the device does not
    answer <ok/>.
    """
    module = kwargs["module"]
    vrf_name = module.params['vrf_name']
    af_type = module.params['af_type']
    remote_address = module.params['remote_address']

    conf_str = CE_DELETE_BGP_PEER_AF % (vrf_name, af_type, remote_address)

    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Delete bgp peer address family failed.')

    cmds = []
    # Bugfix: the old if/elif chain left ``cmd`` unbound for the other
    # af_type choices (ipv4vpn, ipv6vpn, evpn) and raised NameError.
    view_cmd = {"ipv4uni": "ipv4-family unicast",
                "ipv4multi": "ipv4-family multicast",
                "ipv6uni": "ipv6-family unicast"}.get(af_type)
    if view_cmd:
        cmds.append(view_cmd)
    cmds.append("undo peer %s" % remote_address)

    return cmds
def merge_bgp_peer_af_other(self, **kwargs):
    """Merge the optional per-peer address-family settings.

    Builds one NETCONF <merge> request from every module parameter that was
    explicitly set (tri-state flags are considered set when not 'no_use',
    value parameters when truthy), applies it in a single call, and returns
    the equivalent CLI commands for the module's ``updates`` output.

    Bugfix: the advertise_irb and advertise_arp comparisons used the
    misspelling "ture", so the positive form of those commands was never
    reported and "undo peer ..." was shown even when enabling the feature.
    The XML sent to the device was unaffected.
    """
    module = kwargs["module"]
    vrf_name = module.params['vrf_name']
    af_type = module.params['af_type']
    remote_address = module.params['remote_address']

    conf_str = CE_MERGE_BGP_PEER_AF_HEADER % (
        vrf_name, af_type, remote_address)

    cmds = []

    advertise_irb = module.params['advertise_irb']
    if advertise_irb != 'no_use':
        conf_str += "<advertiseIrb>%s</advertiseIrb>" % advertise_irb
        if advertise_irb == "true":  # was "ture" (typo) - see docstring
            cmd = "peer %s advertise irb" % remote_address
        else:
            cmd = "undo peer %s advertise irb" % remote_address
        cmds.append(cmd)

    advertise_arp = module.params['advertise_arp']
    if advertise_arp != 'no_use':
        conf_str += "<advertiseArp>%s</advertiseArp>" % advertise_arp
        if advertise_arp == "true":  # was "ture" (typo) - see docstring
            cmd = "peer %s advertise arp" % remote_address
        else:
            cmd = "undo peer %s advertise arp" % remote_address
        cmds.append(cmd)

    advertise_remote_nexthop = module.params['advertise_remote_nexthop']
    if advertise_remote_nexthop != 'no_use':
        conf_str += "<advertiseRemoteNexthop>%s</advertiseRemoteNexthop>" % advertise_remote_nexthop
        if advertise_remote_nexthop == "true":
            cmd = "peer %s advertise remote-nexthop" % remote_address
        else:
            cmd = "undo peer %s advertise remote-nexthop" % remote_address
        cmds.append(cmd)

    advertise_community = module.params['advertise_community']
    if advertise_community != 'no_use':
        conf_str += "<advertiseCommunity>%s</advertiseCommunity>" % advertise_community
        if advertise_community == "true":
            cmd = "peer %s advertise-community" % remote_address
        else:
            cmd = "undo peer %s advertise-community" % remote_address
        cmds.append(cmd)

    advertise_ext_community = module.params['advertise_ext_community']
    if advertise_ext_community != 'no_use':
        conf_str += "<advertiseExtCommunity>%s</advertiseExtCommunity>" % advertise_ext_community
        if advertise_ext_community == "true":
            cmd = "peer %s advertise-ext-community" % remote_address
        else:
            cmd = "undo peer %s advertise-ext-community" % remote_address
        cmds.append(cmd)

    discard_ext_community = module.params['discard_ext_community']
    if discard_ext_community != 'no_use':
        conf_str += "<discardExtCommunity>%s</discardExtCommunity>" % discard_ext_community
        if discard_ext_community == "true":
            cmd = "peer %s discard-ext-community" % remote_address
        else:
            cmd = "undo peer %s discard-ext-community" % remote_address
        cmds.append(cmd)

    allow_as_loop_enable = module.params['allow_as_loop_enable']
    if allow_as_loop_enable != 'no_use':
        conf_str += "<allowAsLoopEnable>%s</allowAsLoopEnable>" % allow_as_loop_enable
        if allow_as_loop_enable == "true":
            cmd = "peer %s allow-as-loop" % remote_address
        else:
            cmd = "undo peer %s allow-as-loop" % remote_address
        cmds.append(cmd)

    allow_as_loop_limit = module.params['allow_as_loop_limit']
    if allow_as_loop_limit:
        conf_str += "<allowAsLoopLimit>%s</allowAsLoopLimit>" % allow_as_loop_limit
        # CLI form depends on whether allow-as-loop itself is enabled
        if allow_as_loop_enable == "true":
            cmd = "peer %s allow-as-loop %s" % (remote_address, allow_as_loop_limit)
        else:
            cmd = "undo peer %s allow-as-loop" % remote_address
        cmds.append(cmd)

    keep_all_routes = module.params['keep_all_routes']
    if keep_all_routes != 'no_use':
        conf_str += "<keepAllRoutes>%s</keepAllRoutes>" % keep_all_routes
        if keep_all_routes == "true":
            cmd = "peer %s keep-all-routes" % remote_address
        else:
            cmd = "undo peer %s keep-all-routes" % remote_address
        cmds.append(cmd)

    nexthop_configure = module.params['nexthop_configure']
    if nexthop_configure:
        conf_str += "<nextHopConfigure>%s</nextHopConfigure>" % nexthop_configure
        # 'null' is a valid choice but has no CLI equivalent, so no command
        if nexthop_configure == "local":
            cmd = "peer %s next-hop-local" % remote_address
            cmds.append(cmd)
        elif nexthop_configure == "invariable":
            cmd = "peer %s next-hop-invariable" % remote_address
            cmds.append(cmd)

    preferred_value = module.params['preferred_value']
    if preferred_value:
        conf_str += "<preferredValue>%s</preferredValue>" % preferred_value
        cmd = "peer %s preferred-value %s" % (remote_address, preferred_value)
        cmds.append(cmd)

    public_as_only = module.params['public_as_only']
    if public_as_only != 'no_use':
        conf_str += "<publicAsOnly>%s</publicAsOnly>" % public_as_only
        if public_as_only == "true":
            cmd = "peer %s public-as-only" % remote_address
        else:
            cmd = "undo peer %s public-as-only" % remote_address
        cmds.append(cmd)

    public_as_only_force = module.params['public_as_only_force']
    if public_as_only_force != 'no_use':
        conf_str += "<publicAsOnlyForce>%s</publicAsOnlyForce>" % public_as_only_force
        if public_as_only_force == "true":
            cmd = "peer %s public-as-only force" % remote_address
        else:
            cmd = "undo peer %s public-as-only force" % remote_address
        cmds.append(cmd)

    public_as_only_limited = module.params['public_as_only_limited']
    if public_as_only_limited != 'no_use':
        conf_str += "<publicAsOnlyLimited>%s</publicAsOnlyLimited>" % public_as_only_limited
        if public_as_only_limited == "true":
            cmd = "peer %s public-as-only limited" % remote_address
        else:
            cmd = "undo peer %s public-as-only limited" % remote_address
        cmds.append(cmd)

    public_as_only_replace = module.params['public_as_only_replace']
    if public_as_only_replace != 'no_use':
        conf_str += "<publicAsOnlyReplace>%s</publicAsOnlyReplace>" % public_as_only_replace
        if public_as_only_replace == "true":
            cmd = "peer %s public-as-only force replace" % remote_address
        else:
            cmd = "undo peer %s public-as-only force replace" % remote_address
        cmds.append(cmd)

    public_as_only_skip_peer_as = module.params[
        'public_as_only_skip_peer_as']
    if public_as_only_skip_peer_as != 'no_use':
        conf_str += "<publicAsOnlySkipPeerAs>%s</publicAsOnlySkipPeerAs>" % public_as_only_skip_peer_as
        if public_as_only_skip_peer_as == "true":
            cmd = "peer %s public-as-only force include-peer-as" % remote_address
        else:
            cmd = "undo peer %s public-as-only force include-peer-as" % remote_address
        cmds.append(cmd)

    route_limit = module.params['route_limit']
    if route_limit:
        conf_str += "<routeLimit>%s</routeLimit>" % route_limit
        cmd = "peer %s route-limit %s" % (remote_address, route_limit)
        cmds.append(cmd)

    route_limit_percent = module.params['route_limit_percent']
    if route_limit_percent:
        conf_str += "<routeLimitPercent>%s</routeLimitPercent>" % route_limit_percent
        cmd = "peer %s route-limit %s %s" % (remote_address, route_limit, route_limit_percent)
        cmds.append(cmd)

    route_limit_type = module.params['route_limit_type']
    if route_limit_type:
        conf_str += "<routeLimitType>%s</routeLimitType>" % route_limit_type
        if route_limit_type == "alertOnly":
            cmd = "peer %s route-limit %s %s alert-only" % (remote_address, route_limit, route_limit_percent)
            cmds.append(cmd)
        elif route_limit_type == "idleForever":
            cmd = "peer %s route-limit %s %s idle-forever" % (remote_address, route_limit, route_limit_percent)
            cmds.append(cmd)
        elif route_limit_type == "idleTimeout":
            cmd = "peer %s route-limit %s %s idle-timeout" % (remote_address, route_limit, route_limit_percent)
            cmds.append(cmd)

    route_limit_idle_timeout = module.params['route_limit_idle_timeout']
    if route_limit_idle_timeout:
        conf_str += "<routeLimitIdleTimeout>%s</routeLimitIdleTimeout>" % route_limit_idle_timeout
        cmd = "peer %s route-limit %s %s idle-timeout %s" % (remote_address, route_limit,
                                                            route_limit_percent, route_limit_idle_timeout)
        cmds.append(cmd)

    rt_updt_interval = module.params['rt_updt_interval']
    if rt_updt_interval:
        conf_str += "<rtUpdtInterval>%s</rtUpdtInterval>" % rt_updt_interval
        cmd = "peer %s route-update-interval %s" % (remote_address, rt_updt_interval)
        cmds.append(cmd)

    # redirect settings only affect the XML payload; no CLI text is emitted
    redirect_ip = module.params['redirect_ip']
    if redirect_ip != 'no_use':
        conf_str += "<redirectIP>%s</redirectIP>" % redirect_ip

    redirect_ip_vaildation = module.params['redirect_ip_vaildation']
    if redirect_ip_vaildation != 'no_use':
        conf_str += "<redirectIPVaildation>%s</redirectIPVaildation>" % redirect_ip_vaildation

    reflect_client = module.params['reflect_client']
    if reflect_client != 'no_use':
        conf_str += "<reflectClient>%s</reflectClient>" % reflect_client
        if reflect_client == "true":
            cmd = "peer %s reflect-client" % remote_address
        else:
            cmd = "undo peer %s reflect-client" % remote_address
        cmds.append(cmd)

    substitute_as_enable = module.params['substitute_as_enable']
    if substitute_as_enable != 'no_use':
        conf_str += "<substituteAsEnable>%s</substituteAsEnable>" % substitute_as_enable

    import_rt_policy_name = module.params['import_rt_policy_name']
    if import_rt_policy_name:
        conf_str += "<importRtPolicyName>%s</importRtPolicyName>" % import_rt_policy_name
        cmd = "peer %s route-policy %s import" % (remote_address, import_rt_policy_name)
        cmds.append(cmd)

    export_rt_policy_name = module.params['export_rt_policy_name']
    if export_rt_policy_name:
        conf_str += "<exportRtPolicyName>%s</exportRtPolicyName>" % export_rt_policy_name
        cmd = "peer %s route-policy %s export" % (remote_address, export_rt_policy_name)
        cmds.append(cmd)

    import_pref_filt_name = module.params['import_pref_filt_name']
    if import_pref_filt_name:
        conf_str += "<importPrefFiltName>%s</importPrefFiltName>" % import_pref_filt_name
        cmd = "peer %s filter-policy %s import" % (remote_address, import_pref_filt_name)
        cmds.append(cmd)

    export_pref_filt_name = module.params['export_pref_filt_name']
    if export_pref_filt_name:
        conf_str += "<exportPrefFiltName>%s</exportPrefFiltName>" % export_pref_filt_name
        cmd = "peer %s filter-policy %s export" % (remote_address, export_pref_filt_name)
        cmds.append(cmd)

    import_as_path_filter = module.params['import_as_path_filter']
    if import_as_path_filter:
        conf_str += "<importAsPathFilter>%s</importAsPathFilter>" % import_as_path_filter
        cmd = "peer %s as-path-filter %s import" % (remote_address, import_as_path_filter)
        cmds.append(cmd)

    export_as_path_filter = module.params['export_as_path_filter']
    if export_as_path_filter:
        conf_str += "<exportAsPathFilter>%s</exportAsPathFilter>" % export_as_path_filter
        cmd = "peer %s as-path-filter %s export" % (remote_address, export_as_path_filter)
        cmds.append(cmd)

    import_as_path_name_or_num = module.params[
        'import_as_path_name_or_num']
    if import_as_path_name_or_num:
        conf_str += "<importAsPathNameOrNum>%s</importAsPathNameOrNum>" % import_as_path_name_or_num
        cmd = "peer %s as-path-filter %s import" % (remote_address, import_as_path_name_or_num)
        cmds.append(cmd)

    export_as_path_name_or_num = module.params[
        'export_as_path_name_or_num']
    if export_as_path_name_or_num:
        conf_str += "<exportAsPathNameOrNum>%s</exportAsPathNameOrNum>" % export_as_path_name_or_num
        cmd = "peer %s as-path-filter %s export" % (remote_address, export_as_path_name_or_num)
        cmds.append(cmd)

    import_acl_name_or_num = module.params['import_acl_name_or_num']
    if import_acl_name_or_num:
        conf_str += "<importAclNameOrNum>%s</importAclNameOrNum>" % import_acl_name_or_num
        cmd = "peer %s filter-policy %s import" % (remote_address, import_acl_name_or_num)
        cmds.append(cmd)

    export_acl_name_or_num = module.params['export_acl_name_or_num']
    if export_acl_name_or_num:
        conf_str += "<exportAclNameOrNum>%s</exportAclNameOrNum>" % export_acl_name_or_num
        cmd = "peer %s filter-policy %s export" % (remote_address, export_acl_name_or_num)
        cmds.append(cmd)

    ipprefix_orf_enable = module.params['ipprefix_orf_enable']
    if ipprefix_orf_enable != 'no_use':
        conf_str += "<ipprefixOrfEnable>%s</ipprefixOrfEnable>" % ipprefix_orf_enable
        if ipprefix_orf_enable == "true":
            cmd = "peer %s capability-advertise orf ip-prefix" % remote_address
        else:
            cmd = "undo peer %s capability-advertise orf ip-prefix" % remote_address
        cmds.append(cmd)

    is_nonstd_ipprefix_mod = module.params['is_nonstd_ipprefix_mod']
    if is_nonstd_ipprefix_mod != 'no_use':
        conf_str += "<isNonstdIpprefixMod>%s</isNonstdIpprefixMod>" % is_nonstd_ipprefix_mod
        # CLI wording differs for standard vs non-standard-compatible ORF
        if is_nonstd_ipprefix_mod == "true":
            if ipprefix_orf_enable == "true":
                cmd = "peer %s capability-advertise orf non-standard-compatible" % remote_address
            else:
                cmd = "undo peer %s capability-advertise orf non-standard-compatible" % remote_address
            cmds.append(cmd)
        else:
            if ipprefix_orf_enable == "true":
                cmd = "peer %s capability-advertise orf" % remote_address
            else:
                cmd = "undo peer %s capability-advertise orf" % remote_address
            cmds.append(cmd)

    orftype = module.params['orftype']
    if orftype:
        conf_str += "<orftype>%s</orftype>" % orftype

    orf_mode = module.params['orf_mode']
    if orf_mode:
        conf_str += "<orfMode>%s</orfMode>" % orf_mode
        if ipprefix_orf_enable == "true":
            cmd = "peer %s capability-advertise orf ip-prefix %s" % (remote_address, orf_mode)
        else:
            cmd = "undo peer %s capability-advertise orf ip-prefix %s" % (remote_address, orf_mode)
        cmds.append(cmd)

    soostring = module.params['soostring']
    if soostring:
        conf_str += "<soostring>%s</soostring>" % soostring
        cmd = "peer %s soo %s" % (remote_address, soostring)
        cmds.append(cmd)

    # The default-route-advertise options compose into one CLI command,
    # accumulated in ``cmd`` and appended once at the end.
    cmd = ""
    default_rt_adv_enable = module.params['default_rt_adv_enable']
    if default_rt_adv_enable != 'no_use':
        conf_str += "<defaultRtAdvEnable>%s</defaultRtAdvEnable>" % default_rt_adv_enable
        if default_rt_adv_enable == "true":
            cmd += "peer %s default-route-advertise" % remote_address
        else:
            cmd += "undo peer %s default-route-advertise" % remote_address

    default_rt_adv_policy = module.params['default_rt_adv_policy']
    if default_rt_adv_policy:
        conf_str += "<defaultRtAdvPolicy>%s</defaultRtAdvPolicy>" % default_rt_adv_policy
        cmd += " route-policy %s" % default_rt_adv_policy

    default_rt_match_mode = module.params['default_rt_match_mode']
    if default_rt_match_mode:
        conf_str += "<defaultRtMatchMode>%s</defaultRtMatchMode>" % default_rt_match_mode
        if default_rt_match_mode == "matchall":
            cmd += " conditional-route-match-all"
        elif default_rt_match_mode == "matchany":
            cmd += " conditional-route-match-any"

    if cmd:
        cmds.append(cmd)

    # The remaining parameters only affect the XML payload
    add_path_mode = module.params['add_path_mode']
    if add_path_mode:
        conf_str += "<addPathMode>%s</addPathMode>" % add_path_mode

    adv_add_path_num = module.params['adv_add_path_num']
    if adv_add_path_num:
        conf_str += "<advAddPathNum>%s</advAddPathNum>" % adv_add_path_num

    origin_as_valid = module.params['origin_as_valid']
    if origin_as_valid != 'no_use':
        conf_str += "<originAsValid>%s</originAsValid>" % origin_as_valid

    vpls_enable = module.params['vpls_enable']
    if vpls_enable != 'no_use':
        conf_str += "<vplsEnable>%s</vplsEnable>" % vpls_enable

    vpls_ad_disable = module.params['vpls_ad_disable']
    if vpls_ad_disable != 'no_use':
        conf_str += "<vplsAdDisable>%s</vplsAdDisable>" % vpls_ad_disable

    update_pkt_standard_compatible = module.params[
        'update_pkt_standard_compatible']
    if update_pkt_standard_compatible != 'no_use':
        conf_str += "<updatePktStandardCompatible>%s</updatePktStandardCompatible>" % update_pkt_standard_compatible

    conf_str += CE_MERGE_BGP_PEER_AF_TAIL
    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Merge bgp peer address family other failed.')

    return cmds
def main():
    """Entry point: drive the BGP peer address-family workflow.

    Parses the module arguments, snapshots the existing device state,
    applies create/merge/delete operations as needed, re-reads the end
    state and exits with proposed/existing/changed/end_state/updates.
    """
    argument_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        vrf_name=dict(type='str', required=True),
        af_type=dict(choices=['ipv4uni', 'ipv4multi', 'ipv4vpn',
                              'ipv6uni', 'ipv6vpn', 'evpn'], required=True),
        remote_address=dict(type='str', required=True),
        advertise_irb=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        advertise_arp=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        advertise_remote_nexthop=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        advertise_community=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        advertise_ext_community=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        discard_ext_community=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        allow_as_loop_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        allow_as_loop_limit=dict(type='str'),
        keep_all_routes=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        nexthop_configure=dict(choices=['null', 'local', 'invariable']),
        preferred_value=dict(type='str'),
        public_as_only=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        public_as_only_force=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        public_as_only_limited=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        public_as_only_replace=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        public_as_only_skip_peer_as=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        route_limit=dict(type='str'),
        route_limit_percent=dict(type='str'),
        route_limit_type=dict(
            choices=['noparameter', 'alertOnly', 'idleForever', 'idleTimeout']),
        route_limit_idle_timeout=dict(type='str'),
        rt_updt_interval=dict(type='str'),
        redirect_ip=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        redirect_ip_vaildation=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        reflect_client=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        substitute_as_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        import_rt_policy_name=dict(type='str'),
        export_rt_policy_name=dict(type='str'),
        import_pref_filt_name=dict(type='str'),
        export_pref_filt_name=dict(type='str'),
        import_as_path_filter=dict(type='str'),
        export_as_path_filter=dict(type='str'),
        import_as_path_name_or_num=dict(type='str'),
        export_as_path_name_or_num=dict(type='str'),
        import_acl_name_or_num=dict(type='str'),
        export_acl_name_or_num=dict(type='str'),
        ipprefix_orf_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        is_nonstd_ipprefix_mod=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        orftype=dict(type='str'),
        orf_mode=dict(choices=['null', 'receive', 'send', 'both']),
        soostring=dict(type='str'),
        default_rt_adv_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        default_rt_adv_policy=dict(type='str'),
        default_rt_match_mode=dict(choices=['null', 'matchall', 'matchany']),
        add_path_mode=dict(choices=['null', 'receive', 'send', 'both']),
        adv_add_path_num=dict(type='str'),
        origin_as_valid=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        vpls_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        vpls_ad_disable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        update_pkt_standard_compatible=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']))

    argument_spec.update(ce_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    changed = False
    proposed = dict()
    existing = dict()
    end_state = dict()
    updates = []

    state = module.params['state']

    ce_bgp_peer_af_obj = BgpNeighborAf()

    # Tri-state flags use the sentinel 'no_use' to mean "not set"; every
    # other parameter counts as set when it is truthy.
    tristate_flags = frozenset([
        'advertise_irb', 'advertise_arp', 'advertise_remote_nexthop',
        'advertise_community', 'advertise_ext_community',
        'discard_ext_community', 'allow_as_loop_enable', 'keep_all_routes',
        'public_as_only', 'public_as_only_force', 'public_as_only_limited',
        'public_as_only_replace', 'public_as_only_skip_peer_as',
        'redirect_ip', 'redirect_ip_vaildation', 'reflect_client',
        'substitute_as_enable', 'ipprefix_orf_enable',
        'is_nonstd_ipprefix_mod', 'default_rt_adv_enable',
        'origin_as_valid', 'vpls_enable', 'vpls_ad_disable',
        'update_pkt_standard_compatible'])

    # Order matters: ``proposed`` keys must appear in the documented order.
    ordered_params = (
        'vrf_name', 'af_type', 'remote_address', 'advertise_irb',
        'advertise_arp', 'advertise_remote_nexthop', 'advertise_community',
        'advertise_ext_community', 'discard_ext_community',
        'allow_as_loop_enable', 'allow_as_loop_limit', 'keep_all_routes',
        'nexthop_configure', 'preferred_value', 'public_as_only',
        'public_as_only_force', 'public_as_only_limited',
        'public_as_only_replace', 'public_as_only_skip_peer_as',
        'route_limit', 'route_limit_percent', 'route_limit_type',
        'route_limit_idle_timeout', 'rt_updt_interval', 'redirect_ip',
        'redirect_ip_vaildation', 'reflect_client', 'substitute_as_enable',
        'import_rt_policy_name', 'export_rt_policy_name',
        'import_pref_filt_name', 'export_pref_filt_name',
        'import_as_path_filter', 'export_as_path_filter',
        'import_as_path_name_or_num', 'export_as_path_name_or_num',
        'import_acl_name_or_num', 'export_acl_name_or_num',
        'ipprefix_orf_enable', 'is_nonstd_ipprefix_mod', 'orftype',
        'orf_mode', 'soostring', 'default_rt_adv_enable',
        'default_rt_adv_policy', 'default_rt_match_mode', 'add_path_mode',
        'adv_add_path_num', 'origin_as_valid', 'vpls_enable',
        'vpls_ad_disable', 'update_pkt_standard_compatible')

    # get proposed
    proposed["state"] = state
    for name in ordered_params:
        value = module.params[name]
        if name in tristate_flags:
            if value != 'no_use':
                proposed[name] = value
        elif value:
            proposed[name] = value

    if not ce_bgp_peer_af_obj:
        module.fail_json(msg='Error: Init module failed.')

    bgp_peer_af_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_args(
        module=module)
    bgp_peer_af_other_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_other(
        module=module)

    # state exist bgp peer address family config
    exist_tmp = {key: val for key, val in bgp_peer_af_rst.items()
                 if key != "need_cfg"}
    if exist_tmp:
        existing["bgp neighbor af"] = exist_tmp

    # state exist bgp peer address family other config
    exist_tmp = {key: val for key, val in bgp_peer_af_other_rst.items()
                 if key != "need_cfg"}
    if exist_tmp:
        existing["bgp neighbor af other"] = exist_tmp

    if state == "present":
        if bgp_peer_af_rst["need_cfg"]:
            # a reported remote_address means the peer AF already exists
            if "remote_address" in bgp_peer_af_rst.keys():
                cmd = ce_bgp_peer_af_obj.merge_bgp_peer_af(module=module)
            else:
                cmd = ce_bgp_peer_af_obj.create_bgp_peer_af(module=module)
            changed = True
            updates.extend(cmd)

        if bgp_peer_af_other_rst["need_cfg"]:
            cmd = ce_bgp_peer_af_obj.merge_bgp_peer_af_other(module=module)
            changed = True
            updates.extend(cmd)
    else:
        if bgp_peer_af_rst["need_cfg"]:
            cmd = ce_bgp_peer_af_obj.delete_bgp_peer_af(module=module)
            changed = True
            updates.extend(cmd)

        if bgp_peer_af_other_rst["need_cfg"]:
            # no per-option removal is implemented for the "other" settings
            pass

    # state end bgp peer address family config
    bgp_peer_af_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_args(
        module=module)
    end_tmp = {key: val for key, val in bgp_peer_af_rst.items()
               if key != "need_cfg"}
    if end_tmp:
        end_state["bgp neighbor af"] = end_tmp

    # state end bgp peer address family other config
    bgp_peer_af_other_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_other(
        module=module)
    end_tmp = {key: val for key, val in bgp_peer_af_other_rst.items()
               if key != "need_cfg"}
    if end_tmp:
        end_state["bgp neighbor af other"] = end_tmp

    module.exit_json(proposed=proposed, existing=existing, changed=changed,
                     end_state=end_state, updates=updates)
# Run the module only when executed directly (standard Ansible entry point).
if __name__ == '__main__':
    main()
from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.exceptions import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
    """Fit an SVC on a toy dataset and return held-out predictions.

    If *binary* is True the task is restricted to the first two classes,
    turning the multiclass problem into a binary one.

    Returns
    -------
    y_true : ground-truth labels of the held-out half
    y_pred : hard label predictions for the held-out half
    probas_pred : predicted probabilities (only the positive-class column
        when *binary* is True)
    """
    if dataset is None:
        # import some data to play with
        dataset = datasets.load_iris()

    X = dataset.data
    y = dataset.target

    if binary:
        # restrict to a binary classification task
        mask = y < 2
        X, y = X[mask], y[mask]

    n_samples, n_features = X.shape

    # deterministic shuffle of the samples
    shuffler = check_random_state(37)
    perm = np.arange(n_samples)
    shuffler.shuffle(perm)
    X, y = X[perm], y[perm]
    half = n_samples // 2

    # add noisy features to make the problem harder and avoid perfect results
    noise_rng = np.random.RandomState(0)
    X = np.hstack([X, noise_rng.randn(n_samples, 200 * n_features)])

    # run classifier on the second half, get class probabilities and labels
    clf = svm.SVC(kernel='linear', probability=True, random_state=0)
    clf.fit(X[:half], y[:half])
    probas_pred = clf.predict_proba(X[half:])

    if binary:
        # only interested in probabilities of the positive case
        # XXX: do we really want a special API for the binary case?
        probas_pred = probas_pred[:, 1]

    y_pred = clf.predict(X[half:])
    y_true = y[half:]
    return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`.
Note that this implementation fails on some edge cases.
For example, for constant predictions e.g. [0.5, 0.5, 0.5],
y_true = [1, 0, 0] returns an average precision of 0.33...
but y_true = [0, 0, 1] returns 1.0.
"""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def _average_precision_slow(y_true, y_score):
    """A second alternative implementation of average precision that closely
    follows the Wikipedia article's definition (see References). This should
    give identical results as `average_precision_score` for all inputs.

    References
    ----------
    .. [1] `Wikipedia entry for the Average precision
       <http://en.wikipedia.org/wiki/Average_precision>`_
    """
    precision, recall, threshold = precision_recall_curve(y_true, y_score)
    # precision_recall_curve walks the thresholds from high to low; flip
    # both axes so recall is increasing before integrating.
    precision = list(reversed(precision))
    recall = list(reversed(recall))

    # Rectangle rule: sum precision[i] over each recall increment.
    average_precision = 0
    for prev_rec, cur_rec, cur_prec in zip(recall[:-1], recall[1:],
                                           precision[1:]):
        average_precision += cur_prec * (cur_rec - prev_rec)
    return average_precision
def test_roc_curve():
    # Test Area under Receiver Operating Characteristic (ROC) curve
    y_true, _, probas_pred = make_prediction(binary=True)
    expected_auc = _auc(y_true, probas_pred)

    # The AUC must not depend on whether suboptimal/collinear thresholds
    # are dropped from the returned curve.
    for drop in [True, False]:
        fpr, tpr, thresholds = roc_curve(y_true, probas_pred,
                                         drop_intermediate=drop)
        roc_auc = auc(fpr, tpr)
        assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
        assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
        # fpr, tpr and thresholds always have matching lengths
        assert_equal(fpr.shape, tpr.shape)
        assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # The returned FPR axis must span the full [0, 1] range even for
    # coarse integer scores with many ties (corner case for
    # drop_intermediate=True).
    rng = np.random.RandomState(0)
    y_true = np.repeat([0, 1], 50)
    y_pred = rng.randint(3, size=100)
    fpr, tpr, thr = roc_curve(y_true, y_pred, drop_intermediate=True)
    assert_equal(fpr[0], 0)
    assert_equal(fpr[-1], 1)
    for other in (tpr, thr):
        assert_equal(fpr.shape, other.shape)
def test_roc_returns_consistency():
    # Test whether the returned threshold matches up with tpr
    # make small toy dataset
    y_true, _, probas_pred = make_prediction(binary=True)
    fpr, tpr, thresholds = roc_curve(y_true, probas_pred)

    # use the given thresholds to recompute the tpr by hand
    tpr_correct = []
    for t in thresholds:
        tp = np.sum((probas_pred >= t) & y_true)
        p = np.sum(y_true)
        tpr_correct.append(1.0 * tp / p)

    # compare tpr and tpr_correct to see if the thresholds' order was correct
    assert_array_almost_equal(tpr, tpr_correct, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_multi():
    # roc_curve not applicable for multi-class problems: it is strictly a
    # binary metric, so multiclass targets must raise ValueError.
    y_true, _, probas_pred = make_prediction(binary=False)

    assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
    # roc_curve for confidence scores: scores need not be probabilities,
    # shifting them by -0.5 (into [-0.5, 0.5]) must still give the same
    # ranking and hence a valid curve.
    y_true, _, probas_pred = make_prediction(binary=True)

    fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.90, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
    # roc_curve for hard decisions (0/1 label predictions instead of scores)
    y_true, pred, probas_pred = make_prediction(binary=True)

    # always predict one: constant scores carry no ranking information,
    # so the AUC is at chance level
    trivial_pred = np.ones(y_true.shape)
    fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.50, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)

    # always predict zero: same chance-level behaviour
    trivial_pred = np.zeros(y_true.shape)
    fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.50, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)

    # hard decisions from the actual classifier
    fpr, tpr, thresholds = roc_curve(y_true, pred)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.78, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
    # With a single class present in y_true, one of the two rates is
    # undefined (division by zero) and must come back as NaN with a
    # warning, not as an exception.
    y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
    # assert there are warnings
    w = UndefinedMetricWarning
    fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
    # all true labels, all fpr should be nan
    assert_array_equal(fpr,
                       np.nan * np.ones(len(thresholds)))
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)

    # assert there are warnings
    fpr, tpr, thresholds = assert_warns(w, roc_curve,
                                        [1 - x for x in y_true],
                                        y_pred)
    # all negative labels, all tpr should be nan
    assert_array_equal(tpr,
                       np.nan * np.ones(len(thresholds)))
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
    """Hand-checked ROC curves and AUC values on tiny, degenerate inputs.

    Fix: ``roc_curve`` returns ``(fpr, tpr, thresholds)`` but this test
    previously unpacked the first two returns as ``tpr, fpr, _`` — the
    variable names were swapped relative to their contents.  The expected
    arrays were attached to the correct *positional* return, so the
    assertions were numerically right but misleading to read.  The names
    now match the API; every assertion checks the same value as before.
    """
    # Binary classification: perfect ranking
    y_true = [0, 1]
    y_score = [0, 1]
    fpr, tpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(fpr, [0, 1])
    assert_array_almost_equal(tpr, [1, 1])
    assert_almost_equal(roc_auc, 1.)

    # perfectly wrong ranking
    y_true = [0, 1]
    y_score = [1, 0]
    fpr, tpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(fpr, [0, 1, 1])
    assert_array_almost_equal(tpr, [0, 0, 1])
    assert_almost_equal(roc_auc, 0.)

    # constant scores: chance-level AUC
    y_true = [1, 0]
    y_score = [1, 1]
    fpr, tpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(fpr, [0, 1])
    assert_array_almost_equal(tpr, [0, 1])
    assert_almost_equal(roc_auc, 0.5)

    y_true = [1, 0]
    y_score = [1, 0]
    fpr, tpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(fpr, [0, 1])
    assert_array_almost_equal(tpr, [1, 1])
    assert_almost_equal(roc_auc, 1.)

    y_true = [1, 0]
    y_score = [0.5, 0.5]
    fpr, tpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(fpr, [0, 1])
    assert_array_almost_equal(tpr, [0, 1])
    assert_almost_equal(roc_auc, .5)

    y_true = [0, 0]
    y_score = [0.25, 0.75]
    # assert UndefinedMetricWarning because of no positive sample in y_true
    fpr, tpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
    assert_raises(ValueError, roc_auc_score, y_true, y_score)
    assert_array_almost_equal(fpr, [0., 0.5, 1.])
    assert_array_almost_equal(tpr, [np.nan, np.nan, np.nan])

    y_true = [1, 1]
    y_score = [0.25, 0.75]
    # assert UndefinedMetricWarning because of no negative sample in y_true
    fpr, tpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
    assert_raises(ValueError, roc_auc_score, y_true, y_score)
    assert_array_almost_equal(fpr, [np.nan, np.nan])
    assert_array_almost_equal(tpr, [0.5, 1.])

    # Multi-label classification task
    y_true = np.array([[0, 1], [0, 1]])
    y_score = np.array([[0, 1], [0, 1]])
    # macro/weighted averaging fail when a column holds a single class
    assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
    assert_raises(ValueError, roc_auc_score, y_true, y_score,
                  average="weighted")
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)

    y_true = np.array([[0, 1], [0, 1]])
    y_score = np.array([[0, 1], [1, 0]])
    assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
    assert_raises(ValueError, roc_auc_score, y_true, y_score,
                  average="weighted")
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)

    y_true = np.array([[1, 0], [0, 1]])
    y_score = np.array([[0, 1], [1, 0]])
    assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)

    y_true = np.array([[1, 0], [0, 1]])
    y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
    assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_roc_curve_drop_intermediate():
    # Test that drop_intermediate drops the correct thresholds.
    # Fix: unpacking now follows roc_curve's actual return order
    # (fpr, tpr, thresholds); the names were previously swapped, although
    # only `thresholds` (correctly bound) is asserted here.
    y_true = [0, 0, 0, 0, 1, 1]
    y_score = [0., 0.2, 0.5, 0.6, 0.7, 1.0]
    fpr, tpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
    assert_array_almost_equal(thresholds, [1., 0.7, 0.])

    # Test dropping thresholds with repeating scores
    y_true = [0, 0, 0, 0, 0, 0, 0,
              1, 1, 1, 1, 1, 1]
    y_score = [0., 0.1, 0.6, 0.6, 0.7, 0.8, 0.9,
               0.6, 0.7, 0.8, 0.9, 0.9, 1.0]
    fpr, tpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
    assert_array_almost_equal(thresholds,
                              [1.0, 0.9, 0.7, 0.6, 0.])
def test_auc():
    # Test Area Under Curve (AUC) computation on small hand-checked curves.
    # Each entry is (x, y, expected_area); x may be unsorted.
    cases = [
        ([0, 1], [0, 1], 0.5),
        ([1, 0], [0, 1], 0.5),
        ([1, 0, 0], [0, 1, 1], 0.5),
        ([0, 1], [1, 1], 1),
        ([0, 0.5, 1], [0, 0.5, 1], 0.5),
    ]
    for x, y, expected in cases:
        assert_array_almost_equal(auc(x, y), expected)
def test_auc_duplicate_values():
    # Test Area Under Curve (AUC) computation with duplicate values
    # auc() was previously sorting the x and y arrays according to the indices
    # from numpy.argsort(x), which was reordering the tied 0's in this example
    # and resulting in an incorrect area computation. This test detects the
    # error.
    x = [-2.0, 0.0, 0.0, 0.0, 1.0]
    y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
    y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
    y3 = [2.0, 1.0, 0.5, 0.0, 1.0]

    for y in (y1, y2, y3):
        # reorder=True exercises the (previously buggy) sorting path;
        # all tie orderings must integrate to the same area.
        assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
    # Every malformed (x, y) pair must raise ValueError.
    bad_inputs = [
        ([0.0, 0.5, 1.0], [0.1, 0.2]),        # incompatible shapes
        ([0.0], [0.1]),                       # too few x values
        ([1.0, 0.0, 0.5], [0.0, 0.0, 0.0]),   # x is not in order
    ]
    for x, y in bad_inputs:
        assert_raises(ValueError, auc, x, y)
def test_auc_score_non_binary_class():
    # Test that roc_auc_score function returns an error when trying
    # to compute AUC for non-binary class values.
    #
    # The identical sequence of checks was previously duplicated verbatim,
    # once outside and once inside a warning-capturing context; it is now
    # factored into a single nested helper run in both situations.
    def check_degenerate_targets():
        # Re-seed on every call so both runs see exactly the same data.
        rng = check_random_state(404)
        y_pred = rng.rand(10)
        # y_true contains only one class value
        for y_true in (np.zeros(10, dtype="int"),
                       np.ones(10, dtype="int"),
                       -np.ones(10, dtype="int")):
            assert_raise_message(ValueError, "ROC AUC score is not defined",
                                 roc_auc_score, y_true, y_pred)
        # y_true contains three different class values
        y_true = rng.randint(0, 3, size=10)
        assert_raise_message(ValueError, "multiclass format is not supported",
                             roc_auc_score, y_true, y_pred)

    check_degenerate_targets()

    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        # The same errors must be raised even while warnings are captured.
        check_degenerate_targets()
def test_binary_clf_curve():
    # precision_recall_curve is a binary metric and must reject
    # multiclass targets outright.
    rng = check_random_state(404)
    y_true = rng.randint(0, 3, size=10)
    y_pred = rng.rand(10)
    expected_msg = "multiclass format is not supported"
    assert_raise_message(ValueError, expected_msg, precision_recall_curve,
                         y_true, y_pred)
def test_precision_recall_curve():
    y_true, _, probas_pred = make_prediction(binary=True)
    _test_precision_recall_curve(y_true, probas_pred)

    # Use {-1, 1} for labels; make sure original labels aren't modified
    y_true[np.where(y_true == 0)] = -1
    y_true_copy = y_true.copy()
    _test_precision_recall_curve(y_true, probas_pred)
    assert_array_equal(y_true_copy, y_true)

    # Hand-checked curve on a tiny ranking.
    labels = [1, 0, 0, 1]
    predict_probas = [1, 2, 3, 4]
    p, r, t = precision_recall_curve(labels, predict_probas)
    assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
    assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
    assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
    # precision/recall carry one more point than thresholds
    assert_equal(p.size, r.size)
    assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
    y_true, _, probas_pred = make_prediction(binary=False)
    pos_label = 2
    # Passing pos_label explicitly must be equivalent to binarizing the
    # target (y_true == pos_label) and using the default positive label.
    p, r, thresholds = precision_recall_curve(y_true,
                                              probas_pred[:, pos_label],
                                              pos_label=pos_label)
    p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
                                                 probas_pred[:, pos_label])
    assert_array_almost_equal(p, p2)
    assert_array_almost_equal(r, r2)
    assert_array_almost_equal(thresholds, thresholds2)
    assert_equal(p.size, r.size)
    assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under PR curve
    p, r, thresholds = precision_recall_curve(y_true, probas_pred)
    precision_recall_auc = _average_precision_slow(y_true, probas_pred)
    assert_array_almost_equal(precision_recall_auc, 0.859, 3)
    assert_array_almost_equal(precision_recall_auc,
                              average_precision_score(y_true, probas_pred))
    # the simple reference implementation must agree to 3 decimals
    assert_almost_equal(_average_precision(y_true, probas_pred),
                        precision_recall_auc, decimal=3)
    assert_equal(p.size, r.size)
    assert_equal(p.size, thresholds.size + 1)
    # Smoke test in the case of proba having only one value
    p, r, thresholds = precision_recall_curve(y_true,
                                              np.zeros_like(probas_pred))
    assert_equal(p.size, r.size)
    assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
    # Non-binary labels paired with 2-d probability rows must raise.
    labels = [0, 1, 2]
    scores = [[0.0], [1.0], [1.0]]
    assert_raises(ValueError, precision_recall_curve, labels, scores)
def test_precision_recall_curve_toydata():
    """Hand-checked PR curves and average-precision values on tiny,
    partly degenerate inputs; errstate ensures no silent FP warnings."""
    with np.errstate(all="raise"):
        # Binary classification: perfect ranking
        y_true = [0, 1]
        y_score = [0, 1]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [1, 1])
        assert_array_almost_equal(r, [1, 0])
        assert_almost_equal(auc_prc, 1.)

        # perfectly wrong ranking
        y_true = [0, 1]
        y_score = [1, 0]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [0.5, 0., 1.])
        assert_array_almost_equal(r, [1., 0., 0.])
        # Here we are doing a terrible prediction: we are always getting
        # it wrong, hence the average_precision_score is the accuracy at
        # chance: 50%
        assert_almost_equal(auc_prc, 0.5)

        # constant scores
        y_true = [1, 0]
        y_score = [1, 1]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [0.5, 1])
        assert_array_almost_equal(r, [1., 0])
        assert_almost_equal(auc_prc, .5)

        y_true = [1, 0]
        y_score = [1, 0]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [1, 1])
        assert_array_almost_equal(r, [1, 0])
        assert_almost_equal(auc_prc, 1.)

        y_true = [1, 0]
        y_score = [0.5, 0.5]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [0.5, 1])
        assert_array_almost_equal(r, [1, 0.])
        assert_almost_equal(auc_prc, .5)

        # Degenerate target without any positive sample
        y_true = [0, 0]
        y_score = [0.25, 0.75]
        assert_raises(Exception, precision_recall_curve, y_true, y_score)
        assert_raises(Exception, average_precision_score, y_true, y_score)

        # Degenerate target without any negative sample
        y_true = [1, 1]
        y_score = [0.25, 0.75]
        p, r, _ = precision_recall_curve(y_true, y_score)
        assert_almost_equal(average_precision_score(y_true, y_score), 1.)
        assert_array_almost_equal(p, [1., 1., 1.])
        assert_array_almost_equal(r, [1, 0.5, 0.])

        # Multi-label classification task
        # macro / weighted averaging fail on single-class columns
        y_true = np.array([[0, 1], [0, 1]])
        y_score = np.array([[0, 1], [0, 1]])
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="macro")
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="weighted")
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="samples"), 1.)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="micro"), 1.)

        y_true = np.array([[0, 1], [0, 1]])
        y_score = np.array([[0, 1], [1, 0]])
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="macro")
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="weighted")
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="samples"), 0.75)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="micro"), 0.5)

        y_true = np.array([[1, 0], [0, 1]])
        y_score = np.array([[0, 1], [1, 0]])
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="macro"), 0.5)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="weighted"), 0.5)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="samples"), 0.5)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="micro"), 0.5)

        y_true = np.array([[1, 0], [0, 1]])
        y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="macro"), 0.5)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="weighted"), 0.5)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="samples"), 0.5)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="micro"), 0.5)
def test_average_precision_constant_values():
    # A constant predictor yields a single threshold, so the average
    # precision collapses to the positive rate of the data (the TPR).
    # Dataset with 25% positives (every 4th sample, starting at index 0)...
    y_true = np.array([1, 0, 0, 0] * 25)
    # ...and a constant score.
    y_score = np.ones(100)
    # The precision is then the fraction of positives whatever the recall
    # is, as there is only one threshold:
    assert_equal(average_precision_score(y_true, y_score), .25)
def test_score_scale_invariance():
    # Test that average_precision_score and roc_auc_score are invariant by
    # the scaling or shifting of probabilities
    # This test was expanded (added scaled_down) in response to github
    # issue #3864 (and others), where overly aggressive rounding was causing
    # problems for users with very small y_score values
    y_true, _, probas_pred = make_prediction(binary=True)

    # Both metrics depend only on the ranking of the scores, so any
    # strictly monotonic transform must leave them exactly unchanged.
    roc_auc = roc_auc_score(y_true, probas_pred)
    roc_auc_scaled_up = roc_auc_score(y_true, 100 * probas_pred)
    roc_auc_scaled_down = roc_auc_score(y_true, 1e-6 * probas_pred)
    roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
    assert_equal(roc_auc, roc_auc_scaled_up)
    assert_equal(roc_auc, roc_auc_scaled_down)
    assert_equal(roc_auc, roc_auc_shifted)

    pr_auc = average_precision_score(y_true, probas_pred)
    pr_auc_scaled_up = average_precision_score(y_true, 100 * probas_pred)
    pr_auc_scaled_down = average_precision_score(y_true, 1e-6 * probas_pred)
    pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
    assert_equal(pr_auc, pr_auc_scaled_up)
    assert_equal(pr_auc, pr_auc_scaled_down)
    assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
    # Check on several small example that it works
    # two labels
    assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
    assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
    assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)

    # three labels, increasing scores
    assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
    assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
    assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
    assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
    assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
                        (2 / 3 + 1 / 1) / 2)
    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
                        (2 / 3 + 1 / 2) / 2)

    # three labels, decreasing scores
    assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
    assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
    assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
                        (1 / 2 + 2 / 3) / 2)
    assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
    assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
                        (1 + 2 / 3) / 2)
    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
    assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)

    # three labels, middle score highest
    assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
    assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
    assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
                        (1 + 2 / 3) / 2)
    assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
    assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
                        (1 / 2 + 2 / 3) / 2)
    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
    assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)

    # Tie handling
    assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
    assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
    assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)

    assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
    assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
    assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
    assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
    assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
                        (2 / 3 + 1 / 2) / 2)
    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
                        (2 / 3 + 1 / 2) / 2)
    assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)

    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)

    assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
                        3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
    # Degenerate rows — no relevant labels or only relevant labels — must
    # score exactly 1, with or without tied scores.
    random_state = check_random_state(0)

    for n_labels in range(2, 5):
        y_score = random_state.uniform(size=(1, n_labels))
        y_score_ties = np.zeros_like(y_score)

        # No relevant labels
        y_true = np.zeros((1, n_labels))
        assert_equal(lrap_score(y_true, y_score), 1.)
        assert_equal(lrap_score(y_true, y_score_ties), 1.)

        # Only relevant labels
        y_true = np.ones((1, n_labels))
        assert_equal(lrap_score(y_true, y_score), 1.)
        assert_equal(lrap_score(y_true, y_score_ties), 1.)

    # Degenerate case: only one label
    assert_almost_equal(lrap_score([[1], [0], [1], [0]],
                                   [[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
    # Raise value error if not appropriate format
    assert_raises(ValueError, lrap_score,
                  [0, 1, 0], [0.25, 0.3, 0.2])
    assert_raises(ValueError, lrap_score, [0, 1, 2],
                  [[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # NOTE(review): (0), (1), (2) are plain ints, not tuples, so this call
    # is identical to the one above — presumably (0,), (1,), (2,) was
    # intended to exercise a different input format.
    assert_raises(ValueError, lrap_score, [(0), (1), (2)],
                  [[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])

    # Check that y_true.shape != y_score.shape raise the proper exception
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
    assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
    assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
    # NOTE(review): exact duplicate of the third shape check above.
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
    # Check tie handling in score
    # Basic check with only ties and increasing label space
    for n_labels in range(2, 10):
        y_score = np.ones((1, n_labels))

        # Check for growing number of consecutive relevant
        for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
            for pos in range(n_labels - n_relevant):
                y_true = np.zeros((1, n_labels))
                y_true[0, pos:pos + n_relevant] = 1
                # With all scores tied, LRAP reduces to the fraction of
                # relevant labels, independently of their position.
                assert_almost_equal(lrap_score(y_true, y_score),
                                    n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that Label ranking average precision works for various
    # label-space sizes with strictly decreasing (tie-free) scores.
    # Basic check with increasing label space size and decreasing score
    for n_labels in range(2, 10):
        y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)

        # First and last
        y_true = np.zeros((1, n_labels))
        y_true[0, 0] = 1
        y_true[0, -1] = 1
        assert_almost_equal(lrap_score(y_true, y_score),
                            (2 / n_labels + 1) / 2)

        # Check for growing number of consecutive relevant label
        for n_relevant in range(1, n_labels):
            # Check for a bunch of position
            for pos in range(n_labels - n_relevant):
                y_true = np.zeros((1, n_labels))
                y_true[0, pos:pos + n_relevant] = 1
                # Closed-form LRAP for a contiguous block of relevant
                # labels starting at rank `pos`.
                assert_almost_equal(lrap_score(y_true, y_score),
                                    sum((r + 1) / ((pos + r + 1) * n_relevant)
                                        for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
    """Simple implementation of label ranking average precision.

    Naive O(n_samples * n_labels^2) reference used to cross-check
    `label_ranking_average_precision_score`.
    """
    check_consistent_length(y_true, y_score)
    y_true = check_array(y_true)
    y_score = check_array(y_score)
    n_samples, n_labels = y_true.shape
    score = np.empty((n_samples, ))
    for i in range(n_samples):
        # The best rank correspond to 1. Rank higher than 1 are worse.
        # The best inverse ranking correspond to n_labels.
        unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
        n_ranks = unique_rank.size
        rank = n_ranks - inv_rank

        # Rank need to be corrected to take into account ties
        # ex: rank 1 ex aequo means that both label are rank 2.
        corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
        rank = corr_rank[rank]

        relevant = y_true[i].nonzero()[0]
        if relevant.size == 0 or relevant.size == n_labels:
            # Degenerate rows (no relevant or all relevant labels) score 1
            # by convention — see check_zero_or_all_relevant_labels.
            score[i] = 1
            continue

        score[i] = 0.
        for label in relevant:
            # Let's count the number of relevant label with better rank
            # (smaller rank).
            n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)

            # Weight by the rank of the actual label
            score[i] += n_ranked_above / rank[label]
        score[i] /= relevant.size

    # Average the per-sample precision over all samples.
    return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
                                          n_samples=20, random_state=0):
    # Compare the library implementation against the naive reference
    # implementation `_my_lrap` on random multilabel problems.
    _, y_true = make_multilabel_classification(n_features=1,
                                               allow_unlabeled=False,
                                               random_state=random_state,
                                               n_classes=n_classes,
                                               n_samples=n_samples)

    # Score with ties (the sparse random matrix leaves many tied entries)
    y_score = sparse_random_matrix(n_components=y_true.shape[0],
                                   n_features=y_true.shape[1],
                                   random_state=random_state)

    if hasattr(y_score, "toarray"):
        y_score = y_score.toarray()
    score_lrap = label_ranking_average_precision_score(y_true, y_score)
    score_my_lrap = _my_lrap(y_true, y_score)
    assert_almost_equal(score_lrap, score_my_lrap)

    # Uniform score (ties are unlikely)
    random_state = check_random_state(random_state)
    y_score = random_state.uniform(size=(n_samples, n_classes))
    score_lrap = label_ranking_average_precision_score(y_true, y_score)
    score_my_lrap = _my_lrap(y_true, y_score)
    assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
    # Nose-style generator test: every yielded (check_function, *args)
    # tuple is executed as an individual test case.
    for fn in [label_ranking_average_precision_score, _my_lrap]:
        yield check_lrap_toy, fn
        yield check_lrap_without_tie_and_increasing_score, fn
        yield check_lrap_only_ties, fn
        yield check_zero_or_all_relevant_labels, fn
    # Input validation is only checked on the public implementation.
    yield check_lrap_error_raised, label_ranking_average_precision_score

    for n_samples, n_classes, random_state in product((1, 2, 8, 20),
                                                      (2, 5, 10),
                                                      range(1)):
        yield (check_alternative_lrap_implementation,
               label_ranking_average_precision_score,
               n_classes, n_samples, random_state)
def test_coverage_error():
    # Toy case: two labels
    assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
    assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
    assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
    assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)

    # three labels, increasing scores
    assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
    assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
    assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
    assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
    assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
    assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
    assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
    assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)

    # three labels, decreasing scores
    assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
    assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
    assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
    assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
    assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
    assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
    assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
    assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)

    # three labels, middle score highest
    assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
    assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
    assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
    assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
    assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
    assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
    assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)

    # Non-trivial case: per-sample coverages averaged over samples
    assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
                                       [[0.1, 10., -3], [0, 1, 3]]),
                        (1 + 3) / 2.)

    assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
                                       [[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
                        (1 + 3 + 3) / 3.)

    assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
                                       [[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
                        (1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
    # With tied scores, every label tied with a relevant one counts toward
    # the coverage — e.g. [[1, 0]] with equal scores covers both labels.
    assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
    assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
    assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
    assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)

    assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
    assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
    assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
    assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
    assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
    assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
    assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
    assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
    """label_ranking_loss on simple, degenerate, batch and sparse inputs."""
    # Simple single-sample cases: (y_true, y_score, expected loss).
    simple_cases = [
        ([[0, 1]], [[0.25, 0.75]], 0),
        ([[0, 1]], [[0.75, 0.25]], 1),
        ([[0, 0, 1]], [[0.25, 0.5, 0.75]], 0),
        ([[0, 1, 0]], [[0.25, 0.5, 0.75]], 1 / 2),
        ([[0, 1, 1]], [[0.25, 0.5, 0.75]], 0),
        ([[1, 0, 0]], [[0.25, 0.5, 0.75]], 2 / 2),
        ([[1, 0, 1]], [[0.25, 0.5, 0.75]], 1 / 2),
        ([[1, 1, 0]], [[0.25, 0.5, 0.75]], 2 / 2),
    ]
    for y_true, y_score, expected in simple_cases:
        assert_almost_equal(label_ranking_loss(y_true, y_score), expected)

    # Undefined metrics - the ranking doesn't matter: rows that are all
    # relevant or all irrelevant always yield a loss of 0.
    for y_true in ([[0, 0]], [[1, 1]]):
        for y_score in ([[0.75, 0.25]], [[0.5, 0.5]]):
            assert_almost_equal(label_ranking_loss(y_true, y_score), 0)
    for y_true in ([[0, 0, 0]], [[1, 1, 1]]):
        for y_score in ([[0.5, 0.75, 0.25]], [[0.25, 0.5, 0.5]]):
            assert_almost_equal(label_ranking_loss(y_true, y_score), 0)

    # Non trival multi-sample cases: loss is averaged over samples.
    assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
                                           [[0.1, 10., -3], [0, 1, 3]]),
                        (0 + 2 / 2) / 2.)
    assert_almost_equal(label_ranking_loss(
        [[0, 1, 0], [1, 1, 0], [0, 1, 1]],
        [[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
        (0 + 2 / 2 + 1 / 2) / 3.)
    assert_almost_equal(label_ranking_loss(
        [[0, 1, 0], [1, 1, 0], [0, 1, 1]],
        [[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
        (0 + 2 / 2 + 1 / 2) / 3.)

    # Sparse csr matrices are accepted for y_true.
    assert_almost_equal(label_ranking_loss(
        csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
        [[0.1, 10, -3], [3, 1, 3]]),
        (0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    """Mismatched y_true / y_score shapes must raise ValueError."""
    mismatched_pairs = [
        ([[0, 1], [0, 1]], [0, 1]),
        ([[0, 1], [0, 1]], [[0, 1]]),
        ([[0, 1], [0, 1]], [[0], [1]]),
        ([[0, 1]], [[0, 1], [0, 1]]),
        ([[0], [1]], [[0, 1], [0, 1]]),
        ([[0, 1], [0, 1]], [[0], [1]]),
    ]
    for y_true, y_score in mismatched_pairs:
        assert_raises(ValueError, label_ranking_loss, y_true, y_score)
def test_ranking_loss_ties_handling():
    """Tied scores: a relevant/irrelevant tie counts as a ranking error."""
    # Each case is (y_true, y_score, expected loss).
    tie_cases = [
        ([[1, 0]], [[0.5, 0.5]], 1),
        ([[0, 1]], [[0.5, 0.5]], 1),
        ([[0, 0, 1]], [[0.25, 0.5, 0.5]], 1 / 2),
        ([[0, 1, 0]], [[0.25, 0.5, 0.5]], 1 / 2),
        ([[0, 1, 1]], [[0.25, 0.5, 0.5]], 0),
        ([[1, 0, 0]], [[0.25, 0.5, 0.5]], 1),
        ([[1, 0, 1]], [[0.25, 0.5, 0.5]], 1),
        ([[1, 1, 0]], [[0.25, 0.5, 0.5]], 1),
    ]
    for y_true, y_score, expected in tie_cases:
        assert_almost_equal(label_ranking_loss(y_true, y_score), expected)
| mit |
droundy/deft | papers/thesis-kirstie/figs/plot_LJ_Potential.py | 1 | 1142 | #!/usr/bin/python3
# RUN this program from the directory it is listed in
# with command ./plot_LJ_Potential.py
"""Plot the Lennard-Jones pair potential V(r) in reduced units.

Saves the figure to LJ_Potential.pdf in the current directory.
"""
from scipy import special
import numpy as np
import matplotlib.pyplot as plt
import math

# Potential parameters (reduced units: energy in epsilon, length in sigma).
epsilon = 1
sigma = 1
R = 1 / 1.781797436  # effective hard-sphere radius for sigma = 1

# Sample r from just inside the repulsive wall (0.9 sigma) out to 2.5 sigma.
r = np.linspace(0.9, 2.5, 200)

# (sigma/r)^6, shared by both Lennard-Jones terms.
sigma_over_r_to_pow6 = (sigma / r) ** 6

# Lennard-Jones potential: V(r) = 4*epsilon*[(sigma/r)^12 - (sigma/r)^6]
V = 4 * epsilon * (sigma_over_r_to_pow6 ** 2 - sigma_over_r_to_pow6)

plt.plot(r / sigma, V)
plt.xlim(right=2.5)
plt.ylim(top=V.max())
# Raw strings keep the TeX backslashes intact for mathtext rendering
# (plain '\s' / '\e' are invalid escape sequences).
plt.xlabel(r'r/$\sigma$')
plt.ylabel(r'V(r)/$\epsilon$')
plt.title('Lennard-Jones Potential')  # fixed spelling: "Leonard" -> "Lennard"
plt.savefig("LJ_Potential.pdf")
# plt.show()  # uncomment for interactive viewing
| gpl-2.0 |
Stavitsky/neutron | neutron/plugins/sriovnicagent/common/config.py | 48 | 3679 | # Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from neutron.agent.common import config
def parse_exclude_devices(exclude_list):
    """Parse Exclude devices list

    parses excluded device list in the form:
    dev_name:pci_dev_1;pci_dev_2

    :param exclude_list: list of string pairs in "key:value" format;
        the key part represents the network device name and the value
        part is a list of PCI slots separated by ";"
    :returns: dict mapping each network device name to the set of its
        excluded PCI slots (whitespace stripped, empty entries dropped)
    :raises ValueError: if an entry has no ":" separator, has an empty
        device name, or repeats a device name
    """
    exclude_mapping = {}
    for dev_mapping in exclude_list:
        try:
            dev_name, exclude_devices = dev_mapping.split(":", 1)
        except ValueError:
            raise ValueError(_("Invalid mapping: '%s'") % dev_mapping)
        dev_name = dev_name.strip()
        if not dev_name:
            raise ValueError(_("Missing key in mapping: '%s'") % dev_mapping)
        if dev_name in exclude_mapping:
            raise ValueError(_("Device %(dev_name)s in mapping: %(mapping)s "
                              "not unique") % {'dev_name': dev_name,
                                               'mapping': dev_mapping})
        # Strip each PCI slot and drop empty entries; a set both removes
        # duplicates and gives O(1) membership tests to callers.
        stripped = (dev.strip() for dev in exclude_devices.split(";"))
        exclude_mapping[dev_name] = {dev for dev in stripped if dev}
    return exclude_mapping
# By default no physical networks are mapped and no VFs are excluded.
DEFAULT_DEVICE_MAPPINGS = []
DEFAULT_EXCLUDE_DEVICES = []

# Options registered under the [AGENT] section.
agent_opts = [
    cfg.IntOpt('polling_interval', default=2,
               help=_("The number of seconds the agent will wait between "
                      "polling for local device changes.")),
]

# Options registered under the [SRIOV_NIC] section.
sriov_nic_opts = [
    cfg.ListOpt('physical_device_mappings',
                default=DEFAULT_DEVICE_MAPPINGS,
                help=_("List of <physical_network>:<network_device> mapping "
                       "physical network names to the agent's node-specific "
                       "physical network device of SR-IOV physical "
                       "function to be used for VLAN networks. "
                       "All physical networks listed in network_vlan_ranges "
                       "on the server should have mappings to appropriate "
                       "interfaces on each agent")),
    cfg.ListOpt('exclude_devices',
                default=DEFAULT_EXCLUDE_DEVICES,
                help=_("List of <network_device>:<excluded_devices> "
                       "mapping network_device to the agent's node-specific "
                       "list of virtual functions that should not be used "
                       "for virtual networking. excluded_devices is a "
                       "semicolon separated list of virtual functions "
                       "(BDF format).to exclude from network_device. "
                       "The network_device in the mapping should appear in "
                       "the physical_device_mappings list.")),
]

# Register both option groups on the global config object at import time,
# plus the common agent state (report interval etc.) options.
cfg.CONF.register_opts(agent_opts, 'AGENT')
cfg.CONF.register_opts(sriov_nic_opts, 'SRIOV_NIC')
config.register_agent_state_opts_helper(cfg.CONF)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.