gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
from __future__ import unicode_literals
import re
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import FieldDoesNotExist
from django.db.models.lookups import Lookup
from django.db.models.expressions import ExpressionNode, Col
from django.utils import six
gis_lookups = {}
class GISLookup(Lookup):
    """Base class for spatial lookups registered in ``gis_lookups``."""

    # SQL template the backend operator may use when rendering this lookup.
    sql_template = None
    # Optional function wrapping the lhs (set by subclasses/backends).
    transform_func = None
    # True for lookups that compare against a distance value.
    distance = False

    @classmethod
    def _check_geo_field(cls, opts, lookup):
        """
        Utility for checking the given lookup with the given model options.
        The lookup is a string either specifying the geographic field, e.g.
        'point', 'the_geom', or a related lookup on a geographic field like
        'address__point'.

        If a GeometryField exists according to the given lookup on the model
        options, it will be returned.  Otherwise returns False.
        """
        from django.contrib.gis.db.models.fields import GeometryField
        # This takes into account the situation where the lookup is a
        # lookup to a related geographic field, e.g., 'address__point'.
        field_list = lookup.split(LOOKUP_SEP)
        # Reversing so list operates like a queue of related lookups,
        # and popping the top lookup.
        field_list.reverse()
        fld_name = field_list.pop()
        try:
            geo_fld = opts.get_field(fld_name)
            # If the field list is still around, then it means that the
            # lookup was for a geometry field across a relationship --
            # thus we keep on getting the related model options and the
            # model field associated with the next field in the list
            # until there's no more left.
            while len(field_list):
                opts = geo_fld.rel.to._meta
                geo_fld = opts.get_field(field_list.pop())
        except (FieldDoesNotExist, AttributeError):
            return False
        # Finally, make sure we got a Geographic field and return.
        if isinstance(geo_fld, GeometryField):
            return geo_fld
        else:
            return False

    def get_db_prep_lookup(self, value, connection):
        # get_db_prep_lookup is called by process_rhs from super class.
        # The geometry is adapted to the backend's native type via
        # connection.ops.Adapter before being handed over as a parameter.
        if isinstance(value, (tuple, list)):
            # First param is assumed to be the geometric object
            params = [connection.ops.Adapter(value[0])] + list(value)[1:]
        else:
            params = [connection.ops.Adapter(value)]
        return ('%s', params)

    def process_rhs(self, compiler, connection):
        """Compile the right-hand side, swapping in a geometry placeholder."""
        rhs, rhs_params = super(GISLookup, self).process_rhs(compiler, connection)
        geom = self.rhs
        if isinstance(self.rhs, Col):
            # Make sure the F Expression destination field exists, and
            # set an `srid` attribute with the same as that of the
            # destination.
            geo_fld = self.rhs.output_field
            if not hasattr(geo_fld, 'srid'):
                raise ValueError('No geographic field found in expression.')
            self.rhs.srid = geo_fld.srid
        elif isinstance(self.rhs, ExpressionNode):
            raise ValueError('Complex expressions not supported for GeometryField')
        elif isinstance(self.rhs, (list, tuple)):
            # Tuple values carry the geometry first, extra args after.
            geom = self.rhs[0]
        rhs = connection.ops.get_geom_placeholder(self.lhs.output_field, geom, compiler)
        return rhs, rhs_params

    def as_sql(self, compiler, connection):
        """Delegate SQL generation to the backend's spatial operator."""
        lhs_sql, sql_params = self.process_lhs(compiler, connection)
        rhs_sql, rhs_params = self.process_rhs(compiler, connection)
        sql_params.extend(rhs_params)
        template_params = {'lhs': lhs_sql, 'rhs': rhs_sql}
        backend_op = connection.ops.gis_operators[self.lookup_name]
        return backend_op.as_sql(connection, self, template_params, sql_params)
# ------------------
# Geometry operators
# ------------------
class OverlapsLeftLookup(GISLookup):
    """
    The 'overlaps_left' operator returns true if A's bounding box overlaps or
    is to the left of B's bounding box.
    """
    lookup_name = 'overlaps_left'
gis_lookups['overlaps_left'] = OverlapsLeftLookup


class OverlapsRightLookup(GISLookup):
    """
    The 'overlaps_right' operator returns true if A's bounding box overlaps or
    is to the right of B's bounding box.
    """
    lookup_name = 'overlaps_right'
gis_lookups['overlaps_right'] = OverlapsRightLookup


class OverlapsBelowLookup(GISLookup):
    """
    The 'overlaps_below' operator returns true if A's bounding box overlaps or
    is below B's bounding box.
    """
    lookup_name = 'overlaps_below'
gis_lookups['overlaps_below'] = OverlapsBelowLookup


class OverlapsAboveLookup(GISLookup):
    """
    The 'overlaps_above' operator returns true if A's bounding box overlaps or
    is above B's bounding box.
    """
    lookup_name = 'overlaps_above'
gis_lookups['overlaps_above'] = OverlapsAboveLookup


class LeftLookup(GISLookup):
    """
    The 'left' operator returns true if A's bounding box is strictly to the
    left of B's bounding box.
    """
    lookup_name = 'left'
gis_lookups['left'] = LeftLookup


class RightLookup(GISLookup):
    """
    The 'right' operator returns true if A's bounding box is strictly to the
    right of B's bounding box.
    """
    lookup_name = 'right'
gis_lookups['right'] = RightLookup


class StrictlyBelowLookup(GISLookup):
    """
    The 'strictly_below' operator returns true if A's bounding box is strictly
    below B's bounding box.
    """
    lookup_name = 'strictly_below'
gis_lookups['strictly_below'] = StrictlyBelowLookup


class StrictlyAboveLookup(GISLookup):
    """
    The 'strictly_above' operator returns true if A's bounding box is strictly
    above B's bounding box.
    """
    lookup_name = 'strictly_above'
gis_lookups['strictly_above'] = StrictlyAboveLookup


class SameAsLookup(GISLookup):
    """
    The "~=" operator is the "same as" operator. It tests actual geometric
    equality of two features. So if A and B are the same feature,
    vertex-by-vertex, the operator returns true.
    """
    lookup_name = 'same_as'
gis_lookups['same_as'] = SameAsLookup


class ExactLookup(SameAsLookup):
    # Alias of same_as
    lookup_name = 'exact'
gis_lookups['exact'] = ExactLookup


class BBContainsLookup(GISLookup):
    """
    The 'bbcontains' operator returns true if A's bounding box completely
    contains B's bounding box.
    """
    lookup_name = 'bbcontains'
gis_lookups['bbcontains'] = BBContainsLookup


class BBOverlapsLookup(GISLookup):
    """
    The 'bboverlaps' operator returns true if A's bounding box overlaps B's
    bounding box.
    """
    lookup_name = 'bboverlaps'
gis_lookups['bboverlaps'] = BBOverlapsLookup


class ContainedLookup(GISLookup):
    """
    The 'contained' operator returns true if A's bounding box is completely
    contained by B's bounding box.
    """
    lookup_name = 'contained'
gis_lookups['contained'] = ContainedLookup
# ------------------
# Geometry functions
# ------------------
class ContainsLookup(GISLookup):
    """Spatial lookup dispatched to the backend's 'contains' operator."""
    lookup_name = 'contains'
gis_lookups['contains'] = ContainsLookup


class ContainsProperlyLookup(GISLookup):
    """Spatial lookup dispatched to the backend's 'contains_properly' operator."""
    lookup_name = 'contains_properly'
gis_lookups['contains_properly'] = ContainsProperlyLookup


class CoveredByLookup(GISLookup):
    """Spatial lookup dispatched to the backend's 'coveredby' operator."""
    lookup_name = 'coveredby'
gis_lookups['coveredby'] = CoveredByLookup


class CoversLookup(GISLookup):
    """Spatial lookup dispatched to the backend's 'covers' operator."""
    lookup_name = 'covers'
gis_lookups['covers'] = CoversLookup


class CrossesLookup(GISLookup):
    """Spatial lookup dispatched to the backend's 'crosses' operator."""
    lookup_name = 'crosses'
gis_lookups['crosses'] = CrossesLookup


class DisjointLookup(GISLookup):
    """Spatial lookup dispatched to the backend's 'disjoint' operator."""
    lookup_name = 'disjoint'
gis_lookups['disjoint'] = DisjointLookup


class EqualsLookup(GISLookup):
    """Spatial lookup dispatched to the backend's 'equals' operator."""
    lookup_name = 'equals'
gis_lookups['equals'] = EqualsLookup


class IntersectsLookup(GISLookup):
    """Spatial lookup dispatched to the backend's 'intersects' operator."""
    lookup_name = 'intersects'
gis_lookups['intersects'] = IntersectsLookup


class OverlapsLookup(GISLookup):
    """Spatial lookup dispatched to the backend's 'overlaps' operator."""
    lookup_name = 'overlaps'
gis_lookups['overlaps'] = OverlapsLookup


class RelateLookup(GISLookup):
    """Spatial lookup dispatched to the backend's 'relate' operator.

    Takes a two-tuple of (geometry, intersection pattern); the pattern is
    validated against ``pattern_regex`` unless the backend provides its own
    ``check_relate_argument``.
    """
    lookup_name = 'relate'
    sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s)'
    # Nine characters drawn from 0, 1, 2, T, F and *.
    pattern_regex = re.compile(r'^[012TF\*]{9}$')

    def get_db_prep_lookup(self, value, connection):
        # `value` must be a (geometry, pattern) two-tuple.
        if len(value) != 2:
            raise ValueError('relate must be passed a two-tuple')
        # Check the pattern argument
        backend_op = connection.ops.gis_operators[self.lookup_name]
        if hasattr(backend_op, 'check_relate_argument'):
            # The backend knows how to validate its own pattern argument.
            backend_op.check_relate_argument(value[1])
        else:
            pattern = value[1]
            if not isinstance(pattern, six.string_types) or not self.pattern_regex.match(pattern):
                raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
        return super(RelateLookup, self).get_db_prep_lookup(value, connection)
gis_lookups['relate'] = RelateLookup


class TouchesLookup(GISLookup):
    """Spatial lookup dispatched to the backend's 'touches' operator."""
    lookup_name = 'touches'
gis_lookups['touches'] = TouchesLookup


class WithinLookup(GISLookup):
    """Spatial lookup dispatched to the backend's 'within' operator."""
    lookup_name = 'within'
gis_lookups['within'] = WithinLookup
class DistanceLookupBase(GISLookup):
    """Base class for lookups that compare against a distance value."""
    distance = True
    sql_template = '%(func)s(%(lhs)s, %(rhs)s) %(op)s %%s'

    def get_db_prep_lookup(self, value, connection):
        # Distance lookups are given a (geometry, distance[, extra]) tuple.
        if isinstance(value, (tuple, list)):
            if not 2 <= len(value) <= 3:
                raise ValueError("2 or 3-element tuple required for '%s' lookup." % self.lookup_name)
            params = [connection.ops.Adapter(value[0])]
            # Getting the distance parameter in the units of the field.
            params += connection.ops.get_distance(self.lhs.output_field, value[1:], self.lookup_name)
            return ('%s', params)
        else:
            # Non-tuple values fall back to plain geometry adaptation.
            return super(DistanceLookupBase, self).get_db_prep_lookup(value, connection)


class DWithinLookup(DistanceLookupBase):
    """Distance lookup dispatched to the backend's 'dwithin' operator."""
    lookup_name = 'dwithin'
    sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s)'
gis_lookups['dwithin'] = DWithinLookup


class DistanceGTLookup(DistanceLookupBase):
    """Matches rows whose distance is greater than the given value."""
    lookup_name = 'distance_gt'
gis_lookups['distance_gt'] = DistanceGTLookup


class DistanceGTELookup(DistanceLookupBase):
    """Matches rows whose distance is greater than or equal to the value."""
    lookup_name = 'distance_gte'
gis_lookups['distance_gte'] = DistanceGTELookup


class DistanceLTLookup(DistanceLookupBase):
    """Matches rows whose distance is less than the given value."""
    lookup_name = 'distance_lt'
gis_lookups['distance_lt'] = DistanceLTLookup


class DistanceLTELookup(DistanceLookupBase):
    """Matches rows whose distance is less than or equal to the value."""
    lookup_name = 'distance_lte'
gis_lookups['distance_lte'] = DistanceLTELookup
| |
from __future__ import division, unicode_literals
from future.builtins import str
from copy import copy
from django.conf import settings
from django.contrib.contenttypes.generic import GenericRelation
from django.core.exceptions import ImproperlyConfigured
from django.db.models import IntegerField, CharField, FloatField
from django.db.models.signals import post_save, post_delete
from mezzanine.utils.models import lazy_model_ops
class BaseGenericRelation(GenericRelation):
    """
    Extends ``GenericRelation`` to:

    - Add a consistent default value for ``object_id_field`` and
      check for a ``related_model`` attribute which can be defined
      on subclasses as a default for the ``to`` argument.

    - Add one or more custom fields to the model that the relation
      field is applied to, and then call a ``related_items_changed``
      method each time related items are saved or deleted, so that a
      calculated value can be stored against the custom fields since
      aggregates aren't available for GenericRelation instances.
    """

    # Mapping of field names to model fields that will be added.
    fields = {}

    def __init__(self, *args, **kwargs):
        """
        Set up some defaults and check for a ``related_model``
        attribute for the ``to`` argument.
        """
        # Fail loudly for old South migrations that froze one of these
        # fields - they cannot be deserialized any more.
        if kwargs.get("frozen_by_south", False):
            raise Exception("""
            Your project contains migrations that include one of the fields
            from mezzanine.generic in its Migration.model dict: possibly
            KeywordsField, CommentsField or RatingField. These migratons no
            longer work with the latest versions of Django and South, so you'll
            need to fix them by hand. This is as simple as commenting out or
            deleting the field from the Migration.model dict.
            See http://bit.ly/1hecVsD for an example.
            """)
        kwargs.setdefault("object_id_field", "object_pk")
        to = getattr(self, "related_model", None)
        if to:
            kwargs.setdefault("to", to)
        super(BaseGenericRelation, self).__init__(*args, **kwargs)

    def contribute_to_class(self, cls, name):
        """
        Add each of the names and fields in the ``fields`` attribute
        to the model the relationship field is applied to, and set up
        the related item save and delete signals for calling
        ``related_items_changed``.
        """
        for field in cls._meta.many_to_many:
            # Only one field of each BaseGenericRelation subclass may be
            # present on a given model.
            if isinstance(field, self.__class__):
                e = "Multiple %s fields are not supported (%s.%s, %s.%s)" % (
                    self.__class__.__name__, cls.__name__, cls.__name__,
                    name, field.name)
                raise ImproperlyConfigured(e)
        self.related_field_name = name
        super(BaseGenericRelation, self).contribute_to_class(cls, name)
        # Not applicable to abstract classes, and in fact will break.
        if not cls._meta.abstract:
            for (name_string, field) in self.fields.items():
                if "%s" in name_string:
                    # Template names like "%s_count" are filled in with
                    # the name this relation was given on the model.
                    name_string = name_string % name
                # In Django 1.6, add_to_class will be called on a
                # parent model's field more than once, so
                # contribute_to_class needs to be idempotent. We
                # don't call get_all_field_names() which fill the app
                # cache get_fields_with_model() is safe.
                if name_string in [i.name for i, _ in
                                   cls._meta.get_fields_with_model()]:
                    continue
                if field.verbose_name is None:
                    field.verbose_name = self.verbose_name
                cls.add_to_class(name_string, copy(field))
            # Add a getter function to the model we can use to retrieve
            # the field/manager by name.
            getter_name = "get_%s_name" % self.__class__.__name__.lower()
            cls.add_to_class(getter_name, lambda self: name)

            def connect_save(sender):
                post_save.connect(self._related_items_changed, sender=sender)

            def connect_delete(sender):
                post_delete.connect(self._related_items_changed, sender=sender)

            # Defer signal hook-up until the related model is loaded.
            lazy_model_ops.add(connect_save, self.rel.to)
            lazy_model_ops.add(connect_delete, self.rel.to)

    def _related_items_changed(self, **kwargs):
        """
        Ensure that the given related item is actually for the model
        this field applies to, and pass the instance to the real
        ``related_items_changed`` handler.
        """
        for_model = kwargs["instance"].content_type.model_class()
        if issubclass(for_model, self.model):
            instance_id = kwargs["instance"].object_pk
            try:
                instance = for_model.objects.get(id=instance_id)
            except self.model.DoesNotExist:
                # Instance itself was deleted - signals are irrelevant.
                return
            if hasattr(instance, "get_content_model"):
                # Subclassed content - work on the concrete instance.
                instance = instance.get_content_model()
            related_manager = getattr(instance, self.related_field_name)
            self.related_items_changed(instance, related_manager)

    def related_items_changed(self, instance, related_manager):
        """
        Can be implemented by subclasses - called whenever the
        state of related items change, eg they're saved or deleted.
        The instance for this field and the related manager for the
        field are passed as arguments.
        """
        pass

    def value_from_object(self, obj):
        """
        Returns the value of this field in the given model instance.
        Needed for Django 1.7: https://code.djangoproject.com/ticket/22552
        """
        return getattr(obj, self.attname).all()
class CommentsField(BaseGenericRelation):
    """
    Stores the number of comments against the
    ``COMMENTS_FIELD_NAME_count`` field when a comment is saved or
    deleted.
    """

    related_model = "generic.ThreadedComment"
    # Single extra column added to the host model: "<field name>_count".
    fields = {"%s_count": IntegerField(editable=False, default=0)}

    def related_items_changed(self, instance, related_manager):
        """
        Stores the number of comments. A custom ``count_filter``
        queryset gets checked for, allowing managers to implement
        custom count logic.
        """
        try:
            count = related_manager.count_queryset()
        except AttributeError:
            # Manager doesn't provide count_queryset - fall back to a
            # plain count of related items.
            count = related_manager.count()
        count_field_name = list(self.fields.keys())[0] % \
            self.related_field_name
        setattr(instance, count_field_name, count)
        instance.save()
class KeywordsField(BaseGenericRelation):
    """
    Stores the keywords as a single string into the
    ``KEYWORDS_FIELD_NAME_string`` field for convenient access when
    searching.
    """

    related_model = "generic.AssignedKeyword"
    # Single extra column added to the host model: "<field name>_string".
    fields = {"%s_string": CharField(editable=False, blank=True,
                                     max_length=500)}

    def __init__(self, *args, **kwargs):
        """
        Mark the field as editable so that it can be specified in
        admin class fieldsets and pass validation, and also so that
        it shows up in the admin form.
        """
        super(KeywordsField, self).__init__(*args, **kwargs)
        self.editable = True

    def formfield(self, **kwargs):
        """
        Provide the custom form widget for the admin, since there
        isn't a form field mapped to ``GenericRelation`` model fields.
        """
        from mezzanine.generic.forms import KeywordsWidget
        kwargs["widget"] = KeywordsWidget
        return super(KeywordsField, self).formfield(**kwargs)

    def save_form_data(self, instance, data):
        """
        The ``KeywordsWidget`` field will return data as a string of
        comma separated IDs for the ``Keyword`` model - convert these
        into actual ``AssignedKeyword`` instances. Also delete
        ``Keyword`` instances if their last related ``AssignedKeyword``
        instance is being removed.
        """
        from mezzanine.generic.models import AssignedKeyword, Keyword
        related_manager = getattr(instance, self.name)
        # Get a list of Keyword IDs being removed.
        old_ids = [str(a.keyword_id) for a in related_manager.all()]
        new_ids = data.split(",")
        removed_ids = set(old_ids) - set(new_ids)
        # Remove current AssignedKeyword instances.
        related_manager.all().delete()
        # Convert the data into AssignedKeyword instances.
        if data:
            data = [AssignedKeyword(keyword_id=i) for i in new_ids]
        # Remove Keyword instances that no longer have a
        # related AssignedKeyword instance.
        existing = AssignedKeyword.objects.filter(keyword__id__in=removed_ids)
        existing_ids = set([str(a.keyword_id) for a in existing])
        unused_ids = removed_ids - existing_ids
        Keyword.objects.filter(id__in=unused_ids).delete()
        super(KeywordsField, self).save_form_data(instance, data)

    def contribute_to_class(self, cls, name):
        """
        Swap out any reference to ``KeywordsField`` with the
        ``KEYWORDS_FIELD_string`` field in ``search_fields``.
        """
        super(KeywordsField, self).contribute_to_class(cls, name)
        string_field_name = list(self.fields.keys())[0] % \
            self.related_field_name
        if hasattr(cls, "search_fields") and name in cls.search_fields:
            try:
                weight = cls.search_fields[name]
            except TypeError:
                # search_fields is a sequence.
                index = cls.search_fields.index(name)
                search_fields_type = type(cls.search_fields)
                cls.search_fields = list(cls.search_fields)
                cls.search_fields[index] = string_field_name
                cls.search_fields = search_fields_type(cls.search_fields)
            else:
                # search_fields is a dict - move the weight over to the
                # generated string field's name.
                del cls.search_fields[name]
                cls.search_fields[string_field_name] = weight

    def related_items_changed(self, instance, related_manager):
        """
        Stores the keywords as a single string for searching.
        """
        assigned = related_manager.select_related("keyword")
        keywords = " ".join([str(a.keyword) for a in assigned])
        string_field_name = list(self.fields.keys())[0] % \
            self.related_field_name
        # Only hit the database when the stored string actually changed.
        if getattr(instance, string_field_name) != keywords:
            setattr(instance, string_field_name, keywords)
            instance.save()
class RatingField(BaseGenericRelation):
    """
    Stores the rating count and average against the
    ``RATING_FIELD_NAME_count`` and ``RATING_FIELD_NAME_average``
    fields when a rating is saved or deleted.
    """

    related_model = "generic.Rating"
    # Extra columns added: "<name>_count", "<name>_sum", "<name>_average".
    fields = {"%s_count": IntegerField(default=0, editable=False),
              "%s_sum": IntegerField(default=0, editable=False),
              "%s_average": FloatField(default=0, editable=False)}

    def related_items_changed(self, instance, related_manager):
        """
        Calculates and saves the average rating.
        """
        ratings = [r.value for r in related_manager.all()]
        count = len(ratings)
        _sum = sum(ratings)
        # Guard against division by zero when all ratings are deleted.
        average = _sum / count if count > 0 else 0
        setattr(instance, "%s_count" % self.related_field_name, count)
        setattr(instance, "%s_sum" % self.related_field_name, _sum)
        setattr(instance, "%s_average" % self.related_field_name, average)
        instance.save()
# South requires custom fields to be given "rules".
# See http://south.aeracode.org/docs/customfields.html
if "south" in settings.INSTALLED_APPS:
    try:
        from south.modelsinspector import add_introspection_rules
        # Use a raw string for the module-path regex: "\." in a plain
        # string literal is an invalid escape sequence (deprecated since
        # Python 3.6, an error in later versions).
        add_introspection_rules(rules=[((BaseGenericRelation,), [], {})],
                                patterns=[r"mezzanine\.generic\.fields\."])
    except ImportError:
        pass
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import os
import time
import webob.dec
import webob.exc
from nova.api.openstack import common
from nova.api.openstack import wsgi
from nova import auth
from nova import context
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova import wsgi as base_wsgi
LOG = logging.getLogger('nova.api.openstack.compute.auth')
FLAGS = flags.FLAGS
flags.DECLARE('use_forwarded_for', 'nova.api.auth')
class NoAuthMiddleware(base_wsgi.Middleware):
    """Return a fake token if one isn't specified."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """Hand out a fake token when absent, else build a RequestContext.

        The fake token is simply "<user>:<project>"; every request is
        treated as admin.
        """
        if 'X-Auth-Token' not in req.headers:
            # No token yet - return one along with the service URLs.
            user_id = req.headers.get('X-Auth-User', 'admin')
            project_id = req.headers.get('X-Auth-Project-Id', 'admin')
            os_url = os.path.join(req.url, project_id)
            res = webob.Response()
            # NOTE(vish): This is expecting and returning Auth(1.1), whereas
            #             keystone uses 2.0 auth. We should probably allow
            #             2.0 auth here as well.
            res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id)
            res.headers['X-Server-Management-Url'] = os_url
            res.headers['X-Storage-Url'] = ''
            res.headers['X-CDN-Management-Url'] = ''
            res.content_type = 'text/plain'
            res.status = '204'
            return res
        token = req.headers['X-Auth-Token']
        # Token format is "user[:project]"; project defaults to the user.
        user_id, _sep, project_id = token.partition(':')
        project_id = project_id or user_id
        remote_address = getattr(req, 'remote_address', '127.0.0.1')
        if FLAGS.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)
        ctx = context.RequestContext(user_id,
                                     project_id,
                                     is_admin=True,
                                     remote_address=remote_address)
        req.environ['nova.context'] = ctx
        return self.application
class AuthMiddleware(base_wsgi.Middleware):
    """Authorize the openstack API request or return an HTTP Forbidden."""

    def __init__(self, application, db_driver=None):
        """Set up the token datastore and the auth manager.

        db_driver defaults to FLAGS.db_driver.
        """
        if not db_driver:
            db_driver = FLAGS.db_driver
        self.db = utils.import_object(db_driver)
        self.auth = auth.manager.AuthManager()
        super(AuthMiddleware, self).__init__(application)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """Resolve the token to a user/project and stash a RequestContext."""
        if not self.has_authentication(req):
            return self.authenticate(req)
        user_id = self.get_user_by_authentication(req)
        if not user_id:
            token = req.headers["X-Auth-Token"]
            msg = _("%(user_id)s could not be found with token '%(token)s'")
            LOG.warn(msg % locals())
            return wsgi.Fault(webob.exc.HTTPUnauthorized())
        # Get all valid projects for the user
        projects = self.auth.get_projects(user_id)
        if not projects:
            return wsgi.Fault(webob.exc.HTTPUnauthorized())
        project_id = ""
        path_parts = req.path.split('/')
        # TODO(wwolf): this v1.1 check will be temporary as
        # keystone should be taking this over at some point
        if len(path_parts) > 1 and path_parts[1] in ('v1.1', 'v2'):
            project_id = path_parts[2]
            # Check that the project for project_id exists, and that user
            # is authorized to use it
            try:
                self.auth.get_project(project_id)
            except exception.ProjectNotFound:
                return wsgi.Fault(webob.exc.HTTPUnauthorized())
            if project_id not in [p.id for p in projects]:
                return wsgi.Fault(webob.exc.HTTPUnauthorized())
        else:
            # As a fallback, set project_id from the headers, which is the v1.0
            # behavior. As a last resort, be forgiving to the user and set
            # project_id based on a valid project of theirs.
            try:
                project_id = req.headers["X-Auth-Project-Id"]
            except KeyError:
                project_id = projects[0].id
        is_admin = self.auth.is_admin(user_id)
        remote_address = getattr(req, 'remote_address', '127.0.0.1')
        if FLAGS.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)
        ctx = context.RequestContext(user_id,
                                     project_id,
                                     is_admin=is_admin,
                                     remote_address=remote_address)
        req.environ['nova.context'] = ctx
        if not is_admin and not self.auth.is_project_member(user_id,
                                                           project_id):
            msg = _("%(user_id)s must be an admin or a "
                    "member of %(project_id)s")
            LOG.warn(msg % locals())
            return wsgi.Fault(webob.exc.HTTPUnauthorized())
        return self.application

    def has_authentication(self, req):
        """Return True when the request already carries an auth token."""
        return 'X-Auth-Token' in req.headers

    def get_user_by_authentication(self, req):
        """Return the user id for the request's token, or None."""
        return self.authorize_token(req.headers["X-Auth-Token"])

    def authenticate(self, req):
        """Handle an explicit authentication request (X-Auth-User/Key)."""
        # Unless the request is explicitly made against /<version>/ don't
        # honor it
        path_info = req.path_info
        if len(path_info) > 1:
            msg = _("Authentication requests must be made against a version "
                    "root (e.g. /v2).")
            LOG.warn(msg)
            return wsgi.Fault(webob.exc.HTTPUnauthorized(explanation=msg))

        def _get_auth_header(key):
            """Ensures that the KeyError returned is meaningful."""
            try:
                return req.headers[key]
            except KeyError:
                # Re-raise with the header name so the message below
                # names the missing header.
                raise KeyError(key)
        try:
            username = _get_auth_header('X-Auth-User')
            key = _get_auth_header('X-Auth-Key')
        except KeyError as ex:
            msg = _("Could not find %s in request.") % ex
            LOG.warn(msg)
            return wsgi.Fault(webob.exc.HTTPUnauthorized(explanation=msg))
        token, user = self._authorize_user(username, key, req)
        if user and token:
            res = webob.Response()
            res.headers['X-Auth-Token'] = token['token_hash']
            res.headers['X-Server-Management-Url'] = \
                token['server_management_url']
            res.headers['X-Storage-Url'] = token['storage_url']
            res.headers['X-CDN-Management-Url'] = token['cdn_management_url']
            res.content_type = 'text/plain'
            res.status = '204'
            LOG.debug(_("Successfully authenticated '%s'") % username)
            return res
        else:
            return wsgi.Fault(webob.exc.HTTPUnauthorized())

    def authorize_token(self, token_hash):
        """Retrieve the user id for the given token hash.

        If the token has expired, returns None.
        If the token is not found, returns None.
        Otherwise returns the authorized user's id.

        This method will also remove the token if the timestamp is older than
        2 days ago.
        """
        ctxt = context.get_admin_context()
        try:
            token = self.db.auth_token_get(ctxt, token_hash)
        except exception.NotFound:
            return None
        if token:
            delta = utils.utcnow() - token['created_at']
            if delta.days >= 2:
                # Token is stale - destroy it instead of honoring it.
                self.db.auth_token_destroy(ctxt, token['token_hash'])
            else:
                return token['user_id']
        return None

    def _authorize_user(self, username, key, req):
        """Generates a new token and assigns it to a user.

        username - string
        key - string API key
        req - wsgi.Request object

        Returns a (token, user) tuple, or (None, None) when the key is
        unknown or does not belong to `username`.
        """
        ctxt = context.get_admin_context()

        # Resolve the user from the API key exactly once. Previously the
        # key was looked up twice and an unknown key raised an unhandled
        # NotFound (HTTP 500) when no X-Auth-Project-Id header was given;
        # the unguarded fall-through also returned a bare None, which blew
        # up the tuple-unpacking in authenticate().
        try:
            user = self.auth.get_user_from_access_key(key)
        except exception.NotFound:
            LOG.warn(_("User not found with provided API key."))
            return None, None

        project_id = req.headers.get('X-Auth-Project-Id')
        if project_id is None:
            # If the project_id is not provided in the headers, be forgiving
            # to the user and set project_id based on a valid project of
            # theirs.
            projects = self.auth.get_projects(user.id)
            if not projects:
                raise webob.exc.HTTPUnauthorized()
            project_id = projects[0].id

        if user.name == username:
            token_hash = hashlib.sha1('%s%s%f' % (username, key,
                time.time())).hexdigest()
            token_dict = {}
            token_dict['token_hash'] = token_hash
            token_dict['cdn_management_url'] = ''
            os_url = req.url.strip('/')
            os_url += '/' + project_id
            token_dict['server_management_url'] = os_url
            token_dict['storage_url'] = ''
            token_dict['user_id'] = user.id
            token = self.db.auth_token_create(ctxt, token_dict)
            return token, user

        # Key resolved to a different user than the one claimed.
        msg = _("Provided API key is valid, but not for user "
                "'%(username)s'") % locals()
        LOG.warn(msg)
        return None, None
| |
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import six
import IECore
import Gaffer
import GafferUI
import GafferTest
import GafferUITest
class PlugValueWidgetTest( GafferUITest.TestCase ) :
def testContext( self ) :
s = Gaffer.ScriptNode()
s["m"] = GafferTest.MultiplyNode()
s["e"] = Gaffer.Expression()
s["e"].setExpression( "parent[\"m\"][\"op1\"] = int( context[\"frame\"] )" )
w = GafferUI.NumericPlugValueWidget( s["m"]["op1"] )
self.assertTrue( w.getContext().isSame( s.context() ) )
s.context().setFrame( 10 )
self.assertEqual( w.numericWidget().getValue(), 10 )
context = Gaffer.Context()
context.setFrame( 20 )
w.setContext( context )
self.assertTrue( w.getContext().isSame( context ) )
self.assertEqual( w.numericWidget().getValue(), 20 )
def testDisableCreationForSpecificTypes( self ) :
class ValueWidgetTestPlug( Gaffer.ValuePlug ) :
def __init__( self, name="TestPlug", direction=Gaffer.Plug.Direction.In, flags=Gaffer.Plug.Flags.Default ) :
Gaffer.ValuePlug.__init__( self, name, direction, flags )
IECore.registerRunTimeTyped( ValueWidgetTestPlug )
n = Gaffer.Node()
n["p"] = ValueWidgetTestPlug()
w = GafferUI.PlugValueWidget.create( n["p"] )
self.assertIsInstance( w, GafferUI.ConnectionPlugValueWidget )
GafferUI.PlugValueWidget.registerType( ValueWidgetTestPlug, None )
w = GafferUI.PlugValueWidget.create( n["p"] )
self.assertEqual( w, None )
def testCreate( self ) :
n = Gaffer.Node()
n["p"] = Gaffer.IntPlug()
w = GafferUI.PlugValueWidget.create( n["p"] )
self.assertTrue( isinstance( w, GafferUI.NumericPlugValueWidget ) )
self.assertTrue( w.getPlug().isSame( n["p"] ) )
Gaffer.Metadata.registerValue( n["p"], "plugValueWidget:type", "GafferUI.ConnectionPlugValueWidget" )
w = GafferUI.PlugValueWidget.create( n["p"] )
self.assertTrue( isinstance( w, GafferUI.ConnectionPlugValueWidget ) )
self.assertTrue( w.getPlug().isSame( n["p"] ) )
def testPlugTypesMustMatch( self ) :
n = Gaffer.Node()
n["user"]["p1"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n["user"]["p2"] = Gaffer.StringPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
with six.assertRaisesRegex( self, ValueError, "Plugs have different types" ) :
GafferUI.NumericPlugValueWidget( n["user"].children() )
w = GafferUI.NumericPlugValueWidget( n["user"]["p1"] )
with six.assertRaisesRegex( self, ValueError, "Plugs have different types" ) :
w.setPlugs( n["user"].children() )
def testGetPlugWithMultiplePlugs( self ) :
n = Gaffer.Node()
n["user"]["p1"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n["user"]["p2"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
w = GafferUI.NumericPlugValueWidget( n["user"].children() )
self.assertRaises( GafferUI.PlugValueWidget.MultiplePlugsError, w.getPlug )
def testCreateThrowsIfMultipleWidgetCreators( self ) :
n = Gaffer.Node()
n["user"]["p1"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n["user"]["p2"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
Gaffer.Metadata.registerValue( n["user"]["p1"], "plugValueWidget:type", "GafferUI.ConnectionPlugValueWidget" )
with six.assertRaisesRegex( self, Exception, "Multiple widget creators" ) :
GafferUI.PlugValueWidget.create( n["user"].children() )
def testCreateSupportsLegacyWidgetsWithSinglePlugs( self ) :
	"""create() remains usable with widget classes whose constructors
	only accept a single plug (the pre-multi-plug API)."""

	# A widget that rejects anything other than a single Plug, emulating
	# a legacy single-plug-only constructor.
	class SinglePlugOnlyWidget( GafferUI.PlugValueWidget ) :

		def __init__( self, plug, **kw ) :

			GafferUI.PlugValueWidget.__init__( self, GafferUI.TextWidget(), plug, **kw )

			if not isinstance( plug, Gaffer.Plug ) :
				raise GafferUI.PlugValueWidget.MultiplePlugsError()

	class LegacyWidgetTestPlug( Gaffer.ValuePlug ) :

		pass

	IECore.registerRunTimeTyped( LegacyWidgetTestPlug )
	GafferUI.PlugValueWidget.registerType( LegacyWidgetTestPlug, SinglePlugOnlyWidget )

	n = Gaffer.Node()
	n["user"]["p1"] = LegacyWidgetTestPlug()
	n["user"]["p2"] = LegacyWidgetTestPlug()

	# A single plug works, whether passed directly or as a one-element set.
	self.assertIsInstance( GafferUI.PlugValueWidget.create( n["user"]["p1"] ), SinglePlugOnlyWidget )
	self.assertIsInstance( GafferUI.PlugValueWidget.create( { n["user"]["p1"] } ), SinglePlugOnlyWidget )

	# Multiple plugs propagate the widget's MultiplePlugsError.
	with self.assertRaises( GafferUI.PlugValueWidget.MultiplePlugsError ) :
		GafferUI.PlugValueWidget.create( { n["user"]["p1"], n["user"]["p2"] } )
def testAcquire( self ) :
	"""acquire() returns the existing widget for a plug rather than
	creating a new one each call."""

	script = Gaffer.ScriptNode()
	script["n"] = Gaffer.Node()
	script["n"]["p"] = Gaffer.IntPlug()

	# Hold a reference to the ScriptWindow to make sure it stays alive.
	scriptWindow = GafferUI.ScriptWindow.acquire( script )

	widget = GafferUI.PlugValueWidget.acquire( script["n"]["p"] )
	self.assertIsInstance( widget, GafferUI.NumericPlugValueWidget )
	self.assertTrue( widget.getPlug().isSame( script["n"]["p"] ) )
	# Acquiring again yields the very same widget instance.
	self.assertTrue( GafferUI.PlugValueWidget.acquire( script["n"]["p"] ) is widget )

	plugWidget = GafferUI.PlugWidget.acquire( script["n"]["p"] )
	self.assertIsInstance( plugWidget, GafferUI.PlugWidget )
	self.assertTrue( plugWidget.plugValueWidget() is widget )
	self.assertTrue( GafferUI.PlugWidget.acquire( script["n"]["p"] ) is plugWidget )
def testContextSensitivePresets( self ) :
	"""Preset metadata registered as callables is evaluated in the current
	Context, so the preset menu can vary with the script's variables."""

	def presetNames( plug ) :

		c = Gaffer.Context.current()
		return IECore.StringVectorData(
			[ k for k in c.keys() if k.startswith( "preset" ) ]
		)

	def presetValues( plug ) :

		c = Gaffer.Context.current()
		return IECore.IntVectorData(
			[ c[k] for k in Gaffer.Context.current().keys() if k.startswith( "preset" ) ]
		)

	# NOTE : these registrations are removed again in tearDown().
	Gaffer.Metadata.registerValue( GafferTest.AddNode, "op1", "presetNames", presetNames )
	Gaffer.Metadata.registerValue( GafferTest.AddNode, "op1", "presetValues", presetValues )

	script = Gaffer.ScriptNode()
	# The script variables provide the context entries the callables read.
	script["variables"]["presetOne"] = Gaffer.NameValuePlug( "presetOne", 1 )
	script["variables"]["presetTwo"] = Gaffer.NameValuePlug( "presetTwo", 2 )
	script["n"] = GafferTest.AddNode()

	widget = GafferUI.PlugValueWidget.create( script["n"]["op1"] )
	self.assertIsInstance( widget, GafferUI.NumericPlugValueWidget )

	# The popup menu exposes exactly the context-derived presets.
	menu = widget._popupMenuDefinition().item( "/Preset" ).subMenu()
	self.assertEqual( { "/presetOne", "/presetTwo" }, { k for k, v in menu.items() } )

	# Invoking a preset item applies the context-evaluated value to the plug.
	menu.item( "/presetOne" ).command()
	self.assertEqual( script["n"]["op1"].getValue(), 1 )
	menu.item( "/presetTwo" ).command()
	self.assertEqual( script["n"]["op1"].getValue(), 2 )
def tearDown( self ) :

	GafferUITest.TestCase.tearDown( self )

	# Remove the preset registrations made by testContextSensitivePresets,
	# so state doesn't leak between tests.
	Gaffer.Metadata.deregisterValue( GafferTest.AddNode, "op1", "presetNames" )
	Gaffer.Metadata.deregisterValue( GafferTest.AddNode, "op1", "presetValues" )
if __name__ == "__main__":
	# Allow the test file to be run directly.
	unittest.main()
| |
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for reading and parsing files in a variety of formats."""
import json
import os
import tempfile
import yaml
from googleapiclient.errors import HttpError
from google.cloud.forseti.common.gcp_api import storage
from google.cloud.forseti.common.util import errors as util_errors
from google.cloud.forseti.common.util import logger
LOGGER = logger.get_logger(__name__)
def read_and_parse_file(file_path):
    """Parse a json or yaml formatted file from a local path or GCS.

    Args:
        file_path (str): The full path to the file to read and parse.

    Returns:
        dict: The results of parsing the file.
    """
    path = file_path.strip()
    # Anything under the gs:// scheme is fetched through the Storage API;
    # everything else is treated as a local filesystem path.
    if path.startswith('gs://'):
        return _read_file_from_gcs(path)
    return _read_file_from_local(path)
def copy_file_from_gcs(file_path, output_path=None, storage_client=None):
    """Copy file from GCS to local file.

    Args:
        file_path (str): The full GCS path to the file.
        output_path (str): The local file to copy to, if not set creates a
            temporary file.
        storage_client (storage.StorageClient): The Storage API Client to use
            for downloading the file using the API.

    Returns:
        str: The output_path the file was copied to.
    """
    client = storage_client or storage.StorageClient({})

    if not output_path:
        handle, output_path = tempfile.mkstemp()
        # mkstemp returns an open OS-level descriptor; close it immediately
        # so the handle is not leaked (we reopen the path below).
        os.close(handle)

    with open(output_path, mode='wb') as out_file:
        client.download(full_bucket_path=file_path, output_file=out_file)

    return output_path
def isfile(file_path):
    """Determine if the file is an existing file.

    Args:
        file_path (str): The local path or full GCS path to the file.

    Returns:
        bool: Whether or not the file exists.

    Raises:
        HttpError: If the response of the API is not 200 or 404.
    """
    if not file_path.startswith('gs://'):
        return os.path.isfile(file_path)

    try:
        # Existence on GCS is probed by attempting to read the object.
        read_and_parse_file(file_path)
    except HttpError as http_error:
        if http_error.resp.status == 404:
            return False
        # Unexpected API error: re-raise. A bare `raise` (rather than
        # `raise http_error`) is the idiomatic re-raise and keeps the
        # original traceback intact on all Python versions.
        raise
    return True
def access(file_path):
    """Determine if the file is accessible.

    Args:
        file_path (str): The local path or full GCS path to the file.

    Returns:
        bool: Whether or not the file is accessible.
    """
    if not file_path.startswith('gs://'):
        # Local files: readable means accessible.
        return os.access(file_path, os.R_OK)

    # GCS files: accessibility is probed by attempting to read the object.
    try:
        read_and_parse_file(file_path)
    except HttpError as http_error:
        LOGGER.error('Unable to read %s due to %s', file_path, http_error)
        return False
    return True
def _get_filetype_parser(file_path, parser_type):
    """Return a parser function for parsing the file.

    Args:
        file_path (str): The file path.
        parser_type (str): The file parser type ('string' or 'file').

    Returns:
        function: The parser function.

    Raises:
        InvalidFileExtensionError: If the extension is not json or yaml.
        InvalidParserTypeError: If parser_type is not a known parser kind.
    """
    # Dispatch table keyed first on file extension, then on parser kind.
    handlers = {
        'json': {'string': _parse_json_string, 'file': _parse_json_file},
        'yaml': {'string': _parse_yaml, 'file': _parse_yaml},
    }

    extension = file_path.split('.')[-1]
    if extension not in handlers:
        raise util_errors.InvalidFileExtensionError(
            'Unsupported file type: {}'.format(extension))

    if parser_type not in handlers[extension]:
        raise util_errors.InvalidParserTypeError(
            'Unsupported parser type: {}'.format(parser_type))

    return handlers[extension][parser_type]
def _read_file_from_gcs(file_path, storage_client=None):
    """Load file from GCS.

    Args:
        file_path (str): The GCS path to the file.
        storage_client (storage.StorageClient): The Storage API Client to use
            for downloading the file using the API.

    Returns:
        dict: The parsed dict from the loaded file.
    """
    client = storage_client or storage.StorageClient({})

    # Download the whole object as text, then parse it by extension.
    content = client.get_text_file(full_bucket_path=file_path)
    parse = _get_filetype_parser(file_path, 'string')
    return parse(content)
def _read_file_from_local(file_path):
    """Load rules file from local path.

    Args:
        file_path (str): The path to the file.

    Returns:
        dict: The parsed dict from the loaded file.
    """
    with open(os.path.abspath(file_path), 'r') as rules_file:
        # Resolve the parser after opening, so a missing file surfaces as an
        # I/O error before any extension validation happens.
        parse = _get_filetype_parser(file_path, 'file')
        return parse(rules_file)
def _parse_json_string(data):
    """Parse the data from a string of json.

    Args:
        data (str): String data to parse into json.

    Returns:
        dict: The json string successfully parsed into a dict.

    Raises:
        ValueError: If there was an error parsing the data.
    """
    # json.loads already raises ValueError (json.JSONDecodeError) on
    # malformed input; the previous try/except that re-raised the same
    # exception added nothing, so it was removed.
    return json.loads(data)
def _parse_json_file(data):
    """Parse the data from a json file.

    Args:
        data (filepointer): File-like object containing a Json document,
            to be parsed into json.

    Returns:
        dict: The file successfully parsed into a dict.

    Raises:
        ValueError: If there was an error parsing the file.
    """
    # json.load already raises ValueError on malformed input; the previous
    # try/except that re-raised the same exception added nothing.
    return json.load(data)
def _parse_yaml(data):
    """Parse yaml data.

    Args:
        data (stream): A yaml data stream to parse.

    Returns:
        dict: The stream successfully parsed into a dict.

    Raises:
        YAMLError: If there was an error parsing the stream.
    """
    try:
        parsed = yaml.safe_load(data)
    except yaml.YAMLError as err:
        # Log with full traceback before propagating to the caller.
        LOGGER.exception(err)
        raise err
    return parsed
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-interfaces - based on the path /interfaces/interface/subinterfaces/subinterface/ipv6/addresses/address/vrrp/vrrp-group/interface-tracking/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Operational state data for VRRP interface tracking
    """

    # Slots avoid a per-instance __dict__; the double-underscore entries are
    # the private storage for the two leaf members below.
    __slots__ = (
        "_path_helper", "_extmethods", "__track_interface", "__priority_decrement"
    )

    _yang_name = "state"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        # Both leaves are wrapped in YANGDynClass so assignments are
        # validated against their YANG types (leafref / restricted uint8).
        self._path_helper = False

        self._extmethods = False
        self.__track_interface = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="track-interface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/interfaces/ip",
            defining_module="openconfig-if-ip",
            yang_type="leafref",
            is_config=False,
        )
        self.__priority_decrement = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                restriction_dict={"range": ["0..254"]},
            ),
            default=RestrictedClassType(
                base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
            )(
                0
            ),
            is_leaf=True,
            yang_name="priority-decrement",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/interfaces/ip",
            defining_module="openconfig-if-ip",
            yang_type="uint8",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        if args:
            # A single object with matching attributes may be used to
            # initialise this container (copy-construction).
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Only copy elements that differ from their defaults.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Path within the YANG data tree; delegates to the parent when
        # this container is attached to one.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "interfaces",
                "interface",
                "subinterfaces",
                "subinterface",
                "ipv6",
                "addresses",
                "address",
                "vrrp",
                "vrrp-group",
                "interface-tracking",
                "state",
            ]

    def _get_track_interface(self):
        """
        Getter method for track_interface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/addresses/address/vrrp/vrrp_group/interface_tracking/state/track_interface (leafref)

        YANG Description: Sets an interface that should be
        tracked for up/down events to dynamically change the
        priority state of the VRRP group, and potentially
        change the mastership if the tracked interface going
        down lowers the priority sufficiently
        """
        return self.__track_interface

    def _set_track_interface(self, v, load=False):
        """
        Setter method for track_interface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/addresses/address/vrrp/vrrp_group/interface_tracking/state/track_interface (leafref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_track_interface is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_track_interface() directly.

        YANG Description: Sets an interface that should be
        tracked for up/down events to dynamically change the
        priority state of the VRRP group, and potentially
        change the mastership if the tracked interface going
        down lowers the priority sufficiently
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Re-wrap the supplied value so it is validated as a leafref.
            t = YANGDynClass(
                v,
                base=six.text_type,
                is_leaf=True,
                yang_name="track-interface",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/interfaces/ip",
                defining_module="openconfig-if-ip",
                yang_type="leafref",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """track_interface must be of a type compatible with leafref""",
                    "defined-type": "leafref",
                    "generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="track-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='leafref', is_config=False)""",
                }
            )

        self.__track_interface = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_track_interface(self):
        # Restore the leaf to a fresh (unset) instance.
        self.__track_interface = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="track-interface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/interfaces/ip",
            defining_module="openconfig-if-ip",
            yang_type="leafref",
            is_config=False,
        )

    def _get_priority_decrement(self):
        """
        Getter method for priority_decrement, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/addresses/address/vrrp/vrrp_group/interface_tracking/state/priority_decrement (uint8)

        YANG Description: Set the value to subtract from priority when
        the tracked interface goes down
        """
        return self.__priority_decrement

    def _set_priority_decrement(self, v, load=False):
        """
        Setter method for priority_decrement, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/addresses/address/vrrp/vrrp_group/interface_tracking/state/priority_decrement (uint8)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_priority_decrement is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_priority_decrement() directly.

        YANG Description: Set the value to subtract from priority when
        the tracked interface goes down
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Validate against the restricted uint8 range (0..254).
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=int,
                        restriction_dict={"range": ["0..255"]},
                        int_size=8,
                    ),
                    restriction_dict={"range": ["0..254"]},
                ),
                default=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                )(
                    0
                ),
                is_leaf=True,
                yang_name="priority-decrement",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/interfaces/ip",
                defining_module="openconfig-if-ip",
                yang_type="uint8",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """priority_decrement must be of a type compatible with uint8""",
                    "defined-type": "uint8",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..254']}), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8)(0), is_leaf=True, yang_name="priority-decrement", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint8', is_config=False)""",
                }
            )

        self.__priority_decrement = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_priority_decrement(self):
        # Restore the leaf to its default (0).
        self.__priority_decrement = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                restriction_dict={"range": ["0..254"]},
            ),
            default=RestrictedClassType(
                base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
            )(
                0
            ),
            is_leaf=True,
            yang_name="priority-decrement",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/interfaces/ip",
            defining_module="openconfig-if-ip",
            yang_type="uint8",
            is_config=False,
        )

    # Read-only public properties (this container is config: false).
    track_interface = __builtin__.property(_get_track_interface)
    priority_decrement = __builtin__.property(_get_priority_decrement)

    _pyangbind_elements = OrderedDict(
        [
            ("track_interface", track_interface),
            ("priority_decrement", priority_decrement),
        ]
    )
| |
from sympy import (meijerg, I, S, integrate, Integral, oo, gamma,
hyperexpand, exp, simplify, sqrt, pi, erf, sin, cos,
exp_polar, polar_lift, polygamma, hyper, log, expand_func)
from sympy.integrals.meijerint import (_rewrite_single, _rewrite1,
meijerint_indefinite, _inflate_g, _create_lookup_table,
meijerint_definite, meijerint_inversion)
from sympy.utilities.randtest import (test_numerically,
random_complex_number as randcplx)
from sympy.abc import x, y, a, b, c, d, s, t, z
def test_rewrite_single():
    """Check _rewrite_single rewrites expressions as G-functions of c*x**m."""
    def t(expr, c, m):
        # The rewritten G-function's argument must factor as c*x**m.
        e = _rewrite_single(meijerg([a], [b], [c], [d], expr), x)
        assert e is not None
        assert isinstance(e[0][0][2], meijerg)
        assert e[0][0][2].argument.as_coeff_mul(x) == (c, (m,))

    def tn(expr):
        # Arguments not of the form c*x**m cannot be rewritten.
        assert _rewrite_single(meijerg([a], [b], [c], [d], expr), x) is None
    t(x, 1, x)
    t(x**2, 1, x**2)
    t(x**2 + y*x**2, y + 1, x**2)
    tn(x**2 + x)
    tn(x**y)

    def u(expr, x):
        # Numerically verify that the sum of rewritten terms equals expr.
        from sympy import Add, exp, exp_polar
        r = _rewrite_single(expr, x)
        e = Add(*[res[0]*res[2] for res in r[0]]).replace(exp_polar, exp)  # XXX Hack?
        assert test_numerically(e, expr, x)
    u(exp(-x)*sin(x), x)

    # The following has stopped working because hyperexpand changed slightly.
    # It is probably not worth fixing
    #u(exp(-x)*sin(x)*cos(x), x)

    # This one cannot be done numerically, since it comes out as a g-function
    # of argument 4*pi
    # NOTE This also tests a bug in inverse mellin transform (which used to
    # turn exp(4*pi*I*t) into a factor of exp(4*pi*I)**t instead of
    # exp_polar).
    #u(exp(x)*sin(x), x)
    assert _rewrite_single(exp(x)*sin(x), x) == \
        ([(-sqrt(2)/(2*sqrt(pi)), 0,
           meijerg(((-S(1)/2, 0, S(1)/4, S(1)/2, S(3)/4), (1,)),
                   ((), (-S(1)/2, 0)), 64*exp_polar(-4*I*pi)/x**4))], True)
def test_rewrite1():
    """_rewrite1 splits off the constant factor and the power of x."""
    assert _rewrite1(x**3*meijerg([a], [b], [c], [d], x**2 + y*x**2)*5, x) \
        == (5, x**3, [(1, 0, meijerg([a], [b], [c], [d], x**2*(y + 1)))], \
            True)
def test_meijerint_indefinite_numerically():
    """Numerically verify that d/dx of meijerint_indefinite(g) equals g."""
    def t(fac, arg):
        g = meijerg([a], [b], [c], [d], arg)*fac
        # Random small parameters keep evaluation on the principal branch.
        subs = {a: randcplx()/10, b: randcplx()/10 + I,
                c: randcplx(), d: randcplx()}
        integral = meijerint_indefinite(g, x)
        assert integral is not None
        assert test_numerically(g.subs(subs), integral.diff(x).subs(subs), x)
    t(1, x)
    t(2, x)
    t(1, 2*x)
    t(1, x**2)
    t(5, x**S('3/2'))
    t(x**3, x)
    t(3*x**S('3/2'), 4*x**S('7/3'))
def test_inflate():
    """Numerically check that _inflate_g preserves the G-function's value."""
    subs = {a: randcplx()/10, b: randcplx()/10 + I, c: randcplx(),
            d: randcplx(), y: randcplx()/10}

    def t(a, b, arg, n):
        from sympy import Mul
        m1 = meijerg(a, b, arg)
        m2 = Mul(*_inflate_g(m1, n))
        # NOTE: (the random number)**9 must still be on the principal sheet.
        # Thus make b&d small to create random numbers of small imaginary part.
        return test_numerically(m1.subs(subs), m2.subs(subs), x, b=0.1, d=-0.1)
    assert t([[a], [b]], [[c], [d]], x, 3)
    assert t([[a, y], [b]], [[c], [d]], x, 3)
    assert t([[a], [b]], [[c, y], [d]], 2*x**3, 3)
def test_recursive():
    """Gaussian product integrals that require recursive mellin transforms."""
    from sympy import symbols, exp_polar, expand
    a, b, c = symbols('a b c', positive=True)
    assert simplify(integrate(exp(-(x - a)**2)*exp(-(x - b)**2), (x, 0, oo))) \
        == sqrt(2*pi)/4*(1 + erf(sqrt(2)/2*(a + b))) \
        *exp(-a**2 - b**2 + (a + b)**2/2)
    assert simplify(integrate
                    (exp(-(x - a)**2)*exp(-(x - b)**2)*exp(c*x), (x, 0, oo))) \
        == sqrt(2*pi)/4*(1 + erf(sqrt(2)/4*(2*a + 2*b + c))) \
        *exp(-a**2 - b**2 + (2*a + 2*b + c)**2/8)
    # Shifted Gaussians over the half line reduce to erf expressions.
    assert simplify(integrate(exp(-(x - a - b - c)**2), (x, 0, oo))) \
        == sqrt(pi)/2*(1 + erf(a + b + c))
    assert simplify(integrate(exp(-(x + a + b + c)**2), (x, 0, oo))) \
        == sqrt(pi)/2*(1 - erf(a + b + c))
def test_meijerint():
    """Assorted regression tests for meijerint_definite/meijerint_indefinite
    and the meijerg=True path of integrate()."""
    from sympy import symbols, expand, arg
    s, t, mu = symbols('s t mu', real=True)
    assert integrate(meijerg([], [], [0], [], s*t)
                     *meijerg([], [], [mu/2], [-mu/2], t**2/4),
                     (t, 0, oo)).is_Piecewise
    s = symbols('s', positive=True)
    assert integrate(x**s*meijerg([[], []], [[0], []], x), (x, 0, oo)) \
        == gamma(s + 1)
    assert integrate(x**s*meijerg([[], []], [[0], []], x), (x, 0, oo),
                     meijerg=True) == gamma(s + 1)
    assert isinstance(integrate(x**s*meijerg([[], []], [[0], []], x),
                                (x, 0, oo), meijerg=False),
                      Integral)

    assert meijerint_indefinite(exp(x), x) == exp(x)

    # TODO what simplifications should be done automatically?
    # This tests "extra case" for antecedents_1.
    a, b = symbols('a b', positive=True)
    assert simplify(meijerint_definite(x**a, x, 0, b)[0]) \
        == b**(a + 1)/(a + 1)

    # This tests various conditions and expansions.
    # (BUGFIX: this line previously lacked the `assert`, so the comparison
    # result was silently discarded and the case was never checked.)
    assert meijerint_definite((x + 1)**3*exp(-x), x, 0, oo) == (16, True)

    # Again, how about simplifications?
    sigma, mu = symbols('sigma mu', positive=True)
    i, c = meijerint_definite(exp(-((x - mu)/(2*sigma))**2), x, 0, oo)
    assert simplify(i) \
        == sqrt(pi)*sigma*(erf(mu/(2*sigma)) + 1)
    assert c is True

    i, _ = meijerint_definite(exp(-mu*x)*exp(sigma*x), x, 0, oo)
    # TODO it would be nice to test the condition
    assert simplify(i) == 1/(mu - sigma)

    # Test substitutions to change limits
    assert meijerint_definite(exp(x), x, -oo, 2) == (exp(2), True)
    assert expand(meijerint_definite(exp(x), x, 0, I)[0]) == exp(I) - 1
    assert expand(meijerint_definite(exp(-x), x, 0, x)[0]) == \
        1 - exp(-exp(I*arg(x))*abs(x))

    # Test -oo to oo
    assert meijerint_definite(exp(-x**2), x, -oo, oo) == (sqrt(pi), True)
    assert meijerint_definite(exp(-abs(x)), x, -oo, oo) == (2, True)
    assert meijerint_definite(exp(-(2*x - 3)**2), x, -oo, oo) == \
        (sqrt(pi)/2, True)
    assert meijerint_definite(exp(-abs(2*x - 3)), x, -oo, oo) == (1, True)
    assert meijerint_definite(exp(-((x - mu)/sigma)**2/2)/sqrt(2*pi*sigma**2),
                              x, -oo, oo) == (1, True)

    # Test one of the extra conditions for 2 g-functions
    assert meijerint_definite(exp(-x)*sin(x), x, 0, oo) == (S(1)/2, True)

    # Test a bug
    def res(n):
        return (1/(1 + x**2)).diff(x, n).subs(x, 1)*(-1)**n
    for n in range(6):
        assert integrate(exp(-x)*sin(x)*x**n, (x, 0, oo), meijerg=True) == \
            res(n)

    # This used to test trigexpand... now it is done by linear substitution
    assert simplify(integrate(exp(-x)*sin(x + a), (x, 0, oo), meijerg=True)
                    ).expand().rewrite(sin).expand() == sin(a)/2 + cos(a)/2

    # Test the condition 14 from prudnikov.
    # (This is besselj*besselj in disguise, to stop the product from being
    # recognised in the tables.)
    a, b, s = symbols('a b s')
    from sympy import And, re
    assert meijerint_definite(meijerg([], [], [a/2], [-a/2], x/4) \
                              *meijerg([], [], [b/2], [-b/2], x/4)*x**(s - 1), x, 0, oo) == \
        (4*2**(2*s - 2)*gamma(-2*s + 1)*gamma(a/2 + b/2 + s) \
         /(gamma(-a/2 + b/2 - s + 1)*gamma(a/2 - b/2 - s + 1) \
           *gamma(a/2 + b/2 - s + 1)),
         And(0 < -2*re(4*s) + 8, 0 < re(a/2 + b/2 + s), re(2*s) < 1))

    # test a bug
    assert integrate(sin(x**a)*sin(x**b), (x, 0, oo), meijerg=True) == \
        Integral(sin(x**a)*sin(x**b), (x, 0, oo))

    # test better hyperexpand
    assert integrate(exp(-x**2)*log(x), (x, 0, oo), meijerg=True) == \
        (sqrt(pi)*polygamma(0, S(1)/2)/4).expand()

    # Test hyperexpand bug.
    from sympy import lowergamma
    n = symbols('n', integer=True)
    assert simplify(integrate(exp(-x)*x**n, x, meijerg=True)) == \
        lowergamma(n + 1, x)

    # Test a bug with argument 1/x
    alpha = symbols('alpha', positive=True)
    assert meijerint_definite((2 - x)**alpha*sin(alpha/x), x, 0, 2) == \
        (sqrt(pi)*gamma(alpha + 1) \
         *meijerg([S(1)/2, 0, S(1)/2], [1], [],
                  [-alpha/2, -alpha/2 - S(1)/2], 16/alpha**2), True)

    # test a bug related to 3016
    a, s = symbols('a s', positive=True)
    assert simplify(integrate(x**s*exp(-a*x**2), (x, -oo, oo))) == \
        a**(-s/2 - S(1)/2)*(exp(I*pi*s) + 1)*gamma(s/2 + S(1)/2)/2
def test_bessel():
    """Definite and indefinite integrals involving Bessel functions."""
    from sympy import (besselj, Heaviside, besseli, polar_lift, exp_polar,
                       powdenest)
    assert simplify(integrate(besselj(a, z)*besselj(b, z)/z, (z, 0, oo),
                              meijerg=True, conds='none')) == \
        2*sin(pi*(a/2 - b/2))/(pi*(a - b)*(a + b))
    assert simplify(integrate(besselj(a, z)*besselj(a, z)/z, (z, 0, oo),
                              meijerg=True, conds='none')) == 1/(2*a)

    # TODO more orthogonality integrals

    assert simplify(integrate(sin(z*x)*(x**2 - 1)**(-(y + S(1)/2)),
                              (x, 1, oo), meijerg=True, conds='none')
                    *2/((z/2)**y*sqrt(pi)*gamma(S(1)/2 - y))) == \
        besselj(y, z)

    # Werner Rosenheinrich
    # SOME INDEFINITE INTEGRALS OF BESSEL FUNCTIONS

    assert integrate(x*besselj(0, x), x, meijerg=True) == x*besselj(1, x)
    assert integrate(x*besseli(0, x), x, meijerg=True) == x*besseli(1, x)
    # TODO can do higher powers, but come out as high order ... should they be
    # reduced to order 0, 1?
    assert integrate(besselj(1, x), x, meijerg=True) == -besselj(0, x)
    assert integrate(besselj(1, x)**2/x, x, meijerg=True) == \
        -(besselj(0, x)**2 + besselj(1, x)**2)/2

    # TODO more besseli when tables are extended or recursive mellin works
    assert integrate(besselj(0, x)**2/x**2, x, meijerg=True) == \
        -2*x*besselj(0, x)**2 - 2*x*besselj(1, x)**2 \
        + 2*besselj(0, x)*besselj(1, x) - besselj(0, x)**2/x
    assert integrate(besselj(0, x)*besselj(1, x), x, meijerg=True) == \
        -besselj(0, x)**2/2
    assert integrate(x**2*besselj(0, x)*besselj(1, x), x, meijerg=True) == \
        x**2*besselj(1, x)**2/2
    assert integrate(besselj(0, x)*besselj(1, x)/x, x, meijerg=True) == \
        (x*besselj(0, x)**2 + x*besselj(1, x)**2 - \
         besselj(0, x)*besselj(1, x))
    # TODO how does besselj(0, a*x)*besselj(0, b*x) work?
    # TODO how does besselj(0, x)**2*besselj(1, x)**2 work?
    # TODO sin(x)*besselj(0, x) etc come out a mess
    # TODO can x*log(x)*besselj(0, x) be done?
    # TODO how does besselj(1, x)*besselj(0, x+a) work?
    # TODO more indefinite integrals when struve functions etc are implemented

    # test a substitution
    assert integrate(besselj(1, x**2)*x, x, meijerg=True) == \
        -besselj(0, x**2)/2
def test_inversion():
    """Inverse Laplace transforms via meijerint_inversion."""
    from sympy import piecewise_fold, besselj, sqrt, I, sin, cos, Heaviside

    def inv(f):
        return piecewise_fold(meijerint_inversion(f, s, t))
    assert inv(1/(s**2 + 1)) == sin(t)*Heaviside(t)
    assert inv(s/(s**2 + 1)) == cos(t)*Heaviside(t)
    assert inv(exp(-s)/s) == Heaviside(t - 1)
    assert inv(1/sqrt(1 + s**2)) == besselj(0, t)*Heaviside(t)

    # Test some antecedents checking: transforms that do not converge
    # must return None rather than a wrong answer.
    assert meijerint_inversion(sqrt(s)/sqrt(1 + s**2), s, t) is None
    assert inv(exp(s**2)) is None
    assert meijerint_inversion(exp(-s**2), s, t) is None
def test_lookup_table():
    """Spot-check every entry of the meijerint lookup table numerically."""
    from random import uniform, randrange
    from sympy import Add, unpolarify, exp_polar, exp
    from sympy.integrals.meijerint import z as z_dummy
    table = {}
    _create_lookup_table(table)
    for _, l in sorted(table.items()):
        for formula, terms, cond, hint in sorted(l):
            subs = {}
            for a in list(formula.free_symbols) + [z_dummy]:
                if hasattr(a, 'properties') and a.properties:
                    # these Wilds match positive integers
                    subs[a] = randrange(1, 10)
                else:
                    subs[a] = uniform(1.5, 3.5)
            if not isinstance(terms, list):
                terms = terms(subs)

            # First test that hyperexpand can do this.
            expanded = [hyperexpand(g) for (_, g) in terms]
            assert all (x.is_Piecewise or not x.has(meijerg) for x in expanded)

            # Now test that the meijer g-function is indeed as advertised.
            expanded = Add(*[f*x for (f, x) in terms])
            a, b = formula.n(subs=subs), expanded.n(subs=subs)
            # Compare with a relative tolerance when the magnitude allows it.
            r = min(abs(a), abs(b))
            if r < 1:
                assert abs(a - b).n() <= 1e-10
            else:
                assert (abs(a - b)/r).n() <= 1e-10
def test_branch_bug():
    """Regression test for a branch-cut bug in integrate(erf(x**3))."""
    from sympy import powdenest, lowergamma
    # TODO combsimp cannot prove that the factor is unity
    assert powdenest(integrate(erf(x**3), x, meijerg=True).diff(x),
                     polar=True) == 2*erf(x**3)*gamma(S(2)/3)/3/gamma(S(5)/3)
    assert integrate(erf(x**3), x, meijerg=True) == \
        2*x*erf(x**3)*gamma(S(2)/3)/(3*gamma(S(5)/3)) \
        - 2*gamma(S(2)/3)*lowergamma(S(2)/3, x**6)/(3*sqrt(pi)*gamma(S(5)/3))
def test_linear_subs():
    """Indefinite integration handles linear substitutions of the argument."""
    from sympy import besselj
    assert integrate(sin(x - 1), x, meijerg=True) == -cos(1 - x)
    assert integrate(besselj(1, x - 1), x, meijerg=True) == -besselj(0, 1 - x)
def test_probability():
# various integrals from probability theory
from sympy.abc import x, y, z
from sympy import symbols, Symbol, Abs, expand_mul, combsimp, powsimp, sin
mu1, mu2 = symbols('mu1 mu2', real=True, finite=True, bounded=True)
sigma1, sigma2 = symbols('sigma1 sigma2', real=True, finite=True,
bounded=True, positive=True)
rate = Symbol('lambda', real=True, positive=True, bounded=True)
def normal(x, mu, sigma):
return 1/sqrt(2*pi*sigma**2)*exp(-(x - mu)**2/2/sigma**2)
def exponential(x, rate):
return rate*exp(-rate*x)
assert integrate(normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) == 1
assert integrate(x*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) == \
mu1
assert integrate(x**2*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) \
== mu1**2 + sigma1**2
assert integrate(x**3*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) \
== mu1**3 + 3*mu1*sigma1**2
assert integrate(normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == 1
assert integrate(x*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == mu1
assert integrate(y*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == mu2
assert integrate(x*y*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == mu1*mu2
assert integrate((x + y + 1)*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == 1 + mu1 + mu2
assert integrate((x + y - 1)*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == \
-1 + mu1 + mu2
i = integrate(x**2*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True)
assert not i.has(Abs)
assert simplify(i) == mu1**2 + sigma1**2
assert integrate(y**2*normal(x, mu1, sigma1)*normal(y, mu2, sigma2),
(x, -oo, oo), (y, -oo, oo), meijerg=True) == \
sigma2**2 + mu2**2
assert integrate(exponential(x, rate), (x, 0, oo), meijerg=True) == 1
assert integrate(x*exponential(x, rate), (x, 0, oo), meijerg=True) == \
1/rate
assert integrate(x**2*exponential(x, rate), (x, 0, oo), meijerg=True) \
== 2/rate**2
def E(expr):
res1 = integrate(expr*exponential(x, rate)*normal(y, mu1, sigma1),
(x, 0, oo), (y, -oo, oo), meijerg=True)
res2 = integrate(expr*exponential(x, rate)*normal(y, mu1, sigma1),
(y, -oo, oo), (x, 0, oo), meijerg=True)
assert expand_mul(res1) == expand_mul(res2)
return res1
assert E(1) == 1
assert E(x*y) == mu1/rate
assert E(x*y**2) == mu1**2/rate + sigma1**2/rate
ans = (rate**2*sigma1**2 + 1)/rate**2
assert simplify(E((x + y + 1)**2) - E(x + y + 1)**2) == ans
assert simplify(E((x + y - 1)**2) - E(x + y - 1)**2) == ans
assert simplify(E((x + y)**2) - E(x + y)**2) == ans
# Beta' distribution
alpha, beta = symbols('alpha beta', positive=True)
betadist = x**(alpha-1)*(1+x)**(-alpha - beta)*gamma(alpha + beta) \
/gamma(alpha)/gamma(beta)
assert integrate(betadist, (x, 0, oo), meijerg=True) == 1
i = integrate(x*betadist, (x, 0, oo), meijerg=True, conds='separate')
assert (combsimp(i[0]), i[1]) == (alpha/(beta - 1), 1 < beta)
j = integrate(x**2*betadist, (x, 0, oo), meijerg=True, conds='separate')
assert j[1] == (1 < beta - 1)
assert combsimp(j[0] - i[0]**2) == (alpha + beta - 1)*alpha \
/(beta - 2)/(beta - 1)**2
# Beta distribution
# NOTE: this is evaluated using antiderivatives. It also tests that
# meijerint_indefinite returns the simplest possible answer.
a, b = symbols('a b', positive=True)
betadist = x**(a - 1)*(-x + 1)**(b - 1)*gamma(a + b)/(gamma(a)*gamma(b))
assert simplify(integrate(betadist, (x, 0, 1), meijerg=True)) == 1
assert simplify(integrate(x*betadist, (x, 0, 1), meijerg=True)) == \
a/(a + b)
assert simplify(integrate(x**2*betadist, (x, 0, 1), meijerg=True)) == \
a*(a + 1)/(a + b)/(a + b + 1)
assert simplify(integrate(x**y*betadist, (x, 0, 1), meijerg=True)) == \
gamma(a + b)*gamma(a + y)/gamma(a)/gamma(a + b + y)
# Chi distribution
k = Symbol('k', integer=True, positive=True)
chi = 2**(1-k/2)*x**(k-1)*exp(-x**2/2)/gamma(k/2)
assert powsimp(integrate(chi, (x, 0, oo), meijerg=True)) == 1
assert simplify(integrate(x*chi, (x, 0, oo), meijerg=True)) == \
sqrt(2)*gamma((k + 1)/2)/gamma(k/2)
assert simplify(integrate(x**2*chi, (x, 0, oo), meijerg=True)) == k
# Chi^2 distribution
chisquared = 2**(-k/2)/gamma(k/2)*x**(k/2-1)*exp(-x/2)
assert powsimp(integrate(chisquared, (x, 0, oo), meijerg=True)) == 1
assert simplify(integrate(x*chisquared, (x, 0, oo), meijerg=True)) == k
assert simplify(integrate(x**2*chisquared, (x, 0, oo), meijerg=True)) == \
k*(k + 2)
assert combsimp(integrate(((x - k)/sqrt(2*k))**3*chisquared, (x, 0, oo),
meijerg=True)) == 2*sqrt(2)/sqrt(k)
# Dagum distribution
a, b, p = symbols('a b p', positive=True)
# XXX (x/b)**a does not work
dagum = a*p/x*(x/b)**(a*p)/(1 + x**a/b**a)**(p+1)
assert simplify(integrate(dagum, (x, 0, oo), meijerg=True)) == 1
# XXX conditions are a mess
arg = x*dagum
assert simplify(integrate(arg, (x, 0, oo), meijerg=True, conds='none')
) == b*gamma(1 - 1/a)*gamma(p + 1/a)/gamma(p)
assert simplify(integrate(x*arg, (x, 0, oo), meijerg=True, conds='none')
) == b**2*gamma(1 - 2/a)*gamma(p + 2/a)/gamma(p)
# F-distribution
d1, d2 = symbols('d1 d2', positive=True)
f = sqrt(((d1*x)**d1 * d2**d2)/(d1*x + d2)**(d1+d2))/x \
/gamma(d1/2)/gamma(d2/2)*gamma((d1 + d2)/2)
assert simplify(integrate(f, (x, 0, oo), meijerg=True)) == 1
# TODO conditions are a mess
assert simplify(integrate(x*f, (x, 0, oo), meijerg=True, conds='none')
) == d2/(d2 - 2)
assert simplify(integrate(x**2*f, (x, 0, oo), meijerg=True, conds='none')
) == d2**2*(d1 + 2)/d1/(d2 - 4)/(d2 - 2)
# TODO gamma, rayleigh
# inverse gaussian
lamda, mu = symbols('lamda mu', positive=True)
dist = sqrt(lamda/2/pi)*x**(-S(3)/2)*exp(-lamda*(x - mu)**2/x/2/mu**2)
mysimp = lambda expr: simplify(expr.rewrite(exp))
assert mysimp(integrate(dist, (x, 0, oo))) == 1
assert mysimp(integrate(x*dist, (x, 0, oo))) == mu
assert mysimp(integrate((x - mu)**2*dist, (x, 0, oo))) == mu**3/lamda
assert mysimp(integrate((x - mu)**3*dist, (x, 0, oo))) == 3*mu**5/lamda**2
# Levi
c = Symbol('c', positive=True)
assert integrate(sqrt(c/2/pi)*exp(-c/2/(x - mu))/(x - mu)**S('3/2'),
(x, mu, oo)) == 1
# higher moments oo
# log-logistic
distn = (beta/alpha)*x**(beta-1)/alpha**(beta-1)\
/(1 + x**beta/alpha**beta)**2
assert simplify(integrate(distn, (x, 0, oo))) == 1
# NOTE the conditions are a mess, but correctly state beta > 1
assert simplify(integrate(x*distn, (x, 0, oo), conds='none')) == \
pi*alpha/beta/sin(pi/beta)
# (similar comment for conditions applies)
assert simplify(integrate(x**y*distn, (x, 0, oo), conds='none')) == \
pi*alpha**y*y/beta/sin(pi*y/beta)
# weibull
k = Symbol('k', positive=True)
n = Symbol('n', positive=True)
distn = k/lamda*(x/lamda)**(k-1)*exp(-(x/lamda)**k)
assert simplify(integrate(distn, (x, 0, oo))) == 1
assert simplify(integrate(x**n*distn, (x, 0, oo))) == \
lamda**n*gamma(1 + n/k)
# rice distribution
from sympy import besseli
nu, sigma = symbols('nu sigma', positive=True)
rice = x/sigma**2*exp(-(x**2+ nu**2)/2/sigma**2)*besseli(0, x*nu/sigma**2)
assert integrate(rice, (x, 0, oo), meijerg=True) == 1
# can someone verify higher moments?
# Laplace distribution
mu = Symbol('mu', real=True)
b = Symbol('b', positive=True)
laplace = exp(-abs(x - mu)/b)/2/b
assert integrate(laplace, (x, -oo, oo), meijerg=True) == 1
assert integrate(x*laplace, (x, -oo, oo), meijerg=True) == mu
assert integrate(x**2*laplace, (x, -oo, oo), meijerg=True) == \
2*b**2 + mu**2
# TODO are there other distributions supported on (-oo, oo) that we can do?
# misc tests
k = Symbol('k', positive=True)
assert combsimp(expand_mul(integrate(log(x)*x**(k - 1)*exp(-x)/gamma(k),
(x, 0, oo)))) == polygamma(0, k)
def test_expint():
    """ Test various exponential integrals. """
    from sympy import (expint, unpolarify, Symbol, Ci, Si, Shi, Chi,
                       sin, cos, sinh, cosh, Ei)
    # Generic order: int_1^oo exp(-z*x)/x**y dx rewrites to expint(y, z).
    assert simplify(unpolarify(integrate(exp(-z*x)/x**y, (x, 1, oo),
                meijerg=True, conds='none'
                ).rewrite(expint).expand(func=True))) == expint(y, z)
    # Small concrete orders; n >= 2 is compared via a round trip through Ei.
    assert integrate(exp(-z*x)/x, (x, 1, oo), meijerg=True,
                     conds='none').rewrite(expint).expand() == \
        expint(1, z)
    assert integrate(exp(-z*x)/x**2, (x, 1, oo), meijerg=True,
                     conds='none').rewrite(expint).expand() == \
        expint(2, z).rewrite(Ei).rewrite(expint)
    assert integrate(exp(-z*x)/x**3, (x, 1, oo), meijerg=True,
                     conds='none').rewrite(expint).expand() == \
        expint(3, z).rewrite(Ei).rewrite(expint).expand()
    # Trigonometric/hyperbolic integrals Ci, Si, Shi over [t, oo) and [0, z].
    t = Symbol('t', positive=True)
    assert integrate(-cos(x)/x, (x, t, oo), meijerg=True).expand() == Ci(t)
    assert integrate(-sin(x)/x, (x, t, oo), meijerg=True).expand() == \
        Si(t) - pi/2
    assert integrate(sin(x)/x, (x, 0, z), meijerg=True) == Si(z)
    assert integrate(sinh(x)/x, (x, 0, z), meijerg=True) == Shi(z)
    # Indefinite forms.  NOTE(review): the I*pi constants presumably stem
    # from branch choices made by the meijerg algorithm -- confirm.
    assert integrate(exp(-x)/x, x, meijerg=True).expand().rewrite(expint) == \
        I*pi - expint(1, x)
    assert integrate(exp(-x)/x**2, x, meijerg=True).rewrite(expint).expand() \
        == expint(1, x) - exp(-x)/x - I*pi
    # On a polar symbol only the u-dependent part of the antiderivative is
    # compared (as_independent drops the integration constant).
    u = Symbol('u', polar=True)
    assert integrate(cos(u)/u, u, meijerg=True).expand().as_independent(u)[1] \
        == Ci(u)
    assert integrate(cosh(u)/u, u, meijerg=True).expand().as_independent(u)[1]\
        == Chi(u)
    # Antiderivatives of the special functions themselves.
    assert integrate(expint(1, x), x, meijerg=True
                     ).rewrite(expint).expand() == x*expint(1, x) - exp(-x)
    assert integrate(expint(2, x), x, meijerg=True
                     ).rewrite(expint).expand() == \
        -x**2*expint(1, x)/2 + x*exp(-x)/2 - exp(-x)/2
    assert simplify(unpolarify(integrate(expint(y,x), x,
                meijerg=True).rewrite(expint).expand(func=True))) == \
        -expint(y + 1, x)
    assert integrate(Si(x), x, meijerg=True) == x*Si(x) + cos(x)
    assert integrate(Ci(u), u, meijerg=True).expand() == u*Ci(u) - sin(u)
    assert integrate(Shi(x), x, meijerg=True) == x*Shi(x) - cosh(x)
    assert integrate(Chi(u), u, meijerg=True).expand() == u*Chi(u) - sinh(u)
    # Two definite integrals mixing expint/Si with elementary factors.
    assert integrate(Si(x)*exp(-x), (x, 0, oo), meijerg=True) == pi/4
    assert integrate(expint(1, x)*sin(x), (x, 0, oo), meijerg=True) == log(2)/2
def test_messy():
    """Integral-transform results whose closed forms (or conditions) are
    messy: Laplace/Fourier transforms of Si, Shi, Chi, besselj, and a few
    definite/indefinite integrals with Piecewise answers."""
    from sympy import (laplace_transform, Si, Ci, Shi, Chi, atan, Piecewise,
                       atanh, acoth, E1, besselj, acosh, asin, Ne, And, re,
                       fourier_transform, sqrt, Abs)
    # laplace_transform returns a (transform, abscissa, condition) triple.
    assert laplace_transform(Si(x), x, s) == ((-atan(s) + pi/2)/s, 0, True)
    assert laplace_transform(Shi(x), x, s) == (acoth(s)/s, 1, True)
    # where should the logs be simplified?
    assert laplace_transform(Chi(x), x, s) == \
        ((log(s**(-2)) - log((s**2 - 1)/s**2))/(2*s), 1, True)
    # TODO maybe simplify the inequalities?
    assert laplace_transform(besselj(a, x), x, s)[1:] == \
        (0, And(S(0) < re(a/2) + S(1)/2, S(0) < re(a/2) + 1))
    # NOTE s < 0 can be done, but argument reduction is not good enough yet
    assert fourier_transform(besselj(1, x)/x, x, s, noconds=False) == \
        (Piecewise((0, 1 < 4*abs(pi**2*s**2)),
                   (2*sqrt(-4*pi**2*s**2 + 1), True)), 0 < s)
    # TODO FT(besselj(0,x)) - conditions are messy (but for acceptable reasons)
    # - folding could be better
    assert integrate(E1(x)*besselj(0, x), (x, 0, oo), meijerg=True) \
        == log(1 + sqrt(2))
    assert integrate(E1(x)*besselj(1, x), (x, 0, oo), meijerg=True) \
        == log(S(1)/2 + sqrt(2)/2)
    # Piecewise antiderivative split on the branch condition |x**-2| > 1.
    assert integrate(1/x/sqrt(1 - x**2), x, meijerg=True) == \
        Piecewise((-acosh(1/x), 1 < abs(x**(-2))), (I*asin(1/x), True))
def test_3023():
    # Fresnel-type Gaussian with purely imaginary exponent over the real line.
    assert integrate(exp(-I*x**2), (x, -oo, oo), meijerg=True) == \
        -I*sqrt(pi)*exp(I*pi/4)
def test_3153():
    """The meijerg antiderivative of 1/(x*(a + b*x)**(1/3)) must not contain
    an unevaluated ``hyper`` function.

    Bug fix: the original asserted ``not expr.has(hyper)``, which is
    trivially true -- the *integrand* never contains ``hyper``.  The check
    is meant to be made on the antiderivative ``anti``.
    """
    expr = 1/x/(a + b*x)**(S(1)/3)
    anti = integrate(expr, x, meijerg=True)
    assert not anti.has(hyper)
    # XXX the expression is a mess, but actually upon differentiation and
    # putting in numerical values seems to work...
def test_3249():
    # Fourier-type integral of exp(I*x)/(1 + x**2) over the real line: pi/e.
    assert integrate(exp(I*x)/(1 + x**2), (x, -oo, oo)).simplify().rewrite(exp) \
        == pi*exp(-1)
def test_fresnel():
    """Antiderivatives of sin/cos(pi*x**2/2) are the Fresnel S and C
    functions."""
    from sympy import fresnels, fresnelc
    assert expand_func(integrate(sin(pi*x**2/2),x)) == fresnels(x)
    assert expand_func(integrate(cos(pi*x**2/2),x)) == fresnelc(x)
| |
import os
import shutil
from unittesting import DeferrableTestCase, AWAIT_WORKER
from SublimeLinter.tests.parameterized import parameterized as p
from SublimeLinter.tests.mockito import (
contains,
mock,
unstub,
verify,
when,
)
import sublime
from SublimeLinter import lint
from SublimeLinter.lint import (
backend,
linter as linter_module,
util
)
from SublimeLinter.lint.base_linter import node_linter
def make_fake_linter(view):
    """Build a minimal ``NodeLinter`` subclass and instantiate it for *view*.

    The linter claims the executable name 'mylinter' and matches the 'foo'
    selector.  NOTE(review): tests below assert on the linter name
    'fakelinter', presumably derived from the class name -- confirm against
    the registration code before renaming the class.
    """
    class FakeLinter(lint.NodeLinter):
        cmd = 'mylinter'
        defaults = {
            'selector': 'foo'
        }
    settings = linter_module.get_linter_settings(FakeLinter, view)
    return FakeLinter(view, settings)
class TestNodeLinters(DeferrableTestCase):
    """Tests for NodeLinter executable resolution and its failure warnings.

    Covers: globally installed binaries, local ``node_modules/.bin``
    lookups, ``package.json`` dependency/bin handling, Yarn PnP projects,
    and the ``disable_if_not_dependency`` setting.  Filesystem, PATH and
    subprocess access are stubbed out with mockito's ``when``/``verify``.
    """

    @classmethod
    def setUpClass(cls):
        cls.view = sublime.active_window().new_file()
        # make sure we have a window to work with
        s = sublime.load_settings("Preferences.sublime-settings")
        s.set("close_windows_when_empty", False)
        # it's just faster if we mock this out
        when(linter_module).register_linter(...).thenReturn(None)

    @classmethod
    def tearDownClass(cls):
        if cls.view:
            cls.view.set_scratch(True)
            cls.view.close()
        unstub()

    def tearDown(self):
        # Remove all mockito stubs after every test.
        unstub()

    def create_view(self, window):
        # Helper: open a new view that is closed again on test cleanup.
        view = window.new_file()
        self.addCleanup(self.close_view, view)
        return view

    def close_view(self, view):
        view.set_scratch(True)
        view.close()

    def patch_home(self, home):
        # Temporarily override node_linter.HOME; restored on cleanup.
        previous_state = node_linter.HOME
        node_linter.HOME = home
        self.addCleanup(lambda: setattr(node_linter, 'HOME', previous_state))

    def test_globally_installed(self):
        # Binary found on PATH -> used as-is.
        linter = make_fake_linter(self.view)
        when(util).which(...).thenReturn('fake.exe')
        cmd = linter.get_cmd()
        self.assertEqual(cmd, ['fake.exe'])

    def test_not_globally_installed_warn(self):
        # No binary anywhere -> no cmd, and a warning is logged.
        linter = make_fake_linter(self.view)
        when(linter.logger).warning(...).thenReturn(None)
        cmd = linter.get_cmd()
        self.assertEqual(cmd, None)
        verify(linter.logger).warning(...)

    @p.expand([
        ('/p',),
        ('/p/a',),
        ('/p/a/b',),
    ])
    def test_locally_installed(self, ROOT_DIR):
        # node_modules/.bin is searched upwards from the file's directory.
        PRESENT_BIN_PATH = os.path.join(ROOT_DIR, 'node_modules', '.bin')
        when(self.view).file_name().thenReturn('/p/a/b/f.js')
        linter = make_fake_linter(self.view)
        when(shutil).which('mylinter', ...).thenReturn(None)
        when(shutil).which('mylinter', path=PRESENT_BIN_PATH).thenReturn('fake.exe')
        cmd = linter.get_cmd()
        working_dir = linter.get_working_dir()
        self.assertEqual(cmd, ['fake.exe'])
        self.assertEqual(working_dir, ROOT_DIR)

    @p.expand([
        ('do not go above home', '/a/home', '/a', '/a/home/a/b/f.js'),
        ('do not fallback to home', '/a/home', '/a/home', '/p/a/b/f.js'),
    ])
    def test_home_dir_behavior(self, _doc, HOME_DIR, INSTALL_DIR, FILENAME):
        # The upwards search must stop at (and never fall back to) HOME.
        PRESENT_BIN_PATH = os.path.join(INSTALL_DIR, 'node_modules', '.bin')
        when(shutil).which(...).thenReturn(None)
        when(shutil).which('mylinter', path=PRESENT_BIN_PATH).thenReturn('fake.exe')
        self.patch_home(HOME_DIR)
        when(self.view).file_name().thenReturn(FILENAME)
        linter = make_fake_linter(self.view)
        cmd = linter.get_cmd()
        self.assertIsNone(cmd)

    @p.expand([
        ('/p', '/p', {'dependencies': {'mylinter': '0.2'}}),
        ('/p/a', '/p/a', {'devDependencies': {'mylinter': '0.2'}}),
        ('/p/a', '/p', {'devDependencies': {'mylinter': '0.2'}}),
        ('/p/a/b', '/p/a/b', {'devDependencies': {'mylinter': '0.2'}}),
        ('/p/a/b', '/p/a', {'devDependencies': {'mylinter': '0.2'}}),
        ('/p/a/b', '/p', {'devDependencies': {'mylinter': '0.2'}}),
    ])
    def test_locally_installed_with_package_json(self, ROOT_DIR, INSTALL_DIR, CONTENT):
        # package.json at ROOT_DIR + installed bin somewhere up the tree.
        PRESENT_PACKAGE_FILE = os.path.join(ROOT_DIR, 'package.json')
        PRESENT_BIN_PATH = os.path.join(INSTALL_DIR, 'node_modules', '.bin')
        when(self.view).file_name().thenReturn('/p/a/b/f.js')
        linter = make_fake_linter(self.view)
        exists = os.path.exists
        when(os.path).exists(...).thenAnswer(exists)
        when(os.path).exists(PRESENT_PACKAGE_FILE).thenReturn(True)
        when(shutil).which(...).thenReturn(None)
        when(shutil).which('mylinter', path=PRESENT_BIN_PATH).thenReturn('fake.exe')
        when(node_linter).read_json_file(PRESENT_PACKAGE_FILE).thenReturn(CONTENT)
        cmd = linter.get_cmd()
        working_dir = linter.get_working_dir()
        self.assertEqual(cmd, ['fake.exe'])
        self.assertEqual(working_dir, ROOT_DIR)

    @p.expand([
        ('/p',),
        ('/p/a',),
    ])
    def test_uninstalled_local_dependency_cant_read_package_json(self, ROOT_DIR):
        # Unreadable package.json -> warning plus failure notification.
        PRESENT_PACKAGE_FILE = os.path.join(ROOT_DIR, 'package.json')
        when(self.view).file_name().thenReturn('/p/a/f.js')
        linter = make_fake_linter(self.view)
        when(linter).notify_failure().thenReturn(None)
        when(linter.logger).warning(
            contains(
                "We found a 'package.json' at {}; however, reading it raised"
                .format(ROOT_DIR)
            )
        ).thenReturn(None)
        exists = os.path.exists
        when(os.path).exists(...).thenAnswer(exists)
        when(os.path).exists(PRESENT_PACKAGE_FILE).thenReturn(True)
        try:
            linter.get_cmd()
        except linter_module.PermanentError:
            pass
        verify(linter.logger).warning(...)
        verify(linter).notify_failure()

    @p.expand([
        ('/p', {'dependencies': {'mylinter': '0.2'}}, 'dependency', False, False, 'npm'),
        ('/p', {'dependencies': {'mylinter': '0.2'}}, 'dependency', False, True, 'npm'),
        ('/p/a', {'devDependencies': {'mylinter': '0.2'}}, 'devDependency', False, False, 'npm'),
        ('/p/a', {'devDependencies': {'mylinter': '0.2'}}, 'devDependency', False, True, 'npm'),
        ('/p', {'dependencies': {'mylinter': '0.2'}}, 'dependency', True, False, 'yarn'),
        ('/p', {'dependencies': {'mylinter': '0.2'}}, 'dependency', True, True, 'npm'),
        ('/p/a', {'devDependencies': {'mylinter': '0.2'}}, 'devDependency', True, False, 'yarn'),
        ('/p/a', {'devDependencies': {'mylinter': '0.2'}}, 'devDependency', True, True, 'npm'),
    ])
    def test_uninstalled_local_dependency(
        self,
        ROOT_DIR,
        CONTENT,
        DEPENDENCY_TYPE,
        HAS_YARN_LOCK,
        HAS_PACKAGE_LOCK,
        EXPECTED_PACKAGE_MANAGER,
    ):
        # Declared but not installed dependency -> warning suggesting the
        # package manager inferred from the lock files present.
        PRESENT_PACKAGE_FILE = os.path.join(ROOT_DIR, 'package.json')
        PACKAGE_LOCK_FILE = os.path.join(ROOT_DIR, 'package-lock.json')
        YARN_LOCK_FILE = os.path.join(ROOT_DIR, 'yarn.lock')
        when(self.view).file_name().thenReturn('/p/a/f.js')
        linter = make_fake_linter(self.view)
        when(linter).notify_failure().thenReturn(None)
        when(linter.logger).warning(
            "Skipping 'mylinter' for now which is listed as a {} in {} but "
            "not installed. Forgot to '{} install'?"
            .format(DEPENDENCY_TYPE, PRESENT_PACKAGE_FILE, EXPECTED_PACKAGE_MANAGER)
        ).thenReturn(None)
        exists = os.path.exists
        when(os.path).exists(...).thenAnswer(exists)
        when(os.path).exists(PRESENT_PACKAGE_FILE).thenReturn(True)
        if HAS_YARN_LOCK:
            when(os.path).exists(YARN_LOCK_FILE).thenReturn(True)
        if HAS_PACKAGE_LOCK:
            when(os.path).exists(PACKAGE_LOCK_FILE).thenReturn(True)
        when(node_linter).read_json_file(PRESENT_PACKAGE_FILE).thenReturn(CONTENT)
        try:
            linter.get_cmd()
        except linter_module.PermanentError:
            pass
        verify(linter.logger).warning(...)
        verify(linter).notify_failure()

    @p.expand([
        ('/p', {'bin': {'mylinter': 'fake.js'}}),
        ('/p/a', {'bin': {'mylinter': 'fake.js'}}),
    ])
    def test_executing_bin_script(self, ROOT_DIR, CONTENT):
        # package.json 'bin' entry -> run the script via a resolved node.
        PRESENT_PACKAGE_FILE = os.path.join(ROOT_DIR, 'package.json')
        BIN_FOLDER = os.path.join(ROOT_DIR, 'node_modules', '.bin')
        SCRIPT_FILE = os.path.normcase(os.path.join(ROOT_DIR, 'fake.js'))
        NODE_BIN = '/x/node'
        when(self.view).file_name().thenReturn('/p/a/f.js')
        linter = make_fake_linter(self.view)
        exists = os.path.exists
        when(os.path).exists(...).thenAnswer(exists)
        when(os.path).exists(PRESENT_PACKAGE_FILE).thenReturn(True)
        when(os.path).exists(BIN_FOLDER).thenReturn(True)
        when(node_linter).read_json_file(PRESENT_PACKAGE_FILE).thenReturn(CONTENT)
        when(linter).which('node').thenReturn(NODE_BIN)
        cmd = linter.get_cmd()
        working_dir = linter.get_working_dir()
        self.assertEqual(cmd, [NODE_BIN, SCRIPT_FILE])
        self.assertEqual(working_dir, ROOT_DIR)

    @p.expand([
        ('/p', {'bin': {'mylinter': 'fake.js'}}),
        ('/p/a', {'bin': {'mylinter': 'fake.js'}}),
    ])
    def test_executing_bin_script_warn_prior_install(self, ROOT_DIR, CONTENT):
        # 'bin' script present but node_modules missing -> ask to npm install.
        PRESENT_PACKAGE_FILE = os.path.join(ROOT_DIR, 'package.json')
        SCRIPT_FILE = os.path.normcase(os.path.join(ROOT_DIR, 'fake.js'))
        when(self.view).file_name().thenReturn('/p/a/f.js')
        linter = make_fake_linter(self.view)
        when(linter).notify_failure().thenReturn(None)
        when(linter.logger).warning(
            "We want to execute 'node {}'; but you should first 'npm install' "
            "this project."
            .format(SCRIPT_FILE)
        ).thenReturn(None)
        exists = os.path.exists
        when(os.path).exists(...).thenAnswer(exists)
        when(os.path).exists(PRESENT_PACKAGE_FILE).thenReturn(True)
        when(node_linter).read_json_file(PRESENT_PACKAGE_FILE).thenReturn(CONTENT)
        try:
            linter.get_cmd()
        except linter_module.PermanentError:
            pass
        verify(linter.logger).warning(...)
        verify(linter).notify_failure()

    @p.expand([
        ('/p', {'bin': {'mylinter': 'fake.js'}}),
        ('/p/a', {'bin': {'mylinter': 'fake.js'}}),
    ])
    def test_executing_bin_script_warn_no_node(self, ROOT_DIR, CONTENT):
        # 'bin' script present but no node executable found -> warn.
        PRESENT_PACKAGE_FILE = os.path.join(ROOT_DIR, 'package.json')
        BIN_FOLDER = os.path.join(ROOT_DIR, 'node_modules', '.bin')
        SCRIPT_FILE = os.path.normcase(os.path.join(ROOT_DIR, 'fake.js'))
        when(self.view).file_name().thenReturn('/p/a/f.js')
        linter = make_fake_linter(self.view)
        when(linter).notify_failure().thenReturn(None)
        when(linter.logger).warning(
            "We want to execute 'node {}'; however, finding a node executable failed."
            .format(SCRIPT_FILE)
        ).thenReturn(None)
        exists = os.path.exists
        when(os.path).exists(...).thenAnswer(exists)
        when(os.path).exists(PRESENT_PACKAGE_FILE).thenReturn(True)
        when(os.path).exists(BIN_FOLDER).thenReturn(True)
        when(node_linter).read_json_file(PRESENT_PACKAGE_FILE).thenReturn(CONTENT)
        when(linter).which('node').thenReturn(None)
        try:
            linter.get_cmd()
        except linter_module.PermanentError:
            pass
        verify(linter.logger).warning(...)
        verify(linter).notify_failure()

    @p.expand([
        ({'bin': {'cli': 'fake.js'}},),
        ({'bin': 'otherthing.js'},),
    ])
    def test_ignore_if_bin_does_not_contain_valid_information(self, CONTENT):
        # 'bin' entries not matching the linter name fall back to PATH lookup.
        ROOT_DIR = '/p'
        PRESENT_PACKAGE_FILE = os.path.join(ROOT_DIR, 'package.json')
        when(self.view).file_name().thenReturn('/p/a/f.js')
        exists = os.path.exists
        when(os.path).exists(...).thenAnswer(exists)
        when(os.path).exists(PRESENT_PACKAGE_FILE).thenReturn(True)
        when(node_linter).read_json_file(PRESENT_PACKAGE_FILE).thenReturn(CONTENT)
        when(util).which(...).thenReturn('fake.exe')
        linter = make_fake_linter(self.view)
        cmd = linter.get_cmd()
        self.assertEqual(cmd, ['fake.exe'])

    @p.expand([
        ('/p', {'dependencies': {'mylinter': '0.2'}, 'installConfig': {'pnp': True}}, False),
        ('/p/a', {'devDependencies': {'mylinter': '0.2'}, 'installConfig': {'pnp': True}}, False),
        ('/p', {'dependencies': {'mylinter': '0.2'}}, True),
        ('/p/a', {'devDependencies': {'mylinter': '0.2'}}, True),
    ])
    def test_installed_yarn_pnp_project(self, ROOT_DIR, CONTENT, PNP_JS_EXISTS):
        # PnP detected via installConfig.pnp or a .pnp.js file -> run through
        # 'yarn run --silent'.
        PRESENT_PACKAGE_FILE = os.path.join(ROOT_DIR, 'package.json')
        YARN_BIN = '/path/to/yarn'
        when(self.view).file_name().thenReturn('/p/a/f.js')
        linter = make_fake_linter(self.view)
        exists = os.path.exists
        when(os.path).exists(...).thenAnswer(exists)
        when(os.path).exists(PRESENT_PACKAGE_FILE).thenReturn(True)
        when(os.path).exists(os.path.join(ROOT_DIR, 'yarn.lock')).thenReturn(True)
        when(os.path).exists(os.path.join(ROOT_DIR, '.pnp.js')).thenReturn(PNP_JS_EXISTS)
        when(shutil).which(...).thenReturn(None)
        when(shutil).which('yarn').thenReturn(YARN_BIN)
        when(node_linter).read_json_file(PRESENT_PACKAGE_FILE).thenReturn(CONTENT)
        cmd = linter.get_cmd()
        working_dir = linter.get_working_dir()
        self.assertEqual(cmd, [YARN_BIN, 'run', '--silent', 'mylinter'])
        self.assertEqual(working_dir, ROOT_DIR)

    @p.expand([
        ('/p', {'dependencies': {'mylinter': '0.2'}, 'installConfig': {'pnp': True}}),
        ('/p/a', {'devDependencies': {'mylinter': '0.2'}, 'installConfig': {'pnp': True}}),
        ('/p', {'dependencies': {'mylinter': '0.2'}}, True),
        ('/p/a', {'devDependencies': {'mylinter': '0.2'}}, True),
    ])
    def test_yarn_pnp_project_warn_no_yarn(self, ROOT_DIR, CONTENT, PNP_JS_EXISTS=False):
        # PnP project but no yarn executable -> warn.  (The default covers
        # the two-element parameter tuples above.)
        PRESENT_PACKAGE_FILE = os.path.join(ROOT_DIR, 'package.json')
        when(self.view).file_name().thenReturn('/p/a/f.js')
        linter = make_fake_linter(self.view)
        when(linter).notify_failure().thenReturn(None)
        when(linter.logger).warning(
            "This seems like a Yarn PnP project. However, finding "
            "a Yarn executable failed. Make sure to install Yarn first."
        ).thenReturn(None)
        exists = os.path.exists
        when(os.path).exists(...).thenAnswer(exists)
        when(os.path).exists(PRESENT_PACKAGE_FILE).thenReturn(True)
        when(os.path).exists(os.path.join(ROOT_DIR, 'yarn.lock')).thenReturn(True)
        when(os.path).exists(os.path.join(ROOT_DIR, '.pnp.js')).thenReturn(PNP_JS_EXISTS)
        when(shutil).which(...).thenReturn(None)
        when(node_linter).read_json_file(PRESENT_PACKAGE_FILE).thenReturn(CONTENT)
        try:
            linter.get_cmd()
        except linter_module.PermanentError:
            pass
        verify(linter.logger).warning(...)
        verify(linter).notify_failure()

    @p.expand([
        ('/p', {'dependencies': {'mylinter': '0.2'}}),
        ('/p/a', {'devDependencies': {'mylinter': '0.2'}}),
    ])
    def test_yarn_pnp_project_warn_not_completely_installed(self, ROOT_DIR, CONTENT):
        # yarn run fails with "Command not found" -> ask to 'yarn install'.
        PRESENT_PACKAGE_FILE = os.path.join(ROOT_DIR, 'package.json')
        YARN_BIN = '/path/to/yarn'
        when(self.view).file_name().thenReturn('/p/a/f.js')
        linter = make_fake_linter(self.view)
        when(linter).notify_failure().thenReturn(None)
        when(linter.logger).warning(
            "We did execute 'yarn run --silent mylinter' but "
            "'mylinter' cannot be found. Forgot to 'yarn install'?"
        ).thenReturn(None)
        exists = os.path.exists
        when(os.path).exists(...).thenAnswer(exists)
        when(os.path).exists(PRESENT_PACKAGE_FILE).thenReturn(True)
        when(os.path).exists(os.path.join(ROOT_DIR, 'yarn.lock')).thenReturn(True)
        when(os.path).exists(os.path.join(ROOT_DIR, '.pnp.js')).thenReturn(True)
        when(shutil).which(...).thenReturn(None)
        when(shutil).which('yarn').thenReturn(YARN_BIN)
        when(node_linter).read_json_file(PRESENT_PACKAGE_FILE).thenReturn(CONTENT)
        when(linter)._communicate(...).thenReturn('error Command "mylinter" not found')
        try:
            linter.lint('foo', lambda: False)
        except linter_module.PermanentError:
            pass
        verify(linter.logger).warning(...)
        verify(linter).notify_failure()

    def test_disable_if_not_dependency(self):
        # Setting enabled and linter not a local dependency -> unassigned
        # with an informational log message.
        linter = make_fake_linter(self.view)
        linter.settings['disable_if_not_dependency'] = True
        when(linter).notify_unassign().thenReturn(None)
        when(linter.logger).info(...).thenReturn(None)
        try:
            linter.get_cmd()
        except linter_module.PermanentError:
            pass
        verify(linter.logger).info(
            "Skipping 'fakelinter' since it is not installed locally.\nYou "
            "can change this behavior by setting 'disable_if_not_dependency' "
            "to 'false'."
        )
        verify(linter).notify_unassign()

    def test_disable_if_not_dependency_2(self):
        # Same setting exercised through the full backend.lint_view path:
        # the sink receives an empty error list for the skipped linter.
        linter = make_fake_linter(self.view)
        linter.settings['disable_if_not_dependency'] = True
        view_has_changed = lambda: False
        sink = mock()
        when(sink).__call__(...).thenReturn(None)
        backend.lint_view(
            [{'name': linter.name, 'klass': linter.__class__, 'settings': linter.settings}],
            self.view, view_has_changed, sink)
        yield AWAIT_WORKER
        verify(sink).__call__(linter.name, [])
| |
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2018
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Collect/MOD10
"""
# import general python modules
import sys
import os
import numpy as np
import pandas as pd
import gdal
import urllib
import urllib2
from bs4 import BeautifulSoup
import re
import urlparse
import math
import datetime
import requests
import glob
from joblib import Parallel, delayed
# Water Accounting modules
import wa.General.raster_conversions as RC
import wa.General.data_conversions as DC
from wa import WebAccounts
def DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Waitbar, cores, hdf_library, remove_hdf):
    """
    This function downloads MOD10 8-daily snow cover data.

    Keyword arguments:
    Dir -- 'C:/file/to/path/'  output root; a 'MOD10' subfolder is created
    Startdate -- 'yyyy-mm-dd' (defaults to 2000-02-18, start of the archive)
    Enddate -- 'yyyy-mm-dd' (defaults to now)
    latlim -- [ymin, ymax] (values must be between -90 and 90)
    lonlim -- [xmin, xmax] (values must be between -180 and 180)
    Waitbar -- 1 (Default) will print a waitbar
    cores -- The number of cores used to run the routine. It can be 'False'
             to avoid using parallel computing routines.
    hdf_library -- passed through to RetrieveData (folder with already
                   downloaded hdf files)
    remove_hdf -- 1 removes all intermediate .hdf and .txt files afterwards
    """
    # Check start and end date and otherwise set the date to max
    if not Startdate:
        Startdate = pd.Timestamp('2000-02-18')
    if not Enddate:
        Enddate = pd.Timestamp('Now')

    # Make an array of the 8-daily time stamps that must be downloaded
    Dates = Make_TimeStamps(Startdate, Enddate)

    # Create Waitbar
    if Waitbar == 1:
        import wa.Functions.Start.WaitbarConsole as WaitbarConsole
        total_amount = len(Dates)
        amount = 0
        WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)

    # Clamp latitude and longitude to the greatest possible extent.
    # BUG FIX: this used np.max(latlim[0], -90) etc.; np.max's second
    # positional argument is `axis`, not a value to compare against, so the
    # intended clamping never happened.  The builtins max/min do it right.
    if latlim[0] < -90 or latlim[1] > 90:
        print('Latitude above 90N or below 90S is not possible. Value set to maximum')
        latlim[0] = max(latlim[0], -90)
        latlim[1] = min(latlim[1], 90)
    if lonlim[0] < -180 or lonlim[1] > 180:
        print('Longitude must be between 180E and 180W. Now value is set to maximum')
        lonlim[0] = max(lonlim[0], -180)
        lonlim[1] = min(lonlim[1], 180)

    # Make directory for the MODIS snow cover data
    Dir = Dir.replace("/", os.sep)
    output_folder = os.path.join(Dir, 'MOD10')
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    # Download list (txt file on the internet) which includes the lat and
    # lon information of the MODIS tiles.  Try urllib, then urllib2, then
    # requests without certificate verification as a last resort.
    # (Bare `except:` narrowed to `except Exception:` so that
    # KeyboardInterrupt/SystemExit still propagate.)
    nameDownloadtext = 'https://modis-land.gsfc.nasa.gov/pdf/sn_gring_10deg.txt'
    file_nametext = os.path.join(output_folder, nameDownloadtext.split('/')[-1])
    try:
        try:
            urllib.urlretrieve(nameDownloadtext, file_nametext)
        except Exception:
            data = urllib2.urlopen(nameDownloadtext).read()
            with open(file_nametext, "wb") as fp:
                fp.write(data)
    except Exception:
        from requests.packages.urllib3.exceptions import InsecureRequestWarning
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
        with open(file_nametext, "wb") as fp:
            data = requests.get(nameDownloadtext, verify=False)
            fp.write(data.content)

    # Open text file with tiles which is downloaded before
    tiletext = np.genfromtxt(file_nametext, skip_header=7, skip_footer=1, usecols=(0,1,2,3,4,5,6,7,8,9))
    tiletext2 = tiletext[tiletext[:,2] >= -900, :]  # drop fill rows

    # Convert the tile table into the horizontal and vertical tile numbers
    # that cover the extent defined by the user
    TilesVertical, TilesHorizontal = Tiles_to_download(tiletext2=tiletext2, lonlim1=lonlim, latlim1=latlim)

    # Pass variables to parallel function and run
    args = [output_folder, TilesVertical, TilesHorizontal, lonlim, latlim, hdf_library]
    if not cores:
        # Sequential mode (also drives the waitbar)
        for Date in Dates:
            RetrieveData(Date, args)
            if Waitbar == 1:
                amount += 1
                WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
        results = True
    else:
        results = Parallel(n_jobs=cores)(delayed(RetrieveData)(Date, args)
                                         for Date in Dates)

    if remove_hdf == 1:
        # Remove all .hdf files
        os.chdir(output_folder)
        files = glob.glob("*.hdf")
        for f in files:
            os.remove(os.path.join(output_folder, f))
        # Remove all .txt files
        files = glob.glob("*.txt")
        for f in files:
            os.remove(os.path.join(output_folder, f))

    return results
def RetrieveData(Date, args):
    """
    This function retrieves one MOD10 snow cover composite for a given date,
    merges the tiles, reprojects the result to WGS84 and clips it to the
    user's extent.

    (Doc fix: the original docstring said "MOD15 FPAR data from
    http://e4ftl01.cr.usgs.gov/"; the actual source, see Collect_data, is
    MOD10A2 on https://n5eil01u.ecs.nsidc.org/.)

    Keyword arguments:
    Date -- pandas Timestamp of the composite to retrieve
    args -- A list of parameters defined in the DownloadData function.
    """
    # Unpack the argument list built by DownloadData
    [output_folder, TilesVertical, TilesHorizontal, lonlim, latlim, hdf_library] = args

    # Collect the data from the MODIS webpage; best effort -- a failed
    # download is reported but does not abort the run.  (Bare `except:`
    # narrowed so KeyboardInterrupt/SystemExit still propagate.)
    try:
        Collect_data(TilesHorizontal, TilesVertical, Date, output_folder, hdf_library)
    except Exception:
        print('Was not able to download the file')

    # Define the output name of the collect data function
    name_collect = os.path.join(output_folder, 'Merged.tif')

    # Reproject the merged MODIS product to WGS84 (EPSG:4326)
    epsg_to = '4326'
    name_reprojected = RC.reproject_MODIS(name_collect, epsg_to)

    # Clip the data to the user's extent
    data, geo = RC.clip_data(name_reprojected, latlim, lonlim)

    # Save the file as tiff
    FPARfileName = os.path.join(output_folder, 'SnowFrac_MOD10_unitless_8-daily_' + Date.strftime('%Y') + '.' + Date.strftime('%m') + '.' + Date.strftime('%d') + '.tif')
    DC.Save_as_tiff(name=FPARfileName, data=data, geo=geo, projection='WGS84')

    # Remove the intermediate products
    os.remove(os.path.join(output_folder, name_collect))
    os.remove(os.path.join(output_folder, name_reprojected))

    return True
def Make_TimeStamps(Startdate, Enddate):
    '''
    This function determines all time steps for which data must be
    downloaded.  The time stamps are 8-daily, aligned to the MODIS
    compositing periods (DOY 1, 9, 17, ... restarting every calendar year).

    Keywords arguments:
    Startdate -- 'yyyy-mm-dd'
    Enddate -- 'yyyy-mm-dd'

    Returns a pandas DatetimeIndex of composite start dates.
    '''
    # Define the DOY and year of the start day
    DOY = datetime.datetime.strptime(Startdate, '%Y-%m-%d').timetuple().tm_yday
    Year = datetime.datetime.strptime(Startdate, '%Y-%m-%d').timetuple().tm_year

    # Define the year of the end day
    YearEnd = datetime.datetime.strptime(Enddate, '%Y-%m-%d').timetuple().tm_year

    # Snap the start day back to the previous MODIS 8-day boundary and
    # rebuild Startdate from it
    DOYstart = int(math.floor(DOY / 8.0) * 8) + 1
    DOYstart = str('%s-%s' % (DOYstart, Year))
    Day = datetime.datetime.strptime(DOYstart, '%j-%Y')
    Month = '%02d' % Day.month
    Day = '%02d' % Day.day
    Startdate = (str(Year) + '-' + str(Month) + '-' + str(Day))

    # Create the year-start and year-end anchors inside the period
    YearStartDate = pd.date_range(Startdate, Enddate, freq = 'AS')
    YearEndDate = pd.date_range(Startdate, Enddate, freq = 'A')

    # Define the amount of years that are involved
    AmountOfYear = YearEnd - Year

    # BUG FIX: the original compared integers with 'is' (identity), which
    # only works by accident through CPython's small-int cache and silently
    # misbehaves for larger values or other interpreters; use ==/!=.
    if AmountOfYear > 0:
        # Start and end fall in different years: build the 8-daily range per
        # calendar year (each year restarts at DOY 1) and union the pieces.
        for i in range(0, AmountOfYear + 1):
            if i == 0:
                Startdate1 = Startdate
                Enddate1 = YearEndDate[0]
                Dates = pd.date_range(Startdate1, Enddate1, freq = '8D')
            if i == AmountOfYear:
                Startdate1 = YearStartDate[-1]
                Enddate1 = Enddate
                Dates1 = pd.date_range(Startdate1, Enddate1, freq = '8D')
                Dates = Dates.union(Dates1)
            if i != AmountOfYear and i != 0:
                Startdate1 = YearStartDate[i - AmountOfYear - 1]
                Enddate1 = YearEndDate[i]
                Dates1 = pd.date_range(Startdate1, Enddate1, freq = '8D')
                Dates = Dates.union(Dates1)
    # If the start day is in the same year as the end date
    if AmountOfYear == 0:
        Dates = pd.date_range(Startdate, Enddate, freq = '8D')

    return(Dates)
def Collect_data(TilesHorizontal,TilesVertical,Date,output_folder, hdf_library):
'''
This function downloads all the needed MODIS tiles from https://n5eil01u.ecs.nsidc.org/MOST/MOD10A2.006/ as a hdf file.
Keywords arguments:
TilesHorizontal -- [TileMin,TileMax] max and min horizontal tile number
TilesVertical -- [TileMin,TileMax] max and min vertical tile number
Date -- 'yyyy-mm-dd'
output_folder -- 'C:/file/to/path/'
'''
# Make a new tile for the data
sizeX = int((TilesHorizontal[1] - TilesHorizontal[0] + 1) * 2400)
sizeY = int((TilesVertical[1] - TilesVertical[0] + 1) * 2400)
DataTot = np.zeros((sizeY, sizeX))
# Load accounts
username, password = WebAccounts.Accounts(Type = 'NASA')
# Download the MODIS FPAR data
url = 'https://n5eil01u.ecs.nsidc.org/MOST/MOD10A2.006/' + Date.strftime('%Y') + '.' + Date.strftime('%m') + '.' + Date.strftime('%d') + '/'
dataset = requests.get(url, allow_redirects=False,stream = True)
try:
get_dataset = requests.get(dataset.headers['location'], auth = (username,password),stream = True).content
except:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
get_dataset = requests.get(dataset.headers['location'], auth = (username, password), verify = False).content
soup = BeautifulSoup(get_dataset, "lxml")
if len(str(soup)) < 300:
print 'Download was not succesfull, please check NASA account'
sys.exit(1)
# Create the Lat and Long of the MODIS tile in meters
for Vertical in range(int(TilesVertical[0]), int(TilesVertical[1])+1):
Distance = 231.65635826395834*2 # resolution of a MODIS pixel in meter
countY=(TilesVertical[1] - TilesVertical[0] + 1) - (Vertical - TilesVertical[0])
for Horizontal in range(int(TilesHorizontal[0]), int(TilesHorizontal[1]) + 1):
countX=Horizontal - TilesHorizontal[0] + 1
for i in soup.findAll('a', attrs = {'href': re.compile('(?i)(hdf)$')}):
# Find the file with the wanted tile number
Vfile=str(i)[30:32]
Hfile=str(i)[27:29]
if int(Vfile) is int(Vertical) and int(Hfile) is int(Horizontal):
# Define the whole url name
full_url = urlparse.urljoin(url, i['href'])
# Reset the begin parameters for downloading
downloaded = 0
N=0
# if not downloaded try to download file
while downloaded == 0:
try:# open http and download whole .hdf
nameDownload_url = full_url
file_name = os.path.join(output_folder,nameDownload_url.split('/')[-1])
if os.path.isfile(file_name):
downloaded = 1
else:
x = requests.get(nameDownload_url, allow_redirects = False)
try:
y = requests.get(x.headers['location'], auth = (username, password))
except:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
y = requests.get(x.headers['location'], auth = (username, password), verify = False)
z = open(file_name, 'wb')
z.write(y.content)
z.close()
statinfo = os.stat(file_name)
# Say that download was succesfull
if int(statinfo.st_size) > 1000:
downloaded = 1
# If download was not succesfull
except:
# Try another time
N = N + 1
# Stop trying after 10 times
if N == 10:
print 'Data from ' + Date.strftime('%Y-%m-%d') + ' is not available'
downloaded = 1
try:
# Open .hdf only band with SnowFrac and collect all tiles to one array
scale_factor = 1
dataset = gdal.Open(file_name)
sdsdict = dataset.GetMetadata('SUBDATASETS')
sdslist = [sdsdict[k] for k in sdsdict.keys() if '_1_NAME' in k]
sds = []
for n in sdslist:
sds.append(gdal.Open(n))
full_layer = [i for i in sdslist if 'MOD_Grid_Snow_500m' in i]
idx = sdslist.index(full_layer[0])
if Horizontal == TilesHorizontal[0] and Vertical == TilesVertical[0]:
geo_t = sds[idx].GetGeoTransform()
# get the projection value
proj = sds[idx].GetProjection()
data = sds[idx].ReadAsArray()
countYdata = (TilesVertical[1] - TilesVertical[0] + 2) - countY
DataTot[int((countYdata - 1) * 2400):int(countYdata * 2400), int((countX - 1) * 2400):int(countX * 2400)]=data * scale_factor
del data
# if the tile not exists or cannot be opened, create a nan array with the right projection
except:
if Horizontal==TilesHorizontal[0] and Vertical==TilesVertical[0]:
x1 = (TilesHorizontal[0] - 19) * 2400 * Distance
x4 = (TilesVertical[0] - 9) * 2400 * -1 * Distance
geo = [x1, Distance, 0.0, x4, 0.0, -Distance]
geo_t=tuple(geo)
proj='PROJCS["unnamed",GEOGCS["Unknown datum based upon the custom spheroid",DATUM["Not specified (based on custom spheroid)",SPHEROID["Custom spheroid",6371007.181,0]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]],PROJECTION["Sinusoidal"],PARAMETER["longitude_of_center",0],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["Meter",1]]'
data=np.ones((2400, 2400)) * (-9999)
countYdata=(TilesVertical[1] - TilesVertical[0] + 2) - countY
DataTot[(countYdata - 1) * 2400:countYdata * 2400,(countX - 1) * 2400:countX * 2400] = data * 0.01
# Make geotiff file
name2 = os.path.join(output_folder, 'Merged.tif')
driver = gdal.GetDriverByName("GTiff")
dst_ds = driver.Create(name2, DataTot.shape[1], DataTot.shape[0], 1, gdal.GDT_Float32, ['COMPRESS=LZW'])
try:
dst_ds.SetProjection(proj)
except:
proj='PROJCS["unnamed",GEOGCS["Unknown datum based upon the custom spheroid",DATUM["Not specified (based on custom spheroid)",SPHEROID["Custom spheroid",6371007.181,0]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]],PROJECTION["Sinusoidal"],PARAMETER["longitude_of_center",0],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["Meter",1]]'
x1 = (TilesHorizontal[0] - 18) * 2400 * Distance
x4 = (TilesVertical[0] - 9) * 2400 * -1 * Distance
geo = [x1, Distance, 0.0, x4, 0.0, -Distance]
geo_t = tuple(geo)
dst_ds.SetProjection(proj)
dst_ds.GetRasterBand(1).SetNoDataValue(-9999)
dst_ds.SetGeoTransform(geo_t)
dst_ds.GetRasterBand(1).WriteArray(DataTot)
dst_ds = None
sds = None
return()
| |
from __future__ import absolute_import
import logging
import six
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_protect
from django.views.generic import View
from sudo.views import redirect_to_sudo
from sentry import roles
from sentry.auth import access
from sentry.models import (
AuditLogEntry, Organization, OrganizationMember, OrganizationStatus, Project,
ProjectStatus, Team, TeamStatus
)
from sentry.utils import auth
from sentry.web.helpers import render_to_response
from sentry.api.serializers import serialize
logger = logging.getLogger(__name__)
audit_logger = logging.getLogger('sentry.audit.ui')
class OrganizationMixin(object):
    # TODO(dcramer): move the implicit organization logic into its own class
    # as it's only used in a single location and over complicates the rest of
    # the code
    def get_active_organization(self, request, organization_slug=None):
        """
        Returns the currently active organization for the request or None
        if no organization.

        When ``organization_slug`` is None the slug is read implicitly from
        the 'activeorg' session key.  The result is memoized per
        (slug, user) pair on ``self._active_org``.
        """
        # TODO(dcramer): this is a huge hack, and we should refactor this
        # it is currently needed to handle the is_auth_required check on
        # OrganizationBase
        # Serve from the per-view cache only when it matches both the
        # requested slug and the requesting user.
        active_organization = getattr(self, '_active_org', None)
        cached_active_org = (
            active_organization
            and active_organization[0].slug == organization_slug
            and active_organization[1] == request.user
        )
        if cached_active_org:
            return active_organization[0]
        active_organization = None
        # "Implicit" means no slug in the URL; fall back to the session.
        is_implicit = organization_slug is None
        if is_implicit:
            organization_slug = request.session.get('activeorg')
        if organization_slug is not None:
            # Superusers may resolve any visible organization directly,
            # without a membership check.
            if request.is_superuser():
                try:
                    active_organization = Organization.objects.get_from_cache(
                        slug=organization_slug,
                    )
                    # Hidden/deleted orgs are treated the same as a miss.
                    if active_organization.status != OrganizationStatus.VISIBLE:
                        raise Organization.DoesNotExist
                except Organization.DoesNotExist:
                    logger.info('Active organization [%s] not found',
                                organization_slug)
        if active_organization is None:
            # Restrict the search to organizations the user belongs to.
            organizations = Organization.objects.get_for_user(
                user=request.user,
            )
            if active_organization is None and organization_slug:
                try:
                    active_organization = six.next(
                        o for o in organizations
                        if o.slug == organization_slug
                    )
                except StopIteration:
                    logger.info('Active organization [%s] not found in scope',
                                organization_slug)
                    if is_implicit:
                        # The session pointed at a stale slug; drop it so we
                        # do not keep resolving it on every request.
                        del request.session['activeorg']
                        active_organization = None
            if active_organization is None:
                # Only fall back to "first organization" for implicit
                # lookups; an explicit slug that cannot be resolved is a miss.
                if not is_implicit:
                    return None
                try:
                    active_organization = organizations[0]
                except IndexError:
                    logger.info('User is not a member of any organizations')
                    pass
        if active_organization and self._is_org_member(request.user, active_organization):
            # Keep the session's implicit-organization pointer up to date.
            if active_organization.slug != request.session.get('activeorg'):
                request.session['activeorg'] = active_organization.slug
        self._active_org = (active_organization, request.user)
        return active_organization
    def _is_org_member(self, user, organization):
        # True when the user has an explicit membership row for the org.
        return OrganizationMember.objects.filter(
            user=user,
            organization=organization,
        ).exists()
    def get_active_team(self, request, organization, team_slug):
        """
        Returns the currently selected team for the request or None
        if no match.
        """
        try:
            team = Team.objects.get_from_cache(
                slug=team_slug,
                organization=organization,
            )
        except Team.DoesNotExist:
            return None
        # Hidden/deleted teams are treated the same as a miss.
        if team.status != TeamStatus.VISIBLE:
            return None
        return team
    def get_active_project(self, request, organization, project_slug):
        """Return the visible project for ``project_slug`` or None."""
        try:
            project = Project.objects.get_from_cache(
                slug=project_slug,
                organization=organization,
            )
        except Project.DoesNotExist:
            return None
        if project.status != ProjectStatus.VISIBLE:
            return None
        return project
    def redirect_to_org(self, request):
        """Redirect to the active org's home page, or to org creation."""
        from sentry import features
        # TODO(dcramer): deal with case when the user cannot create orgs
        organization = self.get_active_organization(request)
        if organization:
            url = reverse('sentry-organization-home', args=[organization.slug])
        elif not features.has('organizations:create'):
            return self.respond('sentry/no-organization-access.html', status=403)
        else:
            url = reverse('sentry-create-organization')
        return HttpResponseRedirect(url)
class BaseView(View, OrganizationMixin):
    """
    Base class for Sentry's class-based views.

    ``dispatch`` runs the auth, sudo and permission gates in order, resolves
    URL arguments via ``convert_args``, computes ``request.access`` and then
    delegates to ``handle()``.
    """
    # Require an authenticated, active user by default.
    auth_required = True
    # TODO(dcramer): change sudo so it can be required only on POST
    sudo_required = False

    def __init__(self, auth_required=None, sudo_required=None, *args, **kwargs):
        # Allow per-URL overrides of the class-level flags.
        if auth_required is not None:
            self.auth_required = auth_required
        if sudo_required is not None:
            self.sudo_required = sudo_required
        super(BaseView, self).__init__(*args, **kwargs)

    @method_decorator(csrf_protect)
    def dispatch(self, request, *args, **kwargs):
        """Run auth/sudo/permission checks, then hand off to ``handle()``."""
        if self.is_auth_required(request, *args, **kwargs):
            return self.handle_auth_required(request, *args, **kwargs)
        if self.is_sudo_required(request, *args, **kwargs):
            return self.handle_sudo_required(request, *args, **kwargs)
        args, kwargs = self.convert_args(request, *args, **kwargs)
        request.access = self.get_access(request, *args, **kwargs)
        if not self.has_permission(request, *args, **kwargs):
            return self.handle_permission_required(request, *args, **kwargs)
        self.request = request
        self.default_context = self.get_context_data(request, *args, **kwargs)
        return self.handle(request, *args, **kwargs)

    def get_access(self, request, *args, **kwargs):
        # Subclasses narrow this to organization-scoped access.
        return access.DEFAULT

    def convert_args(self, request, *args, **kwargs):
        # Hook for subclasses to resolve URL slugs into model instances.
        return (args, kwargs)

    def handle(self, request, *args, **kwargs):
        # Defer to Django's method-based dispatch (get/post/...).
        return super(BaseView, self).dispatch(request, *args, **kwargs)

    def is_auth_required(self, request, *args, **kwargs):
        return (
            self.auth_required
            and not (request.user.is_authenticated() and request.user.is_active)
        )

    def handle_auth_required(self, request, *args, **kwargs):
        """Start a login flow, preferring the org's SSO login when known."""
        auth.initiate_login(request, next_url=request.get_full_path())
        if 'organization_slug' in kwargs:
            redirect_to = reverse('sentry-auth-organization',
                                  args=[kwargs['organization_slug']])
        else:
            redirect_to = auth.get_login_url()
        return self.redirect(redirect_to)

    def is_sudo_required(self, request, *args, **kwargs):
        return self.sudo_required and not request.is_sudo()

    def handle_sudo_required(self, request, *args, **kwargs):
        return redirect_to_sudo(request.get_full_path())

    def has_permission(self, request, *args, **kwargs):
        return True

    def handle_permission_required(self, request, *args, **kwargs):
        redirect_uri = self.get_no_permission_url(request, *args, **kwargs)
        return self.redirect(redirect_uri)

    def get_no_permission_url(self, request, *args, **kwargs):
        # BUG FIX: the original signature was missing ``self``; the bound
        # call only worked because ``self`` was shifted into the ``request``
        # parameter.  The return value is unchanged.
        return reverse('sentry-login')

    def get_context_data(self, request, **kwargs):
        # Seed every template context with the CSRF token.
        context = csrf(request)
        return context

    def respond(self, template, context=None, status=200):
        """Render ``template`` with the default context merged with ``context``."""
        default_context = self.default_context
        if context:
            default_context.update(context)
        return render_to_response(template, default_context, self.request,
                                  status=status)

    def redirect(self, url):
        return HttpResponseRedirect(url)

    def get_team_list(self, user, organization):
        return Team.objects.get_for_user(
            organization=organization,
            user=user,
            with_projects=True,
        )

    def create_audit_entry(self, request, transaction_id=None, **kwargs):
        """Build an audit entry, persist it when it has an event type, and log it."""
        entry = AuditLogEntry(
            actor=request.user if request.user.is_authenticated() else None,
            # TODO(jtcunning): assert that REMOTE_ADDR is a real IP.
            ip_address=request.META['REMOTE_ADDR'],
            **kwargs
        )
        # Only create a real AuditLogEntry record if we are passing an event type
        # otherwise, we want to still log to our actual logging
        if entry.event is not None:
            entry.save()
        extra = {
            'ip_address': entry.ip_address,
            'organization_id': entry.organization_id,
            'object_id': entry.target_object,
            'entry_id': entry.id,
            'actor_label': entry.actor_label
        }
        if transaction_id is not None:
            extra['transaction_id'] = transaction_id
        audit_logger.info(entry.get_event_display(), extra=extra)
        return entry
class OrganizationView(BaseView):
    """
    Any view acting on behalf of an organization should inherit from this base.
    The 'organization' keyword argument is automatically injected into the
    resulting dispatch.
    """
    # Scope string that request.access must carry for has_permission().
    required_scope = None
    # When True, SSO-enforced organizations require a valid SSO session.
    valid_sso_required = True
    def get_access(self, request, organization, *args, **kwargs):
        # Compute the organization-scoped access object used by the
        # permission checks below.
        if organization is None:
            return access.DEFAULT
        return access.from_request(request, organization)
    def get_context_data(self, request, organization, **kwargs):
        """Add the org, its team list and access flags to the template context."""
        context = super(OrganizationView, self).get_context_data(request)
        context['organization'] = organization
        context['TEAM_LIST'] = self.get_team_list(request.user, organization)
        context['ACCESS'] = request.access.to_django_context()
        return context
    def has_permission(self, request, organization, *args, **kwargs):
        """Deny when the org is missing, SSO is pending, or the scope is lacking."""
        if organization is None:
            return False
        if self.valid_sso_required:
            if not request.access.sso_is_valid:
                return False
            if self.needs_sso(request, organization):
                return False
        if self.required_scope and not request.access.has_scope(self.required_scope):
            logger.info('User %s does not have %s permission to access organization %s',
                        request.user, self.required_scope, organization)
            return False
        return True
    def is_auth_required(self, request, organization_slug=None, *args, **kwargs):
        result = super(OrganizationView, self).is_auth_required(
            request, *args, **kwargs
        )
        if result:
            return result
        # if the user is attempting to access an organization that *may* be
        # accessible if they simply re-authenticate, we want to allow that
        # this opens up a privacy hole, but the pros outweigh the cons
        if not organization_slug:
            return False
        active_organization = self.get_active_organization(
            request=request,
            organization_slug=organization_slug,
        )
        if not active_organization:
            try:
                Organization.objects.get_from_cache(slug=organization_slug)
            except Organization.DoesNotExist:
                pass
            else:
                # The org exists but is out of this user's scope: force a
                # fresh authentication attempt.
                return True
        return False
    def handle_permission_required(self, request, organization, *args, **kwargs):
        """On SSO-required orgs, bounce to the org SSO login; else generic URL."""
        if self.needs_sso(request, organization):
            logger.info('access.must-sso', extra={
                'organization_id': organization.id,
                'user_id': request.user.id,
            })
            auth.initiate_login(request, next_url=request.get_full_path())
            redirect_uri = reverse('sentry-auth-organization',
                                   args=[organization.slug])
        else:
            redirect_uri = self.get_no_permission_url(request, *args, **kwargs)
        return self.redirect(redirect_uri)
    def needs_sso(self, request, organization):
        """True when the org enforces SSO and this session has not completed it."""
        if not organization:
            return False
        # XXX(dcramer): this branch should really never hit
        if not request.user.is_authenticated():
            return False
        if not self.valid_sso_required:
            return False
        if not request.access.requires_sso:
            return False
        if not auth.has_completed_sso(request, organization.id):
            return True
        if not request.access.sso_is_valid:
            return True
        return False
    def convert_args(self, request, organization_slug=None, *args, **kwargs):
        # Resolve the slug once and inject the org into the dispatch kwargs.
        active_organization = self.get_active_organization(
            request=request,
            organization_slug=organization_slug,
        )
        kwargs['organization'] = active_organization
        return (args, kwargs)
    def get_allowed_roles(self, request, organization, member=None):
        """Return (can_admin, allowed_roles) for managing ``member``'s role."""
        can_admin = request.access.has_scope('member:admin')
        allowed_roles = []
        if can_admin and not request.is_superuser():
            acting_member = OrganizationMember.objects.get(
                user=request.user,
                organization=organization,
            )
            # A member may never administer someone with a higher role.
            if member and roles.get(acting_member.role).priority < roles.get(member.role).priority:
                can_admin = False
            else:
                # Can only grant roles at or below the acting member's own.
                allowed_roles = [
                    r for r in roles.get_all()
                    if r.priority <= roles.get(acting_member.role).priority
                ]
                can_admin = bool(allowed_roles)
        elif request.is_superuser():
            allowed_roles = roles.get_all()
        return (can_admin, allowed_roles,)
class TeamView(OrganizationView):
    """
    Any view acting on behalf of a team should inherit from this base and the
    matching URL pattern must pass 'team_slug'.
    Two keyword arguments are added to the resulting dispatch:
    - organization
    - team
    """
    def get_context_data(self, request, organization, team, **kwargs):
        """Extend the organization context with the active team."""
        ctx = super(TeamView, self).get_context_data(request, organization)
        ctx['team'] = team
        return ctx

    def has_permission(self, request, organization, team, *args, **kwargs):
        """Require a resolvable team, org-level permission, and team access."""
        if team is None:
            return False
        parent_ok = super(TeamView, self).has_permission(request, organization)
        if not parent_ok:
            return parent_ok
        if self.required_scope:
            if request.access.has_team_scope(team, self.required_scope):
                return True
            logger.info('User %s does not have %s permission to access team %s',
                        request.user, self.required_scope, team)
            return False
        if not request.access.has_team(team):
            logger.info('User %s does not have access to team %s',
                        request.user, team)
            return False
        return True

    def convert_args(self, request, organization_slug, team_slug, *args, **kwargs):
        """Resolve the org and team slugs and inject both as dispatch kwargs."""
        org = self.get_active_organization(
            request=request,
            organization_slug=organization_slug,
        )
        team = self.get_active_team(
            request=request,
            team_slug=team_slug,
            organization=org,
        ) if org else None
        kwargs['organization'] = org
        kwargs['team'] = team
        return (args, kwargs)
class ProjectView(TeamView):
    """
    Any view acting on behalf of a project should inherit from this base and the
    matching URL pattern must pass 'team_slug' as well as 'project_slug'.
    Three keyword arguments are added to the resulting dispatch:
    - organization
    - team
    - project
    """
    def get_context_data(self, request, organization, team, project, **kwargs):
        """Extend the team context with the project and its processing-issue count."""
        ctx = super(ProjectView, self).get_context_data(request, organization, team)
        ctx['project'] = project
        ctx['processing_issues'] = serialize(project).get('processingIssues', 0)
        return ctx

    def has_permission(self, request, organization, team, project, *args, **kwargs):
        """Require resolvable project and team plus team-level access."""
        if project is None or team is None:
            return False
        parent_ok = super(ProjectView, self).has_permission(request, organization, team)
        if not parent_ok:
            return parent_ok
        if self.required_scope:
            if request.access.has_team_scope(team, self.required_scope):
                return True
            logger.info('User %s does not have %s permission to access project %s',
                        request.user, self.required_scope, project)
            return False
        if not request.access.has_team(team):
            logger.info('User %s does not have access to project %s',
                        request.user, project)
            return False
        return True

    def convert_args(self, request, organization_slug, project_slug, *args, **kwargs):
        """Resolve slugs into organization/project/team objects injected as kwargs."""
        org = self.get_active_organization(
            request=request,
            organization_slug=organization_slug,
        )
        project = self.get_active_project(
            request=request,
            organization=org,
            project_slug=project_slug,
        ) if org else None
        team = project.team if project else None
        kwargs['project'] = project
        kwargs['team'] = team
        kwargs['organization'] = org
        return (args, kwargs)
| |
# coding: utf-8
# In[4]:
import numpy as np
import pandas as pd
from sknn.mlp import Classifier, Layer
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import confusion_matrix
from sklearn.metrics import log_loss
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from multiprocessing import Pool, TimeoutError
from multiprocessing import cpu_count
from datetime import timedelta
import sys
import csv
import itertools
import time
# In[2]:
def two_layers_nnet(X_train,
                    Y_train,
                    X_test,
                    Y_test,
                    method1="Tanh",
                    neurons1=5,
                    method2="",
                    neurons2=0,
                    decay=0.0001,
                    learning_rate=0.001,
                    n_iter=25,
                    random_state=1):
    """
    Fit a one- or two-hidden-layer neural net and score it on the test set.

    Parameters
    ----------
    X_train : pandas data frame
        data frame of features for the training set
    Y_train : pandas data frame
        data frame of labels for the training set
    X_test : pandas data frame
        data frame of features for the test set
    Y_test : pandas data frame
        data frame of labels for the test set
    method1 : str
        activation used for the first hidden layer
    neurons1 : int
        number of neurons of the first hidden layer
    method2 : str
        activation used for the second hidden layer ("" to disable)
    neurons2 : int
        number of neurons of the second hidden layer (0 to disable)
    decay : float
        weight decay
    learning_rate : float
        learning rate
    n_iter : int
        number of iterations
    random_state : int
        seed for weight initialization

    Result:
    -------
    numpy array
        logloss : averaged logarithmic loss
        miss_err : missclassification error rate
        prec : precision
        recall : recall
        f1 : f1 score
        parameters : previous parameters in the order previously specified
    """
    labels = np.unique(Y_train)
    # Scale features to [0, 1].
    # NOTE(review): each set is scaled independently with fit_transform;
    # presumably the train-set statistics were meant for both -- confirm.
    scaler = MinMaxScaler()
    X_test = scaler.fit_transform(X_test)
    X_train = scaler.fit_transform(X_train)
    # Build the layer stack; the second hidden layer is optional.
    layers = [Layer(method1, weight_decay=decay, units=neurons1)]
    if neurons2 != 0:
        layers.append(Layer(method2, weight_decay=decay, units=neurons2))
    layers.append(Layer("Softmax"))
    # Define and fit the classifier.
    nn = Classifier(layers,
                    learning_rate=learning_rate,
                    random_state=random_state,
                    n_iter=n_iter)
    nn.fit(X_train, Y_train)
    # Predict classes and class probabilities.
    Y_hat = nn.predict(X_test)
    Y_probs = nn.predict_proba(X_test)
    # Misclassification error rate.
    miss_err = 1 - accuracy_score(Y_test, Y_hat)
    # BUG FIX: the original wrote ``eps = 10^(-15)``, where ``^`` is bitwise
    # XOR (10 ^ -15 == -5), not exponentiation.  The intended probability
    # clipping epsilon is 1e-15.
    eps = 1e-15
    logloss = log_loss(Y_test, Y_probs, eps=eps)
    # Micro-averaged precision / recall / F1.
    prec = precision_score(y_true=Y_test, y_pred=Y_hat, labels=labels, average='micro')
    recall = recall_score(y_true=Y_test, y_pred=Y_hat, labels=labels, average='micro')
    f1 = f1_score(y_true=Y_test, y_pred=Y_hat, labels=labels, average='micro')
    # Scores followed by the parameters that produced them.
    result = np.array([logloss,
                       miss_err,
                       prec,
                       recall,
                       f1,
                       method1,
                       neurons1,
                       method2,
                       neurons2,
                       decay,
                       learning_rate,
                       n_iter,
                       random_state])
    return result
# In[3]:
def processInput(xxx_todo_changeme):
    """Unpack one simulation task tuple and run ``two_layers_nnet`` on it.

    The tuple carries the four data sets, the full list of parameter
    combinations, and the index of the combination to evaluate.
    """
    X_train, Y_train, X_test, Y_test, parameters, index = xxx_todo_changeme
    # parameters[index] is an 8-tuple in the exact positional order that
    # two_layers_nnet expects after the four data arguments.
    combo = parameters[index]
    return two_layers_nnet(X_train, Y_train, X_test, Y_test, *combo)
def two_layers_nnet_simulation(X_train,
                               Y_train,
                               X_test,
                               Y_test,
                               method1,
                               neurons1,
                               method2,
                               neurons2,
                               decay,
                               learning_rate,
                               n_iter,
                               random_state):
    """
    Run ``two_layers_nnet`` for every combination of the given parameter
    lists, in parallel across the available CPUs.

    Parameters:
    -----------
    Same parameters as two_layers_nnet, in a list format.
    Result:
    ------
    List of Lists of results from two_layers_nnet.
    One list corresponds to one set of parameters
    """
    print('Launching Simulation...')  # FIX: corrected 'Lauching' typo
    start = time.time()
    # Cartesian product of the parameter lists -> one tuple per configuration.
    # FIX: a plain list is used instead of np.array([...]) — the parameter
    # lists have different lengths, and ragged np.array construction is an
    # error on modern numpy; itertools.product receives the same sequences.
    param = [method1,
             neurons1,
             method2,
             neurons2,
             decay,
             learning_rate,
             n_iter,
             random_state]
    parameters = list(itertools.product(*param))
    indexes = list(range(len(parameters)))
    print("Number of sets of parameters: %s.\n" %len(parameters))
    print('Parameters:\n-----------')
    print(np.array(parameters))
    # Use at most one worker per parameter set.
    num_cpu = cpu_count()
    print("\nNumber of identified CPUs: %s.\n" %num_cpu)
    num_clusters = min(num_cpu,len(parameters))
    # One task tuple per configuration; each carries the shared data sets.
    tuples_indexes = tuple([(X_train,Y_train,X_test,Y_test,parameters,index) for index in indexes])
    # Start clusters
    print('Start %s clusters.\n' % num_clusters)
    print('Running...')
    pool = Pool(processes=num_clusters)
    results = pool.map(processInput, tuples_indexes)
    pool.terminate()
    # Results
    print('Results:\n--------')
    print(results)
    end = time.time()
    elapsed = end - start
    print('End of Simulation.\nElapsed time: %s' %str(timedelta(seconds=elapsed)))
    # NOTE(review): this message is kept from the original, but no csv is
    # written here -- the caller is expected to persist ``results``.
    print('Write into csv...')
    return results
# In[4]:
def two_layers_nnet_predict(X_train,
                            Y_train,
                            X_test,
                            method1="Tanh",
                            neurons1=5,
                            method2="",
                            neurons2=0,
                            decay=0.0001,
                            learning_rate=0.001,
                            n_iter=25,
                            random_state=1):
    """
    Fit a one- or two-hidden-layer neural net on the training data and
    predict classes and probabilities for the test set.

    Parameters
    ----------
    X_train : pandas data frame
        data frame of features for the training set
    Y_train : pandas data frame
        data frame of labels for the training set
    X_test : pandas data frame
        data frame of features for the test set
    method1 : str
        activation used for the first hidden layer
    neurons1 : int
        number of neurons of the first hidden layer
    method2 : str
        activation used for the second hidden layer ("" to disable)
    neurons2 : int
        number of neurons of the second hidden layer (0 to disable)
    decay : float
        weight decay
    learning_rate : float
        learning rate
    n_iter : int
        number of iterations
    random_state : int
        seed for weight initialization

    Result:
    -------
    tuple of numpy arrays
        (predicted classes, predicted probabilities)
    """
    # FIX: removed the unused local ``labels = np.unique(Y_train)``.
    # Scale features to [0, 1].
    # NOTE(review): each set is scaled independently with fit_transform;
    # presumably the train-set statistics were meant for both -- confirm.
    scaler = MinMaxScaler()
    X_test = scaler.fit_transform(X_test)
    X_train = scaler.fit_transform(X_train)
    # Build the layer stack; the second hidden layer is optional.
    layers = [Layer(method1, weight_decay=decay, units=neurons1)]
    if neurons2 != 0:
        layers.append(Layer(method2, weight_decay=decay, units=neurons2))
    layers.append(Layer("Softmax"))
    # Define and fit the classifier.
    nn = Classifier(layers,
                    learning_rate=learning_rate,
                    random_state=random_state,
                    n_iter=n_iter)
    nn.fit(X_train, Y_train)
    # Predict classes and class probabilities.
    Y_hat = nn.predict(X_test)
    Y_probs = nn.predict_proba(X_test)
    return (Y_hat, Y_probs)
| |
from eLCS.Constants import cons
from eLCS.Classifier import Classifier
import random
import copy
class ClassifierSet(object):
"""This module handles all the classifier sets
This includes the population, match set and correct sets along with mechanisms and
heuristics that act on these sets.
This class can be initialized with the:
1. Creation of a new population, or
2. Reboots the population (i.e. read in from a previously saved population)
"""
    def __init__(self, pop_reboot_path=None):
        """Initializes the Classifier Set

        Builds either a fresh empty population or one rebooted from a saved
        rule-population text file.

        :param str pop_reboot_path: Path prefix of a saved population
            ("<path>_RulePop.txt"), defaults to None (fresh population)
        """
        # Major Parameters
        self.popSet = []  # List of classifiers/rules (list of Classifier objects)
        self.matchSet = []  # List of references (indexes into popSet) of rules that match the current state
        self.correctSet = []  # List of references to rules that both match and specify correct phenotype
        self.microPopSize = 0  # Tracks the current micro population size, i.e. the population size which takes rule numerosity into account.
        self.runtimeParams = []  # List that stores the result at each iteration
        # Evaluation Parameters
        self.aveGenerality = 0.0
        self.expRules = 0.0
        self.attributeSpecList = []
        self.attributeAccList = []
        self.avePhenotypeRange = 0.0
        # Set Constructors
        if pop_reboot_path == None:
            # Initialize a new population
            self.makePop()
        elif isinstance(pop_reboot_path, str):
            # Initialize a population based on an existing saved rule population
            self.rebootPop(pop_reboot_path)
        else:
            # Any other argument type is an error
            print("ClassifierSet: Error building population.")
# Population Constructor Methods
def makePop(self):
""" Initializes the rule population, as an empty list"""
self.popSet = []
    def rebootPop(self, pop_reboot_path):
        """Remakes a previously evolved population from a saved text file

        The file is expected to be tab-delimited with a header row, one rule
        per subsequent row.

        :param pop_reboot_path: path prefix; "<path>_RulePop.txt" is read
        :return:
        """
        print("Rebooting the following population: " + str(pop_reboot_path) + "_RulePop.txt")
        # *******************Initial file handling**********************************************************
        datasetList = []
        try:
            f = open(pop_reboot_path + "_RulePop.txt", 'r')
        except Exception as inst:
            # Report what went wrong, then propagate: a missing population
            # file is unrecoverable here.
            print(type(inst))
            print(inst.args)
            print(inst)
            print('cannot open', pop_reboot_path + "_RulePop.txt")
            raise
        else:
            self.headerList = f.readline().rstrip('\n').split('\t')  # strip off first row
            for line in f:
                lineList = line.strip('\n').split('\t')
                datasetList.append(lineList)
            f.close()
        # **************************************************************************************************
        # Rebuild one Classifier per saved row and restore the micro
        # population size from each rule's numerosity column.
        for each in datasetList:
            cl = Classifier(each)
            self.popSet.append(cl)
            # Numerosity is stored 3 columns after the attribute columns.
            numerosityRef = cons.env.formatData.numAttributes + 3
            self.microPopSize += int(each[numerosityRef])
        print("Rebooted Rule Population has " + str(len(self.popSet)) + " Macro Pop Size.")
# Classifier set constructor methods
    def makeMatchSet(self, state_phenotype, exploreIter):
        """Constructs a match set from the population

        Covering is initiated if the match set is empty or a rule with the current correct phenotype is absent.

        :param list state_phenotype: Listing consisting of the training state and training phenotype
        :param int exploreIter: The current iteration
        """
        # Initial values
        state = state_phenotype[0]
        phenotype = state_phenotype[1]
        # Covering check:
        # 1. Checks that a match is present, and
        # 2. That at least one match dictates the correct phenotype.
        doCovering = True
        setNumerositySum = 0
        # Carry out matching (timed for bookkeeping)
        cons.timer.startTimeMatching()
        for i in range(len(self.popSet)):  # Go through the population
            cl = self.popSet[i]  # One classifier at a time
            if cl.match(state):  # Check for match
                self.matchSet.append(i)  # If match - add classifier to match set
                setNumerositySum += cl.numerosity  # Increment the set numerosity sum
                # Covering Check--------------------------------------------------------
                if cons.env.formatData.discretePhenotype:  # Discrete phenotype
                    if cl.phenotype == phenotype:  # Check for phenotype coverage
                        doCovering = False
                else:  # Continuous phenotype
                    if float(cl.phenotype[0]) <= float(phenotype) <= float(
                            cl.phenotype[1]):  # Check for phenotype coverage
                        doCovering = False
        cons.timer.stopTimeMatching()
        # -------------------------------------------------------
        # COVERING
        # -------------------------------------------------------
        # The loop runs at most once: a single covering classifier built from
        # the current state/phenotype is guaranteed to match and be correct.
        while doCovering:
            newCl = Classifier(setNumerositySum + 1, exploreIter, state, phenotype)
            self.addClassifierToPopulation(newCl, True)
            self.matchSet.append(len(self.popSet) - 1)  # Add covered classifier to matchset
            doCovering = False
def makeCorrectSet(self, phenotype):
"""Constructs a correct set out of the given match set
:param phenotype:
:return:
"""
for i in range(len(self.matchSet)):
ref = self.matchSet[i]
# -------------------------------------------------------
# DISCRETE PHENOTYPE
# -------------------------------------------------------
if cons.env.formatData.discretePhenotype:
if self.popSet[ref].phenotype == phenotype:
self.correctSet.append(ref)
# -------------------------------------------------------
# CONTINUOUS PHENOTYPE
# -------------------------------------------------------
else:
if float(phenotype) <= float(self.popSet[ref].phenotype[1]) and float(phenotype) >= float(
self.popSet[ref].phenotype[0]):
self.correctSet.append(ref)
def makeEvalMatchSet(self, state):
"""Constructs a match set for evaluation purposes which does not activate either covering or deletion.
:param state:
:return:
"""
for i in range(len(self.popSet)): # Go through the population
cl = self.popSet[i] # A single classifier
if cl.match(state): # Check for match
self.matchSet.append(i) # Add classifier to match set
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# CLASSIFIER DELETION METHODS
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def deletion(self, exploreIter):
"""Returns the population size back to the maximum set by the user by deleting rules.
:param exploreIter:
:return:
"""
cons.timer.startTimeDeletion()
while self.microPopSize > cons.N:
self.deleteFromPopulation()
cons.timer.stopTimeDeletion()
def deleteFromPopulation(self):
""" Deletes one classifier in the population.
The classifier that will be deleted is chosen by roulette wheel selection considering the deletion vote.
Returns the macro-classifier which got decreased by one micro-classifier.
"""
meanFitness = self.getPopFitnessSum() / float(self.microPopSize)
# Calculate total wheel size------------------------------
sumCl = 0.0
voteList = []
for cl in self.popSet:
vote = cl.getDelProp(meanFitness)
sumCl += vote
voteList.append(vote)
# --------------------------------------------------------
choicePoint = sumCl * random.random() # Determine the choice point
newSum = 0.0
for i in range(len(voteList)):
cl = self.popSet[i]
newSum = newSum + voteList[i]
if newSum > choicePoint: # Select classifier for deletion
# Delete classifier----------------------------------
cl.updateNumerosity(-1)
self.microPopSize -= 1
if cl.numerosity < 1: # When all micro-classifiers for a given classifier have been depleted.
self.removeMacroClassifier(i)
self.deleteFromMatchSet(i)
self.deleteFromCorrectSet(i)
return
print("ClassifierSet: No eligible rules found for deletion in deleteFromPopulation.")
return
def removeMacroClassifier(self, ref):
"""Removes the specified (macro-) classifier from the population.
:param ref:
:return:
"""
self.popSet.pop(ref)
def deleteFromMatchSet(self, deleteRef):
"""Delete reference to classifier in population, contained in self.matchSet.
:param deleteRef:
:return:
"""
if deleteRef in self.matchSet:
self.matchSet.remove(deleteRef)
# Update match set reference list--------
for j in range(len(self.matchSet)):
ref = self.matchSet[j]
if ref > deleteRef:
self.matchSet[j] -= 1
def deleteFromCorrectSet(self, deleteRef):
"""Delete reference to classifier in population, contained in self.corectSet.
:param deleteRef:
:return:
"""
if deleteRef in self.correctSet:
self.correctSet.remove(deleteRef)
# Update match set reference list--------
for j in range(len(self.correctSet)):
ref = self.correctSet[j]
if ref > deleteRef:
self.correctSet[j] -= 1
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# GENETIC ALGORITHM
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def runGA(self, exploreIter, state, phenotype):
"""The genetic discovery mechanism in eLCS is controlled here.
:param exploreIter:
:param state:
:param phenotype:
:return:
"""
# -------------------------------------------------------
# GA RUN REQUIREMENT
# -------------------------------------------------------
# Does the correct set meet the requirements for activating the GA?
if (exploreIter - self.getIterStampAverage()) < cons.theta_GA:
return
self.setIterStamps(
exploreIter) # Updates the iteration time stamp for all rules in the correct set (which the GA opperates in).
changed = False
# -------------------------------------------------------
# SELECT PARENTS - Niche GA - selects parents from the correct class
# -------------------------------------------------------
cons.timer.startTimeSelection()
if cons.selectionMethod == "roulette":
selectList = self.selectClassifierRW()
clP1 = selectList[0]
clP2 = selectList[1]
elif cons.selectionMethod == "tournament":
selectList = self.selectClassifierT()
clP1 = selectList[0]
clP2 = selectList[1]
else:
print("ClassifierSet: Error - requested GA selection method not available.")
cons.timer.stopTimeSelection()
# -------------------------------------------------------
# INITIALIZE OFFSPRING
# -------------------------------------------------------
cl1 = Classifier(clP1, exploreIter)
if clP2 == None:
cl2 = Classifier(clP1, exploreIter)
else:
cl2 = Classifier(clP2, exploreIter)
# -------------------------------------------------------
# CROSSOVER OPERATOR - Uniform Crossover Implemented (i.e. all attributes have equal probability of crossing over between two parents)
# -------------------------------------------------------
if not cl1.equals(cl2) and random.random() < cons.chi:
changed = cl1.uniformCrossover(cl2)
# -------------------------------------------------------
# INITIALIZE KEY OFFSPRING PARAMETERS
# -------------------------------------------------------
if changed:
cl1.setAccuracy((cl1.accuracy + cl2.accuracy) / 2.0)
cl1.setFitness(cons.fitnessReduction * (cl1.fitness + cl2.fitness) / 2.0)
cl2.setAccuracy(cl1.accuracy)
cl2.setFitness(cl1.fitness)
else:
cl1.setFitness(cons.fitnessReduction * cl1.fitness)
cl2.setFitness(cons.fitnessReduction * cl2.fitness)
# -------------------------------------------------------
# MUTATION OPERATOR
# -------------------------------------------------------
nowchanged = cl1.Mutation(state, phenotype)
howaboutnow = cl2.Mutation(state, phenotype)
# -------------------------------------------------------
# ADD OFFSPRING TO POPULATION
# -------------------------------------------------------
if changed or nowchanged or howaboutnow:
self.insertDiscoveredClassifiers(cl1, cl2, clP1, clP2, exploreIter) # Subsumption
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# SELECTION METHODS
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def selectClassifierRW(self):
"""Selects parents using roulette wheel selection according to the fitness of the classifiers.
:return:
"""
# Prepare for correct set or 'niche' selection.
setList = copy.deepcopy(self.correctSet)
if len(setList) > 2:
selectList = [None, None]
currentCount = 0 # Pick two parents
while currentCount < 2:
fitSum = self.getFitnessSum(setList)
choiceP = random.random() * fitSum
i = 0
sumCl = self.popSet[setList[i]].fitness
while choiceP > sumCl:
i = i + 1
sumCl += self.popSet[setList[i]].fitness
selectList[currentCount] = self.popSet[setList[i]]
setList.remove(setList[i])
currentCount += 1
elif len(setList) == 2:
selectList = [self.popSet[setList[0]], self.popSet[setList[1]]]
elif len(setList) == 1:
selectList = [self.popSet[setList[0]], self.popSet[setList[0]]]
else:
print("ClassifierSet: Error in parent selection.")
return selectList
def selectClassifierT(self):
"""Selects parents using tournament selection according to the fitness of the classifiers."""
selectList = [None, None]
currentCount = 0
setList = self.correctSet # correct set is a list of reference IDs
while currentCount < 2:
tSize = int(len(setList) * cons.theta_sel)
posList = random.sample(setList, tSize)
bestF = 0
bestC = self.correctSet[0]
for j in posList:
if self.popSet[j].fitness > bestF:
bestF = self.popSet[j].fitness
bestC = j
selectList[currentCount] = self.popSet[bestC]
currentCount += 1
return selectList
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# SUBSUMPTION METHODS
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def subsumeClassifier(self, cl=None, cl1P=None, cl2P=None):
"""Tries to subsume a classifier in the parents.
If no subsumption is possible it tries to subsume it in the current set.
:param cl:
:param cl1P:
:param cl2P:
:return:
"""
if cl1P != None and cl1P.subsumes(cl):
self.microPopSize += 1
cl1P.updateNumerosity(1)
elif cl2P != None and cl2P.subsumes(cl):
self.microPopSize += 1
cl2P.updateNumerosity(1)
else:
self.subsumeClassifier2(cl); # Try to subsume in the correct set.
def subsumeClassifier2(self, cl):
"""Tries to subsume a classifier in the correct set.
If no subsumption is possible the classifier is simply added to the population considering the
possibility that there exists an identical classifier.
:param cl:
:return:
"""
choices = []
for ref in self.correctSet:
if self.popSet[ref].subsumes(cl):
choices.append(ref)
if len(choices) > 0: # Randomly pick one classifier to be subsumer
choice = int(random.random() * len(choices))
self.popSet[choices[choice]].updateNumerosity(1)
self.microPopSize += 1
return
self.addClassifierToPopulation(cl,
False) # If no subsumer was found, check for identical classifier, if not then add the classifier to the population
    def doCorrectSetSubsumption(self):
        """Executes correct set subsumption.

        Finds the most general eligible subsumer in the correct set, then
        folds every strictly more specific correct-set rule into it
        (transferring numerosity and deleting the specific rule).
        """
        subsumer = None
        # Pass 1: find the most general rule that qualifies as a subsumer.
        for ref in self.correctSet:
            cl = self.popSet[ref]
            if cl.isSubsumer():
                if subsumer == None or cl.isMoreGeneral(subsumer):
                    subsumer = cl
        if subsumer != None:  # If a subsumer was found, subsume all more specific classifiers in the correct set
            # Pass 2: absorb every more specific rule into the subsumer.
            i = 0
            while i < len(self.correctSet):
                ref = self.correctSet[i]
                if subsumer.isMoreGeneral(self.popSet[ref]):
                    subsumer.updateNumerosity(self.popSet[ref].numerosity)
                    self.removeMacroClassifier(ref)
                    self.deleteFromMatchSet(ref)
                    self.deleteFromCorrectSet(ref)
                    # Removal shifted the remaining references down by one,
                    # so re-examine the current position on the next pass.
                    i = i - 1
                i = i + 1
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# OTHER KEY METHODS
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def addClassifierToPopulation(self, cl, covering):
"""Adds a classifier to the set and increases the microPopSize value accordingly.
:param cl:
:param covering:
:return:
"""
oldCl = None
if not covering:
oldCl = self.getIdenticalClassifier(cl)
if oldCl != None: # found identical classifier
oldCl.updateNumerosity(1)
self.microPopSize += 1
else:
self.popSet.append(cl)
self.microPopSize += 1
def insertDiscoveredClassifiers(self, cl1, cl2, clP1, clP2, exploreIter):
"""Inserts both discovered classifiers and activates GA subsumption if turned on.
Also checks for default rule (i.e. rule with completely general condition)
and prevents such rules from being added to the population, as it offers no predictive value within eLCS.
:param cl1:
:param cl2:
:param clP1:
:param clP2:
:param exploreIter:
:return:
"""
# -------------------------------------------------------
# SUBSUMPTION
# -------------------------------------------------------
if cons.doSubsumption:
cons.timer.startTimeSubsumption()
if len(cl1.specifiedAttList) > 0:
self.subsumeClassifier(cl1, clP1, clP2)
if len(cl2.specifiedAttList) > 0:
self.subsumeClassifier(cl2, clP1, clP2)
cons.timer.stopTimeSubsumption()
# -------------------------------------------------------
# ADD OFFSPRING TO POPULATION
# -------------------------------------------------------
else: # Just add the new classifiers to the population.
if len(cl1.specifiedAttList) > 0:
self.addClassifierToPopulation(cl1,
False) # False passed because this is not called for a covered rule.
if len(cl2.specifiedAttList) > 0:
self.addClassifierToPopulation(cl2,
False) # False passed because this is not called for a covered rule.
def updateSets(self, exploreIter):
"""Updates all relevant parameters in the current match and correct sets.
:param exploreIter:
:return:
"""
matchSetNumerosity = 0
for ref in self.matchSet:
matchSetNumerosity += self.popSet[ref].numerosity
for ref in self.matchSet:
self.popSet[ref].updateExperience()
self.popSet[ref].updateMatchSetSize(matchSetNumerosity)
if ref in self.correctSet:
self.popSet[ref].updateCorrect()
self.popSet[ref].updateAccuracy()
self.popSet[ref].updateFitness()
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# OTHER METHODS
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def getIterStampAverage(self):
"""Returns the average of the time stamps in the correct set."""
sumCl = 0.0
numSum = 0.0
for i in range(len(self.correctSet)):
ref = self.correctSet[i]
sumCl += self.popSet[ref].timeStampGA * self.popSet[ref].numerosity
numSum += self.popSet[ref].numerosity # numerosity sum of correct set
return sumCl / float(numSum)
def setIterStamps(self, exploreIter):
""" Sets the time stamp of all classifiers in the set to the current time.
The current time is the number of exploration steps executed so far.
:param exploreIter:
:return:
"""
for i in range(len(self.correctSet)):
ref = self.correctSet[i]
self.popSet[ref].updateTimeStamp(exploreIter)
def getFitnessSum(self, setList):
"""Returns the sum of the fitnesses of all classifiers in the set.
:param setList:
:return:
"""
sumCl = 0.0
for i in range(len(setList)):
ref = setList[i]
sumCl += self.popSet[ref].fitness
return sumCl
def getPopFitnessSum(self):
""" Returns the sum of the fitnesses of all classifiers in the set. """
sumCl = 0.0
for cl in self.popSet:
sumCl += cl.fitness * cl.numerosity
return sumCl
def getIdenticalClassifier(self, newCl):
"""Looks for an identical classifier in the population.
:param newCl:
:return:
"""
for cl in self.popSet:
if newCl.equals(cl):
return cl
return None
def clearSets(self):
"""Clears out references in the match and correct sets for the next learning iteration."""
self.matchSet = []
self.correctSet = []
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# EVALUTATION METHODS
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def runPopAveEval(self, exploreIter):
"""Calculates some summary evaluations across the rule population including average generality.
:param exploreIter:
:return:
"""
genSum = 0
agedCount = 0
for cl in self.popSet:
genSum += ((cons.env.formatData.numAttributes - len(cl.condition)) / float(
cons.env.formatData.numAttributes)) * cl.numerosity
if self.microPopSize == 0:
self.aveGenerality = 'NA'
else:
self.aveGenerality = genSum / float(self.microPopSize)
# -------------------------------------------------------
# CONTINUOUS PHENOTYPE
# -------------------------------------------------------
if not cons.env.formatData.discretePhenotype:
sumRuleRange = 0
for cl in self.popSet:
sumRuleRange += (cl.phenotype[1] - cl.phenotype[0]) * cl.numerosity
phenotypeRange = cons.env.formatData.phenotypeList[1] - cons.env.formatData.phenotypeList[0]
self.avePhenotypeRange = (sumRuleRange / float(self.microPopSize)) / float(phenotypeRange)
def runAttGeneralitySum(self, isEvaluationSummary):
"""Determine the population-wide frequency of attribute specification, and accuracy weighted specification.
Used in complete rule population evaluations.
:param isEvaluationSummary:
:return:
"""
if isEvaluationSummary:
self.attributeSpecList = []
self.attributeAccList = []
for i in range(cons.env.formatData.numAttributes):
self.attributeSpecList.append(0)
self.attributeAccList.append(0.0)
for cl in self.popSet:
for ref in cl.specifiedAttList: # for each attRef
self.attributeSpecList[ref] += cl.numerosity
self.attributeAccList[ref] += cl.numerosity * cl.accuracy
def getPopTrack(self, accuracy, exploreIter, trackingFrequency):
"""Returns a formatted output string to be printed to the Learn Track output file.
:param accuracy:
:param exploreIter:
:param trackingFrequency:
:return:
"""
# Runtime variables for classifier
epoch = int(exploreIter / trackingFrequency)
iteration = exploreIter
macro_pop = len(self.popSet)
micro_pop = self.microPopSize
acc_estimate = accuracy
ave_gen = self.aveGenerality
time = cons.timer.returnGlobalTimer()
# Add to runtime dictionary
iter_results = {
'epoch': epoch,
'iteration': iteration,
'macro_pop': macro_pop,
'micro_pop': micro_pop,
'acc_estimate': acc_estimate,
'ave_gen': ave_gen,
'time': time
}
# Print results from current iteration out to the console
if cons.env.formatData.discretePhenotype:
# Discrete phenotype
print("Epoch: {0}".format(epoch) +
"\t Iteration: {0}".format(iteration) +
"\t MacroPop: {0}".format(macro_pop) +
"\t MicroPop: {0}".format(micro_pop) +
"\t AccEstimate: {0}".format(acc_estimate) +
"\t AveGen: {0}".format(ave_gen) +
"\t Time: {0}".format(time))
else:
# Continuous phenotype
phen_range = self.avePhenotypeRange # For continous phenotypes only
print("Epoch: {0}".format(epoch) +
"\t Iteration: {0}".format(iteration) +
"\t MacroPop: {0}".format(macro_pop) +
"\t MicroPop: {0}".format(micro_pop) +
"\t AccEstimate: {0}".format(acc_estimate) +
"\t AveGen: {0}".format(ave_gen) +
"\t PhenRange: {0}".format(phen_range) +
"\t Time: {0}".format(time))
iter_results['phen_range'] = phen_range
# Store the results of the current iteration
self.runtimeParams.append(iter_results)
# Return results as \t separated string
trackString = str(iteration) + "\t" + \
str(macro_pop) + "\t" + \
str(micro_pop) + "\t" + \
str(acc_estimate) + "\t" + \
str(ave_gen) + "\t" + \
str(time) + "\n"
return trackString
| |
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import mock
import mox
from oslo.config import cfg
import webob
from nova.api.openstack.compute.contrib import security_groups as secgroups_v2
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import power_state
from nova import context as context_maker
import nova.db
from nova import exception
from nova import objects
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova import quota
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
from nova.tests import utils
CONF = cfg.CONF
FAKE_UUID1 = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
FAKE_UUID2 = 'c6e6430a-6563-4efa-9542-5e93c9e97d18'
class AttrDict(dict):
    """Dict whose keys are also readable as attributes (test convenience).

    NOTE(review): a missing key raises KeyError rather than AttributeError
    from __getattr__, which can confuse hasattr()/copy — confirm acceptable
    for these fixtures.
    """
    def __getattr__(self, k):
        return self[k]
def security_group_template(**kwargs):
    """Return a security-group dict, filling in default test values."""
    sg = dict(kwargs)
    sg.setdefault('tenant_id', '123')
    sg.setdefault('name', 'test')
    sg.setdefault('description', 'test-description')
    return sg
def security_group_db(security_group, id=None):
    """Convert an API-style security-group dict into a DB-style AttrDict."""
    attrs = dict(security_group)
    # The database layer uses 'project_id' where the API uses 'tenant_id'.
    if 'tenant_id' in attrs:
        attrs['project_id'] = attrs.pop('tenant_id')
    if id is not None:
        attrs['id'] = id
    attrs.setdefault('rules', [])
    attrs.setdefault('instances', [])
    return AttrDict(attrs)
def security_group_rule_template(**kwargs):
    """Return a security-group-rule dict, filling in default test values."""
    rule = dict(kwargs)
    rule.setdefault('ip_protocol', 'tcp')
    rule.setdefault('from_port', 22)
    rule.setdefault('to_port', 22)
    rule.setdefault('parent_group_id', 2)
    return rule
def security_group_rule_db(rule, id=None):
    """Convert an API-style rule dict into a DB-style AttrDict.

    NOTE(review): the *id* parameter is accepted for symmetry with
    security_group_db but is currently ignored.
    """
    attrs = dict(rule)
    # The DB layer stores the protocol under 'protocol', not 'ip_protocol'.
    if 'ip_protocol' in attrs:
        attrs['protocol'] = attrs.pop('ip_protocol')
    return AttrDict(attrs)
def return_server(context, server_id,
                  columns_to_join=None, use_slave=False):
    """Stub for nova.db.instance_get returning a canned running server."""
    values = {'id': int(server_id),
              'power_state': 0x01,
              'host': "localhost",
              'uuid': FAKE_UUID1,
              'name': 'asdf'}
    return fake_instance.fake_db_instance(**values)
def return_server_by_uuid(context, server_uuid,
                          columns_to_join=None,
                          use_slave=False):
    """Stub for nova.db.instance_get_by_uuid returning a running server."""
    values = {'id': 1,
              'power_state': 0x01,
              'host': "localhost",
              'uuid': server_uuid,
              'name': 'asdf'}
    return fake_instance.fake_db_instance(**values)
def return_non_running_server(context, server_id, columns_to_join=None):
    """Stub returning a server that is shut down rather than running."""
    values = {'id': server_id,
              'power_state': power_state.SHUTDOWN,
              'uuid': FAKE_UUID1,
              'host': "localhost",
              'name': 'asdf'}
    return fake_instance.fake_db_instance(**values)
def return_security_group_by_name(context, project_id, group_name):
    """Stub returning a security group with one attached instance."""
    return {'id': 1,
            'name': group_name,
            "instances": [{'id': 1, 'uuid': FAKE_UUID1}]}
def return_security_group_without_instances(context, project_id, group_name):
    """Stub returning a security group with no attached instances."""
    return {'id': 1, 'name': group_name}
def return_server_nonexistent(context, server_id, columns_to_join=None):
    """Stub that simulates a missing instance at the DB layer."""
    raise exception.InstanceNotFound(instance_id=server_id)
# NOTE(oomichi): v2.1 API does not support security group management (create/
# update/delete a security group). We don't need to test this class against
# v2.1 API.
class TestSecurityGroups(test.TestCase):
    def setUp(self):
        """Build the three controllers under test and derive self.fake_id."""
        super(TestSecurityGroups, self).setUp()
        self.controller = secgroups_v2.SecurityGroupController()
        self.server_controller = (
            secgroups_v2.ServerSecurityGroupController())
        self.manager = secgroups_v2.SecurityGroupActionController()
        # This needs to be done here to set fake_id because the derived
        # class needs to be called first if it wants to set
        # 'security_group_api' and this setUp method needs to be called.
        # fake_id format therefore depends on whether the installed API
        # identifies groups by UUID or by integer id.
        if self.controller.security_group_api.id_is_uuid:
            self.fake_id = '11111111-1111-1111-1111-111111111111'
        else:
            self.fake_id = '11111111'
def _assert_no_security_groups_reserved(self, context):
"""Check that no reservations are leaked during tests."""
result = quota.QUOTAS.get_project_quotas(context, context.project_id)
self.assertEqual(result['security_groups']['reserved'], 0)
def _assert_security_groups_in_use(self, project_id, user_id, in_use):
context = context_maker.get_admin_context()
result = quota.QUOTAS.get_user_quotas(context, project_id, user_id)
self.assertEqual(result['security_groups']['in_use'], in_use)
def test_create_security_group(self):
sg = security_group_template()
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
res_dict = self.controller.create(req, {'security_group': sg})
self.assertEqual(res_dict['security_group']['name'], 'test')
self.assertEqual(res_dict['security_group']['description'],
'test-description')
def test_create_security_group_with_no_name(self):
sg = security_group_template()
del sg['name']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, sg)
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_no_description(self):
sg = security_group_template()
del sg['description']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_empty_description(self):
sg = security_group_template()
sg['description'] = ""
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
try:
self.controller.create(req, {'security_group': sg})
self.fail('Should have raised BadRequest exception')
except webob.exc.HTTPBadRequest as exc:
self.assertEqual('description has a minimum character requirement'
' of 1.', exc.explanation)
except exception.InvalidInput as exc:
self.fail('Should have raised BadRequest exception instead of')
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_blank_name(self):
sg = security_group_template(name='')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_whitespace_name(self):
sg = security_group_template(name=' ')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_blank_description(self):
sg = security_group_template(description='')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_whitespace_description(self):
sg = security_group_template(description=' ')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_duplicate_name(self):
sg = security_group_template()
# FIXME: Stub out _get instead of creating twice
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.controller.create(req, {'security_group': sg})
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_no_body(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, None)
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_no_security_group(self):
body = {'no-securityGroup': None}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_above_255_characters_name(self):
sg = security_group_template(name='1234567890' * 26)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_above_255_characters_description(self):
sg = security_group_template(description='1234567890' * 26)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_non_string_name(self):
sg = security_group_template(name=12)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_non_string_description(self):
sg = security_group_template(description=12)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_quota_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
for num in range(1, CONF.quota_security_groups):
name = 'test%s' % num
sg = security_group_template(name=name)
res_dict = self.controller.create(req, {'security_group': sg})
self.assertEqual(res_dict['security_group']['name'], name)
sg = security_group_template()
self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
req, {'security_group': sg})
def test_get_security_group_list(self):
groups = []
for i, name in enumerate(['default', 'test']):
sg = security_group_template(id=i + 1,
name=name,
description=name + '-desc',
rules=[])
groups.append(sg)
expected = {'security_groups': groups}
def return_security_groups(context, project_id):
return [security_group_db(sg) for sg in groups]
self.stubs.Set(nova.db, 'security_group_get_by_project',
return_security_groups)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
res_dict = self.controller.index(req)
self.assertEqual(res_dict, expected)
    def test_get_security_group_list_missing_group_id_rule(self):
        # Rules with an unresolvable group_id (empty dict, or a reference to
        # a deleted group) should be normalized/dropped in the API response.
        groups = []
        rule1 = security_group_rule_template(cidr='10.2.3.124/24',
                                             parent_group_id=1,
                                             group_id={}, id=88,
                                             protocol='TCP')
        rule2 = security_group_rule_template(cidr='10.2.3.125/24',
                                             parent_group_id=1,
                                             id=99, protocol=88,
                                             group_id='HAS_BEEN_DELETED')
        sg = security_group_template(id=1,
                                     name='test',
                                     description='test-desc',
                                     rules=[rule1, rule2])
        groups.append(sg)
        # An expected rule here needs to be created as the api returns
        # different attributes on the rule for a response than what was
        # passed in. For example:
        # "cidr": "0.0.0.0/0" ->"ip_range": {"cidr": "0.0.0.0/0"}
        expected_rule = security_group_rule_template(
            ip_range={'cidr': '10.2.3.124/24'}, parent_group_id=1,
            group={}, id=88, ip_protocol='TCP')
        # Only rule1 survives: rule2's group was deleted, so it is omitted.
        expected = security_group_template(id=1,
                                           name='test',
                                           description='test-desc',
                                           rules=[expected_rule])
        expected = {'security_groups': [expected]}
        def return_security_groups(context, project, search_opts):
            return [security_group_db(sg) for sg in groups]
        self.stubs.Set(self.controller.security_group_api, 'list',
                       return_security_groups)
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
        res_dict = self.controller.index(req)
        self.assertEqual(res_dict, expected)
    def test_get_security_group_list_all_tenants(self):
        # An admin with all_tenants=1 sees every tenant's groups; without the
        # flag only the requesting tenant's groups are returned.
        all_groups = []
        tenant_groups = []
        for i, name in enumerate(['default', 'test']):
            sg = security_group_template(id=i + 1,
                                         name=name,
                                         description=name + '-desc',
                                         rules=[])
            all_groups.append(sg)
            if name == 'default':
                tenant_groups.append(sg)
        all = {'security_groups': all_groups}
        tenant_specific = {'security_groups': tenant_groups}
        def return_all_security_groups(context):
            return [security_group_db(sg) for sg in all_groups]
        self.stubs.Set(nova.db, 'security_group_get_all',
                       return_all_security_groups)
        def return_tenant_security_groups(context, project_id):
            return [security_group_db(sg) for sg in tenant_groups]
        self.stubs.Set(nova.db, 'security_group_get_by_project',
                       return_tenant_security_groups)
        path = '/v2/fake/os-security-groups'
        # Default: tenant-scoped listing.
        req = fakes.HTTPRequest.blank(path, use_admin_context=True)
        res_dict = self.controller.index(req)
        self.assertEqual(res_dict, tenant_specific)
        # all_tenants=1: global listing.
        req = fakes.HTTPRequest.blank('%s?all_tenants=1' % path,
                                      use_admin_context=True)
        res_dict = self.controller.index(req)
        self.assertEqual(res_dict, all)
    def test_get_security_group_by_instance(self):
        # Server-scoped index() returns the groups bound to that instance.
        groups = []
        for i, name in enumerate(['default', 'test']):
            sg = security_group_template(id=i + 1,
                                         name=name,
                                         description=name + '-desc',
                                         rules=[])
            groups.append(sg)
        expected = {'security_groups': groups}
        def return_instance(context, server_id,
                            columns_to_join=None, use_slave=False):
            # Confirm the controller looked up the right server.
            self.assertEqual(server_id, FAKE_UUID1)
            return return_server_by_uuid(context, server_id)
        self.stubs.Set(nova.db, 'instance_get_by_uuid',
                       return_instance)
        def return_security_groups(context, instance_uuid):
            self.assertEqual(instance_uuid, FAKE_UUID1)
            return [security_group_db(sg) for sg in groups]
        self.stubs.Set(nova.db, 'security_group_get_by_instance',
                       return_security_groups)
        req = fakes.HTTPRequest.blank('/v2/%s/servers/%s/os-security-groups' %
                                      ('fake', FAKE_UUID1))
        res_dict = self.server_controller.index(req, FAKE_UUID1)
        self.assertEqual(res_dict, expected)
    @mock.patch('nova.db.instance_get_by_uuid')
    @mock.patch('nova.db.security_group_get_by_instance', return_value=[])
    def test_get_security_group_empty_for_instance(self, mock_sec_group,
                                                   mock_db_get_ins):
        # An instance with no groups yields an empty list, not an error.
        expected = {'security_groups': []}
        def return_instance(context, server_id,
                            columns_to_join=None, use_slave=False):
            self.assertEqual(server_id, FAKE_UUID1)
            return return_server_by_uuid(context, server_id)
        mock_db_get_ins.side_effect = return_instance
        req = fakes.HTTPRequest.blank('/v2/%s/servers/%s/os-security-groups' %
                                      ('fake', FAKE_UUID1))
        res_dict = self.server_controller.index(req, FAKE_UUID1)
        self.assertEqual(expected, res_dict)
        # The group lookup must be keyed on the requested instance uuid.
        mock_sec_group.assert_called_once_with(req.environ['nova.context'],
                                               FAKE_UUID1)
def test_get_security_group_by_instance_non_existing(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_nonexistent)
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/os-security-groups')
self.assertRaises(webob.exc.HTTPNotFound,
self.server_controller.index, req, '1')
def test_get_security_group_by_instance_invalid_id(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/servers/invalid/os-security-groups')
self.assertRaises(webob.exc.HTTPNotFound,
self.server_controller.index, req, 'invalid')
def test_get_security_group_by_id(self):
sg = security_group_template(id=2, rules=[])
def return_security_group(context, group_id):
self.assertEqual(sg['id'], group_id)
return security_group_db(sg)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
res_dict = self.controller.show(req, '2')
expected = {'security_group': sg}
self.assertEqual(res_dict, expected)
def test_get_security_group_by_invalid_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, 'invalid')
def test_get_security_group_by_non_existing_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
self.fake_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.fake_id)
    def test_update_security_group(self):
        """PUT on a group forwards name/description to the db and echoes them."""
        sg = security_group_template(id=2, rules=[])
        sg_update = security_group_template(id=2, rules=[],
                name='update_name', description='update_desc')
        def return_security_group(context, group_id):
            self.assertEqual(sg['id'], group_id)
            return security_group_db(sg)
        def return_update_security_group(context, group_id, values,
                                         columns_to_join=None):
            # The controller must pass through exactly the updated fields.
            self.assertEqual(sg_update['id'], group_id)
            self.assertEqual(sg_update['name'], values['name'])
            self.assertEqual(sg_update['description'], values['description'])
            return security_group_db(sg_update)
        self.stubs.Set(nova.db, 'security_group_update',
                       return_update_security_group)
        self.stubs.Set(nova.db, 'security_group_get',
                       return_security_group)
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
        res_dict = self.controller.update(req, '2',
                                          {'security_group': sg_update})
        expected = {'security_group': sg_update}
        self.assertEqual(res_dict, expected)
def test_update_security_group_name_to_default(self):
sg = security_group_template(id=2, rules=[], name='default')
def return_security_group(context, group_id):
self.assertEqual(sg['id'], group_id)
return security_group_db(sg)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, '2', {'security_group': sg})
def test_update_default_security_group_fail(self):
sg = security_group_template()
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, '1', {'security_group': sg})
    def test_delete_security_group_by_id(self):
        """DELETE on a normal group reaches security_group_destroy."""
        sg = security_group_template(id=1, project_id='fake_project',
                                     user_id='fake_user', rules=[])
        self.called = False
        def security_group_destroy(context, id):
            # Record that the db-layer destroy was actually invoked.
            self.called = True
        def return_security_group(context, group_id):
            self.assertEqual(sg['id'], group_id)
            return security_group_db(sg)
        self.stubs.Set(nova.db, 'security_group_destroy',
                       security_group_destroy)
        self.stubs.Set(nova.db, 'security_group_get',
                       return_security_group)
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
        self.controller.delete(req, '1')
        self.assertTrue(self.called)
    def test_delete_security_group_by_admin(self):
        """An admin can delete the group and the quota usage is released."""
        sg = security_group_template(id=2, rules=[])
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
        self.controller.create(req, {'security_group': sg})
        context = req.environ['nova.context']
        # Ensure quota usage for security group is correct.
        self._assert_security_groups_in_use(context.project_id,
                                            context.user_id, 2)
        # Delete the security group by admin.
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2',
                                      use_admin_context=True)
        self.controller.delete(req, '2')
        # Ensure quota for security group in use is released.
        self._assert_security_groups_in_use(context.project_id,
                                            context.user_id, 1)
def test_delete_security_group_by_invalid_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, 'invalid')
def test_delete_security_group_by_non_existing_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
% self.fake_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.fake_id)
    def test_delete_security_group_in_use(self):
        """A group still referenced by an instance cannot be deleted (400)."""
        sg = security_group_template(id=1, rules=[])
        def security_group_in_use(context, id):
            # Pretend the group is attached to at least one instance.
            return True
        def return_security_group(context, group_id):
            self.assertEqual(sg['id'], group_id)
            return security_group_db(sg)
        self.stubs.Set(nova.db, 'security_group_in_use',
                       security_group_in_use)
        self.stubs.Set(nova.db, 'security_group_get',
                       return_security_group)
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                          req, '1')
def test_associate_by_non_existing_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.assertEqual(return_server(None, '1'),
nova.db.instance_get(None, '1'))
body = dict(addSecurityGroup=dict(name='non-existing'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_by_invalid_server_id(self):
body = dict(addSecurityGroup=dict(name='test'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/invalid/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._addSecurityGroup, req, 'invalid', body)
def test_associate_without_body(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(addSecurityGroup=None)
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_no_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(addSecurityGroup=dict())
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_security_group_name_with_whitespaces(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(addSecurityGroup=dict(name=" "))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_non_existing_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_nonexistent)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_non_running_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_non_running_server)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_without_instances)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._addSecurityGroup(req, '1', body)
def test_associate_already_associated_security_group_to_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
    def test_associate(self):
        """A successful add records the group on the instance in the db."""
        self.stubs.Set(nova.db, 'instance_get', return_server)
        self.stubs.Set(nova.db, 'instance_get_by_uuid',
                       return_server_by_uuid)
        # mox expectation: exactly one instance_add_security_group db call.
        self.mox.StubOutWithMock(nova.db, 'instance_add_security_group')
        nova.db.instance_add_security_group(mox.IgnoreArg(),
                                            mox.IgnoreArg(),
                                            mox.IgnoreArg())
        self.stubs.Set(nova.db, 'security_group_get_by_name',
                       return_security_group_without_instances)
        self.mox.ReplayAll()
        body = dict(addSecurityGroup=dict(name="test"))
        req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
        self.manager._addSecurityGroup(req, '1', body)
def test_disassociate_by_non_existing_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.assertEqual(return_server(None, '1'),
nova.db.instance_get(None, '1'))
body = dict(removeSecurityGroup=dict(name='non-existing'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_by_invalid_server_id(self):
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
body = dict(removeSecurityGroup=dict(name='test'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/invalid/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup, req, 'invalid',
body)
def test_disassociate_without_body(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(removeSecurityGroup=None)
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_no_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(removeSecurityGroup=dict())
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_security_group_name_with_whitespaces(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(removeSecurityGroup=dict(name=" "))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_non_existing_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_non_running_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_non_running_server)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._removeSecurityGroup(req, '1', body)
def test_disassociate_already_associated_security_group_to_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_without_instances)
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._removeSecurityGroup, req, '1', body)
    def test_disassociate(self):
        """A successful remove deletes the group link from the instance."""
        self.stubs.Set(nova.db, 'instance_get', return_server)
        self.stubs.Set(nova.db, 'instance_get_by_uuid',
                       return_server_by_uuid)
        # mox expectation: exactly one instance_remove_security_group call.
        self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group')
        nova.db.instance_remove_security_group(mox.IgnoreArg(),
                                               mox.IgnoreArg(),
                                               mox.IgnoreArg())
        self.stubs.Set(nova.db, 'security_group_get_by_name',
                       return_security_group_by_name)
        self.mox.ReplayAll()
        body = dict(removeSecurityGroup=dict(name="test"))
        req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
        self.manager._removeSecurityGroup(req, '1', body)
# NOTE(oomichi): v2.1 API does not support security group management (create/
# update/delete a security group). We don't need to test this class against
# v2.1 API.
class TestSecurityGroupRules(test.TestCase):
    """Tests for the os-security-group-rules create/delete API."""
    def setUp(self):
        super(TestSecurityGroupRules, self).setUp()
        self.controller = secgroups_v2.SecurityGroupController()
        # Pick uuid or integer ids depending on which security group API
        # backend is in effect (id_is_uuid comes from the configured API).
        if self.controller.security_group_api.id_is_uuid:
            id1 = '11111111-1111-1111-1111-111111111111'
            id2 = '22222222-2222-2222-2222-222222222222'
            self.invalid_id = '33333333-3333-3333-3333-333333333333'
        else:
            id1 = 1
            id2 = 2
            self.invalid_id = '33333333'
        self.sg1 = security_group_template(id=id1)
        self.sg2 = security_group_template(
            id=id2, name='authorize_revoke',
            description='authorize-revoke testing')
        db1 = security_group_db(self.sg1)
        db2 = security_group_db(self.sg2)
        def return_security_group(context, group_id, columns_to_join=None):
            # Serve only the two fixture groups; anything else is NotFound.
            if group_id == db1['id']:
                return db1
            if group_id == db2['id']:
                return db2
            raise exception.NotFound()
        self.stubs.Set(nova.db, 'security_group_get',
                       return_security_group)
        self.parent_security_group = db2
        # Replace the group controller with the rules controller under test.
        self.controller = secgroups_v2.SecurityGroupRulesController()
def test_create_by_cidr(self):
rule = security_group_rule_template(cidr='10.2.3.124/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.sg2['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"10.2.3.124/24")
def test_create_by_group_id(self):
rule = security_group_rule_template(group_id=self.sg1['id'],
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.sg2['id'])
    def test_create_by_same_group_id(self):
        """Rules for the same source group may coexist if ports differ."""
        rule1 = security_group_rule_template(group_id=self.sg1['id'],
                                             from_port=80, to_port=80,
                                             parent_group_id=self.sg2['id'])
        # Seed the parent group with an existing port-80 rule.
        self.parent_security_group['rules'] = [security_group_rule_db(rule1)]
        rule2 = security_group_rule_template(group_id=self.sg1['id'],
                                             from_port=81, to_port=81,
                                             parent_group_id=self.sg2['id'])
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        res_dict = self.controller.create(req, {'security_group_rule': rule2})
        security_group_rule = res_dict['security_group_rule']
        self.assertNotEqual(security_group_rule['id'], 0)
        self.assertEqual(security_group_rule['parent_group_id'],
                         self.sg2['id'])
        self.assertEqual(security_group_rule['from_port'], 81)
        self.assertEqual(security_group_rule['to_port'], 81)
def test_create_none_value_from_to_port(self):
rule = {'parent_group_id': self.sg1['id'],
'group_id': self.sg1['id']}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertIsNone(security_group_rule['from_port'])
self.assertIsNone(security_group_rule['to_port'])
self.assertEqual(security_group_rule['group']['name'], 'test')
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
def test_create_none_value_from_to_port_icmp(self):
rule = {'parent_group_id': self.sg1['id'],
'group_id': self.sg1['id'],
'ip_protocol': 'ICMP'}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertEqual(security_group_rule['ip_protocol'], 'ICMP')
self.assertEqual(security_group_rule['from_port'], -1)
self.assertEqual(security_group_rule['to_port'], -1)
self.assertEqual(security_group_rule['group']['name'], 'test')
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
def test_create_none_value_from_to_port_tcp(self):
rule = {'parent_group_id': self.sg1['id'],
'group_id': self.sg1['id'],
'ip_protocol': 'TCP'}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertEqual(security_group_rule['ip_protocol'], 'TCP')
self.assertEqual(security_group_rule['from_port'], 1)
self.assertEqual(security_group_rule['to_port'], 65535)
self.assertEqual(security_group_rule['group']['name'], 'test')
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
def test_create_by_invalid_cidr_json(self):
rule = security_group_rule_template(
ip_protocol="tcp",
from_port=22,
to_port=22,
parent_group_id=self.sg2['id'],
cidr="10.2.3.124/2433")
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_by_invalid_tcp_port_json(self):
rule = security_group_rule_template(
ip_protocol="tcp",
from_port=75534,
to_port=22,
parent_group_id=self.sg2['id'],
cidr="10.2.3.124/24")
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_by_invalid_icmp_port_json(self):
rule = security_group_rule_template(
ip_protocol="icmp",
from_port=1,
to_port=256,
parent_group_id=self.sg2['id'],
cidr="10.2.3.124/24")
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_add_existing_rules_by_cidr(self):
rule = security_group_rule_template(cidr='10.0.0.0/24',
parent_group_id=self.sg2['id'])
self.parent_security_group['rules'] = [security_group_rule_db(rule)]
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_add_existing_rules_by_group_id(self):
rule = security_group_rule_template(group_id=1)
self.parent_security_group['rules'] = [security_group_rule_db(rule)]
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_body(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, None)
def test_create_with_no_security_group_rule_in_body(self):
rules = {'test': 'test'}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, rules)
def test_create_with_invalid_parent_group_id(self):
rule = security_group_rule_template(parent_group_id='invalid')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_non_existing_parent_group_id(self):
rule = security_group_rule_template(group_id=None,
parent_group_id=self.invalid_id)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_non_existing_group_id(self):
rule = security_group_rule_template(group_id='invalid',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_invalid_protocol(self):
rule = security_group_rule_template(ip_protocol='invalid-protocol',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_protocol(self):
rule = security_group_rule_template(cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
del rule['ip_protocol']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_invalid_from_port(self):
rule = security_group_rule_template(from_port='666666',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_invalid_to_port(self):
rule = security_group_rule_template(to_port='666666',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_non_numerical_from_port(self):
rule = security_group_rule_template(from_port='invalid',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_non_numerical_to_port(self):
rule = security_group_rule_template(to_port='invalid',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_from_port(self):
rule = security_group_rule_template(cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
del rule['from_port']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_to_port(self):
rule = security_group_rule_template(cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
del rule['to_port']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_invalid_cidr(self):
rule = security_group_rule_template(cidr='10.2.2222.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_cidr_group(self):
rule = security_group_rule_template(parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"0.0.0.0/0")
def test_create_with_invalid_group_id(self):
rule = security_group_rule_template(group_id='invalid',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_empty_group_id(self):
rule = security_group_rule_template(group_id='',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_nonexist_group_id(self):
rule = security_group_rule_template(group_id=self.invalid_id,
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_same_group_parent_id_and_group_id(self):
rule = security_group_rule_template(group_id=self.sg1['id'],
parent_group_id=self.sg1['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
self.assertEqual(security_group_rule['group']['name'],
self.sg1['name'])
def _test_create_with_no_ports_and_no_group(self, proto):
rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id']}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
    def _test_create_with_no_ports(self, proto):
        """Helper: a source-group rule with no ports gets default ports.

        tcp/udp default to the full 1-65535 range; icmp defaults to -1/-1.
        """
        rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id'],
                'group_id': self.sg1['id']}
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        res_dict = self.controller.create(req, {'security_group_rule': rule})
        security_group_rule = res_dict['security_group_rule']
        expected_rule = {
            'from_port': 1, 'group': {'tenant_id': '123', 'name': 'test'},
            'ip_protocol': proto, 'to_port': 65535, 'parent_group_id':
            self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
        }
        if proto == 'icmp':
            expected_rule['to_port'] = -1
            expected_rule['from_port'] = -1
        self.assertEqual(expected_rule, security_group_rule)
def test_create_with_no_ports_icmp(self):
self._test_create_with_no_ports_and_no_group('icmp')
self._test_create_with_no_ports('icmp')
def test_create_with_no_ports_tcp(self):
self._test_create_with_no_ports_and_no_group('tcp')
self._test_create_with_no_ports('tcp')
def test_create_with_no_ports_udp(self):
self._test_create_with_no_ports_and_no_group('udp')
self._test_create_with_no_ports('udp')
    def _test_create_with_ports(self, proto, from_port, to_port):
        """Helper: explicit from/to ports are echoed back unchanged."""
        rule = {
            'ip_protocol': proto, 'from_port': from_port, 'to_port': to_port,
            'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
        }
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        res_dict = self.controller.create(req, {'security_group_rule': rule})
        security_group_rule = res_dict['security_group_rule']
        expected_rule = {
            'from_port': from_port,
            'group': {'tenant_id': '123', 'name': 'test'},
            'ip_protocol': proto, 'to_port': to_port, 'parent_group_id':
            self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
        }
        self.assertEqual(proto, security_group_rule['ip_protocol'])
        self.assertEqual(from_port, security_group_rule['from_port'])
        self.assertEqual(to_port, security_group_rule['to_port'])
        self.assertEqual(expected_rule, security_group_rule)
def test_create_with_ports_icmp(self):
self._test_create_with_ports('icmp', 0, 1)
self._test_create_with_ports('icmp', 0, 0)
self._test_create_with_ports('icmp', 1, 0)
def test_create_with_ports_tcp(self):
self._test_create_with_ports('tcp', 1, 1)
self._test_create_with_ports('tcp', 1, 65535)
self._test_create_with_ports('tcp', 65535, 65535)
def test_create_with_ports_udp(self):
self._test_create_with_ports('udp', 1, 1)
self._test_create_with_ports('udp', 1, 65535)
self._test_create_with_ports('udp', 65535, 65535)
    def test_delete(self):
        """DELETE on a rule id fetches the rule and destroys it."""
        rule = security_group_rule_template(id=self.sg2['id'],
                                            parent_group_id=self.sg2['id'])
        def security_group_rule_get(context, id):
            return security_group_rule_db(rule)
        def security_group_rule_destroy(context, id):
            # Destroy is a no-op here; the test only checks the happy path.
            pass
        self.stubs.Set(nova.db, 'security_group_rule_get',
                       security_group_rule_get)
        self.stubs.Set(nova.db, 'security_group_rule_destroy',
                       security_group_rule_destroy)
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
                                      % self.sg2['id'])
        self.controller.delete(req, self.sg2['id'])
def test_delete_invalid_rule_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules' +
'/invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, 'invalid')
def test_delete_non_existing_rule_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
% self.invalid_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.invalid_id)
    def test_create_rule_quota_limit(self):
        """Creating one rule past the per-group quota raises 403."""
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        # Fill the quota with distinct single-port rules first.
        for num in range(100, 100 + CONF.quota_security_group_rules):
            rule = {
                'ip_protocol': 'tcp', 'from_port': num,
                'to_port': num, 'parent_group_id': self.sg2['id'],
                'group_id': self.sg1['id']
            }
            self.controller.create(req, {'security_group_rule': rule})
        rule = {
            'ip_protocol': 'tcp', 'from_port': '121', 'to_port': '121',
            'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
        }
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
                          req, {'security_group_rule': rule})
def test_create_rule_cidr_allow_all(self):
rule = security_group_rule_template(cidr='0.0.0.0/0',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"0.0.0.0/0")
def test_create_rule_cidr_ipv6_allow_all(self):
rule = security_group_rule_template(cidr='::/0',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"::/0")
def test_create_rule_cidr_allow_some(self):
rule = security_group_rule_template(cidr='15.0.0.0/8',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"15.0.0.0/8")
def test_create_rule_cidr_bad_netmask(self):
rule = security_group_rule_template(cidr='15.0.0.0/0')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
class TestSecurityGroupRulesXMLDeserializer(test.TestCase):
    """Tests for XML deserialization of security group rule requests."""
    def setUp(self):
        super(TestSecurityGroupRulesXMLDeserializer, self).setUp()
        self.deserializer = secgroups_v2.SecurityGroupRulesXMLDeserializer()
    def test_create_request(self):
        """A fully-populated rule element maps to the expected dict."""
        serial_request = """
<security_group_rule>
  <parent_group_id>12</parent_group_id>
  <from_port>22</from_port>
  <to_port>22</to_port>
  <group_id></group_id>
  <ip_protocol>tcp</ip_protocol>
  <cidr>10.0.0.0/24</cidr>
</security_group_rule>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "security_group_rule": {
                "parent_group_id": "12",
                "from_port": "22",
                "to_port": "22",
                "ip_protocol": "tcp",
                "group_id": "",
                "cidr": "10.0.0.0/24",
            },
        }
        self.assertEqual(request['body'], expected)
    def test_create_no_protocol_request(self):
        """A rule element without ip_protocol simply omits that key."""
        serial_request = """
<security_group_rule>
  <parent_group_id>12</parent_group_id>
  <from_port>22</from_port>
  <to_port>22</to_port>
  <group_id></group_id>
  <cidr>10.0.0.0/24</cidr>
</security_group_rule>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "security_group_rule": {
                "parent_group_id": "12",
                "from_port": "22",
                "to_port": "22",
                "group_id": "",
                "cidr": "10.0.0.0/24",
            },
        }
        self.assertEqual(request['body'], expected)
    def test_corrupt_xml(self):
        """Should throw a 400 error on corrupt xml."""
        self.assertRaises(
            exception.MalformedRequestBody,
            self.deserializer.deserialize,
            utils.killer_xml_body())
class TestSecurityGroupXMLDeserializer(test.TestCase):
    """Tests turning security-group create request XML into dict bodies."""

    def setUp(self):
        super(TestSecurityGroupXMLDeserializer, self).setUp()
        # Deserializer under test.
        self.deserializer = secgroups_v2.SecurityGroupXMLDeserializer()

    def test_create_request(self):
        # name arrives as an XML attribute, description as a child element.
        serial_request = """
<security_group name="test">
<description>test</description>
</security_group>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "security_group": {
                "name": "test",
                "description": "test",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_create_no_description_request(self):
        # A missing <description> simply omits the key from the body.
        serial_request = """
<security_group name="test">
</security_group>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "security_group": {
                "name": "test",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_create_no_name_request(self):
        # A missing name attribute likewise omits the key.
        serial_request = """
<security_group>
<description>test</description>
</security_group>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "security_group": {
                "description": "test",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_corrupt_xml(self):
        """Should throw a 400 error on corrupt xml."""
        self.assertRaises(
            exception.MalformedRequestBody,
            self.deserializer.deserialize,
            utils.killer_xml_body())
class TestSecurityGroupXMLSerializer(test.TestCase):
    """Tests for the XML templates that render security groups and rules."""

    def setUp(self):
        super(TestSecurityGroupXMLSerializer, self).setUp()
        self.namespace = wsgi.XMLNS_V11
        # One serializer per representation: single rule, group index,
        # and single group.
        self.rule_serializer = secgroups_v2.SecurityGroupRuleTemplate()
        self.index_serializer = secgroups_v2.SecurityGroupsTemplate()
        self.default_serializer = secgroups_v2.SecurityGroupTemplate()

    def _tag(self, elem):
        # Split the Clark-notation tag '{namespace}tag', assert the
        # namespace is the expected v1.1 one, and return the bare name.
        tagname = elem.tag
        self.assertEqual(tagname[0], '{')
        tmp = tagname.partition('}')
        namespace = tmp[0][1:]
        self.assertEqual(namespace, self.namespace)
        return tmp[2]

    def _verify_security_group_rule(self, raw_rule, tree):
        # id/parent_group_id serialize as attributes; the rest are child
        # elements.  'group' and 'ip_range' nest one level deeper.
        self.assertEqual(raw_rule['id'], tree.get('id'))
        self.assertEqual(raw_rule['parent_group_id'],
                         tree.get('parent_group_id'))
        seen = set()
        expected = set(['ip_protocol', 'from_port', 'to_port',
                        'group', 'group/name', 'group/tenant_id',
                        'ip_range', 'ip_range/cidr'])
        for child in tree:
            child_tag = self._tag(child)
            self.assertIn(child_tag, raw_rule)
            seen.add(child_tag)
            if child_tag in ('group', 'ip_range'):
                for gr_child in child:
                    gr_child_tag = self._tag(gr_child)
                    self.assertIn(gr_child_tag, raw_rule[child_tag])
                    seen.add('%s/%s' % (child_tag, gr_child_tag))
                    self.assertEqual(gr_child.text,
                                     raw_rule[child_tag][gr_child_tag])
            else:
                self.assertEqual(child.text, raw_rule[child_tag])
        # Every expected tag must have appeared exactly once.
        self.assertEqual(seen, expected)

    def _verify_security_group(self, raw_group, tree):
        # A group serializes to attributes plus exactly two children:
        # <description> and the <rules> collection.
        rules = raw_group['rules']
        self.assertEqual('security_group', self._tag(tree))
        self.assertEqual(raw_group['id'], tree.get('id'))
        self.assertEqual(raw_group['tenant_id'], tree.get('tenant_id'))
        self.assertEqual(raw_group['name'], tree.get('name'))
        self.assertEqual(2, len(tree))
        for child in tree:
            child_tag = self._tag(child)
            if child_tag == 'rules':
                self.assertEqual(2, len(child))
                for idx, gr_child in enumerate(child):
                    self.assertEqual(self._tag(gr_child), 'rule')
                    self._verify_security_group_rule(rules[idx], gr_child)
            else:
                self.assertEqual('description', child_tag)
                self.assertEqual(raw_group['description'], child.text)

    def test_rule_serializer(self):
        # A single rule round-trips through SecurityGroupRuleTemplate.
        raw_rule = dict(
            id='123',
            parent_group_id='456',
            ip_protocol='tcp',
            from_port='789',
            to_port='987',
            group=dict(name='group', tenant_id='tenant'),
            ip_range=dict(cidr='10.0.0.0/8'))
        rule = dict(security_group_rule=raw_rule)
        text = self.rule_serializer.serialize(rule)

        tree = etree.fromstring(text)

        self.assertEqual('security_group_rule', self._tag(tree))
        self._verify_security_group_rule(raw_rule, tree)

    def test_group_serializer(self):
        # A single group with two rules via SecurityGroupTemplate.
        rules = [dict(
                id='123',
                parent_group_id='456',
                ip_protocol='tcp',
                from_port='789',
                to_port='987',
                group=dict(name='group1', tenant_id='tenant1'),
                ip_range=dict(cidr='10.55.44.0/24')),
            dict(
                id='654',
                parent_group_id='321',
                ip_protocol='udp',
                from_port='234',
                to_port='567',
                group=dict(name='group2', tenant_id='tenant2'),
                ip_range=dict(cidr='10.44.55.0/24'))]
        raw_group = dict(
            id='890',
            description='description',
            name='name',
            tenant_id='tenant',
            rules=rules)
        sg_group = dict(security_group=raw_group)
        text = self.default_serializer.serialize(sg_group)

        tree = etree.fromstring(text)

        self._verify_security_group(raw_group, tree)

    def test_groups_serializer(self):
        # An index of two groups, two rules each, via SecurityGroupsTemplate.
        rules = [dict(
                id='123',
                parent_group_id='1234',
                ip_protocol='tcp',
                from_port='12345',
                to_port='123456',
                group=dict(name='group1', tenant_id='tenant1'),
                ip_range=dict(cidr='10.123.0.0/24')),
            dict(
                id='234',
                parent_group_id='2345',
                ip_protocol='udp',
                from_port='23456',
                to_port='234567',
                group=dict(name='group2', tenant_id='tenant2'),
                ip_range=dict(cidr='10.234.0.0/24')),
            dict(
                id='345',
                parent_group_id='3456',
                ip_protocol='tcp',
                from_port='34567',
                to_port='345678',
                group=dict(name='group3', tenant_id='tenant3'),
                ip_range=dict(cidr='10.345.0.0/24')),
            dict(
                id='456',
                parent_group_id='4567',
                ip_protocol='udp',
                from_port='45678',
                to_port='456789',
                group=dict(name='group4', tenant_id='tenant4'),
                ip_range=dict(cidr='10.456.0.0/24'))]
        groups = [dict(
                id='567',
                description='description1',
                name='name1',
                tenant_id='tenant1',
                rules=rules[0:2]),
            dict(
                id='678',
                description='description2',
                name='name2',
                tenant_id='tenant2',
                rules=rules[2:4])]
        sg_groups = dict(security_groups=groups)
        text = self.index_serializer.serialize(sg_groups)

        tree = etree.fromstring(text)

        self.assertEqual('security_groups', self._tag(tree))
        self.assertEqual(len(groups), len(tree))
        for idx, child in enumerate(tree):
            self._verify_security_group(groups[idx], child)
# Instance UUIDs shared by the compute-API stubs below.
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'


def fake_compute_get_all(*args, **kwargs):
    """Stub for compute API get_all(): two instances, two fake groups each."""
    base = {'id': 1, 'description': 'foo', 'user_id': 'bar',
            'project_id': 'baz', 'deleted': False, 'deleted_at': None,
            'updated_at': None, 'created_at': None}
    db_list = [
        fakes.stub_instance(
            1, uuid=UUID1,
            security_groups=[dict(base, **{'name': 'fake-0-0'}),
                             dict(base, **{'name': 'fake-0-1'})]),
        fakes.stub_instance(
            2, uuid=UUID2,
            security_groups=[dict(base, **{'name': 'fake-1-0'}),
                             dict(base, **{'name': 'fake-1-1'})])
    ]
    # args[1] is the request context in the stubbed compute API signature.
    return instance_obj._make_instance_list(args[1],
                                            objects.InstanceList(),
                                            db_list,
                                            ['metadata', 'system_metadata',
                                             'security_groups', 'info_cache'])
def fake_compute_get(*args, **kwargs):
    """Stub for compute API get(): one instance carrying two fake groups."""
    inst = fakes.stub_instance(1, uuid=UUID3,
                               security_groups=[{'name': 'fake-2-0'},
                                                {'name': 'fake-2-1'}])
    # args[1] is the request context in the stubbed compute API signature.
    return fake_instance.fake_instance_obj(
        args[1], expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **inst)


def fake_compute_create(*args, **kwargs):
    """Stub for compute API create(): ([instance], reservation_id) tuple."""
    return ([fake_compute_get(*args, **kwargs)], '')
def fake_get_instances_security_groups_bindings(inst, context, servers):
    """Stub bindings lookup: map each server id to its canned fake groups.

    Servers with an unknown id map to None, matching dict.get semantics.
    """
    canned = {UUID1: [{'name': 'fake-0-0'}, {'name': 'fake-0-1'}],
              UUID2: [{'name': 'fake-1-0'}, {'name': 'fake-1-1'}],
              UUID3: [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]}
    return dict((server['id'], canned.get(server['id']))
                for server in servers)
class SecurityGroupsOutputTestV21(test.TestCase):
    """Verify servers API responses include security group names (v2.1)."""

    base_url = '/v3/servers'
    content_type = 'application/json'

    def setUp(self):
        super(SecurityGroupsOutputTestV21, self).setUp()
        fakes.stub_out_nw_api(self.stubs)
        # Replace the compute API with the canned stubs defined above.
        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
        self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
        self.stubs.Set(compute.api.API, 'create', fake_compute_create)
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Security_groups'])
        self.app = self._setup_app()

    def _setup_app(self):
        # Overridden by the v2 subclass to build the legacy WSGI app.
        return fakes.wsgi_app_v3(init_only=('os-security-groups', 'servers'))

    def _make_request(self, url, body=None):
        # GET when body is None, otherwise POST the encoded body.
        req = webob.Request.blank(url)
        if body:
            req.method = 'POST'
            req.body = self._encode_body(body)
        req.content_type = self.content_type
        req.headers['Accept'] = self.content_type
        res = req.get_response(self.app)
        return res

    # The four helpers below are overridden by the XML subclass so the
    # same tests can run against both content types.
    def _encode_body(self, body):
        return jsonutils.dumps(body)

    def _get_server(self, body):
        return jsonutils.loads(body).get('server')

    def _get_servers(self, body):
        return jsonutils.loads(body).get('servers')

    def _get_groups(self, server):
        return server.get('security_groups')

    def test_create(self):
        # Created servers report the groups attached by fake_compute_get.
        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
        res = self._make_request(self.base_url, {'server': server})
        self.assertEqual(res.status_int, 202)
        server = self._get_server(res.body)
        for i, group in enumerate(self._get_groups(server)):
            name = 'fake-2-%s' % i
            self.assertEqual(group.get('name'), name)

    def test_show(self):
        url = self.base_url + '/' + UUID3
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        server = self._get_server(res.body)
        for i, group in enumerate(self._get_groups(server)):
            name = 'fake-2-%s' % i
            self.assertEqual(group.get('name'), name)

    def test_detail(self):
        # Each listed server carries its own 'fake-<idx>-<n>' groups.
        url = self.base_url + '/detail'
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        for i, server in enumerate(self._get_servers(res.body)):
            for j, group in enumerate(self._get_groups(server)):
                name = 'fake-%s-%s' % (i, j)
                self.assertEqual(group.get('name'), name)

    def test_no_instance_passthrough_404(self):
        # The extension must not mask a 404 from the core servers API.
        def fake_compute_get(*args, **kwargs):
            raise exception.InstanceNotFound(instance_id='fake')

        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
        url = self.base_url + '/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
        res = self._make_request(url)
        self.assertEqual(res.status_int, 404)
class SecurityGroupsOutputTestV2(SecurityGroupsOutputTestV21):
    """Run the same output tests against the legacy v2 servers API."""

    base_url = '/v2/fake/servers'

    def _setup_app(self):
        return fakes.wsgi_app(init_only=('servers',))
class SecurityGroupsOutputXmlTest(SecurityGroupsOutputTestV2):
    """Same assertions as the JSON tests, exercised over the XML API."""

    content_type = 'application/xml'

    class MinimalCreateServerTemplate(xmlutil.TemplateBuilder):
        # Just enough of a server template to serialize create requests.
        def construct(self):
            root = xmlutil.TemplateElement('server', selector='server')
            root.set('name')
            root.set('id')
            root.set('imageRef')
            root.set('flavorRef')
            return xmlutil.MasterTemplate(root, 1,
                                          nsmap={None: xmlutil.XMLNS_V11})

    def _encode_body(self, body):
        serializer = self.MinimalCreateServerTemplate()
        return serializer.serialize(body)

    def _get_server(self, body):
        return etree.XML(body)

    def _get_servers(self, body):
        return etree.XML(body).getchildren()

    def _get_groups(self, server):
        # NOTE(vish): we are adding security groups without an extension
        #             namespace so we don't break people using the existing
        #             functionality, but that means we need to use find with
        #             the existing server namespace.
        namespace = server.nsmap[None]
        return server.find('{%s}security_groups' % namespace).getchildren()
| |
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Abort if the json module mangles 8-decimal BTC amounts.

    Round-trips a representative value through dumps/loads and verifies
    the satoshi count survives intact; raises RuntimeError otherwise.
    """
    probe = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(probe)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the bitcoin data directory."""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    # Everything else (Linux, BSD, ...) uses the dot-directory.
    return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
    """Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
    from ConfigParser import SafeConfigParser  # Python 2 module name

    class FakeSecHead(object):
        # bitcoin.conf has no [section] headers; this file-like wrapper
        # injects a fake [all] section so ConfigParser accepts the file.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'

        def readline(self):
            if self.sechead:
                # Emit the fake header exactly once.
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                # Strip trailing '#' comments but keep the newline.
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s

    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a bitcoin JSON-RPC server and return the proxy.

    Exits the process (status 1) when the connection fails or when the
    server's testnet setting does not match the configuration.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if 'rpcport' not in config:
        config['rpcport'] = 14892 if testnet else 4892
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch
        # connection errors, but also make sure the bitcoind we're talking to
        # is/isn't testnet:
        mismatch = result.getmininginfo()['testnet'] != testnet
    except Exception:
        # BUGFIX: this was a bare "except:", which also caught the
        # SystemExit raised by the testnet-mismatch sys.exit(1) below
        # (when it lived inside the try) and printed the wrong message.
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
    if mismatch:
        sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
        sys.exit(1)
    return result
def unlock_wallet(bitcoind):
    """Return True when the wallet is usable for signing.

    Unencrypted wallets are always unlocked.  For an encrypted, locked
    wallet, prompt for the passphrase and unlock it for a few seconds;
    returns whether the wallet ended up unlocked.
    """
    info = bitcoind.getinfo()
    if 'unlocked_until' not in info:
        return True  # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            # Unlock just long enough to sign the transaction.
            bitcoind.walletpassphrase(passphrase, 5)
        except Exception:
            # BUGFIX: narrowed from a bare "except:", which reported
            # Ctrl-C / SystemExit as a wrong passphrase.
            sys.stderr.write("Wrong passphrase\n")

    info = bitcoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
    """Map address -> {'total', 'outputs', 'account'} for spendable coins.

    Combines listreceivedbyaddress (for account labels) with
    listunspent, resolving each unspent output's address by fetching
    its funding transaction via getrawtransaction.
    """
    address_summary = dict()
    address_to_account = dict()
    for info in bitcoind.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]

    unspent = bitcoind.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = bitcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]

        # This code only deals with ordinary pay-to-bitcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue

        address = pk["addresses"][0]
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }

    return address_summary
def select_coins(needed, inputs):
    """Greedily pick unspent outputs until *needed* is covered.

    Returns (outputs, change): the chosen txid/vout dicts and how much
    the selection overshoots needed (negative when inputs fall short).
    """
    chosen = []
    gathered = Decimal("0.0")
    for candidate in inputs:
        if gathered >= needed:
            break
        chosen.append({"txid": candidate["txid"], "vout": candidate["vout"]})
        gathered += candidate["amount"]
    return (chosen, gathered - needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction spending amount+fee from fromaddresses.

    Change larger than BASE_FEE is sent back to the last from-address.
    Exits the process if funds are insufficient or signing is
    incomplete.  Returns the signed transaction as hex.
    """
    all_coins = list_available(bitcoind)

    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]

    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
        sys.exit(1)

    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to bitcoind.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE:  # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)

    rawtx = bitcoind.createrawtransaction(inputs, outputs)
    signed_rawtx = bitcoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]

    return txdata
def compute_amount_in(bitcoind, txinfo):
    """Sum the values of every input of the decoded transaction *txinfo*.

    Each input's value is resolved by fetching the transaction that
    funded it via bitcoind.getrawtransaction.
    """
    total = Decimal("0.0")
    for txin in txinfo['vin']:
        source_tx = bitcoind.getrawtransaction(txin['txid'], 1)
        total += source_tx['vout'][txin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of every output of the decoded transaction *txinfo*."""
    return sum((txout['value'] for txout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
    """Abort (exit 1) when the transaction's implied fee looks unreasonable.

    Decodes txdata_hex, computes fee = inputs - outputs, and rejects
    transactions whose fee exceeds max_fee, or that pay no fee while
    being larger than 1000 bytes or moving a tiny amount.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = bitcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # BUGFIX: the no-fee checks below referenced an undefined name
        # `fee`, raising a NameError that escaped this handler.  The fee
        # is whatever the inputs do not pay out:
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))

        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line entry point: list funds, or build and send a transaction."""
    import optparse

    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get bitcoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send bitcoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of bitcoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")

    (options, args) = parser.parse_args()

    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    bitcoind = connect_JSON(config)

    if options.amount is None:
        # No amount given: just report spendable funds per address.
        address_summary = list_available(bitcoind)
        for address,info in address_summary.iteritems():  # Python 2 dict API
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(bitcoind) == False:
            pass  # Keep asking for passphrase until they get it right
        txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Refuse to pay a fee above 1% of the amount being sent.
        sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = bitcoind.sendrawtransaction(txdata)
            print(txid)
| |
# Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, dis- tribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the fol- lowing conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- ITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from decimal import Decimal
from boto.compat import filter, map
class ComplexType(dict):
    """A dict of XML attributes that also carries a scalar text value.

    The scalar lives in the instance attribute named by ``_value``;
    ``str()`` yields just that value, ``repr()`` shows it followed by
    the attribute dict.
    """
    _value = 'Value'

    def __repr__(self):
        scalar = getattr(self, self._value, None)
        return '{0}{1}'.format(scalar, self.copy())

    def __str__(self):
        return str(getattr(self, self._value, ''))
class DeclarativeType(object):
    """Marker describing one nested element of a response layout.

    Subclasses implement the SAX-style start/end protocol.  setup()
    installs a per-instance clone on the owning ResponseElement and
    teardown() replaces it with the parsed value once parsing finishes.
    """

    def __init__(self, _hint=None, **kw):
        self._value = None
        if _hint is not None:
            # Caller supplied an explicit element class.
            self._hint = _hint
            return

        # No hint: synthesize a one-off ResponseElement subclass that
        # carries any declared children passed as keyword arguments.
        class JITResponse(ResponseElement):
            pass
        self._hint = JITResponse
        self._hint.__name__ = 'JIT_{0}/{1}'.format(self.__class__.__name__,
                                                   hex(id(self._hint))[2:])
        for name, value in kw.items():
            setattr(self._hint, name, value)

    def __repr__(self):
        parent = getattr(self, '_parent', None)
        return '<{0}_{1}/{2}_{3}>'.format(self.__class__.__name__,
                                          parent and parent._name or '?',
                                          getattr(self, '_name', '?'),
                                          hex(id(self.__class__)))

    def setup(self, parent, name, *args, **kw):
        # Install a fresh clone on the parent so parse state is never
        # shared between response instances.
        self._parent = parent
        self._name = name
        self._clone = self.__class__(_hint=self._hint)
        self._clone._parent = parent
        self._clone._name = name
        setattr(self._parent, self._name, self._clone)

    def start(self, *args, **kw):
        raise NotImplementedError

    def end(self, *args, **kw):
        raise NotImplementedError

    def teardown(self, *args, **kw):
        # Replace the clone on the parent with the accumulated value.
        setattr(self._parent, self._name, self._value)
class Element(DeclarativeType):
    """A single nested element parsed into one ResponseElement instance."""

    def start(self, *args, **kw):
        self._value = self._hint(parent=self._parent, **kw)
        return self._value

    def end(self, *args, **kw):
        pass


class SimpleList(DeclarativeType):
    """A repeated scalar element collected into a plain Python list."""

    def __init__(self, *args, **kw):
        super(SimpleList, self).__init__(*args, **kw)
        self._value = []

    def start(self, *args, **kw):
        # Scalars need no nested handler; the text arrives via end().
        return None

    def end(self, name, value, *args, **kw):
        self._value.append(value)


class ElementList(SimpleList):
    """A repeated complex element collected into a list of ResponseElements."""

    def start(self, *args, **kw):
        value = self._hint(parent=self._parent, **kw)
        self._value.append(value)
        return value

    def end(self, *args, **kw):
        pass
class MemberList(Element):
    """An Amazon '<member>'-wrapped collection.

    The collection arrives as a container element whose repeated
    children are all named 'member'; teardown() collapses the container
    so the attribute ends up as the plain list of members.
    """

    def __init__(self, _member=None, _hint=None, *args, **kw):
        message = 'Invalid `member` specification in {0}'.format(self.__class__.__name__)
        # 'member' is reserved for the wrapper itself.
        assert 'member' not in kw, message
        if _member is None:
            if _hint is None:
                # Default: members are generic elements built from kw.
                super(MemberList, self).__init__(*args, member=ElementList(**kw))
            else:
                super(MemberList, self).__init__(_hint=_hint)
        else:
            if _hint is None:
                # _member is either a DeclarativeType subclass (used as
                # the member handler) or an element class for members.
                if issubclass(_member, DeclarativeType):
                    member = _member(**kw)
                else:
                    member = ElementList(_member, **kw)
                super(MemberList, self).__init__(*args, member=member)
            else:
                # Supplying both _member and _hint is contradictory.
                message = 'Nonsensical {0} hint {1!r}'.format(self.__class__.__name__,
                                                              _hint)
                raise AssertionError(message)

    def teardown(self, *args, **kw):
        if self._value is None:
            self._value = []
        else:
            # A still-Declarative member means no members were parsed.
            if isinstance(self._value.member, DeclarativeType):
                self._value.member = []
            # Collapse the wrapper so callers see just the list.
            self._value = self._value.member
        super(MemberList, self).teardown(*args, **kw)
class ResponseFactory(object):
    """Build response classes for MWS actions, preferring user overrides."""

    def __init__(self, scopes=None):
        # Scopes are searched in order for hand-written response classes.
        self.scopes = [] if scopes is None else scopes

    def element_factory(self, name, parent):
        # Create a subclass of `parent` bound to the given element name.
        class DynamicElement(parent):
            _name = name
        setattr(DynamicElement, '__name__', str(name))
        return DynamicElement

    def search_scopes(self, key):
        # Accept both attribute-style and mapping-style scopes;
        # returns None when no scope defines the key.
        for scope in self.scopes:
            if hasattr(scope, key):
                return getattr(scope, key)
            if hasattr(scope, '__getitem__'):
                if key in scope:
                    return scope[key]

    def find_element(self, action, suffix, parent):
        element = self.search_scopes(action + suffix)
        if element is not None:
            return element
        # A ByNextToken action reuses the base action's element layout.
        if action.endswith('ByNextToken'):
            element = self.search_scopes(action[:-len('ByNextToken')] + suffix)
            if element is not None:
                return self.element_factory(action + suffix, element)
        return self.element_factory(action + suffix, parent)

    def __call__(self, action, connection=None):
        # Responses look like <Action>Response wrapping <Action>Result.
        response = self.find_element(action, 'Response', Response)
        if not hasattr(response, action + 'Result'):
            result = self.find_element(action, 'Result', ResponseElement)
            setattr(response, action + 'Result', Element(result))
        return response(connection=connection)
def strip_namespace(func):
    """Decorator for SAX handlers that removes the namespace prefix.

    When the owning element has a ``_namespace``, a leading
    '<namespace>:' is stripped from the element name before the wrapped
    handler runs; otherwise the name passes through untouched.
    """
    from functools import wraps

    # functools.wraps preserves the handler's __name__/__doc__ for
    # debugging (the original wrapper hid them).
    @wraps(func)
    def wrapper(self, name, *args, **kw):
        if self._namespace is not None:
            prefix = self._namespace + ':'
            if name.startswith(prefix):
                name = name[len(prefix):]
        return func(self, name, *args, **kw)
    return wrapper
class ResponseElement(dict):
    """Base class for parsed MWS response nodes.

    Behaves as a dict of the element's XML attributes, while child
    elements become instance attributes.  DeclarativeType class
    attributes describe nested structure; undeclared scalar children
    are stored as plain attribute values by endElement.
    """
    # Per-class name -> class overrides consulted by _type_for.
    _override = {}
    _name = None
    _namespace = None

    def __init__(self, connection=None, name=None, parent=None, attrs=None):
        # Inherit the XML namespace from the enclosing element when this
        # class does not declare one of its own.
        if parent is not None and self._namespace is None:
            self._namespace = parent._namespace
        if connection is not None:
            self._connection = connection
        self._name = name or self._name or self.__class__.__name__
        # Clone every DeclarativeType attribute onto this instance.
        self._declared('setup', attrs=attrs)
        dict.__init__(self, attrs and attrs.copy() or {})

    def _declared(self, op, **kw):
        # Collect DeclarativeType attributes from the class hierarchy
        # (subclasses win) plus the instance, and invoke `op` on each.
        def inherit(obj):
            result = {}
            for cls in getattr(obj, '__bases__', ()):
                result.update(inherit(cls))
            result.update(obj.__dict__)
            return result

        scope = inherit(self.__class__)
        scope.update(self.__dict__)
        declared = lambda attr: isinstance(attr[1], DeclarativeType)
        for name, node in filter(declared, scope.items()):
            getattr(node, op)(self, name, parentname=self._name, **kw)

    @property
    def connection(self):
        # Raises AttributeError when no connection was supplied.
        return self._connection

    def __repr__(self):
        # Render public attributes alongside the XML-attribute dict;
        # synthesized JIT classes display as ^elementname^.
        render = lambda pair: '{0!s}: {1!r}'.format(*pair)
        do_show = lambda pair: not pair[0].startswith('_')
        attrs = filter(do_show, self.__dict__.items())
        name = self.__class__.__name__
        if name.startswith('JIT_'):
            name = '^{0}^'.format(self._name or '')
        return '{0}{1!r}({2})'.format(
            name, self.copy(), ', '.join(map(render, attrs)))

    def _type_for(self, name, attrs):
        # Overrides win, then same-named module-level classes, then the
        # generic ResponseElement.
        return self._override.get(name, globals().get(name, ResponseElement))

    @strip_namespace
    def startElement(self, name, attrs, connection):
        attribute = getattr(self, name, None)
        if isinstance(attribute, DeclarativeType):
            # Declared child: delegate to its start handler.
            return attribute.start(name=name, attrs=attrs,
                                   connection=connection)
        elif attrs.getLength():
            # Undeclared child that has XML attributes: keep them.
            setattr(self, name, ComplexType(attrs.copy()))
        else:
            return None

    @strip_namespace
    def endElement(self, name, value, connection):
        attribute = getattr(self, name, None)
        if name == self._name:
            # Our own closing tag: resolve all declared children.
            self._declared('teardown')
        elif isinstance(attribute, DeclarativeType):
            attribute.end(name=name, value=value, connection=connection)
        elif isinstance(attribute, ComplexType):
            # Attach the element text to the attribute-bearing wrapper.
            setattr(attribute, attribute._value, value)
        else:
            setattr(self, name, value)
class Response(ResponseElement):
    """Root element of an MWS response; exposes the <Action>Result child."""

    ResponseMetadata = Element()

    @strip_namespace
    def startElement(self, name, attrs, connection):
        if name == self._name:
            # The root element's own XML attributes go into the dict.
            self.update(attrs)
        else:
            return super(Response, self).startElement(name, attrs, connection)

    @property
    def _result(self):
        return getattr(self, self._action + 'Result', None)

    @property
    def _action(self):
        # e.g. 'SubmitFeedResponse' -> 'SubmitFeed'
        return (self._name or self.__class__.__name__)[:-len('Response')]


class ResponseResultList(Response):
    """Response whose result element repeats, yielding a list of results."""

    # Element class for each repeated result; overridden by subclasses.
    _ResultClass = ResponseElement

    def __init__(self, *args, **kw):
        # Installed per-instance so the list is never shared.
        setattr(self, self._action + 'Result', ElementList(self._ResultClass))
        super(ResponseResultList, self).__init__(*args, **kw)
# Declarative layouts for the Feeds, Reports, Inbound-shipment and
# Inventory APIs.  Each class mirrors the structure of the matching
# MWS result XML; bare `pass` classes exist so _type_for can find a
# named class for the element.


class FeedSubmissionInfo(ResponseElement):
    pass


class SubmitFeedResult(ResponseElement):
    FeedSubmissionInfo = Element(FeedSubmissionInfo)


class GetFeedSubmissionListResult(ResponseElement):
    FeedSubmissionInfo = ElementList(FeedSubmissionInfo)


class GetFeedSubmissionCountResult(ResponseElement):
    pass


class CancelFeedSubmissionsResult(GetFeedSubmissionListResult):
    pass


class GetServiceStatusResult(ResponseElement):
    Messages = Element(Messages=ElementList())


class ReportRequestInfo(ResponseElement):
    pass


class RequestReportResult(ResponseElement):
    ReportRequestInfo = Element()


class GetReportRequestListResult(RequestReportResult):
    ReportRequestInfo = ElementList()


class CancelReportRequestsResult(RequestReportResult):
    pass


class GetReportListResult(ResponseElement):
    ReportInfo = ElementList()


class ManageReportScheduleResult(ResponseElement):
    ReportSchedule = Element()


class GetReportScheduleListResult(ManageReportScheduleResult):
    pass


class UpdateReportAcknowledgementsResult(GetReportListResult):
    pass


class CreateInboundShipmentPlanResult(ResponseElement):
    InboundShipmentPlans = MemberList(ShipToAddress=Element(),
                                      Items=MemberList())


class ListInboundShipmentsResult(ResponseElement):
    ShipmentData = MemberList(ShipFromAddress=Element())


class ListInboundShipmentItemsResult(ResponseElement):
    ItemData = MemberList()


class ListInventorySupplyResult(ResponseElement):
    InventorySupplyList = MemberList(
        EarliestAvailability=Element(),
        SupplyDetail=MemberList(
            EarliestAvailableToPick=Element(),
            LatestAvailableToPick=Element(),
        )
    )
class ComplexAmount(ResponseElement):
    """A money element with CurrencyCode plus a decimal value child."""

    # Name of the child element holding the numeric amount.
    _amount = 'Value'

    def __repr__(self):
        return '{0} {1}'.format(self.CurrencyCode, getattr(self, self._amount))

    def __float__(self):
        return float(getattr(self, self._amount))

    def __str__(self):
        return str(getattr(self, self._amount))

    @strip_namespace
    def startElement(self, name, attrs, connection):
        # Only the two known children are legal inside a money element.
        if name not in ('CurrencyCode', self._amount):
            message = 'Unrecognized tag {0} in ComplexAmount'.format(name)
            raise AssertionError(message)
        return super(ComplexAmount, self).startElement(name, attrs, connection)

    @strip_namespace
    def endElement(self, name, value, connection):
        # Store the amount as an exact Decimal rather than a float.
        if name == self._amount:
            value = Decimal(value)
        super(ComplexAmount, self).endElement(name, value, connection)
class ComplexMoney(ComplexAmount):
    # Same layout as ComplexAmount, but the value child is named 'Amount'.
    _amount = 'Amount'


class ComplexWeight(ResponseElement):
    """A weight element with Unit and decimal Value children."""

    def __repr__(self):
        return '{0} {1}'.format(self.Value, self.Unit)

    def __float__(self):
        return float(self.Value)

    def __str__(self):
        return str(self.Value)

    @strip_namespace
    def startElement(self, name, attrs, connection):
        # Only the two known children are legal inside a weight element.
        if name not in ('Unit', 'Value'):
            message = 'Unrecognized tag {0} in ComplexWeight'.format(name)
            raise AssertionError(message)
        return super(ComplexWeight, self).startElement(name, attrs, connection)

    @strip_namespace
    def endElement(self, name, value, connection):
        # Store the weight as an exact Decimal rather than a float.
        if name == 'Value':
            value = Decimal(value)
        super(ComplexWeight, self).endElement(name, value, connection)


class Dimension(ComplexType):
    # One dimension measurement; text value stored under 'Value',
    # XML attributes (e.g. Units) kept in the dict.
    _value = 'Value'
class ComplexDimensions(ResponseElement):
    """Item/package dimensions: Height, Length, Width, Weight children."""

    _dimensions = ('Height', 'Length', 'Width', 'Weight')

    def __repr__(self):
        values = [getattr(self, key, None) for key in self._dimensions]
        values = filter(None, values)
        # Render each Dimension's Value with its 'Units' XML attribute,
        # e.g. '1.00inchesx2.00inches'.
        return 'x'.join(map('{0.Value:0.2f}{0[Units]}'.format, values))

    @strip_namespace
    def startElement(self, name, attrs, connection):
        # Only the four known dimension children are legal here.
        if name not in self._dimensions:
            message = 'Unrecognized tag {0} in ComplexDimensions'.format(name)
            raise AssertionError(message)
        # Keep the XML attributes (units) on a Dimension wrapper.
        setattr(self, name, Dimension(attrs.copy()))

    @strip_namespace
    def endElement(self, name, value, connection):
        # Coerce the text ('' included) to Decimal; the base endElement
        # then stores it as the Dimension wrapper's Value.
        if name in self._dimensions:
            value = Decimal(value or '0')
        ResponseElement.endElement(self, name, value, connection)
# --- Fulfillment API response models (declarative field definitions) ---
class FulfillmentPreviewItem(ResponseElement):
    """One line item inside a fulfillment preview."""
    EstimatedShippingWeight = Element(ComplexWeight)
class FulfillmentPreview(ResponseElement):
    """Preview of a fulfillment with fees, weights and shipment groupings."""
    EstimatedShippingWeight = Element(ComplexWeight)
    EstimatedFees = MemberList(Amount=Element(ComplexAmount))
    UnfulfillablePreviewItems = MemberList(FulfillmentPreviewItem)
    FulfillmentPreviewShipments = MemberList(
        FulfillmentPreviewItems=MemberList(FulfillmentPreviewItem),
    )
class GetFulfillmentPreviewResult(ResponseElement):
    FulfillmentPreviews = MemberList(FulfillmentPreview)
class FulfillmentOrder(ResponseElement):
    """A fulfillment order with its destination and notification emails."""
    DestinationAddress = Element()
    NotificationEmailList = MemberList(SimpleList)
class GetFulfillmentOrderResult(ResponseElement):
    FulfillmentOrder = Element(FulfillmentOrder)
    FulfillmentShipment = MemberList(
        FulfillmentShipmentItem=MemberList(),
        FulfillmentShipmentPackage=MemberList(),
    )
    FulfillmentOrderItem = MemberList()
class ListAllFulfillmentOrdersResult(ResponseElement):
    FulfillmentOrders = MemberList(FulfillmentOrder)
class GetPackageTrackingDetailsResult(ResponseElement):
    ShipToAddress = Element()
    TrackingEvents = MemberList(EventAddress=Element())
class Image(ResponseElement):
    """Product image; attributes come straight from the response tag."""
    pass
class AttributeSet(ResponseElement):
    """Product attributes shared by item listings (dimensions, price, image)."""
    ItemDimensions = Element(ComplexDimensions)
    ListPrice = Element(ComplexMoney)
    PackageDimensions = Element(ComplexDimensions)
    SmallImage = Element(Image)
class ItemAttributes(AttributeSet):
    """Item attributes; repeatable tags are exposed as SimpleList values."""
    Languages = Element(Language=ElementList())

    def __init__(self, *args, **kw):
        # These tags may occur more than once in a response, so each one
        # gets a per-instance list-valued attribute.
        for name in ('Actor', 'Artist', 'Author', 'Creator', 'Director',
                     'Feature', 'Format', 'GemType', 'MaterialType',
                     'MediaType', 'OperatingSystem', 'Platform'):
            setattr(self, name, SimpleList())
        super(ItemAttributes, self).__init__(*args, **kw)
# --- Products API response models (declarative field definitions) ---
class VariationRelationship(ResponseElement):
    """Link between a product variation and its identifiers."""
    Identifiers = Element(MarketplaceASIN=Element(),
                          SKUIdentifier=Element())
    GemType = SimpleList()
    MaterialType = SimpleList()
    OperatingSystem = SimpleList()
class Price(ResponseElement):
    """Price breakdown: landed price, listing price and shipping."""
    LandedPrice = Element(ComplexMoney)
    ListingPrice = Element(ComplexMoney)
    Shipping = Element(ComplexMoney)
class CompetitivePrice(ResponseElement):
    Price = Element(Price)
class CompetitivePriceList(ResponseElement):
    CompetitivePrice = ElementList(CompetitivePrice)
class CompetitivePricing(ResponseElement):
    CompetitivePrices = Element(CompetitivePriceList)
    NumberOfOfferListings = SimpleList()
    TradeInValue = Element(ComplexMoney)
class SalesRank(ResponseElement):
    pass
class LowestOfferListing(ResponseElement):
    Qualifiers = Element(ShippingTime=Element())
    Price = Element(Price)
class Offer(ResponseElement):
    BuyingPrice = Element(Price)
    RegularPrice = Element(ComplexMoney)
class Product(ResponseElement):
    """A product record from the Products API (XML namespace prefix ns2)."""
    _namespace = 'ns2'
    Identifiers = Element(MarketplaceASIN=Element(),
                          SKUIdentifier=Element())
    AttributeSets = Element(
        ItemAttributes=ElementList(ItemAttributes),
    )
    Relationships = Element(
        VariationParent=ElementList(VariationRelationship),
    )
    CompetitivePricing = ElementList(CompetitivePricing)
    SalesRankings = Element(
        SalesRank=ElementList(SalesRank),
    )
    LowestOfferListings = Element(
        LowestOfferListing=ElementList(LowestOfferListing),
    )
    Offers = Element(
        Offer=ElementList(Offer),
    )
class ListMatchingProductsResult(ResponseElement):
    Products = Element(Product=ElementList(Product))
class ProductsBulkOperationResult(ResponseElement):
    """Per-item result for bulk product operations; carries Product or Error."""
    Product = Element(Product)
    Error = Element()
class ProductsBulkOperationResponse(ResponseResultList):
    _ResultClass = ProductsBulkOperationResult
class GetMatchingProductResponse(ProductsBulkOperationResponse):
    pass
class GetMatchingProductForIdResult(ListMatchingProductsResult):
    pass
class GetMatchingProductForIdResponse(ResponseResultList):
    _ResultClass = GetMatchingProductForIdResult
class GetCompetitivePricingForSKUResponse(ProductsBulkOperationResponse):
    pass
class GetCompetitivePricingForASINResponse(ProductsBulkOperationResponse):
    pass
class GetLowestOfferListingsForSKUResponse(ProductsBulkOperationResponse):
    pass
class GetLowestOfferListingsForASINResponse(ProductsBulkOperationResponse):
    pass
class GetMyPriceForSKUResponse(ProductsBulkOperationResponse):
    pass
class GetMyPriceForASINResponse(ProductsBulkOperationResponse):
    pass
class ProductCategory(ResponseElement):
    """A product category node that may link to a parent category."""

    def __init__(self, *args, **kw):
        # Parent refers to ProductCategory itself, which is not yet bound
        # while the class body executes, so it is attached per instance.
        self.Parent = Element(ProductCategory)
        super(ProductCategory, self).__init__(*args, **kw)
# --- Orders / Customers / Carts / Off-Amazon Payments response models ---
class GetProductCategoriesResult(ResponseElement):
    Self = ElementList(ProductCategory)
class GetProductCategoriesForSKUResult(GetProductCategoriesResult):
    pass
class GetProductCategoriesForASINResult(GetProductCategoriesResult):
    pass
class Order(ResponseElement):
    """An order with its total, shipping address and payment details."""
    OrderTotal = Element(ComplexMoney)
    ShippingAddress = Element()
    PaymentExecutionDetail = Element(
        PaymentExecutionDetailItem=ElementList(
            PaymentExecutionDetailItem=Element(
                Payment=Element(ComplexMoney)
            )
        )
    )
class ListOrdersResult(ResponseElement):
    Orders = Element(Order=ElementList(Order))
class GetOrderResult(ListOrdersResult):
    pass
class OrderItem(ResponseElement):
    """A single order line; all monetary fields are ComplexMoney elements."""
    ItemPrice = Element(ComplexMoney)
    ShippingPrice = Element(ComplexMoney)
    GiftWrapPrice = Element(ComplexMoney)
    ItemTax = Element(ComplexMoney)
    ShippingTax = Element(ComplexMoney)
    GiftWrapTax = Element(ComplexMoney)
    ShippingDiscount = Element(ComplexMoney)
    PromotionDiscount = Element(ComplexMoney)
    PromotionIds = SimpleList()
    CODFee = Element(ComplexMoney)
    CODFeeDiscount = Element(ComplexMoney)
class ListOrderItemsResult(ResponseElement):
    OrderItems = Element(OrderItem=ElementList(OrderItem))
class ListMarketplaceParticipationsResult(ResponseElement):
    ListParticipations = Element(Participation=ElementList())
    ListMarketplaces = Element(Marketplace=ElementList())
class ListRecommendationsResult(ResponseElement):
    ListingQualityRecommendations = MemberList(ItemIdentifier=Element())
class Customer(ResponseElement):
    PrimaryContactInfo = Element()
    ShippingAddressList = Element(ShippingAddress=ElementList())
    AssociatedMarketplaces = Element(MarketplaceDomain=ElementList())
class ListCustomersResult(ResponseElement):
    CustomerList = Element(Customer=ElementList(Customer))
class GetCustomersForCustomerIdResult(ListCustomersResult):
    pass
class CartItem(ResponseElement):
    CurrentPrice = Element(ComplexMoney)
    SalePrice = Element(ComplexMoney)
class Cart(ResponseElement):
    ActiveCartItemList = Element(CartItem=ElementList(CartItem))
    SavedCartItemList = Element(CartItem=ElementList(CartItem))
class ListCartsResult(ResponseElement):
    CartList = Element(Cart=ElementList(Cart))
class GetCartsResult(ListCartsResult):
    pass
class Destination(ResponseElement):
    AttributeList = MemberList()
class ListRegisteredDestinationsResult(ResponseElement):
    DestinationList = MemberList(Destination)
class Subscription(ResponseElement):
    Destination = Element(Destination)
class GetSubscriptionResult(ResponseElement):
    Subscription = Element(Subscription)
class ListSubscriptionsResult(ResponseElement):
    SubscriptionList = MemberList(Subscription)
class OrderReferenceDetails(ResponseElement):
    """Off-Amazon Payments order reference with status and constraints."""
    Buyer = Element()
    OrderTotal = Element(ComplexMoney)
    Destination = Element(PhysicalDestination=Element())
    SellerOrderAttributes = Element()
    OrderReferenceStatus = Element()
    Constraints = ElementList()
class SetOrderReferenceDetailsResult(ResponseElement):
    OrderReferenceDetails = Element(OrderReferenceDetails)
class GetOrderReferenceDetailsResult(SetOrderReferenceDetailsResult):
    pass
class AuthorizationDetails(ResponseElement):
    AuthorizationAmount = Element(ComplexMoney)
    CapturedAmount = Element(ComplexMoney)
    AuthorizationFee = Element(ComplexMoney)
    AuthorizationStatus = Element()
class AuthorizeResult(ResponseElement):
    AuthorizationDetails = Element(AuthorizationDetails)
class GetAuthorizationDetailsResult(AuthorizeResult):
    pass
class CaptureDetails(ResponseElement):
    CaptureAmount = Element(ComplexMoney)
    RefundedAmount = Element(ComplexMoney)
    CaptureFee = Element(ComplexMoney)
    CaptureStatus = Element()
class CaptureResult(ResponseElement):
    CaptureDetails = Element(CaptureDetails)
class GetCaptureDetailsResult(CaptureResult):
    pass
class RefundDetails(ResponseElement):
    RefundAmount = Element(ComplexMoney)
    FeeRefunded = Element(ComplexMoney)
    RefundStatus = Element()
class RefundResult(ResponseElement):
    RefundDetails = Element(RefundDetails)
class GetRefundDetails(RefundResult):
    pass
| |
# __BEGIN_LICENSE__
#
# Copyright (C) 2010-2013 Stanford University.
# All rights reserved.
#
# __END_LICENSE__
import numpy as np
import time
from scipy.sparse.linalg.interface import LinearOperator
from lflib.lightfield import LightField
from lflib.imageio import save_image
from lflib.linear_operators import LightFieldOperator
# ----------------------------------------------------------------------------------------
# APPROXIMATE MESSAGE PASSING (AMP) ITERATIVE SOLVER
# ----------------------------------------------------------------------------------------
def amp_reconstruction(lfcal, lightfield,
alpha,
convergence_threshold,
max_iterations,
sparsity,
delta,
debug = False,
long_object = False,
disable_gpu = False,
gpu_id = 0,
save_errors=False,
debug_path = 'amp_debug',
multiscale_smoothing = False,
wavelet_type='la8',
stabilization=None,
standardize=True,
transform_type="undecimated_wavelet"):
alpha = 1.0
# import wavelet functions if needed
if multiscale_smoothing:
from lflib.multiscale3d import ( multiscale_transform_3D, inverse_multiscale_transform_3D,
multiscale_coefficient_update, multiscale_threshold,
multiscale_coefficient_mean, generalized_anscombe,
output_multiscale_coefs, anscombe, inverse_anscombe )
# Prefer wave optics model over geometric optics model
if lfcal.psf_db != None:
db = lfcal.psf_db
else:
db = lfcal.rayspread_db
# get lightfield projection operator
from lflib.volume import LightFieldProjection
lfproj = LightFieldProjection(lfcal.rayspread_db, lfcal.psf_db, disable_gpu = disable_gpu,
gpu_id = gpu_id)
lfproj.set_premultiplier(lfcal.radiometric_correction)
# get lightfield dimensions
nu = db.nu
nv = db.nv
ns = db.ns
nt = db.nt
# Generate the b vector, which contains the observed lightfield
im_subaperture = lightfield.asimage(representation = LightField.TILED_SUBAPERTURE)
b = np.reshape(im_subaperture, (im_subaperture.shape[0]*im_subaperture.shape[1]))
# Create a linear operator for the optical model A. This model
# allows us to compute A or A.T by calling its matvec() and
# rmatvec() methods.
nrays = db.ns*db.nu*db.nt*db.nv
nvoxels = db.nx*db.ny*db.nz
A_operator = LightFieldOperator(lfproj, db)
A = A_operator.as_linear_operator(nrays, nvoxels)
if stabilization is not 'anscombe':
# Model photon shot noise by setting the noise variance at every
# sensor pixel to be equal to that pixels intensity. This should
# be true if photon shot noise is the dominating noise term.
EPS = 1e-1 # Avoid dividing by zero!
# This value works well on fish volumes, but maybe needs tuning?
A_operator.diagonalizer = 1.0/np.sqrt(b+EPS)
A = A_operator.as_linear_operator(nrays, nvoxels)
b *= 1.0/np.sqrt(b+EPS)
else:
print "\t--> Stabilizing variance with Anscombe transform."
gain = 1.0
mu = 0.0
sigma = 10.0
from lflib.multiscale3d import generalized_anscombe, anscombe, inverse_anscombe
# b = generalized_anscombe(b, mu, sigma, gain)
b = anscombe(b)
print "Range b:", np.min(b), np.max(b)
# Renormalize b to take values in [0,1] to improve numerical stability
if standardize:
lf_max = np.max(b)
b /= lf_max
print "Range b standardized:", np.min(b), np.max(b)
# Initialize volume (or wavelet coefs)
vol_shape = np.array([db.ny, db.nx, db.nz])
if multiscale_smoothing:
# construct set of zero-valued wavelet coefficients
zero_vol = np.zeros((nvoxels), dtype=np.float32)
J = int(np.ceil(np.log2(np.min(vol_shape))))
print "J=",J
x, vol_inds = multiscale_transform_3D( zero_vol, vol_shape=vol_shape,
wavelet_type=wavelet_type,
transform_type=transform_type )
else:
# construct flattened zero volume
x = np.zeros((nvoxels), dtype=np.float32)
# Initialize previous estimate average and residual vector
last_estimate_avg = 0.0
error = np.zeros((nrays), dtype=np.float32)
# Create row and column sum vectors used to stabilize iterations.
# These are created by projecting a volume containing all ones (to create
# the weight lightfield), and then back-projecting a light field
# with all ones (to create the weight volume).
# They are used to reweight the estimated volume and errors in order
# to account for the fact that A and A.T have operator norms >1.
#
# N.B. -- there are probably better ways to precondition than this.
x_ones = np.ones((nvoxels), dtype=np.float32)
b_ones = np.ones((nrays), dtype=np.float32)
# Compute weights: x_weights = A.T * b_ones; b_weights = A * x_ones
import time
print 'Computing forward weights...'
tic = time.time()
x_weights = A.rmatvec(b_ones)
print '%0.2f seconds elapsed.' % (time.time()-tic)
print 'Computing backward weights...'
tic = time.time()
b_weights = A.matvec(x_ones)
print '%0.2f seconds elapsed.' % (time.time()-tic)
# Make sure that any zero valued weights are set to nonzero so
# that they don't lead to division by zero below. We then
# normalize the starting volume using the volume weights.
min_bweight = b_weights[np.nonzero(b_weights != 0.0)].min()
min_xweight = x_weights[np.nonzero(x_weights != 0.0)].min()
b_weights[np.nonzero(b_weights < min_bweight)] = min_bweight;
x_weights[np.nonzero(x_weights < min_xweight)] = min_xweight;
#--------------------------------------------------------------------
if save_errors:
iteration_error = []
for i in range(max_iterations):
iteration_tic = time.time()
# In each iteration, forward and backproject error
# from all views at once, then update the volume
#
# STEP 1: forward projection of volume to create sub-aperture images.
if multiscale_smoothing:
# A \Phi x
tic = time.time()
if transform_type == "pyramidal_median":
b_hat=A.matvec(np.asarray(inverse_multiscale_transform_3D(x,vol_inds=vol_inds,
transform_type=transform_type)).flatten() )
else:
inv_3d = np.asarray(inverse_multiscale_transform_3D(x,vol_inds=vol_inds,
transform_type=transform_type))
inv_3d = inv_3d.transpose((1,0,2)).flatten() # reshape for lflib
print "Range inv_3d:", np.min(inv_3d), np.max(inv_3d)
# inv_3d[inv_3d < 0.0]=0.0 # impose nonnegativity
# print "Max inv_3d:", np.max(inv_3d)
b_hat = A.matvec( inv_3d )
b_hat /= b_weights
print "\t--> Forward projection took ", time.time() - tic, " seconds,"
else:
# Ax
b_hat = A.matvec(x) / b_weights
# DEBUGGING
# if i == 1:
# b_debug = np.reshape(b_hat, (db.nt*db.nv, db.ns*db.nu))
# save_image("lf_" + str(i) + ".tif", b_debug);
# STEP 2: Compute error between computed and observed light field images
last_error = error
if stabilization == "anscombe":
# error = -b / anscombe( b_hat ) + 2
error = b - b_hat
else:
error = b - b_hat
# Get average of previous estimates
if multiscale_smoothing:
last_estimate_avg = multiscale_coefficient_mean(x, transform_type=transform_type)
else:
last_estimate_avg = np.mean(x)
# Collect the unweighted error in light field space.
if save_errors:
iteration_error.append( error )
# Add AMP adjustment.
reweighted_error = error + (1.0/delta)*last_estimate_avg*last_error
print "\t--> Last estimage average", last_estimate_avg
# STEP 3: Back-project error onto the volume (or wavelet coefficients)
tic = time.time()
error_backprojected = A.rmatvec(reweighted_error)
error_backprojected /= x_weights
print "\t--> Backward projection took ", time.time() - tic, " seconds,"
if multiscale_smoothing:
tic = time.time()
# These are the multiscale coefficient errors. There is some reshaping that
# must go on to pass the volumes between lflib (column-major (y,x,z)) and
# R (row-major (x,y,z) if an R method is used.
if transform_type == "pyramidal_median":
error_backprojected, vol_inds = multiscale_transform_3D(error_backprojected,
vol_shape=vol_shape,
wavelet_type=wavelet_type,
transform_type=transform_type)
else:
error_backprojected = error_backprojected.reshape(vol_shape).transpose((1,0,2))
error_backprojected = error_backprojected.flatten(order='f')
error_backprojected, vol_inds = multiscale_transform_3D(error_backprojected,
vol_shape=vol_shape,
wavelet_type=wavelet_type,
transform_type=transform_type)
print "\t--> Multiscale transform took ", time.time() - tic, " seconds,"
# Apply the update.
if multiscale_smoothing:
# Update multiscale coefficients.
update_norm = 0
x, update_norm = multiscale_coefficient_update(x, error_backprojected, alpha,
transform_type=transform_type)
# Threshold updated coefs to impose nonnegativity and sparsity.
tic = time.time()
print "\t--> Thresholding multiscale coefficients..."
threshold = [0.0, 0.0, 0.0, 0.0, 1e10, 1e10]
# threshold = sparsity
x = multiscale_threshold(x, threshold=threshold, transform_type=transform_type)
print "\t--> Multiscale thresholding took ", time.time() - tic, " seconds."
else:
# standard AMP update
x_update = error_backprojected
x += alpha * x_update
# threshold element-wise to impose nonnegative and L1
print "\t--> Sparsity:", sparsity
x[x<sparsity]=0.0
# CHECK FOR CONVERGENCE
#
# normalize MSE using input LF
nrays = db.ns*db.nt*db.nu*db.nv
residual_norm = np.linalg.norm(error) / nrays
# normalize MSE using input LF
nvoxels = db.nx*db.ny*db.nz
if multiscale_smoothing:
update_norm /= nvoxels
else:
update_norm = np.linalg.norm(alpha * x_update) / nvoxels
toc = time.time()
print ''
print '\t[ AMP Iteration %d (%0.2f seconds) ] ' % (i, toc-iteration_tic)
print '\t Residual Norm: %0.4g' % (residual_norm)
print '\t Update Norm: %0.4g (tol = %0.2e) ' % (update_norm, convergence_threshold)
# check if convergence criteria met
if i > 0 and residual_norm < convergence_threshold:
break
if multiscale_smoothing:
# Final projection back to volume space
print "\t--> Final thresholding..."
threshold = [0.0, 0.0, 0.0, 1e10, 1e10, 1e10]
x = multiscale_threshold(x, threshold=threshold, transform_type=transform_type,
suppress_scales=[0])
print "\t--> Preparing multiscale coefs for visualization..."
multiscale_coefs_R = output_multiscale_coefs(x,J=J)
multiscale_coefs = []
for i in xrange(J):
if standardize:
scale = lf_max*np.asarray(multiscale_coefs_R[i])
else:
scale = np.asarray(multiscale_coefs_R[i])
scale = scale.astype(np.float32)
multiscale_coefs.append(scale)
print "\t--> Final back projection to volume..."
if transform_type == "pyramidal_median":
x = np.asarray(inverse_multiscale_transform_3D(x,vol_inds,
transform_type=transform_type)).flatten()
else:
x = np.asarray(inverse_multiscale_transform_3D(x,vol_inds,
transform_type=transform_type)).transpose((1,0,2)).flatten()
# x[x<0.0]=0.0
x = inverse_anscombe(x)
if standardize:
vol = np.reshape(x*lf_max, (db.ny, db.nx, db.nz))
else:
vol = np.reshape(x, (db.ny, db.nx, db.nz))
# Slight hack: zero out the outermost XY "shell" of pixels, since
# these are often subject to radiometry artifacts.
vol[0:db.supersample_factor, :, :] = 0.0
vol[-db.supersample_factor:, :, :] = 0.0
vol[:, 0:db.supersample_factor, :] = 0.0
vol[:, -db.supersample_factor:, :] = 0.0
if multiscale_smoothing:
print "Returning volume and multiscale coefficents..."
return vol.astype(np.float32), multiscale_coefs
else:
print "Returning volume..."
return vol.astype(np.float32), None
if __name__ == "__main__":
    # Library-only module: no command-line entry point is provided.
    pass
| |
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import datetime
import os
import socket
import struct
import threading
import time
from abc import ABC, abstractmethod
from contextlib import contextmanager
from dataclasses import dataclass
STDIO_DESCRIPTORS = (0, 1, 2)
class ChunkType:
    """Nailgun protocol chunk types.

    N.B. The chunk-type markers are byte literals (e.g. b'A') rather than text
    strings because supporting methods like struct.pack() require single ASCII
    byte values for the chunk-type field of the wire header.
    """

    # Request-phase chunks (client -> server).
    ARGUMENT = b"A"
    ENVIRONMENT = b"E"
    WORKING_DIR = b"D"
    COMMAND = b"C"
    # Execution-phase chunks.
    STDIN = b"0"
    STDOUT = b"1"
    STDERR = b"2"
    START_READING_INPUT = b"S"
    STDIN_EOF = b"."
    EXIT = b"X"
    REQUEST_TYPES = (ARGUMENT, ENVIRONMENT, WORKING_DIR, COMMAND)
    EXECUTION_TYPES = (STDIN, STDOUT, STDERR, START_READING_INPUT, STDIN_EOF, EXIT)
    VALID_TYPES = REQUEST_TYPES + EXECUTION_TYPES
class NailgunProtocol:
    """A mixin that provides a base implementation of the Nailgun protocol as described on
    http://martiansoftware.com/nailgun/protocol.html.

    Communications proceed as follows:

      1) Client connects to server
      2) Client transmits zero or more "Argument" chunks
      3) Client transmits zero or more "Environment" chunks
      4) Client transmits exactly one "Working Directory" chunk
      5) Client transmits exactly one "Command" chunk
      6) If server requires stdin input, server transmits exactly one "Start-reading-input" chunk

    After step 5 (and/or 6) the following may happen, interleaved and in any order:

      7) Client transmits zero or more "stdin" chunks (Only if the client has received a
         "Start-reading-input" chunk, and only until the client transmits a "stdin-eof" chunk).
      8) Server transmits zero or more "stdout" chunks.
      9) Server transmits zero or more "stderr" chunks.

    Steps 7-9 repeat indefinitely until the server transmits an "exit" chunk.
    """

    ENVIRON_SEP = "="  # separates KEY=VALUE inside an Environment chunk payload
    TTY_ENV_TMPL = "NAILGUN_TTY_{}"  # env var advertising an fd's tty-ness
    TTY_PATH_ENV = "NAILGUN_TTY_PATH_{}"  # env var advertising an fd's tty device path
    HEADER_FMT = b">Ic"  # 4-byte big-endian payload length + 1-byte chunk type
    HEADER_BYTES = 5  # struct.calcsize(HEADER_FMT)
    # Exception hierarchy: ProtocolError > TruncatedRead > {Header,Payload}.
    class ProtocolError(Exception):
        """Raised if there is an error in the underlying nailgun protocol."""

    class TruncatedRead(ProtocolError):
        """Raised if there is a socket error while reading an expected number of bytes."""

    class TruncatedHeaderError(TruncatedRead):
        """Raised if there is a socket error while reading the header bytes."""

    class TruncatedPayloadError(TruncatedRead):
        """Raised if there is a socket error while reading the payload bytes."""
@classmethod
def _decode_unicode_seq(cls, seq):
for item in seq:
if isinstance(item, bytes):
yield item.decode()
else:
yield item
    @classmethod
    def send_request(cls, sock, working_dir, command, *arguments, **environment):
        """Send the initial Nailgun request over the specified socket.

        Chunk order follows the protocol: Argument chunks first, then
        Environment entries, then Working Directory and finally Command.
        """
        for argument in arguments:
            cls.write_chunk(sock, ChunkType.ARGUMENT, argument)
        for item_tuple in environment.items():
            # Each environment entry travels as a single 'KEY=VALUE' payload.
            cls.write_chunk(
                sock,
                ChunkType.ENVIRONMENT,
                cls.ENVIRON_SEP.join(cls._decode_unicode_seq(item_tuple)),
            )
        cls.write_chunk(sock, ChunkType.WORKING_DIR, working_dir)
        cls.write_chunk(sock, ChunkType.COMMAND, command)
@classmethod
def parse_request(cls, sock):
"""Parse the request (the pre-execution) section of the nailgun protocol from the given
socket.
Handles reading of the Argument, Environment, Working Directory and Command chunks from the
client which represents the "request" phase of the exchange. Working Directory and Command
are required and must be sent as the last two chunks in this phase. Argument and Environment
chunks are optional and can be sent more than once (thus we aggregate them).
"""
command = None
working_dir = None
arguments = []
environment = {}
while not all((working_dir, command)):
chunk_type, payload = cls.read_chunk(sock)
if chunk_type == ChunkType.ARGUMENT:
arguments.append(payload)
elif chunk_type == ChunkType.ENVIRONMENT:
key, val = payload.split(cls.ENVIRON_SEP, 1)
environment[key] = val
elif chunk_type == ChunkType.WORKING_DIR:
working_dir = payload
elif chunk_type == ChunkType.COMMAND:
command = payload
else:
raise cls.ProtocolError(
"received non-request chunk before header was fully received!"
)
return working_dir, command, arguments, environment
@classmethod
def write_chunk(cls, sock, chunk_type, payload=b""):
"""Write a single chunk to the connected client."""
chunk = cls.construct_chunk(chunk_type, payload)
sock.sendall(chunk)
@classmethod
def construct_chunk(cls, chunk_type, payload, encoding="utf-8"):
"""Construct and return a single chunk."""
if isinstance(payload, str):
payload = payload.encode(encoding)
elif not isinstance(payload, bytes):
raise TypeError("cannot encode type: {}".format(type(payload)))
header = struct.pack(cls.HEADER_FMT, len(payload), chunk_type)
return header + payload
@classmethod
def _read_until(cls, sock, desired_size):
"""Read a certain amount of content from a socket before returning."""
buf = b""
while len(buf) < desired_size:
recv_bytes = sock.recv(desired_size - len(buf))
if not recv_bytes:
raise cls.TruncatedRead(
"Expected {} bytes before socket shutdown, instead received {}".format(
desired_size, len(buf)
)
)
buf += recv_bytes
return buf
@classmethod
def read_chunk(cls, sock, return_bytes=False):
"""Read a single chunk from the connected client.
A "chunk" is a variable-length block of data beginning with a 5-byte chunk header and followed
by an optional payload. The chunk header consists of:
1) The length of the chunk's payload (not including the header) as a four-byte big-endian
unsigned long. The high-order byte is header[0] and the low-order byte is header[3].
2) A single byte identifying the type of chunk.
"""
try:
# Read the chunk header from the socket.
header = cls._read_until(sock, cls.HEADER_BYTES)
except cls.TruncatedRead as e:
raise cls.TruncatedHeaderError("Failed to read nailgun chunk header ({!r}).".format(e))
# Unpack the chunk header.
payload_len, chunk_type = struct.unpack(cls.HEADER_FMT, header)
try:
# Read the chunk payload.
payload = cls._read_until(sock, payload_len)
except cls.TruncatedRead as e:
raise cls.TruncatedPayloadError(
"Failed to read nailgun chunk payload ({!r}).".format(e)
)
# In the case we get an otherwise well-formed chunk, check the chunk_type for validity _after_
# we've drained the payload from the socket to avoid subsequent reads of a stale payload.
if chunk_type not in ChunkType.VALID_TYPES:
raise cls.ProtocolError("invalid chunk type: {}".format(chunk_type))
if not return_bytes:
payload = payload.decode()
return chunk_type, payload
    class ProcessStreamTimeout(Exception):
        """Raised when a timeout set on the stream expires during iteration."""
@classmethod
@contextmanager
def _set_socket_timeout(cls, sock, timeout=None):
"""Temporarily set a socket timeout in order to respect a timeout provided to.
.iter_chunks().
"""
if timeout is not None:
prev_timeout = sock.gettimeout()
try:
if timeout is not None:
sock.settimeout(timeout)
yield
except socket.timeout:
raise cls.ProcessStreamTimeout("socket read timed out with timeout {}".format(timeout))
finally:
if timeout is not None:
sock.settimeout(prev_timeout)
    @dataclass(frozen=True)
    class TimeoutOptions:
        """Specification of a stream-iteration deadline."""

        start_time: float  # epoch seconds when the timeout clock started
        interval: float  # seconds after start_time at which to time out

    class TimeoutProvider(ABC):
        @abstractmethod
        def maybe_timeout_options(self):
            """Called on every stream iteration to obtain a possible specification for a timeout.

            If this method returns non-None, it should return an instance of `cls.TimeoutOptions`, which
            then initiates a timeout after which the stream will raise `cls.ProcessStreamTimeout`.

            :rtype: :class:`cls.TimeoutOptions`, or None
            """
@classmethod
def iter_chunks(cls, maybe_shutdown_socket, return_bytes=False, timeout_object=None):
"""Generates chunks from a connected socket until an Exit chunk is sent or a timeout occurs.
:param sock: the socket to read from.
:param bool return_bytes: If False, decode the payload into a utf-8 string.
:param cls.TimeoutProvider timeout_object: If provided, will be checked every iteration for a
possible timeout.
:raises: :class:`cls.ProcessStreamTimeout`
"""
assert timeout_object is None or isinstance(timeout_object, cls.TimeoutProvider)
if timeout_object is None:
deadline = None
else:
options = timeout_object.maybe_timeout_options()
if options is None:
deadline = None
else:
deadline = options.start_time + options.interval
while 1:
if deadline is not None:
overtime_seconds = deadline - time.time()
if overtime_seconds > 0:
original_timestamp = datetime.datetime.fromtimestamp(deadline).isoformat()
raise cls.ProcessStreamTimeout(
"iterating over bytes from nailgun timed out at {}, overtime seconds: {}".format(
original_timestamp, overtime_seconds
)
)
with maybe_shutdown_socket.lock:
if maybe_shutdown_socket.is_shutdown:
break
# We poll with low timeouts because we poll under a lock. This allows the DaemonPantsRunner
# to shut down the socket, and us to notice, pretty quickly.
with cls._set_socket_timeout(maybe_shutdown_socket.socket, timeout=0.01):
try:
chunk_type, payload = cls.read_chunk(
maybe_shutdown_socket.socket, return_bytes
)
except socket.timeout:
# Timeouts are handled by the surrounding loop
continue
yield chunk_type, payload
if chunk_type == ChunkType.EXIT:
break
    @classmethod
    def send_start_reading_input(cls, sock):
        """Send the Start-Reading-Input chunk over the specified socket."""
        cls.write_chunk(sock, ChunkType.START_READING_INPUT)

    @classmethod
    def send_stdout(cls, sock, payload):
        """Send the Stdout chunk over the specified socket."""
        cls.write_chunk(sock, ChunkType.STDOUT, payload)

    @classmethod
    def send_stderr(cls, sock, payload):
        """Send the Stderr chunk over the specified socket."""
        cls.write_chunk(sock, ChunkType.STDERR, payload)

    @classmethod
    def send_exit(cls, sock, payload=b""):
        """Send the Exit chunk over the specified socket."""
        cls.write_chunk(sock, ChunkType.EXIT, payload)

    @classmethod
    def send_exit_with_code(cls, sock, code):
        """Send an Exit chunk over the specified socket, containing the specified return code."""
        # The exit status travels as its ASCII decimal representation.
        encoded_exit_status = cls.encode_int(code)
        cls.send_exit(sock, payload=encoded_exit_status)
@classmethod
def encode_int(cls, obj):
"""Verify the object is an int, and ASCII-encode it.
:param int obj: An integer to be encoded.
:raises: :class:`TypeError` if `obj` is not an integer.
:return: A binary representation of the int `obj` suitable to pass as the `payload` to
send_exit().
"""
if not isinstance(obj, int):
raise TypeError(
"cannot encode non-integer object in encode_int(): object was {} (type '{}').".format(
obj, type(obj)
)
)
return str(obj).encode("ascii")
@classmethod
def encode_env_var_value(cls, obj):
"""Convert `obj` into a UTF-8 encoded binary string.
The result of this method be used as the value of an environment variable in a subsequent
NailgunClient execution.
"""
return str(obj).encode()
    @classmethod
    def isatty_to_env(cls, stdin, stdout, stderr):
        """Generate nailgun tty capability environment variables based on checking a set of fds.

        :param file stdin: The stream to check for stdin tty capabilities.
        :param file stdout: The stream to check for stdout tty capabilities.
        :param file stderr: The stream to check for stderr tty capabilities.
        :returns: A dict containing the tty capability environment variables.
        """

        def gen_env_vars():
            for fd_id, fd in zip(STDIO_DESCRIPTORS, (stdin, stdout, stderr)):
                is_atty = fd.isatty()
                yield (cls.TTY_ENV_TMPL.format(fd_id), cls.encode_env_var_value(int(is_atty)))
                if is_atty:
                    # NOTE(review): os.ttyname returns str, so the b"" fallback
                    # mixes bytes with str values in the dict -- confirm intended.
                    yield (cls.TTY_PATH_ENV.format(fd_id), os.ttyname(fd.fileno()) or b"")

        return dict(gen_env_vars())
@classmethod
def isatty_from_env(cls, env):
"""Determine whether remote file descriptors are tty capable using std nailgunned env
variables.
:param dict env: A dictionary representing the environment.
:returns: A tuple of boolean values indicating istty or not for (stdin, stdout, stderr).
"""
def str_int_bool(i):
return i.isdigit() and bool(
int(i)
) # Environment variable values should always be strings.
return tuple(
str_int_bool(env.get(cls.TTY_ENV_TMPL.format(fd_id), "0"))
for fd_id in STDIO_DESCRIPTORS
)
@classmethod
def ttynames_from_env(cls, env):
"""Determines the ttynames for remote file descriptors (if ttys).
:param dict env: A dictionary representing the environment.
:returns: A tuple of boolean values indicating ttyname paths or None for (stdin, stdout, stderr).
"""
return tuple(env.get(cls.TTY_PATH_ENV.format(fd_id)) for fd_id in STDIO_DESCRIPTORS)
class MaybeShutdownSocket:
    """A wrapper around a socket which knows whether it has been shut down.

    Because we may shut down a nailgun socket from one thread, and read from it on another, we use
    this wrapper so that a shutting-down thread can signal to a reading thread that it should stop
    reading.

    lock guards access to is_shutdown, shutting down the socket, and any calls which need to guarantee
    they don't race a shutdown call.
    """

    def __init__(self, sock):
        self.socket = sock  # the wrapped socket object
        self.lock = threading.Lock()  # guards is_shutdown and socket shutdown
        self.is_shutdown = False  # set True once the socket has been shut down
| |
import random
import logging
import numpy as np
def kill_signal(recordings, threshold, window_size):
    """Remove super-threshold "signal" observations from recordings.

    Observations whose absolute value is above ``threshold`` are considered
    signal; a window of ``window_size`` samples centered on each signal
    point is killed (set to 0 in the output). Surviving (noise)
    observations are standardized per channel by the channel's noise
    standard deviation.

    Parameters
    ----------
    recordings: numpy.ndarray
        Recordings (observations x channels). Not modified in place.
    threshold: float
        Absolute-value threshold above which observations are signal
    window_size: int
        Width (in samples) of the window killed around each signal point

    Returns
    -------
    recordings: numpy.ndarray
        Copy of the recordings with killed observations set to 0 and the
        remaining noise observations standardized per channel
    is_noise_idx: numpy.ndarray
        Array with the same shape as 'recordings': 1 where the observation
        is noise, 0 where it was killed.
    """
    recordings = np.copy(recordings)
    T, C = recordings.shape
    R = int((window_size - 1) / 2)

    # this will hold a flag 1 (noise), 0 (signal) for every observation in
    # the recordings
    is_noise_idx = np.zeros((T, C))

    # go through every channel independently
    for c in range(C):
        # get observations where the value is above the threshold
        idx_temp = np.where(np.abs(recordings[:, c]) > threshold)[0]

        # mark a window of +/- R samples around every signal point
        for j in range(-R, R + 1):
            idx_temp2 = idx_temp + j
            # remove indexes outside range [0, T)
            idx_temp2 = idx_temp2[np.logical_and(idx_temp2 >= 0,
                                                 idx_temp2 < T)]
            recordings[idx_temp2, c] = np.nan

        # noise indexes are the ones that were not marked with nan
        # (resolves the previous FIXME: use np.isnan instead of the
        # nan != nan self-comparison trick)
        is_noise_idx_temp = ~np.isnan(recordings[:, c])

        # standardize data, ignoring nans
        recordings[:, c] = recordings[:, c] / np.nanstd(recordings[:, c])

        # zero-out the killed (signal) observations
        recordings[~is_noise_idx_temp, c] = 0

        # save noise indexes
        is_noise_idx[is_noise_idx_temp, c] = 1

    return recordings, is_noise_idx
def noise_cov(recordings, temporal_size, window_size, sample_size=1000,
              threshold=3.0, max_trials_per_sample=100,
              allow_smaller_sample_size=False):
    """Compute noise temporal and spatial covariance

    Parameters
    ----------
    recordings: numpy.ndarray
        Recordings (observations x channels)
    temporal_size:
        Waveform size
    window_size: int
        Window (in samples) killed around each super-threshold observation
        when estimating the noise floor
    sample_size: int
        Number of noise snippets of temporal_size to search
    threshold: float
        Observations below this number are considered noise
    max_trials_per_sample: int, optional
        Maximum random trials per noise snippet (see search_noise_snippets)
    allow_smaller_sample_size: bool, optional
        If the trial budget is exhausted and this is True, fewer than
        sample_size snippets are used instead of raising

    Returns
    -------
    spatial_SIG: numpy.ndarray
        (n_channels, n_channels) square root of the spatial covariance
    temporal_SIG: numpy.ndarray
        (temporal_size, temporal_size) square root of the temporal covariance
    """
    logger = logging.getLogger(__name__)

    # kill signal above threshold in recordings
    logger.info('Get Noise Floor')
    rec, is_noise_idx = kill_signal(recordings, threshold, window_size)

    # compute spatial covariance, output: (n_channels, n_channels);
    # each entry is normalized by the number of pairwise noise observations
    logger.info('Compute Spatial Covariance')
    spatial_cov = np.divide(np.matmul(rec.T, rec),
                            np.matmul(is_noise_idx.T, is_noise_idx))

    # compute spatial sig: V * sqrt(D) * V^T from the eigendecomposition
    w_spatial, v_spatial = np.linalg.eig(spatial_cov)
    spatial_SIG = np.matmul(np.matmul(v_spatial,
                                      np.diag(np.sqrt(w_spatial))),
                            v_spatial.T)

    # apply spatial whitening to recordings (V * D^-1/2 * V^T)
    logger.info('Compute Temporal Covaraince')
    spatial_whitener = np.matmul(np.matmul(v_spatial,
                                           np.diag(1/np.sqrt(w_spatial))),
                                 v_spatial.T)
    rec = np.matmul(rec, spatial_whitener)

    # search single noise channel snippets
    noise_wf = search_noise_snippets(
        rec, is_noise_idx, sample_size,
        temporal_size,
        channel_choices=None,
        max_trials_per_sample=max_trials_per_sample,
        allow_smaller_sample_size=allow_smaller_sample_size)

    # temporal sig: square root of the temporal covariance of the snippets
    w, v = np.linalg.eig(np.cov(noise_wf.T))
    temporal_SIG = np.matmul(np.matmul(v, np.diag(np.sqrt(w))), v.T)

    return spatial_SIG, temporal_SIG
def search_noise_snippets(recordings, is_noise_idx, sample_size,
                          temporal_size, channel_choices=None,
                          max_trials_per_sample=100,
                          allow_smaller_sample_size=False):
    """Randomly search noise snippets of 'temporal_size'.

    Parameters
    ----------
    recordings: numpy.ndarray
        Recordings (observations x channels)
    is_noise_idx: numpy.ndarray
        0/1 array with the same shape as recordings flagging noise
        observations (1) vs killed signal (0)
    sample_size: int
        Number of noise snippets to collect
    temporal_size: int
        Temporal length (in samples) of every snippet
    channel_choices: list, optional
        List of sets of channels to select at random on each trial; when
        None a single channel is drawn at random per trial
    max_trials_per_sample: int, optional
        Maximum random trials per sample
    allow_smaller_sample_size: bool, optional
        If 'max_trials_per_sample' is reached and this is True, the noise
        snippets found up to that time are returned

    Returns
    -------
    numpy.ndarray
        (sample_size, temporal_size) when channel_choices is None,
        otherwise (sample_size, temporal_size, n_channels)

    Raises
    ------
    ValueError
        if after 'max_trials_per_sample' trials, no noise snippet has been
        found this exception is raised (unless allow_smaller_sample_size)

    Notes
    -----
    Channels selected at random using the random module from the standard
    library (not using np.random)
    """
    logger = logging.getLogger(__name__)

    T, C = recordings.shape

    if channel_choices is None:
        noise_wf = np.zeros((sample_size, temporal_size))
    else:
        # every channel set must have the same cardinality so the output
        # array has a consistent last dimension
        lengths = set([len(ch) for ch in channel_choices])
        if len(lengths) > 1:
            raise ValueError('All elements in channel_choices must have '
                             'the same length, got {}'.format(lengths))
        n_channels = len(channel_choices[0])
        noise_wf = np.zeros((sample_size, temporal_size, n_channels))

    count = 0

    logger.debug('Starting to search noise snippets...')

    trial = 0

    # repeat until you get sample_size noise snippets
    while count < sample_size:
        # random number for the start of the noise snippet
        t_start = np.random.randint(T - temporal_size)

        if channel_choices is None:
            # random channel
            ch = random.randint(0, C - 1)
        else:
            ch = random.choice(channel_choices)

        t_slice = slice(t_start, t_start + temporal_size)

        # get a snippet from the recordings and the noise flags for the same
        # location
        snippet = recordings[t_slice, ch]
        snipped_idx_noise = is_noise_idx[t_slice, ch]

        # check if all observations in snippet are noise
        if snipped_idx_noise.all():
            # add the snippet, increase count and reset the trial budget
            noise_wf[count] = snippet
            count += 1
            trial = 0

            logger.debug('Found %i/%i...', count, sample_size)

        trial += 1

        if trial == max_trials_per_sample:
            if allow_smaller_sample_size:
                return noise_wf[:count]
            else:
                raise ValueError("Couldn't find snippet {} of size {} after "
                                 "{} iterations (only {} found)"
                                 .format(count + 1, temporal_size,
                                         max_trials_per_sample,
                                         count))

    return noise_wf
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import imp
import os
from config import *
import instances as inst
import util
def _prompt_config(lab, path):
"""Prompt for the lab configuration script"""
files = [f for f in os.listdir(path) if f.endswith('.py')]
files.sort()
if len(files) == 1:
return {'1': files[0].strip('.py')},'1'
else:
print "Available configurations for the '{0}' lab:\n".format(lab)
options = []
for f in files:
if f.endswith('.py'):
options.append(f.replace('.py',''))
return util.list_prompt('Which configuration would you like to execute?: ', options)
def list_available_labs():
    """List available labs in LAB_DIR.

    A lab is any subdirectory of LAB_DIR; names are printed sorted.
    """
    available_labs = [d for d in os.listdir(LAB_DIR)
                      if os.path.isdir(os.path.join(LAB_DIR, d))]
    print "\nAvailable Labs:"
    available_labs.sort()
    for lab in available_labs:
        print '  {0}'.format(lab)
    print ''
def lab_description(lab):
"""Display information for a single lab"""
file = open(LAB_DIR + lab + '/description.md', 'r')
print '\n', file.read()
file.close()
def calculate_lab_tag(conn, user_vpc, lab):
    """Auto-increment lab ID numbers"""
    # Collect the distinct 'Lab' tags in this VPC that belong to this lab
    # (a set de-duplicates as we go).
    existing = set()
    for instance in inst.get_vpc_instances(conn, user_vpc):
        tag = instance.tags.get('Lab')
        if tag is not None and tag.startswith(lab):
            existing.add(tag)

    # Return the first unused "<lab>-<n>" tag, counting from 1.
    count = 1
    while '{0}-{1}'.format(lab, count) in existing:
        count += 1
    return '{0}-{1}'.format(lab, count)
def get_running_labs(conn, user_vpc):
    """List/Return all running labs.

    Prints the sorted, de-duplicated 'Lab' tags of all instances in the
    VPC and returns them as a list.
    NOTE(review): implicitly returns None when no labs are running --
    callers must treat the result as possibly falsy.
    """
    labs = []
    instances = inst.get_vpc_instances(conn, user_vpc)

    # get all lab tags
    for instance in instances:
        if 'Lab' in instance.tags:
            labs.append(instance.tags['Lab'])

    # remove duplicates
    labs = list(set(labs))
    labs.sort()

    if labs:
        print "\nRunning labs:"
        for lab in labs:
            print "  ", lab
        print ""
        return labs
    else:
        print "\nNo labs running ...\n"
def lab_info(conn, user_vpc):
    """List all running labs in AWS.

    For every running lab, prints the per-instance details returned by
    get_lab_instance_info.
    """
    labs = get_running_labs(conn, user_vpc)
    # get_running_labs returns None when nothing is running.
    if labs:
        for lab in labs:
            print "Instances running in lab '{0}':".format(lab)
            instances = get_lab_instance_info(conn, user_vpc, lab)
            for instance in instances:
                print instance
            print ""
def get_user_instance_info(conn, user_vpc, lab_tag, user):
    """List IP/DNS for each instance for user.

    Returns a sorted list of formatted description strings for every
    instance in the VPC matching the given lab tag and user tag.
    """
    reservations = conn.get_all_instances(filters = {'vpc-id': user_vpc.id,
                                                     'tag:Lab': lab_tag,
                                                     'tag:User': user})
    final = []
    for r in reservations:
        for instance in r.instances:
            final.append("""
    Name: {0}
    IP: {1}
    Private IP: {2}
    Public DNS: {3}\n""".format(instance.tags['Name'],
                                instance.ip_address,
                                instance.private_ip_address,
                                instance.public_dns_name))
    final.sort()
    return final
def get_lab_instance_info(conn, user_vpc, lab_tag):
    """List instance info for lab.

    Returns a sorted list of formatted description strings for every
    instance in the VPC matching the given lab tag.
    """
    reservations = conn.get_all_instances(filters = {'vpc-id': user_vpc.id,
                                                     'tag:Lab': lab_tag})
    final = []
    for r in reservations:
        for instance in r.instances:
            final.append("""
    Name: {0}
    Lab: {1}
    Region: {2}
    IP: {3}
    Private IP: {4}
    Public DNS: {5}""".format(instance.tags['Name'],
                              instance.tags['Lab'],
                              str(instance.region).replace('RegionInfo:',''),
                              instance.ip_address,
                              instance.private_ip_address,
                              instance.public_dns_name))
    final.sort()
    return final
def launch_lab(conn, user_vpc, lab):
    """Execute a lab configuration.

    Loads the chosen lab script, resolves interactive PROMPT values in the
    lab's instances.cfg, then launches the instances.
    """
    path = LAB_DIR + lab + '/scripts/'
    prompt, answer = _prompt_config(lab, path)

    # import lab configs (the chosen script is loaded dynamically)
    labmod = imp.load_source('labmod', path + prompt[answer] + '.py')
    labmod.pre_process()
    cfg = util.read_config(LAB_DIR + lab + '/instances.cfg')

    # prompt for any dynamic configuration options:
    # 'PROMPT:<label>' values are read as strings, 'PROMPT#:<label>' as ints.
    for instance in cfg['instance']:
        for k, v in instance.iteritems():
            if str(v).startswith('PROMPT:'):
                instance[k] = raw_input('{0}: '.format(v.split(':')[1]))
            if str(v).startswith('PROMPT#:'):
                instance[k] = int(raw_input('{0}: '.format(v.split(':')[1])))
        for device in instance['device']:
            for k, v in device.iteritems():
                if str(v).startswith('PROMPT:'):
                    device[k] = raw_input('{0}: '.format(v.split(':')[1]))
                if str(v).startswith('PROMPT#:'):
                    device[k] = int(raw_input('{0}: '.format(v.split(':')[1])))

    # connection and required info
    security_groups = conn.get_all_security_groups(filters = {'vpc-id': user_vpc.id})
    subnets = conn.get_all_subnets(filters = {'vpc-id': user_vpc.id})

    # launch
    inst.launch_instances(conn, user_vpc, prompt[answer],
                          lab, labmod, cfg, security_groups, subnets)
    labmod.post_process()
def terminate_lab(conn, user_vpc, lab_tag):
"""Terminate a single lab and all instances"""
instance_ids = []
instances = inst.get_vpc_instances(conn, user_vpc)
# get all lab instances
for instance in instances:
if 'Lab' in instance.tags:
if instance.tags['Lab'] == lab_tag:
instance_ids.append(instance.id)
conn.terminate_instances(instance_ids=instance_ids)
try:
with open('/host/{0}/key-pairs.txt'.format(VPC)) as users:
for user in users:
os.remove('/host/{0}/users/{1}/{2}.txt'.format(VPC, user.split('-')[0], lab_tag))
except:
print "No user files removed..."
print "\nTerminate request sent for all lab instances ..."
print "Lab '{0}' has been deleted ...\n".format(lab_tag)
| |
"""
.. module:: Dispatcher
:platform: Unix
:synopsis: A useful module indeed.
"""
from __future__ import with_statement
import logging
import socket
import time
from Queue import Queue
from itertools import groupby, ifilter, chain
import collections
try:
import simplejson as json
except ImportError:
import json
# from octopus.core import tools
from octopus.core import singletonconfig, singletonstats
from octopus.core.threadpool import ThreadPool, makeRequests, NoResultsPending
from octopus.core.framework import MainLoopApplication
from octopus.core.tools import elapsedTimeToString
from octopus.dispatcher.model import (DispatchTree, FolderNode, RenderNode,
Pool, PoolShare, enums)
from octopus.dispatcher.strategies import FifoStrategy
from octopus.dispatcher import settings
from octopus.dispatcher.db.pulidb import PuliDB
from octopus.dispatcher.model.enums import *
from octopus.dispatcher.poolman.filepoolman import FilePoolManager
from octopus.dispatcher.poolman.wspoolman import WebServicePoolManager
from octopus.dispatcher.licenses.licensemanager import LicenseManager
class Dispatcher(MainLoopApplication):
    '''The Dispatcher class is the core of the dispatcher application.

    It computes the assignments of commands to workers according to a
    DispatchTree and handles all the communications with the workers and
    clients.
    '''

    # Singleton instance (created lazily in __new__).
    instance = None
    # One-shot guard so __init__ runs only once on the singleton.
    init = False

    def __new__(cls, framework):
        if cls.instance is None:
            # Disable passing framework to the super __new__ call.
            # It is automatically available via the super class hierarchy.
            # This removes a deprecation warning when launching dispatcher.
            cls.instance = super(Dispatcher, cls).__new__(cls)
        return cls.instance
def __init__(self, framework):
    """Initialize the singleton dispatcher: model, DB, pools and rules.

    Guarded by the class-level 'init' flag so repeated construction of the
    singleton (via __new__) only initializes once.
    """
    LOGGER = logging.getLogger('main.dispatcher')
    if self.init:
        return
    self.init = True
    self.nextCycle = time.time()

    MainLoopApplication.__init__(self, framework)

    # Pool of worker threads used for network requests to render nodes.
    self.threadPool = ThreadPool(16, 0, 0, None)

    #
    # Class holding custom infos on the dispatcher.
    # This data can be periodically flushed in a specific log file for
    # later use
    #
    self.cycle = 1
    self.dispatchTree = DispatchTree()
    self.licenseManager = LicenseManager()
    self.enablePuliDB = settings.DB_ENABLE
    self.cleanDB = settings.DB_CLEAN_DATA
    self.restartService = False

    self.pulidb = None
    if self.enablePuliDB:
        self.pulidb = PuliDB(self.cleanDB, self.licenseManager)

    self.dispatchTree.registerModelListeners()
    # True when pools/rendernodes came from a file/ws backend (see
    # initPoolsDataFromBackend); False when they must come from the DB.
    rnsAlreadyInitialized = self.initPoolsDataFromBackend()

    if self.enablePuliDB and not self.cleanDB:
        LOGGER.warning("--- Reloading database (9 steps) ---")
        prevTimer = time.time()
        self.pulidb.restoreStateFromDb(self.dispatchTree, rnsAlreadyInitialized)
        LOGGER.warning("%d jobs reloaded from database" % len(self.dispatchTree.tasks))
        LOGGER.warning("Total time elapsed %s" % elapsedTimeToString(prevTimer))
        LOGGER.warning("")

        LOGGER.warning("--- Checking dispatcher state (3 steps) ---")
        startTimer = time.time()
        LOGGER.warning("1/3 Update completion and status")
        self.dispatchTree.updateCompletionAndStatus()
        LOGGER.warning("    Elapsed time %s" % elapsedTimeToString(startTimer))

        prevTimer = time.time()
        LOGGER.warning("2/3 Update rendernodes")
        self.updateRenderNodes()
        LOGGER.warning("    Elapsed time %s" % elapsedTimeToString(prevTimer))

        prevTimer = time.time()
        LOGGER.warning("3/3 Validate dependencies")
        self.dispatchTree.validateDependencies()
        LOGGER.warning("    Elapsed time %s" % elapsedTimeToString(prevTimer))
        LOGGER.warning("Total time elapsed %s" % elapsedTimeToString(startTimer))
        LOGGER.warning("")

    # The restore pass above already persisted everything; drop pending
    # modifications so they are not re-written on the first updateDB().
    if self.enablePuliDB and not self.cleanDB:
        self.dispatchTree.toModifyElements = []

    # If no 'default' pool exists, create default pool
    # When creating a pool with id=None, it is automatically appended in "toCreateElement" list in dispatcher and in the dispatcher's "pools" attribute
    if 'default' not in self.dispatchTree.pools:
        pool = Pool(None, name='default')
        LOGGER.warning("Default pool was not loaded from DB, create a new default pool: %s" % pool)
    self.defaultPool = self.dispatchTree.pools['default']

    LOGGER.warning("--- Loading dispatch rules ---")
    startTimer = time.time()
    self.loadRules()
    LOGGER.warning("Total time elapsed %s" % elapsedTimeToString(startTimer))
    LOGGER.warning("")

    # it should be better to have a maxsize
    self.queue = Queue(maxsize=10000)
def initPoolsDataFromBackend(self):
    '''
    Loads pools and workers from appropriate backend.

    :returns: True when pools/rendernodes were initialized from a file or
        webservice backend, False when the DB backend is selected (or on
        failure), meaning the data must be restored from the database.
    '''
    try:
        if settings.POOLS_BACKEND_TYPE == "file":
            manager = FilePoolManager()
        elif settings.POOLS_BACKEND_TYPE == "ws":
            manager = WebServicePoolManager()
        elif settings.POOLS_BACKEND_TYPE == "db":
            return False
        # NOTE(review): any other POOLS_BACKEND_TYPE value leaves 'manager'
        # unbound and raises NameError below (outside this try) -- confirm
        # the set of supported backend values.
    except Exception:
        return False

    computers = manager.listComputers()

    ### recreate the pools
    poolsList = manager.listPools()
    poolsById = {}
    for poolDesc in poolsList:
        pool = Pool(id=int(poolDesc.id), name=str(poolDesc.name))
        self.dispatchTree.toCreateElements.append(pool)
        poolsById[pool.id] = pool

    ### recreate the rendernodes
    rnById = {}
    for computerDesc in computers:
        try:
            # Resolve the canonical host name and address; skip hosts that
            # cannot be resolved.
            computerDesc.name = socket.getfqdn(computerDesc.name)
            ip = socket.gethostbyname(computerDesc.name)
        except socket.gaierror:
            continue
        renderNode = RenderNode(computerDesc.id, computerDesc.name + ":" + str(computerDesc.port), computerDesc.cpucount * computerDesc.cpucores, computerDesc.cpufreq, ip, computerDesc.port, computerDesc.ramsize, json.loads(computerDesc.properties))

        self.dispatchTree.toCreateElements.append(renderNode)
        ## add the rendernodes to the pools
        for pool in computerDesc.pools:
            poolsById[pool.id].renderNodes.append(renderNode)
            renderNode.pools.append(poolsById[pool.id])
        self.dispatchTree.renderNodes[str(renderNode.name)] = renderNode
        rnById[renderNode.id] = renderNode

    # add the pools to the dispatch tree
    for pool in poolsById.values():
        self.dispatchTree.pools[pool.name] = pool
    if self.cleanDB or not self.enablePuliDB:
        # Fresh start: create the root 'graphs' folder node and bind the
        # default pool to it with an unbound pool share.
        graphs = FolderNode(1, "graphs", self.dispatchTree.root, "root", 0, 0, 0, FifoStrategy())
        self.dispatchTree.toCreateElements.append(graphs)
        self.dispatchTree.nodes[graphs.id] = graphs
        ps = PoolShare(1, self.dispatchTree.pools["default"], graphs, PoolShare.UNBOUND)
        self.dispatchTree.toCreateElements.append(ps)
    if self.enablePuliDB:
        # clean the tables pools and rendernodes (overwrite)
        self.pulidb.dropPoolsAndRnsTables()
        self.pulidb.createElements(self.dispatchTree.toCreateElements)
        self.dispatchTree.resetDbElements()

    return True
def shutdown(self):
    '''
    Clean procedure before shutting done puli server.
    '''
    log = logging.getLogger('main')
    log.warning("-----------------------------------------------")
    log.warning("Exit event caught: closing dispatcher...")

    # Run each cleanup step independently: a failure in one step must not
    # prevent the following steps from being attempted.
    cleanup_steps = (
        (self.dispatchTree.updateCompletionAndStatus, "update completion and status"),
        (self.updateRenderNodes, "update render nodes"),
        (self.dispatchTree.validateDependencies, "validate dependencies"),
        (self.updateDB, "update DB"),
    )
    for action, label in cleanup_steps:
        try:
            action()
            log.warning("[OK] %s" % label)
        except Exception:
            log.warning("[HS] %s" % label)
def loadRules(self):
    """Install the dispatch rules; requires the '/graphs' node to exist."""
    from .rules.graphview import GraphViewBuilder
    graphs = self.dispatchTree.findNodeByPath("/graphs", None)
    if graphs is None:
        logging.getLogger('main.dispatcher').fatal("No '/graphs' node, impossible to load rule for /graphs.")
        self.stop()
        # NOTE(review): stop() is currently a no-op, so execution falls
        # through and GraphViewBuilder receives graphs=None -- confirm
        # whether an early return is intended here.
    self.dispatchTree.rules.append(GraphViewBuilder(self.dispatchTree, graphs))
def prepare(self):
    """MainLoopApplication hook; no preparation needed for the dispatcher."""
    pass
def stop(self):
    '''Stops the application part of the dispatcher.'''
    # Currently a no-op; the HTTP requester shutdown is disabled.
    #self.httpRequester.stopAll()
    pass
@property
def modified(self):
    """True when the model has pending DB changes (create/modify/archive)."""
    tree = self.dispatchTree
    pending = (tree.toArchiveElements,
               tree.toCreateElements,
               tree.toModifyElements)
    return any(pending)
def mainLoop(self):
    '''
    | Dispatcher main loop iteration.
    | Periodically called with tornado's internal callback mechanism, the frequency is defined by config: CORE.MASTER_UPDATE_INTERVAL
    | During this process, the dispatcher will:
    |   - update completion and status for all jobs in dispatchTree
    |   - update status of renderNodes
    |   - validate inter tasks dependencies
    |   - update the DB with recorded changes in the model
    |   - compute new assignments and send them to the proper rendernodes
    |   - release all finished jobs/rns
    '''
    log = logging.getLogger('main')
    loopStartTime = time.time()
    prevTimer = loopStartTime

    if singletonconfig.get('CORE', 'GET_STATS'):
        singletonstats.theStats.cycleDate = loopStartTime

    log.info("-----------------------------------------------------")
    log.info(" Start dispatcher process cycle (old version).")

    # Poll the thread pool for completed network requests (assignments
    # sent on worker threads, see sendAssignments).
    try:
        self.threadPool.poll()
    except NoResultsPending:
        pass
    else:
        log.info("finished some network requests")
        pass

    self.cycle += 1

    # Update of allocation is done when parsing the tree for completion and status update (done partially for invalidated node only i.e. when needed)
    self.dispatchTree.updateCompletionAndStatus()
    if singletonconfig.get('CORE', 'GET_STATS'):
        singletonstats.theStats.cycleTimers['update_tree'] = time.time() - prevTimer
    log.info("%8.2f ms --> update completion status" % ((time.time() - prevTimer) * 1000))
    prevTimer = time.time()

    # Update render nodes
    self.updateRenderNodes()
    if singletonconfig.get('CORE', 'GET_STATS'):
        singletonstats.theStats.cycleTimers['update_rn'] = time.time() - prevTimer
    log.info("%8.2f ms --> update render node" % ((time.time() - prevTimer) * 1000))
    prevTimer = time.time()

    # Validate dependencies
    self.dispatchTree.validateDependencies()
    if singletonconfig.get('CORE', 'GET_STATS'):
        singletonstats.theStats.cycleTimers['update_dependencies'] = time.time() - prevTimer
    log.info("%8.2f ms --> validate dependencies" % ((time.time() - prevTimer) * 1000))
    prevTimer = time.time()

    # update db
    self.updateDB()
    if singletonconfig.get('CORE', 'GET_STATS'):
        singletonstats.theStats.cycleTimers['update_db'] = time.time() - prevTimer
    log.info("%8.2f ms --> update DB" % ((time.time() - prevTimer) * 1000))
    prevTimer = time.time()

    # compute and send command assignments to rendernodes
    assignments = self.computeAssignments()
    if singletonconfig.get('CORE', 'GET_STATS'):
        singletonstats.theStats.cycleTimers['compute_assignment'] = time.time() - prevTimer
    log.info("%8.2f ms --> compute assignments." % ((time.time() - prevTimer) * 1000))
    prevTimer = time.time()

    self.sendAssignments(assignments)
    if singletonconfig.get('CORE', 'GET_STATS'):
        singletonstats.theStats.cycleTimers['send_assignment'] = time.time() - prevTimer
        singletonstats.theStats.cycleCounts['num_assignments'] = len(assignments)
    log.info("%8.2f ms --> send %r assignments." % ((time.time() - prevTimer) * 1000, len(assignments)))
    prevTimer = time.time()

    # call the release finishing status on all rendernodes
    for renderNode in self.dispatchTree.renderNodes.values():
        renderNode.releaseFinishingStatus()
    if singletonconfig.get('CORE', 'GET_STATS'):
        singletonstats.theStats.cycleTimers['release_finishing'] = time.time() - prevTimer
    log.info("%8.2f ms --> releaseFinishingStatus" % ((time.time() - prevTimer) * 1000))
    prevTimer = time.time()

    loopDuration = (time.time() - loopStartTime)*1000
    log.info("%8.2f ms --> cycle ended. " % loopDuration)

    #
    # Send stat data to disk
    #
    if singletonconfig.get('CORE', 'GET_STATS'):
        singletonstats.theStats.cycleTimers['time_elapsed'] = time.time() - loopStartTime
        singletonstats.theStats.aggregate()
def updateDB(self):
    """Flush pending model changes (creates/updates/archives) to the DB."""
    if not settings.DB_ENABLE:
        return
    tree = self.dispatchTree
    self.pulidb.createElements(tree.toCreateElements)
    self.pulidb.updateElements(tree.toModifyElements)
    self.pulidb.archiveElements(tree.toArchiveElements)
    # Changes are persisted: clear the pending-change lists.
    tree.resetDbElements()
def computeAssignments(self):
    '''Computes and returns a list of (rendernode, command) assignments.

    Two phases: first the maxRN quota of every active job's pool share is
    recomputed from the pool's online render nodes and the jobs' dispatch
    keys (priority proportions), then each entry-point node is iterated to
    draw concrete (rendernode, command) pairs.

    :returns: list of (renderNode, [commands]) pairs (dict items).
    '''
    LOGGER = logging.getLogger('main')

    from .model.node import NoRenderNodeAvailable, NoLicenseAvailableForTask
    # if no rendernodes available, return
    if not any(rn.isAvailable() for rn in self.dispatchTree.renderNodes.values()):
        return []

    # first create a set of entrypoints that are not done nor cancelled nor blocked nor paused and that have at least one command ready
    # FIXME: hack to avoid getting the 'graphs' poolShare node in entryPoints, need to avoid it more nicely...
    entryPoints = set([poolShare.node for poolShare in self.dispatchTree.poolShares.values()
                       if poolShare.node.status not in (NODE_BLOCKED, NODE_DONE, NODE_CANCELED, NODE_PAUSED) and poolShare.node.readyCommandCount > 0 and poolShare.node.name != 'graphs'])

    # don't proceed to the calculation if no render nodes available in the requested pools
    isRenderNodesAvailable = False
    for pool, jobsIterator in groupby(entryPoints, lambda x: x.mainPoolShare().pool):
        renderNodesAvailable = set([rn for rn in pool.renderNodes if rn.status not in [RN_UNKNOWN, RN_PAUSED, RN_WORKING]])
        if len(renderNodesAvailable):
            isRenderNodesAvailable = True
            break
    if not isRenderNodesAvailable:
        return []

    # Log time updating max rn
    prevTimer = time.time()

    # sort by pool for the groupby
    entryPoints = sorted(entryPoints, key=lambda node: node.mainPoolShare().pool)

    # update the value of the maxrn for the poolshares (parallel dispatching)
    for pool, jobsIterator in groupby(entryPoints, lambda x: x.mainPoolShare().pool):

        # we are treating every active job of the pool
        jobsList = [job for job in jobsIterator]

        # the new maxRN value is calculated based on the number of active jobs of the pool, and the number of online rendernodes of the pool
        onlineRenderNodes = set([rn for rn in pool.renderNodes if rn.status not in [RN_UNKNOWN, RN_PAUSED]])
        nbOnlineRenderNodes = len(onlineRenderNodes)

        # if we have a userdefined maxRN for some nodes, remove them from the list and substracts their maxRN from the pool's size
        l = jobsList[:]  # duplicate the list to be safe when removing elements
        for job in l:
            if job.mainPoolShare().userDefinedMaxRN and job.mainPoolShare().maxRN not in [-1, 0]:
                jobsList.remove(job)
                nbOnlineRenderNodes -= job.mainPoolShare().maxRN

        if len(jobsList) == 0:
            continue

        # Prepare updatedMaxRN with dispatch key proportions
        # list of dks (integer only)
        dkList = [job.dispatchKey for job in jobsList]
        nbJobs = len(jobsList)  # number of jobs in the current pool
        nbRNAssigned = 0        # number of render nodes assigned for this pool

        dkMin = min(dkList)
        # dkPositiveList: Shift all dks values in order that each min value of dk becomes 1
        dkPositiveList = map(lambda x: x-dkMin+1, dkList)  # dk values start at 1
        dkSum = sum(dkPositiveList)

        # sort by id (fifo)
        jobsList = sorted(jobsList, key=lambda x: x.id)
        # then sort by dispatchKey (priority)
        jobsList = sorted(jobsList, key=lambda x: x.dispatchKey, reverse=True)

        for dk, jobIterator in groupby(jobsList, lambda x: x.dispatchKey):

            jobs = [job for job in jobIterator]
            # dkPositive: Shift all dks values in order that each min value of dk becomes 1
            dkPositive = dk - dkMin + 1

            # Proportion of render nodes for this dispatch-key group
            updatedmaxRN = int(round(nbOnlineRenderNodes * (dkPositive / float(dkSum))))

            for job in jobs:
                job.mainPoolShare().maxRN = updatedmaxRN
                nbRNAssigned += updatedmaxRN

        # PRA: Here is the main choice!
        # Add remaining RNs to most important jobs (to fix rounding errors)
        unassignedRN = nbOnlineRenderNodes - nbRNAssigned
        while unassignedRN > 0:
            for job in jobsList:
                if unassignedRN <= 0:
                    break
                job.mainPoolShare().maxRN += 1
                unassignedRN -= 1

    if singletonconfig.get('CORE','GET_STATS'):
        singletonstats.theStats.assignmentTimers['update_max_rn'] = time.time() - prevTimer
    LOGGER.info( "%8.2f ms --> .... updating max RN values", (time.time() - prevTimer)*1000 )

    # now, we are treating every nodes
    # sort by id (fifo)
    entryPoints = sorted(entryPoints, key=lambda node: node.id)
    # then sort by dispatchKey (priority)
    entryPoints = sorted(entryPoints, key=lambda node: node.dispatchKey, reverse=True)

    # Put nodes with a userDefinedMaxRN first
    userDefEntryPoints = ifilter(lambda node: node.mainPoolShare().userDefinedMaxRN, entryPoints)
    standardEntryPoints = ifilter(lambda node: not node.mainPoolShare().userDefinedMaxRN, entryPoints)
    scoredEntryPoints = chain(userDefEntryPoints, standardEntryPoints)

    # Log time dispatching RNs
    prevTimer = time.time()

    # Iterate over each entryPoint to get an assignment
    assignments = []  # list of (renderNode, Command)
    for entryPoint in scoredEntryPoints:
        # If we have dedicated render nodes for this poolShare
        if not any([poolShare.hasRenderNodesAvailable() for poolShare in entryPoint.poolShares.values()]):
            continue

        try:
            for (rn, com) in entryPoint.dispatchIterator(lambda: self.queue.qsize() > 0):
                assignments.append((rn, com))
                # increment the allocatedRN for the poolshare
                entryPoint.mainPoolShare().allocatedRN += 1
                # save the active poolshare of the rendernode
                rn.currentpoolshare = entryPoint.mainPoolShare()
        except NoRenderNodeAvailable:
            pass
        except NoLicenseAvailableForTask:
            LOGGER.info("Missing license for node \"%s\" (other commands can start anyway)." % entryPoint.name)
            pass

    # Group the flat (rn, command) pairs by render node.
    assignmentDict = collections.defaultdict(list)
    for (rn, com) in assignments:
        assignmentDict[rn].append(com)

    if singletonconfig.get('CORE','GET_STATS'):
        singletonstats.theStats.assignmentTimers['dispatch_command'] = time.time() - prevTimer
    LOGGER.info( "%8.2f ms --> .... dispatching commands", (time.time() - prevTimer)*1000 )

    #
    # Check replacements
    #
    # - faire une passe pour les jobs n'ayant pas leur part de gateau
    #   - identifier dans leur pool les jobs killable
    #   - pour chaque ressource, si match : on jette le job en cours ET on desactive son attribut killable
    #
    # Backfill
    #
    # TODO refaire une passe pour les jobs ayant un attribut "killable" et au moins une pool additionnelle

    return assignmentDict.items()
def updateRenderNodes(self):
    """Refresh the status of every render node in the dispatch tree."""
    for node in self.dispatchTree.renderNodes.values():
        node.updateStatus()
def sendAssignments(self, assignmentList):
    '''Processes a list of (rendernode, command) assignments.

    Each assignment is POSTed to its render node on a worker thread from
    self.threadPool; failed sends are rolled back via _assignmentFailed.
    '''
    def sendAssignment(args):
        rendernode, commands = args

        failures = []
        for command in commands:
            headers = {}
            if not rendernode.idInformed:
                headers["rnId"] = rendernode.id

            # Walk up the task hierarchy so ancestor arguments/environment
            # are applied first and can be overridden by descendants.
            root = command.task
            ancestors = [root]
            while root.parent:
                root = root.parent
                ancestors.append(root)

            arguments = {}
            environment = {
                'PULI_USER': command.task.user,
                'PULI_ALLOCATED_MEMORY': unicode(rendernode.usedRam[command.id]),
                'PULI_ALLOCATED_CORES': unicode(rendernode.usedCoresNumber[command.id]),
            }
            for ancestor in ancestors:
                arguments.update(ancestor.arguments)
                environment.update(ancestor.environment)
            arguments.update(command.arguments)

            log = logging.getLogger('assign')
            log.info("Sending command: %d from task %s to %s" % (command.id, command.task.name, rendernode))

            commandDict = {
                "id": command.id,
                "runner": str(command.task.runner),
                "arguments": arguments,
                "validationExpression": command.task.validationExpression,
                "taskName": command.task.name,
                "relativePathToLogDir": "%d" % command.task.id,
                "environment": environment,
                "runnerPackages": command.runnerPackages,
                "watcherPackages": command.watcherPackages
            }
            body = json.dumps(commandDict)
            headers["Content-Length"] = len(body)
            headers["Content-Type"] = "application/json"

            try:
                resp, data = rendernode.request("POST", "/commands/", body, headers)
                if not resp.status == 202:
                    logging.getLogger('main.dispatcher').error("Assignment request failed: command %d on worker %s", command.id, rendernode.name)
                    failures.append((rendernode, command))
                else:
                    logging.getLogger('main.dispatcher').info("Sent assignment of command %d to worker %s", command.id, rendernode.name)
            except rendernode.RequestFailed, e:
                logging.getLogger('main.dispatcher').error("Assignment of command %d to worker %s failed. Worker is likely dead (%r)", command.id, rendernode.name, e)
                failures.append((rendernode, command))
        return failures

    # Fan the sends out on the thread pool; _assignmentFailed receives the
    # list of failed (rendernode, command) pairs for rollback.
    requests = makeRequests(sendAssignment, [[a, b] for (a, b) in assignmentList], self._assignmentFailed)
    for request in requests:
        self.threadPool.putRequest(request)
def _assignmentFailed(self, request, failures):
    """Threadpool callback: roll back every failed assignment so the
    command can be dispatched again on a later cycle."""
    log = logging.getLogger('main.dispatcher')
    for rendernode, command in failures:
        rendernode.clearAssignment(command)
        command.clearAssignment()
        log.info(" - assignment cleared: command[%r] on rn[%r]" % (command.id, rendernode.name))
def handleNewGraphRequestApply(self, graph):
    '''Handles a graph submission request and closes the given ticket
    according to the result of the process.

    :param graph: dict describing the submitted job graph
    :returns: the list of nodes created in the dispatch tree
    '''
    prevTimer = time.time()
    nodes = self.dispatchTree.registerNewGraph(graph)

    logging.getLogger('main.dispatcher').info("%.2f ms --> graph registered" % ((time.time() - prevTimer) * 1000))
    prevTimer = time.time()

    # handles the case of post job with paused status
    for node in nodes:
        try:
            # The 'paused' tag may arrive as the string 'true' or as True.
            if node.tags['paused'] == 'true' or node.tags['paused'] == True:
                node.setPaused(True)
        except KeyError:
            continue

    logging.getLogger('main.dispatcher').info("%.2f ms --> jobs set in pause if needed" % ((time.time() - prevTimer) * 1000))
    prevTimer = time.time()

    logging.getLogger('main.dispatcher').info('Added graph "%s" to the model.' % graph['name'])
    return nodes
def updateCommandApply(self, dct):
    '''
    Called from a RN with a json desc of a command (ie rendernode info, command info etc).
    Raise an exception to tell caller to send a HTTP404 response to RN, if no error a HTTP200 will be sent instead.
    '''
    log = logging.getLogger('main.dispatcher')
    commandId = dct['id']
    renderNodeName = dct['renderNodeName']

    # The command must still be present in the dispatch tree.
    try:
        command = self.dispatchTree.commands[commandId]
    except KeyError:
        raise KeyError("Command not found: %d" % commandId)

    if not command.renderNode:
        # shouldn't we reassign the command to the rn??
        raise KeyError("Command %d (%d) is no longer registered on rendernode %s" % (commandId, int(dct['status']), renderNodeName))
    elif command.renderNode.name != renderNodeName:
        # in this case, kill the command running on command.renderNode.name
        # rn = command.renderNode
        # rn.clearAssignment(command)
        # rn.request("DELETE", "/commands/" + str(commandId) + "/")
        log.warning("The emitting RN %s is different from the RN assigned to the command in pulimodel: %s." % (renderNodeName, command.renderNode.name))
        raise KeyError("Command %d is running on a different rendernode (%s) than the one in puli's model (%s)." % (commandId, renderNodeName, command.renderNode.name))

    rn = command.renderNode
    # Any update from the RN counts as a liveness signal for the worker.
    rn.lastAliveTime = max(time.time(), rn.lastAliveTime)

    # if command is no more in the rn's list, it means the rn was reported as timeout or asynchronously removed from RN
    if commandId not in rn.commands:
        if len(rn.commands) == 0 and command.status is not enums.CMD_CANCELED:
            # in this case, re-add the command to the list of the rendernode
            rn.commands[commandId] = command
            # we should re-reserve the lic
            rn.reserveLicense(command, self.licenseManager)
            log.warning("re-assigning command %d on %s. (TIMEOUT?)" % (commandId, rn.name))
        # Command is already removed from RN at this point (it happens when receiving a CANCEL order from external GUI)
        # else:
        #     # The command has been cancelled on the dispatcher but update from RN only arrives now
        #     log.warning("Status update for %d (%d) from %s but command is currently assigned." % (commandId, int(dct['status']), renderNodeName))

    if "status" in dct:
        command.status = int(dct['status'])

    # Completion is only meaningful while the command is actually running.
    if "completion" in dct and command.status == enums.CMD_RUNNING:
        command.completion = float(dct['completion'])

    command.message = dct['message']

    if "validatorMessage" in dct:
        command.validatorMessage = dct['validatorMessage']
        command.errorInfos = dct['errorInfos']
        if command.validatorMessage:
            # A non-empty validator message flags the command as failed.
            command.status = enums.CMD_ERROR

    # Stats info received and not none. Means we need to update it on the command.
    # If stats received is none, no change on the worker, we do not update the command.
    if "stats" in dct and dct["stats"] is not None:
        command.stats = dct["stats"]
def queueWorkload(self, workload):
    """Enqueue a workload for asynchronous processing by the main loop."""
    self.queue.put(workload)
| |
import os
import warnings
from tempfile import mkstemp
import numpy as np
from nipy.testing import assert_true, assert_equal, assert_raises, \
assert_array_equal, assert_array_almost_equal, funcfile, parametric
from nipy.testing.decorators import if_templates
from nipy.utils import templates, DataError
from nipy.io.api import load_image, save_image, as_image
from nipy.core.api import fromarray
gfilename = ''
gtmpfile = None
gimg = None
def load_template_img():
    """Load and return the ICBM152 2mm T1 template image."""
    fname = templates.get_filename('ICBM152', '2mm', 'T1.nii.gz')
    return load_image(fname)
def setup_module():
    """Module fixture: silence warnings, load the template image if the
    template data is installed, and create a shared temporary .nii.gz file.
    """
    warnings.simplefilter("ignore")
    global gfilename, gtmpfile, gimg
    try:
        gimg = load_template_img()
    except DataError:
        # Template data not installed; tests guarded by @if_templates
        # will be skipped, so it is fine to leave gimg as None.
        pass
    fd, gfilename = mkstemp(suffix='.nii.gz')
    # mkstemp returns an already-open OS-level descriptor; close it so it
    # is not leaked -- the tests only use the file through its name.
    os.close(fd)
    gtmpfile = open(gfilename)
def teardown_module():
    """Undo the module fixture: restore warnings and remove the temp file."""
    warnings.resetwarnings()
    global gtmpfile, gfilename
    gtmpfile.close()
    os.unlink(gfilename)
def test_badfile():
    # An unrecognized image extension must raise at load time.
    bad_name = "bad_file.foo"
    yield assert_raises, RuntimeError, load_image, bad_name
@if_templates
def test_maxminmean_values():
    # loaded array values from SPM
    y = np.asarray(gimg)
    yield assert_equal, y.shape, tuple(gimg.shape)
    # Reference statistics computed externally (SPM) on the same template.
    yield assert_array_almost_equal, y.max(), 1.000000059
    yield assert_array_almost_equal, y.mean(), 0.273968048
    yield assert_equal, y.min(), 0.0
@if_templates
def test_nondiag():
    # A non-diagonal affine must survive a save/load round trip.
    gimg.affine[0,1] = 3.0
    save_image(gimg, gtmpfile.name)
    img2 = load_image(gtmpfile.name)
    yield assert_true, np.allclose(img2.affine, gimg.affine)
def uint8_to_dtype(dtype, name):
    """Save random uint8 data to `name` as `dtype`; return (reloaded, original).

    :param dtype: numpy dtype to save the image as
    :param name: filename to save the image to
    :returns: tuple of (array re-read from disk, original array)
    """
    # (The old no-op ``dtype = dtype`` line was removed.)
    shape = (2, 3, 4)
    dmax = np.iinfo(np.uint8).max
    data = np.random.randint(0, dmax, size=shape)
    # Pin both extremes so the scaling code is exercised over [0, dmax].
    data[0, 0, 0] = 0
    data[1, 0, 0] = dmax
    data = data.astype(np.uint8)  # randint returns np.int32
    img = fromarray(data, 'kji', 'zxy')
    newimg = save_image(img, name, dtype=dtype)
    newdata = np.asarray(newimg)
    return newdata, data
def test_scaling_uint8_to_uint8():
    # uint8 -> uint8 round trip should be lossless.
    newdata, data = uint8_to_dtype(np.uint8, gtmpfile.name)
    yield assert_true, np.allclose(newdata, data)
def test_scaling_uint8_to_uint16():
    # uint8 -> uint16 round trip should preserve the values.
    newdata, data = uint8_to_dtype(np.uint16, gtmpfile.name)
    yield assert_true, np.allclose(newdata, data)
def test_scaling_uint8_to_float32():
    # uint8 -> float32 round trip should preserve the values.
    newdata, data = uint8_to_dtype(np.float32, gtmpfile.name)
    yield assert_true, np.allclose(newdata, data)
def test_scaling_uint8_to_int32():
    # uint8 -> int32 round trip should preserve the values.
    newdata, data = uint8_to_dtype(np.int32, gtmpfile.name)
    yield assert_true, np.allclose(newdata, data)
def float32_to_dtype(dtype):
    """Save random float32 data as `dtype`; return (reloaded, original).

    Utility for the scaling_float32 tests.
    """
    # (The old no-op ``dtype = dtype`` line was removed; ``shape`` is now
    # actually used instead of a duplicated literal.)
    shape = (2, 3, 4)
    # Scale chosen to exceed the uint16 range so scaling is exercised.
    scale = np.iinfo(np.uint16).max * 2.0
    data = np.random.normal(size=shape, scale=scale)
    # Pin the float32 extremes to stress the scaling code.
    data[0, 0, 0] = np.finfo(np.float32).max
    data[1, 0, 0] = np.finfo(np.float32).min
    # random.normal will return data as native machine type
    data = data.astype(np.float32)
    img = fromarray(data, 'kji', 'zyx')
    newimg = save_image(img, gtmpfile.name, dtype=dtype)
    newdata = np.asarray(newimg)
    return newdata, data
def test_scaling_float32():
    # float32 data should survive a save/load round trip at several dtypes.
    for target_dtype in (np.uint8, np.uint16, np.int16, np.float32):
        newdata, data = float32_to_dtype(target_dtype)
        yield assert_array_almost_equal, newdata, data
@if_templates
def test_header_roundtrip():
    """Header fields written before save must survive a load round trip."""
    img = load_template_img()
    fd, name = mkstemp(suffix='.nii.gz')
    # Close the OS-level descriptor from mkstemp so it is not leaked.
    os.close(fd)
    tmpfile = open(name)
    hdr = img.header
    # Update some header values and make sure they're saved
    hdr['slice_duration'] = 0.200
    hdr['intent_p1'] = 2.0
    hdr['descrip'] = 'descrip for TestImage:test_header_roundtrip'
    hdr['slice_end'] = 12
    img.header = hdr
    save_image(img, tmpfile.name)
    newimg = load_image(tmpfile.name)
    newhdr = newimg.header
    tmpfile.close()
    os.unlink(name)
    yield (assert_array_almost_equal,
           newhdr['slice_duration'],
           hdr['slice_duration'])
    yield assert_equal, newhdr['intent_p1'], hdr['intent_p1']
    yield assert_equal, newhdr['descrip'], hdr['descrip']
    yield assert_equal, newhdr['slice_end'], hdr['slice_end']
@if_templates
def test_file_roundtrip():
    """Data, shape, ndim and affine must survive a save/load round trip."""
    img = load_template_img()
    fd, name = mkstemp(suffix='.nii.gz')
    # Close the OS-level descriptor from mkstemp so it is not leaked.
    os.close(fd)
    tmpfile = open(name)
    save_image(img, tmpfile.name)
    img2 = load_image(tmpfile.name)
    data = np.asarray(img)
    data2 = np.asarray(img2)
    tmpfile.close()
    os.unlink(name)
    # verify data
    yield assert_true, np.allclose(data2, data)
    yield assert_true, np.allclose(data2.mean(), data.mean())
    yield assert_true, np.allclose(data2.min(), data.min())
    yield assert_true, np.allclose(data2.max(), data.max())
    # verify shape and ndims
    yield assert_equal, img2.shape, img.shape
    yield assert_equal, img2.ndim, img.ndim
    # verify affine
    yield assert_true, np.allclose(img2.affine, img.affine)
def test_roundtrip_fromarray():
    """An image built with fromarray must round-trip through disk intact."""
    data = np.random.rand(10, 20, 30)
    img = fromarray(data, 'kji', 'xyz')
    fd, name = mkstemp(suffix='.nii.gz')
    # Close the OS-level descriptor from mkstemp so it is not leaked.
    os.close(fd)
    tmpfile = open(name)
    save_image(img, tmpfile.name)
    img2 = load_image(tmpfile.name)
    data2 = np.asarray(img2)
    tmpfile.close()
    os.unlink(name)
    # verify data
    yield assert_true, np.allclose(data2, data)
    yield assert_true, np.allclose(data2.mean(), data.mean())
    yield assert_true, np.allclose(data2.min(), data.min())
    yield assert_true, np.allclose(data2.max(), data.max())
    # verify shape and ndims
    yield assert_equal, img2.shape, img.shape
    yield assert_equal, img2.ndim, img.ndim
    # verify affine
    yield assert_true, np.allclose(img2.affine, img.affine)
@parametric
def test_as_image():
    # test image creation / pass through function
    img = as_image(funcfile)  # string filename
    # NOTE: `unicode` is Python 2 only; this suite predates Python 3.
    img1 = as_image(unicode(funcfile))
    # Passing an existing image must return the very same object.
    img2 = as_image(img)
    yield assert_equal(img.affine, img1.affine)
    yield assert_array_equal(np.asarray(img), np.asarray(img1))
    yield assert_true(img is img2)
| |
# coding=utf-8
# Copyright 2022 The Robustness Metrics Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Define datasets for OOD detection tasks."""
import abc
from typing import Callable, Optional
from robustness_metrics.common import ops
from robustness_metrics.common import types
from robustness_metrics.datasets import base
from robustness_metrics.datasets import tfds as rm_tfds
import tensorflow as tf
def _set_label_to_one(feature):
  """Overwrite the label (and its metadata copy) of `feature` with ones."""
  ones = tf.ones_like(feature["label"])
  feature["label"] = ones
  feature["metadata"]["label"] = ones
  return feature
def _set_label_to_zero(feature):
  """Overwrite the label (and its metadata copy) of `feature` with zeros."""
  zeros = tf.zeros_like(feature["label"])
  feature["label"] = zeros
  feature["metadata"]["label"] = zeros
  return feature
def _keep_common_fields(feature, spec):
"""Delete the keys of feature that are not in spec."""
if not isinstance(feature, dict): return feature
common_keys = set(feature.keys()) & set(spec.keys())
return {
key: _keep_common_fields(feature[key], spec[key]) for key in common_keys
}
def _concatenate(in_ds: tf.data.Dataset,
                 out_ds: tf.data.Dataset) -> tf.data.Dataset:
  """Concatenate in_ds and out_ds, making sure they have compatible specs."""
  in_spec = in_ds.element_spec
  out_spec = out_ds.element_spec

  def _format_in(feature):
    # In-distribution examples get label 1, trimmed to the common fields.
    return _keep_common_fields(_set_label_to_one(feature), out_spec)

  def _format_out(feature):
    # Out-of-distribution examples get label 0, trimmed likewise.
    return _keep_common_fields(_set_label_to_zero(feature), in_spec)

  return in_ds.map(_format_in).concatenate(out_ds.map(_format_out))
def _make_element_id_unique(dataset_tag: str):
  """We make element_id differ in the in- and out-of-distribution datasets.

  Returns a map function that re-hashes each element id together with a
  fingerprint of `dataset_tag`.
  """
  tag_fingerprint = ops.fingerprint_int64(dataset_tag)

  def _unique_fn(feature):
    metadata = feature["metadata"]
    metadata["element_id"] = ops.fingerprint_int64(
        (metadata["element_id"], tag_fingerprint))
    return feature

  return _unique_fn
class OodDetectionDataset(base.Dataset, metaclass=abc.ABCMeta):
  """A dataset made of a pair of one in- and one out-of-distribution datasets.

  In this binary (detection) task, the in-distribution dataset has labels 1 and
  the out-of-distribution dataset has labels 0.
  See https://arxiv.org/pdf/2106.03004.pdf for more background.
  """

  @property
  def info(self) -> base.DatasetInfo:
    # Binary detection task: in-distribution (1) vs out-of-distribution (0).
    return base.DatasetInfo(num_classes=2)

  # `abc.abstractproperty` is deprecated since Python 3.3; the modern
  # equivalent is stacking @property over @abc.abstractmethod.
  @property
  @abc.abstractmethod
  def in_dataset(self) -> base.Dataset:
    """The in-distribution dataset."""

  @property
  @abc.abstractmethod
  def out_dataset(self) -> base.Dataset:
    """The out-of-distribution dataset."""

  def load(self,
           preprocess_fn: Optional[Callable[[types.Features], types.Features]]
           ) -> tf.data.Dataset:
    """Load both halves, relabel them and return their concatenation."""
    in_ds = self.in_dataset.load(preprocess_fn)
    in_ds = in_ds.map(_make_element_id_unique("in_ds"))
    out_ds = self.out_dataset.load(preprocess_fn)
    out_ds = out_ds.map(_make_element_id_unique("out_ds"))
    return _concatenate(in_ds, out_ds)
# The choices of the pairing of the datasets are motivated by the setting of:
# https://arxiv.org/pdf/2106.03004.pdf, Appendix C.
@base.registry.register("cifar10_vs_cifar100")
class Cifar10VsCifar100Dataset(OodDetectionDataset):
  """The CIFAR-10 vs. CIFAR-100 ood detection dataset."""

  @property
  def in_dataset(self) -> base.Dataset:
    """CIFAR-10: the in-distribution half (labels forced to 1)."""
    return rm_tfds.Cifar10Dataset()

  @property
  def out_dataset(self) -> base.Dataset:
    """CIFAR-100: the out-of-distribution half (labels forced to 0)."""
    return rm_tfds.Cifar100Dataset()
@base.registry.register("cifar10_vs_dtd")
class Cifar10VsDtdDataset(OodDetectionDataset):
  """The CIFAR-10 vs. DTD ood detection dataset."""

  @property
  def in_dataset(self) -> base.Dataset:
    """CIFAR-10: the in-distribution half (labels forced to 1)."""
    return rm_tfds.Cifar10Dataset()

  @property
  def out_dataset(self) -> base.Dataset:
    """DTD (textures): the out-of-distribution half (labels forced to 0)."""
    return rm_tfds.DtdDataset()
@base.registry.register("cifar10_vs_places365")
class Cifar10VsPlaces365Dataset(OodDetectionDataset):
  """The CIFAR-10 vs. Places365 ood detection dataset."""

  @property
  def in_dataset(self) -> base.Dataset:
    """CIFAR-10: the in-distribution half (labels forced to 1)."""
    return rm_tfds.Cifar10Dataset()

  @property
  def out_dataset(self) -> base.Dataset:
    """Places365: the out-of-distribution half (labels forced to 0)."""
    return rm_tfds.Places365Dataset()
@base.registry.register("cifar10_vs_svhn")
class Cifar10VsSvhnDataset(OodDetectionDataset):
  """The CIFAR-10 vs. SVHN ood detection dataset."""

  @property
  def in_dataset(self) -> base.Dataset:
    """CIFAR-10: the in-distribution half (labels forced to 1)."""
    return rm_tfds.Cifar10Dataset()

  @property
  def out_dataset(self) -> base.Dataset:
    """SVHN: the out-of-distribution half (labels forced to 0)."""
    return rm_tfds.SvhnDataset()
@base.registry.register("cifar100_vs_cifar10")
class Cifar100VsCifar10Dataset(OodDetectionDataset):
  """The CIFAR-100 vs. CIFAR-10 ood detection dataset."""

  @property
  def in_dataset(self) -> base.Dataset:
    """CIFAR-100: the in-distribution half (labels forced to 1)."""
    return rm_tfds.Cifar100Dataset()

  @property
  def out_dataset(self) -> base.Dataset:
    """CIFAR-10: the out-of-distribution half (labels forced to 0)."""
    return rm_tfds.Cifar10Dataset()
@base.registry.register("cifar100_vs_dtd")
class Cifar100VsDtdDataset(OodDetectionDataset):
  """The CIFAR-100 vs. DTD ood detection dataset."""

  @property
  def in_dataset(self) -> base.Dataset:
    """CIFAR-100: the in-distribution half (labels forced to 1)."""
    return rm_tfds.Cifar100Dataset()

  @property
  def out_dataset(self) -> base.Dataset:
    """DTD (textures): the out-of-distribution half (labels forced to 0)."""
    return rm_tfds.DtdDataset()
@base.registry.register("cifar100_vs_places365")
class Cifar100VsPlaces365Dataset(OodDetectionDataset):
  """The CIFAR-100 vs. Places365 ood detection dataset."""

  @property
  def in_dataset(self) -> base.Dataset:
    """CIFAR-100: the in-distribution half (labels forced to 1)."""
    return rm_tfds.Cifar100Dataset()

  @property
  def out_dataset(self) -> base.Dataset:
    """Places365: the out-of-distribution half (labels forced to 0)."""
    return rm_tfds.Places365Dataset()
@base.registry.register("cifar100_vs_svhn")
class Cifar100VsSvhnDataset(OodDetectionDataset):
  """The CIFAR-100 vs. SVHN ood detection dataset."""

  @property
  def in_dataset(self) -> base.Dataset:
    """CIFAR-100: the in-distribution half (labels forced to 1)."""
    return rm_tfds.Cifar100Dataset()

  @property
  def out_dataset(self) -> base.Dataset:
    """SVHN: the out-of-distribution half (labels forced to 0)."""
    return rm_tfds.SvhnDataset()
| |
import f5
import f5.util
import re
from bigsuds import ServerError
# Convert PoolMember objects into a list of address, port dictionaries
def pms_to_addrportsq(poolmembers):
    """ Converts PoolMembers into a list of address, port dictionaries """
    addrports = []
    for member in poolmembers:
        addrports.append({'address': member._node.name, 'port': member._port})
    return addrports
# Truncate lbmethod
def munge_lbmethod(lbmethods):
    """Strip the 'LB_METHOD_' prefix and lowercase each method name."""
    prefix_len = len('LB_METHOD_')
    return [method[prefix_len:].lower() for method in lbmethods]
# Un-truncate lbmethod
def unmunge_lbmethod(lbmethods):
    """Restore the 'LB_METHOD_' prefix and uppercase each method name."""
    return ['LB_METHOD_' + method.upper() for method in lbmethods]
class Pool(object):
    """An F5 BIG-IP LTM pool, backed by the iControl LocalLB.Pool wsdl.

    Property reads go to the loadbalancer; the last fetched value is also
    cached on the instance under the same name prefixed with '_'.
    """
    __version = 11
    __wsdl = 'LocalLB.Pool'

    def __init__(self, name, lb=None, description=None, lbmethod=None,
            members=None, minimum_active_member=None, minimum_up_member=None,
            slow_ramp_time=None, fromdict=None):
        self._lb = lb
        if fromdict is not None:
            # Restore from a dictionary: with an lb the values are pushed
            # to the device, without one they only populate local state.
            if lb is not None:
                self.dictionary = fromdict
            else:
                self._dictionary = fromdict
        else:
            self.__name = name
            self._active_member_count = None
            self._description = description
            self._lbmethod = lbmethod
            self._members = members
            self._minimum_active_member = minimum_active_member
            self._minimum_up_member = minimum_up_member
            # Bug fix: the constructor argument was previously discarded
            # (this attribute was unconditionally set to None).
            self._slow_ramp_time = slow_ramp_time
            self._statistics = None
        # Instances use the bound helper; the _lbcall classmethod below
        # serves class-level (unbound) calls.
        self._lbcall = self.__lbcall

    def __repr__(self):
        return "f5.Pool('%s')" % (self._name)

    def __str__(self):
        return self._name

    # This just adds the wsdl to calls to the lb for convenience
    def __lbcall(self, call, *args, **kwargs):
        return self.lb._call(self.__wsdl + '.' + call, *args, **kwargs)

    @classmethod
    def _lbcall(cls, lb, call, *args, **kwargs):
        return lb._call(cls.__wsdl + '.' + call, *args, **kwargs)

    ###########################################################################
    # Properties
    ###########################################################################
    # Asynchronous properties are prefixed with a '_'
    #
    # All properties are fetched directly from the lb, but also stored in local
    # variables prefixed with an underscore '_' for convenience.
    #
    # If you want to fetch an attribute without calling the lb, get the
    # attribute prefixed with an underscore.

    #### LB ####
    @property
    def lb(self):
        return self._lb

    @lb.setter
    def lb(self, value):
        # NOTE(review): refresh() runs before the new lb is assigned, so it
        # pulls state from the previous device -- confirm this is intended.
        self.refresh()
        self._lb = value

    #### NAME ####
    @property
    def name(self):
        return self.__name

    @property
    def _name(self):
        return self.__name

    @_name.setter
    @f5.util.updatefactorycache
    def _name(self, value):
        # Bug fix: previously assigned the undefined name 'name', which
        # raised NameError whenever the setter was used.
        self.__name = value

    #### ACTIVE_MEMBER_COUNT ####
    @property
    def active_member_count(self):
        self._active_member_count = self._lbcall('get_active_member_count', [self._name])[0]
        return self._active_member_count

    #### DESCRIPTION ####
    @property
    def description(self):
        self._description = self._lbcall('get_description', [self._name])[0]
        return self._description

    @description.setter
    @f5.util.lbwriter2
    def description(self, value):
        self._lbcall('set_description', [self._name], [value])
        self._description = value

    #### LBMETHOD ####
    @property
    def lbmethod(self):
        # Values are stored/exposed in the short lowercase form.
        self._lbmethod = munge_lbmethod(self._lbcall('get_lb_method', [self._name]))[0]
        return self._lbmethod

    @lbmethod.setter
    def lbmethod(self, value):
        # NOTE(review): unlike the other setters this one is not decorated
        # with @f5.util.lbwriter2 -- confirm whether that is deliberate.
        self._lbcall('set_lb_method', [self._name], unmunge_lbmethod([value]))
        self._lbmethod = value.lower()

    #### MEMBERS ####
    @property
    def members(self):
        self._members = f5.PoolMember._get(self._lb, pools=[self], minimal=True)
        return self._members

    @members.setter
    @f5.util.lbtransaction
    def members(self, value):
        # Replace the whole membership atomically within a transaction.
        current = self._lbcall('get_member', [self._name])
        should = pms_to_addrportsq(value)
        self._lbcall('remove_member', [self._name], [current])
        self._lbcall('add_member', [self._name], [should])
        self._members = value

    #### MINIMUM_ACTIVE_MEMBER ####
    @property
    def minimum_active_member(self):
        self._minimum_active_member = self._lbcall(
            'get_minimum_active_member', [self._name])[0]
        return self._minimum_active_member

    @minimum_active_member.setter
    @f5.util.lbwriter2
    def minimum_active_member(self, value):
        self._lbcall('set_minimum_active_member', [self._name], [value])
        self._minimum_active_member = value

    #### MINIMUM_UP_MEMBER ####
    @property
    def minimum_up_member(self):
        self._minimum_up_member = self._lbcall(
            'get_minimum_up_member', [self._name])[0]
        return self._minimum_up_member

    @minimum_up_member.setter
    @f5.util.lbwriter2
    def minimum_up_member(self, value):
        self._lbcall('set_minimum_up_member', [self._name], [value])
        self._minimum_up_member = value

    #### SLOW_RAMP_TIME ####
    @property
    def slow_ramp_time(self):
        self._slow_ramp_time = self._lbcall(
            'get_slow_ramp_time', [self._name])[0]
        return self._slow_ramp_time

    @slow_ramp_time.setter
    @f5.util.lbwriter2
    def slow_ramp_time(self, value):
        self._lbcall('set_slow_ramp_time', [self._name], [value])
        self._slow_ramp_time = value

    #### STATISTICS ####
    @property
    def statistics(self):
        self._statistics = self._lbcall('get_statistics',
                [self._name])['statistics'][0]
        return self._statistics

    ###########################################################################
    # Private API
    ###########################################################################
    @classmethod
    def _get_objects(cls, lb, names, minimal=False):
        """Returns a list of Pool objects from a list of pool names"""
        if not names:
            return []
        pools = cls.factory.create(names, lb)
        if not minimal:
            # One bulk call per attribute instead of per-pool round trips.
            active_member_count = cls._lbcall(lb, 'get_active_member_count',
                    names)
            description = cls._lbcall(lb, 'get_description', names)
            lbmethod = cls._lbcall(lb, 'get_lb_method', names)
            members = cls._lbcall(lb, 'get_member', names)
            minimum_active_member = cls._lbcall(lb, 'get_minimum_active_member',
                    names)
            minimum_up_member = cls._lbcall(lb, 'get_minimum_up_member',
                    names)
            slow_ramp_time = cls._lbcall(lb, 'get_slow_ramp_time', names)
            statistics = cls._lbcall(lb, 'get_statistics', names)
            for idx, pool in enumerate(pools):
                pool._active_member_count = active_member_count[idx]
                pool._description = description[idx]
                pool._lbmethod = lbmethod[idx]
                pool._minimum_active_member = minimum_active_member[idx]
                pool._minimum_up_member = minimum_up_member[idx]
                pool._slow_ramp_time = slow_ramp_time[idx]
                pool._statistics = statistics['statistics'][idx]
                pool._members = f5.PoolMember._get_objects(lb, [pool],
                        [members[idx]], minimal=True)
        return pools

    @classmethod
    def _get(cls, lb, pattern=None, minimal=False):
        """Return Pool objects for all pools, optionally regex-filtered."""
        names = cls._lbcall(lb, 'get_list')
        if not names:
            return []
        if pattern is not None:
            # Duck-type instead of re._pattern_type: that private attribute
            # was removed in Python 3.7; compiled patterns have .match.
            if not hasattr(pattern, 'match'):
                pattern = re.compile(pattern)
            names = [name for name in names if pattern.match(name)]
        return cls._get_objects(lb, names, minimal)

    ###########################################################################
    # Public API
    ###########################################################################
    def refresh(self):
        """Fetch all attributes from the lb"""
        self.active_member_count
        self.description
        self.lbmethod
        self.members
        self.minimum_active_member
        self.minimum_up_member
        self.slow_ramp_time
        self.statistics

    def exists(self):
        """Return True if the pool exists on the lb, False otherwise."""
        try:
            self._lbcall('get_description', [self._name])
        except ServerError as e:
            if 'was not found' in str(e):
                return False
            else:
                raise
        # (A redundant 'except: raise' clause was removed; unexpected
        # exceptions propagate unchanged either way.)
        return True

    def reset_statistics(self):
        """Reset the pool's statistics counters on the lb."""
        self._lbcall('reset_statistics', [self._name])

    @f5.util.lbtransaction
    def save(self):
        """Create the pool if missing and push the cached description."""
        if not self.exists():
            if self._lbmethod is None or self._members is None:
                raise RuntimeError('lbmethod and members must be set on create')
            self._lbcall('create_v2', [self._name],
                    [unmunge_lbmethod([self._lbmethod])[0]], [self._members])
        if self._description is not None:
            self.description = self._description

    @f5.util.lbwriter2
    def delete(self):
        """Delete the pool from the lb"""
        self._lbcall('delete_pool', [self._name])
Pool.factory = f5.util.CachedFactory(Pool)
class PoolList(list):
    """A list of Pool objects selected by loadbalancer, partition and
    name pattern, with bulk property accessors.
    """

    def __init__(self,
                 lb=None,
                 pattern=None,
                 partition='/',
                 fromdict=None):
        self._lb = lb
        self._partition = partition
        self._pattern = pattern
        if lb is not None:
            self.refresh()
        else:
            self.dictionary = fromdict

    @f5.util.restore_session_values
    def refresh(self):
        """Re-populate the list from the lb honoring partition/pattern."""
        self.lb.active_folder = self._partition
        if self._partition == '/':
            self.lb.recursive_query = True
        pools = Pool._get(self._lb, self._pattern)
        del self[:]
        self.extend(pools)

    @f5.util.lbtransaction
    def sync(self, create=False):
        """Push locally cached state for all pools to the lb."""
        # NOTE(review): the non-create path assigns self.lbmethod and
        # self.members, but no such setters are defined on PoolList, so it
        # raises AttributeError as written -- confirm intended behavior.
        if create is True:
            self._lbcall('create_v2', [self.names], self._getattr('_lbmethod'),
                    [self._getattr('_members')])
        else:
            self.lbmethod = self._getattr('_lbmethod')
            self.members = self._getattr('_members')
        self.description = self._getattr('_description')

    def _lbcall(self, call, *args, **kwargs):
        return Pool._lbcall(self._lb, call, *args, **kwargs)

    def _setattr(self, attr, values):
        """Set `attr` on every pool; `values` is parallel to the list."""
        if len(values) != len(self):
            raise ValueError('value must be of same length as list')
        for idx, pool in enumerate(self):
            setattr(pool, attr, values[idx])

    def _getattr(self, attr):
        """Collect `attr` from every pool into a list."""
        return [getattr(pool, attr) for pool in self]

    @property
    def partition(self):
        return self._partition

    @partition.setter
    def partition(self, value):
        self._partition = value
        # Bug fix: previously called the undefined global 'refresh()'.
        self.refresh()

    @property
    def pattern(self):
        return self._pattern

    @pattern.setter
    def pattern(self, value):
        self._pattern = value
        self.refresh()

    #### DESCRIPTION ####
    @property
    def description(self):
        values = self._lbcall('get_description', self.names)
        self._setattr('_description', values)
        return values

    @description.setter
    @f5.util.multisetter
    def description(self, values):
        self._lbcall('set_description', self.names, values)
        self._setattr('_description', values)

    @property
    def _description(self):
        return self._getattr('_description')

    @_description.setter
    @f5.util.multisetter
    def _description(self, values):
        # Bug fix: this setter was accidentally named '_lbmethod', which
        # left the _description property without a working setter.
        self._setattr('_description', values)

    #### LBMETHOD ####
    @property
    def lbmethod(self):
        # Bug fix: the call name was 'get_lbmethod'; the iControl method
        # (as used by Pool above) is 'get_lb_method'.
        values = self._lbcall('get_lb_method', self.names)
        self._setattr('_lbmethod', values)
        return values

    @property
    def _lbmethod(self):
        return self._getattr('_lbmethod')

    @_lbmethod.setter
    @f5.util.multisetter
    def _lbmethod(self, values):
        self._setattr('_lbmethod', values)

    #### LB ####
    @property
    def lb(self):
        return self._lb

    @lb.setter
    @f5.util.multisetter
    def lb(self, value):
        self._setattr('_lb', value)
        self._lb = value

    #### NAME ####
    @property
    def names(self):
        return self._getattr('name')

    @property
    def _names(self):
        # Bug fix: previously returned self._names, which recursed forever.
        return self._getattr('_name')

    @_names.setter
    def _names(self, values):
        self._setattr('_name', values)

    ### RATE_LIMIT ###
    # NOTE(review): rate_limit/ratio accessors look copied from a node
    # list class; LocalLB.Pool may not expose these calls -- confirm.
    @property
    def rate_limit(self):
        values = self._lbcall('get_rate_limit', self.names)
        self._setattr('_rate_limit', values)
        return values

    @rate_limit.setter
    @f5.util.multisetter
    def rate_limit(self, values):
        self._lbcall('set_rate_limit', self.names, values)
        self._setattr('_rate_limit', values)

    @property
    def _rate_limit(self):
        return self._getattr('_rate_limit')

    @_rate_limit.setter
    @f5.util.multisetter
    def _rate_limit(self, values):
        return self._setattr('_rate_limit', values)

    ### RATIO ###
    @property
    def ratio(self):
        values = self._lbcall('get_ratio', self.names)
        self._setattr('_ratio', values)
        return values

    @ratio.setter
    @f5.util.multisetter
    def ratio(self, values):
        self._lbcall('set_ratio', self.names, values)
        self._setattr('_ratio', values)

    @property
    def _ratio(self):
        return self._getattr('_ratio')

    @_ratio.setter
    @f5.util.multisetter
    def _ratio(self, values):
        return self._setattr('_ratio', values)

    #### STATUS_DESCR ####
    @property
    def status_descr(self):
        values = [s['status_description'] for s in self._lbcall('get_object_status', self.names)]
        self._setattr('_status_descr', values)
        return values

    @property
    def _status_descr(self):
        return self._getattr('_status_descr')

    #### DICTIONARY ####
    # NOTE(review): the dictionary accessors below reference Node and
    # node-only attributes (address, av_status, connection_limit, enabled,
    # dynamic_ratio); they appear copy-pasted from a NodeList class and
    # will fail at runtime for pools -- left as-is pending a real port.
    @property
    def dictionary(self):
        d = {}
        d['lb'] = self.lb
        d['partition'] = self.partition
        d['pattern'] = self.pattern
        self.address
        self.av_status
        self.connection_limit
        self.description
        self.dynamic_ratio
        self.enabled
        self.rate_limit
        self.ratio
        self.status_descr
        d['nodes'] = [node._dictionary for node in self]
        return d

    @property
    def _dictionary(self):
        d = {}
        d['lb'] = self.lb
        d['partition'] = self.partition
        d['pattern'] = self.pattern
        # We're in asynchronous mode so we can simply use Node's builtin ._dictionary
        d['nodes'] = [node._dictionary for node in self]
        return d

    @dictionary.setter
    @f5.util.lbtransaction
    def dictionary(self, _dict):
        # Set asynchronous attributes so we don't refresh from lb
        self._lb = _dict['lb']
        self._partition = _dict['partition']
        self._pattern = _dict['pattern']
        del self[:]
        self.extend(Node.factory.create([d['name'] for d in _dict['nodes']], self._lb))
        self.connection_limit = [node['connection_limit'] for node in _dict['nodes']]
        self.description = [node['description'] for node in _dict['nodes']]
        self.dynamic_ratio = [node['dynamic_ratio'] for node in _dict['nodes']]
        self.enabled = [node['enabled'] for node in _dict['nodes']]
        self.rate_limit = [node['rate_limit'] for node in _dict['nodes']]
        self.ratio = [node['ratio'] for node in _dict['nodes']]

    @_dictionary.setter
    def _dictionary(self, _dict):
        self._lb = _dict['lb']
        self._partition = _dict['partition']
        self._pattern = _dict['pattern']
        nodes = Node.factory.create([d['name'] for d in _dict['nodes']], self._lb)
        # We're in asynchronous mode so we can simply use Node's builtin ._dictionary
        for idx, node in enumerate(nodes):
            node._dictionary = _dict['nodes'][idx]
        del self[:]
        self.extend(nodes)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import sys
import sqlalchemy as sa
from dnrm.db.sqlalchemy import models
from dnrm.exceptions import db as exceptions
from dnrm.openstack.common.db.sqlalchemy import session as db_session
def get_backend():
    """The backend is this module itself."""
    backend_module = sys.modules[__name__]
    return backend_module
def db_create():
    """Create all DNRM tables in the configured database."""
    models.create_db()
def db_drop():
    """Drop all DNRM tables from the configured database."""
    models.drop_db()
def db_cleanup():
    """Dispose of the cached SQLAlchemy session/engine resources."""
    db_session.cleanup()
def model_query(model, session=None):
    """Return a query over `model`, opening a session when none is given."""
    active_session = session or db_session.get_session()
    return active_session.query(model)
def falsy(value):
    # Despite the name, this returns the *truthiness* used for boolean
    # filters: True for truthy values that are not the (case-insensitive)
    # string 'false'. NOTE: `unicode` is Python 2 only.
    return bool(value) and (not isinstance(value, (str, unicode)) or
                            value.lower() != 'false')
def filters_to_condition(model, filter_fields, filter_values):
    """Translate a filter dict into a single SQLAlchemy boolean condition.

    :param model: mapped model class whose columns are filtered
    :param filter_fields: iterable of column names allowed as filters
    :param filter_values: requested filters; may contain the virtual keys
        'class' (stored as column 'klass') and 'unused' (derived from pool)
    :returns: an ``sa.and_()`` expression or None when nothing to filter on
    """
    filter_values = copy.deepcopy(filter_values)
    # 'class' is a Python reserved word; the column is named 'klass'.
    if 'class' in filter_values:
        filter_values['klass'] = filter_values.pop('class')
    and_list = []
    if 'unused' in filter_values:
        # A resource is "unused" when it belongs to no pool.
        # '== None' / '!= None' are intentional: SQLAlchemy overloads them
        # to IS NULL / IS NOT NULL (do not rewrite with 'is None').
        if falsy(filter_values.pop('unused')):
            and_list.append(model.pool == None)
        else:
            and_list.append(model.pool != None)
    for key in filter_fields:
        column = getattr(model, key)
        if key not in filter_values:
            continue
        value = filter_values.pop(key)
        # Boolean columns accept textual 'true'/'false' values as well.
        if isinstance(column.property.columns[0].type, sa.Boolean):
            value = falsy(value)
        if isinstance(value, (list, tuple, set)):
            expr = column.in_(set(value))
        else:
            expr = (column == value)
        and_list.append(expr)
    if and_list:
        return sa.and_(*and_list)
    else:
        return None
###############################################################################
# Resources
def _resource_to_dict(resource):
resource = dict(resource)
resource['class'] = resource.pop('klass')
data = resource.pop('data', {})
resource.update(data)
resource['unused'] = resource['pool'] is None
return resource
def _update_resource(resource, values):
    """Apply `values` onto a Resource, routing unknown keys into 'data'.

    'id' and 'unused' are read-only and dropped; 'class' is stored as
    'klass'. Keys listed in Resource.FILTER_FIELDS become real columns;
    anything left over is merged into the JSON 'data' blob.
    """
    values = copy.deepcopy(values)
    for read_only in ('id', 'unused'):
        values.pop(read_only, None)
    if 'class' in values:
        values['klass'] = values.pop('class')
    validated_values = {}
    for field in models.Resource.FILTER_FIELDS:
        if field in values:
            validated_values[field] = values.pop(field)
    if values:
        data = copy.deepcopy(resource['data']) or {}
        data.update(values)
        validated_values['data'] = data
    resource.update(validated_values)
def resource_create(driver_name, values):
    """Create and persist a new resource handled by driver `driver_name`.

    :returns: the created resource in external dict form
    """
    resource = models.Resource()
    _update_resource(resource, values)
    resource['type'] = driver_name
    resource.save()
    return _resource_to_dict(resource)
def _resource_get_by_id(id, session=None):
    """Return the Resource row with the given primary key.

    :raises: exceptions.ResourceNotFound when no row matches
    """
    resource = (model_query(models.Resource, session=session)
                .filter_by(id=id)
                .first())
    if not resource:
        raise exceptions.ResourceNotFound(id=id)
    return resource
def resource_get_by_id(id):
    """Return the resource with the given id in external dict form."""
    return _resource_to_dict(_resource_get_by_id(id))
def resource_update(id, values):
    """Update resource `id` with `values` inside one transaction.

    :raises: exceptions.ResourceNotFound when `id` does not exist
    :returns: the updated resource in external dict form
    """
    session = db_session.get_session()
    with session.begin():
        resource = _resource_get_by_id(id, session=session)
        _update_resource(resource, values)
        return _resource_to_dict(resource)
def resource_delete(id):
    """Delete resource `id`; raise ResourceNotFound when absent."""
    count = (model_query(models.Resource)
             .filter_by(id=id)
             .delete())
    if not count:
        raise exceptions.ResourceNotFound(id=id)
def make_query(model, search_opts, session=None):
    """Build a filtered and paged query for `model` from `search_opts`.

    Recognized options: 'filters' (dict), 'limit' and 'offset'. Any other
    key raises ValueError.
    """
    search_opts = copy.deepcopy(search_opts)
    filters = search_opts.pop('filters', {})
    limit = search_opts.pop('limit', None)
    offset = search_opts.pop('offset', None)
    if search_opts:
        # Interpolate before raising: passing keyword args to ValueError is
        # a TypeError and would mask the real problem.
        raise ValueError(_('Unexpected search options: %(options)s') %
                         {'options': ', '.join(search_opts.keys())})
    # Bug fix: query the model that was asked for (this was hard-coded to
    # models.Resource, silently ignoring the `model` argument).
    query = model_query(model, session=session)
    condition = filters_to_condition(model, model.FILTER_FIELDS, filters)
    if condition is not None:
        query = query.filter(condition)
    if offset is not None:
        # Bug fix: offset() was previously given `limit`.
        query = query.offset(offset)
    if limit is not None:
        query = query.limit(limit)
    return query
def resource_find(search_opts):
    """Return all resources matching `search_opts` as external dicts."""
    query = make_query(models.Resource, search_opts)
    return [_resource_to_dict(resource) for resource in query.all()]
def resource_count(search_opts):
    """Return the number of resources matching `search_opts`."""
    query = make_query(models.Resource, search_opts)
    return query.count()
def resource_compare_update(id, filters, values):
    """Update the resource with *id* only if it also matches *filters*.

    :returns: the updated resource as a dict, or None when no resource
        satisfied both the id and the filters.
    """
    session = db_session.get_session()
    with session.begin():
        query = make_query(models.Resource,
                           {'filters': copy.deepcopy(filters)},
                           session)
        resource = query.filter(models.Resource.id == id).first()
        if not resource:
            return None
        _update_resource(resource, values)
        return _resource_to_dict(resource)
| |
__version__ = "0.5.1"
from collections.abc import Generator
class Try_:
    """Base class of the Try monad; instances are Success or Failure.

    Truthiness distinguishes the two: subclasses implement ``__len__``
    (Success -> 1, Failure -> 0), so ``bool(instance)`` backs the
    ``isSuccess``/``isFailure`` properties.
    """
    # Exception classes that Try() re-raises instead of wrapping in a
    # Failure; configured via set_unhandled().
    _unhandled = ()
    @staticmethod
    def set_unhandled(es=None):
        """Set a list of the unhandled exceptions.
        :param es: an iterable of exceptions or None
        >>> from operator import getitem
        >>> Try(getitem, [], 0) # doctest:+ELLIPSIS
        Failure(IndexError(...))
        >>> Try_.set_unhandled([IndexError])
        >>> Try(getitem, [], 0) # doctest:+ELLIPSIS
        Traceback (most recent call last):
        ...
        IndexError: ...
        >>> Try_.set_unhandled()
        >>> Try(getitem, [], 0) # doctest:+ELLIPSIS
        Failure(IndexError(...))
        """
        Try_._unhandled = tuple(es) if es is not None else tuple()
    @property
    def _v(self):
        # Wrapped value (Success) or stored exception (Failure).
        raise NotImplementedError # pragma: no cover
    @property
    def _fmt(self):
        # repr() template, e.g. "Success({0})".
        raise NotImplementedError # pragma: no cover
    @staticmethod
    def _identity_if_try_or_raise(v, msg="Invalid return type for f: {0}"):
        # Guard: return v unchanged when it is a Try_, raise otherwise.
        if not isinstance(v, Try_):
            raise TypeError(msg.format(type(v)))
        return v
    @staticmethod
    def _raise_if_not_exception(e, msg="Invalid type for Failure: {0}"):
        # Guard: raise TypeError unless e is an Exception instance.
        if not isinstance(e, Exception):
            raise TypeError(msg.format(type(e)))
    def __init__(self, _):
        # Abstract: direct construction is forbidden.
        raise NotImplementedError(
            "Use Try function or Success/Failure instead."
        ) # pragma: no cover
    def __ne__(self, other):
        """
        >>> Success(1) != Failure(Exception())
        True
        """
        return not self.__eq__(other)
    def __repr__(self):
        return self._fmt.format(repr(self._v))
    def get(self):
        """If this is Success get wrapped value otherwise
        throw stored exception
        :return: stored value
        >>> Success(1).get()
        1
        >>> Failure(Exception("e")).get()
        Traceback (most recent call last):
        ...
        Exception: e
        """
        raise NotImplementedError # pragma: no cover
    def getOrElse(self, default):
        """If this is a Success get stored value otherwise
        return default
        :param default: value to return if this is a Failure
        :return:
        >>> Success(1).getOrElse(0)
        1
        >>> Failure(Exception("e")).getOrElse(0)
        0
        """
        raise NotImplementedError # pragma: no cover
    def orElse(self, default):
        """If this is a Success return self otherwise
        default
        :param default: Try_
        :return:
        >>> Success(1).orElse(Success(0))
        Success(1)
        >>> Failure(Exception("e")).orElse(Success(0))
        Success(0)
        """
        raise NotImplementedError # pragma: no cover
    def map(self, f):
        """Apply function to the value.
        :param f: function to be applied
        :return: self if this is a Failure otherwise Try(f, self.get)
        >>> inc = lambda x: x + 1
        >>> def f(e): raise Exception("e")
        >>> Success(1).map(inc)
        Success(2)
        >>> Failure(Exception("e")).map(inc) # doctest:+ELLIPSIS
        Failure(...)
        >>> Success("1").map(f) # doctest:+ELLIPSIS
        Failure(...)
        """
        raise NotImplementedError # pragma: no cover
    def flatMap(self, f):
        """Apply function returning Try_ to the value.
        :param f: function to be applied.
        :return: self if this is a Failure otherwise f applied to self.get
        >>> from operator import add
        >>> Success(1).flatMap(lambda x: Try(add, x, 1))
        Success(2)
        >>> Failure(Exception("e")).flatMap(lambda x: Try(add, x, 1)) # doctest:+ELLIPSIS
        Failure(...)
        >>> Success(1).flatMap(lambda x: Try(add, x, "0")) # doctest:+ELLIPSIS
        Failure(TypeError(...))
        """
        raise NotImplementedError # pragma: no cover
    def filter(self, f, exception_cls=Exception, msg=None):
        """Convert this to Failure if f(self.get()) evaluates to False
        :param f: function to be applied
        :param exception_cls: optional exception class to return
        :param msg: optional message
        :return: self if f evaluates to True otherwise Failure
        >>> Success(1).filter(lambda x: x > 0)
        Success(1)
        >>> Success(1).filter(lambda x: x < 0, msg="Greater than zero") # doctest:+ELLIPSIS
        Failure(Exception(...))
        >>> Failure(Exception("e")).filter(lambda x: x) # doctest:+ELLIPSIS
        Failure(Exception(...))
        """
        raise NotImplementedError # pragma: no cover
    def recover(self, f):
        """If this is a Failure apply f to value otherwise
        :param f: function to be applied
        :return: Either Success of Failure
        >>> def f(e): raise Exception("e")
        >>> Success(1).recover(lambda e: 0)
        Success(1)
        >>> Failure(Exception("e")).recover(lambda e: 0)
        Success(0)
        >>> Failure(Exception("e")).recover(f) # doctest:+ELLIPSIS
        Failure(Exception(...))
        """
        raise NotImplementedError # pragma: no cover
    def recoverWith(self, f):
        """If this is a Failure apply f to self otherwise
        return this
        :param f: function to be applied
        :return: Either Success of Failure
        >>> Success(1).recoverWith(lambda t: Try(lambda: 0))
        Success(1)
        >>> Failure(Exception("e")).recoverWith(lambda t: Try(lambda: 0))
        Success(0)
        """
        raise NotImplementedError # pragma: no cover
    def failed(self):
        """Inverts this Try_.
        If it is a Failure it returns its exception wrapped with Success.
        If it is a Success it returns Failure(TypeError())
        :return: Try_[T]
        >>> Success(1).failed() # doctest:+ELLIPSIS
        Failure(TypeError(...))
        >>> Failure(Exception("e")).failed() # doctest:+ELLIPSIS
        Success(Exception(...))
        """
        raise NotImplementedError # pragma: no cover
    @property
    def isFailure(self):
        """Check if this is a Failure.
        >>> Success(1).isFailure
        False
        >>> Failure(Exception()).isFailure
        True
        """
        return not bool(self)
    @property
    def isSuccess(self):
        """Check if this is a Success.
        >>> Success(1).isSuccess
        True
        >>> Failure(Exception()).isSuccess
        False
        """
        return bool(self)
class Success(Try_):
    """Represents a successful computation."""
    __slots__ = ("_v",)
    _fmt = "Success({0})"
    @staticmethod
    def __len__():
        # Truthy: a Success holds exactly one value.
        return 1
    def __init__(self, v):
        self._v = v
    def __eq__(self, other):
        """
        >>> Success(1) == Success(1)
        True
        >>> Success(1) == Success(2)
        False
        >>> Success(1) == 1
        False
        """
        if not isinstance(other, Success):
            return False
        return self._v == other._v
    def __hash__(self):
        try:
            return hash(self._v)
        except TypeError as e:
            raise TypeError("Cannot hash try with unhashable value") from e
    def get(self):
        return self._v
    def getOrElse(self, default):
        return self.get()
    def orElse(self, default):
        return self
    def map(self, f):
        return Try(f, self._v)
    def flatMap(self, f):
        attempt = Try(f, self._v)
        # f must itself return a Try_; unwrap unless applying f failed.
        result = attempt if attempt.isFailure else attempt.get()
        return Try_._identity_if_try_or_raise(result)
    def filter(self, f, exception_cls=Exception, msg=None):
        if not f(self.get()):
            return Failure(exception_cls(msg if msg else repr(f)))
        return self
    def recover(self, f):
        return self
    def recoverWith(self, f):
        return self
    def failed(self):
        return Failure(TypeError())
class Failure(Try_):
    """Represents an unsuccessful computation."""
    __slots__ = ("_v",)
    _fmt = "Failure({0})"
    @staticmethod
    def __len__():
        # Falsy: a Failure holds no value.
        return 0
    def __init__(self, e):
        Try_._raise_if_not_exception(e)
        self._v: Exception = e
    def __eq__(self, other):
        """
        >>> Failure(Exception("e")) == Failure(Exception("e"))
        True
        >>> Failure(Exception(-1)) == Failure(Exception(0))
        False
        >>> Failure(Exception("e")) == Exception("e")
        False
        """
        if not isinstance(other, Failure):
            return False
        # Exact type match on purpose: a subclass exception is not equal,
        # so isinstance/issubclass would be wrong here.
        same_type = type(self._v) is type(other._v)
        return same_type and self._v.args == other._v.args
    def __hash__(self):
        try:
            return hash(self._v)
        except TypeError as e:
            raise TypeError("Cannot hash try with unhashable value") from e
    def get(self):
        raise self._v
    def getOrElse(self, default):
        return default
    def orElse(self, default):
        return Try_._identity_if_try_or_raise(default)
    def map(self, f):
        return Failure(self._v)
    def flatMap(self, f):
        return Failure(self._v)
    def filter(self, f, exception_cls=Exception, msg=None):
        return self
    def recover(self, f):
        # Recovery is mapping over the stored exception.
        return Success(self._v).map(f)
    def recoverWith(self, f):
        return Success(self._v).flatMap(f)
    def failed(self):
        return Success(self._v)
def Try(f, *args, **kwargs):
    """Evaluates f with provided arguments and wraps the result
    using either Success or Failure.
    :param f: Either Callable or Iterator or Generator which should be evaluated.
        If f is Callable it is called with given args and kwargs
        If f is Iterator we evaluate its next
        If f is Generator and there is single arg we evaluate f.send(arg)
        In such case f is not primed.
    :param args: args which should be passed to f
    :param kwargs: kwargs which should be passed to f
    :return: Either success or Failure
    >>> from operator import add, truediv
    >>> Try(truediv, 1, 0) # doctest:+ELLIPSIS
    Failure(ZeroDivisionError(...))
    >>> Try(add, 1, 2)
    Success(3)
    """
    try:
        if callable(f):
            result = f(*args, **kwargs)
        elif isinstance(f, Generator) and len(args) == 1 and not kwargs:
            result = f.send(args[0])
        elif isinstance(f, Generator) and not args and not kwargs:
            result = next(f)
        else:
            # Raised inside the try on purpose: an unsupported combination
            # surfaces as a Failure(TypeError), like the original behavior.
            raise TypeError(
                "Don't know how to try {} with {} and {}".format(type(f), args, kwargs)
            )
        return Success(result)
    except Try_._unhandled as e:  # type: ignore
        raise e
    except Exception as e:
        return Failure(e)
| |
import unittest
from bowling import BowlingGame
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.2.0
class BowlingTest(unittest.TestCase):
    """Black-box tests of BowlingGame.roll()/score()."""

    def roll(self, rolls):
        # FIX: roll() is called purely for its side effect, so a plain
        # loop replaces the list comprehension whose result was discarded.
        for pins in rolls:
            self.game.roll(pins)

    def roll_and_score(self, rolls):
        # Convenience: roll every throw, then score the finished game.
        self.roll(rolls)
        return self.game.score()

    def test_should_be_able_to_score_a_game_with_all_zeros(self):
        rolls = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        score = self.roll_and_score(rolls)
        self.assertEqual(score, 0)

    def test_should_be_able_to_score_a_game_with_no_strikes_or_spares(self):
        rolls = [3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6]
        score = self.roll_and_score(rolls)
        self.assertEqual(score, 90)

    def test_a_spare_follow_by_zeros_is_worth_ten_points(self):
        rolls = [6, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        score = self.roll_and_score(rolls)
        self.assertEqual(score, 10)

    def test_points_scored_in_the_roll_after_a_spare_are_counted_twice(self):
        rolls = [6, 4, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        score = self.roll_and_score(rolls)
        self.assertEqual(score, 16)

    def test_consecutive_spares_each_get_a_one_roll_bonus(self):
        rolls = [5, 5, 3, 7, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        score = self.roll_and_score(rolls)
        self.assertEqual(score, 31)

    def test_last_frame_spare_gets_bonus_roll_that_is_counted_twice(self):
        rolls = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 3, 7]
        score = self.roll_and_score(rolls)
        self.assertEqual(score, 17)

    def test_a_strike_earns_ten_points_in_a_frame_with_a_single_roll(self):
        rolls = [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        score = self.roll_and_score(rolls)
        self.assertEqual(score, 10)

    def test_two_rolls_points_after_strike_are_counted_twice(self):
        rolls = [10, 5, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        score = self.roll_and_score(rolls)
        self.assertEqual(score, 26)

    def test_consecutive_stikes_each_get_the_two_roll_bonus(self):
        rolls = [10, 10, 10, 5, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        score = self.roll_and_score(rolls)
        self.assertEqual(score, 81)

    def test_strike_in_last_frame_gets_two_roll_bonus_counted_once(self):
        rolls = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                 10, 7, 1]
        score = self.roll_and_score(rolls)
        self.assertEqual(score, 18)

    def test_rolling_spare_with_bonus_roll_does_not_get_bonus(self):
        rolls = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                 0, 10, 7, 3]
        score = self.roll_and_score(rolls)
        self.assertEqual(score, 20)

    def test_strikes_with_the_two_bonus_rolls_do_not_get_bonus_rolls(self):
        rolls = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10,
                 10, 10]
        score = self.roll_and_score(rolls)
        self.assertEqual(score, 30)

    def test_strike_with_bonus_after_spare_in_last_frame_gets_no_bonus(self):
        rolls = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7,
                 3, 10]
        score = self.roll_and_score(rolls)
        self.assertEqual(score, 20)

    def test_all_strikes_is_a_perfect_game(self):
        rolls = [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
        score = self.roll_and_score(rolls)
        self.assertEqual(score, 300)

    def test_rolls_cannot_score_negative_points(self):
        with self.assertRaisesWithMessage(ValueError):
            self.game.roll(-1)

    def test_a_roll_cannot_score_more_than_10_points(self):
        with self.assertRaisesWithMessage(ValueError):
            self.game.roll(11)

    def test_two_rolls_in_a_frame_cannot_score_more_than_10_points(self):
        self.game.roll(5)
        with self.assertRaisesWithMessage(ValueError):
            self.game.roll(6)

    def test_bonus_after_strike_in_last_frame_cannot_score_more_than_10(self):
        rolls = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10]
        self.roll(rolls)
        with self.assertRaisesWithMessage(ValueError):
            self.game.roll(11)

    def test_bonus_aft_last_frame_strk_can_be_more_than_10_if_1_is_strk(self):
        rolls = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10,
                 10, 6]
        score = self.roll_and_score(rolls)
        self.assertEqual(score, 26)

    def test_bonus_aft_last_frame_strk_cnt_be_strk_if_first_is_not_strk(self):
        rolls = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 6]
        self.roll(rolls)
        with self.assertRaisesWithMessage(ValueError):
            self.game.roll(10)

    def test_an_incomplete_game_cannot_be_scored(self):
        rolls = [0, 0]
        self.roll(rolls)
        with self.assertRaisesWithMessage(IndexError):
            self.game.score()

    def test_cannot_roll_if_there_are_already_ten_frames(self):
        rolls = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        self.roll(rolls)
        with self.assertRaisesWithMessage(IndexError):
            self.game.roll(0)

    def test_bonus_rolls_for_strike_must_be_rolled_before_score_is_calc(self):
        rolls = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10]
        self.roll(rolls)
        with self.assertRaisesWithMessage(IndexError):
            self.game.score()

    def test_both_bonuses_for_strike_must_be_rolled_before_score(self):
        rolls = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 10]
        self.roll(rolls)
        with self.assertRaisesWithMessage(IndexError):
            self.game.score()

    def test_bonus_rolls_for_spare_must_be_rolled_before_score_is_calc(self):
        rolls = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 3]
        self.roll(rolls)
        with self.assertRaisesWithMessage(IndexError):
            self.game.score()

    def test_cannot_roll_after_bonus_roll_for_spare(self):
        rolls = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 3, 2]
        self.roll(rolls)
        with self.assertRaisesWithMessage(IndexError):
            self.game.roll(2)

    def test_cannot_roll_after_bonus_rolls_for_strike(self):
        rolls = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10,
                 3, 2]
        self.roll(rolls)
        with self.assertRaisesWithMessage(IndexError):
            self.game.roll(2)

    # Utility functions
    def setUp(self):
        self.game = BowlingGame()
        # Python 2 compatibility: alias the old assertRaisesRegexp name.
        try:
            self.assertRaisesRegex
        except AttributeError:
            self.assertRaisesRegex = self.assertRaisesRegexp

    def assertRaisesWithMessage(self, exception):
        # Require the raised exception to carry a non-empty message.
        return self.assertRaisesRegex(exception, r".+")
# Run the suite directly with ``python <file>.py`` (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| |
from subprocess import Popen, PIPE
from os.path import expanduser
from gi.repository import Gtk, Gdk
import json
import sys
import time
import os
import getpass
import argparse
GPG_ERR_PASSPHRASE = "Bad session key"
FILENAME = "entries"
DIRECTORY = expanduser("~") + "/.sef"
FULLPATH = DIRECTORY + "/" + FILENAME
class GPGException(Exception):
    """Raised when gpg reports a failure (e.g. a bad passphrase)."""
    pass
class Entries:
    """In-memory store of password entries backed by a gpg-encrypted file.

    ``self.entries`` maps entry name -> dict with at least the keys
    'password', 'name' and 'tag' (see op_add).
    """

    def __init__(self, passphrase, file_path):
        self.passphrase = passphrase
        self.path = file_path

    def encrypt_msg(self, msg):
        """Symmetrically encrypt *msg* into self.path via gpg.

        :returns: the (stdout, stderr) bytes pair from gpg.
        """
        cmd = ["gpg",
               "--batch",
               "--yes",
               "--symmetric",
               "--passphrase", self.passphrase,
               "-o", self.path,
               "--cipher-algo", "AES256"]
        proc = Popen(cmd,
                     stdout=PIPE,
                     stderr=PIPE,
                     stdin=PIPE,
                     shell=False)
        proc.stdin.write(bytes(msg, "UTF-8"))
        return proc.communicate()

    def decrypt_msg(self):
        """Decrypt self.path via gpg.

        :returns: the (stdout, stderr) bytes pair from gpg.
        """
        cmd = ["gpg",
               "--batch",
               "--yes",
               "--decrypt",
               "--passphrase", self.passphrase,
               "--cipher-algo", "AES256",
               self.path]
        proc = Popen(cmd,
                     stdin=PIPE,
                     stdout=PIPE,
                     stderr=PIPE,
                     shell=False)
        return proc.communicate()

    def load(self):
        """Decrypt and parse the entries file into self.entries.

        :raises GPGException: on a bad passphrase or a corrupt file.
        """
        out, err = self.decrypt_msg()
        out = out.decode("UTF-8")
        err = err.decode("UTF-8")
        if GPG_ERR_PASSPHRASE in err:
            raise GPGException("Bad passphrase")
        if not out:
            self.entries = {}
        else:
            try:
                self.entries = json.loads(out)
            except ValueError as e:
                # BUG FIX: the original only printed here, which left
                # self.entries unset and crashed callers later with
                # AttributeError. Raising GPGException is already handled
                # (and printed) by every caller.
                raise GPGException("File format corruption") from e

    def save(self):
        """Serialize self.entries and encrypt them back to disk.

        :raises GPGException: on a bad passphrase.
        """
        e_json = json.dumps(self.entries)
        out, err = self.encrypt_msg(e_json)
        err = err.decode("UTF-8")
        if GPG_ERR_PASSPHRASE in err:
            raise GPGException("Bad passphrase")

    def add(self, entry, entry_name):
        """Add *entry* under *entry_name*; return False if the name exists."""
        if entry_name in self.entries:
            return False
        self.entries[entry_name] = entry
        return True

    def get_names(self):
        """Return all entry names as a list."""
        return list(self.entries)

    def get_names_by_tag(self, tag):
        """Return the names of all entries whose 'tag' equals *tag*."""
        return [name for name, entry in self.entries.items()
                if entry["tag"] == tag]

    def get_tags(self, tag):
        """Return the set of tags in use.

        :param tag: unused; kept for backward compatibility.
        """
        # BUG FIX: iterate the entry dicts (values), not the key strings --
        # indexing a key string with ["tag"] raised TypeError.
        return {entry["tag"] for entry in self.entries.values()}

    def get(self, entry):
        """Return the entry dict for *entry*; raises KeyError if missing."""
        return self.entries[entry]

    def remove(self, entry):
        """Delete *entry*; raises KeyError if missing."""
        del self.entries[entry]

    def to_json(self):
        """Return the (unencrypted) entries as a JSON string."""
        return json.dumps(self.entries)
def op_add(args):
    """Interactively prompt for a new entry and store it encrypted."""
    p1 = getpass.getpass("[Enter password]: ")
    p2 = getpass.getpass("[Re-Enter password]: ")
    p3 = getpass.getpass("[Enter Passphrase]: ")
    entry = {"password": p1,
             "name": args.name,
             "tag": args.tag}
    if p1 != p2:
        # FIX: corrected user-facing typo ("missmatch").
        print("Password mismatch!")
        return
    try:
        entries = Entries(p3, FULLPATH)
        entries.load()
        if entries.add(entry, args.entry):
            entries.save()
            print("New entry created.")
        else:
            print("The entry already exists!")
    except GPGException as e:
        print(e)
def op_list(args):
    """Print entry names, optionally restricted to entries with args.tag."""
    passphrase = getpass.getpass("[Enter Passphrase]: ")
    try:
        store = Entries(passphrase, FULLPATH)
        store.load()
        if args.tag:
            names = store.get_names_by_tag(args.tag)
        else:
            names = store.get_names()
        for name in names:
            print(name)
    except GPGException as e:
        print(e)
def op_get_attribute(args, attribute):
    """Fetch one attribute of an entry and print it or copy it to the clipboard."""
    if not args.entry:
        print('No entry specified. Use "-e [entry]"')
        return
    passphrase = getpass.getpass("[Enter Passphrase]: ")
    try:
        store = Entries(passphrase, FULLPATH)
        store.load()
        value = store.get(args.entry)[attribute]
        if not args.stdout:
            # Default behavior: put the value on the GTK clipboard.
            clip = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
            clip.set_text(value, -1)
            clip.store()
        else:
            print(value)
    except GPGException as e:
        print(e)
    except KeyError:
        print("No such entry.")
def op_remove(args):
    """Delete the entry named by args.entry and re-encrypt the store."""
    if not args.entry:
        print('No entry specified. Use "-e [entry]"')
        return
    passphrase = getpass.getpass("[Enter Passphrase]: ")
    try:
        store = Entries(passphrase, FULLPATH)
        store.load()
        store.remove(args.entry)
        store.save()
        print("Entry removed.")
    except GPGException as e:
        print(e)
    except KeyError:
        print("No such entry.")
def op_export(args):
    """Dump the decrypted store to stdout as JSON."""
    passphrase = getpass.getpass("[Enter Passphrase]: ")
    try:
        store = Entries(passphrase, FULLPATH)
        store.load()
        print(store.to_json())
    except GPGException as e:
        print(e)
def get_args_parser():
    """Build the command-line argument parser for the password utility."""
    parser = argparse.ArgumentParser(description="Password Util")
    parser.add_argument(
        "operation",
        help="Operations: add, remove, update, export")
    parser.add_argument("-e", "--entry", help="The entry")
    parser.add_argument("-n", "--name", help="Username")
    parser.add_argument("-t", "--tag", help="Entry tag", default="")
    parser.add_argument(
        "-o", "--stdout", help="Print to stdout", action="store_true")
    return parser
def process_actions():
    """Parse command-line arguments and dispatch to the matching op_* handler."""
    parser = get_args_parser()
    args = parser.parse_args()
    if args.operation == "export":
        op_export(args)
    elif args.operation == "add":
        op_add(args)
    elif args.operation == "remove":
        op_remove(args)
    elif args.operation == "list":
        op_list(args)
    elif args.operation == "getpass":
        op_get_attribute(args, "password")
    elif args.operation == "getuser":
        op_get_attribute(args, "name")
    else:
        # BUG FIX: args is an argparse.Namespace, not a dict --
        # args["operation"] raised TypeError. Also fixed "Unknow" typo.
        print("Unknown operation " + "\"" + args.operation + "\"")
if __name__ == "__main__":
    # Ensure the storage directory exists before any operation runs.
    if not os.path.exists(DIRECTORY):
        os.makedirs(DIRECTORY)
    process_actions()
    # daemonize
    # Fork: the parent exits immediately so the shell prompt returns; the
    # child lingers for 8 seconds, then wipes the clipboard so a copied
    # password does not stay around indefinitely.
    if os.fork():
        sys.exit()
    time.sleep(8)
    clip = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
    clip.clear()
    clip.store()
| |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Boot Interface for iLO drivers and its supporting methods.
"""
import os
import tempfile
from ironic_lib import utils as ironic_utils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six.moves.urllib.parse as urlparse
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import image_service
from ironic.common import images
from ironic.common import swift
from ironic.conductor import utils as manager_utils
from ironic.drivers import base
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.ilo import common as ilo_common
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
REQUIRED_PROPERTIES = {
'ilo_deploy_iso': _("UUID (from Glance) of the deployment ISO. "
"Required.")
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES
def parse_driver_info(node):
    """Gets the driver specific Node deployment info.

    Validates that the node's 'driver_info' carries everything this
    driver needs to deploy images to the node.

    :param node: a single Node.
    :returns: A dict with the driver_info values.
    :raises: MissingParameterValue, if any of the required parameters are
        missing.
    """
    d_info = {'ilo_deploy_iso': node.driver_info.get('ilo_deploy_iso')}
    error_msg = _("Error validating iLO virtual media deploy. Some parameters"
                  " were missing in node's driver_info")
    deploy_utils.check_for_missing_params(d_info, error_msg)
    return d_info
def _get_boot_iso_object_name(node):
"""Returns the boot iso object name for a given node.
:param node: the node for which object name is to be provided.
"""
return "boot-%s" % node.uuid
def _get_boot_iso(task, root_uuid):
    """This method returns a boot ISO to boot the node.

    It chooses one of the three options in the order as below:
    1. Does nothing if 'ilo_boot_iso' is present in node's instance_info and
    'boot_iso_created_in_web_server' is not set in 'driver_internal_info'.
    2. Image deployed has a meta-property 'boot_iso' in Glance. This should
    refer to the UUID of the boot_iso which exists in Glance.
    3. Generates a boot ISO on the fly using kernel and ramdisk mentioned in
    the image deployed. It uploads the generated boot ISO to Swift.

    :param task: a TaskManager instance containing the node to act on.
    :param root_uuid: the uuid of the root partition.
    :returns: boot ISO URL. Should be either of below:
        * A Swift object - It should be of format 'swift:<object-name>'. It is
          assumed that the image object is present in
          CONF.ilo.swift_ilo_container;
        * A Glance image - It should be format 'glance://<glance-image-uuid>'
          or just <glance-image-uuid>;
        * An HTTP URL.
        On error finding the boot iso, it returns None.
    :raises: MissingParameterValue, if any of the required parameters are
        missing in the node's driver_info or instance_info.
    :raises: InvalidParameterValue, if any of the parameters have invalid
        value in the node's driver_info or instance_info.
    :raises: SwiftOperationError, if operation with Swift fails.
    :raises: ImageCreationFailed, if creation of boot ISO failed.
    :raises: exception.ImageRefValidationFailed if ilo_boot_iso is not
        HTTP(S) URL.
    """
    LOG.debug("Trying to get a boot ISO to boot the baremetal node")
    # Option 1 - Check if user has provided ilo_boot_iso in node's
    # instance_info
    driver_internal_info = task.node.driver_internal_info
    boot_iso_created_in_web_server = (
        driver_internal_info.get('boot_iso_created_in_web_server'))
    # The web-server flag means a previous deploy created the ISO itself,
    # so the instance_info value is not user-provided and must be ignored.
    if (task.node.instance_info.get('ilo_boot_iso')
            and not boot_iso_created_in_web_server):
        LOG.debug("Using ilo_boot_iso provided in node's instance_info")
        boot_iso = task.node.instance_info['ilo_boot_iso']
        # Non-Glance references must be reachable HTTP(S) URLs.
        if not service_utils.is_glance_image(boot_iso):
            try:
                image_service.HttpImageService().validate_href(boot_iso)
            except exception.ImageRefValidationFailed:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Virtual media deploy accepts only Glance "
                                  "images or HTTP(S) URLs as "
                                  "instance_info['ilo_boot_iso']. Either %s "
                                  "is not a valid HTTP(S) URL or is "
                                  "not reachable."), boot_iso)
        return task.node.instance_info['ilo_boot_iso']

    # Option 2 - Check if user has provided a boot_iso in Glance. If boot_iso
    # is a supported non-glance href execution will proceed to option 3.
    deploy_info = _parse_deploy_info(task.node)

    image_href = deploy_info['image_source']
    image_properties = (
        images.get_image_properties(
            task.context, image_href, ['boot_iso', 'kernel_id', 'ramdisk_id']))

    boot_iso_uuid = image_properties.get('boot_iso')
    # instance_info values take precedence over the image's properties.
    kernel_href = (task.node.instance_info.get('kernel') or
                   image_properties.get('kernel_id'))
    ramdisk_href = (task.node.instance_info.get('ramdisk') or
                    image_properties.get('ramdisk_id'))

    if boot_iso_uuid:
        LOG.debug("Found boot_iso %s in Glance", boot_iso_uuid)
        return boot_iso_uuid

    if not kernel_href or not ramdisk_href:
        LOG.error(_LE("Unable to find kernel or ramdisk for "
                      "image %(image)s to generate boot ISO for %(node)s"),
                  {'image': image_href, 'node': task.node.uuid})
        return

    # NOTE(rameshg87): Functionality to share the boot ISOs created for
    # similar instances (instances with same deployed image) is
    # not implemented as of now. Creation/Deletion of such a shared boot ISO
    # will require synchronisation across conductor nodes for the shared boot
    # ISO. Such a synchronisation mechanism doesn't exist in ironic as of now.

    # Option 3 - Create boot_iso from kernel/ramdisk, upload to Swift
    # or web server and provide its name.
    deploy_iso_uuid = deploy_info['ilo_deploy_iso']
    boot_mode = deploy_utils.get_boot_mode_for_deploy(task.node)
    boot_iso_object_name = _get_boot_iso_object_name(task.node)
    kernel_params = CONF.pxe.pxe_append_params
    # The temp file only needs to live until it is copied/uploaded below.
    with tempfile.NamedTemporaryFile(dir=CONF.tempdir) as fileobj:
        boot_iso_tmp_file = fileobj.name
        images.create_boot_iso(task.context, boot_iso_tmp_file,
                               kernel_href, ramdisk_href,
                               deploy_iso_uuid, root_uuid,
                               kernel_params, boot_mode)

        if CONF.ilo.use_web_server_for_images:
            boot_iso_url = (
                ilo_common.copy_image_to_web_server(boot_iso_tmp_file,
                                                    boot_iso_object_name))
            # Record that we generated this ISO so Option 1 skips it on
            # the next pass and cleanup knows to delete it.
            driver_internal_info = task.node.driver_internal_info
            driver_internal_info['boot_iso_created_in_web_server'] = True
            task.node.driver_internal_info = driver_internal_info
            task.node.save()
            LOG.debug("Created boot_iso %(boot_iso)s for node %(node)s",
                      {'boot_iso': boot_iso_url, 'node': task.node.uuid})
            return boot_iso_url
        else:
            container = CONF.ilo.swift_ilo_container
            swift_api = swift.SwiftAPI()
            swift_api.create_object(container, boot_iso_object_name,
                                    boot_iso_tmp_file)
            LOG.debug("Created boot_iso %s in Swift", boot_iso_object_name)
            return 'swift:%s' % boot_iso_object_name
def _clean_up_boot_iso_for_instance(node):
    """Deletes the boot ISO if it was created for the instance.

    :param node: an ironic node object.
    """
    ilo_boot_iso = node.instance_info.get('ilo_boot_iso')
    if not ilo_boot_iso:
        return
    # A 'swift:' prefixed reference means _get_boot_iso uploaded the ISO
    # to Swift and it is ours to delete.
    if ilo_boot_iso.startswith('swift'):
        swift_api = swift.SwiftAPI()
        container = CONF.ilo.swift_ilo_container
        boot_iso_object_name = _get_boot_iso_object_name(node)
        try:
            swift_api.delete_object(container, boot_iso_object_name)
        except exception.SwiftOperationError as e:
            # Best-effort cleanup: log and continue rather than failing
            # the instance teardown.
            LOG.exception(_LE("Failed to clean up boot ISO for node "
                              "%(node)s. Error: %(error)s."),
                          {'node': node.uuid, 'error': e})
    elif CONF.ilo.use_web_server_for_images:
        # The ISO was copied under the conductor's HTTP root; unlink it
        # (unlink_without_raise makes this best-effort too).
        result = urlparse.urlparse(ilo_boot_iso)
        ilo_boot_iso_name = os.path.basename(result.path)
        boot_iso_path = os.path.join(
            CONF.deploy.http_root, ilo_boot_iso_name)
        ironic_utils.unlink_without_raise(boot_iso_path)
def _parse_deploy_info(node):
    """Gets the instance and driver specific Node deployment info.

    Validates that the node's 'instance_info' and 'driver_info' together
    carry everything this driver needs to deploy images to the node.

    :param node: a single Node.
    :returns: A dict with the instance_info and driver_info values.
    :raises: MissingParameterValue, if any of the required parameters are
        missing.
    :raises: InvalidParameterValue, if any of the parameters have invalid
        value.
    """
    deploy_info = {}
    deploy_info.update(deploy_utils.get_image_instance_info(node))
    deploy_info.update(parse_driver_info(node))
    return deploy_info
class IloVirtualMediaBoot(base.BootInterface):
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task):
"""Validate the deployment information for the task's node.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue, if some information is invalid.
:raises: MissingParameterValue if 'kernel_id' and 'ramdisk_id' are
missing in the Glance image or 'kernel' and 'ramdisk' not provided
in instance_info for non-Glance image.
"""
node = task.node
d_info = _parse_deploy_info(node)
if node.driver_internal_info.get('is_whole_disk_image'):
props = []
elif service_utils.is_glance_image(d_info['image_source']):
props = ['kernel_id', 'ramdisk_id']
else:
props = ['kernel', 'ramdisk']
deploy_utils.validate_image_properties(task.context, d_info, props)
def prepare_ramdisk(self, task, ramdisk_params):
"""Prepares the boot of deploy ramdisk using virtual media.
This method prepares the boot of the deploy ramdisk after
reading relevant information from the node's driver_info and
instance_info.
:param task: a task from TaskManager.
:param ramdisk_params: the parameters to be passed to the ramdisk.
:returns: None
:raises: MissingParameterValue, if some information is missing in
node's driver_info or instance_info.
:raises: InvalidParameterValue, if some information provided is
invalid.
:raises: IronicException, if some power or set boot boot device
operation failed on the node.
:raises: IloOperationError, if some operation on iLO failed.
"""
node = task.node
# Clear ilo_boot_iso if it's a glance image to force recreate
# another one again (or use existing one in glance).
# This is mainly for rebuild scenario.
if service_utils.is_glance_image(
node.instance_info.get('image_source')):
instance_info = node.instance_info
instance_info.pop('ilo_boot_iso', None)
node.instance_info = instance_info
node.save()
# Eject all virtual media devices, as we are going to use them
# during deploy.
ilo_common.eject_vmedia_devices(task)
deploy_nic_mac = deploy_utils.get_single_nic_with_vif_port_id(task)
ramdisk_params['BOOTIF'] = deploy_nic_mac
deploy_iso = node.driver_info['ilo_deploy_iso']
ilo_common.setup_vmedia(task, deploy_iso, ramdisk_params)
def prepare_instance(self, task):
"""Prepares the boot of instance.
This method prepares the boot of the instance after reading
relevant information from the node's instance_info.
It does the following depending on boot_option for deploy:
- If the boot_option requested for this deploy is 'local' or image
is a whole disk image, then it sets the node to boot from disk.
- Otherwise it finds/creates the boot ISO to boot the instance
image, attaches the boot ISO to the bare metal and then sets
the node to boot from CDROM.
:param task: a task from TaskManager.
:returns: None
:raises: IloOperationError, if some operation on iLO failed.
"""
ilo_common.cleanup_vmedia_boot(task)
# For iscsi_ilo driver, we boot from disk every time if the image
# deployed is a whole disk image.
node = task.node
iwdi = node.driver_internal_info.get('is_whole_disk_image')
if deploy_utils.get_boot_option(node) == "local" or iwdi:
manager_utils.node_set_boot_device(task, boot_devices.DISK,
persistent=True)
else:
drv_int_info = node.driver_internal_info
root_uuid_or_disk_id = drv_int_info.get('root_uuid_or_disk_id')
if root_uuid_or_disk_id:
self._configure_vmedia_boot(task, root_uuid_or_disk_id)
else:
LOG.warning(_LW("The UUID for the root partition could not "
"be found for node %s"), node.uuid)
def clean_up_instance(self, task):
"""Cleans up the boot of instance.
This method cleans up the environment that was setup for booting
the instance. It ejects virtual media
:param task: a task from TaskManager.
:returns: None
:raises: IloOperationError, if some operation on iLO failed.
"""
_clean_up_boot_iso_for_instance(task.node)
driver_internal_info = task.node.driver_internal_info
driver_internal_info.pop('boot_iso_created_in_web_server', None)
driver_internal_info.pop('root_uuid_or_disk_id', None)
task.node.driver_internal_info = driver_internal_info
task.node.save()
ilo_common.cleanup_vmedia_boot(task)
    def clean_up_ramdisk(self, task):
        """Cleans up the boot of ironic ramdisk.

        This method cleans up virtual media devices setup for the deploy
        ramdisk.

        :param task: a task from TaskManager.
        :returns: None
        :raises: IloOperationError, if some operation on iLO failed.
        """
        ilo_common.cleanup_vmedia_boot(task)
def _configure_vmedia_boot(self, task, root_uuid):
"""Configure vmedia boot for the node.
:param task: a task from TaskManager.
:param root_uuid: uuid of the root partition
:returns: None
:raises: IloOperationError, if some operation on iLO failed.
"""
node = task.node
boot_iso = _get_boot_iso(task, root_uuid)
if not boot_iso:
LOG.error(_LE("Cannot get boot ISO for node %s"), node.uuid)
return
# Upon deploy complete, some distros cloud images reboot the system as
# part of its configuration. Hence boot device should be persistent and
# not one-time.
ilo_common.setup_vmedia_for_boot(task, boot_iso)
manager_utils.node_set_boot_device(task,
boot_devices.CDROM,
persistent=True)
i_info = node.instance_info
i_info['ilo_boot_iso'] = boot_iso
node.instance_info = i_info
node.save()
| |
# Copyright 2013 Google Inc. All Rights Reserved.
"""Some general file utilities used that can be used by the Cloud SDK."""
import errno
import hashlib
import logging
import os
import shutil
import sys
import tempfile
import time
# Maximum number of attempts for retryable file system operations.
NUM_RETRIES = 10
# WindowsError only exists when running on Windows; bind it to None elsewhere
# so code below can safely compare exception types against it.
try:
  # pylint: disable=invalid-name, We are not defining this name.
  WindowsError
except NameError:
  # pylint: disable=invalid-name, We are not defining this name.
  WindowsError = None
class Error(Exception):
  """Root of the exception hierarchy for the file_utils module."""
def MakeDir(path, mode=0o777):
  """Creates the given directory and its parents; does not fail if it exists.

  Args:
    path: str, The path of the directory to create.
    mode: int, The permissions to give the created directories. 0o777 is the
      default mode for os.makedirs(), allowing reading, writing, and listing
      by all users on the machine.  (Spelled 0o777, valid on Python 2.6+ and
      Python 3, rather than the Python-2-only literal 0777.)

  Raises:
    OSError: if the operation fails for any reason other than the directory
      already existing.
  """
  try:
    os.makedirs(path, mode=mode)
  except OSError as ex:
    # Only swallow the error when the directory already exists; a regular
    # file at the same path (or any other failure) is still an error.
    if not (ex.errno == errno.EEXIST and os.path.isdir(path)):
      raise
def _WaitForRetry(retries_left):
  """Sleeps for a period of time based on the retry count.

  The delay grows linearly as retries are consumed: the first retry waits
  the shortest time and the final retry waits the longest.

  Args:
    retries_left: int, The number of retries remaining. Should be in the range
      of NUM_RETRIES - 1 to 0.
  """
  delay = .1 * (2 * (NUM_RETRIES - retries_left))
  logging.debug('Waiting for retry: [%s]', delay)
  time.sleep(delay)
# Windows error codes treated as transient and worth retrying:
# 32 = ERROR_SHARING_VIOLATION (file in use), 145 = ERROR_DIR_NOT_EMPTY.
RETRY_ERROR_CODES = [32, 145]
def _ShouldRetryOperation(func, exc_info):
  """Matches specific error types that should be retried.

  Retries are limited to os.remove and os.rmdir failing with:

  WindowsError(32, 'The process cannot access the file because it is being '
               'used by another process'), When a file is in use.
  WindowsError(145, 'The directory is not empty'), When a directory cannot be
  deleted.

  Args:
    func: function, The function that failed.
    exc_info: sys.exc_info(), The current exception state.

  Returns:
    True if the error can be retried or false if we should just fail.
  """
  if func not in (os.remove, os.rmdir):
    return False
  # On non-Windows platforms WindowsError is None, so nothing is retryable.
  exc_type, exc_value = exc_info[0], exc_info[1]
  if WindowsError is None or exc_type != WindowsError:
    return False
  return exc_value.winerror in RETRY_ERROR_CODES
def _RetryOperation(exc_info, func, args,
                    retry_test_function=lambda func, exc_info: True):
  """Attempts to retry the failed file operation.

  Args:
    exc_info: sys.exc_info(), The current exception state.
    func: function, The function that failed.
    args: (str, ...), The tuple of args that should be passed to func when
      retrying.
    retry_test_function: The function to call to determine if a retry should be
      attempted. Takes the function that is being retried as well as the
      current exc_info.

  Returns:
    True if the operation eventually succeeded or False if it continued to fail
    for all retries.
  """
  for retries_left in range(NUM_RETRIES, 0, -1):
    if not retry_test_function(func, exc_info):
      break
    logging.debug('Retrying file system operation: %s, %s, %s, retries_left=%s',
                  func, args, exc_info, retries_left)
    try:
      # Back off before retrying; the wait grows as retries are used up.
      _WaitForRetry(retries_left - 1)
      func(*args)
      return True
    # pylint: disable=bare-except, We look at the exception later.
    except:
      exc_info = sys.exc_info()
  return False
def _HandleRemoveError(func, failed_path, exc_info):
  """A function to pass as the onerror arg to rmtree for handling errors.

  Retries the failed operation (for retryable Windows errors) and, if it
  still fails, re-raises the original exception with its original traceback.

  Args:
    func: function, The function that failed.
    failed_path: str, The path of the file the error occurred on.
    exc_info: sys.exc_info(), The current exception state.
  """
  logging.debug('Handling file system error: %s, %s, %s',
                func, failed_path, exc_info)
  # Don't remove the trailing comma in the passed arg tuple. It indicates that
  # it is a tuple of 1, rather than a tuple of characters that will get expanded
  # by *args.
  if not _RetryOperation(exc_info, func, (failed_path,), _ShouldRetryOperation):
    # Always raise the original error.
    # raises is weird in that you can raise exc_info directly even though it's
    # a tuple.
    # NOTE: Python 2 three-argument raise; re-raises the original exception
    # with its original traceback.
    raise exc_info[0], exc_info[1], exc_info[2]
def RmTree(path):
  """Calls shutil.rmtree() with error handling to fix Windows problems.

  It also ensures that the top level directory deletion is actually reflected
  in the file system before this returns.

  Args:
    path: str, The path to remove.
  """
  shutil.rmtree(path, onerror=_HandleRemoveError)
  # On Windows the removal of the top-level directory may not be visible
  # immediately; poll until it disappears or we run out of patience.
  for retries_left in range(NUM_RETRIES - 1, -1, -1):
    if not os.path.isdir(path):
      break
    logging.debug('Waiting for directory to disappear: %s', path)
    _WaitForRetry(retries_left)
def _DestInSrc(src, dst):
# Copied directly from shutil
src = os.path.abspath(src)
dst = os.path.abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def MoveDir(src, dst):
  """Recursively moves a directory to another location.

  This code is mostly copied from shutil.move(), but has been scoped down to
  specifically handle only directories.  The src must be a directory, and
  the dst must not exist.  It uses functions from this module to be resilient
  against spurious file system errors in Windows.  It will try to do an
  os.rename() of the directory.  If that fails, the tree will be copied to the
  new location and then deleted from the old location.

  Args:
    src: str, The directory path to move.
    dst: str, The path to move the directory to.

  Raises:
    Error: If the src or dst directories are not valid.
  """
  if not os.path.isdir(src):
    raise Error("Source path '{0}' must be a directory".format(src))
  if os.path.exists(dst):
    raise Error("Destination path '{0}' already exists".format(dst))
  if _DestInSrc(src, dst):
    # BUG FIX: the second placeholder used to be '{0}' again, so the message
    # repeated src and never showed dst.
    raise Error("Cannot move a directory '{0}' into itself '{1}'."
                .format(src, dst))
  try:
    logging.debug('Attempting to move directory [%s] to [%s]', src, dst)
    try:
      os.rename(src, dst)
    except OSError:
      # Windows can transiently refuse the rename; retry before giving up.
      if not _RetryOperation(sys.exc_info(), os.rename, (src, dst)):
        raise
  except OSError as e:
    # Rename can legitimately fail (e.g. across file systems); fall back to
    # copy-then-delete.
    logging.debug('Directory rename failed. Falling back to copy. [%s]', e)
    shutil.copytree(src, dst, symlinks=True)
    RmTree(src)
def FindDirectoryContaining(starting_dir_path, directory_entry_name):
"""Searches directories upwards until it finds one with the given contents.
This can be used to find the directory above you that contains the given
entry. It is useful for things like finding the workspace root you are under
that contains a configuration directory.
Args:
starting_dir_path: str, The path of the directory to start searching
upwards from.
directory_entry_name: str, The name of the directory that must be present
in order to return the current directory.
Returns:
str, The full path to the directory above the starting dir that contains the
given entry, or None if the root of the file system was hit without finding
it.
"""
prev_path = None
path = os.path.realpath(starting_dir_path)
while path != prev_path:
search_dir = os.path.join(path, directory_entry_name)
if os.path.isdir(search_dir):
return path
prev_path = path
path, _ = os.path.split(path)
return None
def SearchForExecutableOnPath(executable, path=None):
"""Tries to find all 'executable' in the directories listed in the PATH.
This is mostly copied from distutils.spawn.find_executable() but with a
few differences. It does not check the current directory for the
executable. We only want to find things that are actually on the path, not
based on what the CWD is. It also returns a list of all matching
executables. If there are multiple versions of an executable on the path
it will return all of them at once.
Args:
executable: The name of the executable to find
path: A path to search. If none, the system PATH will be used.
Returns:
A list of full paths to matching executables or an empty list if none
are found.
"""
if not path:
path = os.getenv('PATH')
paths = path.split(os.pathsep)
matching = []
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
matching.append(f)
return matching
def FindExecutableOnPath(executable, path=None, pathext=None):
  """Searches for 'executable' in the directories listed in 'path'.

  If 'executable' contains any directory components then 'path' is ignored
  (only the current directory is searched).  If 'pathext' is specified and
  'executable' does not already carry one of those extensions then
  'executable'+'extension' is also checked.

  Args:
    executable: The name of the executable to find.
    path: A list of directories to search separated by 'os.pathsep'.  If none
      then the system PATH is used.
    pathext: A list of file name extensions to use separated by 'os.pathsep'.
      If none then the platform specific system PATHEXT is used.

  Returns:
    The path of 'executable' (+ 'extension' if necessary) if found and
    executable, None if not found.
  """
  head, tail = os.path.split(executable)
  if head:
    # The executable already names a directory: search only relative to CWD.
    path = '.'
  elif path is None:
    path = os.environ.get('PATH')
  if not path:
    return None
  paths = path.split(os.pathsep)
  if pathext is None:
    # PATHEXT is only meaningful on Windows-like platforms.
    if sys.platform == 'win32' or os.name == 'os2':
      pathext = os.environ.get('PATHEXT')
    if pathext is None:
      pathext = ''
  extensions = pathext.split(os.pathsep)
  if extensions:
    _, extension = os.path.splitext(tail)
    for ext in extensions:
      if extension == ext:
        # 'executable' already ends with one of the recognized extensions,
        # so no alternate extensions need to be tried.
        extensions = []
  # Try each directory, first with the bare name, then with each extension.
  for directory in paths:
    head = os.path.join(directory, executable)
    for tail in [''] + extensions:
      full = head + tail
      if os.path.isfile(full) and os.access(full, os.X_OK):
        return full
  return None
def HasWriteAccessInDir(directory):
  """Determines if the current user is able to modify the contents of the dir.

  Args:
    directory: str, The full path of the directory to check.

  Raises:
    ValueError: If the given directory path is not a valid directory.

  Returns:
    True if the current user has write and execute permissions on the
    directory, False otherwise.  (The original docstring incorrectly said
    "has missing write and execute permissions".)
  """
  if not os.path.isdir(directory):
    raise ValueError(
        'The given path [{path}] is not a directory.'.format(path=directory))

  # Appending . tests search permissions, especially on windows, by forcing
  # 'directory' to be treated as a directory
  path = os.path.join(directory, '.')
  if not os.access(path, os.X_OK) or not os.access(path, os.W_OK):
    # We can believe os.access() indicating no access.
    return False

  # At this point the only platform and filesystem independent method is to
  # attempt to create or delete a file in the directory.
  #
  # Why? os.access() and os.stat() use the underlying C library on Windows,
  # which doesn't check the correct user and group permissions and almost
  # always results in false positive writability tests.
  path = os.path.join(directory,
                      '.HasWriteAccessInDir{pid}'.format(pid=os.getpid()))
  # while True: should work here, but we limit the retries just in case.
  for _ in range(10):
    try:
      # 0o666 (portable spelling of the Python-2-only literal 0666).
      fd = os.open(path, os.O_RDWR | os.O_CREAT, 0o666)
      os.close(fd)
    except OSError as e:
      if e.errno == errno.EACCES:
        # No write access.
        return False
      if e.errno in [errno.ENOTDIR, errno.ENOENT]:
        # The directory has been removed or replaced by a file.
        raise ValueError('The given path [{path}] is not a directory.'.format(
            path=directory))
      raise
    try:
      os.remove(path)
      # Write access.
      return True
    except OSError as e:
      if e.errno == errno.EACCES:
        # No write access.
        return False
      # os.remove() could fail with ENOENT if we're in a race with another
      # process/thread (which just succeeded) or if the directory has been
      # removed.
      if e.errno != errno.ENOENT:
        raise
  return False
class TemporaryDirectory(object):
  """A class to easily create and dispose of temporary directories.

  Securely creates a directory for temporary use.  This class can be used
  with a context manager (the with statement) to ensure cleanup in
  exceptional situations.
  """

  def __init__(self):
    self.__temp_path = tempfile.mkdtemp()

  @property
  def path(self):
    return self.__temp_path

  def __enter__(self):
    return self.path

  def __exit__(self, *exceptional):
    self.Close()
    # Always return False so any exceptions will be re-raised.
    return False

  def Close(self):
    """Removes the directory; True if it was removed, False if already gone."""
    if not self.path:
      return False
    RmTree(self.path)
    self.__temp_path = None
    return True
class Checksum(object):
  """Consistently handles calculating checksums across the Cloud SDK."""

  def __init__(self):
    """Creates a new Checksum."""
    self.__hash = hashlib.sha1()
    self.__files = set()

  def AddContents(self, contents):
    """Adds the given string contents to the checksum.

    Args:
      contents: str, The contents to add.

    Returns:
      self, For method chaining.
    """
    self.__hash.update(contents)
    return self

  def AddFileContents(self, file_path):
    """Adds the contents of the given file to the checksum.

    Args:
      file_path: str, The file path of the contents to add.

    Returns:
      self, For method chaining.
    """
    with open(file_path, 'rb') as fp:
      # BUG FIX: the sentinel must be b'' to match the binary-mode read()
      # return type.  A '' (str) sentinel never compares equal to bytes on
      # Python 3, which made this loop spin forever at end of file; b'' is
      # equivalent to '' on Python 2, so behavior there is unchanged.
      for chunk in iter(lambda: fp.read(4096), b''):
        self.__hash.update(chunk)
    return self

  def AddDirectory(self, dir_path):
    """Adds all files under the given directory to the checksum.

    This adds both the contents of the files as well as their names and
    locations to the checksum.  If the checksums of two directories are equal
    this means they have exactly the same files, and contents.

    Args:
      dir_path: str, The directory path to add all files from.

    Returns:
      self, For method chaining.
    """
    for root, dirs, files in os.walk(dir_path):
      # Sort for a deterministic traversal order across platforms.
      dirs.sort(key=os.path.normcase)
      files.sort(key=os.path.normcase)
      for d in dirs:
        path = os.path.join(root, d)
        # We don't traverse directory links, but add the fact that it was found
        # in the tree.
        if os.path.islink(path):
          relpath = os.path.relpath(path, dir_path)
          self.__files.add(relpath)
          self.AddContents(relpath)
          self.AddContents(os.readlink(path))
      for f in files:
        path = os.path.join(root, f)
        relpath = os.path.relpath(path, dir_path)
        self.__files.add(relpath)
        self.AddContents(relpath)
        # For symlinks, hash the link target instead of the file contents.
        if os.path.islink(path):
          self.AddContents(os.readlink(path))
        else:
          self.AddFileContents(path)
    return self

  def HexDigest(self):
    """Gets the hex digest for all content added to this checksum.

    Returns:
      str, The checksum digest as a hex string.
    """
    return self.__hash.hexdigest()

  def Files(self):
    """Gets the list of all files that were discovered when adding a directory.

    Returns:
      {str}, The relative paths of all files that were found when traversing
      the directory tree.
    """
    return self.__files
def OpenForWritingPrivate(path):
  """Open a file for writing, with the right permissions for user-private files.

  Args:
    path: str, The full path to the file.

  Returns:
    A file context manager.
  """
  parent_dir_path, _ = os.path.split(path)
  full_parent_dir_path = os.path.realpath(os.path.expanduser(parent_dir_path))
  # 0o700 (portable spelling of the Python-2-only 0700): parent directory is
  # private to the current user.
  MakeDir(full_parent_dir_path, mode=0o700)

  flags = os.O_RDWR | os.O_CREAT | os.O_TRUNC
  # Accommodate Windows; stolen from python2.6/tempfile.py.
  if hasattr(os, 'O_NOINHERIT'):
    flags |= os.O_NOINHERIT
  # 0o600: the file itself is readable/writable only by the owner.
  fd = os.open(path, flags, 0o600)
  return os.fdopen(fd, 'w')
class Context(object):
  """Wrap a file in a context.

  Some libraries return file contexts in 2.7, but not in 2.6.  Wrapping the
  returned file in this class makes it so our code works for either version.
  """

  def __init__(self, f):
    self.__wrapped = f

  def __enter__(self):
    return self.__wrapped

  def __exit__(self, typ, value, traceback):
    # Close unconditionally; exceptions (if any) propagate to the caller.
    self.__wrapped.close()
class ChDir(object):
  """Do some things from a certain directory, and reset the directory afterward.
  """

  def __init__(self, directory):
    self.__target = directory

  def __enter__(self):
    # Remember where we were so __exit__ can restore it.
    self.__original_dir = os.getcwd()
    os.chdir(self.__target)
    return self.__target

  def __exit__(self, typ, value, traceback):
    os.chdir(self.__original_dir)
| |
#!/usr/bin/env python
# Usage: normalize_acl.py acl.config [transformation [transformation [...]]]
#
# Transformations:
# all Apply all transformations.
# 0 - dry run (default, print to stdout rather than modifying file in place)
# 1 - strip/condense whitespace and sort (implied by any other transformation)
# 2 - get rid of unneeded create on refs/tags
# 3 - remove any project.stat{e,us} = active since it's a default or a typo
# 4 - strip default *.owner = group Administrators permissions
# 5 - sort the exclusiveGroupPermissions group lists
# 6 - replace openstack-ci-admins and openstack-ci-core with infra-core
# 7 - add at least one core team, if no team is defined with special suffixes
# like core, admins, milestone or Users
# 8 - fix All-Projects inheritance shadowed by exclusiveGroupPermissions
import re
import sys
# First positional argument is the ACL file; remaining arguments select which
# transformations to run ('all' expands to every transformation 0-8).
aclfile = sys.argv[1]
try:
    transformations = sys.argv[2:]
    if transformations and transformations[0] == 'all':
        transformations = [str(x) for x in range(0, 9)]
# NOTE(review): list slicing never raises KeyError, so this handler is dead
# code; sys.argv[2:] simply yields [] when no extra arguments are given.
except KeyError:
    transformations = []
def tokens(data):
    """Human-order comparison key.

    This handles embedded positive and negative integers, for sorting
    strings in a more human-friendly order: dots are treated as separators
    and purely numeric fragments are converted to ints.
    """
    result = []
    for fragment in data.replace('.', ' ').split():
        try:
            result.append(int(fragment))
        except ValueError:
            result.append(fragment)
    return result
# Parsed ACL: maps section name -> list of raw "key = value" option lines.
acl = {}
# Accumulates the normalized file content for output.
out = ''
# Recognized option keys; anything else in the input is rejected below.
valid_keys = {'abandon',
              'access',
              'copyAllScoresOnTrivialRebase',
              'create',
              'defaultValue',
              'exclusiveGroupPermissions',
              'forgeAuthor',
              'forgeCommitter',
              'function',
              'label-Code-Review',
              'label-Rollcall-Vote',
              'label-Workflow',
              'label-Verified',
              'mergeContent',
              'push',
              'pushMerge',
              'pushSignedTag',
              'requireChangeId',
              'requireContributorAgreement',
              'state',
              'value'
              }
# Transformation '0' (or no transformations at all) means dry run: print the
# normalized output instead of rewriting the file in place.
if '0' in transformations or not transformations:
    dry_run = True
else:
    dry_run = False
# Parse the ACL file into the acl dict, validating each line as we go.
aclfd = open(aclfile)
for line in aclfd:
    # condense whitespace to single spaces and get rid of leading/trailing
    # (FIX: raw string for the regex; '\s' in a plain string is an invalid
    # escape sequence and warns on modern Python)
    line = re.sub(r'\s+', ' ', line).strip()

    # skip empty lines
    if not line:
        continue

    # this is a section heading
    if line.startswith('['):
        section = line.strip(' []')

        # use a list for this because some options can have the same "key"
        acl[section] = []

    # key=value lines
    elif '=' in line:
        acl[section].append(line)
        # Check for valid keys
        key = line.split('=')[0].strip()
        if key not in valid_keys:
            raise Exception('Unrecognized key in line: "%s"' % line)

    # WTF
    else:
        raise Exception('Unrecognized line: "%s"' % line)
aclfd.close()
# Transformation 2: 'create' on refs/tags sections is unneeded -- drop it.
if '2' in transformations:
    for key in acl:
        if key.startswith('access "refs/tags/'):
            acl[key] = [
                x for x in acl[key]
                if not x.startswith('create = ')]
# Transformation 3: project.state/status = active is a default (or a typo),
# so remove it.
if '3' in transformations:
    try:
        acl['project'] = [x for x in acl['project'] if x not in
                          ('state = active', 'status = active')]
    except KeyError:
        pass
# Transformation 4: strip the default Administrators owner permission.
if '4' in transformations:
    for section in acl.keys():
        acl[section] = [x for x in acl[section] if x !=
                        'owner = group Administrators']
# Transformation 5: sort the group list inside exclusiveGroupPermissions
# values so equivalent lists normalize identically.
if '5' in transformations:
    for section in acl.keys():
        newsection = []
        for option in acl[section]:
            # NOTE(review): unpacking assumes exactly one '=' per option line.
            key, value = [x.strip() for x in option.split('=')]
            if key == 'exclusiveGroupPermissions':
                newsection.append('%s = %s' % (
                    key, ' '.join(sorted(value.split()))))
            else:
                newsection.append(option)
        acl[section] = newsection
# Transformation 6: retire the old openstack-ci-* group names in favor of
# infra-core.
if '6' in transformations:
    for section in acl.keys():
        newsection = []
        for option in acl[section]:
            for group in ('openstack-ci-admins', 'openstack-ci-core'):
                option = option.replace('group %s' % group, 'group infra-core')
            newsection.append(option)
        acl[section] = newsection
# Transformation 7: ensure at least one core-ish team exists by appending
# '-core' to the group of any +/-2 Code-Review grant that does not already
# mention one of the special team suffixes (skipping special projects).
if '7' in transformations:
    special_projects = (
        'ossa',
        'reviewday',
    )
    special_teams = (
        'admins',
        'Bootstrappers',
        'committee',
        'core',
        'maint',
        'Managers',
        'milestone',
        'packagers',
        'release',
        'Users',
    )
    for section in acl.keys():
        newsection = []
        for option in acl[section]:
            if ('refs/heads' in section and 'group' in option
                    and '-2..+2' in option
                    and not any(x in option for x in special_teams)
                    and not any(x in aclfile for x in special_projects)):
                # The group name ends the option line, so appending '-core'
                # to the whole line renames the group.
                option = '%s%s' % (option, '-core')
            newsection.append(option)
        acl[section] = newsection
# Transformation 8: exclusiveGroupPermissions shadows grants inherited from
# All-Projects, so re-add the standard inherited grants explicitly.
if '8' in transformations:
    for section in acl.keys():
        newsection = []
        for option in acl[section]:
            newsection.append(option)
            key, value = [x.strip() for x in option.split('=')]
            if key == 'exclusiveGroupPermissions':
                exclusives = value.split()
                # It's safe for these to be duplicates since we de-dup later
                if 'abandon' in exclusives:
                    newsection.append('abandon = group Change Owner')
                    newsection.append('abandon = group Project Bootstrappers')
                if 'label-Code-Review' in exclusives:
                    newsection.append('label-Code-Review = -2..+2 '
                                      'group Project Bootstrappers')
                    newsection.append('label-Code-Review = -1..+1 '
                                      'group Registered Users')
                if 'label-Workflow' in exclusives:
                    newsection.append('label-Workflow = -1..+1 '
                                      'group Project Bootstrappers')
                    newsection.append('label-Workflow = -1..+0 '
                                      'group Change Owner')
        acl[section] = newsection
# Emit sections in sorted order, options human-sorted and de-duplicated
# (duplicates are adjacent after sorting, so comparing to the previous
# option suffices).
for section in sorted(acl.keys()):
    if acl[section]:
        out += '\n[%s]\n' % section
        lastoption = ''
        for option in sorted(acl[section], key=tokens):
            if option != lastoption:
                out += '%s\n' % option
            lastoption = option
if dry_run:
    # out begins with '\n' and ends with '\n'; trim both for display only.
    print(out[1:-1])
else:
    # When writing back, keep the trailing newline but drop the leading one.
    aclfd = open(aclfile, 'w')
    aclfd.write(out[1:])
    aclfd.close()
| |
# -*- coding: utf-8 -*-
import numpy as np
import theano
import theano.tensor as T
import time
import gzip
import SupersenseDataReader
import BIOF1Validation
import keras
from keras.models import Sequential
from keras.layers.core import Dense, Flatten, Merge, Dropout
from keras.optimizers import SGD, adadelta, RMSprop, adam, adagrad
from keras.utils import np_utils
from keras.layers.embeddings import Embedding
#from KerasLayer.FixedEmbedding import FixedEmbedding
# Context window: how many tokens of context on each side of the target word.
windowSize = 2 # 2 to the left, 2 to the right
#numHiddenUnits = 155
numHiddenUnits = 300
# SemCor-derived train/dev/test splits (alternative corpora kept below).
trainFile = 'data/SEMtrain1.tsv'
devFile = 'data/SEMdev1.tsv'
testFile = 'data/SEMtest1.tsv'
#trainFile = 'data/ritter-train.tsv'
#trainFile = 'data/joined-train.tsv'
#devFile = 'data/ritter-dev.tsv'
#testFile = 'data/ritter-eval.tsv'
#testFile = 'data/in-house-eval.tsv'
print "Supersenses with Keras with %s" % theano.config.floatX
#####################
#
# Read in the vocab
#
#####################
print "Read in the vocab"
#vocabPath = 'embeddings/GoogleNews-vectors-negative300.vocab_sub' #'embeddings/GoogleVecs.txt_subb'
#vocabPath = 'embeddings/super-text-supertext-wiki-sub250-150-2-sg.txt_sub' #'semcor-embed.sub'
vocabPath= '/home/local/UKP/flekova/superwiki-ALL-200-2-sg.txt'
similarityPath = 'embeddings/similarities-nosuper.txt_sub'
#frequencyPath = '/home/local/UKP/flekova/supertextwikiFreq.txt'
frequencyPath = 'embeddings/wikipediaSenseFrequencies.txt_sub'
word2Idx = {} #Maps a word to the index in the embeddings matrix
sim2Idx = {} #Maps a word to the index in the similarity embeddings matrix
freq2Idx = {} #Maps a word to the index in the frequency matrix
embeddings = [] #Embeddings matrix
similarities = [] #Similarity embeddings matrix
freqs = [] #Sense-frequency matrix
# Each embedding file line is: <token> <float> <float> ... (space separated).
with open(vocabPath, 'r') as fIn:
    idx = 0
    for line in fIn:
        split = line.strip().split(' ')
        embeddings.append(np.array([float(num) for num in split[1:]]))
        word2Idx[split[0]] = idx
        idx += 1
with open(similarityPath, 'r') as fIn2:
    idx = 0
    for linne in fIn2:
        splitt = linne.strip().split(' ')
        similarities.append(np.array([float(num) for num in splitt[1:]]))
        sim2Idx[splitt[0]] = idx
        idx += 1
with open(frequencyPath, 'r') as fIn3:
    idx = 0
    for linne in fIn3:
        splitt = linne.strip().split(' ')
        freqs.append(np.array([float(num) for num in splitt[1:]]))
        freq2Idx[splitt[0]] = idx
        idx += 1
# Convert to dense matrices in Theano's configured float precision.
embeddings = np.asarray(embeddings, dtype=theano.config.floatX)
similarities = np.asarray(similarities, dtype=theano.config.floatX)
freqs = np.asarray(freqs, dtype=theano.config.floatX)
embedding_size = embeddings.shape[1]
similarities_size = similarities.shape[1]
freqs_size = freqs.shape[1]
# Create a mapping for our labels: index 0 is the outside tag, then one index
# per B-/I- supersense class.
label2Idx = {'O':0,'0':0}
#label2Idx = {'0':0}
idx = 1
for bioTag in ['B-', 'I-']:
    for nerClass in ['verb.cognition', 'verb.change', 'verb.body', 'verb.communication','verb.competition','verb.consumption',
    'verb.contact','verb.creation','verb.emotion','verb.motion','verb.perception','verb.possession','verb.social','verb.stative','verb.weather',
    'noun.act','noun.animal','noun.artifact','noun.attribute','noun.body','noun.cognition','noun.communication','noun.event','noun.feeling','noun.food',
    'noun.group','noun.location','noun.motive','noun.object','noun.person','noun.phenomenon','noun.plant','noun.possession','noun.process',
    'noun.quantity','noun.relation','noun.shape','noun.state','noun.substance','noun.time','noun.Tops']:
        #for subtype in ['', 'deriv', 'part']:
        label2Idx[bioTag+nerClass] = idx
        idx += 1
#Inverse label mapping
idx2Label = {v: k for k, v in label2Idx.items()}
#Casing matrix
caseLookup = {'numeric': 0, 'allLower':1, 'allUpper':2, 'initialUpper':3, 'other':4, 'PADDING':5}
#POS matrix: maps both universal and Penn Treebank tags onto a small tag set.
# NOTE(review): this dict literal repeats keys ('.', 'WP'); the later entry
# wins, so 'WP' actually maps to 50, not 8 -- verify this is intended.
posLookup = {'.':0, 'ADJ':1, 'ADP':2, 'ADV':3, 'CONJ':4, 'DET':5, 'NOUN':6, 'NUM':7, 'PRON':8, 'PRT':9, 'VERB':10, 'X':11,
'\'\'':0,'(':0,')':0,',':0,'.':0,':':0,'CC':4,'CD':7,'DT':5,'EX':5,'FW':11,'HT':11,'IN':2,'JJ':1,'JJR':1,
'JJS':1,'LS':11,'MD':10,'NN':6,'NNP':6,'NNPS':6,'NNS':6,'NONE':11,'O':11,'PDT':5,'POS':9,'PRP':8,'PRP$':8,
'RB':3,'RBR':3,'RBS':3,'RP':9,'RT':0,'SYM':11,'TD':11,'TO':9,'UH':11,'URL':11,'USR':6,'VB':10,'VBD':10,
'VBG':10,'VBN':10,'VBP':10,'VBZ':10,'VPP':10,'WDT':5,'WH':11,'WP':8,'WRB':3,'PADDING':49,'other':50, '$':50,'``':50,'WP':50,'WP$':50} #,'$':51,'WP':52}
#posLookup = {'\'\'':0,'(':1,')':2,',':3,'.':4,':':5,'CC':6,'CD':7,'DT':8,'EX':9,'FW':10,'HT':11,'IN':12,'JJ':13,'JJR':14,
#'JJS':15,'LS':16,'MD':17,'NN':18,'NNP':19,'NNPS':20,'NNS':21,'NONE':22,'O':23,'PDT':24,'POS':25,'PRP':26,'PRP$':27,
#'RB':28,'RBR':29,'RBS':30,'RP':31,'RT':32,'SYM':33,'TD':34,'TO':35,'UH':36,'URL':37,'USR':38,'VB':39,'VBD':40,
#'VBG':41,'VBN':42,'VBP':43,'VBZ':44,'VPP':45,'WDT':46,'WP':47,'WRB':48,'PADDING':49,'other':50, '$':50,'``':50,'WP':50}
# Supersense class -> index (used for gold-label style features).
superLookup = {'verb.cognition':1, 'verb.change':2, 'verb.body':3, 'verb.communication':4,'verb.competition':5,'verb.consumption':6,
'verb.contact':7,'verb.creation':8,'verb.emotion':9,'verb.motion':10,'verb.perception':11,'verb.possession':12,'verb.social':13,
'verb.stative':14,'verb.weather':15,'noun.act':16,'noun.animal':17,'noun.artifact':18,'noun.attribute':19,'noun.body':20,'noun.cognition':21,
'noun.communication':22,'noun.event':23,'noun.feeling':24,'noun.food':25,'noun.group':26,'noun.location':27,'noun.motive':28,
'noun.object':29,'noun.person':30,'noun.phenomenon':31,'noun.plant':32,'noun.possession':33,'noun.process':34,
'noun.quantity':35,'noun.relation':36,'noun.shape':37,'noun.state':38,'noun.substance':39,'noun.time':40,'noun.Tops':41,'PADDING':42}
# One-hot matrices for the categorical casing and POS features.
caseMatrix = np.identity(len(caseLookup), dtype=theano.config.floatX)
posMatrix = np.identity(len(posLookup), dtype=theano.config.floatX)
# Read in data
print "Read in data and create matrices"
train_sentences = SupersenseDataReader.readFile(trainFile)
dev_sentences = SupersenseDataReader.readFile(devFile)
test_sentences = SupersenseDataReader.readFile(testFile)
# Create numpy arrays: one row per token window, plus parallel feature arrays
# (similarity, frequency, casing, POS) and the label vector.
train_x, train_sim_x, train_freq_x, train_case_x, train_pos_x, train_y = SupersenseDataReader.createNumpyArrayWithCasing(train_sentences, windowSize, word2Idx, sim2Idx, freq2Idx, label2Idx, caseLookup, posLookup)
dev_x, dev_sim_x, dev_freq_x, dev_case_x, dev_pos_x, dev_y = SupersenseDataReader.createNumpyArrayWithCasing(dev_sentences, windowSize, word2Idx, sim2Idx, freq2Idx, label2Idx, caseLookup, posLookup)
test_x, test_sim_x, test_freq_x, test_case_x, test_pos_x, test_y = SupersenseDataReader.createNumpyArrayWithCasing(test_sentences, windowSize, word2Idx, sim2Idx, freq2Idx, label2Idx, caseLookup, posLookup)
#####################################
#
# Create the Keras Network
#
#####################################
# Create the train and predict_labels function
n_in = 2*windowSize+1
n_hidden = numHiddenUnits
n_out = len(label2Idx)
number_of_epochs = 50
minibatch_size = 100
embedding_size = embeddings.shape[1]
print "units, epochs, batch: ", n_hidden, number_of_epochs, minibatch_size
#dim_case = 6
# NOTE(review): these Theano symbols appear unused by the Keras model below
# -- likely left over from an earlier pure-Theano implementation.
x = T.imatrix('x') # the data, one word+context per row
y = T.ivector('y') # the labels are presented as 1D vector of [int] labels
print "Embeddings shape sim ",similarities.shape
print "Embeddings shape words ",embeddings.shape
# One embedding branch per feature type; each maps the n_in window of indices
# to a flat feature vector.  Pretrained weight matrices initialize each layer.
words = Sequential()
words.add(Embedding(output_dim=embeddings.shape[1], input_dim=embeddings.shape[0], input_length=n_in, weights=[embeddings]))
words.add(Dropout(0.25))
words.add(Flatten())
senses = Sequential()
senses.add(Embedding(output_dim=similarities.shape[1], input_dim=similarities.shape[0], input_length=n_in, weights=[similarities]))
senses.add(Dropout(0.25))
senses.add(Flatten())
freq = Sequential()
freq.add(Embedding(output_dim=freqs.shape[1], input_dim=freqs.shape[0], input_length=n_in, weights=[freqs]))
freq.add(Dropout(0.25))
freq.add(Flatten())
casing = Sequential()
casing.add(Embedding(output_dim=caseMatrix.shape[1], input_dim=caseMatrix.shape[0], input_length=n_in, weights=[caseMatrix]))
casing.add(Flatten())
pos = Sequential()
pos.add(Embedding(output_dim=posMatrix.shape[1], input_dim=len(posLookup), input_length=n_in, weights=[posMatrix]))
pos.add(Flatten())
# Concatenate the branches pairwise (old Keras Merge API requires nesting).
# Final input order: [senses, pos, words, casing, freq].
temp3 = Sequential()
temp3.add(Merge([senses, pos], mode='concat'))
temp = Sequential()
temp.add(Merge([temp3, words], mode='concat'))
temp2 = Sequential()
temp2.add(Merge([temp, casing], mode='concat'))
model = Sequential()
model.add(Merge([temp2, freq], mode='concat'))
#relu = keras.layers.advanced_activations.LeakyReLU(alpha=0.3)
# Two hidden ReLU layers, then a softmax over the BIO supersense labels.
model.add(Dense(output_dim=n_hidden, init='glorot_uniform', activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(output_dim=50, input_dim=n_hidden, activation='relu'))
model.add(Dense(output_dim=n_out, init='glorot_uniform', activation='softmax'))
#sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
#sgd = SGD(lr=0.1, decay=1e-6, momentum=0.0, nesterov=False)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
#model.compile(loss='mean_squared_error', optimizer=sgd)
print(train_x.shape[0], 'train samples')
print(train_x.shape[1], 'train dimension')
print(test_x.shape[0], 'test samples')
# One-hot encode the integer labels for categorical crossentropy.
train_y_cat = np_utils.to_categorical(train_y, n_out)
#Function that helps to iterate over our data in minibatches
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """Yield successive (inputs, targets) minibatches of size batchsize.

    Only full batches are produced: a trailing remainder smaller than
    batchsize is dropped.  When shuffle is True a single random permutation
    is drawn and applied consistently to inputs and targets.
    """
    assert len(inputs) == len(targets)
    indices = None
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start in range(0, len(inputs) - batchsize + 1, batchsize):
        if indices is not None:
            selection = indices[start:start + batchsize]
        else:
            selection = slice(start, start + batchsize)
        yield inputs[selection], targets[selection]
print "%d epochs" % number_of_epochs
print "%d mini batches" % (len(train_x)/minibatch_size)
# Train one epoch at a time so dev/test F1 can be reported after each epoch.
for epoch in xrange(number_of_epochs):
    start_time = time.time()
    # Inputs must match the Merge nesting order: senses, pos, words, casing, freq.
    model.fit([train_sim_x, train_pos_x, train_x, train_case_x, train_freq_x], train_y_cat, nb_epoch=1, batch_size=minibatch_size, verbose=0, shuffle=False)
    #for batch in iterate_minibatches(train_x, train_y_cat, minibatch_size, shuffle=False):
    #    inputs, targets = batch
    #    model.train_on_batch(inputs, targets)
    print "%.2f sec for training" % (time.time() - start_time)
    # BIO-aware precision/recall/F1 on dev and test after every epoch.
    pre_dev, rec_dev, f1_dev = BIOF1Validation.compute_f1(model.predict_classes([dev_sim_x, dev_pos_x, dev_x, dev_case_x, dev_freq_x], verbose=0), dev_y, idx2Label)
    pre_test, rec_test, f1_test = BIOF1Validation.compute_f1(model.predict_classes([test_sim_x, test_pos_x, test_x, test_case_x, test_freq_x], verbose=1), test_y, idx2Label)
    print test_y.shape[0]
    print "%d epoch: prec, rec, F1 on dev: %f %f %f, prec, rec, F1 on test: %f %f %f" % (epoch+1, pre_dev, rec_dev, f1_dev, pre_test, rec_test, f1_test)
    #if epoch==stop_epoch:
    #for i in range(0, test_y.shape[0]):
    #print i, idx2Label[model.predict_classes([test_x, test_pos_x, test_case_x], verbose=0)[i]], idx2Label[test_y[i]]
| |
from mock import patch
from nose.tools import eq_
from helper import MockXPI
from js_helper import _do_test_raw
import validator.xpi as xpi
from validator import submain
from validator.testcases import content
from validator.errorbundler import ErrorBundle
from validator.chromemanifest import ChromeManifest
from validator.constants import *
class MockTestEndpoint(object):
    """
    Simulates a test module and reports whether individual tests have been
    attempted on it.
    """
    def __init__(self, expected, td_error=False):
        # expected: iterable of test names whose invocations are counted.
        # td_error: when True, test_package is replaced with a tier-error
        # generator (see _tier_test).
        expectations = {}
        for expectation in expected:
            expectations[expectation] = {'count': 0,
                                         'subpackage': 0}
        self.expectations = expectations
        self.td_error = td_error
        self.found_tiers = []
    def _tier_test(self, err, xpi, name):
        'A simulated test case for tier errors'
        print 'Generating subpackage tier error...'
        # Record the tier the error bundle was at when this "test" ran.
        self.found_tiers.append(err.tier)
        err.error(('foo', ),
                  'Tier error',
                  'Just a test')
    def __getattribute__(self, name):
        """Detects requests for validation tests and returns an
        object that simulates the outcome of a test."""
        print 'Requested: %s' % name
        if name == 'test_package' and self.td_error:
            return self._tier_test
        # Real attributes of the mock itself must bypass the counting
        # machinery below.
        if name in ('expectations',
                    'assert_expectation',
                    'td_error',
                    '_tier_test',
                    'found_tiers'):
            return object.__getattribute__(self, name)
        if name in self.expectations:
            self.expectations[name]['count'] += 1
        # Hand back a stub whose signature matches the requested test.
        if name == 'test_package':
            def wrap(package, name, expectation=PACKAGE_ANY):
                pass
        elif name in ('test_css_file', 'test_unsafe_html', 'process'):
            def wrap(err, name, file_data):
                pass
        else:
            def wrap(err, pak):
                # Count calls made with a subpackage XPIManager separately.
                if isinstance(pak, xpi.XPIManager) and pak.subpackage:
                    self.expectations[name]['subpackage'] += 1
        return wrap
    def assert_expectation(self, name, count, type_='count'):
        """Asserts that a particular test has been run a certain number
        of times"""
        print self.expectations
        assert name in self.expectations
        eq_(self.expectations[name][type_], count)
class MockMarkupEndpoint(MockTestEndpoint):
    """Simulates the markup test module."""
    def __getattribute__(self, name):
        # The markup endpoint exposes a MarkupParser factory; everything
        # else behaves exactly like the generic mock endpoint.
        if name != 'MarkupParser':
            return MockTestEndpoint.__getattribute__(self, name)
        return lambda err: self
def test_xpcnativewrappers():
    'Tests that xpcnativewrappers is not in the chrome.manifest'
    bundle = ErrorBundle()
    # With no chrome.manifest resource the check bails out early.
    assert content.test_xpcnativewrappers(bundle, None) is None
    # A manifest without the directive must pass.
    bundle.save_resource('chrome.manifest',
                         ChromeManifest('foo bar', 'chrome.manifest'))
    content.test_xpcnativewrappers(bundle, None)
    assert not bundle.failed()
    # The directive itself must be flagged.
    bundle.save_resource('chrome.manifest',
                         ChromeManifest('xpcnativewrappers on',
                                        'chrome.manifest'))
    content.test_xpcnativewrappers(bundle, None)
    assert bundle.failed()
@patch('validator.testcases.content.testendpoint_validator',
       MockTestEndpoint(('test_inner_package', )))
def test_jar_subpackage():
    'Tests JAR files that are subpackages.'
    err = ErrorBundle()
    err.detected_type = PACKAGE_EXTENSION
    err.supported_versions = {'foo': ['1.2.3']}
    # Both a nested JAR and a top-level JAR count as subpackages.
    mock_package = MockXPI(
        {'chrome/subpackage.jar':
             'tests/resources/content/subpackage.jar',
         'subpackage.jar':
             'tests/resources/content/subpackage.jar'})
    result = content.test_packed_packages(err, mock_package)
    print result
    assert result == 2
    content.testendpoint_validator.assert_expectation(
        'test_inner_package',
        2)
    content.testendpoint_validator.assert_expectation(
        'test_inner_package',
        2,
        'subpackage')
    # Supported versions must survive the recursion into subpackages.
    assert err.supported_versions == {'foo': ['1.2.3']}
@patch('validator.testcases.content.testendpoint_validator',
       MockTestEndpoint(('test_package', )))
def test_xpi_subpackage():
    'XPIs should never be subpackages; only nested extensions'
    err = ErrorBundle()
    err.detected_type = PACKAGE_EXTENSION
    mock_package = MockXPI(
        {'chrome/package.xpi':
             'tests/resources/content/subpackage.jar'})
    result = content.test_packed_packages(err, mock_package)
    print result
    assert result == 1
    # The nested XPI is validated as a full package, not as a subpackage.
    content.testendpoint_validator.assert_expectation(
        'test_package', 1)
    content.testendpoint_validator.assert_expectation(
        'test_package', 0, 'subpackage')
@patch('validator.testcases.content.testendpoint_validator',
       MockTestEndpoint(('test_package', ), td_error=True))
def test_xpi_tiererror():
    'Tests that tiers are reset when a subpackage is encountered'
    err = ErrorBundle()
    mock_package = MockXPI(
        {'foo.xpi': 'tests/resources/content/subpackage.jar'})
    err.set_tier(2)
    # The return value is not needed here; the original bound it to an
    # unused local.
    content.test_packed_packages(err, mock_package)
    # Errors raised inside the subpackage are reported at tier 1...
    assert err.errors[0]['tier'] == 1
    # ...while the bundle's own tier is restored afterwards.
    assert err.tier == 2
    assert all(x == 1 for x in content.testendpoint_validator.found_tiers)
@patch('validator.testcases.content.testendpoint_validator',
       MockTestEndpoint(('test_inner_package', 'test_package')))
def test_jar_nonsubpackage():
    'Tests XPI files that are not subpackages.'
    err = ErrorBundle()
    # In a multi-item package, inner JARs are validated as full packages.
    err.detected_type = PACKAGE_MULTI
    err.save_resource('is_multipackage', True)
    mock_package = MockXPI(
        {'foo.jar':
             'tests/resources/content/subpackage.jar',
         'chrome/bar.jar':
             'tests/resources/content/subpackage.jar'})
    result = content.test_packed_packages(err, mock_package)
    print result
    assert result == 2
    content.testendpoint_validator.assert_expectation(
        'test_package',
        2)
    content.testendpoint_validator.assert_expectation(
        'test_package',
        0,
        'subpackage')
def test_jar_case():
    """Test that the capitalization of JARs is preserved."""
    bundle = ErrorBundle()
    package = MockXPI(
        {'foo.JaR': 'tests/resources/packagelayout/ext_blacklist.xpi'})
    content.test_packed_packages(bundle, package)
    assert bundle.failed()
    # Every reported problem must point at the original mixed-case path.
    for message in bundle.errors + bundle.warnings:
        assert 'JaR' in message['file'][0]
@patch('validator.testcases.content.testendpoint_markup',
       MockMarkupEndpoint(('process', )))
def test_markup():
    'Tests markup files in the content validator.'
    err = ErrorBundle()
    err.supported_versions = {}
    # .xml files are routed to the markup endpoint.
    mock_package = MockXPI({'foo.xml': 'tests/resources/content/junk.xpi'})
    result = content.test_packed_packages(err, mock_package)
    print result
    assert result == 1
    content.testendpoint_markup.assert_expectation('process', 1)
    content.testendpoint_markup.assert_expectation('process', 0, 'subpackage')
@patch('validator.testcases.content.testendpoint_css',
       MockMarkupEndpoint(('test_css_file', )))
def test_css():
    'Tests css files in the content validator.'
    err = ErrorBundle()
    err.supported_versions = {}
    # .css files are routed to the CSS endpoint.
    mock_package = MockXPI({'foo.css': 'tests/resources/content/junk.xpi'})
    result = content.test_packed_packages(err, mock_package)
    print result
    assert result == 1
    content.testendpoint_css.assert_expectation('test_css_file', 1)
    content.testendpoint_css.assert_expectation('test_css_file', 0,
                                                'subpackage')
def test_hidden_files():
    """Tests that hidden files are reported."""
    def test_structure(structure):
        # Each structure must independently cause a validation failure.
        err = ErrorBundle()
        err.supported_versions = {}
        mock_package = MockXPI(structure)
        content.test_packed_packages(err, mock_package)
        print err.print_summary(verbose=True)
        assert err.failed()
    # nose generator test: one sub-test per hidden/backup file layout.
    for structure in ({'.hidden': 'tests/resources/content/junk.xpi'},
                      {'dir/__MACOSX/foo': 'tests/resources/content/junk.xpi'},
                      {'dir/.foo.swp': 'tests/resources/content/junk.xpi'},
                      {'dir/file.old': 'tests/resources/content/junk.xpi'},
                      {'dir/file.xul~': 'tests/resources/content/junk.xpi'}):
        yield test_structure, structure
def test_password_in_defaults_prefs():
"""
Tests that passwords aren't stored in the defaults/preferences/*.js files
for bug 647109.
"""
password_js = open('tests/resources/content/password.js').read()
assert not _do_test_raw(password_js).failed()
err = ErrorBundle()
err.supported_versions = {}
mock_package = MockXPI({'defaults/preferences/foo.js':
'tests/resources/content/password.js'})
content._process_file(err, mock_package, 'defaults/preferences/foo.js',
password_js, 'foo.js')
print err.print_summary()
assert err.failed()
@patch('validator.testcases.content.testendpoint_langpack',
       MockMarkupEndpoint(('test_unsafe_html', )))
def test_langpack():
    'Tests a language pack in the content validator.'
    err = ErrorBundle()
    err.supported_versions = {}
    err.detected_type = PACKAGE_LANGPACK
    # .dtd files in a langpack are routed to the langpack endpoint.
    mock_package = MockXPI({'foo.dtd': 'tests/resources/content/junk.xpi'})
    result = content.test_packed_packages(err, mock_package)
    print result
    assert result == 1
    content.testendpoint_langpack.assert_expectation('test_unsafe_html', 1)
    content.testendpoint_langpack.assert_expectation('test_unsafe_html', 0,
                                                    'subpackage')
@patch('validator.testcases.content.testendpoint_validator',
       MockMarkupEndpoint(('test_inner_package', )))
def test_jar_subpackage_bad():
    'Tests JAR files that are bad subpackages.'
    err = ErrorBundle()
    # junk.xpi is not a valid JAR, so recursing into it must fail.
    mock_package = MockXPI({'chrome/subpackage.jar':
                            'tests/resources/content/junk.xpi'})
    result = content.test_packed_packages(err, mock_package)
    print result
    assert err.failed()
def test_subpackage_metadata_preserved():
    """Tests that metadata is preserved for sub-packages."""
    xpi2 = MockXPI({
        'thing.xpi': 'tests/resources/jetpack/jetpack-1.16-outdated.xpi'})
    err1 = ErrorBundle()
    err1.detected_type = PACKAGE_EXTENSION
    err2 = ErrorBundle()
    err2.detected_type = PACKAGE_EXTENSION
    # Close the raw fixture handle deterministically; the original opened
    # the file and never closed it.
    with open('tests/resources/jetpack/jetpack-1.16-outdated.xpi') as xpi1:
        submain.test_package(err1, xpi1, 'jetpack-1.16-outdated.xpi')
    content.test_packed_packages(err2, xpi2)
    # The sub-package run must record the same metadata the direct run saw.
    assert 'sub_packages' in err2.metadata
    eq_(err1.metadata, err2.metadata['sub_packages'].get('thing.xpi'))
def test_make_script_absolute():
    """Test that _make_script_absolute() works properly."""
    absolutize = content._make_script_absolute
    cases = (
        # Already-absolute URLs pass through untouched.
        (('chrome://a/b.xul', 'chrome://foo/bar.js'), 'chrome://foo/bar.js'),
        # A leading slash resolves against the chrome package root.
        (('chrome://a/b.xul', '/foo.js'), 'chrome://a/foo.js'),
        (('chrome://a/b/c.xul', '/foo/bar.js'), 'chrome://a/foo/bar.js'),
        # Relative paths resolve against the containing directory.
        (('chrome://a/b/c.xul', 'foo.js'), 'chrome://a/b/foo.js'),
        (('chrome://a/b/c.xul', 'foo/bar.js'), 'chrome://a/b/foo/bar.js'),
    )
    for args, expected in cases:
        eq_(absolutize(*args), expected)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
import sys
import eventlet.wsgi
import routes.middleware
import ssl
import webob.dec
import webob.exc
from keystone.common import logging
from keystone.common import utils
from keystone import exception
from keystone.openstack.common import jsonutils
LOG = logging.getLogger(__name__)
class WritableLogger(object):
    """Adapt a logger to the file-like ``write`` interface.

    eventlet.wsgi expects a writable object for its log argument; this
    wrapper forwards each ``write`` call to ``logger.log`` at the
    configured level.
    """

    def __init__(self, logger, level=logging.DEBUG):
        self.logger = logger
        self.level = level

    def write(self, msg):
        # Delegate the raw message unchanged to the wrapped logger.
        self.logger.log(self.level, msg)
class Server(object):
    """Server class to manage multiple WSGI sockets and applications."""
    def __init__(self, application, host=None, port=None, threads=1000):
        # application: the WSGI callable served by this instance.
        # host/port default to all interfaces and an ephemeral port.
        self.application = application
        self.host = host or '0.0.0.0'
        self.port = port or 0
        # Green thread pool shared by the listener and its workers.
        self.pool = eventlet.GreenPool(threads)
        self.socket_info = {}
        self.greenthread = None
        # SSL stays off until set_ssl() is called.
        self.do_ssl = False
        self.cert_required = False
    def start(self, key=None, backlog=128):
        """Run a WSGI server with the given application."""
        LOG.debug('Starting %(arg0)s on %(host)s:%(port)s' %
                  {'arg0': sys.argv[0],
                   'host': self.host,
                   'port': self.port})
        socket = eventlet.listen((self.host, self.port), backlog=backlog)
        if key:
            # Remember the bound address (useful when port 0 was requested).
            self.socket_info[key] = socket.getsockname()
        # SSL is enabled
        if self.do_ssl:
            if self.cert_required:
                cert_reqs = ssl.CERT_REQUIRED
            else:
                cert_reqs = ssl.CERT_NONE
            sslsocket = eventlet.wrap_ssl(socket, certfile=self.certfile,
                                          keyfile=self.keyfile,
                                          server_side=True,
                                          cert_reqs=cert_reqs,
                                          ca_certs=self.ca_certs)
            socket = sslsocket
        # Serve in the background; callers block via wait().
        self.greenthread = self.pool.spawn(self._run, self.application, socket)
    def set_ssl(self, certfile, keyfile=None, ca_certs=None,
                cert_required=True):
        # Store SSL material; it is applied on the next start() call.
        self.certfile = certfile
        self.keyfile = keyfile
        self.ca_certs = ca_certs
        self.cert_required = cert_required
        self.do_ssl = True
    def kill(self):
        # Stop the listener green thread, if the server was started.
        if self.greenthread:
            self.greenthread.kill()
    def wait(self):
        """Wait until all servers have completed running."""
        try:
            self.pool.waitall()
        except KeyboardInterrupt:
            pass
    def _run(self, application, socket):
        """Start a WSGI server in a new green thread."""
        log = logging.getLogger('eventlet.wsgi.server')
        eventlet.wsgi.server(socket, application, custom_pool=self.pool,
                             log=WritableLogger(log))
class Request(webob.Request):
    """Request class used as RequestClass by the wsgify decorators below."""
    pass
class BaseApplication(object):
    """Base WSGI application wrapper. Subclasses need to implement __call__."""

    @classmethod
    def factory(cls, global_config, **local_config):
        """Used for paste app factories in paste.deploy config files.

        Any local configuration (values under the [app:APPNAME] section of
        the paste config) arrives here as keyword arguments, e.g.::

            [app:wadl]
            latest_version = 1.3
            paste.app_factory = nova.api.fancy_api:Wadl.factory

        results in a call to the `Wadl` class as::

            import nova.api.fancy_api
            fancy_api.Wadl(latest_version='1.3')

        Re-implement `factory` in a subclass if the plain kwarg passing is
        not sufficient.
        """
        return cls()

    def __call__(self, environ, start_response):
        r"""WSGI entry point; subclasses must override.

        A typical implementation decorates itself with
        ``@webob.dec.wsgify(RequestClass=Request)`` and returns one of:

        * a plain string body (e.g. ``'message\n'``)
        * a nicely formatted HTTP exception (e.g. ``exc.HTTPForbidden``)
        * a ``webob.Response`` object
        * another WSGI app to run next
        * ``None`` after assigning ``req.response``

        See http://pythonpaste.org/webob/modules/dec.html for more info.
        """
        raise NotImplementedError('You must implement __call__')
class Application(BaseApplication):
    @webob.dec.wsgify
    def __call__(self, req):
        # Routes middleware stores the match dict in wsgiorg.routing_args;
        # 'action' names the controller method to invoke.
        arg_dict = req.environ['wsgiorg.routing_args'][1]
        action = arg_dict.pop('action')
        del arg_dict['controller']
        LOG.debug('arg_dict: %s', arg_dict)
        # allow middleware up the stack to provide context & params
        context = req.environ.get('openstack.context', {})
        context['query_string'] = dict(req.params.iteritems())
        params = req.environ.get('openstack.params', {})
        params.update(arg_dict)
        # TODO(termie): do some basic normalization on methods
        method = getattr(self, action)
        # NOTE(vish): make sure we have no unicode keys for py2.6.
        params = self._normalize_dict(params)
        try:
            result = method(context, **params)
        except exception.Unauthorized as e:
            LOG.warning("Authorization failed. %s from %s"
                        % (e, req.environ['REMOTE_ADDR']))
            return render_exception(e)
        except exception.Error as e:
            LOG.warning(e)
            return render_exception(e)
        except Exception as e:
            # Anything unexpected becomes a generic UnexpectedError response.
            logging.exception(e)
            return render_exception(exception.UnexpectedError(exception=e))
        # Map the handler's return value onto a WSGI response.
        if result is None:
            return render_response(status=(204, 'No Content'))
        elif isinstance(result, basestring):
            return result
        elif isinstance(result, webob.Response):
            return result
        elif isinstance(result, webob.exc.WSGIHTTPException):
            return result
        return render_response(body=result)
    def _normalize_arg(self, arg):
        # Convert route-style names (foo:bar-baz) into python identifiers.
        return str(arg).replace(':', '_').replace('-', '_')
    def _normalize_dict(self, d):
        return dict([(self._normalize_arg(k), v)
                     for (k, v) in d.iteritems()])
    def assert_admin(self, context):
        # Skip the check when middleware already marked the context admin.
        if not context['is_admin']:
            try:
                user_token_ref = self.token_api.get_token(
                    context=context, token_id=context['token_id'])
            except exception.TokenNotFound:
                raise exception.Unauthorized()
            creds = user_token_ref['metadata'].copy()
            try:
                creds['user_id'] = user_token_ref['user'].get('id')
            except AttributeError:
                logging.debug('Invalid user')
                raise exception.Unauthorized()
            try:
                creds['tenant_id'] = user_token_ref['tenant'].get('id')
            except AttributeError:
                logging.debug('Invalid tenant')
                raise exception.Unauthorized()
            # NOTE(vish): this is pretty inefficient
            creds['roles'] = [self.identity_api.get_role(context, role)['name']
                              for role in creds.get('roles', [])]
            # Accept either is_admin or the admin role
            self.policy_api.enforce(context, creds, 'admin_required', {})
class Middleware(Application):
    """Base WSGI middleware.

    These classes require an application to be initialized that will be
    called next.  By default the middleware simply calls its wrapped app;
    override __call__ (or the process_* hooks below) to customize its
    behavior.
    """

    @classmethod
    def factory(cls, global_config, **local_config):
        """Used for paste filter factories in paste.deploy config files.

        Any local configuration (values under the [filter:APPNAME] section
        of the paste config) arrives here as keyword arguments, e.g.::

            [filter:analytics]
            redis_host = 127.0.0.1
            paste.filter_factory = nova.api.analytics:Analytics.factory

        results in a call to the `Analytics` class as::

            import nova.api.analytics
            analytics.Analytics(app_from_paste, redis_host='127.0.0.1')

        Re-implement `factory` in a subclass if the plain kwarg passing is
        not sufficient.
        """
        def _filter_factory(app):
            # NOTE(review): conf is assembled but never used -- preserved
            # as-is so a bad global_config still fails identically.
            conf = global_config.copy()
            conf.update(local_config)
            return cls(app)
        return _filter_factory

    def __init__(self, application):
        self.application = application

    def process_request(self, request):
        """Called on each request.

        If this returns None, the next application down the stack will be
        executed. If it returns a response then that response will be
        returned and execution will stop here.
        """
        return None

    def process_response(self, request, response):
        """Do whatever you'd like to the response, based on the request."""
        return response

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, request):
        early = self.process_request(request)
        if early:
            return early
        downstream = request.get_response(self.application)
        return self.process_response(request, downstream)
class Debug(Middleware):
    """Helper class for debugging a WSGI application.

    Can be inserted into any WSGI application chain to get information
    about the request and response.
    """
    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        # Dump the incoming environ and body before handing off downstream.
        LOG.debug('%s %s %s', ('*' * 20), 'REQUEST ENVIRON', ('*' * 20))
        for key, value in req.environ.items():
            LOG.debug('%s = %s', key, value)
        LOG.debug('')
        LOG.debug('%s %s %s', ('*' * 20), 'REQUEST BODY', ('*' * 20))
        for line in req.body_file:
            LOG.debug(line)
        LOG.debug('')
        resp = req.get_response(self.application)
        LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE HEADERS', ('*' * 20))
        for (key, value) in resp.headers.iteritems():
            LOG.debug('%s = %s', key, value)
        LOG.debug('')
        # Wrap the body iterator so the body is logged as it streams out.
        resp.app_iter = self.print_generator(resp.app_iter)
        return resp
    @staticmethod
    def print_generator(app_iter):
        """Iterator that prints the contents of a wrapper string."""
        LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE BODY', ('*' * 20))
        for part in app_iter:
            LOG.debug(part)
            yield part
class Router(object):
    """WSGI middleware that maps incoming requests to WSGI apps."""

    def __init__(self, mapper):
        """Create a router for the given routes.Mapper.

        Each route in `mapper` must specify a 'controller' (a WSGI app to
        call); usually it also specifies an 'action' so the controller can
        route the request to an action-specific method.

        Examples::

            mapper = routes.Mapper()
            sc = ServerController()

            # Explicit mapping of one route to a controller+action
            mapper.connect(None, '/svrlist', controller=sc, action='list')

            # Actions are all implicitly defined
            mapper.resource('server', 'servers', controller=sc)

            # Pointing to an arbitrary WSGI app.  You can specify the
            # {path_info:.*} parameter so the target app can be handed
            # just that section of the URL.
            mapper.connect(None, '/v1.0/{path_info:.*}',
                           controller=BlogApp())
        """
        self.map = mapper
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """Route the incoming request to a controller based on self.map.

        If no match, return a 404.
        """
        # RoutesMiddleware performs the match and then calls _dispatch.
        return self._router

    @staticmethod
    @webob.dec.wsgify(RequestClass=Request)
    def _dispatch(req):
        """Dispatch the request to the appropriate controller.

        Called by self._router after matching the incoming request to a
        route and putting the match information into req.environ.  Returns
        either a 404 or the routed WSGI app's response.
        """
        routing_args = req.environ['wsgiorg.routing_args'][1]
        if not routing_args:
            return webob.exc.HTTPNotFound()
        return routing_args['controller']
class ComposingRouter(Router):
    def __init__(self, mapper=None, routers=None):
        """Build a router whose routes are contributed by `routers`."""
        mapper = routes.Mapper() if mapper is None else mapper
        # Each contributing router registers its routes on the shared mapper.
        for router in (routers or []):
            router.add_routes(mapper)
        super(ComposingRouter, self).__init__(mapper)
class ComposableRouter(Router):
    """Router that supports use by ComposingRouter."""

    def __init__(self, mapper=None):
        mapper = routes.Mapper() if mapper is None else mapper
        # Let the subclass register its routes before wiring up dispatch.
        self.add_routes(mapper)
        super(ComposableRouter, self).__init__(mapper)

    def add_routes(self, mapper):
        """Add routes to given mapper."""
        pass
class ExtensionRouter(Router):
    """A router that allows extensions to supplement or overwrite routes.

    Expects to be subclassed.
    """

    def __init__(self, application, mapper=None):
        mapper = routes.Mapper() if mapper is None else mapper
        self.application = application
        self.add_routes(mapper)
        # Anything the extension does not claim falls through to the
        # wrapped application.
        mapper.connect('{path_info:.*}', controller=self.application)
        super(ExtensionRouter, self).__init__(mapper)

    def add_routes(self, mapper):
        """Hook for subclasses to register extension routes."""
        pass

    @classmethod
    def factory(cls, global_config, **local_config):
        """Used for paste filter factories in paste.deploy config files.

        Any local configuration (values under the [filter:APPNAME] section
        of the paste config) arrives here as keyword arguments, e.g.::

            [filter:analytics]
            redis_host = 127.0.0.1
            paste.filter_factory = nova.api.analytics:Analytics.factory

        results in a call to the `Analytics` class as::

            import nova.api.analytics
            analytics.Analytics(app_from_paste, redis_host='127.0.0.1')

        Re-implement `factory` in a subclass if the plain kwarg passing is
        not sufficient.
        """
        def _filter_factory(app):
            # NOTE(review): conf is assembled but never used -- preserved
            # as-is so a bad global_config still fails identically.
            conf = global_config.copy()
            conf.update(local_config)
            return cls(app)
        return _filter_factory
def render_response(body=None, status=None, headers=None):
    """Forms a WSGI response."""
    # Note: appending to the caller-supplied headers list (when given) is
    # intentional and preserved.
    headers = headers or []
    headers.append(('Vary', 'X-Auth-Token'))
    if body is not None:
        # JSON-encode bodies; SmarterEncoder handles the project's types.
        body = jsonutils.dumps(body, cls=utils.SmarterEncoder)
        headers.append(('Content-Type', 'application/json'))
        status = status or (200, 'OK')
    else:
        body = ''
        status = status or (204, 'No Content')
    return webob.Response(body=body,
                          status='%s %s' % status,
                          headerlist=headers)
def render_exception(error):
    """Forms a WSGI response based on the current error."""
    payload = {
        'error': {
            'code': error.code,
            'title': error.title,
            'message': str(error),
        }
    }
    return render_response(status=(error.code, error.title), body=payload)
| |
import logging
import random
import threading
import time
from plumbum.commands import BaseCommand, run_proc
from plumbum.commands.processes import ProcessExecutionError
from plumbum.machines.base import PopenAddons
class ShellSessionError(Exception):
    """Raised when something goes wrong when calling
    :func:`ShellSession.popen <plumbum.session.ShellSession.popen>`"""
class SSHCommsError(ProcessExecutionError, EOFError):
    """Raised when the communication channel can't be created on the
    remote host or it times out."""
class SSHCommsChannel2Error(SSHCommsError):
    """Raised when channel 2 (stderr) is not available"""
class IncorrectLogin(SSHCommsError):
    """Raised when incorrect login credentials are provided"""
class HostPublicKeyUnknown(SSHCommsError):
    """Raised when the host public key isn't known"""
shell_logger = logging.getLogger("plumbum.shell")
# ===================================================================================================
# Shell Session Popen
# ===================================================================================================
class MarkedPipe:
"""A pipe-like object from which you can read lines; the pipe will return report EOF (the
empty string) when a special marker is detected"""
__slots__ = ["pipe", "marker", "__weakref__"]
def __init__(self, pipe, marker):
self.pipe = pipe
self.marker = marker
self.marker = bytes(self.marker, "ascii")
def close(self):
"""'Closes' the marked pipe; following calls to ``readline`` will return """ ""
# consume everything
while self.readline():
pass
self.pipe = None
def readline(self):
"""Reads the next line from the pipe; returns "" when the special marker is reached.
Raises ``EOFError`` if the underlying pipe has closed"""
if self.pipe is None:
return b""
line = self.pipe.readline()
if not line:
raise EOFError()
if line.strip() == self.marker:
self.pipe = None
line = b""
return line
class SessionPopen(PopenAddons):
    """A shell-session-based ``Popen``-like object (has the following attributes: ``stdin``,
    ``stdout``, ``stderr``, ``returncode``)"""
    def __init__(self, proc, argv, isatty, stdin, stdout, stderr, encoding, *, host):
        # host: remote host identifier, threaded into raised SSH errors.
        self.host = host
        self.proc = proc
        self.argv = argv
        self.isatty = isatty
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.custom_encoding = encoding
        self.returncode = None
        # Set once communicate() has drained both channels.
        self._done = False
    def poll(self):
        """Returns the process' exit code or ``None`` if it's still running"""
        return self.returncode if self._done else None
    def wait(self):
        """Waits for the process to terminate and returns its exit code"""
        self.communicate()
        return self.returncode
    def communicate(self, input=None):  # pylint: disable=redefined-builtin
        """Consumes the process' stdout and stderr until the it terminates.
        :param input: An optional bytes/buffer object to send to the process over stdin
        :returns: A tuple of (stdout, stderr)
        """
        stdout = []
        stderr = []
        # Each source is (channel name, collected lines, MarkedPipe).
        sources = [("1", stdout, self.stdout)]
        if not self.isatty:
            # in tty mode, stdout and stderr are unified
            sources.append(("2", stderr, self.stderr))
        i = 0
        while sources:
            if input:
                # Feed stdin in 1000-byte slices between reads.
                chunk = input[:1000]
                self.stdin.write(chunk)
                self.stdin.flush()
                input = input[1000:]
            # Round-robin over the channels that are still open.
            i = (i + 1) % len(sources)
            name, coll, pipe = sources[i]
            try:
                line = pipe.readline()
                shell_logger.debug("%s> %r", name, line)
            except EOFError as err:
                # The pipe died before its end marker was seen: the shell
                # itself is gone.  Map known exit codes to specific errors.
                shell_logger.debug("%s> Nothing returned.", name)
                self.proc.poll()
                returncode = self.proc.returncode
                stdout = b"".join(stdout).decode(self.custom_encoding, "ignore")
                stderr = b"".join(stderr).decode(self.custom_encoding, "ignore")
                argv = self.argv.decode(self.custom_encoding, "ignore").split(";")[:1]
                if returncode == 5:
                    raise IncorrectLogin(
                        argv,
                        returncode,
                        stdout,
                        stderr,
                        message="Incorrect username or password provided",
                        host=self.host,
                    ) from None
                if returncode == 6:
                    raise HostPublicKeyUnknown(
                        argv,
                        returncode,
                        stdout,
                        stderr,
                        message="The authenticity of the host can't be established",
                        host=self.host,
                    ) from None
                if returncode != 0:
                    raise SSHCommsError(
                        argv,
                        returncode,
                        stdout,
                        stderr,
                        message="SSH communication failed",
                        host=self.host,
                    ) from None
                if name == "2":
                    raise SSHCommsChannel2Error(
                        argv,
                        returncode,
                        stdout,
                        stderr,
                        message="No stderr result detected. Does the remote have Bash as the default shell?",
                        host=self.host,
                    ) from None
                raise SSHCommsError(
                    argv,
                    returncode,
                    stdout,
                    stderr,
                    message="No communication channel detected. Does the remote exist?",
                    host=self.host,
                ) from err
            if not line:
                # MarkedPipe returned b"": this channel hit its end marker.
                del sources[i]
            else:
                coll.append(line)
        if self.isatty:
            stdout.pop(0)  # discard first line of prompt
        try:
            # The line just before the marker is the echoed exit status.
            self.returncode = int(stdout.pop(-1))
        except (IndexError, ValueError):
            self.returncode = "Unknown"
        self._done = True
        stdout = b"".join(stdout)
        stderr = b"".join(stderr)
        return stdout, stderr
class ShellSession:
    """An abstraction layer over *shell sessions*. A shell session is the execution of an
    interactive shell (``/bin/sh`` or something compatible), over which you may run commands
    (sent over stdin). The output is then read from stdout and stderr. Shell sessions are
    less "robust" than executing a process on its own, and they are susceptible to all sorts
    of malformatted-strings attacks, and there is little benefit from using them locally.
    However, they can greatly speed up remote connections, and are required for the implementation
    of :class:`SshMachine <plumbum.machines.remote.SshMachine>`, as they allow us to send multiple
    commands over a single SSH connection (setting up separate SSH connections incurs a high
    overhead). Try to avoid using shell sessions, unless you know what you're doing.
    Instances of this class may be used as *context-managers*.
    :param proc: The underlying shell process (with open stdin, stdout and stderr)
    :param encoding: The encoding to use for the shell session. If ``"auto"``, the underlying
                     process' encoding is used.
    :param isatty: If true, assume the shell has a TTY and that stdout and stderr are unified
    :param connect_timeout: The timeout to connect to the shell, after which, if no prompt
                            is seen, the shell process is killed
    """
    def __init__(
        self, proc, encoding="auto", isatty=False, connect_timeout=5, *, host=None
    ):
        self.host = host
        self.proc = proc
        self.custom_encoding = proc.custom_encoding if encoding == "auto" else encoding
        self.isatty = isatty
        # Serializes run(): only one command may be in flight per session.
        self._lock = threading.RLock()
        self._current = None
        if connect_timeout:
            # Kill the session if the initial no-op command below does not
            # complete within connect_timeout seconds.
            def closer():
                shell_logger.error(
                    "Connection to %s timed out (%d sec)", proc, connect_timeout
                )
                self.close()
            timer = threading.Timer(connect_timeout, closer)
            timer.start()
        try:
            # Run an empty command to prove the shell answers at all.
            self.run("")
        finally:
            if connect_timeout:
                timer.cancel()
    def __enter__(self):
        return self
    def __exit__(self, t, v, tb):
        self.close()
    def __del__(self):
        # Best-effort cleanup; never raise from a destructor.
        try:
            self.close()
        except Exception:
            pass
    def alive(self):
        """Returns ``True`` if the underlying shell process is alive, ``False`` otherwise"""
        return self.proc and self.proc.poll() is None
    def close(self):
        """Closes (terminates) the shell session"""
        if not self.alive():
            return
        try:
            # Politely ask the shell to exit before force-killing it.
            self.proc.stdin.write(b"\nexit\n\n\nexit\n\n")
            self.proc.stdin.flush()
            time.sleep(0.05)
        except (ValueError, OSError):
            pass
        for p in [self.proc.stdin, self.proc.stdout, self.proc.stderr]:
            try:
                p.close()
            except Exception:
                pass
        try:
            self.proc.kill()
        except OSError:
            pass
        self.proc = None
    def popen(self, cmd):
        """Runs the given command in the shell, adding some decoration around it. Only a single
        command can be executed at any given time.
        :param cmd: The command (string or :class:`Command <plumbum.commands.BaseCommand>` object)
        to run
        :returns: A :class:`SessionPopen <plumbum.session.SessionPopen>` instance
        """
        if self.proc is None:
            raise ShellSessionError("Shell session has already been closed")
        if self._current and not self._current._done:
            raise ShellSessionError("Each shell may start only one process at a time")
        if isinstance(cmd, BaseCommand):
            full_cmd = cmd.formulate(1)
        else:
            full_cmd = cmd
        # Unique end-of-output marker echoed on stdout (and on stderr when
        # the session is not a tty), plus the command's exit status.
        marker = f"--.END{time.time() * random.random()}.--"
        if full_cmd.strip():
            full_cmd += " ; "
        else:
            full_cmd = "true ; "
        full_cmd += f"echo $? ; echo '{marker}'"
        if not self.isatty:
            full_cmd += f" ; echo '{marker}' 1>&2"
        if self.custom_encoding:
            full_cmd = full_cmd.encode(self.custom_encoding)
        shell_logger.debug("Running %r", full_cmd)
        self.proc.stdin.write(full_cmd + b"\n")
        self.proc.stdin.flush()
        # MarkedPipe turns the marker lines back into per-command EOFs.
        self._current = SessionPopen(
            self.proc,
            full_cmd,
            self.isatty,
            self.proc.stdin,
            MarkedPipe(self.proc.stdout, marker),
            MarkedPipe(self.proc.stderr, marker),
            self.custom_encoding,
            host=self.host,
        )
        return self._current
    def run(self, cmd, retcode=0):
        """Runs the given command
        :param cmd: The command (string or :class:`Command <plumbum.commands.BaseCommand>` object)
        to run
        :param retcode: The expected return code (0 by default). Set to ``None`` in order to
        ignore erroneous return codes
        :returns: A tuple of (return code, stdout, stderr)
        """
        with self._lock:
            return run_proc(self.popen(cmd), retcode)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import hashlib
import json
import os
import tempfile
import testtools
from glance.openstack.common import timeutils
from glance.tests.integration.legacy_functional import base
from glance.tests.utils import minimal_headers
FIVE_KB = 5 * 1024  # 5 KiB: payload size for the small test images
FIVE_GB = 5 * 1024 * 1024 * 1024  # 5 GiB: exercises >2G size handling in the registry
class TestApi(base.ApiTest):
    def test_get_head_simple_post(self):
        """End-to-end walk of the v1 images API: verify an empty store, POST a
        5 KB public image, then check HEAD/GET metadata, the listing and
        detail views, property updates via PUT, and finally DELETE it.
        """
        # 0. GET /images
        # Verify no public images
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        self.assertEqual(content, '{"images": []}')
        # 1. GET /images/detail
        # Verify no public images
        path = "/v1/images/detail"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        self.assertEqual(content, '{"images": []}')
        # 2. POST /images with public image named Image1
        # attribute and no custom properties. Verify a 201 Created is returned
        image_data = "*" * FIVE_KB
        headers = minimal_headers('Image1')
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers,
                                              body=image_data)
        self.assertEqual(response.status, 201)
        data = json.loads(content)
        image_id = data['image']['id']
        self.assertEqual(data['image']['checksum'],
                         hashlib.md5(image_data).hexdigest())
        self.assertEqual(data['image']['size'], FIVE_KB)
        self.assertEqual(data['image']['name'], "Image1")
        self.assertEqual(data['image']['is_public'], True)
        # 3. HEAD image
        # Verify image found now
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'HEAD')
        self.assertEqual(response.status, 200)
        self.assertEqual(response['x-image-meta-name'], "Image1")
        # 4. GET image
        # Verify all information on image we just added is correct
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        expected_image_headers = {
            'x-image-meta-id': image_id,
            'x-image-meta-name': 'Image1',
            'x-image-meta-is_public': 'True',
            'x-image-meta-status': 'active',
            'x-image-meta-disk_format': 'raw',
            'x-image-meta-container_format': 'ovf',
            'x-image-meta-size': str(FIVE_KB)}
        expected_std_headers = {
            'content-length': str(FIVE_KB),
            'content-type': 'application/octet-stream'}
        for expected_key, expected_value in expected_image_headers.items():
            self.assertEqual(response[expected_key], expected_value,
                             "For key '%s' expected header value '%s'. "
                             "Got '%s'" % (expected_key,
                                           expected_value,
                                           response[expected_key]))
        for expected_key, expected_value in expected_std_headers.items():
            self.assertEqual(response[expected_key], expected_value,
                             "For key '%s' expected header value '%s'. "
                             "Got '%s'" % (expected_key,
                                           expected_value,
                                           response[expected_key]))
        self.assertEqual(content, "*" * FIVE_KB)
        self.assertEqual(hashlib.md5(content).hexdigest(),
                         hashlib.md5("*" * FIVE_KB).hexdigest())
        # 5. GET /images
        # Verify the one public image is listed with its summary fields
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        expected_result = {"images": [
            {"container_format": "ovf",
             "disk_format": "raw",
             "id": image_id,
             "name": "Image1",
             "checksum": "c2e5db72bd7fd153f53ede5da5a06de3",
             "size": 5120}]}
        self.assertEqual(json.loads(content), expected_result)
        # 6. GET /images/detail
        # Verify image and all its metadata
        path = "/v1/images/detail"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        expected_image = {
            "status": "active",
            "name": "Image1",
            "deleted": False,
            "container_format": "ovf",
            "disk_format": "raw",
            "id": image_id,
            "is_public": True,
            "deleted_at": None,
            "properties": {},
            "size": 5120}
        image = json.loads(content)
        for expected_key, expected_value in expected_image.items():
            self.assertEqual(expected_value, image['images'][0][expected_key],
                             "For key '%s' expected header value '%s'. "
                             "Got '%s'" % (expected_key,
                                           expected_value,
                                           image['images'][0][expected_key]))
        # 7. PUT image with custom properties of "distro" and "arch"
        # Verify 200 returned
        headers = {'X-Image-Meta-Property-Distro': 'Ubuntu',
                   'X-Image-Meta-Property-Arch': 'x86_64'}
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'PUT', headers=headers)
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(data['image']['properties']['arch'], "x86_64")
        self.assertEqual(data['image']['properties']['distro'], "Ubuntu")
        # 8. GET /images/detail
        # Verify image and all its metadata
        path = "/v1/images/detail"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        expected_image = {
            "status": "active",
            "name": "Image1",
            "deleted": False,
            "container_format": "ovf",
            "disk_format": "raw",
            "id": image_id,
            "is_public": True,
            "deleted_at": None,
            "properties": {'distro': 'Ubuntu', 'arch': 'x86_64'},
            "size": 5120}
        image = json.loads(content)
        for expected_key, expected_value in expected_image.items():
            self.assertEqual(expected_value, image['images'][0][expected_key],
                             "For key '%s' expected header value '%s'. "
                             "Got '%s'" % (expected_key,
                                           expected_value,
                                           image['images'][0][expected_key]))
        # 9. PUT image and remove a previously existing property.
        # (PUT replaces the property set: "distro" is dropped.)
        headers = {'X-Image-Meta-Property-Arch': 'x86_64'}
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'PUT', headers=headers)
        self.assertEqual(response.status, 200)
        path = "/v1/images/detail"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)['images'][0]
        self.assertEqual(len(data['properties']), 1)
        self.assertEqual(data['properties']['arch'], "x86_64")
        # 10. PUT image and add a previously deleted property.
        headers = {'X-Image-Meta-Property-Distro': 'Ubuntu',
                   'X-Image-Meta-Property-Arch': 'x86_64'}
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'PUT', headers=headers)
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        path = "/v1/images/detail"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)['images'][0]
        self.assertEqual(len(data['properties']), 2)
        self.assertEqual(data['properties']['arch'], "x86_64")
        self.assertEqual(data['properties']['distro'], "Ubuntu")
        self.assertNotEqual(data['created_at'], data['updated_at'])
        # DELETE image
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'DELETE')
        self.assertEqual(response.status, 200)
    def test_queued_process_flow(self):
        """
        We test the process flow where a user registers an image
        with Glance but does not immediately upload an image file.
        Later, the user uploads an image file using a PUT operation.
        We track the changing of image status throughout this process.

        0. GET /images
        - Verify no public images
        1. POST /images with public image named Image1 with no location
        attribute and no image data.
        - Verify 201 returned
        2. GET /images
        - Verify one public image
        3. HEAD image
        - Verify image now in queued status
        4. PUT image with image data
        - Verify 200 returned
        5. HEAD images
        - Verify image now in active status
        6. GET /images
        - Verify one public image
        """
        # 0. GET /images
        # Verify no public images
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        self.assertEqual(content, '{"images": []}')
        # 1. POST /images with public image named Image1
        # with no location or image data
        headers = minimal_headers('Image1')
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(response.status, 201)
        data = json.loads(content)
        self.assertEqual(data['image']['checksum'], None)
        self.assertEqual(data['image']['size'], 0)
        self.assertEqual(data['image']['container_format'], 'ovf')
        self.assertEqual(data['image']['disk_format'], 'raw')
        self.assertEqual(data['image']['name'], "Image1")
        self.assertEqual(data['image']['is_public'], True)
        image_id = data['image']['id']
        # 2. GET /images
        # Verify 1 public image
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(data['images'][0]['id'], image_id)
        self.assertEqual(data['images'][0]['checksum'], None)
        self.assertEqual(data['images'][0]['size'], 0)
        self.assertEqual(data['images'][0]['container_format'], 'ovf')
        self.assertEqual(data['images'][0]['disk_format'], 'raw')
        self.assertEqual(data['images'][0]['name'], "Image1")
        # 3. HEAD /images
        # Verify status is in queued: without data the image stays at size 0
        # in "queued" state until the upload below
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'HEAD')
        self.assertEqual(response.status, 200)
        self.assertEqual(response['x-image-meta-name'], "Image1")
        self.assertEqual(response['x-image-meta-status'], "queued")
        self.assertEqual(response['x-image-meta-size'], '0')
        self.assertEqual(response['x-image-meta-id'], image_id)
        # 4. PUT image with image data, verify 200 returned
        image_data = "*" * FIVE_KB
        headers = {'Content-Type': 'application/octet-stream'}
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'PUT', headers=headers,
                                              body=image_data)
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(data['image']['checksum'],
                         hashlib.md5(image_data).hexdigest())
        self.assertEqual(data['image']['size'], FIVE_KB)
        self.assertEqual(data['image']['name'], "Image1")
        self.assertEqual(data['image']['is_public'], True)
        # 5. HEAD /images
        # Verify status is in active after the data upload
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'HEAD')
        self.assertEqual(response.status, 200)
        self.assertEqual(response['x-image-meta-name'], "Image1")
        self.assertEqual(response['x-image-meta-status'], "active")
        # 6. GET /images
        # Verify 1 public image still...
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(data['images'][0]['checksum'],
                         hashlib.md5(image_data).hexdigest())
        self.assertEqual(data['images'][0]['id'], image_id)
        self.assertEqual(data['images'][0]['size'], FIVE_KB)
        self.assertEqual(data['images'][0]['container_format'], 'ovf')
        self.assertEqual(data['images'][0]['disk_format'], 'raw')
        self.assertEqual(data['images'][0]['name'], "Image1")
        # DELETE image
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'DELETE')
        self.assertEqual(response.status, 200)
    def test_size_greater_2G_mysql(self):
        """
        A test against the actual datastore backend for the registry
        to ensure that the image size property is not truncated.

        :see https://bugs.launchpad.net/glance/+bug/739433
        """
        # 1. POST /images with public image named Image1
        # attribute and a size of 5G. Use the HTTP engine with an
        # X-Image-Meta-Location attribute to make Glance forego
        # "adding" the image data.
        # Verify a 201 Created is returned
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Location': 'http://example.com/fakeimage',
                   'X-Image-Meta-Size': str(FIVE_GB),
                   'X-Image-Meta-Name': 'Image1',
                   'X-Image-Meta-disk_format': 'raw',
                   'X-image-Meta-container_format': 'ovf',
                   'X-Image-Meta-Is-Public': 'True'}
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(response.status, 201)
        # 2. HEAD the image (using the Location header from the POST)
        # Verify image size is what was passed in, and not truncated
        path = response.get('location')
        response, content = self.http.request(path, 'HEAD')
        self.assertEqual(response.status, 200)
        self.assertEqual(response['x-image-meta-size'], str(FIVE_GB))
        self.assertEqual(response['x-image-meta-name'], 'Image1')
        self.assertEqual(response['x-image-meta-is_public'], 'True')
def test_v1_not_enabled(self):
self.config(enable_v1_api=False)
path = "/v1/images"
response, content = self.http.request(path, 'GET')
self.assertEqual(response.status, 300)
def test_v1_enabled(self):
self.config(enable_v1_api=True)
path = "/v1/images"
response, content = self.http.request(path, 'GET')
self.assertEqual(response.status, 200)
    def test_zero_initial_size(self):
        """
        A test to ensure that an image with size explicitly set to zero
        has status that immediately transitions to active.
        """
        # 1. POST /images with public image named Image1
        # attribute and a size of zero.
        # Verify a 201 Created is returned
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Size': '0',
                   'X-Image-Meta-Name': 'Image1',
                   'X-Image-Meta-disk_format': 'raw',
                   'X-image-Meta-container_format': 'ovf',
                   'X-Image-Meta-Is-Public': 'True'}
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(response.status, 201)
        # 2. HEAD image-location
        # Verify image size is zero and the status is active (no "queued"
        # intermediate state since there is no data to wait for)
        path = response.get('location')
        response, content = self.http.request(path, 'HEAD')
        self.assertEqual(response.status, 200)
        self.assertEqual(response['x-image-meta-size'], '0')
        self.assertEqual(response['x-image-meta-status'], 'active')
        # 3. GET image-location
        # Verify image content is empty
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        self.assertEqual(len(content), 0)
    def test_traceback_not_consumed(self):
        """
        A test that errors coming from the POST API do not
        get consumed and print the actual error message, and
        not something like <traceback object at 0x1918d40>

        :see https://bugs.launchpad.net/glance/+bug/755912
        """
        # POST /images with binary data, but not setting
        # Content-Type to application/octet-stream, verify a
        # 400 returned and that the error is readable.
        with tempfile.NamedTemporaryFile() as test_data_file:
            test_data_file.write("XXX")
            test_data_file.flush()
            path = "/v1/images"
            headers = minimal_headers('Image1')
            headers['Content-Type'] = 'not octet-stream'
            # NOTE(review): the body passed here is the temp file's *name*
            # (a path string), not its contents -- presumably any non-empty
            # body triggers the Content-Type check; confirm.
            response, content = self.http.request(path, 'POST',
                                                  body=test_data_file.name,
                                                  headers=headers)
            self.assertEqual(response.status, 400)
            expected = "Content-Type must be application/octet-stream"
            self.assertTrue(expected in content,
                            "Could not find '%s' in '%s'" % (expected, content))
    def test_filtered_images(self):
        """
        Set up four test images and ensure each query param filter works
        """
        # 0. GET /images
        # Verify no public images
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        self.assertEqual(content, '{"images": []}')
        image_ids = []
        # 1. POST /images with three public images, and one private image
        # with various attributes
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Name': 'Image1',
                   'X-Image-Meta-Status': 'active',
                   'X-Image-Meta-Container-Format': 'ovf',
                   'X-Image-Meta-Disk-Format': 'vdi',
                   'X-Image-Meta-Size': '19',
                   'X-Image-Meta-Is-Public': 'True',
                   'X-Image-Meta-Protected': 'True',
                   'X-Image-Meta-Property-pants': 'are on'}
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(response.status, 201)
        data = json.loads(content)
        self.assertEqual(data['image']['properties']['pants'], "are on")
        self.assertEqual(data['image']['is_public'], True)
        image_ids.append(data['image']['id'])
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Name': 'My Image!',
                   'X-Image-Meta-Status': 'active',
                   'X-Image-Meta-Container-Format': 'ovf',
                   'X-Image-Meta-Disk-Format': 'vhd',
                   'X-Image-Meta-Size': '20',
                   'X-Image-Meta-Is-Public': 'True',
                   'X-Image-Meta-Protected': 'False',
                   'X-Image-Meta-Property-pants': 'are on'}
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(response.status, 201)
        data = json.loads(content)
        self.assertEqual(data['image']['properties']['pants'], "are on")
        self.assertEqual(data['image']['is_public'], True)
        image_ids.append(data['image']['id'])
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Name': 'My Image!',
                   'X-Image-Meta-Status': 'saving',
                   'X-Image-Meta-Container-Format': 'ami',
                   'X-Image-Meta-Disk-Format': 'ami',
                   'X-Image-Meta-Size': '21',
                   'X-Image-Meta-Is-Public': 'True',
                   'X-Image-Meta-Protected': 'False',
                   'X-Image-Meta-Property-pants': 'are off'}
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(response.status, 201)
        data = json.loads(content)
        self.assertEqual(data['image']['properties']['pants'], "are off")
        self.assertEqual(data['image']['is_public'], True)
        image_ids.append(data['image']['id'])
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Name': 'My Private Image',
                   'X-Image-Meta-Status': 'active',
                   'X-Image-Meta-Container-Format': 'ami',
                   'X-Image-Meta-Disk-Format': 'ami',
                   'X-Image-Meta-Size': '22',
                   'X-Image-Meta-Is-Public': 'False',
                   'X-Image-Meta-Protected': 'False'}
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(response.status, 201)
        data = json.loads(content)
        self.assertEqual(data['image']['is_public'], False)
        image_ids.append(data['image']['id'])
        # 2. GET /images
        # Verify three public images
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 3)
        # 3. GET /images with name filter
        # Verify correct images returned with name
        params = "name=My%20Image!"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 2)
        for image in data['images']:
            self.assertEqual(image['name'], "My Image!")
        # 4. GET /images with status filter
        # Verify correct images returned with status (the client-supplied
        # X-Image-Meta-Status values above are ignored: all three public
        # images were created without data, so all are "queued")
        params = "status=queued"
        path = "/v1/images/detail?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 3)
        for image in data['images']:
            self.assertEqual(image['status'], "queued")
        params = "status=active"
        path = "/v1/images/detail?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 0)
        # 5. GET /images with container_format filter
        # Verify correct images returned with container_format
        params = "container_format=ovf"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 2)
        for image in data['images']:
            self.assertEqual(image['container_format'], "ovf")
        # 6. GET /images with disk_format filter
        # Verify correct images returned with disk_format
        params = "disk_format=vdi"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 1)
        for image in data['images']:
            self.assertEqual(image['disk_format'], "vdi")
        # 7. GET /images with size_max filter
        # Verify correct images returned with size <= expected
        params = "size_max=20"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 2)
        for image in data['images']:
            self.assertTrue(image['size'] <= 20)
        # 8. GET /images with size_min filter
        # Verify correct images returned with size >= expected
        params = "size_min=20"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 2)
        for image in data['images']:
            self.assertTrue(image['size'] >= 20)
        # 9. Get /images with is_public=None filter
        # Verify correct images returned with property
        # Bug lp:803656 Support is_public in filtering
        params = "is_public=None"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 4)
        # 10. Get /images with is_public=False filter
        # Verify correct images returned with property
        # Bug lp:803656 Support is_public in filtering
        params = "is_public=False"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 1)
        for image in data['images']:
            self.assertEqual(image['name'], "My Private Image")
        # 11. Get /images with is_public=True filter
        # Verify correct images returned with property
        # Bug lp:803656 Support is_public in filtering
        params = "is_public=True"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 3)
        for image in data['images']:
            self.assertNotEqual(image['name'], "My Private Image")
        # 12. Get /images with protected=False filter
        # Verify correct images returned with property
        params = "protected=False"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 2)
        for image in data['images']:
            self.assertNotEqual(image['name'], "Image1")
        # 13. Get /images with protected=True filter
        # Verify correct images returned with property
        params = "protected=True"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 1)
        for image in data['images']:
            self.assertEqual(image['name'], "Image1")
        # 14. GET /images with property filter
        # Verify correct images returned with property
        params = "property-pants=are%20on"
        path = "/v1/images/detail?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 2)
        for image in data['images']:
            self.assertEqual(image['properties']['pants'], "are on")
        # 15. GET /images with property filter and name filter
        # Verify correct images returned with property and name
        # Make sure you quote the url when using more than one param!
        params = "name=My%20Image!&property-pants=are%20on"
        path = "/v1/images/detail?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 1)
        for image in data['images']:
            self.assertEqual(image['properties']['pants'], "are on")
            self.assertEqual(image['name'], "My Image!")
        # 16. GET /images with past changes-since filter
        yesterday = timeutils.isotime(timeutils.utcnow() -
                                      datetime.timedelta(1))
        params = "changes-since=%s" % yesterday
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 3)
        # one timezone west of Greenwich equates to an hour ago
        # taking care to pre-urlencode '+' as '%2B', otherwise the timezone
        # '+' is wrongly decoded as a space
        # TODO(eglynn): investigate '+' --> <SPACE> decoding, an artifact
        # of WSGI/webob dispatch?
        now = timeutils.utcnow()
        hour_ago = now.strftime('%Y-%m-%dT%H:%M:%S%%2B01:00')
        params = "changes-since=%s" % hour_ago
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 3)
        # 17. GET /images with future changes-since filter
        tomorrow = timeutils.isotime(timeutils.utcnow() +
                                     datetime.timedelta(1))
        params = "changes-since=%s" % tomorrow
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 0)
        # one timezone east of Greenwich equates to an hour from now
        now = timeutils.utcnow()
        hour_hence = now.strftime('%Y-%m-%dT%H:%M:%S-01:00')
        params = "changes-since=%s" % hour_hence
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 0)
        # 18. GET /images with negative size_min filter
        # Verify a 400 with a readable error message is returned
        params = "size_min=-1"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 400)
        self.assertTrue("filter size_min got -1" in content)
        # 19. GET /images with negative size_max filter
        # Verify a 400 with a readable error message is returned
        params = "size_max=-1"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 400)
        self.assertTrue("filter size_max got -1" in content)
        # 20. GET /images with negative min_ram filter
        # Verify a 400 with a readable error message is returned
        params = "min_ram=-1"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 400)
        self.assertTrue("Bad value passed to filter min_ram got -1" in content)
        # 21. GET /images with non-boolean protected filter
        # Verify a 400 with a readable error message is returned
        params = "protected=imalittleteapot"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 400)
        self.assertTrue("protected got imalittleteapot" in content)
        # 22. GET /images with non-boolean is_public filter
        # Verify a 400 with a readable error message is returned
        params = "is_public=imalittleteapot"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 400)
        self.assertTrue("is_public got imalittleteapot" in content)
def test_limited_images(self):
"""
Ensure marker and limit query params work
"""
# 0. GET /images
# Verify no public images
path = "/v1/images"
response, content = self.http.request(path, 'GET')
self.assertEqual(response.status, 200)
self.assertEqual(content, '{"images": []}')
image_ids = []
# 1. POST /images with three public images with various attributes
headers = minimal_headers('Image1')
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(response.status, 201)
image_ids.append(json.loads(content)['image']['id'])
headers = minimal_headers('Image2')
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(response.status, 201)
image_ids.append(json.loads(content)['image']['id'])
headers = minimal_headers('Image3')
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(response.status, 201)
image_ids.append(json.loads(content)['image']['id'])
# 2. GET /images with all images
path = "/v1/images"
response, content = self.http.request(path, 'GET')
self.assertEqual(response.status, 200)
images = json.loads(content)['images']
self.assertEqual(len(images), 3)
# 3. GET /images with limit of 2
# Verify only two images were returned
params = "limit=2"
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)['images']
self.assertEqual(len(data), 2)
self.assertEqual(data[0]['id'], images[0]['id'])
self.assertEqual(data[1]['id'], images[1]['id'])
# 4. GET /images with marker
# Verify only two images were returned
params = "marker=%s" % images[0]['id']
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)['images']
self.assertEqual(len(data), 2)
self.assertEqual(data[0]['id'], images[1]['id'])
self.assertEqual(data[1]['id'], images[2]['id'])
# 5. GET /images with marker and limit
# Verify only one image was returned with the correct id
params = "limit=1&marker=%s" % images[1]['id']
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)['images']
self.assertEqual(len(data), 1)
self.assertEqual(data[0]['id'], images[2]['id'])
# 6. GET /images/detail with marker and limit
# Verify only one image was returned with the correct id
params = "limit=1&marker=%s" % images[1]['id']
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)['images']
self.assertEqual(len(data), 1)
self.assertEqual(data[0]['id'], images[2]['id'])
# DELETE images
for image_id in image_ids:
path = "/v1/images/%s" % (image_id)
response, content = self.http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
    def test_ordered_images(self):
        """
        Set up three test images and ensure the sort_key/sort_dir query
        params (alone and combined with a marker) order results correctly
        """
        # 0. GET /images
        # Verify no public images
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        self.assertEqual(content, '{"images": []}')
        # 1. POST /images with three public images with various attributes
        image_ids = []
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Name': 'Image1',
                   'X-Image-Meta-Status': 'active',
                   'X-Image-Meta-Container-Format': 'ovf',
                   'X-Image-Meta-Disk-Format': 'vdi',
                   'X-Image-Meta-Size': '19',
                   'X-Image-Meta-Is-Public': 'True'}
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(response.status, 201)
        image_ids.append(json.loads(content)['image']['id'])
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Name': 'ASDF',
                   'X-Image-Meta-Status': 'active',
                   'X-Image-Meta-Container-Format': 'bare',
                   'X-Image-Meta-Disk-Format': 'iso',
                   'X-Image-Meta-Size': '2',
                   'X-Image-Meta-Is-Public': 'True'}
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(response.status, 201)
        image_ids.append(json.loads(content)['image']['id'])
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Name': 'XYZ',
                   'X-Image-Meta-Status': 'saving',
                   'X-Image-Meta-Container-Format': 'ami',
                   'X-Image-Meta-Disk-Format': 'ami',
                   'X-Image-Meta-Size': '5',
                   'X-Image-Meta-Is-Public': 'True'}
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(response.status, 201)
        image_ids.append(json.loads(content)['image']['id'])
        # 2. GET /images with no query params
        # Verify three public images sorted by created_at desc
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 3)
        self.assertEqual(data['images'][0]['id'], image_ids[2])
        self.assertEqual(data['images'][1]['id'], image_ids[1])
        self.assertEqual(data['images'][2]['id'], image_ids[0])
        # 3. GET /images sorted by name asc (ASDF, Image1, XYZ)
        params = 'sort_key=name&sort_dir=asc'
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 3)
        self.assertEqual(data['images'][0]['id'], image_ids[1])
        self.assertEqual(data['images'][1]['id'], image_ids[0])
        self.assertEqual(data['images'][2]['id'], image_ids[2])
        # 4. GET /images sorted by size desc (19, 5, 2)
        params = 'sort_key=size&sort_dir=desc'
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 3)
        self.assertEqual(data['images'][0]['id'], image_ids[0])
        self.assertEqual(data['images'][1]['id'], image_ids[2])
        self.assertEqual(data['images'][2]['id'], image_ids[1])
        # 5. GET /images sorted by size desc with a marker
        params = 'sort_key=size&sort_dir=desc&marker=%s' % image_ids[0]
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 2)
        self.assertEqual(data['images'][0]['id'], image_ids[2])
        self.assertEqual(data['images'][1]['id'], image_ids[1])
        # 6. GET /images sorted by name asc with a marker at the last name
        # (nothing sorts after "XYZ", so the page is empty)
        params = 'sort_key=name&sort_dir=asc&marker=%s' % image_ids[2]
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        data = json.loads(content)
        self.assertEqual(len(data['images']), 0)
        # DELETE images
        for image_id in image_ids:
            path = "/v1/images/%s" % (image_id)
            response, content = self.http.request(path, 'DELETE')
            self.assertEqual(response.status, 200)
def test_duplicate_image_upload(self):
"""
Upload initial image, then attempt to upload duplicate image
"""
# 0. GET /images
# Verify no public images
path = "/v1/images"
response, content = self.http.request(path, 'GET')
self.assertEqual(response.status, 200)
self.assertEqual(content, '{"images": []}')
# 1. POST /images with public image named Image1
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Name': 'Image1',
'X-Image-Meta-Status': 'active',
'X-Image-Meta-Container-Format': 'ovf',
'X-Image-Meta-Disk-Format': 'vdi',
'X-Image-Meta-Size': '19',
'X-Image-Meta-Is-Public': 'True'}
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(response.status, 201)
image = json.loads(content)['image']
# 2. POST /images with public image named Image1, and ID: 1
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Name': 'Image1 Update',
'X-Image-Meta-Status': 'active',
'X-Image-Meta-Container-Format': 'ovf',
'X-Image-Meta-Disk-Format': 'vdi',
'X-Image-Meta-Size': '19',
'X-Image-Meta-Id': image['id'],
'X-Image-Meta-Is-Public': 'True'}
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(response.status, 409)
    def test_delete_not_existing(self):
        """
        We test the following:
        0. GET /images
            - Verify 200 with an empty image list
        1. DELETE /images/1
            - Verify 404
        """
        # 0. GET /images
        # Verify no public images
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(response.status, 200)
        self.assertEqual(content, '{"images": []}')
        # 1. DELETE /images/1
        # Verify 404 returned (image 1 was never created)
        path = "/v1/images/1"
        response, content = self.http.request(path, 'DELETE')
        self.assertEqual(response.status, 404)
def _do_test_post_image_content_bad_format(self, format):
"""
We test that missing container/disk format fails with 400 "Bad Request"
:see https://bugs.launchpad.net/glance/+bug/933702
"""
# Verify no public images
path = "/v1/images"
response, content = self.http.request(path, 'GET')
self.assertEqual(response.status, 200)
images = json.loads(content)['images']
self.assertEqual(len(images), 0)
path = "/v1/images"
# POST /images without given format being specified
headers = minimal_headers('Image1')
headers['X-Image-Meta-' + format] = 'bad_value'
with tempfile.NamedTemporaryFile() as test_data_file:
test_data_file.write("XXX")
test_data_file.flush()
response, content = self.http.request(path, 'POST',
headers=headers,
body=test_data_file.name)
self.assertEqual(response.status, 400)
type = format.replace('_format', '')
expected = "Invalid %s format 'bad_value' for image" % type
self.assertTrue(expected in content,
"Could not find '%s' in '%s'" % (expected, content))
# make sure the image was not created
# Verify no public images
path = "/v1/images"
response, content = self.http.request(path, 'GET')
self.assertEqual(response.status, 200)
images = json.loads(content)['images']
self.assertEqual(len(images), 0)
    def test_post_image_content_bad_container_format(self):
        # An invalid container_format on POST must yield 400 (bug 933702).
        self._do_test_post_image_content_bad_format('container_format')
    def test_post_image_content_bad_disk_format(self):
        # An invalid disk_format on POST must yield 400 (bug 933702).
        self._do_test_post_image_content_bad_format('disk_format')
def _do_test_put_image_content_missing_format(self, format):
"""
We test that missing container/disk format only fails with
400 "Bad Request" when the image content is PUT (i.e. not
on the original POST of a queued image).
:see https://bugs.launchpad.net/glance/+bug/937216
"""
# POST queued image
path = "/v1/images"
headers = {
'X-Image-Meta-Name': 'Image1',
'X-Image-Meta-Is-Public': 'True',
}
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(response.status, 201)
data = json.loads(content)
image_id = data['image']['id']
self.addDetail('image_data', testtools.content.json_content(data))
# PUT image content images without given format being specified
path = "/v1/images/%s" % (image_id)
headers = minimal_headers('Image1')
del headers['X-Image-Meta-' + format]
with tempfile.NamedTemporaryFile() as test_data_file:
test_data_file.write("XXX")
test_data_file.flush()
response, content = self.http.request(path, 'PUT',
headers=headers,
body=test_data_file.name)
self.assertEqual(response.status, 400)
type = format.replace('_format', '')
expected = "Invalid %s format 'None' for image" % type
self.assertTrue(expected in content,
"Could not find '%s' in '%s'" % (expected, content))
    def test_put_image_content_bad_container_format(self):
        # Missing container_format must only fail on the content PUT.
        self._do_test_put_image_content_missing_format('container_format')
    def test_put_image_content_bad_disk_format(self):
        # Missing disk_format must only fail on the content PUT.
        self._do_test_put_image_content_missing_format('disk_format')
def _do_test_mismatched_attribute(self, attribute, value):
"""
Test mismatched attribute.
"""
image_data = "*" * FIVE_KB
headers = minimal_headers('Image1')
headers[attribute] = value
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers,
body=image_data)
self.assertEqual(response.status, 400)
images_dir = os.path.join(self.test_dir, 'images')
image_count = len([name for name in os.listdir(images_dir)
if os.path.isfile(os.path.join(images_dir, name))])
self.assertEquals(image_count, 0)
    def test_mismatched_size(self):
        """
        Test mismatched size.
        """
        # Declared size is one byte larger than the actual payload.
        self._do_test_mismatched_attribute('x-image-meta-size',
                                           str(FIVE_KB + 1))
    def test_mismatched_checksum(self):
        """
        Test mismatched checksum.
        """
        # 'foobar' can never match the payload's real checksum.
        self._do_test_mismatched_attribute('x-image-meta-checksum',
                                           'foobar')
class TestApiWithFakeAuth(base.ApiTest):
    """API tests run with the 'fakeauth' flavor, where identity is taken
    directly from the X-Auth-Token header value ('user:tenant:role').
    """
    def __init__(self, *args, **kwargs):
        super(TestApiWithFakeAuth, self).__init__(*args, **kwargs)
        # Both the API and registry servers use the fake auth middleware.
        self.api_flavor = 'fakeauth'
        self.registry_flavor = 'fakeauth'
    def test_ownership(self):
        """Only admin users may set or change an image's owner."""
        # Add an image with admin privileges and ensure the owner
        # can be set to something other than what was used to authenticate
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:admin',
        }
        create_headers = {
            'X-Image-Meta-Name': 'MyImage',
            'X-Image-Meta-disk_format': 'raw',
            'X-Image-Meta-container_format': 'ovf',
            'X-Image-Meta-Is-Public': 'True',
            'X-Image-Meta-Owner': 'tenant2',
        }
        create_headers.update(auth_headers)
        path = "/v1/images"
        response, content = self.http.request(path, 'POST',
                                              headers=create_headers)
        self.assertEqual(response.status, 201)
        data = json.loads(content)
        image_id = data['image']['id']
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(response.status, 200)
        self.assertEqual('tenant2', response['x-image-meta-owner'])
        # Now add an image without admin privileges and ensure the owner
        # cannot be set to something other than what was used to authenticate
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:role1',
        }
        create_headers.update(auth_headers)
        path = "/v1/images"
        response, content = self.http.request(path, 'POST',
                                              headers=create_headers)
        self.assertEqual(response.status, 201)
        data = json.loads(content)
        image_id = data['image']['id']
        # We have to be admin to see the owner
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:admin',
        }
        create_headers.update(auth_headers)
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(response.status, 200)
        self.assertEqual('tenant1', response['x-image-meta-owner'])
        # Make sure the non-privileged user can't update their owner either
        update_headers = {
            'X-Image-Meta-Name': 'MyImage2',
            'X-Image-Meta-Owner': 'tenant2',
            'X-Auth-Token': 'user1:tenant1:role1',
        }
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'PUT',
                                              headers=update_headers)
        self.assertEqual(response.status, 200)
        # We have to be admin to see the owner
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:admin',
        }
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(response.status, 200)
        self.assertEqual('tenant1', response['x-image-meta-owner'])
        # An admin user should be able to update the owner
        auth_headers = {
            'X-Auth-Token': 'user1:tenant3:admin',
        }
        update_headers = {
            'X-Image-Meta-Name': 'MyImage2',
            'X-Image-Meta-Owner': 'tenant2',
        }
        update_headers.update(auth_headers)
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'PUT',
                                              headers=update_headers)
        self.assertEqual(response.status, 200)
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(response.status, 200)
        self.assertEqual('tenant2', response['x-image-meta-owner'])
    def test_image_visibility_to_different_users(self):
        """Verify which images each class of user can list.

        Creates a public and a private image for each of four owners,
        then lists images as different tenants/roles with and without
        the is_public filter.
        """
        owners = ['admin', 'tenant1', 'tenant2', 'none']
        visibilities = {'public': 'True', 'private': 'False'}
        image_ids = {}
        for owner in owners:
            for visibility, is_public in visibilities.items():
                name = '%s-%s' % (owner, visibility)
                headers = {
                    'Content-Type': 'application/octet-stream',
                    'X-Image-Meta-Name': name,
                    'X-Image-Meta-Status': 'active',
                    'X-Image-Meta-Is-Public': is_public,
                    'X-Image-Meta-Owner': owner,
                    'X-Auth-Token': 'createuser:createtenant:admin',
                }
                path = "/v1/images"
                response, content = self.http.request(path, 'POST',
                                                      headers=headers)
                self.assertEqual(response.status, 201)
                data = json.loads(content)
                image_ids[name] = data['image']['id']
        def list_images(tenant, role='', is_public=None):
            # Helper: list detailed images as the given tenant/role.
            auth_token = 'user:%s:%s' % (tenant, role)
            headers = {'X-Auth-Token': auth_token}
            path = "/v1/images/detail"
            if is_public is not None:
                path += '?is_public=%s' % is_public
            response, content = self.http.request(path, 'GET', headers=headers)
            self.assertEqual(response.status, 200)
            return json.loads(content)['images']
        # 1. Known user sees public and their own images
        images = list_images('tenant1')
        self.assertEquals(len(images), 5)
        for image in images:
            self.assertTrue(image['is_public'] or image['owner'] == 'tenant1')
        # 2. Unknown user sees only public images
        images = list_images('none')
        self.assertEquals(len(images), 4)
        for image in images:
            self.assertTrue(image['is_public'])
        # 3. Unknown admin sees only public images
        images = list_images('none', role='admin')
        self.assertEquals(len(images), 4)
        for image in images:
            self.assertTrue(image['is_public'])
        # 4. Unknown admin, is_public=none, shows all images
        images = list_images('none', role='admin', is_public='none')
        self.assertEquals(len(images), 8)
        # 5. Unknown admin, is_public=true, shows only public images
        images = list_images('none', role='admin', is_public='true')
        self.assertEquals(len(images), 4)
        for image in images:
            self.assertTrue(image['is_public'])
        # 6. Unknown admin, is_public=false, sees only private images
        images = list_images('none', role='admin', is_public='false')
        self.assertEquals(len(images), 4)
        for image in images:
            self.assertFalse(image['is_public'])
        # 7. Known admin sees public and their own images
        images = list_images('admin', role='admin')
        self.assertEquals(len(images), 5)
        for image in images:
            self.assertTrue(image['is_public'] or image['owner'] == 'admin')
        # 8. Known admin, is_public=none, shows all images
        images = list_images('admin', role='admin', is_public='none')
        self.assertEquals(len(images), 8)
        # 9. Known admin, is_public=true, sees all public and their images
        images = list_images('admin', role='admin', is_public='true')
        self.assertEquals(len(images), 5)
        for image in images:
            self.assertTrue(image['is_public'] or image['owner'] == 'admin')
        # 10. Known admin, is_public=false, sees all private images
        images = list_images('admin', role='admin', is_public='false')
        self.assertEquals(len(images), 4)
        for image in images:
            self.assertFalse(image['is_public'])
    def test_property_protections(self):
        """Exercise create/read/update/delete property protection rules."""
        # Enable property protection
        self.config(property_protection_file=self.property_file)
        self.setUp()
        CREATE_HEADERS = {
            'X-Image-Meta-Name': 'MyImage',
            'X-Image-Meta-disk_format': 'raw',
            'X-Image-Meta-container_format': 'ovf',
            'X-Image-Meta-Is-Public': 'True',
            'X-Image-Meta-Owner': 'tenant2',
        }
        # Create an image for role member with extra properties
        # Raises 403 since user is not allowed to create 'foo'
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:member',
        }
        custom_props = {
            'x-image-meta-property-foo': 'bar'
        }
        auth_headers.update(custom_props)
        auth_headers.update(CREATE_HEADERS)
        path = "/v1/images"
        response, content = self.http.request(path, 'POST',
                                              headers=auth_headers)
        self.assertEqual(response.status, 403)
        # Create an image for role member without 'foo'
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:member',
        }
        custom_props = {
            'x-image-meta-property-x_owner_foo': 'o_s_bar',
        }
        auth_headers.update(custom_props)
        auth_headers.update(CREATE_HEADERS)
        path = "/v1/images"
        response, content = self.http.request(path, 'POST',
                                              headers=auth_headers)
        self.assertEqual(response.status, 201)
        # Returned image entity should have 'x_owner_foo'
        data = json.loads(content)
        self.assertEqual(data['image']['properties']['x_owner_foo'],
                         'o_s_bar')
        # Create an image for role spl_role with extra properties
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:spl_role',
        }
        custom_props = {
            'X-Image-Meta-Property-spl_create_prop': 'create_bar',
            'X-Image-Meta-Property-spl_read_prop': 'read_bar',
            'X-Image-Meta-Property-spl_update_prop': 'update_bar',
            'X-Image-Meta-Property-spl_delete_prop': 'delete_bar'
        }
        auth_headers.update(custom_props)
        auth_headers.update(CREATE_HEADERS)
        path = "/v1/images"
        response, content = self.http.request(path, 'POST',
                                              headers=auth_headers)
        self.assertEqual(response.status, 201)
        data = json.loads(content)
        image_id = data['image']['id']
        # Attempt to update two properties, one protected(spl_read_prop), the
        # other not(spl_update_prop). Request should be forbidden.
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:spl_role',
        }
        custom_props = {
            'X-Image-Meta-Property-spl_read_prop': 'r',
            'X-Image-Meta-Property-spl_update_prop': 'u',
            'X-Glance-Registry-Purge-Props': 'False'
        }
        # NOTE(review): updating auth_headers with itself is a no-op --
        # possibly a leftover; confirm whether CREATE_HEADERS was intended.
        auth_headers.update(auth_headers)
        auth_headers.update(custom_props)
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'PUT',
                                              headers=auth_headers)
        self.assertEqual(response.status, 403)
        # Attempt to create properties which are forbidden
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:spl_role',
        }
        custom_props = {
            'X-Image-Meta-Property-spl_new_prop': 'new',
            'X-Glance-Registry-Purge-Props': 'True'
        }
        # NOTE(review): self-update is a no-op (see above).
        auth_headers.update(auth_headers)
        auth_headers.update(custom_props)
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'PUT',
                                              headers=auth_headers)
        self.assertEqual(response.status, 403)
        # Attempt to update, create and delete properties
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:spl_role',
        }
        custom_props = {
            'X-Image-Meta-Property-spl_create_prop': 'create_bar',
            'X-Image-Meta-Property-spl_read_prop': 'read_bar',
            'X-Image-Meta-Property-spl_update_prop': 'u',
            'X-Glance-Registry-Purge-Props': 'True'
        }
        # NOTE(review): self-update is a no-op (see above).
        auth_headers.update(auth_headers)
        auth_headers.update(custom_props)
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'PUT',
                                              headers=auth_headers)
        self.assertEqual(response.status, 200)
        # Returned image entity should reflect the changes
        image = json.loads(content)
        # 'spl_update_prop' has update permission for spl_role
        # hence the value has changed
        self.assertEqual('u', image['image']['properties']['spl_update_prop'])
        # 'spl_delete_prop' has delete permission for spl_role
        # hence the property has been deleted
        self.assertTrue('spl_delete_prop' not in image['image']['properties'])
        # 'spl_create_prop' has create permission for spl_role
        # hence the property has been created
        self.assertEqual('create_bar',
                         image['image']['properties']['spl_create_prop'])
        # Image Deletion should work
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:spl_role',
        }
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'DELETE',
                                              headers=auth_headers)
        self.assertEqual(response.status, 200)
        # This image should be no longer be directly accessible
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:spl_role',
        }
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(response.status, 404)
| |
# coding=utf-8
"""
The ELB collector collects metrics for one or more Amazon AWS ELBs
#### Configuration
Below is an example configuration for the ELBCollector.
You can specify an arbitrary amount of regions
```
enabled = true
interval = 60
# Optional
access_key_id = ...
secret_access_key = ...
# Optional - Available keys: region, zone, elb_name, metric_name
format = $elb_name.$zone.$metric_name
# Optional - list of regular expressions used to ignore ELBs
elbs_ignored = ^elb-a$, .*-test$, ^test-.*
[regions]
[[us-west-1]]
# Optional - queries all elbs if omitted
elb_names = elb1, elb2, ...
[[us-west-2]]
...
```
#### Dependencies
* boto
"""
import calendar
import cPickle
import datetime
import functools
import re
import time
import threading
from collections import namedtuple
from string import Template
import diamond.collector
from diamond.collector import str_to_bool
from diamond.metric import Metric
try:
import boto.ec2.elb
from boto.ec2 import cloudwatch
except ImportError:
cloudwatch = False
class memoized(object):
    """Memoizing decorator: caches each distinct call to the wrapped
    function, keyed on the pickled (args, kwargs) pair, and returns the
    cached value on repeat calls instead of re-evaluating.

    Nota bene: the cache is unbounded -- /all/ distinct calls stay cached
    for the lifetime of the process. For a bounded variant see:
    bit.ly/1wtHmlM
    """

    def __init__(self, func):
        self.func = func
        self.cache = {}

    def __call__(self, *args, **kwargs):
        # Pickling fails fast when the arguments cannot serve as a key.
        key = cPickle.dumps((args, kwargs))
        if key not in self.cache:
            self.cache[key] = self.func(*args, **kwargs)
        return self.cache[key]

    def __repr__(self):
        """Return the wrapped function's docstring."""
        return self.func.__doc__

    def __get__(self, obj, objtype):
        """Descriptor support so instance methods can be memoized too."""
        return functools.partial(self.__call__, obj)
def utc_to_local(utc_dt):
    """Convert a UTC datetime into the equivalent local-timezone datetime.

    :param utc_dt: datetime expressed in UTC
    :return: the same instant expressed in the local timezone
    """
    # Sanity check: datetimes resolve to at least microseconds.
    assert utc_dt.resolution >= datetime.timedelta(microseconds=1)
    # Go through the integer POSIX timestamp to avoid precision loss,
    # then re-attach the sub-second part afterwards.
    posix_ts = calendar.timegm(utc_dt.timetuple())
    local_dt = datetime.datetime.fromtimestamp(posix_ts)
    return local_dt.replace(microsecond=utc_dt.microsecond)
@memoized
def get_zones(region, auth_kwargs):
    """
    :param region: region to get the availability zones for
    :param auth_kwargs: boto credential kwargs ({} to use IAM role creds)
    :return: list of availability zone names
    """
    # Memoized: zones are fetched once per (region, auth) pair per process.
    ec2_conn = boto.ec2.connect_to_region(region, **auth_kwargs)
    return [zone.name for zone in ec2_conn.get_all_zones()]
class ElbCollector(diamond.collector.Collector):
    """Collects AWS ELB metrics from CloudWatch, spawning one worker
    thread per availability zone of each configured region.
    """
    # default_to_zero means if cloudwatch does not return a stat for the
    # given metric, then just default it to zero.
    MetricInfo = namedtuple(
        'MetricInfo',
        'name aws_type diamond_type precision default_to_zero')
    # AWS metrics for ELBs
    metrics = [
        MetricInfo('HealthyHostCount', 'Average', 'GAUGE', 0, False),
        MetricInfo('UnHealthyHostCount', 'Average', 'GAUGE', 0, False),
        MetricInfo('RequestCount', 'Sum', 'GAUGE', 0, True),
        MetricInfo('Latency', 'Average', 'GAUGE', 4, False),
        MetricInfo('HTTPCode_ELB_4XX', 'Sum', 'GAUGE', 0, True),
        MetricInfo('HTTPCode_ELB_5XX', 'Sum', 'GAUGE', 0, True),
        MetricInfo('HTTPCode_Backend_2XX', 'Sum', 'GAUGE', 0, True),
        MetricInfo('HTTPCode_Backend_3XX', 'Sum', 'GAUGE', 0, True),
        MetricInfo('HTTPCode_Backend_4XX', 'Sum', 'GAUGE', 0, True),
        MetricInfo('HTTPCode_Backend_5XX', 'Sum', 'GAUGE', 0, True),
        MetricInfo('BackendConnectionErrors', 'Sum', 'GAUGE', 0, True),
        MetricInfo('SurgeQueueLength', 'Maximum', 'GAUGE', 0, True),
        MetricInfo('SpilloverCount', 'Sum', 'GAUGE', 0, True)
    ]
    def process_config(self):
        """Validate the interval and set up boto auth kwargs."""
        super(ElbCollector, self).process_config()
        if str_to_bool(self.config['enabled']):
            self.interval = self.config.as_int('interval')
            # NOTE(review): presumably required because CloudWatch reports
            # ELB metrics at 60 s granularity -- confirm before relaxing.
            if self.interval % 60 != 0:
                raise Exception('Interval must be a multiple of 60 seconds: %s'
                                % self.interval)
            if (('access_key_id' in self.config and
                 'secret_access_key' in self.config)):
                self.auth_kwargs = {
                    'aws_access_key_id': self.config['access_key_id'],
                    'aws_secret_access_key': self.config['secret_access_key']
                }
            else:
                # If creds not present, assume we're using IAM roles with
                # instance profiles. Boto will automatically take care of
                # using the creds from the instance metadata.
                self.auth_kwargs = {}
    def check_boto(self):
        """Return True iff boto's cloudwatch module imported successfully."""
        if not cloudwatch:
            self.log.error("boto module not found!")
            return False
        return True
    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(ElbCollector, self).get_default_config()
        config.update({
            'path': 'elb',
            'regions': ['us-west-1'],
            'interval': 60,
            'format': '$zone.$elb_name.$metric_name',
        })
        return config
    def publish_delayed_metric(self, name, value, timestamp, raw_value=None,
                               precision=0, metric_type='GAUGE',
                               instance=None):
        """
        Metrics may not be immediately available when querying cloudwatch.
        Hence, allow the ability to publish a metric from some the past given
        its timestamp.
        """
        # Get metric Path
        path = self.get_metric_path(name, instance)
        # Get metric TTL
        ttl = float(self.config['interval']) * float(
            self.config['ttl_multiplier'])
        # Create Metric
        metric = Metric(path, value, raw_value=raw_value, timestamp=timestamp,
                        precision=precision, host=self.get_hostname(),
                        metric_type=metric_type, ttl=ttl)
        # Publish Metric
        self.publish_metric(metric)
    def get_elb_names(self, region, config):
        """
        :param region: name of a region
        :param config: Collector config dict
        :return: list of elb names to query in the given region
        """
        # This function is ripe to be memoized but when ELBs are added/removed
        # dynamically over time, diamond will have to be restarted to pick
        # up the changes.
        region_dict = config.get('regions', {}).get(region, {})
        if 'elb_names' not in region_dict:
            elb_conn = boto.ec2.elb.connect_to_region(region,
                                                      **self.auth_kwargs)
            full_elb_names = \
                [elb.name for elb in elb_conn.get_all_load_balancers()]
            # Regular expressions for ELBs we DO NOT want to get metrics on.
            matchers = \
                [re.compile(regex) for regex in config.get('elbs_ignored', [])]
            # cycle through elbs get the list of elbs that don't match
            elb_names = []
            for elb_name in full_elb_names:
                if matchers and any([m.match(elb_name) for m in matchers]):
                    continue
                elb_names.append(elb_name)
        else:
            # Explicit per-region elb_names list overrides discovery.
            elb_names = region_dict['elb_names']
        return elb_names
    def process_stat(self, region, zone, elb_name, metric, stat, end_time):
        """Format one CloudWatch datapoint and publish it."""
        template_tokens = {
            'region': region,
            'zone': zone,
            'elb_name': elb_name,
            'metric_name': metric.name,
        }
        name_template = Template(self.config['format'])
        formatted_name = name_template.substitute(template_tokens)
        self.publish_delayed_metric(
            formatted_name,
            stat[metric.aws_type],
            metric_type=metric.diamond_type,
            precision=metric.precision,
            timestamp=time.mktime(utc_to_local(end_time).timetuple()))
    def process_metric(self, region_cw_conn, zone, start_time, end_time,
                       elb_name, metric):
        """Query CloudWatch for one metric of one ELB/zone and publish it."""
        stats = region_cw_conn.get_metric_statistics(
            self.config['interval'],
            start_time,
            end_time,
            metric.name,
            namespace='AWS/ELB',
            statistics=[metric.aws_type],
            dimensions={
                'LoadBalancerName': elb_name,
                'AvailabilityZone': zone
            })
        # create a fake stat if the current metric should default to zero when
        # a stat is not returned. Cloudwatch just skips the metric entirely
        # instead of wasting space to store/emit a zero.
        if len(stats) == 0 and metric.default_to_zero:
            stats.append({
                u'Timestamp': start_time,
                metric.aws_type: 0.0,
                u'Unit': u'Count'
            })
        for stat in stats:
            self.process_stat(region_cw_conn.region.name, zone, elb_name,
                              metric, stat, end_time)
    def process_elb(self, region_cw_conn, zone,
                    start_time, end_time, elb_name):
        """Publish all configured metrics for a single ELB in one zone."""
        for metric in self.metrics:
            self.process_metric(region_cw_conn, zone, start_time, end_time,
                                elb_name, metric)
    def process_zone(self, region_cw_conn, zone, start_time, end_time):
        """Publish metrics for every ELB of the region in the given zone."""
        for elb_name in self.get_elb_names(region_cw_conn.region.name,
                                           self.config):
            self.process_elb(region_cw_conn, zone, start_time, end_time,
                             elb_name)
    def process_region(self, region_cw_conn, start_time, end_time):
        """Process each availability zone of a region in its own thread."""
        threads = []
        for zone in get_zones(region_cw_conn.region.name, self.auth_kwargs):
            # Create a new connection for each thread, Boto isn't threadsafe.
            t_conn = cloudwatch.connect_to_region(region_cw_conn.region.name,
                                                  **self.auth_kwargs)
            zone_thread = threading.Thread(target=self.process_zone,
                                           args=(t_conn, zone,
                                                 start_time, end_time))
            zone_thread.start()
            threads.append(zone_thread)
        # Make sure all threads have completed. Also allows scheduler to work
        # more 'correctly', because without this, the collector will 'complete'
        # in about 7ms.
        for thread in threads:
            thread.join()
    def collect(self):
        """Entry point: collect the last `interval` worth of ELB metrics."""
        if not self.check_boto():
            return
        # Align the window to the top of the minute (CloudWatch period).
        now = datetime.datetime.utcnow()
        end_time = now.replace(second=0, microsecond=0)
        start_time = end_time - datetime.timedelta(seconds=self.interval)
        for region in self.config['regions'].keys():
            region_cw_conn = cloudwatch.connect_to_region(region,
                                                          **self.auth_kwargs)
            self.process_region(region_cw_conn, start_time, end_time)
| |
#!/usr/bin/env python
"""
Notes on writing new test files:
--------------------------------
A test file can contain anything, but usually it is similar to a regular input
file (defining a test problem), with a mandatory Test class. This class holds
all the test_* functions, as well as the from_conf(), which serves to
initialize the test (conf is in fact the test file itself, options are
command-line options).
All variables defined in a test file are collected in 'conf' variable passed to
a Test.__init__(). For example, 'input_name' in test_input_*.py files is
accessible as 'conf.input_name'. This is useful if the test class is defined
outside the test file, as the classes in tests_basic.py are.
The test_* functions are collected automatically by run_tests.py, with one
exception: if a certain order of their evaluation is required, a class
attribute 'test' of the Test class with a list of the test function names
should be defined (example: test_meshio.py)."""
import sys
import time
import os
import os.path as op
from optparse import OptionParser
import sfepy
from sfepy.base.conf import ProblemConf, get_standard_keywords
class OutputFilter(object):
    """Stdout proxy that only lets through messages whose three-character
    prefix is listed in *allowed_lines*, lightly reformatting them.
    """

    def __init__(self, allowed_lines):
        self.allowed_lines = allowed_lines
        self.msg_type1 = ['...', '!!!', '+++', '---']
        self.msg_type2 = ['<<<', '>>>']
        self.start()

    def start(self):
        """Install this filter as sys.stdout, remembering the real stream."""
        self.stdout = sys.stdout
        sys.stdout = self

    def write(self, msg):
        """Forward *msg* plus a newline iff its prefix is allowed."""
        if self.stdout is None:
            return
        prefix = msg[:3]
        if prefix not in self.allowed_lines:
            return
        if prefix in self.msg_type1:
            # Insert a space after the marker for readability.
            msg = '%s %s' % (msg[:3], msg[3:])
        elif prefix in self.msg_type2:
            # Strip the marker (and the following space) entirely.
            msg = msg[4:]
        self.stdout.write(msg)
        self.stdout.write('\n')

    def stop(self):
        """Restore the real sys.stdout and deactivate the filter."""
        sys.stdout = self.stdout
        self.stdout = None
def run_test(conf_name, options):
    """Run all tests defined by the Test class in the file `conf_name`.

    Output is filtered according to the --filter-* options unless
    debugging. Returns (n_fail, n_total, test_time).
    """
    try:
        os.makedirs(options.out_dir)
    except OSError, e:
        # The output directory is allowed to pre-exist.
        if e.errno != 17: # [Errno 17] File exists
            raise
    # Choose how much of the test output gets through to the console.
    if options.filter_none or options.debug:
        of = None
    elif options.filter_less:
        of = OutputFilter(['<<<', '>>>', '...', '!!!', '+++', '---'])
    elif options.filter_more:
        of = OutputFilter(['+++', '---'])
    else:
        of = OutputFilter(['<<<', '+++', '---'])
    print '<<< %s' % conf_name
    _required, other = get_standard_keywords()
    required = ['Test']
    num = 1
    test_time = 0.0
    # Stage 1: build the Test instance from the conf file.
    try:
        conf = ProblemConf.from_file(conf_name, required, _required + other)
        test = conf.funmod.Test.from_conf(conf, options)
        num = test.get_number()
        ok = True
        print '>>> test instance prepared (%d test(s))' % num
    except KeyboardInterrupt:
        print '>>> interrupted'
        sys.exit(0)
    except:
        # Count the whole file as failed when construction blows up;
        # --debug re-raises the silenced exception.
        print '--- test instance creation failed'
        if options.debug:
            raise
        ok, n_fail, n_total = False, num, num
    # Stage 2: run the tests, timing them.
    if ok:
        try:
            tt = time.clock()
            ok, n_fail, n_total = test.run(options.debug)
            test_time = time.clock() - tt
        except KeyboardInterrupt:
            print '>>> interrupted'
            sys.exit(0)
        except Exception, e:
            print '>>> %s' % e.__class__
            if options.debug:
                raise
            ok, n_fail, n_total = False, num, num
    if ok:
        print '>>> all passed in %.2f s' % test_time
    else:
        print '!!! %s test failed' % n_fail
    if of is not None:
        of.stop()
    return n_fail, n_total, test_time
def wrap_run_tests(options):
    """Return a visitor suitable for os.path.walk() that runs every
    test_*.py file in a directory and accumulates results into `stats`
    ([n_files, n_fail, n_total, total_time])."""
    def run_tests(stats, dir_name, filenames):
        # Only files matching test_*.py count as test files.
        filenames = [filename for filename in sorted(filenames)
                     if (len(filename) > 8) and
                     filename[:5] == 'test_' and filename[-3:] == '.py']
        print '<<< directory: %s, test files: %d' % (dir_name, len(filenames))
        for filename in filenames:
            conf_name = op.join(dir_name, filename)
            n_fail, n_total, test_time = run_test(conf_name, options)
            # stats is mutated in place so the caller sees the totals.
            stats[0] += 1
            stats[1] += n_fail
            stats[2] += n_total
            stats[3] += test_time
    return run_tests
def get_dir(default):
    """Return `default` unchanged when running from the source tree,
    otherwise resolve it relative to the installed sfepy data directory."""
    if sfepy.in_source_tree:
        return default
    return op.normpath(op.join(sfepy.data_dir, default))
# Command-line usage string for OptionParser.
usage = """%prog [options] [test_filename[ test_filename ...]]"""
# Help texts for the individual command-line options below.
help = {
    'dir' : 'directory with tests [default: %default]',
    'out_dir' : 'directory for storing test results and temporary files'
     ' [default: %default]',
    'debug' : 'raise silenced exceptions to see what was wrong',
    'filter-none' : 'do not filter any messages',
    'filter-less' : 'filter output (suppress all except test messages)',
    'filter-more' : 'filter output (suppress all except test result messages)',
    'print-doc' : 'print the docstring of this file (howto write new tests)',
}
def main():
    """Parse command-line options and run the requested tests."""
    parser = OptionParser(usage=usage, version="%prog " + sfepy.__version__)
    parser.add_option("", "--print-doc",
                      action="store_true", dest="print_doc",
                      default=False, help=help['print-doc'])
    parser.add_option("-d", "--dir", metavar='directory',
                      action="store", dest="test_dir",
                      default=get_dir('tests'),
                      help=help['dir'])
    parser.add_option("-o", "--output", metavar='directory',
                      action="store", dest="out_dir",
                      default=get_dir('output-tests'),
                      help=help['out_dir'])
    parser.add_option("", "--debug",
                      action="store_true", dest="debug",
                      default=False, help=help['debug'])
    parser.add_option("", "--filter-none",
                      action="store_true", dest="filter_none",
                      default=False, help=help['filter-none'])
    parser.add_option("", "--filter-less",
                      action="store_true", dest="filter_less",
                      default=False, help=help['filter-less'])
    parser.add_option("", "--filter-more",
                      action="store_true", dest="filter_more",
                      default=False, help=help['filter-more'])
    options, args = parser.parse_args()
    if options.print_doc:
        print __doc__
        return
    run_tests = wrap_run_tests(options)
    # stats accumulates [n_test_files, n_fail, n_total, total_time].
    stats = [0, 0, 0, 0.0]
    if len(args) >= 1:
        # Explicit test files given on the command line.
        for test_filename in args:
            dirname, filename = op.split(test_filename)
            run_tests(stats, dirname, [filename])
    else:
        # Walk the whole test directory (Python 2 os.path.walk visitor).
        op.walk(options.test_dir, run_tests, stats)
    print '%d test file(s) executed in %.2f s, %d failure(s) of %d test(s)'\
          % (stats[0], stats[3], stats[1], stats[2])
if __name__ == '__main__':
    main()
| |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Collection of RNN encoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow as tf
from tensorflow.contrib.rnn.python.ops import rnn
from seq2seq.encoders.encoder import Encoder, EncoderOutput
from seq2seq.training import utils as training_utils
from seq2seq.contrib.rnn_cell import AttentionRNNCell
def _unpack_cell(cell):
  """Return the list of per-layer cells wrapped by `cell`.

  stack_bidirectional_dynamic_rnn expects one cell per layer, so a
  MultiRNNCell is unwrapped into its constituent cells; any other cell
  is treated as a single layer.
  """
  if not isinstance(cell, tf.contrib.rnn.MultiRNNCell):
    return [cell]
  return cell._cells  # pylint: disable=W0212
def _default_rnn_cell_params():
  """Default RNN cell configuration shared by the encoder classes."""
  params = {
      "cell_class": "BasicLSTMCell",
      "cell_params": {"num_units": 128},
      "num_layers": 1,
      # Keep probabilities of 1.0 mean no dropout by default.
      "dropout_input_keep_prob": 1.0,
      "dropout_output_keep_prob": 1.0,
      # Residual connections between stacked layers are off by default.
      "residual_connections": False,
      "residual_combiner": "add",
      "residual_dense": False,
  }
  return params
def _toggle_dropout(cell_params, mode):
  """Return a copy of `cell_params` with dropout disabled outside training.

  The input dict is never mutated; eval/inference modes get keep
  probabilities of 1.0 so dropout becomes a no-op.
  """
  params = copy.deepcopy(cell_params)
  if mode == tf.contrib.learn.ModeKeys.TRAIN:
    return params
  params["dropout_input_keep_prob"] = 1.0
  params["dropout_output_keep_prob"] = 1.0
  return params
class UnidirectionalRNNEncoder(Encoder):
  """Encodes a sequence with a single forward RNN.

  Stacking (multiple layers) should be performed as part of the cell.

  Args:
    cell: An instance of tf.contrib.rnn.RNNCell
    name: A name for the encoder
  """

  def __init__(self, params, mode, name="forward_rnn_encoder"):
    super(UnidirectionalRNNEncoder, self).__init__(params, mode, name)
    # Dropout is only active during training.
    self.params["rnn_cell"] = _toggle_dropout(self.params["rnn_cell"], mode)

  @staticmethod
  def default_params():
    return {
        "rnn_cell": _default_rnn_cell_params(),
        "init_scale": 0.04,
    }

  def encode(self, inputs, sequence_length, **kwargs):
    init_scale = self.params["init_scale"]
    tf.get_variable_scope().set_initializer(
        tf.random_uniform_initializer(-init_scale, init_scale))
    cell = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    outputs, final_state = tf.nn.dynamic_rnn(
        cell=cell,
        inputs=inputs,
        sequence_length=sequence_length,
        dtype=tf.float32,
        **kwargs)
    # The raw RNN outputs double as the attention values.
    return EncoderOutput(
        outputs=outputs,
        final_state=final_state,
        attention_values=outputs,
        attention_values_length=sequence_length)
class BidirectionalRNNEncoder(Encoder):
  """Encodes a sequence with a forward and a backward RNN.

  The same cell configuration is used for both directions; stacking
  should be performed as part of the cell.

  Args:
    cell: An instance of tf.contrib.rnn.RNNCell
    name: A name for the encoder
  """

  def __init__(self, params, mode, name="bidi_rnn_encoder"):
    super(BidirectionalRNNEncoder, self).__init__(params, mode, name)
    # Dropout is only active during training.
    self.params["rnn_cell"] = _toggle_dropout(self.params["rnn_cell"], mode)

  @staticmethod
  def default_params():
    return {
        "rnn_cell": _default_rnn_cell_params(),
        "init_scale": 0.04,
    }

  def encode(self, inputs, sequence_length, **kwargs):
    init_scale = self.params["init_scale"]
    tf.get_variable_scope().set_initializer(
        tf.random_uniform_initializer(-init_scale, init_scale))
    fw_cell = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    bw_cell = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    outputs, states = tf.nn.bidirectional_dynamic_rnn(
        cell_fw=fw_cell,
        cell_bw=bw_cell,
        inputs=inputs,
        sequence_length=sequence_length,
        dtype=tf.float32,
        **kwargs)
    # Join the forward/backward outputs along the feature dimension.
    outputs_concat = tf.concat(outputs, 2)
    return EncoderOutput(
        outputs=outputs_concat,
        final_state=states,
        attention_values=outputs_concat,
        attention_values_length=sequence_length)
class StackBidirectionalRNNEncoder(Encoder):
  """Encodes a sequence with stacked bidirectional RNN layers.

  The same cell configuration is used for both directions; stacking
  should be performed as part of the cell.

  Args:
    cell: An instance of tf.contrib.rnn.RNNCell
    name: A name for the encoder
  """

  def __init__(self, params, mode, name="stacked_bidi_rnn_encoder"):
    super(StackBidirectionalRNNEncoder, self).__init__(params, mode, name)
    # Dropout is only active during training.
    self.params["rnn_cell"] = _toggle_dropout(self.params["rnn_cell"], mode)

  @staticmethod
  def default_params():
    return {
        "rnn_cell": _default_rnn_cell_params(),
        "init_scale": 0.04,
    }

  def encode(self, inputs, sequence_length, **kwargs):
    init_scale = self.params["init_scale"]
    tf.get_variable_scope().set_initializer(
        tf.random_uniform_initializer(-init_scale, init_scale))
    # stack_bidirectional_dynamic_rnn wants one cell per layer, so any
    # MultiRNNCell is unpacked into its constituent cells first.
    fw_cell = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    bw_cell = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    outputs_concat, state_fw, state_bw = rnn.stack_bidirectional_dynamic_rnn(
        cells_fw=_unpack_cell(fw_cell),
        cells_bw=_unpack_cell(bw_cell),
        inputs=inputs,
        dtype=tf.float32,
        sequence_length=sequence_length,
        **kwargs)
    return EncoderOutput(
        outputs=outputs_concat,
        final_state=(state_fw, state_bw),
        attention_values=outputs_concat,
        attention_values_length=sequence_length)
class DynamicDirectionalRNNEncoder(Encoder):
  """
  An encoder that drives an AttentionRNNCell over positionally-tagged
  inputs.

  The input embeddings are concatenated with learned positional
  embeddings and carried in the cell's initial state; the per-step RNN
  inputs themselves are all zeros, so the cell derives everything from
  its state.

  Args:
    cell: An instance of tf.contrib.rnn.RNNCell
    name: A name for the encoder
  """
  def __init__(self, params, mode, name="dyna_rnn_encoder"):
    super(DynamicDirectionalRNNEncoder, self).__init__(params, mode, name)
    self.params["rnn_cell"] = _toggle_dropout(self.params["rnn_cell"], mode)
    # Maximum number of positions the positional-embedding table covers.
    self.max_sequence_length = self.params[
        "source_max_seq_len"] # TODO: Different length for input and context vectors?
    self.positional_embedding_size = self.params["positional_embedding_size"]
    self.attention_num_layers = self.params["attention_num_layers"]
    self.attention_num_units = self.params["attention_num_units"]
  @staticmethod
  def default_params():
    return {
        "rnn_cell": _default_rnn_cell_params(),
        "source_max_seq_len": 50,
        "positional_embedding_size": 32,
        "attention_num_layers": 3,
        "attention_num_units": 32,
        "init_scale": 0.04,
    }
  def encode(self, inputs, sequence_length, **kwargs):
    scope = tf.get_variable_scope()
    scope.set_initializer(tf.random_uniform_initializer(
        -self.params["init_scale"],
        self.params["init_scale"]))
    # The inner cell's width must match embedding + positional widths so
    # the state slots line up with the concatenated vectors built below.
    embedding_size = inputs.get_shape().as_list()[-1] # TODO: Different size for words and context
    self.params["rnn_cell"]["cell_params"]["num_units"] = embedding_size + self.positional_embedding_size
    inner_cell = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    cell = AttentionRNNCell(inner_cell,
                            embedding_size,
                            self.positional_embedding_size,
                            self.attention_num_layers,
                            self.attention_num_units)
    # One learned embedding per source position, looked up for however
    # many time steps the current batch actually has.
    positional_embeddings_var = tf.get_variable("positional_embeddings",
                                                [self.max_sequence_length, self.positional_embedding_size],
                                                dtype=tf.float32) # TODO: Make dtype configurable
    position_sequence = tf.range(tf.shape(inputs)[1])
    positional_embeddings = tf.nn.embedding_lookup(positional_embeddings_var, position_sequence)
    positional_embeddings = tf.expand_dims(positional_embeddings, axis=0)
    # Tile the (1, time, pos_size) embeddings across the batch dimension.
    positional_embeddings_for_batch = tf.tile(positional_embeddings, [tf.shape(inputs)[0], 1, 1])
    # State tuple handed to AttentionRNNCell: a zero inner-cell state plus
    # two copies of the inputs fused with positional embeddings.
    # NOTE(review): confirm this layout against AttentionRNNCell's
    # expected state structure.
    initial_state_0 = tf.zeros([tf.shape(inputs)[0], inner_cell.state_size])
    initial_state_1 = tf.concat([inputs, positional_embeddings_for_batch], axis=2)
    initial_state_2 = tf.concat([inputs, positional_embeddings_for_batch], axis=2)
    initial_state = (initial_state_0, initial_state_1, initial_state_2)
    # The per-step inputs are all-zero placeholders; the cell reads the
    # real content from its state.
    outputs, state = tf.nn.dynamic_rnn(
        cell=cell,
        inputs=tf.zeros([tf.shape(inputs)[0], tf.shape(inputs)[1] * 1, 1], tf.float32),
        # Todo : Make this * 1 configurable
        initial_state=initial_state,
        sequence_length=sequence_length * 1, # Todo : Make this 1 configurable
        dtype=tf.float32,
        **kwargs)
    # NOTE(review): outputs/attention come from state[2] and the final
    # state from state[0]; every position is reported as valid in
    # attention_values_length -- confirm against AttentionRNNCell.
    return EncoderOutput(
        outputs=state[2],
        final_state=state[0],
        attention_values=state[2],
        attention_values_length=tf.ones([tf.shape(inputs)[0]], dtype=tf.int32) * tf.shape(inputs)[1])
| |
'''
This Python program parses forms to extract the field names and values.
Name: Parse Forms
Creator: Matt Gagnon <mattjgagnon@gmail.com>
Created: 2012-05-04
Revised:
Version: 1.0
Python: 2.6
To do:
Execution: fetch a web page with form(s) on it
use BeautifulSoup to parse the form inputs and values
return a dictionary object of the information collected
optionally display the field object
'''
# =============
# CONFIGURATION
# =============
# Target page whose forms will be parsed; swap in any URL with forms on it.
# url = 'http://www.crmtool.net/WebForm.asp?W=1282&F=1248' # for testing
url = 'http://google.com' # for testing
debug = True # for testing - prints the form tags and field inputs
def parseForms(formUrl, labelWrapper = '', display = False):
'''
This function parses form data from the supplied url and returns all
forms and their inputs on the page as a dictionary.
The function uses a third-party module named BeautifulSoup.
Accepts a valid url and a variable for printing the fields.
'''
# import the url modules
import urllib, urllib2
# import our html parser
from BeautifulSoup import BeautifulSoup
# create a dictionary object to hold the forms
data = []
fieldInputs = ['input', 'select', 'textarea']
try:
# request url and read server response
page = urllib2.urlopen(formUrl).read()
except urllib2.URLError, e:
# check if a reason was given
if hasattr(e, 'reason'):
print 'We failed to reach a server.'
print 'Reason:', e.reason
# check if there was a code given
elif hasattr(e, 'code'):
print 'The server could not fulfill the request.'
print 'Error code:', e.code
except ValueError, e:
print 'unexpected value for URL'
else:
try:
# try to open the data source
tree = BeautifulSoup(page)
except:
# exception occurred
print 'could not open the page'
else:
# find all the forms
forms = tree.findAll('form')
# extract and remove all script tags in the form
# as they tend to mess up the parsing of values
[script.extract() for script in tree('script')]
action = ''
method = ''
# check if forms exist
if (forms != ''):
for form in forms:
# check if the form has a method attribute
if any('method' in s for s in form.attrs):
method = form['method']
# check if the form has an action attribute
if any('action' in s for s in form.attrs):
action = form['action']
# get the label associated with the form inputs
fieldLabel = getLabel(form, labelWrapper)
# print the form start tag
if display:
printFormStart(action, method)
# loop through each form input type
for superType in fieldInputs:
# loop through each field input
for field in tree.findAll(superType):
# create a dictionary object of field data
fieldData = {}
# get a name input attribute, if one exists
fieldName = getName(field)
# assign the input type (text, checkbox, radio, etc.)
# based on the given super type (input, select, textarea)
fieldType = getType(field, superType)
# check if the input is a standard input
if superType == 'input':
fieldValue = ''
# check if the input has a value attribute
if any('value' in s for s in field.attrs):
fieldValue = field['value']
# check if the input is a select
elif superType == 'select':
# create a new dictionary object to hold options
fieldValue = {}
# loop through the options
for option in field.contents:
try:
# try to assign the value to the key
# option.text is visible form text
fieldValue[option.text] = option['value']
except KeyError, e:
if option.text:
fieldValue[option.text] = option.text
except AttributeError, e:
pass
except TypeError, e:
pass
# check if the input is a textarea
elif superType == 'textarea':
# assign the tag text
fieldValue = field.text
# assign the attribute data to the field
fieldData = {
'label' : fieldLabel,
'name' : fieldName,
'type' : fieldType,
'value' : fieldValue}
if display:
# print the field input HTML (mainly for testing)
printField(fieldLabel, fieldName, fieldType, fieldValue)
# check if the data exists and append
if fieldData:
data.append(fieldData)
# print the form start tag
if display:
printFormEnd()
else:
# no forms exist
print 'no forms found on the page'
# return the forms
return data
def getLabel(form = '', labelWrapper = ''):
    '''
    This function gets the label for a form input control.

    Searches the form for tags named by labelWrapper (default 'label')
    and prints each one's text for debugging.

    NOTE(review): although a `labels` list is built, the function
    returns the LAST matching element bound by the loop (a BeautifulSoup
    tag, not a string), or '' when no tags match. Every input in the
    form therefore shares one label -- confirm whether returning
    `labels` was intended.
    '''
    # fall back to the standard <label> tag when no wrapper is given
    if labelWrapper == '':
        labelWrapper = 'label'
    labels = []
    fieldLabel = ''
    print '--- all labels ---'
    for fieldLabel in form.findAll(labelWrapper):
        # skip non-breaking-space placeholders
        if fieldLabel.text != ' ':
            labels.append(fieldLabel.text)
            print fieldLabel.text
    # print labels
    return fieldLabel
def getName(field):
    '''
    Return the value of the field's name attribute, or '' when the
    field has no name.
    '''
    has_name = any('name' in attr for attr in field.attrs)
    return field['name'] if has_name else ''
def getType(field, superType):
    '''
    This function accepts a field input object and returns the type attribute.

    For a standard input the type attribute is read when present and
    defaults to 'text' (mirroring browser behavior); selects and
    textareas simply report their tag name.
    '''
    if superType == 'input':
        # Check the tag's attributes for a type, the same way getName
        # checks for a name. The previous field.find('type') searched
        # CHILD TAGS (and returns None, never ''), so field['type']
        # raised KeyError for any input without a type attribute.
        if any('type' in s for s in field.attrs):
            fieldType = field['type']
        else:
            fieldType = 'text'
    elif superType == 'select':
        fieldType = 'select'
    else:
        fieldType = 'textarea'
    return fieldType
def printFormEnd():
    '''
    Print the closing HTML form tag.
    '''
    print('</form>')
def printField(fieldLabel, fieldName, fieldType, fieldValue):
    '''
    Dispatch a form input to the printer that matches its type.

    Selects and textareas have dedicated printers; every other type is
    rendered as a plain input tag.
    '''
    printers = {
        'select': lambda: printSelect(fieldLabel, fieldName, fieldValue),
        'textarea': lambda: printTextarea(fieldLabel, fieldName, fieldValue),
    }
    default = lambda: printInput(fieldLabel, fieldName, fieldType, fieldValue)
    printers.get(fieldType, default)()
def printFormStart(action = '', method = 'post'):
    '''
    Print an opening HTML form tag with the given action and method.
    '''
    print('<form action="%s" method="%s">' % (action, method))
def printInput(fieldLabel, fieldName, fieldType, fieldValue):
    '''
    Print an HTML input tag built from the name/type/value attributes.

    Hidden inputs are printed bare; every other input is wrapped in a
    label built from fieldLabel.
    '''
    tag = '<input name="' + fieldName + '" type="' + fieldType + '" value="' + fieldValue + '">'
    if fieldType == 'hidden':
        print(tag)
        return
    printLabelStart(fieldLabel)
    print(tag)
    printLabelEnd()
def printLabelEnd():
    '''
    Print a closing HTML label tag followed by a blank separator line.
    '''
    print('</label>')
    print('')
def printLabelStart(labelText = ''):
    '''
    Print an opening HTML label tag followed by the optional label text.
    '''
    print('<label> %s' % (labelText,))
def printSelect(fieldLabel, fieldName, fieldValue):
    '''
    Print an HTML select tag, wrapped in a label, with one option per
    (text, value) pair in the fieldValue dictionary.
    '''
    printLabelStart(fieldLabel)
    print('<select name="' + fieldName + '">')
    # options are emitted sorted by their visible text
    for text, value in sorted(fieldValue.items()):
        print('<option value="' + value + '">' + text + '</option>')
    print('</select>')
    printLabelEnd()
def printTextarea(fieldLabel, fieldName, fieldValue):
    '''
    Print an HTML textarea tag, wrapped in a label, containing the
    field value as its text.
    '''
    printLabelStart(fieldLabel)
    print('<textarea name="' + fieldName + '">' + fieldValue + '</textarea>')
    printLabelEnd()
# ============
# MAIN PROGRAM
# ============
# Parse the configured url, using <td> tags as the label wrappers,
# and dump the collected field dictionaries.
#formData = parseForms(url, labelWrapper, debug)
formData = parseForms(url, 'td')
print formData
| |
import os
import datetime
from nose.tools import eq_
import mock
import amo
import amo.tests
from addons import cron
from addons.models import Addon, AppSupport
from django.core.management.base import CommandError
from files.models import File, Platform
from lib.es.management.commands.reindex import flag_database, unflag_database
from stats.models import UpdateCount
from versions.models import Version
class CurrentVersionTestCase(amo.tests.TestCase):
    """Checks that the cron tasks repopulate a nulled _current_version."""
    fixtures = ['base/addon_3615']
    @mock.patch('waffle.switch_is_active', lambda x: True)
    def test_addons(self):
        """The private task fixes the addon id chunks it is handed."""
        # Null out the denormalized current version, then verify the
        # task restores it for the given id chunk.
        Addon.objects.filter(pk=3615).update(_current_version=None)
        eq_(Addon.objects.filter(_current_version=None, pk=3615).count(), 1)
        cron._update_addons_current_version(((3615,),))
        eq_(Addon.objects.filter(_current_version=None, pk=3615).count(), 0)
    @mock.patch('waffle.switch_is_active', lambda x: True)
    def test_cron(self):
        """The public cron entry point fixes affected addons end to end."""
        Addon.objects.filter(pk=3615).update(_current_version=None)
        eq_(Addon.objects.filter(_current_version=None, pk=3615).count(), 1)
        cron.update_addons_current_version()
        eq_(Addon.objects.filter(_current_version=None, pk=3615).count(), 0)
class TestLastUpdated(amo.tests.TestCase):
    """Exercises cron.addon_last_updated and the AppSupport cron tasks."""
    fixtures = ['base/addon_3615', 'addons/listed', 'base/apps',
                'addons/persona', 'base/seamonkey', 'base/thunderbird']
    def test_personas(self):
        """Personas get last_updated == created, and the value is stable."""
        Addon.objects.update(type=amo.ADDON_PERSONA, status=amo.STATUS_PUBLIC)
        cron.addon_last_updated()
        for addon in Addon.objects.all():
            eq_(addon.last_updated, addon.created)
        # Make sure it's stable.
        cron.addon_last_updated()
        for addon in Addon.objects.all():
            eq_(addon.last_updated, addon.created)
    def test_catchall(self):
        """Make sure the catch-all last_updated is stable and accurate."""
        # Nullify all datestatuschanged so the public add-ons hit the
        # catch-all.
        (File.objects.filter(status=amo.STATUS_PUBLIC)
         .update(datestatuschanged=None))
        Addon.objects.update(last_updated=None)
        cron.addon_last_updated()
        for addon in Addon.objects.filter(status=amo.STATUS_PUBLIC,
                                          type=amo.ADDON_EXTENSION):
            eq_(addon.last_updated, addon.created)
        # Make sure it's stable.
        cron.addon_last_updated()
        for addon in Addon.objects.filter(status=amo.STATUS_PUBLIC):
            eq_(addon.last_updated, addon.created)
    def test_last_updated_lite(self):
        # Make sure lite addons' last_updated matches their file's
        # datestatuschanged.
        Addon.objects.update(status=amo.STATUS_LITE, last_updated=None)
        File.objects.update(status=amo.STATUS_LITE)
        cron.addon_last_updated()
        addon = Addon.objects.get(id=3615)
        files = File.objects.filter(version__addon=addon)
        eq_(len(files), 1)
        eq_(addon.last_updated, files[0].datestatuschanged)
        assert addon.last_updated
    def test_last_update_lite_no_files(self):
        # Lite addons whose files are all unreviewed fall back to created.
        Addon.objects.update(status=amo.STATUS_LITE, last_updated=None)
        File.objects.update(status=amo.STATUS_UNREVIEWED)
        cron.addon_last_updated()
        addon = Addon.objects.get(id=3615)
        eq_(addon.last_updated, addon.created)
        assert addon.last_updated
    def test_appsupport(self):
        """_update_appsupport is idempotent for a fixed id set."""
        ids = Addon.objects.values_list('id', flat=True)
        cron._update_appsupport(ids)
        eq_(AppSupport.objects.filter(app=amo.FIREFOX.id).count(), 4)
        # Run it again to test deletes.
        cron._update_appsupport(ids)
        eq_(AppSupport.objects.filter(app=amo.FIREFOX.id).count(), 4)
    def test_appsupport_listed(self):
        """Addon 3723 gets no Firefox AppSupport row from the cron."""
        AppSupport.objects.all().delete()
        eq_(AppSupport.objects.filter(addon=3723).count(), 0)
        cron.update_addon_appsupport()
        eq_(AppSupport.objects.filter(addon=3723,
                                      app=amo.FIREFOX.id).count(), 0)
    def test_appsupport_seamonkey(self):
        """A public SeaMonkey addon regains its AppSupport row."""
        addon = Addon.objects.get(pk=15663)
        addon.update(status=amo.STATUS_PUBLIC)
        AppSupport.objects.all().delete()
        cron.update_addon_appsupport()
        eq_(AppSupport.objects.filter(addon=15663,
                                      app=amo.SEAMONKEY.id).count(), 1)
class TestHideDisabledFiles(amo.tests.TestCase):
    """cron.hide_disabled_files moves disabled files to guarded paths."""
    msg = 'Moving disabled file: %s => %s'
    def setUp(self):
        # One addon with a single version carrying two files (f1, f2).
        p = Platform.objects.create(id=amo.PLATFORM_ALL.id)
        self.addon = Addon.objects.create(type=amo.ADDON_EXTENSION)
        self.version = Version.objects.create(addon=self.addon)
        self.f1 = File.objects.create(version=self.version, platform=p,
                                      filename='f1')
        self.f2 = File.objects.create(version=self.version, filename='f2',
                                      platform=p)
    @mock.patch('files.models.os')
    def test_leave_nondisabled_files(self, os_mock):
        # All these addon/file status pairs should stay.
        stati = [(amo.STATUS_PUBLIC, amo.STATUS_PUBLIC),
                 (amo.STATUS_PUBLIC, amo.STATUS_UNREVIEWED),
                 (amo.STATUS_PUBLIC, amo.STATUS_BETA),
                 (amo.STATUS_LITE, amo.STATUS_UNREVIEWED),
                 (amo.STATUS_LITE, amo.STATUS_LITE),
                 (amo.STATUS_LITE_AND_NOMINATED, amo.STATUS_UNREVIEWED),
                 (amo.STATUS_LITE_AND_NOMINATED, amo.STATUS_LITE)]
        for addon_status, file_status in stati:
            self.addon.update(status=addon_status)
            File.objects.update(status=file_status)
            cron.hide_disabled_files()
            # No filesystem access at all means nothing was moved.
            assert not os_mock.path.exists.called, (addon_status, file_status)
    @mock.patch('files.models.File.mv')
    @mock.patch('files.models.storage')
    def test_move_user_disabled_addon(self, m_storage, mv_mock):
        """Both files of a user-disabled addon are moved and unmirrored."""
        # Use Addon.objects.update so the signal handler isn't called.
        Addon.objects.filter(id=self.addon.id).update(
            status=amo.STATUS_PUBLIC, disabled_by_user=True)
        File.objects.update(status=amo.STATUS_PUBLIC)
        cron.hide_disabled_files()
        # Check that f2 was moved.
        f2 = self.f2
        mv_mock.assert_called_with(f2.file_path, f2.guarded_file_path,
                                   self.msg)
        m_storage.delete.assert_called_with(f2.mirror_file_path)
        # Check that f1 was moved as well (rewind to the first call).
        f1 = self.f1
        mv_mock.call_args = mv_mock.call_args_list[0]
        m_storage.delete.call_args = m_storage.delete.call_args_list[0]
        mv_mock.assert_called_with(f1.file_path, f1.guarded_file_path,
                                   self.msg)
        m_storage.delete.assert_called_with(f1.mirror_file_path)
        # There's only 2 files, both should have been moved.
        eq_(mv_mock.call_count, 2)
        eq_(m_storage.delete.call_count, 2)
    @mock.patch('files.models.File.mv')
    @mock.patch('files.models.storage')
    def test_move_admin_disabled_addon(self, m_storage, mv_mock):
        """Both files of an admin-disabled addon are moved and unmirrored."""
        Addon.objects.filter(id=self.addon.id).update(
            status=amo.STATUS_DISABLED)
        File.objects.update(status=amo.STATUS_PUBLIC)
        cron.hide_disabled_files()
        # Check that f2 was moved.
        f2 = self.f2
        mv_mock.assert_called_with(f2.file_path, f2.guarded_file_path,
                                   self.msg)
        m_storage.delete.assert_called_with(f2.mirror_file_path)
        # Check that f1 was moved as well (rewind to the first call).
        f1 = self.f1
        mv_mock.call_args = mv_mock.call_args_list[0]
        m_storage.delete.call_args = m_storage.delete.call_args_list[0]
        mv_mock.assert_called_with(f1.file_path, f1.guarded_file_path,
                                   self.msg)
        m_storage.delete.assert_called_with(f1.mirror_file_path)
        # There's only 2 files, both should have been moved.
        eq_(mv_mock.call_count, 2)
        eq_(m_storage.delete.call_count, 2)
    @mock.patch('files.models.File.mv')
    @mock.patch('files.models.storage')
    def test_move_disabled_file(self, m_storage, mv_mock):
        """Only the individually disabled file is moved."""
        Addon.objects.filter(id=self.addon.id).update(status=amo.STATUS_LITE)
        File.objects.filter(id=self.f1.id).update(status=amo.STATUS_DISABLED)
        File.objects.filter(id=self.f2.id).update(status=amo.STATUS_UNREVIEWED)
        cron.hide_disabled_files()
        # Only f1 should have been moved.
        f1 = self.f1
        mv_mock.assert_called_with(f1.file_path, f1.guarded_file_path,
                                   self.msg)
        eq_(mv_mock.call_count, 1)
        # It should have been removed from mirror staging.
        m_storage.delete.assert_called_with(f1.mirror_file_path)
        eq_(m_storage.delete.call_count, 1)
class AvgDailyUserCountTestCase(amo.tests.TestCase):
    """Covers the average-daily-users cron and its reindexing guard."""
    fixtures = ['base/addon_3615']
    def test_adu_is_adjusted_in_cron(self):
        """After the task runs, ADU equals the addon's total_downloads."""
        addon = Addon.objects.get(pk=3615)
        # The fixture starts with an inflated ADU value.
        self.assertTrue(
            addon.average_daily_users > addon.total_downloads + 10000,
            'Unexpected ADU count. ADU of %d not greater than %d' % (
                addon.average_daily_users, addon.total_downloads + 10000))
        cron._update_addon_average_daily_users([(3615, 6000000)])
        addon = Addon.objects.get(pk=3615)
        eq_(addon.average_daily_users, addon.total_downloads)
    def test_adu_flag(self):
        """The cron raises CommandError while the database is flagged for
        reindexing, unless FORCE_INDEXING is set in the environment."""
        addon = Addon.objects.get(pk=3615)
        now = datetime.datetime.now()
        counter = UpdateCount.objects.create(addon=addon, date=now,
                                             count=1234)
        counter.save()
        self.assertTrue(
            addon.average_daily_users > addon.total_downloads + 10000,
            'Unexpected ADU count. ADU of %d not greater than %d' % (
                addon.average_daily_users, addon.total_downloads + 10000))
        adu = cron.update_addon_average_daily_users
        flag_database('new', 'old', 'alias')
        try:
            # Should fail.
            self.assertRaises(CommandError, adu)
            # Should work with the environ flag.
            os.environ['FORCE_INDEXING'] = '1'
            adu()
        finally:
            # Always unflag and clean the environment, even on failure.
            unflag_database()
            del os.environ['FORCE_INDEXING']
        addon = Addon.objects.get(pk=3615)
        eq_(addon.average_daily_users, 1234)
class TestReindex(amo.tests.ESTestCase):
    """cron.reindex_addons indexes both addons and apps into search."""
    @mock.patch('addons.models.update_search_index', new=mock.Mock)
    def setUp(self):
        # Create a few addons and apps with incremental search indexing
        # mocked out, so nothing is indexed until reindex_addons runs.
        self.addons = []
        self.apps = []
        for x in xrange(3):
            self.addons.append(amo.tests.addon_factory())
            self.apps.append(amo.tests.app_factory())
    def test_job(self):
        cron.reindex_addons()
        self.refresh()
        # Everything created in setUp should now be searchable.
        eq_(sorted(a.id for a in Addon.search()),
            sorted(a.id for a in self.apps + self.addons))
| |
#!/usr/bin/env python
"""usage: %prog [options] filename
Parse a document to a tree, with optional profiling
"""
import sys
import os
import traceback
from optparse import OptionParser
from html5lib import html5parser, sanitizer
from html5lib.tokenizer import HTMLTokenizer
from html5lib import treebuilders, serializer, treewalkers
from html5lib import constants
from html5lib import utils
def parse():
    """Entry point: parse the document named on the command line.

    Resolves the input (URL, stdin via '-', or a local file), builds an
    HTML parser according to the command-line options, then runs it --
    optionally under cProfile or with wall-clock timing -- and prints
    the resulting document via printOutput.
    """
    optParser = getOptParser()
    opts, args = optParser.parse_args()
    encoding = 'utf8'
    try:
        f = args[-1]
        # Try opening from the internet
        if f.startswith('http://'):
            try:
                import urllib.request, urllib.parse, urllib.error, cgi
                f = urllib.request.urlopen(f)
                contentType = f.headers.get('content-type')
                if contentType:
                    (mediaType, params) = cgi.parse_header(contentType)
                    encoding = params.get('charset')
            except Exception:
                # Best-effort: fall through and let the parser try the raw
                # value. Narrowed from a bare except so that
                # KeyboardInterrupt/SystemExit still propagate.
                pass
        elif f == '-':
            f = sys.stdin
            if sys.version_info[0] >= 3:
                encoding = None
        else:
            try:
                # Try opening from file system
                f = open(f, 'rb')
            except IOError as e:
                sys.stderr.write('Unable to open file: %s\n' % e)
                sys.exit(1)
    except IndexError:
        sys.stderr.write('No filename provided. Use -h for help\n')
        sys.exit(1)
    treebuilder = treebuilders.getTreeBuilder(opts.treebuilder)
    if opts.sanitize:
        tokenizer = sanitizer.HTMLSanitizer
    else:
        tokenizer = HTMLTokenizer
    p = html5parser.HTMLParser(tree=treebuilder, tokenizer=tokenizer, debug=opts.log)
    # Fragment parsing reuses the same parser with a different entry point.
    if opts.fragment:
        parseMethod = p.parseFragment
    else:
        parseMethod = p.parse
    if opts.profile:
        import cProfile
        import pstats
        cProfile.runctx('run(parseMethod, f, encoding)', None,
                        {'run': run,
                         'parseMethod': parseMethod,
                         'f': f,
                         'encoding': encoding},
                        'stats.prof')
        # XXX - We should use a temp file here
        stats = pstats.Stats('stats.prof')
        stats.strip_dirs()
        stats.sort_stats('time')
        stats.print_stats()
    elif opts.time:
        import time
        t0 = time.time()
        document = run(parseMethod, f, encoding)
        t1 = time.time()
        if document:
            printOutput(p, document, opts)
            t2 = time.time()
            sys.stderr.write('\n\nRun took: %fs (plus %fs to print the output)'%(t1-t0, t2-t1))
        else:
            sys.stderr.write('\n\nRun took: %fs'%(t1-t0))
    else:
        document = run(parseMethod, f, encoding)
        if document:
            printOutput(p, document, opts)
def run(parseMethod, f, encoding):
    """Invoke parseMethod(f, encoding=encoding), trapping parse failures.

    Returns the parsed document, or None (after printing the traceback)
    when the parser raised.
    """
    try:
        document = parseMethod(f, encoding=encoding)
    except Exception:
        # Report the failure but keep the driver alive. Narrowed from a
        # bare except so KeyboardInterrupt/SystemExit still propagate.
        document = None
        traceback.print_exc()
    return document
def printOutput(parser, document, opts):
    """Render the parse result according to the output options.

    Depending on opts this prints the detected character encoding, the
    parser's debug log, and the document as XML, a debug tree, hilited
    markup, or serialized HTML; parse errors are listed last.
    """
    if opts.encoding:
        print('Encoding:', parser.tokenizer.stream.charEncoding)
    # Replay any log entries recorded during parsing (see --log).
    for item in parser.log:
        print(item)
    if document is not None:
        if opts.xml:
            # Each tree builder has its own XML serialization entry point.
            tb = opts.treebuilder.lower()
            if tb == 'dom':
                document.writexml(sys.stdout, encoding='utf-8')
            elif tb == 'lxml':
                import lxml.etree
                sys.stdout.write(lxml.etree.tostring(document))
            elif tb == 'etree':
                sys.stdout.write(utils.default_etree.tostring(document))
        elif opts.tree:
            # Normalize a single document into a list of fragments.
            if not hasattr(document,'__getitem__'):
                document = [document]
            for fragment in document:
                print(parser.tree.testSerializer(fragment))
        elif opts.hilite:
            sys.stdout.write(document.hilite('utf-8'))
        elif opts.html:
            # Forward every serializer option present on opts.
            kwargs = {}
            for opt in serializer.HTMLSerializer.options:
                try:
                    kwargs[opt] = getattr(opts,opt)
                except:
                    pass
            # An unset quote_char means "let the serializer pick".
            if not kwargs['quote_char']:
                del kwargs['quote_char']
            tokens = treewalkers.getTreeWalker(opts.treebuilder)(document)
            if sys.version_info[0] >= 3:
                encoding = None
            else:
                encoding = 'utf-8'
            for text in serializer.HTMLSerializer(**kwargs).serialize(tokens, encoding=encoding):
                sys.stdout.write(text)
                if not text.endswith('\n'): sys.stdout.write('\n')
    if opts.error:
        errList=[]
        for pos, errorcode, datavars in parser.errors:
            errList.append('Line %i Col %i'%pos + ' ' + constants.E.get(errorcode, 'Unknown error "%s"' % errorcode) % datavars)
        sys.stdout.write('\nParse errors:\n' + '\n'.join(errList)+'\n')
def getOptParser():
    """Build the OptionParser that describes every command-line switch."""
    # Each entry pairs the option strings with the add_option keyword
    # arguments. optparse silently drops empty option strings, so
    # long-only switches are declared as ('', '--name').
    specs = [
        (('-p', '--profile'),
         dict(action='store_true', default=False, dest='profile',
              help='Use the hotshot profiler to '
                   'produce a detailed log of the run')),
        (('-t', '--time'),
         dict(action='store_true', default=False, dest='time',
              help='Time the run using time.time (may not be accurate on '
                   'all platforms, especially for short runs)')),
        (('-b', '--treebuilder'),
         dict(action='store', type='string', dest='treebuilder',
              default='etree')),
        (('-e', '--error'),
         dict(action='store_true', default=False, dest='error',
              help='Print a list of parse errors')),
        (('-f', '--fragment'),
         dict(action='store_true', default=False, dest='fragment',
              help='Parse as a fragment')),
        (('', '--tree'),
         dict(action='store_true', default=False, dest='tree',
              help='Output as debug tree')),
        (('-x', '--xml'),
         dict(action='store_true', default=False, dest='xml',
              help='Output as xml')),
        (('', '--no-html'),
         dict(action='store_false', default=True, dest='html',
              help="Don't output html")),
        (('', '--hilite'),
         dict(action='store_true', default=False, dest='hilite',
              help='Output as formatted highlighted code.')),
        (('-c', '--encoding'),
         dict(action='store_true', default=False, dest='encoding',
              help='Print character encoding used')),
        (('', '--inject-meta-charset'),
         dict(action='store_true', default=False, dest='inject_meta_charset',
              help='inject <meta charset>')),
        (('', '--strip-whitespace'),
         dict(action='store_true', default=False, dest='strip_whitespace',
              help='strip whitespace')),
        (('', '--omit-optional-tags'),
         dict(action='store_true', default=False, dest='omit_optional_tags',
              help='omit optional tags')),
        (('', '--quote-attr-values'),
         dict(action='store_true', default=False, dest='quote_attr_values',
              help='quote attribute values')),
        (('', '--use-best-quote-char'),
         dict(action='store_true', default=False, dest='use_best_quote_char',
              help='use best quote character')),
        (('', '--quote-char'),
         dict(action='store', default=None, dest='quote_char',
              help='quote character')),
        (('', '--no-minimize-boolean-attributes'),
         dict(action='store_false', default=True,
              dest='minimize_boolean_attributes',
              help='minimize boolean attributes')),
        (('', '--use-trailing-solidus'),
         dict(action='store_true', default=False, dest='use_trailing_solidus',
              help='use trailing solidus')),
        (('', '--space-before-trailing-solidus'),
         dict(action='store_true', default=False,
              dest='space_before_trailing_solidus',
              help='add space before trailing solidus')),
        (('', '--escape-lt-in-attrs'),
         dict(action='store_true', default=False, dest='escape_lt_in_attrs',
              help='escape less than signs in attribute values')),
        (('', '--escape-rcdata'),
         dict(action='store_true', default=False, dest='escape_rcdata',
              help='escape rcdata element values')),
        (('', '--sanitize'),
         dict(action='store_true', default=False, dest='sanitize',
              help='sanitize')),
        (('-l', '--log'),
         dict(action='store_true', default=False, dest='log',
              help='log state transitions')),
    ]
    parser = OptionParser(usage=__doc__)
    for strings, kwargs in specs:
        parser.add_option(*strings, **kwargs)
    return parser
# Script entry point: parse the document named on the command line.
if __name__ == '__main__':
    parse()
| |
# This is a minimal implementation of the CloudSigma resources
# TODO for a full CS implementation define resources for all API resrources + the attachment points of disks and nics.
import cloudsigma
import time # to sleep
import json # to parse the meta so it can be added as an object
import base64 # for cloudinit user-data encoding
from heat.engine import properties
from heat.engine import resource
from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging
logger = logging.getLogger(__name__)
class CloudSigmaCompute(resource.Resource):
    """Minimal Heat resource driving a single CloudSigma virtual server.

    Creates (and manages the lifecycle of) one server with at most one
    attached drive (either pre-existing or cloned from a template drive)
    and any number of public-IP or private-VLAN NICs.
    """

    PROPERTIES = (
        API_ENDPOINT, USERNAME, PASSWORD,
        INSTANCE_NAME, MEM_SIZE, CPU_MHZ, VNC_PASSWORD,
        META, DESCRIPTION, CLOUDINIT_USER_DATA, SSH_PUBLIC_KEY,
        DRIVE_CLONE_UUID, DRIVE_CLONE_RESIZE, DRIVE_UUID,
        NET_IP_UUIDS, NET_VLAN_UUIDS
    ) = (
        'api_endpoint', 'username', 'password',
        'instance_name', 'mem_size', 'cpu_mhz', 'vnc_password',
        'meta', 'description', 'cloudinit_user_data', 'ssh_public_key',
        'drive_clone_uuid', 'drive_clone_resize', 'drive_uuid',
        'net_ip_uuids', 'net_vlan_uuids'
    )

    properties_schema = {
        # ------------------ API credentials ------------------
        API_ENDPOINT: properties.Schema(
            properties.Schema.STRING,
            _('The URL for the RESTful API. Defaults to https://zrh.cloudsigma.com/api/2.0/'),
            required=True,
            default='https://zrh.cloudsigma.com/api/2.0/'
        ),
        USERNAME: properties.Schema(
            properties.Schema.STRING,
            _('The username in the CloudSigma Cloud.'),
            required=True
        ),
        PASSWORD: properties.Schema(
            properties.Schema.STRING,
            _('The password in the CloudSigma Cloud.'),
            required=True
        ),
        # ------------------ server shape ------------------
        INSTANCE_NAME: properties.Schema(
            properties.Schema.STRING,
            _('The instance name. The default is "Server <random uuid>".'),
            required=False,
            default=''
        ),
        MEM_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Memory size in MB. The default is 256MB'),
            required=False,
            default=256
        ),
        CPU_MHZ: properties.Schema(
            properties.Schema.INTEGER,
            _('CPU speed in MHz. The default is 250MHz'),
            required=False,
            default=250
        ),
        VNC_PASSWORD: properties.Schema(
            properties.Schema.STRING,
            _('The VNC password for remote screen. The default is Cl0ud_Sigma'),
            required=True,
            default='Cl0ud_Sigma'
        ),
        # ------------------ drives ------------------
        # FIXME works with a single drive for the minimal implementation
        DRIVE_CLONE_UUID: properties.Schema(
            properties.Schema.STRING,
            _('Drive UUID to clone and attach'),
            required=False
        ),
        DRIVE_CLONE_RESIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('Resize the cloned drive. Size in bytes. The default 4GB'),
            required=False
        ),
        DRIVE_UUID: properties.Schema(
            properties.Schema.STRING,
            _('Drive UUID to attach'),
            required=False
        ),
        # ------------------ metadata ------------------
        META: properties.Schema(
            properties.Schema.STRING,
            _('The metadata to pass to the server. It needs to be a proper JSON'),
            required=False,
            default='{}'
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('The instance\'s description'),
            required=False
        ),
        CLOUDINIT_USER_DATA: properties.Schema(
            properties.Schema.STRING,
            _('Cloudinit user data. Requires cloudinit > 0.7.5.'),
            required=False
        ),
        SSH_PUBLIC_KEY: properties.Schema(
            properties.Schema.STRING,
            _('SSH public key for the default user. Requires cloudinit > 0.7.5.'),
            required=False
        ),
        # ------------------ networking ------------------
        # FIXME minimal implementation - list entries may also be the
        # literals 'dhcp' or 'manual'
        NET_IP_UUIDS: properties.Schema(
            properties.Schema.LIST,
            _('The subscribed IP UUID. Can be also "dhcp" or "manual"'),
            required=False,
            # XXX the Heat plugin guide claims LIST defaults to [], but
            # without an explicit default we observed
            # "'NoneType' object is not iterable"
            default=[]
        ),
        NET_VLAN_UUIDS: properties.Schema(
            properties.Schema.LIST,
            _('The subscribed VLAN UUIDs.'),
            required=False,
            # XXX same NoneType workaround as NET_IP_UUIDS above
            default=[]
        )
    }

    attributes_schema = {
        # list of IPv4 identifiers attached to the running server
        'network_ip': _('Container ip address')
    }

    def __init__(self, name, json_snippet, stack):
        # NOTE: this method was previously misspelled "__int__", so it was
        # dead code and the parent initializer ran implicitly; fixed to be a
        # real constructor that delegates to resource.Resource.
        super(CloudSigmaCompute, self).__init__(name, json_snippet, stack)

    # -------------------------------- RESOURCE MANAGERS ----------------
    # TODO in a more detailed implementation, create a different resource
    # for each of these managers

    def _api_credentials(self):
        """Return the (endpoint, username, password) triple from properties."""
        return (self.properties.get(self.API_ENDPOINT),
                self.properties.get(self.USERNAME),
                self.properties.get(self.PASSWORD))

    def _get_compute_manager(self):
        """Build a pycloudsigma Server manager for this account."""
        endpoint, username, password = self._api_credentials()
        logger.debug(_("_get_compute_manager api_endpoint=%s, username=%s") % (endpoint, username))
        return cloudsigma.resource.Server(
            api_endpoint=endpoint,
            username=username,
            password=password
        )

    def _get_drive_manager(self):
        """Build a pycloudsigma Drive manager for this account."""
        endpoint, username, password = self._api_credentials()
        logger.debug(_("_get_drive_manager api_endpoint=%s, username=%s") % (endpoint, username))
        return cloudsigma.resource.Drive(
            api_endpoint=endpoint,
            username=username,
            password=password
        )

    def _get_ip_manager(self):
        """Build a pycloudsigma IP manager for this account."""
        endpoint, username, password = self._api_credentials()
        logger.debug(_("_get_ip_manager api_endpoint=%s, username=%s") % (endpoint, username))
        return cloudsigma.resource.IP(
            api_endpoint=endpoint,
            username=username,
            password=password
        )

    def _get_vlan_manager(self):
        """Build a pycloudsigma VLAN manager for this account."""
        endpoint, username, password = self._api_credentials()
        # NOTE: log label fixed; it previously said "_get_ip_manager"
        logger.debug(_("_get_vlan_manager api_endpoint=%s, username=%s") % (endpoint, username))
        return cloudsigma.resource.VLAN(
            api_endpoint=endpoint,
            username=username,
            password=password
        )

    def _get_compute_data(self, compute_id):
        """Fetch the current server description for compute_id."""
        return self._get_compute_manager().get(compute_id)
    # -------------------------------- RESOURCE MANAGERS ----------------

    def _resolve_attribute(self, name):
        """Resolve 'network_ip' to the list of attached IPv4 identifiers."""
        if not self.resource_id:
            return
        if name == 'network_ip':
            _instance_data = self._get_compute_data(self.resource_id)
            res = []
            for nic in _instance_data['nics']:
                if nic['runtime']:
                    try:
                        # NOTE(review): the IPv4 address appears to live under
                        # runtime['ip_v4']['uuid'] - confirm against the API
                        res.append(nic['runtime']['ip_v4']['uuid'])
                    except TypeError:
                        # nic carries no IPv4 runtime info (e.g. VLAN-only)
                        pass
            return res

    def _build_meta(self):
        """Assemble the server 'meta' object from the template properties.

        The META property is a JSON string; it is parsed so the API receives
        an object rather than a string.
        """
        _meta = json.loads(self.properties.get(self.META))
        if self.properties.get(self.DESCRIPTION):
            # FIXME overwrites a 'description' that may already be in meta
            _meta['description'] = self.properties.get(self.DESCRIPTION)
        if self.properties.get(self.SSH_PUBLIC_KEY):
            # FIXME overwrites a 'ssh_public_key' that may already be in meta
            _meta['ssh_public_key'] = self.properties.get(self.SSH_PUBLIC_KEY)
        if self.properties.get(self.CLOUDINIT_USER_DATA):
            # FIXME overwrites base64_fields / cloudinit-user-data in meta
            _meta['base64_fields'] = 'cloudinit-user-data'  # XXX hyphens not underscores!!!
            _meta['cloudinit-user-data'] = base64.b64encode(
                self.properties.get(self.CLOUDINIT_USER_DATA))
        return _meta

    def _wait_drive_unmounted(self, drive_manager, drive_uuid):
        """Poll until the drive reports the 'unmounted' status."""
        # FIXME is 'unmounted' the right terminal status to test for?
        while drive_manager.get(drive_uuid)['status'] != 'unmounted':
            time.sleep(5)  # FIXME do we need this, do we need a hard timeout

    def _prepare_drive(self, drive_manager):
        """Return the drive attachment dict for the server, or None.

        A pre-existing drive (DRIVE_UUID) takes precedence; otherwise
        DRIVE_CLONE_UUID is cloned (and optionally resized).
        """
        # FIXME drives can be left behind after an unsuccessful create
        if self.properties.get(self.DRIVE_UUID):
            # validate the drive uuid with a GET (raises on a bad uuid)
            _drive = drive_manager.get(self.properties.get(self.DRIVE_UUID))
            _attach_uuid = _drive['uuid']
        elif self.properties.get(self.DRIVE_CLONE_UUID):
            # cloning is asynchronous; validate the source first - a wrong
            # uuid raises cloudsigma.errors.ClientError (404 notexist)
            _drive = drive_manager.get(self.properties.get(self.DRIVE_CLONE_UUID))
            # clone() raises if the source is mounted, etc.
            _clone = drive_manager.clone(_drive['uuid'])
            self._wait_drive_unmounted(drive_manager, _clone['uuid'])
            if self.properties.get(self.DRIVE_CLONE_RESIZE):
                # TODO sanity check the clone new size
                _clone['size'] = self.properties.get(self.DRIVE_CLONE_RESIZE)
                drive_manager.resize(_clone['uuid'], _clone)
                self._wait_drive_unmounted(drive_manager, _clone['uuid'])
            _attach_uuid = _clone['uuid']
        else:
            return None
        # TODO compute dev_channel properly to support multiple drives
        return {
            'boot_order': 1,
            'dev_channel': "0:0",
            'device': "virtio",
            'drive': {'uuid': _attach_uuid}
        }

    def _build_nics(self):
        """Build the NIC list from the IP and VLAN uuid properties."""
        _nics = []
        for _ip in self.properties.get(self.NET_IP_UUIDS):
            # public IPs: either a subscribed uuid or a config keyword
            _ip_manager = self._get_ip_manager()
            logger.debug(_("list IPs %s") % _ip_manager.list())
            if _ip == 'dhcp':
                _nics.append({'ip_v4_conf': {'conf': 'dhcp'}})
            elif _ip == 'manual':
                _nics.append({'ip_v4_conf': {'conf': 'manual'}})
            else:
                # validate the uuid with a GET (raises on a bad uuid)
                _ip_manager.get(_ip)
                _nics.append({'ip_v4_conf': {'conf': 'static', 'ip': _ip}})
        for _vlan in self.properties.get(self.NET_VLAN_UUIDS):
            # private networks: validate the VLAN uuid with a GET
            _vlan_manager = self._get_vlan_manager()
            _vlan_manager.get(_vlan)
            _nics.append({'vlan': _vlan})
        return _nics

    def handle_create(self):
        """Create, configure and boot the server; returns its uuid."""
        _drive_manager = self._get_drive_manager()
        logger.debug(_("list drives %s") % _drive_manager.list())
        _compute_manager = self._get_compute_manager()
        logger.debug(_("list servers %s") % _compute_manager.list())
        _compute_description = {
            'name': self.properties.get(self.INSTANCE_NAME),
            'cpu': self.properties.get(self.CPU_MHZ),
            # property is in MB, the API expects bytes
            'mem': self.properties.get(self.MEM_SIZE) * 1024 ** 2,
            'vnc_password': self.properties.get(self.VNC_PASSWORD),
            'drives': [],
            'nics': [],
            # meta must be a JSON object, not the raw template string
            'meta': self._build_meta()
        }
        _drive_attachment = self._prepare_drive(_drive_manager)
        if _drive_attachment is not None:
            _compute_description['drives'].append(_drive_attachment)
        _compute_description['nics'] = self._build_nics()
        logger.debug(_("Trying to create a VM with this description %s") % _compute_description)
        _compute = _compute_manager.create(_compute_description)
        logger.debug(_("VM Created %s") % _compute)
        # remember the uuid for all future operations on this resource
        self.resource_id_set(_compute['uuid'])
        _compute_manager.start(_compute['uuid'])
        # the returned token is handed to check_create_complete()
        return _compute['uuid']
    # TODO migrate the drive creation here

    def check_create_complete(self, _compute_id):
        """Creation is complete once the server reports 'running'."""
        logger.debug(_("Check create server %s") % self.resource_id)
        _instance_data = self._get_compute_data(_compute_id)
        return _instance_data['status'] == 'running'

    def handle_suspend(self):
        """Stop the server (no-op when it was never created)."""
        if not self.resource_id:
            return
        self._get_compute_manager().stop(self.resource_id)
        return self.resource_id

    def check_suspend_complete(self, _compute_id):
        """Suspension is complete once the server reports 'stopped'."""
        _instance_data = self._get_compute_data(_compute_id)
        return _instance_data['status'] == 'stopped'

    def handle_resume(self):
        """Start the server again (no-op when it was never created)."""
        if not self.resource_id:
            return
        self._get_compute_manager().start(self.resource_id)
        return self.resource_id

    def check_resume_complete(self, _compute_id):
        """Resume is complete once the server reports 'running'."""
        _instance_data = self._get_compute_data(_compute_id)
        return _instance_data['status'] == 'running'

    def handle_delete(self):
        """Stop (if needed) and delete the server together with its disks."""
        logger.debug(_("Delete server %s") % self.resource_id)
        # enables deleting a stack whose create failed before a resource_id
        if self.resource_id is None:
            logger.debug(_("Delete: resource_id is empty - nothing to do, exitting."))
            return
        _compute_manager = self._get_compute_manager()
        try:
            _instance_data = self._get_compute_data(self.resource_id)
        except cloudsigma.errors.ClientError:
            # 404 nonexistent - nothing to delete
            return
        if _instance_data['status'] == 'running':
            logger.debug(_("Delete server %s; stopping first ...") % self.resource_id)
            _compute_manager.stop(self.resource_id)
            time.sleep(5)  # XXX wait for the status to update
            while self._get_compute_data(self.resource_id)['status'] == 'stopping':
                # XXX the status may still read 'running' at this point
                # XXX introduce a hard timeout
                logger.debug(_("Delete server %s; waiting to be stopped; sleep 5 secs") % self.resource_id)
                time.sleep(5)  # FIXME throttle so we do not flood the API
        _compute_manager.delete_with_disks(self.resource_id)
def resource_mapping():
    """Tell Heat which resource type names this plugin provides."""
    mapping = {'CloudSigma::Compute::Instance': CloudSigmaCompute}
    return mapping
| |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # git rewrites these markers during 'git archive'. setup.py/versioneer.py
    # grep for the variable names, so each assignment must stay on its own
    # line; _version.py will simply call get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Attributes (VCS, style, tag_prefix, parentdir_prefix,
    versionfile_source, verbose) are assigned dynamically by get_config().
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these settings were baked in when 'setup.py versioneer' generated
    # _version.py
    cfg = VersioneerConfig()
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "",
        "parentdir_prefix": "great_expectations-",
        "versionfile_source": "great_expectations/_version.py",
        "verbose": False,
    }
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Used by get_versions() to fall through the chain of version-discovery
    strategies (keywords, git describe, parent directory name).
    """
# Maps VCS name -> the long _version.py source text (populated by versioneer).
LONG_VERSION_PY = {}
# Registry of VCS handler functions, filled in by @register_vcs_handler.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Register f in HANDLERS[vcs][method] and return it unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
    """Call the given command(s), trying each name in *commands* in turn.

    Returns (stdout, returncode): (None, None) when no candidate command
    could be launched, (None, returncode) when it ran but exited non-zero.
    """
    assert isinstance(commands, list)
    for c in commands:
        dispcmd = str([c] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen(
                [c] + args,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None),
            )
            break
        except OSError as e:
            # modernized from the py2-era sys.exc_info()[1] idiom
            if e.errno == errno.ENOENT:
                # this candidate doesn't exist; try the next one
                continue
            if verbose:
                print(f"unable to run {dispcmd}")
                print(e)
            return None, None
    else:
        # every candidate raised ENOENT
        if verbose:
            print(f"unable to find command, tried {commands}")
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print(f"unable to run {dispcmd} (error)")
            print(f"stdout was {stdout}")
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    candidate = root
    for _ in range(3):
        basename = os.path.basename(candidate)
        if basename.startswith(parentdir_prefix):
            # directory name carries the version after the prefix
            return {
                "version": basename[len(parentdir_prefix) :],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(candidate)
        candidate = os.path.dirname(candidate)  # up a level
    if verbose:
        print(
            "Tried directories %s but none started with prefix %s"
            % (str(tried), parentdir_prefix)
        )
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # The code embedded in _version.py can just fetch the value of these
    # keywords directly; from setup.py we must not import _version.py, so
    # the assignments are scraped with a regexp instead.
    keywords = {}
    targets = {
        "git_refnames =": "refnames",
        "git_full =": "full",
        "git_date =": "date",
    }
    try:
        with open(versionfile_abs) as fobj:
            for line in fobj:
                stripped = line.strip()
                for prefix, key in targets.items():
                    if stripped.startswith(prefix):
                        mo = re.search(r'=\s*"(.*)"', line)
                        if mo:
                            keywords[key] = mo.group(1)
    except OSError:
        # unreadable/missing file: fall back to an empty keyword set
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git's "%ci" output is only ISO-8601-like ("%cI" would be compliant
        # but needs git >= 2.2.0); massage it: first space -> "T", then drop
        # the space before the UTC offset.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {r.strip() for r in refnames.strip("()").split(",")}
    # since git-1.8.3 tags are listed as "tag: foo-1.0"; prefer those entries
    TAG = "tag: "
    tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
    if not tags:
        # Older git (or genuinely no tags): fall back to refnames containing
        # a digit, which filters out branch names like "release" and
        # "stabilization" as well as "HEAD" and "master".
        tags = {r for r in refs if re.search(r"\d", r)}
        if verbose:
            print(f"discarding '{','.join(refs - tags)}', no digits")
    if verbose:
        print(f"likely tags: {','.join(sorted(tags))}")
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        r = ref[len(tag_prefix) :]
        if verbose:
            print(f"picking {r}")
        return {
            "version": r,
            "full-revisionid": keywords["full"].strip(),
            "dirty": False,
            "error": None,
            "date": date,
        }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {
        "version": "0+unknown",
        "full-revisionid": keywords["full"].strip(),
        "dirty": False,
        "error": "no suitable tags",
        "date": None,
    }
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # remember shell=False elsewhere: the .cmd/.exe names are required
        GITS = ["git.cmd", "git.exe"]
    # cheap sanity check that 'root' is inside a git working tree
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
    if rc != 0:
        if verbose:
            print(f"Directory {root} not under git control")
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(
        GITS,
        [
            "describe",
            "--tags",
            "--dirty",
            "--always",
            "--long",
            "--match",
            f"{tag_prefix}*",
        ],
        cwd=root,
    )
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[: git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
        if not mo:
            # unparsable. Maybe git-describe is misbehaving?
            pieces["error"] = f"unable to parse git-describe output: '{describe_out}'"
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format(
                full_tag,
                tag_prefix,
            )
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix) :]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    # NOTE(review): if the 'git show' call fails this returns (None, rc) and
    # the .strip() below raises AttributeError - confirm that is acceptable
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
        0
    ].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a .

    Guards against "closest-tag" being None (no tags found), which the
    previous membership test would raise TypeError on.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in history
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        # use "." when the tag already contains a local-version "+"
        version += "." if "+" in tag else "+"
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: the gHEX suffix is unconditional here
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + f"+g{pieces['short']}"
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        # use "." when the tag already contains a local-version "+"
        version += "." if "+" in tag else "+"
        version += f"g{pieces['short']}"
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                version += ".dev0"
        return version
    # exception #1
    version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    suffix = "-dirty" if pieces["dirty"] else ""
    return version + suffix
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    suffix = "-dirty" if pieces["dirty"] else ""
    return version + suffix
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # parsing failed somewhere upstream; report that instead of a version
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError(f"unknown style '{style}'")
    rendered = renderers[style](pieces)
    return {
        "version": rendered,
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
def get_versions():
    """Get version information or return default if unable to do so."""
    # This module lives at ROOT/VERSIONFILE_SOURCE. If __file__ is available
    # we can walk back up to the project root; some py2exe/bbfreeze/
    # non-CPython setups don't provide __file__, in which case only the
    # expanded git-archive keywords can work.
    cfg = get_config()
    verbose = cfg.verbose

    def unknown(error):
        # uniform fallback payload when a strategy cannot produce a version
        return {
            "version": "0+unknown",
            "full-revisionid": None,
            "dirty": None,
            "error": error,
            "date": None,
        }

    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where .git might live) to this file: strip one directory per
        # path component to recover the root.
        for _ in cfg.versionfile_source.split("/"):
            root = os.path.dirname(root)
    except NameError:
        return unknown("unable to find root of source tree")
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return unknown("unable to compute version")
| |
import ccd
from datetime import datetime, timedelta
from functools import reduce, partial
import itertools as it
import logging
import multiprocessing
import numpy as np
from operator import is_not
import warnings
import xarray
############################################################################
## Auxilary Functions
############################################################################
###### Time FUNCTIONS #################################
def _n64_to_datetime(n64):
"""Convert Numpy 64 bit timestamps to datetime objects. Units in seconds"""
return datetime.utcfromtimestamp(n64.tolist() / 1e9)
def _n64_datetime_to_scalar(dt64):
return (dt64 - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
def _scalar_to_n64_datetime(scalar):
return (scalar * np.timedelta64(1, 's')) + np.datetime64('1970-01-01T00:00:00Z')
###### POST PROCESSING FUNCTIONS ######################
def _identity_transform(x):
    """Default post-processing hook: return *x* unchanged."""
    return x
def _extract_time_coordinate_and_treat_as_value(da, f = _identity_transform):
    """Lift a pixel's time coordinate into a 1x1 lat/lon DataArray value.

    *f* post-processes the raw time value (identity by default).
    """
    transformed_time = [[f(da.time.values)]]  # dummy value for the time scalar
    pixel_coords = [[da.latitude.values], [da.longitude.values]]
    pixel_dims = ['latitude', 'longitude']
    return xarray.DataArray(transformed_time, pixel_coords, pixel_dims)
def _nth_occurence_in_ccd_matrix(ds, n, f = _identity_transform):
    """Per pixel, capture the time of the n-th (0-based) non-NaN entry.

    Pixels with fewer than n + 1 valid values are skipped; the per-pixel
    results are merged into a single DataArray with combine_first.
    """
    def pixel_occurrences():
        for pixel in _pixel_iterator_from_xarray(ds):
            cleaned = pixel.where(~np.isnan(pixel), drop=True)
            if len(cleaned.values) >= n + 1:
                yield _extract_time_coordinate_and_treat_as_value(
                    cleaned.isel(time=n), f=f)

    return reduce(lambda acc, da: acc.combine_first(da), pixel_occurrences())
###### Per Pixel FUNCTIONS ############################
def _run_ccd_on_pixel(ds):
    """Performs CCD on a 1x1xn dataset. Returns CCD results.

    Creates a CCD result from a 1x1xn dimensioned dataset. Flattens all
    bands to perform analysis; missing SR bands are replaced with dummy
    data, but pixel_qa (cf_mask) is required.

    Args:
        ds: xArray dataset with dimensions 1x1xn with any number of SR
            bands; pixel_qa is required.

    Returns:
        The result of ccd.detect
    """
    if 'time' not in ds.dims:
        raise Exception("You're missing time dims!")
    available_bands = ds.data_vars
    scene_count = ds.dims['time']
    # ccd expects ordinal dates
    date = [_n64_to_datetime(t).date().toordinal() for t in ds.time.values]

    def band_or_ones(name):
        # ccd.detect requires every band; substitute flat dummy data when
        # a band is absent from the dataset
        return np.ones(scene_count) if name not in available_bands else ds[name].values

    red = band_or_ones('red')
    green = band_or_ones('green')
    blue = band_or_ones('blue')
    nir = band_or_ones('nir')
    swir1 = band_or_ones('swir1')
    swir2 = band_or_ones('swir2')
    # dummy value is 273.15K (0 degC) scaled by 10.
    # FIX: previously read the non-existent ds.object instead of ds.thermal.
    thermals = (np.ones(scene_count) * 273.15 * 10
                if 'thermal' not in available_bands else ds.thermal.values)
    qa = np.array(ds.pixel_qa.values)
    return ccd.detect(date, blue, green, red, nir, swir1, swir2, thermals, qa)
def _convert_ccd_results_into_dataset(results=None, model_dataset=None):
    """Converts the result returned by ccd into a usable xArray dataset.

    Builds an intermediate product holding a 1 at every (lat, lon, time)
    index where ccd detected a change. The lat/lon indices come from the
    1x1xt model_dataset.

    Args:
        results: the results of the CCD operation
        model_dataset: a dataset with the dimensions used to create the
            ccd result

    Returns:
        An xarray DataArray with a 1 wherever change was detected by ccd.
    """
    change_starts = [datetime.fromordinal(model.start_day)
                     for model in results['change_models']]
    nearest = model_dataset.sel(time=change_starts, method='nearest')
    change_flags = np.ones((nearest.dims['time'], 1, 1)).astype(np.int16)
    change_array = xarray.DataArray(
        change_flags,
        coords=[nearest.time.values,
                [nearest.latitude.values],
                [nearest.longitude.values]],
        dims=['time', 'latitude', 'longitude'])
    return change_array.rename("continuous_change")
def _is_pixel(ds):
"""checks if dataset has the size of a pixel
Checks to make sure latitude and longitude are dimensionless
Args:
value: xArray dataset
Returns:
Boolean value - true if the ds is a single pixel
"""
return (len(ds.latitude.dims) == 0) and (len(ds.longitude.dims) == 0)
def _clean_pixel(_ds, saturation_threshold=10000):
    """Filter out over-saturated and invalid acquisitions.

    Builds a validity mask (strictly below `saturation_threshold` and >= 0),
    then keeps only the time slices whose *red band* value passes the mask.

    Args:
        _ds: 1x1xt dataset to filter.
        saturation_threshold: exclusive upper bound for a 'clean' value.

    Returns:
        The input dataset restricted to the clean time indices.
    """
    mask = (_ds < saturation_threshold) & (_ds >= 0)
    # NOTE: only the red band's mask decides which acquisitions are kept;
    # `if valid` replaces the un-idiomatic `if y == True` of the original.
    clean_indices = [idx for idx, valid in enumerate(mask.red.values) if valid]
    return _ds.isel(time=clean_indices)
###### Visualization FUNCTIONS #########################
# Plotting support is optional: headless deployments may lack matplotlib.
try:
    from matplotlib.pyplot import axvline
    import matplotlib.patches as patches
    from matplotlib import pyplot as plt
except ImportError:
    # Was a bare `except:`, which would also have swallowed
    # KeyboardInterrupt/SystemExit; only import failures belong here.
    warnings.warn("Failed to load plotting library")
def _lasso_eval(date=None, weights=None, bias=None):
"""Evaluates time-series model for time t using ccd coefficients"""
curves = [
date,
np.cos(2 * np.pi * (date / 365.25)),
np.sin(2 * np.pi * (date / 365.25)),
np.cos(4 * np.pi * (date / 365.25)),
np.sin(4 * np.pi * (date / 365.25)),
np.cos(6 * np.pi * (date / 365.25)),
np.sin(6 * np.pi * (date / 365.25)),
]
return np.dot(weights, curves) + bias
def _intersect(a, b):
"""Returns the Intersection of two sets.
Returns common elements of two iterables
._intersect("apples", "oranges") returns "aes"
Args:
a, b: iterables that can be compared
Returns:
list of common elements between the two input iterables
"""
return list(set(a) & set(b))
def _save_plot_to_file(plot=None, file=None, band_name=None):
"""Saves a plot to a file and labels it using bland_name"""
if isinstance(file_name, str):
file_name = [file_name]
for fn in file_name:
plot.savefig(str.replace(fn, "$BAND$", band), orientation='landscape', papertype='letter', bbox_inches='tight')
def _plot_band(results=None, original_pixel=None, band=None, file_name=None):
    """Plot the fitted CCD segment models for one band.

    Args:
        results: dict returned by ccd.detect ('change_models' entry is used).
        original_pixel: optional 1x1xt xarray dataset; when given, the
            (saturation-filtered) original acquisitions are scatter-plotted
            over the fitted curves.
        band: name of the band attribute on each change model (e.g. 'red').
        file_name: optional path or list of paths handed to
            _save_plot_to_file; '$BAND$' tokens are replaced with `band`.
    """
    fig = plt.figure(1)
    fig.suptitle(band.title(), fontsize=18, verticalalignment='bottom')
    lastdt = None
    dateLabels = []
    for change_model in results["change_models"]:
        target = getattr(change_model, band)
        time = np.arange(change_model.start_day, change_model.end_day, 1)
        ax1 = fig.add_subplot(211)
        # Evaluate the fitted lasso model across the segment's day range.
        xy = [(t, _lasso_eval(date=t, weights=target.coefficients, bias=target.intercept)) for t in time]
        x, y = zip(*xy)
        x = [datetime.fromordinal(t) for t in x]
        ax1.plot(x, y, label=target.coefficients)
        dt = datetime.fromordinal(change_model.start_day)
        dateLabels.append(dt)
        # Shade the gap between the previous segment's end and this start.
        if lastdt is not None:
            ax1.axvspan(lastdt, dt, color=(0, 0, 0, 0.1))
        dt = datetime.fromordinal(change_model.end_day)
        dateLabels.append(dt)
        lastdt = dt
    if original_pixel is not None:
        # Overlay the original acquisitions (values below 5000 only).
        xy = [(_n64_to_datetime(x.time.values) + timedelta(0), x.values) for x in _clean_pixel(original_pixel)[band]
              if x < 5000]
        x, y = zip(*xy)
        ax2 = fig.add_subplot(211)
        ax2.scatter(x, y)
    ymin, ymax = ax1.get_ylim()
    for idx, dt in enumerate(dateLabels):
        plt.axvline(x=dt, linestyle='dotted', color=(0, 0, 0, 0.5))
        # Top, inside
        plt.text(
            dt,
            ymax,
            "\n" +  # HACK TO FIX SPACING
            dt.strftime('%b %d') + " \n"  # HACK TO FIX SPACING
            ,
            rotation=90,
            horizontalalignment='right' if (idx % 2) else 'left',
            verticalalignment='top')
    plt.tight_layout()
    if file_name is not None:
        # BUG FIX: was `file=filename` (undefined name); the parameter
        # is `file_name`, so saving to disk always raised NameError.
        _save_plot_to_file(plot=plt, file=file_name, band_name=band)
    plt.show()
##### Logging Decorators #################################################
def disable_logger(function):
    """Decorator: silence lcmap-pyccd's verbose logging before each call."""
    def _func(*params, **kwargs):
        # Both logger names appear in different pyccd versions.
        for logger_name in ("ccd", "lcmap-pyccd"):
            logging.getLogger(logger_name).setLevel(logging.WARNING)
        return function(*params, **kwargs)
    return _func
def enable_logger(function):
    """Decorator: turn on lcmap-pyccd's verbose logging before each call."""
    def _func(*params, **kwargs):
        # Both logger names appear in different pyccd versions.
        for logger_name in ("ccd", "lcmap-pyccd"):
            logging.getLogger(logger_name).setLevel(logging.DEBUG)
        return function(*params, **kwargs)
    return _func
##### THREAD OPS #################################################
def generate_thread_pool():
    """Create a worker pool with one worker per available CPU core.

    NOTE: despite the name, ``multiprocessing.Pool`` is a *process* pool.
    Falls back to 2 workers when the core count cannot be determined.

    Returns:
        A multiprocessing Pool with cpu_count (or 2) worker processes.
    """
    try:
        worker_count = multiprocessing.cpu_count()
    except NotImplementedError:
        worker_count = 2
    return multiprocessing.Pool(processes=worker_count)
def destroy_thread_pool(pool):
    """Shut down a pool created with generate_thread_pool.

    Closes the pool to new work, then blocks until all queued work has
    drained and the worker processes have exited.

    Args:
        pool: a multiprocessing pool
    """
    pool.close()  # stop accepting new tasks; queued tasks still run
    pool.join()   # wait for the workers to finish and exit
###### ITERATOR FUNCTIONS ##########################################
def _pixel_iterator_from_xarray(ds):
"""Accepts an xarray. Creates an iterator of 1x1xt xarray dataset `pixels`
Creates an iterable from dataset pixels usable with multiprocessing pool distribution
Args:
ds: An xArray with the dimensions of latitude, longitude, and time
Returns:
An iterable consisting of xArray datasets with a single latitude/longitude dim with n time dimensions
"""
lat_size = len(ds.latitude)
lon_size = len(ds.longitude)
cartesian = it.product(range(lat_size), range(lon_size))
return map(lambda x: ds.isel(latitude=x[0], longitude=x[1]), cartesian)
def _ccd_product_from_pixel(pixel):
    """Run CCD on one pixel and package the outcome as a dataset.

    Args:
        pixel: 1x1xt xArray dataset (latitude, longitude, time).

    Returns:
        The converted ccd product dataset, or None when the CCD run fails
        on a singular matrix.
    """
    try:
        detection = _run_ccd_on_pixel(pixel)
        return _convert_ccd_results_into_dataset(results=detection, model_dataset=pixel)
    except np.linalg.LinAlgError:
        # Singular matrices crop up during model inversion; treat the
        # pixel as having no product rather than aborting the whole run.
        return None
def _ccd_product_iterator_from_pixels(pixels, distributed=False):
    """Map pixels to their per-pixel CCD products, optionally in parallel.

    Args:
        pixels: iterator of 1x1xt pixel datasets
            (see _pixel_iterator_from_xarray).
        distributed: when truthy, fan the work out across all cores with a
            multiprocessing pool; otherwise map lazily in-process.
            (Was compared with `== True`; truthiness is the idiomatic and
            more general test.)

    Returns:
        An iterator of ccd product pixels; entries are None for pixels
        whose CCD run failed on a singular matrix.
    """
    if not distributed:
        return map(_ccd_product_from_pixel, pixels)
    pool = generate_thread_pool()
    try:
        # imap_unordered hands results back as workers finish them.
        return pool.imap_unordered(_ccd_product_from_pixel, pixels)
    finally:
        # The original duplicated this cleanup in both the success path and
        # a bare `except:`; try/finally covers both without masking errors.
        # close/join still lets already-queued work drain first.
        destroy_thread_pool(pool)
def _rebuild_xarray_from_pixels(pixels):
"""Combines pixel sized ccd-products into a larger xarray object.
Used to combine single pixels with latitude, longitude, time back into a single xArray dataset instance
Args:
pixels: iterable of xArray datasets that can be combined using combine_first
Returns:
An xArray dataset with the dimensions of pixels
"""
return reduce(lambda x, y: x.combine_first(y), pixels)
###################################################################
## Callable Functions
###################################################################
@disable_logger
def _generate_change_matrix(ds, distributed=False):
    """Run CCD over every pixel of `ds` and stitch the results together.

    Args:
        ds: xarray dataset containing landsat bands
            [red, green, blue, nir, swir1, swir2, thermal, qa]; missing
            bands are masked with ones by the per-pixel runner.
        distributed: when True, pixels are processed across all cores.

    Returns:
        An xarray marking, per pixel, the time steps where CCD detected
        change ('change_volume' source matrix).
    """
    pixel_stream = _pixel_iterator_from_xarray(ds)
    product_stream = _ccd_product_iterator_from_pixels(pixel_stream, distributed=distributed)
    # Pixels that failed on a singular matrix come back as None; drop them.
    usable_products = (product for product in product_stream if product is not None)
    return _rebuild_xarray_from_pixels(usable_products)
def process_xarray(ds, distributed=False, process="change_count"):
    """Run a named CCD product computation over an xarray dataset.

    A dispatch dict (rather than an if-ladder) maps the `process` name to
    the routine that builds it, so products can be registered in one place.

    Args:
        ds: xarray dataset containing landsat bands.
        distributed: when True, CCD is fanned out across all cores.
        process: one of 'change_count', 'first', or 'matrix'.

    Returns:
        The xarray product selected by `process`.
    """
    def generate_matrix():
        return _generate_change_matrix(ds, distributed=distributed)

    def change_count():
        return (generate_matrix().sum(dim='time') - 1).rename('change_volume')

    def first_change():
        return _nth_occurence_in_ccd_matrix(generate_matrix(), 1, f=_n64_datetime_to_scalar)

    return {
        "change_count": change_count,
        "first": first_change,
        "matrix": generate_matrix,
    }[process]()
@disable_logger
def process_pixel(ds):
    """Run CCD on a single 1x1 pixel and attach the results as attrs.

    Args:
        ds: 1x1xt xarray dataset containing landsat bands; missing bands
            are masked with ones by the underlying CCD runner.

    Returns:
        A deep copy of `ds` whose attrs carry the raw ccd results, the
        per-model start/end/break datetimes, and a 'ccd' marker flag.

    Raises:
        Exception: when `ds` is not a single pixel.
    """
    if not _is_pixel(ds):
        raise Exception("Incorrect dimensions for pixel operation.")
    pixel = ds.copy(deep=True)
    results = _run_ccd_on_pixel(pixel)

    def _model_dates(day_attr):
        # Convert each change model's ordinal day into a datetime.
        return [datetime.fromordinal(getattr(model, day_attr)) for model in results['change_models']]

    pixel.attrs['ccd_results'] = results
    pixel.attrs['ccd_start_times'] = _model_dates('start_day')
    pixel.attrs['ccd_end_times'] = _model_dates('end_day')
    pixel.attrs['ccd_break_times'] = _model_dates('break_day')
    pixel.attrs['ccd'] = True
    return pixel
def plot_pixel(ds, bands=None):
    """Plot time-series models for a pixel processed by `process_pixel()`.

    Args:
        ds: xarray dataset previously run through `process_pixel()` (must
            carry the 'ccd' marker in attrs).
        bands: list of band names to plot. None or an empty list plots
            every available band.

    Raises:
        Exception: when `ds` has not been processed by CCD.
    """
    if 'ccd' not in ds.attrs:
        raise Exception("This pixel hasn't been processed by CCD. Use the `ccd.process_pixel()` function.")
    if not bands:
        # BUG FIX: the original tested `bands is []`, which is always False
        # (identity against a fresh list), so an explicitly-empty list
        # plotted nothing instead of falling back to all available bands.
        possible_bands = ['red', 'green', 'blue', 'nir', 'swir1', 'swir2', 'thermal']
        bands = _intersect(possible_bands, ds.data_vars)
    for band in bands:
        _plot_band(results=ds.attrs['ccd_results'], original_pixel=ds, band=band)
| |
import unittest
import troposphere.rds as rds
from troposphere import If, Parameter, Ref
class TestRDS(unittest.TestCase):
    """Validation tests for troposphere's rds.DBInstance and rds.OptionGroup.

    JSONrepr() is what triggers each resource's validation logic, so the
    tests call it and assert on the ValueError messages it raises.
    """

    def test_it_allows_an_rds_instance_created_from_a_snapshot(self):
        """A snapshot identifier alone satisfies the credentials requirement."""
        rds_instance = rds.DBInstance(
            'SomeTitle',
            AllocatedStorage=100,
            DBInstanceClass='db.m1.small',
            Engine='MySQL',
            DBSnapshotIdentifier='SomeSnapshotIdentifier'
        )
        rds_instance.JSONrepr()

    def test_it_allows_an_rds_instance_with_master_username_and_password(self):
        """Master username + password also satisfy the credentials requirement."""
        rds_instance = rds.DBInstance(
            'SomeTitle',
            AllocatedStorage=1,
            DBInstanceClass='db.m1.small',
            Engine='MySQL',
            MasterUsername='SomeUsername',
            MasterUserPassword='SomePassword'
        )
        rds_instance.JSONrepr()

    def test_it_rds_instances_require_either_a_snapshot_or_credentials(self):
        """Neither snapshot nor credentials -> validation must fail."""
        rds_instance = rds.DBInstance(
            'SomeTitle',
            AllocatedStorage=1,
            DBInstanceClass='db.m1.small',
            Engine='MySQL'
        )
        with self.assertRaisesRegexp(
                ValueError,
                'Either \(MasterUsername and MasterUserPassword\) or'
                ' DBSnapshotIdentifier are required'
        ):
            rds_instance.JSONrepr()

    def test_it_allows_an_rds_replica(self):
        """A read replica only needs SourceDBInstanceIdentifier."""
        rds_instance = rds.DBInstance(
            'SomeTitle',
            AllocatedStorage=1,
            DBInstanceClass='db.m1.small',
            Engine='MySQL',
            SourceDBInstanceIdentifier='SomeSourceDBInstanceIdentifier'
        )
        rds_instance.JSONrepr()

    def test_replica_settings_are_inherited(self):
        """Replica-inherited properties may not be set alongside the source id."""
        rds_instance = rds.DBInstance(
            'SomeTitle',
            AllocatedStorage=1,
            DBInstanceClass='db.m1.small',
            Engine='MySQL',
            SourceDBInstanceIdentifier='SomeSourceDBInstanceIdentifier',
            BackupRetentionPeriod="1",
            DBName="SomeName",
            MasterUsername="SomeUsername",
            MasterUserPassword="SomePassword",
            PreferredBackupWindow="10:00-11:00",
            MultiAZ=True,
            DBSnapshotIdentifier="SomeDBSnapshotIdentifier",
            DBSubnetGroupName="SomeDBSubnetGroupName",
        )
        with self.assertRaisesRegexp(
                ValueError,
                'BackupRetentionPeriod, DBName, DBSnapshotIdentifier, '
                'DBSubnetGroupName, MasterUserPassword, MasterUsername, '
                'MultiAZ, PreferredBackupWindow '
                'properties can\'t be provided when '
                'SourceDBInstanceIdentifier is present '
                'AWS::RDS::DBInstance.'
        ):
            rds_instance.JSONrepr()

    def test_it_rds_instances_require_encryption_if_kms_key_provided(self):
        """Providing KmsKeyId without StorageEncrypted must fail validation."""
        rds_instance = rds.DBInstance(
            'SomeTitle',
            AllocatedStorage=1,
            DBInstanceClass='db.m1.small',
            Engine='MySQL',
            MasterUsername='SomeUsername',
            MasterUserPassword='SomePassword',
            KmsKeyId='arn:aws:kms:us-east-1:123456789012:key/'
                     '12345678-1234-1234-1234-123456789012'
        )
        with self.assertRaisesRegexp(
                ValueError,
                'If KmsKeyId is provided, StorageEncrypted is required'
        ):
            rds_instance.JSONrepr()

    def test_it_allows_an_rds_instance_with_iops(self):
        """Iops may be given as int or (Python 2) long."""
        # ensure troposphere works with longs and ints
        try:
            long_number = long(2000)
        except NameError:
            # Python 3 doesn't have 'long' anymore
            long_number = 2000
        rds_instance = rds.DBInstance(
            'SomeTitle',
            AllocatedStorage=200,
            DBInstanceClass='db.m1.small',
            Engine='MySQL',
            MasterUsername='SomeUsername',
            MasterUserPassword='SomePassword',
            StorageType='io1',
            Iops=long_number,
        )
        rds_instance.JSONrepr()

    def test_optiongroup(self):
        """A well-formed OptionGroup with option configurations validates."""
        rds_optiongroup = rds.OptionGroup(
            "OracleOptionGroup",
            EngineName="oracle-ee",
            MajorEngineVersion="12.1",
            OptionGroupDescription="A test option group",
            OptionConfigurations=[
                rds.OptionConfiguration(
                    DBSecurityGroupMemberships=["default"],
                    OptionName="OEM",
                    Port="5500",
                ),
                rds.OptionConfiguration(
                    OptionName="APEX",
                ),
            ]
        )
        rds_optiongroup.JSONrepr()

    def test_fail_az_and_multiaz(self):
        """A fixed AvailabilityZone together with MultiAZ=True must fail."""
        i = rds.DBInstance(
            "NoAZAndMultiAZ",
            MasterUsername="myuser",
            MasterUserPassword="mypassword",
            AllocatedStorage=10,
            DBInstanceClass="db.m1.small",
            Engine="postgres",
            AvailabilityZone="us-east-1",
            MultiAZ=True)
        with self.assertRaisesRegexp(ValueError, "if MultiAZ is set to "):
            i.JSONrepr()

    def test_az_and_multiaz_funcs(self):
        """AZ/MultiAZ given as intrinsic functions skip the static check."""
        AWS_NO_VALUE = "AWS::NoValue"
        db_az = "us-east-1"
        db_multi_az = Parameter("dbmultiaz", Type="String")
        i = rds.DBInstance(
            "NoAZAndMultiAZ",
            MasterUsername="myuser",
            MasterUserPassword="mypassword",
            AllocatedStorage=10,
            DBInstanceClass="db.m1.small",
            Engine="postgres",
            AvailabilityZone=If("db_az", Ref(db_az), Ref(AWS_NO_VALUE)),
            MultiAZ=Ref(db_multi_az),
        )
        i.validate()

    def test_io1_storage_type_and_iops(self):
        """StorageType 'io1' without Iops must fail validation."""
        i = rds.DBInstance(
            "NoAZAndMultiAZ",
            MasterUsername="myuser",
            MasterUserPassword="mypassword",
            AllocatedStorage=10,
            DBInstanceClass="db.m1.small",
            Engine="postgres",
            StorageType='io1')
        with self.assertRaisesRegexp(ValueError,
                                     "Must specify Iops if "):
            i.JSONrepr()

    def test_storage_to_iops_ratio(self):
        """AllocatedStorage must be >= 100 and >= Iops/10 for io1 storage."""
        i = rds.DBInstance(
            "NoAZAndMultiAZ",
            MasterUsername="myuser",
            MasterUserPassword="mypassword",
            DBInstanceClass="db.m1.small",
            Engine="postgres",
            StorageType='io1',
            Iops=4000,
            AllocatedStorage=10)
        with self.assertRaisesRegexp(ValueError,
                                     " must be at least 100 "):
            i.JSONrepr()
        i.AllocatedStorage = 100
        with self.assertRaisesRegexp(ValueError,
                                     " must be no less than 1/10th "):
            i.JSONrepr()
        i.AllocatedStorage = 400
        i.JSONrepr()
class TestRDSValidators(unittest.TestCase):
    """Exercises the module-level rds.validate_* helper functions."""

    def test_validate_iops(self):
        """Out-of-range Iops are rejected; 0 and mid-range values pass."""
        for invalid_iops in (500, 20000):
            with self.assertRaises(ValueError):
                rds.validate_iops(invalid_iops)
        rds.validate_iops(2000)
        rds.validate_iops(0)

    def test_validate_storage_type(self):
        """Every documented storage type passes; unknown types raise."""
        for valid_type in rds.VALID_STORAGE_TYPES:
            rds.validate_storage_type(valid_type)
        with self.assertRaises(ValueError):
            rds.validate_storage_type("bad_storage_type")

    def test_validate_engine(self):
        """Every documented DB engine passes; unknown engines raise."""
        for valid_engine in rds.VALID_DB_ENGINES:
            rds.validate_engine(valid_engine)
        with self.assertRaises(ValueError):
            rds.validate_engine("bad_engine")

    def test_validate_license_model(self):
        """Every documented license model passes; unknown models raise."""
        for valid_model in rds.VALID_LICENSE_MODELS:
            rds.validate_license_model(valid_model)
        with self.assertRaises(ValueError):
            rds.validate_license_model("bad_license_model")

    def test_validate_backup_window(self):
        """Backup windows must be well-formed HH:MM ranges >= 30 minutes."""
        for window in ("10:00-11:00", "22:00-06:00"):
            rds.validate_backup_window(window)
        for malformed in ("bad_backup_window", "28:11-10:00", "10:00-28:11"):
            with self.assertRaisesRegexp(ValueError, "must be in the format"):
                rds.validate_backup_window(malformed)
        with self.assertRaisesRegexp(ValueError, "must be at least 30 "):
            rds.validate_backup_window("10:00-10:10")

    def test_validate_maintenance_window(self):
        """Maintenance windows need valid ddd:HH:MM endpoints >= 30 min apart."""
        for window in ("Mon:10:00-Mon:16:30", "Mon:10:00-Wed:10:00",
                       "Sun:16:00-Mon:11:00"):
            rds.validate_maintenance_window(window)
        for malformed in ("bad_mainteance", "Mon:10:00-Tue:28:00", "10:00-22:00"):
            with self.assertRaisesRegexp(ValueError, "must be in the format"):
                rds.validate_maintenance_window(malformed)
        for bad_day in ("Boo:10:00-Woo:10:30", "Boo:10:00-Tue:10:30",
                        "Mon:10:00-Boo:10:30"):
            with self.assertRaisesRegexp(ValueError, " day part of ranges "):
                rds.validate_maintenance_window(bad_day)
        with self.assertRaisesRegexp(ValueError, "must be at least 30 "):
            rds.validate_maintenance_window("Mon:10:00-Mon:10:10")

    def test_validate_backup_retention_period(self):
        """Retention up to 35 days validates; anything larger raises."""
        for days in (1, 10, 15, 35):
            rds.validate_backup_retention_period(days)
        with self.assertRaisesRegexp(ValueError,
                                     " cannot be larger than 35 "):
            rds.validate_backup_retention_period(40)
        rds.validate_backup_retention_period(10)
| |
# Pull in the shared behavior-set helpers (e.g. createRetargetInstance).
scene.run("BehaviorSetCommon.py")
def setupBehaviorSet():
print "Setting up behavior set for gestures..."
#scene.loadAssetsFromPath("behaviorsets/MocapReaching/skeletons")
#scene.loadAssetsFromPath("behaviorsets/MocapReaching/motions")
scene.addAssetPath("script", "behaviorsets/MocapReaching/scripts")
# map the zebra2 skeleton
assetManager = scene.getAssetManager()
motionPath = "behaviorsets/MocapReaching/motions/"
skel = scene.getSkeleton("ChrGarza.sk")
if skel == None:
scene.loadAssetsFromPath("behaviorsets/MocapReaching/skeletons")
scene.run("zebra2-map.py")
zebra2Map = scene.getJointMapManager().getJointMap("zebra2")
garzaSkeleton = scene.getSkeleton("ChrGarza.sk")
zebra2Map.applySkeleton(garzaSkeleton)
mocapRReachMotions = StringVec();
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachBackFloor01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachBackHigh01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachBackLow01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachBackMediumFar01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachBackMediumMid01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachBackMediumNear01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachForwardFloor01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachForwardHigh01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachForwardLow01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachForwardMediumFar01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachForwardMediumMid01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachLeft30Floor01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachLeft30High01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachLeft30Low01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachLeft30MediumFar01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachLeft30MediumMid01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachLeft30MediumNear01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachLeft60Floor01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachLeft60High01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachLeft60Low01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachLeft60MediumFar01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachLeft60MediumMid01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachLeft60MediumNear01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachRight120Floor01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachRight120High01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachRight120Low01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachRight120MediumFar01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachRight120MediumMid01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachRight120MediumNear01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachRight30Floor01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachRight30High01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachRight30Low01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachRight30MediumFar01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachRight30MediumMid01")
mocapRReachMotions.append("ChrGarza@IdleStand01_ReachRight30MediumNear01")
mocapRReachMotions.append("ChrHarmony_Relax001_HandGraspSmSphere_Grasp")
mocapRReachMotions.append("ChrHarmony_Relax001_HandGraspSmSphere_Reach")
mocapRReachMotions.append("ChrHarmony_Relax001_HandGraspSmSphere_Release")
mocapRReachMotions.append("HandsAtSide_RArm_GestureYou")
mocapLReachMotions = StringVec();
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachBackFloor01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachBackHigh01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachBackLow01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachBackMediumFar01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachBackMediumMid01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachBackMediumNear01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachForwardFloor01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachForwardHigh01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachForwardLow01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachForwardMediumFar01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachForwardMediumMid01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachLeft30Floor01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachLeft30High01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachLeft30Low01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachLeft30MediumFar01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachLeft30MediumMid01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachLeft30MediumNear01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachLeft60Floor01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachLeft60High01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachLeft60Low01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachLeft60MediumFar01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachLeft60MediumMid01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachLeft60MediumNear01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachRight120Floor01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachRight120High01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachRight120Low01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachRight120MediumFar01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachRight120MediumMid01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachRight120MediumNear01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachRight30Floor01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachRight30High01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachRight30Low01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachRight30MediumFar01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachRight30MediumMid01")
mocapLReachMotions.append("ChrGarza@IdleStand01_LReachRight30MediumNear01")
mocapLReachMotions.append("ChrHarmony_Relax001_LHandGraspSmSphere_Grasp")
mocapLReachMotions.append("ChrHarmony_Relax001_LHandGraspSmSphere_Reach")
mocapLReachMotions.append("ChrHarmony_Relax001_LHandGraspSmSphere_Release")
mocapLReachMotions.append("HandsAtSide_LArm_GestureYou")
# mirror the right hand motions to the left hand side
for i in range(0,len(mocapRReachMotions)):
motion = scene.getMotion(mocapRReachMotions[i])
if motion == None:
assetManager.loadAsset(motionPath+mocapRReachMotions[i]+'.skm')
motion = scene.getMotion(mocapRReachMotions[i])
#print 'motionName = ' + locoMotions[i]
if motion != None:
motion.setMotionSkeletonName("ChrGarza.sk")
zebra2Map.applyMotion(motion)
mirrorMotion1 = scene.getMotion(mocapRReachMotions[i])
mirrorMotion1.mirror(mocapLReachMotions[i], "ChrGarza.sk")
zebra2Map.applyMotion(scene.getMotion("ChrGarza@IdleStand01"))
def retargetBehaviorSet(charName):
    """Retarget the mocap reaching/grasping motion set onto a character.

    Args:
        charName: name of an existing scene character; a no-op when the
            character cannot be found.
    """
    # Reaching motions: the idle pose plus the regular Reach/LReach grid of
    # direction x elevation names (Forward has no MediumNear capture).
    reachNames = ["ChrGarza@IdleStand01"]
    directions = ["Back", "Forward", "Left30", "Left60", "Right120", "Right30"]
    elevations = ["Floor01", "High01", "Low01", "MediumFar01", "MediumMid01", "MediumNear01"]
    for side in ["Reach", "LReach"]:
        for direction in directions:
            for elevation in elevations:
                if direction == "Forward" and elevation == "MediumNear01":
                    continue  # no MediumNear capture exists for Forward
                reachNames.append("ChrGarza@IdleStand01_" + side + direction + elevation)
    mocapReachMotions = StringVec()
    for name in reachNames:
        mocapReachMotions.append(name)
    # Grasp motions: right/left sphere-grasp phases plus the gesture pair.
    grabNames = []
    for phase in ["Grasp", "Reach", "Release"]:
        grabNames.append("ChrHarmony_Relax001_HandGraspSmSphere_" + phase)
        grabNames.append("ChrHarmony_Relax001_LHandGraspSmSphere_" + phase)
    grabNames.append("HandsAtSide_LArm_GestureYou")
    grabNames.append("HandsAtSide_RArm_GestureYou")
    grabMotions = StringVec()
    for name in grabNames:
        grabMotions.append(name)
    sbChar = scene.getCharacter(charName)
    if sbChar == None:
        return
    skelName = sbChar.getSkeleton().getName()
    createRetargetInstance('ChrGarza.sk', skelName)
    assetManager = scene.getAssetManager()
    # Reaching motions were captured on the Garza skeleton...
    for name in reachNames:
        sbMotion = assetManager.getMotion(name)
        if sbMotion != None:
            sbMotion.setMotionSkeletonName('ChrGarza.sk')
    # ...while the grasp motions live on the common skeleton.
    for name in grabNames:
        sbMotion = assetManager.getMotion(name)
        if sbMotion != None:
            sbMotion.setMotionSkeletonName('common.sk')
    scene.run("init-example-reach-mocap.py")
    reachSetup(charName, "KNN", 'ChrGarza.sk', '')
| |
# -*- coding: utf-8 -*-
# Import python libs
import sys
import re
# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
ensure_in_syspath('../../')
# Import salt libs
from salt.modules import virt
from salt.modules import config
from salt._compat import ElementTree as ET
import salt.utils
# Import third party libs
import yaml
# Minimal stand-in for the salt loader context: the loader would normally
# inject these dunder dicts into each execution module at runtime.
config.__grains__ = {}
config.__opts__ = {}
config.__pillar__ = {}
# Route the virt module's config lookups at the real config execution module
# so profile resolution in _disk_profile/_nic_profile works without a master.
virt.__salt__ = {
    'config.get': config.get,
    'config.option': config.option,
}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class VirtTestCase(TestCase):
    """Unit tests for the XML-generation helpers of ``salt.modules.virt``.

    Each test builds disk/NIC profiles, renders libvirt domain (or volume)
    XML through ``virt._gen_xml`` / ``virt._gen_vol_xml`` and inspects the
    resulting element tree.  YAML fixtures are parsed with
    ``yaml.safe_load`` (instead of the unsafe, Loader-less ``yaml.load``)
    since they contain only plain scalars and mappings.
    """

    @skipIf(sys.version_info < (2, 7), 'ElementTree version 1.3 required'
            ' which comes with Python 2.7')
    def test_boot_default_dev(self):
        """The boot device defaults to 'hd' when boot_dev is not given."""
        diskp = virt._disk_profile('default', 'kvm')
        nicp = virt._nic_profile('default', 'kvm')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'kvm'
            )
        root = ET.fromstring(xml_data)
        self.assertEqual(root.find('os/boot').attrib['dev'], 'hd')

    @skipIf(sys.version_info < (2, 7), 'ElementTree version 1.3 required'
            ' which comes with Python 2.7')
    def test_boot_custom_dev(self):
        """An explicit boot_dev is propagated into <os><boot dev=...>."""
        diskp = virt._disk_profile('default', 'kvm')
        nicp = virt._nic_profile('default', 'kvm')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'kvm',
            boot_dev='cdrom'
            )
        root = ET.fromstring(xml_data)
        self.assertEqual(root.find('os/boot').attrib['dev'], 'cdrom')

    @skipIf(sys.version_info < (2, 7), 'ElementTree version 1.3 required'
            ' which comes with Python 2.7')
    def test_boot_multiple_devs(self):
        """A space-separated boot_dev yields one <boot> element per device."""
        diskp = virt._disk_profile('default', 'kvm')
        nicp = virt._nic_profile('default', 'kvm')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'kvm',
            boot_dev='cdrom network'
            )
        root = ET.fromstring(xml_data)
        devs = root.findall('.//boot')
        self.assertTrue(len(devs) == 2)

    @skipIf(sys.version_info < (2, 7), 'ElementTree version 1.3 required'
            ' which comes with Python 2.7')
    def test_gen_xml_for_serial_console(self):
        """serial_type='pty' with console=True emits serial and console devices."""
        diskp = virt._disk_profile('default', 'kvm')
        nicp = virt._nic_profile('default', 'kvm')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'kvm',
            serial_type='pty',
            console=True
            )
        root = ET.fromstring(xml_data)
        self.assertEqual(root.find('devices/serial').attrib['type'], 'pty')
        self.assertEqual(root.find('devices/console').attrib['type'], 'pty')

    @skipIf(sys.version_info < (2, 7), 'ElementTree version 1.3 required'
            ' which comes with Python 2.7')
    def test_gen_xml_for_telnet_console(self):
        """A telnet console honours the explicitly requested port."""
        diskp = virt._disk_profile('default', 'kvm')
        nicp = virt._nic_profile('default', 'kvm')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'kvm',
            serial_type='tcp',
            console=True,
            telnet_port=22223
            )
        root = ET.fromstring(xml_data)
        self.assertEqual(root.find('devices/serial').attrib['type'], 'tcp')
        self.assertEqual(root.find('devices/console').attrib['type'], 'tcp')
        self.assertEqual(root.find('devices/console/source').attrib['service'], '22223')

    @skipIf(sys.version_info < (2, 7), 'ElementTree version 1.3 required'
            ' which comes with Python 2.7')
    def test_gen_xml_for_telnet_console_unspecified_port(self):
        """Without telnet_port a port is still chosen for the console."""
        diskp = virt._disk_profile('default', 'kvm')
        nicp = virt._nic_profile('default', 'kvm')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'kvm',
            serial_type='tcp',
            console=True
            )
        root = ET.fromstring(xml_data)
        self.assertEqual(root.find('devices/serial').attrib['type'], 'tcp')
        self.assertEqual(root.find('devices/console').attrib['type'], 'tcp')
        # NOTE: int(x) either raises ValueError or returns an int, so this
        # assertion really only verifies that the service attribute parses
        # as an integer (the isinstance check itself can never fail).
        self.assertIsInstance(int(root.find('devices/console/source').attrib['service']), int)

    @skipIf(sys.version_info < (2, 7), 'ElementTree version 1.3 required'
            ' which comes with Python 2.7')
    def test_gen_xml_for_serial_no_console(self):
        """console=False suppresses the <console> device for a pty serial."""
        diskp = virt._disk_profile('default', 'kvm')
        nicp = virt._nic_profile('default', 'kvm')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'kvm',
            serial_type='pty',
            console=False
            )
        root = ET.fromstring(xml_data)
        self.assertEqual(root.find('devices/serial').attrib['type'], 'pty')
        self.assertEqual(root.find('devices/console'), None)

    @skipIf(sys.version_info < (2, 7), 'ElementTree version 1.3 required'
            ' which comes with Python 2.7')
    def test_gen_xml_for_telnet_no_console(self):
        """console=False suppresses the <console> device for a tcp serial."""
        diskp = virt._disk_profile('default', 'kvm')
        nicp = virt._nic_profile('default', 'kvm')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'kvm',
            serial_type='tcp',
            console=False,
            )
        root = ET.fromstring(xml_data)
        self.assertEqual(root.find('devices/serial').attrib['type'], 'tcp')
        self.assertEqual(root.find('devices/console'), None)

    def test_default_disk_profile_hypervisor_esxi(self):
        """With no configured profiles, esxi gets one scsi/vmdk system disk."""
        mock = MagicMock(return_value={})
        with patch.dict(virt.__salt__, {'config.get': mock}):
            ret = virt._disk_profile('nonexistant', 'esxi')
            self.assertTrue(len(ret) == 1)
            self.assertIn('system', ret[0])
            system = ret[0]['system']
            self.assertEqual(system['format'], 'vmdk')
            self.assertEqual(system['model'], 'scsi')
            self.assertTrue(system['size'] >= 1)

    def test_default_disk_profile_hypervisor_kvm(self):
        """With no configured profiles, kvm gets one virtio/qcow2 system disk."""
        mock = MagicMock(return_value={})
        with patch.dict(virt.__salt__, {'config.get': mock}):
            ret = virt._disk_profile('nonexistant', 'kvm')
            self.assertTrue(len(ret) == 1)
            self.assertIn('system', ret[0])
            system = ret[0]['system']
            self.assertEqual(system['format'], 'qcow2')
            self.assertEqual(system['model'], 'virtio')
            self.assertTrue(system['size'] >= 1)

    def test_default_nic_profile_hypervisor_esxi(self):
        """Default esxi NIC profile: one e1000 bridge NIC on 'DEFAULT'."""
        mock = MagicMock(return_value={})
        with patch.dict(virt.__salt__, {'config.get': mock}):
            ret = virt._nic_profile('nonexistant', 'esxi')
            self.assertTrue(len(ret) == 1)
            eth0 = ret[0]
            self.assertEqual(eth0['name'], 'eth0')
            self.assertEqual(eth0['type'], 'bridge')
            self.assertEqual(eth0['source'], 'DEFAULT')
            self.assertEqual(eth0['model'], 'e1000')

    def test_default_nic_profile_hypervisor_kvm(self):
        """Default kvm NIC profile: one virtio bridge NIC on 'br0'."""
        mock = MagicMock(return_value={})
        with patch.dict(virt.__salt__, {'config.get': mock}):
            ret = virt._nic_profile('nonexistant', 'kvm')
            self.assertTrue(len(ret) == 1)
            eth0 = ret[0]
            self.assertEqual(eth0['name'], 'eth0')
            self.assertEqual(eth0['type'], 'bridge')
            self.assertEqual(eth0['source'], 'br0')
            self.assertEqual(eth0['model'], 'virtio')

    @skipIf(sys.version_info < (2, 7), 'ElementTree version 1.3 required'
            ' which comes with Python 2.7')
    def test_gen_vol_xml_for_kvm(self):
        """kvm volume XML uses a .qcow2 name and KiB capacity."""
        xml_data = virt._gen_vol_xml('vmname', 'system', 8192, 'kvm')
        root = ET.fromstring(xml_data)
        self.assertEqual(root.find('name').text, 'vmname/system.qcow2')
        self.assertEqual(root.find('key').text, 'vmname/system')
        self.assertEqual(root.find('capacity').attrib['unit'], 'KiB')
        self.assertEqual(root.find('capacity').text, str(8192 * 1024))

    @skipIf(sys.version_info < (2, 7), 'ElementTree version 1.3 required'
            ' which comes with Python 2.7')
    def test_gen_vol_xml_for_esxi(self):
        """esxi volume XML uses a .vmdk name and KiB capacity."""
        xml_data = virt._gen_vol_xml('vmname', 'system', 8192, 'esxi')
        root = ET.fromstring(xml_data)
        self.assertEqual(root.find('name').text, 'vmname/system.vmdk')
        self.assertEqual(root.find('key').text, 'vmname/system')
        self.assertEqual(root.find('capacity').attrib['unit'], 'KiB')
        self.assertEqual(root.find('capacity').text, str(8192 * 1024))

    @skipIf(sys.version_info < (2, 7), 'ElementTree version 1.3 required'
            ' which comes with Python 2.7')
    def test_gen_xml_for_kvm_default_profile(self):
        """Full domain XML check for the kvm defaults (disk, NIC, mac)."""
        diskp = virt._disk_profile('default', 'kvm')
        nicp = virt._nic_profile('default', 'kvm')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'kvm',
            )
        root = ET.fromstring(xml_data)
        self.assertEqual(root.attrib['type'], 'kvm')
        self.assertEqual(root.find('vcpu').text, '1')
        self.assertEqual(root.find('memory').text, str(512 * 1024))
        self.assertEqual(root.find('memory').attrib['unit'], 'KiB')

        disks = root.findall('.//disk')
        self.assertEqual(len(disks), 1)
        disk = disks[0]
        self.assertTrue(disk.find('source').attrib['file'].startswith('/'))
        self.assertTrue('hello/system' in disk.find('source').attrib['file'])
        self.assertEqual(disk.find('target').attrib['dev'], 'vda')
        self.assertEqual(disk.find('target').attrib['bus'], 'virtio')
        self.assertEqual(disk.find('driver').attrib['name'], 'qemu')
        self.assertEqual(disk.find('driver').attrib['type'], 'qcow2')

        interfaces = root.findall('.//interface')
        self.assertEqual(len(interfaces), 1)
        iface = interfaces[0]
        self.assertEqual(iface.attrib['type'], 'bridge')
        self.assertEqual(iface.find('source').attrib['bridge'], 'br0')
        self.assertEqual(iface.find('model').attrib['type'], 'virtio')
        # Generated MAC must look like a valid six-octet hardware address.
        mac = iface.find('mac').attrib['address']
        self.assertTrue(
            re.match('^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$', mac, re.I))

    @skipIf(sys.version_info < (2, 7), 'ElementTree version 1.3 required'
            ' which comes with Python 2.7')
    def test_gen_xml_for_esxi_default_profile(self):
        """Full domain XML check for the esxi defaults (disk, NIC, mac)."""
        diskp = virt._disk_profile('default', 'esxi')
        nicp = virt._nic_profile('default', 'esxi')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'esxi',
            )
        root = ET.fromstring(xml_data)
        # libvirt names the esxi hypervisor type 'vmware'.
        self.assertEqual(root.attrib['type'], 'vmware')
        self.assertEqual(root.find('vcpu').text, '1')
        self.assertEqual(root.find('memory').text, str(512 * 1024))
        self.assertEqual(root.find('memory').attrib['unit'], 'KiB')

        disks = root.findall('.//disk')
        self.assertEqual(len(disks), 1)
        disk = disks[0]
        self.assertTrue('[0]' in disk.find('source').attrib['file'])
        self.assertTrue('hello/system' in disk.find('source').attrib['file'])
        self.assertEqual(disk.find('target').attrib['dev'], 'sda')
        self.assertEqual(disk.find('target').attrib['bus'], 'scsi')
        self.assertEqual(disk.find('address').attrib['unit'], '0')

        interfaces = root.findall('.//interface')
        self.assertEqual(len(interfaces), 1)
        iface = interfaces[0]
        self.assertEqual(iface.attrib['type'], 'bridge')
        self.assertEqual(iface.find('source').attrib['bridge'], 'DEFAULT')
        self.assertEqual(iface.find('model').attrib['type'], 'e1000')
        mac = iface.find('mac').attrib['address']
        self.assertTrue(
            re.match('^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$', mac, re.I))

    @skipIf(sys.version_info < (2, 7), 'ElementTree version 1.3 required'
            ' which comes with Python 2.7')
    @patch('salt.modules.virt._nic_profile')
    @patch('salt.modules.virt._disk_profile')
    def test_gen_xml_for_esxi_custom_profile(self, disk_profile, nic_profile):
        """Custom multi-disk/multi-NIC esxi profiles render all devices."""
        diskp_yaml = '''
- first:
    size: 8192
    format: vmdk
    model: scsi
    pool: datastore1
- second:
    size: 4096
    format: vmdk   # FIX remove line, currently test fails
    model: scsi    # FIX remove line, currently test fails
    pool: datastore2
'''
        nicp_yaml = '''
- type: bridge
  name: eth1
  source: ONENET
  model: e1000
  mac: '00:00:00:00:00:00'
- name: eth2
  type: bridge
  source: TWONET
  model: e1000
  mac: '00:00:00:00:00:00'
'''
        # safe_load is sufficient (plain scalars/mappings) and avoids the
        # arbitrary-object construction of a Loader-less yaml.load.
        disk_profile.return_value = yaml.safe_load(diskp_yaml)
        nic_profile.return_value = yaml.safe_load(nicp_yaml)
        diskp = virt._disk_profile('noeffect', 'esxi')
        nicp = virt._nic_profile('noeffect', 'esxi')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'esxi',
            )
        root = ET.fromstring(xml_data)
        self.assertEqual(root.attrib['type'], 'vmware')
        self.assertEqual(root.find('vcpu').text, '1')
        self.assertEqual(root.find('memory').text, str(512 * 1024))
        self.assertEqual(root.find('memory').attrib['unit'], 'KiB')
        self.assertTrue(len(root.findall('.//disk')) == 2)
        self.assertTrue(len(root.findall('.//interface')) == 2)

    @skipIf(sys.version_info < (2, 7), 'ElementTree version 1.3 required'
            ' which comes with Python 2.7')
    @patch('salt.modules.virt._nic_profile')
    @patch('salt.modules.virt._disk_profile')
    def test_gen_xml_for_kvm_custom_profile(self, disk_profile, nic_profile):
        """Custom multi-disk/multi-NIC kvm profiles render all devices."""
        diskp_yaml = '''
- first:
    size: 8192
    format: qcow2
    model: virtio
    pool: /var/lib/images
- second:
    size: 4096
    format: qcow2   # FIX remove line, currently test fails
    model: virtio   # FIX remove line, currently test fails
    pool: /var/lib/images
'''
        nicp_yaml = '''
- type: bridge
  name: eth1
  source: b2
  model: virtio
  mac: '00:00:00:00:00:00'
- name: eth2
  type: bridge
  source: b2
  model: virtio
  mac: '00:00:00:00:00:00'
'''
        disk_profile.return_value = yaml.safe_load(diskp_yaml)
        nic_profile.return_value = yaml.safe_load(nicp_yaml)
        diskp = virt._disk_profile('noeffect', 'kvm')
        nicp = virt._nic_profile('noeffect', 'kvm')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'kvm',
            )
        root = ET.fromstring(xml_data)
        self.assertEqual(root.attrib['type'], 'kvm')
        self.assertEqual(root.find('vcpu').text, '1')
        self.assertEqual(root.find('memory').text, str(512 * 1024))
        self.assertEqual(root.find('memory').attrib['unit'], 'KiB')
        self.assertTrue(len(root.findall('.//disk')) == 2)
        self.assertTrue(len(root.findall('.//interface')) == 2)

    @skipIf(sys.version_info < (2, 7), 'ElementTree version 1.3 required'
            ' which comes with Python 2.7')
    def test_controller_for_esxi(self):
        """esxi domains get exactly one lsilogic SCSI controller."""
        diskp = virt._disk_profile('default', 'esxi')
        nicp = virt._nic_profile('default', 'esxi')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'esxi'
            )
        root = ET.fromstring(xml_data)
        controllers = root.findall('.//devices/controller')
        self.assertTrue(len(controllers) == 1)
        controller = controllers[0]
        self.assertEqual(controller.attrib['model'], 'lsilogic')

    @skipIf(sys.version_info < (2, 7), 'ElementTree version 1.3 required'
            ' which comes with Python 2.7')
    def test_controller_for_kvm(self):
        """kvm domains get no explicit controller element."""
        diskp = virt._disk_profile('default', 'kvm')
        nicp = virt._nic_profile('default', 'kvm')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'kvm'
            )
        root = ET.fromstring(xml_data)
        controllers = root.findall('.//devices/controller')
        # There should be no controller
        self.assertTrue(len(controllers) == 0)

    def test_mixed_dict_and_list_as_profile_objects(self):
        """_nic_profile accepts list-style, legacy-dict and mixed profiles.

        All three configured spellings must normalize to two interfaces,
        each with source/type/name/model ('virtio') and a generated MAC.
        """
        yaml_config = '''
virt.nic:
  new-listonly-profile:
    - bridge: br0
      name: eth0
    - model: virtio
      name: eth1
      source: test_network
      type: network
  new-list-with-legacy-names:
    - eth0:
        bridge: br0
    - eth1:
        bridge: br1
        model: virtio
  non-default-legacy-profile:
    eth0:
      bridge: br0
    eth1:
      bridge: br1
      model: virtio
'''
        mock_config = yaml.safe_load(yaml_config)
        salt.modules.config.__opts__ = mock_config
        for name in mock_config['virt.nic'].keys():
            profile = salt.modules.virt._nic_profile(name, 'kvm')
            self.assertEqual(len(profile), 2)

            interface_attrs = profile[0]
            self.assertIn('source', interface_attrs)
            self.assertIn('type', interface_attrs)
            self.assertIn('name', interface_attrs)
            self.assertIn('model', interface_attrs)
            self.assertEqual(interface_attrs['model'], 'virtio')

            self.assertIn('mac', interface_attrs)
            self.assertTrue(
                re.match('^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$',
                         interface_attrs['mac'], re.I))
if __name__ == '__main__':
    # Allow running this test module standalone through the salt test runner;
    # no running daemon is needed since everything external is mocked.
    from integration import run_tests
    run_tests(VirtTestCase, needs_daemon=False)
| |
from StringIO import StringIO
import pickle
import sys
import gc
import copy
from os import path
from numpy.testing import *
from numpy.testing.utils import _assert_valid_refcount
from numpy.compat import asbytes, asunicode, asbytes_nested
import numpy as np
# Python 3 has no StringIO module and pickling/array dumps operate on bytes,
# so substitute io.BytesIO for the Python 2 StringIO class imported above.
if sys.version_info[0] >= 3:
    import io
    StringIO = io.BytesIO

# Default test level, used as the keyword default for every test method.
rlevel = 1
class TestRegression(TestCase):
def test_invalid_round(self,level=rlevel):
"""Ticket #3"""
v = 4.7599999999999998
assert_array_equal(np.array([v]),np.array(v))
    def test_mem_empty(self,level=rlevel):
        """Ticket #7"""
        # Creating a structured empty array must not crash or corrupt memory.
        np.empty((1,),dtype=[('x',np.int64)])
def test_pickle_transposed(self,level=rlevel):
"""Ticket #16"""
a = np.transpose(np.array([[2,9],[7,0],[3,8]]))
f = StringIO()
pickle.dump(a,f)
f.seek(0)
b = pickle.load(f)
f.close()
assert_array_equal(a,b)
    def test_typeNA(self,level=rlevel):
        """Ticket #31"""
        # The typeNA table maps each dtype to its capitalized NA-style name.
        assert_equal(np.typeNA[np.int64],'Int64')
        assert_equal(np.typeNA[np.uint64],'UInt64')
def test_dtype_names(self,level=rlevel):
"""Ticket #35"""
dt = np.dtype([(('name','label'),np.int32,3)])
    def test_reduce(self,level=rlevel):
        """Ticket #40"""
        # An explicit dtype=None must behave like the default, not fail.
        assert_almost_equal(np.add.reduce([1.,.5],dtype=None), 1.5)
    def test_zeros_order(self,level=rlevel):
        """Ticket #43"""
        # All three spellings of the order argument must be accepted.
        np.zeros([3], int, 'C')
        np.zeros([3], order='C')
        np.zeros([3], int, order='C')
def test_sort_bigendian(self,level=rlevel):
"""Ticket #47"""
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
    def test_negative_nd_indexing(self,level=rlevel):
        """Ticket #49"""
        # Fancy indexing with negative indices must not modify the index array.
        c = np.arange(125).reshape((5,5,5))
        origidx = np.array([-1, 0, 1])
        idx = np.array(origidx)
        c[idx]
        assert_array_equal(idx, origidx)
def test_char_dump(self,level=rlevel):
"""Ticket #50"""
f = StringIO()
ca = np.char.array(np.arange(1000,1010),itemsize=4)
ca.dump(f)
f.seek(0)
ca = np.load(f)
f.close()
    def test_noncontiguous_fill(self,level=rlevel):
        """Ticket #58."""
        # b is a non-contiguous view; reassigning its shape in place must
        # raise rather than silently corrupt the underlying data.
        a = np.zeros((5,3))
        b = a[:,:2,]
        def rs():
            b.shape = (10,)
        self.assertRaises(AttributeError,rs)
def test_bool(self,level=rlevel):
"""Ticket #60"""
x = np.bool_(1)
def test_indexing1(self,level=rlevel):
"""Ticket #64"""
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j,4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
def test_indexing2(self,level=rlevel):
"""Ticket #65"""
descr = [('x', 'i4', (2,))]
buffer = ([3,2],)
h = np.array(buffer, dtype=descr)
h['x']
def test_round(self,level=rlevel):
"""Ticket #67"""
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
def test_scalar_compare(self,level=rlevel):
"""Ticket #72"""
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False,True]))
self.assert_(a[1] == 'auto')
self.assert_(a[0] != 'auto')
b = np.linspace(0, 10, 11)
self.assert_(b != 'auto')
self.assert_(b[0] != 'auto')
def test_unicode_swapping(self,level=rlevel):
"""Ticket #79"""
ulen = 1
ucs_value = u'\U0010FFFF'
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua2 = ua.newbyteorder()
def test_object_array_fill(self,level=rlevel):
"""Ticket #86"""
x = np.zeros(1, 'O')
x.fill([])
    def test_mem_dtype_align(self,level=rlevel):
        """Ticket #93"""
        # An invalid format string with align=1 must raise cleanly (TypeError),
        # not crash while computing alignment.
        self.assertRaises(TypeError,np.dtype,
                          {'names':['a'],'formats':['foo']},align=1)
@dec.knownfailureif(sys.version_info[0] >= 3,
"numpy.intp('0xff', 16) not supported on Py3, "
"as it does not inherit from Python int")
def test_intp(self,level=rlevel):
"""Ticket #99"""
i_width = np.int_(0).nbytes*2 - 1
np.intp('0x' + 'f'*i_width,16)
self.assertRaises(OverflowError,np.intp,'0x' + 'f'*(i_width+1),16)
self.assertRaises(ValueError,np.intp,'0x1',32)
assert_equal(255,np.intp('0xFF',16))
assert_equal(1024,np.intp(1024))
def test_endian_bool_indexing(self,level=rlevel):
"""Ticket #105"""
a = np.arange(10.,dtype='>f8')
b = np.arange(10.,dtype='<f8')
xa = np.where((a>2) & (a<6))
xb = np.where((b>2) & (b<6))
ya = ((a>2) & (a<6))
yb = ((b>2) & (b<6))
assert_array_almost_equal(xa,ya.nonzero())
assert_array_almost_equal(xb,yb.nonzero())
assert(np.all(a[ya] > 0.5))
assert(np.all(b[yb] > 0.5))
    def test_mem_dot(self,level=rlevel):
        """Ticket #106"""
        # dot() with a zero-sized operand must not read invalid memory;
        # the result z is intentionally unused.
        x = np.random.randn(0,1)
        y = np.random.randn(10,1)
        z = np.dot(x, np.transpose(y))
def test_arange_endian(self,level=rlevel):
"""Ticket #111"""
ref = np.arange(10)
x = np.arange(10,dtype='<f8')
assert_array_equal(ref,x)
x = np.arange(10,dtype='>f8')
assert_array_equal(ref,x)
# Longfloat support is not consistent enough across
# platforms for this test to be meaningful.
# def test_longfloat_repr(self,level=rlevel):
# """Ticket #112"""
# if np.longfloat(0).itemsize > 8:
# a = np.exp(np.array([1000],dtype=np.longfloat))
# assert(str(a)[1:9] == str(a[0])[:8])
def test_argmax(self,level=rlevel):
"""Ticket #119"""
a = np.random.normal(0,1,(4,5,6,7,8))
for i in xrange(a.ndim):
aargmax = a.argmax(i)
def test_mem_divmod(self,level=rlevel):
"""Ticket #126"""
for i in range(10):
divmod(np.array([i])[0],10)
def test_hstack_invalid_dims(self,level=rlevel):
"""Ticket #128"""
x = np.arange(9).reshape((3,3))
y = np.array([0,0,0])
self.assertRaises(ValueError,np.hstack,(x,y))
    def test_squeeze_type(self,level=rlevel):
        """Ticket #133"""
        # squeeze() must return an ndarray even when the result is 0-d.
        a = np.array([3])
        b = np.array(3)
        assert(type(a.squeeze()) is np.ndarray)
        assert(type(b.squeeze()) is np.ndarray)
def test_add_identity(self,level=rlevel):
"""Ticket #143"""
assert_equal(0,np.add.identity)
def test_binary_repr_0(self,level=rlevel):
"""Ticket #151"""
assert_equal('0',np.binary_repr(0))
def test_rec_iterate(self,level=rlevel):
"""Ticket #160"""
descr = np.dtype([('i',int),('f',float),('s','|S3')])
x = np.rec.array([(1,1.1,'1.0'),
(2,2.2,'2.0')],dtype=descr)
x[0].tolist()
[i for i in x[0]]
def test_unicode_string_comparison(self,level=rlevel):
"""Ticket #190"""
a = np.array('hello',np.unicode_)
b = np.array('world')
a == b
def test_tostring_FORTRANORDER_discontiguous(self,level=rlevel):
"""Fix in r2836"""
# Create discontiguous Fortran-ordered array
x = np.array(np.random.rand(3,3),order='F')[:,:2]
assert_array_almost_equal(x.ravel(),np.fromstring(x.tostring()))
def test_flat_assignment(self,level=rlevel):
"""Correct behaviour of ticket #194"""
x = np.empty((3,1))
x.flat = np.arange(3)
assert_array_almost_equal(x,[[0],[1],[2]])
x.flat = np.arange(3,dtype=float)
assert_array_almost_equal(x,[[0],[1],[2]])
def test_broadcast_flat_assignment(self,level=rlevel):
"""Ticket #194"""
x = np.empty((3,1))
def bfa(): x[:] = np.arange(3)
def bfb(): x[:] = np.arange(3,dtype=float)
self.assertRaises(ValueError, bfa)
self.assertRaises(ValueError, bfb)
def test_unpickle_dtype_with_object(self,level=rlevel):
"""Implemented in r2840"""
dt = np.dtype([('x',int),('y',np.object_),('z','O')])
f = StringIO()
pickle.dump(dt,f)
f.seek(0)
dt_ = pickle.load(f)
f.close()
assert_equal(dt,dt_)
    def test_mem_array_creation_invalid_specification(self,level=rlevel):
        """Ticket #196"""
        # A structured dtype containing objects rejects a flat (untupled)
        # value list but accepts a list of record tuples.
        dt = np.dtype([('x',int),('y',np.object_)])
        # Wrong way
        self.assertRaises(ValueError, np.array, [1,'object'], dt)
        # Correct way
        np.array([(1,'object')],dt)
def test_recarray_single_element(self,level=rlevel):
"""Ticket #202"""
a = np.array([1,2,3],dtype=np.int32)
b = a.copy()
r = np.rec.array(a,shape=1,formats=['3i4'],names=['d'])
assert_array_equal(a,b)
assert_equal(a,r[0][0])
def test_zero_sized_array_indexing(self,level=rlevel):
"""Ticket #205"""
tmp = np.array([])
def index_tmp(): tmp[np.array(10)]
self.assertRaises(IndexError, index_tmp)
def test_chararray_rstrip(self,level=rlevel):
"""Ticket #222"""
x = np.chararray((1,),5)
x[0] = asbytes('a ')
x = x.rstrip()
assert_equal(x[0], asbytes('a'))
def test_object_array_shape(self,level=rlevel):
"""Ticket #239"""
assert_equal(np.array([[1,2],3,4],dtype=object).shape, (3,))
assert_equal(np.array([[1,2],[3,4]],dtype=object).shape, (2,2))
assert_equal(np.array([(1,2),(3,4)],dtype=object).shape, (2,2))
assert_equal(np.array([],dtype=object).shape, (0,))
assert_equal(np.array([[],[],[]],dtype=object).shape, (3,0))
assert_equal(np.array([[3,4],[5,6],None],dtype=object).shape, (3,))
def test_mem_around(self,level=rlevel):
"""Ticket #243"""
x = np.zeros((1,))
y = [0]
decimal = 6
np.around(abs(x-y),decimal) <= 10.0**(-decimal)
def test_character_array_strip(self,level=rlevel):
"""Ticket #246"""
x = np.char.array(("x","x ","x "))
for c in x: assert_equal(c,"x")
def test_lexsort(self,level=rlevel):
"""Lexsort memory error"""
v = np.array([1,2,3,4,5,6,7,8,9,10])
assert_equal(np.lexsort(v),0)
def test_pickle_dtype(self,level=rlevel):
"""Ticket #251"""
import pickle
pickle.dumps(np.float)
def test_swap_real(self, level=rlevel):
"""Ticket #265"""
assert_equal(np.arange(4,dtype='>c8').imag.max(),0.0)
assert_equal(np.arange(4,dtype='<c8').imag.max(),0.0)
assert_equal(np.arange(4,dtype='>c8').real.max(),3.0)
assert_equal(np.arange(4,dtype='<c8').real.max(),3.0)
def test_object_array_from_list(self, level=rlevel):
"""Ticket #270"""
a = np.array([1,'A',None])
def test_multiple_assign(self, level=rlevel):
"""Ticket #273"""
a = np.zeros((3,1),int)
a[[1,2]] = 1
def test_empty_array_type(self, level=rlevel):
assert_equal(np.array([]).dtype, np.zeros(0).dtype)
def test_void_copyswap(self, level=rlevel):
dt = np.dtype([('one', '<i4'),('two', '<i4')])
x = np.array((1,2), dtype=dt)
x = x.byteswap()
assert(x['one'] > 1 and x['two'] > 2)
    def test_method_args(self, level=rlevel):
        # Make sure methods and functions have same default axis
        # keyword and arguments
        # Tuples pair a free-function name with the differently-named method
        # (e.g. np.product vs ndarray.prod).
        funcs1= ['argmax', 'argmin', 'sum', ('product', 'prod'),
                 ('sometrue', 'any'),
                 ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
                 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
                 'round', 'min', 'max', 'argsort', 'sort']
        # Two-argument operations checked separately below.
        funcs2 = ['compress', 'take', 'repeat']

        for func in funcs1:
            arr = np.random.rand(8,7)
            arr2 = arr.copy()
            if isinstance(func, tuple):
                func_meth = func[1]
                func = func[0]
            else:
                func_meth = func
            res1 = getattr(arr, func_meth)()
            res2 = getattr(np, func)(arr2)
            # In-place methods (e.g. sort) return None; compare the mutated
            # array against the function's return value instead.
            if res1 is None:
                assert abs(arr-res2).max() < 1e-8, func
            else:
                assert abs(res1-res2).max() < 1e-8, func

        for func in funcs2:
            arr1 = np.random.rand(8,7)
            arr2 = np.random.rand(8,7)
            res1 = None
            if func == 'compress':
                # compress takes the condition as the *first* function arg,
                # so the method call is inverted relative to the others.
                arr1 = arr1.ravel()
                res1 = getattr(arr2, func)(arr1)
            else:
                arr2 = (15*arr2).astype(int).ravel()
            if res1 is None:
                res1 = getattr(arr1, func)(arr2)
            res2 = getattr(np, func)(arr1, arr2)
            assert abs(res1-res2).max() < 1e-8, func
def test_mem_lexsort_strings(self, level=rlevel):
"""Ticket #298"""
lst = ['abc','cde','fgh']
np.lexsort((lst,))
def test_fancy_index(self, level=rlevel):
"""Ticket #302"""
x = np.array([1,2])[np.array([0])]
assert_equal(x.shape,(1,))
def test_recarray_copy(self, level=rlevel):
"""Ticket #312"""
dt = [('x',np.int16),('y',np.float64)]
ra = np.array([(1,2.3)], dtype=dt)
rb = np.rec.array(ra, dtype=dt)
rb['x'] = 2.
assert ra['x'] != rb['x']
def test_rec_fromarray(self, level=rlevel):
"""Ticket #322"""
x1 = np.array([[1,2],[3,4],[5,6]])
x2 = np.array(['a','dd','xyz'])
x3 = np.array([1.1,2,3])
np.rec.fromarrays([x1,x2,x3], formats="(2,)i4,a3,f8")
def test_object_array_assign(self, level=rlevel):
x = np.empty((2,2),object)
x.flat[2] = (1,2,3)
assert_equal(x.flat[2],(1,2,3))
def test_ndmin_float64(self, level=rlevel):
"""Ticket #324"""
x = np.array([1,2,3],dtype=np.float64)
assert_equal(np.array(x,dtype=np.float32,ndmin=2).ndim,2)
assert_equal(np.array(x,dtype=np.float64,ndmin=2).ndim,2)
def test_mem_axis_minimization(self, level=rlevel):
"""Ticket #327"""
data = np.arange(5)
data = np.add.outer(data,data)
def test_mem_float_imag(self, level=rlevel):
"""Ticket #330"""
np.float64(1.0).imag
def test_dtype_tuple(self, level=rlevel):
"""Ticket #334"""
assert np.dtype('i4') == np.dtype(('i4',()))
def test_dtype_posttuple(self, level=rlevel):
"""Ticket #335"""
np.dtype([('col1', '()i4')])
def test_numeric_carray_compare(self, level=rlevel):
"""Ticket #341"""
assert_equal(np.array(['X'], 'c'), asbytes('X'))
def test_string_array_size(self, level=rlevel):
"""Ticket #342"""
self.assertRaises(ValueError,
np.array,[['X'],['X','X','X']],'|S1')
def test_dtype_repr(self, level=rlevel):
"""Ticket #344"""
dt1=np.dtype(('uint32', 2))
dt2=np.dtype(('uint32', (2,)))
assert_equal(dt1.__repr__(), dt2.__repr__())
def test_reshape_order(self, level=rlevel):
"""Make sure reshape order works."""
a = np.arange(6).reshape(2,3,order='F')
assert_equal(a,[[0,2,4],[1,3,5]])
a = np.array([[1,2],[3,4],[5,6],[7,8]])
b = a[:,1]
assert_equal(b.reshape(2,2,order='F'), [[2,6],[4,8]])
def test_repeat_discont(self, level=rlevel):
"""Ticket #352"""
a = np.arange(12).reshape(4,3)[:,2]
assert_equal(a.repeat(3), [2,2,2,5,5,5,8,8,8,11,11,11])
def test_array_index(self, level=rlevel):
"""Make sure optimization is not called in this case."""
a = np.array([1,2,3])
a2 = np.array([[1,2,3]])
assert_equal(a[np.where(a==3)], a2[np.where(a2==3)])
def test_object_argmax(self, level=rlevel):
a = np.array([1,2,3],dtype=object)
assert a.argmax() == 2
def test_recarray_fields(self, level=rlevel):
"""Ticket #372"""
dt0 = np.dtype([('f0','i4'),('f1','i4')])
dt1 = np.dtype([('f0','i8'),('f1','i8')])
for a in [np.array([(1,2),(3,4)],"i4,i4"),
np.rec.array([(1,2),(3,4)],"i4,i4"),
np.rec.array([(1,2),(3,4)]),
np.rec.fromarrays([(1,2),(3,4)],"i4,i4"),
np.rec.fromarrays([(1,2),(3,4)])]:
assert(a.dtype in [dt0,dt1])
def test_random_shuffle(self, level=rlevel):
"""Ticket #374"""
a = np.arange(5).reshape((5,1))
b = a.copy()
np.random.shuffle(b)
assert_equal(np.sort(b, axis=0),a)
def test_refcount_vdot(self, level=rlevel):
"""Changeset #3443"""
_assert_valid_refcount(np.vdot)
def test_startswith(self, level=rlevel):
ca = np.char.array(['Hi','There'])
assert_equal(ca.startswith('H'),[True,False])
def test_noncommutative_reduce_accumulate(self, level=rlevel):
"""Ticket #413"""
tosubtract = np.arange(5)
todivide = np.array([2.0, 0.5, 0.25])
assert_equal(np.subtract.reduce(tosubtract), -10)
assert_equal(np.divide.reduce(todivide), 16.0)
assert_array_equal(np.subtract.accumulate(tosubtract),
np.array([0, -1, -3, -6, -10]))
assert_array_equal(np.divide.accumulate(todivide),
np.array([2., 4., 16.]))
def test_convolve_empty(self, level=rlevel):
"""Convolve should raise an error for empty input array."""
self.assertRaises(ValueError,np.convolve,[],[1])
self.assertRaises(ValueError,np.convolve,[1],[])
def test_multidim_byteswap(self, level=rlevel):
"""Ticket #449"""
r=np.array([(1,(0,1,2))], dtype="i2,3i2")
assert_array_equal(r.byteswap(),
np.array([(256,(0,256,512))],r.dtype))
def test_string_NULL(self, level=rlevel):
"""Changeset 3557"""
assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
'a\x00\x0b\x0c')
def test_junk_in_string_fields_of_recarray(self, level=rlevel):
"""Ticket #483"""
r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')])
assert asbytes(r['var1'][0][0]) == asbytes('abc')
def test_take_output(self, level=rlevel):
"""Ensure that 'take' honours output parameter."""
x = np.arange(12).reshape((3,4))
a = np.take(x,[0,2],axis=1)
b = np.zeros_like(a)
np.take(x,[0,2],axis=1,out=b)
assert_array_equal(a,b)
def test_array_str_64bit(self, level=rlevel):
"""Ticket #501"""
s = np.array([1, np.nan],dtype=np.float64)
errstate = np.seterr(all='raise')
try:
sstr = np.array_str(s)
finally:
np.seterr(**errstate)
def test_frompyfunc_endian(self, level=rlevel):
"""Ticket #503"""
from math import radians
uradians = np.frompyfunc(radians, 1, 1)
big_endian = np.array([83.4, 83.5], dtype='>f8')
little_endian = np.array([83.4, 83.5], dtype='<f8')
assert_almost_equal(uradians(big_endian).astype(float),
uradians(little_endian).astype(float))
def test_mem_string_arr(self, level=rlevel):
"""Ticket #514"""
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
np.hstack((t, s ))
def test_arr_transpose(self, level=rlevel):
"""Ticket #516"""
x = np.random.rand(*(2,)*16)
y = x.transpose(range(16))
def test_string_mergesort(self, level=rlevel):
"""Ticket #540"""
x = np.array(['a']*32)
assert_array_equal(x.argsort(kind='m'), np.arange(32))
def test_argmax_byteorder(self, level=rlevel):
"""Ticket #546"""
a = np.arange(3, dtype='>f')
assert a[a.argmax()] == a.max()
def test_rand_seed(self, level=rlevel):
"""Ticket #555"""
for l in np.arange(4):
np.random.seed(l)
def test_mem_deallocation_leak(self, level=rlevel):
"""Ticket #562"""
a = np.zeros(5,dtype=float)
b = np.array(a,dtype=float)
del a, b
def test_mem_on_invalid_dtype(self):
"Ticket #583"
self.assertRaises(ValueError, np.fromiter, [['12',''],['13','']], str)
def test_dot_negative_stride(self, level=rlevel):
"""Ticket #588"""
x = np.array([[1,5,25,125.,625]])
y = np.array([[20.],[160.],[640.],[1280.],[1024.]])
z = y[::-1].copy()
y2 = y[::-1]
assert_equal(np.dot(x,z),np.dot(x,y2))
def test_object_casting(self, level=rlevel):
def rs():
x = np.ones([484,286])
y = np.zeros([484,286])
x |= y
self.assertRaises(TypeError,rs)
def test_unicode_scalar(self, level=rlevel):
"""Ticket #600"""
import cPickle
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
new = cPickle.loads(cPickle.dumps(el))
assert_equal(new, el)
def test_arange_non_native_dtype(self, level=rlevel):
"""Ticket #616"""
for T in ('>f4','<f4'):
dt = np.dtype(T)
assert_equal(np.arange(0,dtype=dt).dtype,dt)
assert_equal(np.arange(0.5,dtype=dt).dtype,dt)
assert_equal(np.arange(5,dtype=dt).dtype,dt)
def test_bool_indexing_invalid_nr_elements(self, level=rlevel):
s = np.ones(10,dtype=float)
x = np.array((15,),dtype=float)
def ia(x,s): x[(s>0)]=1.0
self.assertRaises(ValueError,ia,x,s)
def test_mem_scalar_indexing(self, level=rlevel):
"""Ticket #603"""
x = np.array([0],dtype=float)
index = np.array(0,dtype=np.int32)
x[index]
def test_binary_repr_0_width(self, level=rlevel):
assert_equal(np.binary_repr(0,width=3),'000')
def test_fromstring(self, level=rlevel):
assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
[12,9,9])
def test_searchsorted_variable_length(self, level=rlevel):
x = np.array(['a','aa','b'])
y = np.array(['d','e'])
assert_equal(x.searchsorted(y), [3,3])
def test_string_argsort_with_zeros(self, level=rlevel):
"""Check argsort for strings containing zeros."""
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
assert_array_equal(x.argsort(kind='m'), np.array([1,0]))
assert_array_equal(x.argsort(kind='q'), np.array([1,0]))
def test_string_sort_with_zeros(self, level=rlevel):
"""Check sort for strings containing zeros."""
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
y = np.fromstring("\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
def test_copy_detection_zero_dim(self, level=rlevel):
"""Ticket #658"""
np.indices((0,3,4)).T.reshape(-1,3)
def test_flat_byteorder(self, level=rlevel):
"""Ticket #657"""
x = np.arange(10)
assert_array_equal(x.astype('>i4'),x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:],x.astype('<i4'))
def test_uint64_from_negative(self, level=rlevel) :
assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
def test_sign_bit(self, level=rlevel):
x = np.array([0,-0.0,0])
assert_equal(str(np.abs(x)),'[ 0. 0. 0.]')
def test_flat_index_byteswap(self, level=rlevel):
for dt in (np.dtype('<i4'),np.dtype('>i4')):
x = np.array([-1,0,1],dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
def test_copy_detection_corner_case(self, level=rlevel):
"""Ticket #658"""
np.indices((0,3,4)).T.reshape(-1,3)
def test_copy_detection_corner_case2(self, level=rlevel):
"""Ticket #771: strides are not set correctly when reshaping 0-sized
arrays"""
b = np.indices((0,3,4)).T.reshape(-1,3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
def test_object_array_refcounting(self, level=rlevel):
    """Ticket #633"""
    if not hasattr(sys, 'getrefcount'):
        return
    # NB. this is probably CPython-specific
    cnt = sys.getrefcount
    a = object()
    b = object()
    c = object()
    # Baseline refcounts before any array operations.
    cnt0_a = cnt(a)
    cnt0_b = cnt(b)
    cnt0_c = cnt(c)
    # -- 0d -> 1d broadcasted slice assignment
    arr = np.zeros(5, dtype=np.object_)
    arr[:] = a
    assert cnt(a) == cnt0_a + 5
    arr[:] = b
    assert cnt(a) == cnt0_a
    assert cnt(b) == cnt0_b + 5
    arr[:2] = c
    assert cnt(b) == cnt0_b + 3
    assert cnt(c) == cnt0_c + 2
    del arr
    # -- 1d -> 2d broadcasted slice assignment
    arr = np.zeros((5, 2), dtype=np.object_)
    arr0 = np.zeros(2, dtype=np.object_)
    arr0[0] = a
    assert cnt(a) == cnt0_a + 1
    arr0[1] = b
    assert cnt(b) == cnt0_b + 1
    arr[:,:] = arr0
    assert cnt(a) == cnt0_a + 6
    assert cnt(b) == cnt0_b + 6
    arr[:,0] = None
    assert cnt(a) == cnt0_a + 1
    del arr, arr0
    # -- 2d copying + flattening
    arr = np.zeros((5, 2), dtype=np.object_)
    arr[:,0] = a
    arr[:,1] = b
    assert cnt(a) == cnt0_a + 5
    assert cnt(b) == cnt0_b + 5
    arr2 = arr.copy()
    assert cnt(a) == cnt0_a + 10
    assert cnt(b) == cnt0_b + 10
    arr2 = arr[:,0].copy()
    assert cnt(a) == cnt0_a + 10
    assert cnt(b) == cnt0_b + 5
    arr2 = arr.flatten()
    assert cnt(a) == cnt0_a + 10
    assert cnt(b) == cnt0_b + 10
    del arr, arr2
    # -- concatenate, repeat, take, choose
    arr1 = np.zeros((5, 1), dtype=np.object_)
    arr2 = np.zeros((5, 1), dtype=np.object_)
    arr1[...] = a
    arr2[...] = b
    assert cnt(a) == cnt0_a + 5
    assert cnt(b) == cnt0_b + 5
    arr3 = np.concatenate((arr1, arr2))
    assert cnt(a) == cnt0_a + 5 + 5
    assert cnt(b) == cnt0_b + 5 + 5
    arr3 = arr1.repeat(3, axis=0)
    assert cnt(a) == cnt0_a + 5 + 3*5
    arr3 = arr1.take([1,2,3], axis=0)
    assert cnt(a) == cnt0_a + 5 + 3
    x = np.array([[0],[1],[0],[1],[1]], int)
    arr3 = x.choose(arr1, arr2)
    assert cnt(a) == cnt0_a + 5 + 2
    assert cnt(b) == cnt0_b + 5 + 3

def test_mem_custom_float_to_array(self, level=rlevel):
    """Ticket 702"""
    # Objects exposing __float__ must be convertible via astype(float).
    class MyFloat:
        def __float__(self):
            return 1.0
    tmp = np.atleast_1d([MyFloat()])
    tmp2 = tmp.astype(float)
def test_object_array_refcount_self_assign(self, level=rlevel):
    """Ticket #711"""
    class VictimObject(object):
        deleted = False
        def __del__(self):
            self.deleted = True
    d = VictimObject()
    arr = np.zeros(5, dtype=np.object_)
    arr[:] = d
    del d
    arr[:] = arr # refcount of 'd' might hit zero here
    assert not arr[0].deleted
    arr[:] = arr # trying to induce a segfault by doing it again...
    assert not arr[0].deleted

def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):
    # Unsized 'S' dtype in fromiter must raise, not crash.
    x = [1,2,3]
    self.assertRaises(ValueError,
                      np.fromiter, [xi for xi in x], dtype='S')

def test_reduce_big_object_array(self, level=rlevel):
    """Ticket #713"""
    # Array deliberately larger than the reduced buffer size.
    oldsize = np.setbufsize(10*16)
    a = np.array([None]*161, object)
    assert not np.any(a)
    np.setbufsize(oldsize)

def test_mem_0d_array_index(self, level=rlevel):
    """Ticket #714"""
    np.zeros(10)[np.array(0)]

def test_floats_from_string(self, level=rlevel):
    """Ticket #640, floats from string"""
    fsingle = np.single('1.234')
    fdouble = np.double('1.234')
    flongdouble = np.longdouble('1.234')
    assert_almost_equal(fsingle, 1.234)
    assert_almost_equal(fdouble, 1.234)
    assert_almost_equal(flongdouble, 1.234)

def test_complex_dtype_printing(self, level=rlevel):
    # str() of a nested structured dtype must round-trip its description.
    dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
                            ('rtile', '>f4', (64, 36))], (3,)),
                   ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
                               ('bright', '>f4', (8, 36))])])
    assert_equal(str(dt),
                 "[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
                 "('rtile', '>f4', (64, 36))], (3,)), "
                 "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
                 "('bright', '>f4', (8, 36))])]")

def test_nonnative_endian_fill(self, level=rlevel):
    """ Non-native endian arrays were incorrectly filled with scalars before
    r5034.
    """
    if sys.byteorder == 'little':
        dtype = np.dtype('>i4')
    else:
        dtype = np.dtype('<i4')
    x = np.empty([1], dtype=dtype)
    x.fill(1)
    assert_equal(x, np.array([1], dtype=dtype))

def test_dot_alignment_sse2(self, level=rlevel):
    """Test for ticket #551, changeset r5140"""
    x = np.zeros((30,40))
    y = pickle.loads(pickle.dumps(x))
    # y is now typically not aligned on a 8-byte boundary
    z = np.ones((1, y.shape[0]))
    # This shouldn't cause a segmentation fault:
    np.dot(z, y)

def test_astype_copy(self, level=rlevel):
    """Ticket #788, changeset r5155"""
    # The test data file was generated by scipy.io.savemat.
    # The dtype is float64, but the isbuiltin attribute is 0.
    data_dir = path.join(path.dirname(__file__), 'data')
    filename = path.join(data_dir, "astype_copy.pkl")
    if sys.version_info[0] >= 3:
        xp = pickle.load(open(filename, 'rb'), encoding='latin1')
    else:
        xp = pickle.load(open(filename))
    xpd = xp.astype(np.float64)
    # astype must copy even when source/target dtypes compare equal.
    assert (xp.__array_interface__['data'][0] !=
            xpd.__array_interface__['data'][0])

def test_compress_small_type(self, level=rlevel):
    """Ticket #789, changeset 5217.
    """
    # compress with out argument segfaulted if cannot cast safely
    import numpy as np
    a = np.array([[1, 2], [3, 4]])
    b = np.zeros((2, 1), dtype = np.single)
    try:
        a.compress([True, False], axis = 1, out = b)
        raise AssertionError("compress with an out which cannot be " \
                             "safely casted should not return "\
                             "successfully")
    except TypeError:
        pass
def test_attributes(self, level=rlevel):
    """Ticket #791
    """
    # Every ndarray method below must propagate the subclass attribute
    # through __array_finalize__.
    import numpy as np
    class TestArray(np.ndarray):
        def __new__(cls, data, info):
            result = np.array(data)
            result = result.view(cls)
            result.info = info
            return result
        def __array_finalize__(self, obj):
            self.info = getattr(obj, 'info', '')
    dat = TestArray([[1,2,3,4],[5,6,7,8]],'jubba')
    assert dat.info == 'jubba'
    dat.resize((4,2))
    assert dat.info == 'jubba'
    dat.sort()
    assert dat.info == 'jubba'
    dat.fill(2)
    assert dat.info == 'jubba'
    dat.put([2,3,4],[6,3,4])
    assert dat.info == 'jubba'
    dat.setfield(4, np.int32,0)
    assert dat.info == 'jubba'
    dat.setflags()
    assert dat.info == 'jubba'
    assert dat.all(1).info == 'jubba'
    assert dat.any(1).info == 'jubba'
    assert dat.argmax(1).info == 'jubba'
    assert dat.argmin(1).info == 'jubba'
    assert dat.argsort(1).info == 'jubba'
    assert dat.astype(TestArray).info == 'jubba'
    assert dat.byteswap().info == 'jubba'
    assert dat.clip(2,7).info == 'jubba'
    assert dat.compress([0,1,1]).info == 'jubba'
    assert dat.conj().info == 'jubba'
    assert dat.conjugate().info == 'jubba'
    assert dat.copy().info == 'jubba'
    dat2 = TestArray([2, 3, 1, 0],'jubba')
    choices = [[0, 1, 2, 3], [10, 11, 12, 13],
               [20, 21, 22, 23], [30, 31, 32, 33]]
    assert dat2.choose(choices).info == 'jubba'
    assert dat.cumprod(1).info == 'jubba'
    assert dat.cumsum(1).info == 'jubba'
    assert dat.diagonal().info == 'jubba'
    assert dat.flatten().info == 'jubba'
    assert dat.getfield(np.int32,0).info == 'jubba'
    assert dat.imag.info == 'jubba'
    assert dat.max(1).info == 'jubba'
    assert dat.mean(1).info == 'jubba'
    assert dat.min(1).info == 'jubba'
    assert dat.newbyteorder().info == 'jubba'
    assert dat.nonzero()[0].info == 'jubba'
    assert dat.nonzero()[1].info == 'jubba'
    assert dat.prod(1).info == 'jubba'
    assert dat.ptp(1).info == 'jubba'
    assert dat.ravel().info == 'jubba'
    assert dat.real.info == 'jubba'
    assert dat.repeat(2).info == 'jubba'
    assert dat.reshape((2,4)).info == 'jubba'
    assert dat.round().info == 'jubba'
    assert dat.squeeze().info == 'jubba'
    assert dat.std(1).info == 'jubba'
    assert dat.sum(1).info == 'jubba'
    assert dat.swapaxes(0,1).info == 'jubba'
    assert dat.take([2,3,5]).info == 'jubba'
    assert dat.transpose().info == 'jubba'
    assert dat.T.info == 'jubba'
    assert dat.var(1).info == 'jubba'
    assert dat.view(TestArray).info == 'jubba'
def test_recarray_tolist(self, level=rlevel):
    """Ticket #793, changeset r5215
    """
    # Comparisons fail for NaN, so we can't use random memory
    # for the test.
    buf = np.zeros(40, dtype=np.int8)
    a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
    b = a.tolist()
    assert( a[0].tolist() == b[0])
    assert( a[1].tolist() == b[1])

def test_char_array_creation(self, level=rlevel):
    a = np.array('123', dtype='c')
    b = np.array(asbytes_nested(['1','2','3']))
    assert_equal(a,b)

def test_unaligned_unicode_access(self, level=rlevel) :
    """Ticket #825"""
    # Vary the leading string width so the unicode field lands on
    # different (possibly unaligned) offsets.
    for i in range(1,9) :
        msg = 'unicode offset: %d chars'%i
        t = np.dtype([('a','S%d'%i),('b','U2')])
        x = np.array([(asbytes('a'),u'b')], dtype=t)
        if sys.version_info[0] >= 3:
            assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
        else:
            assert_equal(str(x), "[('a', u'b')]", err_msg=msg)

def test_sign_for_complex_nan(self, level=rlevel):
    """Ticket 794."""
    C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
    have = np.sign(C)
    want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
    assert_equal(have, want)

def test_for_equal_names(self, level=rlevel):
    """Ticket #674"""
    # Renaming one field via dtype.names must not disturb the others.
    dt = np.dtype([('foo', float), ('bar', float)])
    a = np.zeros(10, dt)
    b = list(a.dtype.names)
    b[0] = "notfoo"
    a.dtype.names = b
    assert a.dtype.names[0] == "notfoo"
    assert a.dtype.names[1] == "bar"

def test_for_object_scalar_creation(self, level=rlevel):
    """Ticket #816"""
    a = np.object_()
    b = np.object_(3)
    b2 = np.object_(3.0)
    c = np.object_([4,5])
    d = np.object_([None, {}, []])
    assert a is None
    assert type(b) is int
    assert type(b2) is float
    assert type(c) is np.ndarray
    assert c.dtype == object
    assert d.dtype == object

def test_array_resize_method_system_error(self):
    """Ticket #840 - order should be an invalid keyword."""
    x = np.array([[0,1],[2,3]])
    self.assertRaises(TypeError, x.resize, (2,2), order='C')

def test_for_zero_length_in_choose(self, level=rlevel):
    "Ticket #882"
    a = np.array(1)
    self.assertRaises(ValueError, lambda x: x.choose([]), a)

def test_array_ndmin_overflow(self):
    "Ticket #947."
    self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))

def test_errobj_reference_leak(self, level=rlevel):
    """Ticket #955"""
    old_err = np.seterr(all="ignore")
    try:
        z = int(0)
        p = np.int32(-1)
        gc.collect()
        n_before = len(gc.get_objects())
        z**p # this shouldn't leak a reference to errobj
        gc.collect()
        n_after = len(gc.get_objects())
        assert n_before >= n_after, (n_before, n_after)
    finally:
        np.seterr(**old_err)

def test_void_scalar_with_titles(self, level=rlevel):
    """No ticket"""
    data = [('john', 4), ('mary', 5)]
    dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
    arr = np.array(data, dtype=dtype1)
    assert arr[0][0] == 'john'
    assert arr[0][1] == 4

def test_blasdot_uninitialized_memory(self):
    """Ticket #950"""
    for m in [0, 1, 2]:
        for n in [0, 1, 2]:
            for k in xrange(3):
                # Try to ensure that x->data contains non-zero floats
                x = np.array([123456789e199], dtype=np.float64)
                x.resize((m, 0))
                y = np.array([123456789e199], dtype=np.float64)
                y.resize((0, n))
                # `dot` should just return zero (m,n) matrix
                z = np.dot(x, y)
                assert np.all(z == 0)
                assert z.shape == (m, n)
def test_zeros(self):
    """Regression test for #1061."""
    # Set a size which cannot fit into a 64 bits signed integer
    sz = 2 ** 64
    good = 'Maximum allowed dimension exceeded'
    try:
        np.empty(sz)
    except ValueError, e:
        if not str(e) == good:
            self.fail("Got msg '%s', expected '%s'" % (e, good))
    except Exception, e:
        self.fail("Got exception of type %s instead of ValueError" % type(e))

def test_huge_arange(self):
    """Regression test for #1062."""
    # Set a size which cannot fit into a 64 bits signed integer
    sz = 2 ** 64
    good = 'Maximum allowed size exceeded'
    try:
        a = np.arange(sz)
        self.assertTrue(np.size == sz)
    except ValueError, e:
        if not str(e) == good:
            self.fail("Got msg '%s', expected '%s'" % (e, good))
    except Exception, e:
        self.fail("Got exception of type %s instead of ValueError" % type(e))

def test_fromiter_bytes(self):
    """Ticket #1058"""
    a = np.fromiter(range(10), dtype='b')
    b = np.fromiter(range(10), dtype='B')
    assert np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9]))
    assert np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9]))

def test_array_from_sequence_scalar_array(self):
    """Ticket #1078: segfaults when creating an array with a sequence of 0d
    arrays."""
    a = np.ones(2)
    b = np.array(3)
    assert_raises(ValueError, lambda: np.array((a, b)))
    t = ((1,), np.array(1))
    assert_raises(ValueError, lambda: np.array(t))

@dec.knownfailureif(True, "Fix this for 1.5.0.")
def test_array_from_sequence_scalar_array2(self):
    """Ticket #1081: weird array with strange input..."""
    t = np.array([np.array([]), np.array(0, object)])
    assert_raises(ValueError, lambda: np.array(t))

def test_array_too_big(self):
    """Ticket #1080."""
    assert_raises(ValueError, np.zeros, [2**10]*10)

def test_dtype_keyerrors_(self):
    """Ticket #1106."""
    dt = np.dtype([('f1', np.uint)])
    assert_raises(KeyError, dt.__getitem__, "f2")
    assert_raises(IndexError, dt.__getitem__, 1)
    assert_raises(ValueError, dt.__getitem__, 0.0)

def test_lexsort_buffer_length(self):
    """Ticket #1217, don't segfault."""
    a = np.ones(100, dtype=np.int8)
    b = np.ones(100, dtype=np.int32)
    i = np.lexsort((a[::-1], b))
    assert_equal(i, np.arange(100, dtype=np.int))

def test_object_array_to_fixed_string(self):
    """Ticket #1235."""
    a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
    b = np.array(a, dtype=(np.str_, 8))
    assert_equal(a, b)
    c = np.array(a, dtype=(np.str_, 5))
    assert_equal(c, np.array(['abcde', 'ijklm']))
    d = np.array(a, dtype=(np.str_, 12))
    assert_equal(a, d)
    e = np.empty((2, ), dtype=(np.str_, 8))
    e[:] = a[:]
    assert_equal(a, e)

def test_unicode_to_string_cast(self):
    """Ticket #1240."""
    a = np.array([[u'abc', u'\u03a3'], [u'asdf', u'erw']], dtype='U')
    def fail():
        b = np.array(a, 'S4')
    self.assertRaises(UnicodeEncodeError, fail)

def test_mixed_string_unicode_array_creation(self):
    # Itemsize is 4 bytes per unicode char; longest element wins.
    a = np.array(['1234', u'123'])
    assert a.itemsize == 16
    a = np.array([u'123', '1234'])
    assert a.itemsize == 16
    a = np.array(['1234', u'123', '12345'])
    assert a.itemsize == 20
    a = np.array([u'123', '1234', u'12345'])
    assert a.itemsize == 20
    a = np.array([u'123', '1234', u'1234'])
    assert a.itemsize == 16

def test_misaligned_objects_segfault(self):
    """Ticket #1198 and #1267"""
    a1 = np.zeros((10,), dtype='O,c')
    a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
    a1['f0'] = a2
    r = repr(a1)
    np.argmax(a1['f0'])
    a1['f0'][1] = "FOO"
    a1['f0'] = "FOO"
    a3 = np.array(a1['f0'], dtype='S')
    np.nonzero(a1['f0'])
    a1.sort()
    a4 = copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
    """Ticket #1267"""
    s1 = np.array(('a', 'Foo'), dtype='c,O')
    s2 = np.array(('b', 'Bar'), dtype='c,O')
    s1['f1'] = s2['f1']
    s1['f1'] = 'Baz'

def test_misaligned_dot_product_objects(self):
    """Ticket #1267"""
    # This didn't require a fix, but it's worth testing anyway, because
    # it may fail if .dot stops enforcing the arrays to be BEHAVED
    a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
    b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
    np.dot(a['f0'], b['f0'])

def test_byteswap_complex_scalar(self):
    """Ticket #1259"""
    z = np.array([-1j], '<c8')
    x = z[0] # always native-endian
    y = x.byteswap()
    if x.dtype.byteorder == z.dtype.byteorder:
        # little-endian machine
        assert_equal(x, np.fromstring(y.tostring(), dtype='>c8'))
    else:
        # big-endian machine
        assert_equal(x, np.fromstring(y.tostring(), dtype='<c8'))

def test_structured_arrays_with_objects1(self):
    """Ticket #1299"""
    stra = 'aaaa'
    strb = 'bbbb'
    x = np.array([[(0,stra),(1,strb)]], 'i8,O')
    x[x.nonzero()] = x.ravel()[:1]
    assert x[0,1] == x[0,0]

def test_structured_arrays_with_objects2(self):
    """Ticket #1299 second test"""
    stra = 'aaaa'
    strb = 'bbbb'
    numb = sys.getrefcount(strb)
    numa = sys.getrefcount(stra)
    x = np.array([[(0,stra),(1,strb)]], 'i8,O')
    x[x.nonzero()] = x.ravel()[:1]
    # strb's slot was overwritten; stra gained two references.
    assert sys.getrefcount(strb) == numb
    assert sys.getrefcount(stra) == numa + 2

def test_duplicate_title_and_name(self):
    """Ticket #1254"""
    def func():
        x = np.dtype([(('a', 'a'), 'i'), ('b', 'i')])
    self.assertRaises(ValueError, func)

def test_signed_integer_division_overflow(self):
    """Ticket #1317."""
    # INT_MIN / -1 must not trap; result is checked only for not crashing.
    def test_type(t):
        min = np.array([np.iinfo(t).min])
        min /= -1
    old_err = np.seterr(divide="ignore")
    try:
        for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
            test_type(t)
    finally:
        np.seterr(**old_err)

def test_buffer_hashlib(self):
    try:
        from hashlib import md5
    except ImportError:
        from md5 import new as md5
    x = np.array([1,2,3], dtype=np.dtype('<i4'))
    assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')

def test_numeric_handleError(self):
    """Ticket #1405"""
    from numpy import numarray
    # Just make sure this doesn't throw an exception
    numarray.handleError(0, "")

def test_0d_string_scalar(self):
    # Bug #1436; the following should succeed
    np.asarray('x', '>c')

def test_log1p_compiler_shenanigans(self):
    # Check if log1p is behaving on 32 bit intel systems.
    assert_(np.isfinite(np.log1p(np.exp2(-53))))

def test_fromiter_comparison(self, level=rlevel):
    # NOTE(review): duplicates test_fromiter_bytes above - candidate
    # for removal.
    a = np.fromiter(range(10), dtype='b')
    b = np.fromiter(range(10), dtype='B')
    assert np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9]))
    assert np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9]))

def test_fromstring_crash(self):
    # Ticket #1345: the following should not cause a crash
    np.fromstring(asbytes('aa, aa, 1.0'), sep=',')

def test_ticket_1539(self):
    # dot() between empty arrays of every numeric dtype pair must not raise.
    dtypes = [x for x in np.typeDict.values()
              if (issubclass(x, np.number)
                  and not issubclass(x, np.timeinteger))]
    a = np.array([], dtypes[0])
    failures = []
    for x in dtypes:
        b = a.astype(x)
        for y in dtypes:
            c = a.astype(y)
            try:
                np.dot(b, c)
            except TypeError, e:
                failures.append((x, y))
    if failures:
        raise AssertionError("Failures: %r" % failures)

def test_ticket_1538(self):
    x = np.finfo(np.float32)
    for name in 'eps epsneg max min resolution tiny'.split():
        assert_equal(type(getattr(x, name)), np.float32,
                     err_msg=name)

def test_ticket_1434(self):
    # Check that the out= argument in var and std has an effect
    data = np.array(((1,2,3),(4,5,6),(7,8,9)))
    out = np.zeros((3,))
    ret = data.var(axis=1, out=out)
    assert_(ret is out)
    assert_array_equal(ret, data.var(axis=1))
    ret = data.std(axis=1, out=out)
    assert_(ret is out)
    assert_array_equal(ret, data.std(axis=1))

def test_complex_nan_maximum(self):
    # maximum() must propagate a complex NaN.
    cnan = complex(0, np.nan)
    assert_equal(np.maximum(1, cnan), cnan)
# Run the regression suite when executed as a script.
if __name__ == "__main__":
    run_module_suite()
| |
# Copyright (c) 2012 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
import sys
from os import getcwd
from os.path import join as joinpath
import CpuConfig
import MemConfig
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import *
addToPath('../common')
def getCPUClass(cpu_type):
    """Look up the CPU class registered for *cpu_type*.

    Returns a (class, memory_mode) pair, where memory_mode is the
    mode of operation reported by the class itself.
    """
    cpu_cls = CpuConfig.get(cpu_type)
    return cpu_cls, cpu_cls.memory_mode()
def setCPUClass(options):
    """Returns two cpu classes and the initial mode of operation.

    Restoring from a checkpoint or fast forwarding through a benchmark
    can be done using one type of cpu, and then the actual
    simulation can be carried out using another type. This function
    returns these two types of cpus and the initial mode of operation
    depending on the options provided.
    """
    start_cls, mem_mode = getCPUClass(options.cpu_type)
    final_cls = None

    # The requested model may demand a cache hierarchy.
    needs_caches = start_cls.require_caches()
    if needs_caches and not options.caches and not options.ruby:
        fatal("%s must be used with caches" % options.cpu_type)

    if options.checkpoint_restore is not None:
        # Restore with a (possibly) different model, then switch over.
        if options.restore_with_cpu != options.cpu_type:
            final_cls = start_cls
            start_cls, mem_mode = getCPUClass(options.restore_with_cpu)
    elif options.fast_forward:
        # Fast-forward atomically, then switch to the requested model.
        final_cls = start_cls
        start_cls = AtomicSimpleCPU
        mem_mode = 'atomic'

    return (start_cls, mem_mode, final_cls)
def setMemClass(options):
    """Look up and return the memory controller class selected by options."""
    return MemConfig.get(options.mem_type)
def setWorkCountOptions(system, options):
    """Copy any work-item bookkeeping options onto *system*.

    Only options the user actually supplied (i.e. not None) are
    transferred; unset options leave the system untouched.
    """
    # (option attribute, system attribute) pairs; note the checkpoint
    # counters are shortened to 'ckpt' on the system side.
    option_map = (
        ('work_item_id', 'work_item_id'),
        ('work_begin_cpu_id_exit', 'work_begin_cpu_id_exit'),
        ('work_end_exit_count', 'work_end_exit_count'),
        ('work_end_checkpoint_count', 'work_end_ckpt_count'),
        ('work_begin_exit_count', 'work_begin_exit_count'),
        ('work_begin_checkpoint_count', 'work_begin_ckpt_count'),
        ('work_cpus_checkpoint_count', 'work_cpus_ckpt_count'),
    )
    for opt_attr, sys_attr in option_map:
        value = getattr(options, opt_attr)
        if value is not None:
            setattr(system, sys_attr, value)
def findCptDir(options, maxtick, cptdir, testsys):
    """Figures out the directory from which the checkpointed state is read.

    There are two different ways in which the directories holding checkpoints
    can be named --
      1. cpt.<benchmark name>.<instruction count when the checkpoint was taken>
      2. cpt.<some number, usually the tick value when the checkpoint was taken>

    This function parses through the options to figure out which one of the
    above should be used for selecting the checkpoint, and then figures out
    the appropriate directory.

    It also sets the value of the maximum tick value till which the simulation
    will run.

    Returns (maxtick, checkpoint_dir).
    """
    from os.path import isdir, exists
    from os import listdir
    import re

    if not isdir(cptdir):
        fatal("checkpoint dir %s does not exist!", cptdir)

    if options.at_instruction or options.simpoint:
        # Naming scheme 1: cpt.<bench>.<inst count>.
        inst = options.checkpoint_restore
        if options.simpoint:
            # assume workload 0 has the simpoint
            if testsys.cpu[0].workload[0].simpoint == 0:
                fatal('Unable to find simpoint')
            inst += int(testsys.cpu[0].workload[0].simpoint)
        checkpoint_dir = joinpath(cptdir, "cpt.%s.%s" % (options.bench, inst))
        if not exists(checkpoint_dir):
            fatal("Unable to find checkpoint directory %s", checkpoint_dir)
    else:
        # Naming scheme 2: cpt.<number>; --checkpoint-restore selects the
        # N-th checkpoint in ascending numeric order (1-based).
        dirs = listdir(cptdir)
        expr = re.compile('cpt\.([0-9]*)')
        cpts = []
        for dir in dirs:
            match = expr.match(dir)
            if match:
                cpts.append(match.group(1))
        # Numeric (not lexicographic) sort; Python 2 cmp/long.
        cpts.sort(lambda a,b: cmp(long(a), long(b)))
        cpt_num = options.checkpoint_restore
        if cpt_num > len(cpts):
            fatal('Checkpoint %d not found', cpt_num)
        # Subtract the restored tick so maxtick remains an absolute budget.
        maxtick = maxtick - int(cpts[cpt_num - 1])
        checkpoint_dir = joinpath(cptdir, "cpt.%s" % cpts[cpt_num - 1])

    return maxtick, checkpoint_dir
def scriptCheckpoints(options, maxtick, cptdir):
    """Take checkpoints at points requested on the command line.

    Benchmark-generated checkpoint events are skipped; checkpoints are
    written either at an absolute instruction count (--at-instruction /
    --simpoint) or periodically (--take-checkpoints=<when>,<period>).
    Returns the final exit event.
    """
    if options.at_instruction or options.simpoint:
        checkpoint_inst = int(options.take_checkpoints)

        # maintain correct offset if we restored from some instruction
        if options.checkpoint_restore != None:
            checkpoint_inst += options.checkpoint_restore

        print "Creating checkpoint at inst:%d" % (checkpoint_inst)
        exit_event = m5.simulate()
        exit_cause = exit_event.getCause()
        print "exit cause = %s" % exit_cause

        # skip checkpoint instructions should they exist
        while exit_cause == "checkpoint":
            exit_event = m5.simulate()
            exit_cause = exit_event.getCause()

        if exit_cause == "a thread reached the max instruction count":
            m5.checkpoint(joinpath(cptdir, "cpt.%s.%d" % \
                    (options.bench, checkpoint_inst)))
            print "Checkpoint written."
    else:
        # Periodic checkpoints: first at tick <when>, then every <period>.
        when, period = options.take_checkpoints.split(",", 1)
        when = int(when)
        period = int(period)
        num_checkpoints = 0

        exit_event = m5.simulate(when - m5.curTick())
        exit_cause = exit_event.getCause()
        # Ignore benchmark checkpoint requests while getting to <when>.
        while exit_cause == "checkpoint":
            exit_event = m5.simulate(when - m5.curTick())
            exit_cause = exit_event.getCause()

        if exit_cause == "simulate() limit reached":
            m5.checkpoint(joinpath(cptdir, "cpt.%d"))
            num_checkpoints += 1

        sim_ticks = when
        max_checkpoints = options.max_checkpoints

        while num_checkpoints < max_checkpoints and \
                exit_cause == "simulate() limit reached":
            if (sim_ticks + period) > maxtick:
                # Final partial interval: run to maxtick and stop.
                exit_event = m5.simulate(maxtick - sim_ticks)
                exit_cause = exit_event.getCause()
                break
            else:
                exit_event = m5.simulate(period)
                exit_cause = exit_event.getCause()
                sim_ticks += period
                # Skip benchmark checkpoint events within the interval.
                while exit_event.getCause() == "checkpoint":
                    exit_event = m5.simulate(sim_ticks - m5.curTick())
                if exit_event.getCause() == "simulate() limit reached":
                    m5.checkpoint(joinpath(cptdir, "cpt.%d"))
                    num_checkpoints += 1

    return exit_event
def benchCheckpoints(options, maxtick, cptdir):
    """Run to maxtick, writing a checkpoint at every benchmark-generated
    checkpoint event, up to --max-checkpoints of them.

    Returns the final exit event.
    """
    exit_event = m5.simulate(maxtick - m5.curTick())
    exit_cause = exit_event.getCause()

    num_checkpoints = 0
    max_checkpoints = options.max_checkpoints

    while exit_cause == "checkpoint":
        m5.checkpoint(joinpath(cptdir, "cpt.%d"))
        num_checkpoints += 1
        if num_checkpoints == max_checkpoints:
            exit_cause = "maximum %d checkpoints dropped" % max_checkpoints
            break
        # Resume with whatever tick budget remains.
        exit_event = m5.simulate(maxtick - m5.curTick())
        exit_cause = exit_event.getCause()

    return exit_event
def repeatSwitch(testsys, repeat_switch_cpu_list, maxtick, switch_freq):
    """Repeatedly switch between two CPU sets every switch_freq ticks
    until maxtick (or a non-timeout exit event) is reached.

    Returns the final exit event.
    """
    print "starting switch loop"
    while True:
        exit_event = m5.simulate(switch_freq)
        exit_cause = exit_event.getCause()

        # Anything other than the simulate() tick limit ends the loop.
        if exit_cause != "simulate() limit reached":
            return exit_event

        m5.switchCpus(testsys, repeat_switch_cpu_list)

        # Reverse each (old, new) pair so the next switch flips back.
        tmp_cpu_list = []
        for old_cpu, new_cpu in repeat_switch_cpu_list:
            tmp_cpu_list.append((new_cpu, old_cpu))
        repeat_switch_cpu_list = tmp_cpu_list

        if (maxtick - m5.curTick()) <= switch_freq:
            # Less than one full interval left: run it out and return.
            exit_event = m5.simulate(maxtick - m5.curTick())
            return exit_event
def run(options, root, testsys, cpu_class):
if options.maxtick:
maxtick = options.maxtick
elif options.maxtime:
simtime = m5.ticks.seconds(simtime)
print "simulating for: ", simtime
maxtick = simtime
else:
maxtick = m5.MaxTick
if options.checkpoint_dir:
cptdir = options.checkpoint_dir
elif m5.options.outdir:
cptdir = m5.options.outdir
else:
cptdir = getcwd()
if options.fast_forward and options.checkpoint_restore != None:
fatal("Can't specify both --fast-forward and --checkpoint-restore")
if options.standard_switch and not options.caches:
fatal("Must specify --caches when using --standard-switch")
if options.standard_switch and options.repeat_switch:
fatal("Can't specify both --standard-switch and --repeat-switch")
if options.repeat_switch and options.take_checkpoints:
fatal("Can't specify both --repeat-switch and --take-checkpoints")
np = options.num_cpus
switch_cpus = None
if options.prog_interval:
for i in xrange(np):
testsys.cpu[i].progress_interval = options.prog_interval
if options.maxinsts:
for i in xrange(np):
testsys.cpu[i].max_insts_any_thread = options.maxinsts
if cpu_class:
switch_cpus = [cpu_class(switched_out=True, cpu_id=(i))
for i in xrange(np)]
for i in xrange(np):
if options.fast_forward:
testsys.cpu[i].max_insts_any_thread = int(options.fast_forward)
switch_cpus[i].system = testsys
switch_cpus[i].workload = testsys.cpu[i].workload
switch_cpus[i].clock = testsys.cpu[i].clock
# simulation period
if options.maxinsts:
switch_cpus[i].max_insts_any_thread = options.maxinsts
# Add checker cpu if selected
if options.checker:
switch_cpus[i].addCheckerCpu()
testsys.switch_cpus = switch_cpus
switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)]
if options.repeat_switch:
switch_class = getCPUClass(options.cpu_type)[0]
if switch_class.require_caches() and \
not options.caches:
print "%s: Must be used with caches" % str(switch_class)
sys.exit(1)
if not switch_class.support_take_over():
print "%s: CPU switching not supported" % str(switch_class)
sys.exit(1)
repeat_switch_cpus = [switch_class(switched_out=True, \
cpu_id=(i)) for i in xrange(np)]
for i in xrange(np):
repeat_switch_cpus[i].system = testsys
repeat_switch_cpus[i].workload = testsys.cpu[i].workload
repeat_switch_cpus[i].clock = testsys.cpu[i].clock
if options.maxinsts:
repeat_switch_cpus[i].max_insts_any_thread = options.maxinsts
if options.checker:
repeat_switch_cpus[i].addCheckerCpu()
testsys.repeat_switch_cpus = repeat_switch_cpus
if cpu_class:
repeat_switch_cpu_list = [(switch_cpus[i], repeat_switch_cpus[i])
for i in xrange(np)]
else:
repeat_switch_cpu_list = [(testsys.cpu[i], repeat_switch_cpus[i])
for i in xrange(np)]
if options.standard_switch:
switch_cpus = [TimingSimpleCPU(switched_out=True, cpu_id=(i))
for i in xrange(np)]
switch_cpus_1 = [DerivO3CPU(switched_out=True, cpu_id=(i))
for i in xrange(np)]
for i in xrange(np):
switch_cpus[i].system = testsys
switch_cpus_1[i].system = testsys
switch_cpus[i].workload = testsys.cpu[i].workload
switch_cpus_1[i].workload = testsys.cpu[i].workload
switch_cpus[i].clock = testsys.cpu[i].clock
switch_cpus_1[i].clock = testsys.cpu[i].clock
# if restoring, make atomic cpu simulate only a few instructions
if options.checkpoint_restore != None:
testsys.cpu[i].max_insts_any_thread = 1
# Fast forward to specified location if we are not restoring
elif options.fast_forward:
testsys.cpu[i].max_insts_any_thread = int(options.fast_forward)
# Fast forward to a simpoint (warning: time consuming)
elif options.simpoint:
if testsys.cpu[i].workload[0].simpoint == 0:
fatal('simpoint not found')
testsys.cpu[i].max_insts_any_thread = \
testsys.cpu[i].workload[0].simpoint
# No distance specified, just switch
else:
testsys.cpu[i].max_insts_any_thread = 1
# warmup period
if options.warmup_insts:
switch_cpus[i].max_insts_any_thread = options.warmup_insts
# simulation period
if options.maxinsts:
switch_cpus_1[i].max_insts_any_thread = options.maxinsts
# attach the checker cpu if selected
if options.checker:
switch_cpus[i].addCheckerCpu()
switch_cpus_1[i].addCheckerCpu()
testsys.switch_cpus = switch_cpus
testsys.switch_cpus_1 = switch_cpus_1
switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)]
switch_cpu_list1 = [(switch_cpus[i], switch_cpus_1[i]) for i in xrange(np)]
# set the checkpoint in the cpu before m5.instantiate is called
if options.take_checkpoints != None and \
(options.simpoint or options.at_instruction):
offset = int(options.take_checkpoints)
# Set an instruction break point
if options.simpoint:
for i in xrange(np):
if testsys.cpu[i].workload[0].simpoint == 0:
fatal('no simpoint for testsys.cpu[%d].workload[0]', i)
checkpoint_inst = int(testsys.cpu[i].workload[0].simpoint) + offset
testsys.cpu[i].max_insts_any_thread = checkpoint_inst
# used for output below
options.take_checkpoints = checkpoint_inst
else:
options.take_checkpoints = offset
# Set all test cpus with the right number of instructions
# for the upcoming simulation
for i in xrange(np):
testsys.cpu[i].max_insts_any_thread = offset
checkpoint_dir = None
if options.checkpoint_restore != None:
maxtick, checkpoint_dir = findCptDir(options, maxtick, cptdir, testsys)
m5.instantiate(checkpoint_dir)
if options.standard_switch or cpu_class:
if options.standard_switch:
print "Switch at instruction count:%s" % \
str(testsys.cpu[0].max_insts_any_thread)
exit_event = m5.simulate()
elif cpu_class and options.fast_forward:
print "Switch at instruction count:%s" % \
str(testsys.cpu[0].max_insts_any_thread)
exit_event = m5.simulate()
else:
print "Switch at curTick count:%s" % str(10000)
exit_event = m5.simulate(10000)
print "Switched CPUS @ tick %s" % (m5.curTick())
m5.switchCpus(testsys, switch_cpu_list)
if options.standard_switch:
print "Switch at instruction count:%d" % \
(testsys.switch_cpus[0].max_insts_any_thread)
#warmup instruction count may have already been set
if options.warmup_insts:
exit_event = m5.simulate()
else:
exit_event = m5.simulate(options.standard_switch)
print "Switching CPUS @ tick %s" % (m5.curTick())
print "Simulation ends instruction count:%d" % \
(testsys.switch_cpus_1[0].max_insts_any_thread)
m5.switchCpus(testsys, switch_cpu_list1)
# If we're taking and restoring checkpoints, use checkpoint_dir
# option only for finding the checkpoints to restore from. This
# lets us test checkpointing by restoring from one set of
# checkpoints, generating a second set, and then comparing them.
if options.take_checkpoints and options.checkpoint_restore:
if m5.options.outdir:
cptdir = m5.options.outdir
else:
cptdir = getcwd()
if options.take_checkpoints != None :
# Checkpoints being taken via the command line at <when> and at
# subsequent periods of <period>. Checkpoint instructions
# received from the benchmark running are ignored and skipped in
# favor of command line checkpoint instructions.
exit_event = scriptCheckpoints(options, maxtick, cptdir)
else:
if options.fast_forward:
m5.stats.reset()
print "**** REAL SIMULATION ****"
# If checkpoints are being taken, then the checkpoint instruction
# will occur in the benchmark code it self.
if options.repeat_switch and maxtick > options.repeat_switch:
exit_event = repeatSwitch(testsys, repeat_switch_cpu_list,
maxtick, options.repeat_switch)
else:
exit_event = benchCheckpoints(options, maxtick, cptdir)
print 'Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause())
if options.checkpoint_at_end:
m5.checkpoint(joinpath(cptdir, "cpt.%d"))
if not m5.options.interactive:
sys.exit(exit_event.getCode())
| |
# coding:utf-8
__author__ = "seerjk"
# import
import MySQLdb as mysql
# Database connection settings used by every helper below.
# NOTE(review): credentials are hard-coded in source; move them to a config
# file or environment variables before deploying.
db_user = 'root'
db_pwd = 'redhat'
db_name = 'jiangkun'
db_host = '127.0.0.1'
db_port = 3306
def execute(sql_str):
# connect
db = mysql.connect(user=db_user, passwd=db_pwd, db=db_name, host=db_host, port=db_port)
db.autocommit(True)
cur = db.cursor()
# sql_str='insert into server values("python", 16)'
print "result:" + str(cur.execute(sql_str))
# cur.execute(sql_str)
# return cur.fetchall()
result_tuple = cur.fetchall()
db.close()
return result_tuple
def select_by_condition(sql_str):
    '''
    select by input sql_str
    internal function
    input: sql_str
    return: result_tuple, number of rows
    '''
    # connect
    db = mysql.connect(user=db_user, passwd=db_pwd, db=db_name, host=db_host, port=db_port)
    # db.autocommit(True)
    cur = db.cursor()
    # Defaults signal "query failed": empty result set and -1 row count.
    result_tuple = ()
    influenced_rows = -1
    try:
        influenced_rows = cur.execute(sql_str)
        result_tuple = cur.fetchall()
    except mysql.Error, e:
        # to do: write log
        print "Error %d: %s" % (e.args[0], e.args[1])
    finally:
        # the connection is always released, even on error
        db.close()
    return result_tuple, influenced_rows
def select_all():
    '''
    fetch every row of the user table
    return: result_tuple with all rows (the row count is discarded)
    '''
    all_rows, _row_count = select_by_condition("select * from user")
    return all_rows
def select_passwd_by_name(name):
    '''
    input name
    return passwd
    if name not in user table: return -1 int
    elif name exist: return passwd str
    '''
    # NOTE(review): name is interpolated directly into the SQL string --
    # SQL-injection risk if name comes from untrusted input; parameterized
    # queries should be used instead.
    sql_str = "select * from user where name='%s'" % name
    result_tuple, rows = select_by_condition(sql_str)
    if rows == 1:
        # row layout assumed (id, name, passwd): passwd is column index 2
        return result_tuple[0][2]
    else:
        # 0 rows (unknown name) and -1 (query error) both map to -1
        return -1
def select_name_by_id(id):
    '''
    input id
    return name
    if id not in user table: return -1 int
    elif id exist: return name str
    '''
    # int() guards the %d interpolation below against injection
    id = int(id)
    sql_str = "select name from user where id=%d" % id
    result_tuple, rows = select_by_condition(sql_str)
    # FIX: removed leftover debug prints ("*******" and the row count)
    # that polluted stdout on every call.
    if rows == 1:
        return result_tuple[0][0]
    else:
        # 0 rows (unknown id) and -1 (query error) both map to -1
        return -1
def is_name_exist(name):
    '''
    input name
    if name not exist: return False
    if name exist: return True
    '''
    # FIX: this function was defined twice with identical bodies; the
    # duplicate definition silently shadowed the first and is removed.
    # NOTE(review): name is interpolated directly into SQL -- injection
    # risk for untrusted input.
    sql_str = "select * from user where name='%s'" % name
    result_tuple, rows = select_by_condition(sql_str)
    # exactly one matching row means the user exists
    if rows == 1:
        return True
    else:
        return False
def change_user(sql_str):
    '''
    insert, update, delete
    internal function
    input: sql_str
    if ok and 1 row affected: return 1
    if ok and 0 row affected: return 0
    if error: return -1
    '''
    # connect
    db = mysql.connect(user=db_user, passwd=db_pwd, db=db_name, host=db_host, port=db_port)
    cur = db.cursor()
    # -1 signals "statement failed"; overwritten with the affected row
    # count on success
    influenced_rows = -1
    try:
        influenced_rows = cur.execute(sql_str)
        db.commit()
    except mysql.Error, e:
        # to do: write log
        print "Error %d: %s" % (e.args[0], e.args[1])
        # Rollback in case there is any error
        db.rollback()
    finally:
        # the connection is always released, even on error
        db.close()
    return influenced_rows
def change_user_passwd(name, old_passwd, new_passwd):
    '''
    change user's passwd
    input name, old_passwd, new_passwd
    if old_passwd != passwd in db: return -1
    if ok: return 1
    '''
    # FIX: this stub previously did nothing and returned None; implement
    # the documented contract with the existing helpers.
    current = select_passwd_by_name(name)
    if current == -1 or current != old_passwd:
        # unknown user, or the supplied old password does not match
        return -1
    # NOTE(review): values interpolated directly into SQL -- injection risk.
    sql_str = "update user set passwd='%s' where name='%s'" % (new_passwd, name)
    if change_user(sql_str) == -1:
        return -1
    return 1
def user_sign_up(name, passwd):
    '''
    sign up
    input name, passwd
    if error: return -1
    '''
    # an already-registered name cannot sign up again
    if select_passwd_by_name(name) != -1:
        return -1
    insert_sql = "insert into user (name, passwd) values ('%s', '%s')" % (name, passwd)
    if change_user(insert_sql) == -1:
        return -1
    return 1
def user_add(name, passwd):
    '''
    user add: sign up or change passwd
    input name, passwd
    if error: return -1
    if insert a new recode: return 1
    if update (change passwd): return 0
    '''
    if is_name_exist(name):
        # known user -- update the stored password
        sql_str = "update user set passwd='%s' where name='%s'" % (passwd, name)
        oper_code = 0
    else:
        # new user -- insert a fresh row
        sql_str = "insert into user (name, passwd) values ('%s', '%s')" % (name, passwd)
        oper_code = 1
    result_code = change_user(sql_str)
    return -1 if result_code == -1 else oper_code
def user_delete_by_name(name):
    '''
    delete row
    input: name
    return: rows affected -- 1 if the name existed, 0 if it did not,
            -1 on a database error
    '''
    # Deleting a non-existent name is not an error: the statement succeeds
    # with 0 rows affected, so no existence pre-check is needed.
    # FIX: removed the dead commented-out pre-check code and corrected the
    # docstring (the old one claimed -1 for a missing name; it returns 0).
    # NOTE(review): name is interpolated directly into SQL -- injection
    # risk for untrusted input.
    sql_str = "delete from user where name='%s'" % name
    result_code = change_user(sql_str)
    return result_code
def user_delete_by_id(id):
    '''
    delete row
    input: id (coerced to int)
    return: rows affected -- 1 if the id existed, 0 if it did not,
            -1 on a database error
    '''
    # deleting a non-existent id succeeds with 0 rows affected
    # int() guards the %d interpolation against injection
    id = int(id)
    # FIX: id is a numeric column; interpolate it unquoted (was "id='%d'",
    # which relied on MySQL's implicit string-to-number coercion).
    sql_str = "delete from user where id=%d" % id
    result_code = change_user(sql_str)
    return result_code
def main():
# sql_str='insert into server values("python", 16)'
# sql_str="select * from user where name='i'"
# res_tuple = execute(sql_str)
# print select_passwd_by_name('admin')
print "***** testing add"
print user_add("juju", "12345")
print user_add("ruru", "134")
res_tuple = select_all()
for c in res_tuple:
print "%s %s %s" % c
print "***** testing delete"
print user_delete_by_name("juju")
print user_delete_by_id(12)
res_tuple = select_all()
for c in res_tuple:
print "%s %s %s" % c
print select_name_by_id(12)
# run the smoke tests only when executed as a script, not when imported
if __name__ == "__main__":
    main()
| |
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
# Copyright (c) 2014 Jeff Applewhite. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver library for NetApp C-mode block storage systems.
"""
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _
from cinder.objects import fields
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
from cinder.volume.drivers.netapp.dataontap.utils import data_motion
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
@six.add_metaclass(volume_utils.TraceWrapperMetaclass)
class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
data_motion.DataMotionMixin):
"""NetApp block storage library for Data ONTAP (Cluster-mode).
Version history:
.. code-block:: none
1.0.0 - Driver development before Wallaby
2.0.0 - Add support for QoS minimums specs
Add support for dynamic Adaptive QoS policy group creation
3.0.0 - Add support for Intra-cluster Storage assisted volume migration
Add support for revert to snapshot
"""
VERSION = "3.0.0"
REQUIRED_CMODE_FLAGS = ['netapp_vserver']
    def __init__(self, driver_name, driver_protocol, **kwargs):
        """Initialize cluster-mode library state and replication flags."""
        super(NetAppBlockStorageCmodeLibrary, self).__init__(driver_name,
                                                             driver_protocol,
                                                             **kwargs)
        self.configuration.append_config_values(na_opts.netapp_cluster_opts)
        self.driver_mode = 'cluster'
        # A non-None active_backend_id means the service was started while
        # failed over to a replication target.
        self.failed_over_backend_name = kwargs.get('active_backend_id')
        self.failed_over = self.failed_over_backend_name is not None
        # Whole-backend replication is on when replication targets are
        # configured for this backend.
        self.replication_enabled = (
            True if self.get_replication_backend_names(
                self.configuration) else False)
    def do_setup(self, context):
        """Set up the API client, SSC library, and performance library."""
        super(NetAppBlockStorageCmodeLibrary, self).do_setup(context)
        na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration)
        # cDOT API client
        # When failed over, talk to the replication target backend instead
        # of the configured primary.
        self.zapi_client = dot_utils.get_client_for_backend(
            self.failed_over_backend_name or self.backend_name)
        self.vserver = self.zapi_client.vserver
        # Storage service catalog
        self.ssc_library = capabilities.CapabilitiesLibrary(
            self.driver_protocol, self.vserver, self.zapi_client,
            self.configuration)
        self.ssc_library.check_api_permissions()
        # Cluster-scoped credentials unlock extra features (QoS support,
        # aggregate capacities, node utilization metrics).
        self.using_cluster_credentials = (
            self.ssc_library.cluster_user_supported())
        # Performance monitoring library
        self.perf_library = perf_cmode.PerformanceCmodeLibrary(
            self.zapi_client)
    def _update_zapi_client(self, backend_name):
        """Set cDOT API client for the specified config backend stanza name."""
        self.zapi_client = dot_utils.get_client_for_backend(backend_name)
        self.vserver = self.zapi_client.vserver
        # Re-point the SSC and performance libraries at the new client so
        # stats keep flowing after a failover.
        self.ssc_library._update_for_failover(self.zapi_client,
                                              self._get_flexvol_to_pool_map())
        ssc = self.ssc_library.get_ssc()
        self.perf_library._update_for_failover(self.zapi_client, ssc)
        # Clear LUN table cache
        # (stale entries would describe LUNs on the previous backend)
        self.lun_table = {}
    def check_for_setup_error(self):
        """Check that the driver is working and can communicate."""
        # Fail fast when the pool-name search pattern matched no flexvols;
        # the driver would otherwise start with nothing to provision from.
        if not self._get_flexvol_to_pool_map():
            msg = _('No pools are available for provisioning volumes. '
                    'Ensure that the configuration option '
                    'netapp_pool_name_search_pattern is set correctly.')
            raise na_utils.NetAppDriverException(msg)
        self._add_looping_tasks()
        super(NetAppBlockStorageCmodeLibrary, self).check_for_setup_error()
    def _add_looping_tasks(self):
        """Add tasks that need to be executed at a fixed interval."""
        # Note(cknight): Run the update once in the current thread to prevent a
        # race with the first invocation of _update_volume_stats.
        self._update_ssc()
        # Add the task that updates the slow-changing storage service catalog
        self.loopingcalls.add_task(self._update_ssc,
                                   loopingcalls.ONE_HOUR,
                                   loopingcalls.ONE_HOUR)
        # Housekeeping runs every ten minutes, starting immediately
        # (initial delay of 0).
        self.loopingcalls.add_task(
            self._handle_housekeeping_tasks,
            loopingcalls.TEN_MINUTES,
            0)
        super(NetAppBlockStorageCmodeLibrary, self)._add_looping_tasks()
    def _handle_housekeeping_tasks(self):
        """Handle various cleanup activities."""
        active_backend = self.failed_over_backend_name or self.backend_name
        # Add the task that harvests soft-deleted QoS policy groups.
        # (Requires cluster-scoped credentials.)
        if self.using_cluster_credentials:
            self.zapi_client.remove_unused_qos_policy_groups()
        LOG.debug("Current service state: Replication enabled: %("
                  "replication)s. Failed-Over: %(failed)s. Active Backend "
                  "ID: %(active)s",
                  {
                      'replication': self.replication_enabled,
                      'failed': self.failed_over,
                      'active': active_backend,
                  })
        # Create pool mirrors if whole-backend replication configured
        if self.replication_enabled and not self.failed_over:
            self.ensure_snapmirrors(
                self.configuration, self.backend_name,
                self.ssc_library.get_ssc_flexvol_names())
def _handle_ems_logging(self):
"""Log autosupport messages."""
base_ems_message = dot_utils.build_ems_log_message_0(
self.driver_name, self.app_version)
self.zapi_client.send_ems_log_message(base_ems_message)
pool_ems_message = dot_utils.build_ems_log_message_1(
self.driver_name, self.app_version, self.vserver,
self.ssc_library.get_ssc_flexvol_names(), [])
self.zapi_client.send_ems_log_message(pool_ems_message)
def _create_lun(self, volume_name, lun_name, size,
metadata, qos_policy_group_name=None,
qos_policy_group_is_adaptive=False):
"""Creates a LUN, handling Data ONTAP differences as needed."""
self.zapi_client.create_lun(
volume_name, lun_name, size, metadata, qos_policy_group_name,
qos_policy_group_is_adaptive)
def _create_lun_handle(self, metadata, vserver=None):
"""Returns LUN handle based on filer type."""
vserver = vserver or self.vserver
return '%s:%s' % (self.vserver, metadata['Path'])
def _find_mapped_lun_igroup(self, path, initiator_list):
"""Find an igroup for a LUN mapped to the given initiator(s)."""
initiator_igroups = self.zapi_client.get_igroup_by_initiators(
initiator_list)
lun_maps = self.zapi_client.get_lun_map(path)
if initiator_igroups and lun_maps:
for igroup in initiator_igroups:
igroup_name = igroup['initiator-group-name']
if igroup_name.startswith(na_utils.OPENSTACK_PREFIX):
for lun_map in lun_maps:
if lun_map['initiator-group'] == igroup_name:
return igroup_name, lun_map['lun-id']
return None, None
    def _clone_lun(self, name, new_name, space_reserved=None,
                   qos_policy_group_name=None, src_block=0, dest_block=0,
                   block_count=0, source_snapshot=None, is_snapshot=False,
                   qos_policy_group_is_adaptive=False):
        """Clone LUN with the given handle to the new name.

        Raises VolumeBackendAPIException if the clone cannot be found on
        the filer afterwards.
        """
        if not space_reserved:
            # fall back to the driver-wide space reservation policy
            space_reserved = self.lun_space_reservation
        metadata = self._get_lun_attr(name, 'metadata')
        volume = metadata['Volume']
        self.zapi_client.clone_lun(
            volume, name, new_name, space_reserved,
            qos_policy_group_name=qos_policy_group_name,
            src_block=src_block, dest_block=dest_block,
            block_count=block_count,
            source_snapshot=source_snapshot,
            is_snapshot=is_snapshot,
            qos_policy_group_is_adaptive=qos_policy_group_is_adaptive)
        LOG.debug("Cloned LUN with new name %s", new_name)
        # Look the clone back up so the cached entry carries authoritative
        # metadata (size, path, etc.) from the backend.
        lun = self.zapi_client.get_lun_by_args(vserver=self.vserver,
                                               path='/vol/%s/%s'
                                               % (volume, new_name))
        if len(lun) == 0:
            msg = _("No cloned LUN named %s found on the filer")
            raise exception.VolumeBackendAPIException(data=msg % new_name)
        clone_meta = self._create_lun_meta(lun[0])
        self._add_lun_to_table(
            block_base.NetAppLun('%s:%s' % (clone_meta['Vserver'],
                                            clone_meta['Path']),
                                 new_name,
                                 lun[0].get_child_content('size'),
                                 clone_meta))
def _create_lun_meta(self, lun):
"""Creates LUN metadata dictionary."""
self.zapi_client.check_is_naelement(lun)
meta_dict = {}
meta_dict['Vserver'] = lun.get_child_content('vserver')
meta_dict['Volume'] = lun.get_child_content('volume')
meta_dict['Qtree'] = lun.get_child_content('qtree')
meta_dict['Path'] = lun.get_child_content('path')
meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
meta_dict['SpaceReserved'] = \
lun.get_child_content('is-space-reservation-enabled')
meta_dict['UUID'] = lun.get_child_content('uuid')
return meta_dict
    def _get_fc_target_wwpns(self, include_partner=True):
        # include_partner is accepted for interface compatibility but is not
        # used here; the cluster-mode client returns all target WWPNs.
        return self.zapi_client.get_fc_target_wwpns()
def _update_volume_stats(self, filter_function=None,
goodness_function=None):
"""Retrieve backend stats."""
LOG.debug('Updating volume stats')
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.driver_name
data['vendor_name'] = 'NetApp'
data['driver_version'] = self.VERSION
data['storage_protocol'] = self.driver_protocol
data['pools'] = self._get_pool_stats(
filter_function=filter_function,
goodness_function=goodness_function)
data['sparse_copy_volume'] = True
# Used for service state report
data['replication_enabled'] = self.replication_enabled
self._stats = data
    def _get_pool_stats(self, filter_function=None, goodness_function=None):
        """Retrieve pool (Data ONTAP flexvol) stats.

        Pool statistics are assembled from static driver capabilities, the
        Storage Service Catalog of flexvol attributes, and real-time capacity
        and controller utilization metrics. The pool name is the flexvol name.
        """
        pools = []
        ssc = self.ssc_library.get_ssc()
        if not ssc:
            # no known flexvols -> nothing to report
            return pools
        # Utilization and performance metrics require cluster-scoped
        # credentials
        if self.using_cluster_credentials:
            # Get up-to-date node utilization metrics just once
            self.perf_library.update_performance_cache(ssc)
            # Get up-to-date aggregate capacities just once
            aggregates = self.ssc_library.get_ssc_aggregates()
            aggr_capacities = self.zapi_client.get_aggregate_capacities(
                aggregates)
        else:
            aggr_capacities = {}
        for ssc_vol_name, ssc_vol_info in ssc.items():
            pool = dict()
            # Add storage service catalog data
            pool.update(ssc_vol_info)
            # Add driver capabilities and config info
            pool['QoS_support'] = self.using_cluster_credentials
            pool['multiattach'] = True
            pool['online_extend_support'] = True
            pool['consistencygroup_support'] = True
            pool['consistent_group_snapshot_enabled'] = True
            pool['reserved_percentage'] = self.reserved_percentage
            pool['max_over_subscription_ratio'] = (
                self.max_over_subscription_ratio)
            # Add up-to-date capacity info
            capacity = self.zapi_client.get_flexvol_capacity(
                flexvol_name=ssc_vol_name)
            size_total_gb = capacity['size-total'] / units.Gi
            pool['total_capacity_gb'] = na_utils.round_down(size_total_gb)
            size_available_gb = capacity['size-available'] / units.Gi
            pool['free_capacity_gb'] = na_utils.round_down(size_available_gb)
            # Optionally report provisioned capacity by summing LUN sizes.
            if self.configuration.netapp_driver_reports_provisioned_capacity:
                luns = self.zapi_client.get_lun_sizes_by_volume(
                    ssc_vol_name)
                provisioned_cap = 0
                for lun in luns:
                    lun_name = lun['path'].split('/')[-1]
                    # Filtering luns that matches the volume name template to
                    # exclude snapshots
                    if volume_utils.extract_id_from_volume_name(lun_name):
                        provisioned_cap = provisioned_cap + lun['size']
                pool['provisioned_capacity_gb'] = na_utils.round_down(
                    float(provisioned_cap) / units.Gi)
            # Dedupe stats need cluster credentials; report 0.0 otherwise.
            if self.using_cluster_credentials:
                dedupe_used = self.zapi_client.get_flexvol_dedupe_used_percent(
                    ssc_vol_name)
            else:
                dedupe_used = 0.0
            pool['netapp_dedupe_used_percent'] = na_utils.round_down(
                dedupe_used)
            aggregate_name = ssc_vol_info.get('netapp_aggregate')
            aggr_capacity = aggr_capacities.get(aggregate_name, {})
            pool['netapp_aggregate_used_percent'] = aggr_capacity.get(
                'percent-used', 0)
            # Add utilization data
            utilization = self.perf_library.get_node_utilization_for_pool(
                ssc_vol_name)
            pool['utilization'] = na_utils.round_down(utilization)
            pool['filter_function'] = filter_function
            pool['goodness_function'] = goodness_function
            # Add replication capabilities/stats
            pool.update(
                self.get_replication_backend_stats(self.configuration))
            pools.append(pool)
        return pools
    def _update_ssc(self):
        """Refresh the storage service catalog with the latest set of pools."""
        # Re-resolve the pool-name pattern so newly created flexvols are
        # picked up on the next pass.
        self.ssc_library.update_ssc(self._get_flexvol_to_pool_map())
def _get_flexvol_to_pool_map(self):
"""Get the flexvols that match the pool name search pattern.
The map is of the format suitable for seeding the storage service
catalog: {<flexvol_name> : {'pool_name': <flexvol_name>}}
"""
pool_regex = na_utils.get_pool_name_filter_regex(self.configuration)
pools = {}
flexvol_names = self.zapi_client.list_flexvols()
for flexvol_name in flexvol_names:
msg_args = {
'flexvol': flexvol_name,
'vol_pattern': pool_regex.pattern,
}
if pool_regex.match(flexvol_name):
msg = "Volume '%(flexvol)s' matches %(vol_pattern)s"
LOG.debug(msg, msg_args)
pools[flexvol_name] = {'pool_name': flexvol_name}
else:
msg = "Volume '%(flexvol)s' does not match %(vol_pattern)s"
LOG.debug(msg, msg_args)
return pools
    def delete_volume(self, volume):
        """Driver entry point for destroying existing volumes."""
        super(NetAppBlockStorageCmodeLibrary, self).delete_volume(volume)
        try:
            qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
                volume)
        except exception.Invalid:
            # Delete even if there was invalid qos policy specified for the
            # volume.
            qos_policy_group_info = None
        # The QoS group is soft-deleted here and harvested later by the
        # housekeeping task.
        self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
        msg = 'Deleted LUN with name %(name)s and QoS info %(qos)s'
        LOG.debug(msg, {'name': volume['name'], 'qos': qos_policy_group_info})
    def _setup_qos_for_volume(self, volume, extra_specs):
        """Provision a QoS policy group for the volume from its extra specs.

        :returns: the validated QoS policy group info dict.
        :raises VolumeBackendAPIException: when the QoS spec is invalid.
        """
        try:
            qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
                volume, extra_specs)
        except exception.Invalid:
            msg = _('Invalid QoS specification detected while getting QoS '
                    'policy for volume %s') % volume['id']
            raise exception.VolumeBackendAPIException(data=msg)
        # QoS minimum (floor) specs are only supported on some pools.
        pool = volume_utils.extract_host(volume['host'], level='pool')
        qos_min_support = self.ssc_library.is_qos_min_supported(pool)
        self.zapi_client.provision_qos_policy_group(qos_policy_group_info,
                                                    qos_min_support)
        return qos_policy_group_info
def _get_volume_model_update(self, volume):
"""Provide any updates necessary for a volume being created/managed."""
if self.replication_enabled:
return {'replication_status': fields.ReplicationStatus.ENABLED}
def _mark_qos_policy_group_for_deletion(self, qos_policy_group_info):
is_adaptive = na_utils.is_qos_policy_group_spec_adaptive(
qos_policy_group_info)
self.zapi_client.mark_qos_policy_group_for_deletion(
qos_policy_group_info, is_adaptive)
    def unmanage(self, volume):
        """Removes the specified volume from Cinder management.

        Does not delete the underlying backend storage object.
        """
        try:
            qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
                volume)
        except exception.Invalid:
            # Unmanage even if there was invalid qos policy specified for the
            # volume.
            qos_policy_group_info = None
        # The LUN stays on the backend, but its Cinder-managed QoS group
        # can be cleaned up.
        self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
        super(NetAppBlockStorageCmodeLibrary, self).unmanage(volume)
    def failover_host(self, context, volumes, secondary_id=None, groups=None):
        """Failover a backend to a secondary replication target."""
        # context and groups are unused here; the DataMotionMixin helper
        # performs the actual failover.
        return self._failover_host(volumes, secondary_id=secondary_id)
def _get_backing_flexvol_names(self):
"""Returns a list of backing flexvol names."""
ssc = self.ssc_library.get_ssc()
return list(ssc.keys())
def create_group(self, group):
"""Driver entry point for creating a generic volume group.
ONTAP does not maintain an actual Group construct. As a result, no
communication to the backend is necessary for generic volume group
creation.
:returns: Hard-coded model update for generic volume group model.
"""
model_update = {'status': fields.GroupStatus.AVAILABLE}
return model_update
    def delete_group(self, group, volumes):
        """Driver entry point for deleting a group.

        :returns: Updated group model and list of volume models
                  for the volumes that were deleted.
        """
        model_update = {'status': fields.GroupStatus.DELETED}
        volumes_model_update = []
        for volume in volumes:
            try:
                self._delete_lun(volume['name'])
                volumes_model_update.append(
                    {'id': volume['id'], 'status': 'deleted'})
            except Exception:
                # A failed member delete does not abort the group delete;
                # the member is reported as error_deleting instead.
                volumes_model_update.append(
                    {'id': volume['id'],
                     'status': 'error_deleting'})
                LOG.exception("Volume %(vol)s in the group could not be "
                              "deleted.", {'vol': volume})
        return model_update, volumes_model_update
    def update_group(self, group, add_volumes=None, remove_volumes=None):
        """Driver entry point for updating a generic volume group.

        Since no actual group construct is ever created in ONTAP, it is not
        necessary to update any metadata on the backend. Since this is a NO-OP,
        there is guaranteed to be no change in any of the volumes' statuses.
        """
        # (group model update, added-volume updates, removed-volume updates)
        return None, None, None
    def create_group_snapshot(self, group_snapshot, snapshots):
        """Creates a Cinder group snapshot object.

        The Cinder group snapshot object is created by making use of an
        ephemeral ONTAP consistency group snapshot in order to provide
        write-order consistency for a set of flexvol snapshots. First, a list
        of the flexvols backing the given Cinder group must be gathered. An
        ONTAP group-snapshot of these flexvols will create a snapshot copy of
        all the Cinder volumes in the generic volume group. For each Cinder
        volume in the group, it is then necessary to clone its backing LUN from
        the ONTAP cg-snapshot. The naming convention used for the clones is
        what indicates the clone's role as a Cinder snapshot and its inclusion
        in a Cinder group. The ONTAP cg-snapshot of the flexvols is no longer
        required after having cloned the LUNs backing the Cinder volumes in
        the Cinder group.

        :returns: An implicit update for group snapshot and snapshots models
                  that is interpreted by the manager to set their models to
                  available.
        """
        try:
            # Only groups of a CG snapshot type get write-order consistency;
            # plain groups just snapshot each member independently.
            if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
                self._create_consistent_group_snapshot(group_snapshot,
                                                       snapshots)
            else:
                for snapshot in snapshots:
                    self._create_snapshot(snapshot)
        except Exception as ex:
            err_msg = (_("Create group snapshot failed (%s).") % ex)
            LOG.exception(err_msg, resource=group_snapshot)
            raise na_utils.NetAppDriverException(err_msg)
        return None, None
    def _create_consistent_group_snapshot(self, group_snapshot, snapshots):
        """Snapshot all member flexvols consistently, then clone the LUNs."""
        # Gather the set of flexvols (pools) backing the member volumes.
        flexvols = set()
        for snapshot in snapshots:
            flexvols.add(volume_utils.extract_host(
                snapshot['volume']['host'], level='pool'))
        # One consistency-group snapshot across all backing flexvols.
        self.zapi_client.create_cg_snapshot(flexvols, group_snapshot['id'])
        # Clone each member LUN out of the cg-snapshot.
        for snapshot in snapshots:
            self._clone_lun(snapshot['volume']['name'], snapshot['name'],
                            source_snapshot=group_snapshot['id'])
        # The ephemeral cg-snapshot is no longer needed; delete it, or mark
        # it for deferred deletion if it is still busy.
        for flexvol in flexvols:
            try:
                self.zapi_client.wait_for_busy_snapshot(
                    flexvol, group_snapshot['id'])
                self.zapi_client.delete_snapshot(
                    flexvol, group_snapshot['id'])
            except exception.SnapshotIsBusy:
                self.zapi_client.mark_snapshot_for_deletion(
                    flexvol, group_snapshot['id'])
def delete_group_snapshot(self, group_snapshot, snapshots):
"""Delete LUNs backing each snapshot in the group snapshot.
:returns: An implicit update for snapshots models that is interpreted
by the manager to set their models to deleted.
"""
for snapshot in snapshots:
self._delete_lun(snapshot['name'])
LOG.debug("Snapshot %s deletion successful", snapshot['name'])
return None, None
    def create_group_from_src(self, group, volumes, group_snapshot=None,
                              snapshots=None, source_group=None,
                              source_vols=None):
        """Creates a group from a group snapshot or a group of cinder vols.

        :returns: An implicit update for the volumes model that is
                  interpreted by the manager as a successful operation.
        """
        LOG.debug("VOLUMES %s ", ', '.join([vol['id'] for vol in volumes]))
        volume_model_updates = []
        if group_snapshot:
            # Clone each new volume from its corresponding group snapshot.
            vols = zip(volumes, snapshots)
            for volume, snapshot in vols:
                source = {
                    'name': snapshot['name'],
                    'size': snapshot['volume_size'],
                }
                volume_model_update = self._clone_source_to_destination(
                    source, volume)
                if volume_model_update is not None:
                    volume_model_update['id'] = volume['id']
                    volume_model_updates.append(volume_model_update)
        else:
            # Clone each new volume from the corresponding source volume's
            # backing LUN.
            vols = zip(volumes, source_vols)
            for volume, old_src_vref in vols:
                src_lun = self._get_lun_from_table(old_src_vref['name'])
                source = {'name': src_lun.name, 'size': old_src_vref['size']}
                volume_model_update = self._clone_source_to_destination(
                    source, volume)
                if volume_model_update is not None:
                    volume_model_update['id'] = volume['id']
                    volume_model_updates.append(volume_model_update)
        return None, volume_model_updates
def _move_lun(self, volume, src_ontap_volume, dest_ontap_volume,
dest_lun_name=None):
"""Moves LUN from an ONTAP volume to another."""
job_uuid = self.zapi_client.start_lun_move(
volume.name, dest_ontap_volume, src_ontap_volume=src_ontap_volume,
dest_lun_name=dest_lun_name)
LOG.debug('Start moving LUN %s from %s to %s. '
'Job UUID is %s.', volume.name, src_ontap_volume,
dest_ontap_volume, job_uuid)
def _wait_lun_move_complete():
move_status = self.zapi_client.get_lun_move_status(job_uuid)
LOG.debug('Waiting for LUN move job %s to complete. '
'Current status is: %s.', job_uuid,
move_status['job-status'])
if not move_status:
status_error_msg = (_("Error moving LUN %s. The "
"corresponding Job UUID % doesn't "
"exist."))
raise na_utils.NetAppDriverException(
status_error_msg % (volume.id, job_uuid))
elif move_status['job-status'] == 'destroyed':
status_error_msg = (_('Error moving LUN %s. %s.'))
raise na_utils.NetAppDriverException(
status_error_msg % (volume.id,
move_status['last-failure-reason']))
elif move_status['job-status'] == 'complete':
raise loopingcall.LoopingCallDone()
try:
timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
_wait_lun_move_complete)
timer.start(
interval=15,
timeout=self.configuration.netapp_migrate_volume_timeout
).wait()
except loopingcall.LoopingCallTimeOut:
msg = (_('Timeout waiting to complete move operation of LUN %s.'))
raise na_utils.NetAppDriverTimeout(msg % volume.id)
    def _cancel_lun_copy(self, job_uuid, volume, dest_pool, dest_backend_name):
        """Cancel an on-going lun copy operation."""
        try:
            # NOTE(sfernand): Another approach would be first checking if
            # the copy operation isn't in `destroying` or `destroyed` states
            # before issuing cancel.
            self.zapi_client.cancel_lun_copy(job_uuid)
        except na_utils.NetAppDriverException:
            # Cancel failed (copy may already be past the point of no
            # return): fall back to destroying the destination LUN directly.
            dest_client = dot_utils.get_client_for_backend(dest_backend_name)
            lun_path = '/vol/%s/%s' % (dest_pool, volume.name)
            try:
                dest_client.destroy_lun(lun_path)
            except Exception:
                # Best effort only -- leave a breadcrumb for the operator.
                LOG.warning('Error cleaning up LUN %s in destination volume. '
                            'Verify if destination volume still exists in '
                            'pool %s and delete it manually to avoid unused '
                            'resources.', lun_path, dest_pool)
def _copy_lun(self, volume, src_ontap_volume, src_vserver,
dest_ontap_volume, dest_vserver, dest_lun_name=None,
dest_backend_name=None, cancel_on_error=False):
"""Copies LUN from an ONTAP volume to another."""
job_uuid = self.zapi_client.start_lun_copy(
volume.name, dest_ontap_volume, dest_vserver,
src_ontap_volume=src_ontap_volume, src_vserver=src_vserver,
dest_lun_name=dest_lun_name)
LOG.debug('Start copying LUN %(vol)s from '
'%(src_vserver)s:%(src_ontap_vol)s to '
'%(dest_vserver)s:%(dest_ontap_vol)s. Job UUID is %(job)s.',
{'vol': volume.name, 'src_vserver': src_vserver,
'src_ontap_vol': src_ontap_volume,
'dest_vserver': dest_vserver,
'dest_ontap_vol': dest_ontap_volume,
'job': job_uuid})
def _wait_lun_copy_complete():
copy_status = self.zapi_client.get_lun_copy_status(job_uuid)
LOG.debug('Waiting for LUN copy job %s to complete. Current '
'status is: %s.', job_uuid, copy_status['job-status'])
if not copy_status:
status_error_msg = (_("Error copying LUN %s. The "
"corresponding Job UUID % doesn't "
"exist."))
raise na_utils.NetAppDriverException(
status_error_msg % (volume.id, job_uuid))
elif copy_status['job-status'] == 'destroyed':
status_error_msg = (_('Error copying LUN %s. %s.'))
raise na_utils.NetAppDriverException(
status_error_msg % (volume.id,
copy_status['last-failure-reason']))
elif copy_status['job-status'] == 'complete':
raise loopingcall.LoopingCallDone()
try:
timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
_wait_lun_copy_complete)
timer.start(
interval=10,
timeout=self.configuration.netapp_migrate_volume_timeout
).wait()
except Exception as e:
with excutils.save_and_reraise_exception() as ctxt:
if cancel_on_error:
self._cancel_lun_copy(job_uuid, volume, dest_ontap_volume,
dest_backend_name=dest_backend_name)
if isinstance(e, loopingcall.LoopingCallTimeOut):
ctxt.reraise = False
msg = (_('Timeout waiting volume %s to complete '
'migration.'))
raise na_utils.NetAppDriverTimeout(msg % volume.id)
def _finish_migrate_volume_to_vserver(self, src_volume):
"""Finish volume migration to another vserver within the cluster."""
# The source volume can be safely deleted after a successful migration.
self.delete_volume(src_volume)
# LUN cache for current backend can be deleted after migration.
self._delete_lun_from_table(src_volume.name)
def _migrate_volume_to_vserver(self, volume, src_pool, src_vserver,
                               dest_pool, dest_vserver, dest_backend_name):
    """Migrate volume to another vserver within the same cluster.

    :param volume: Cinder volume to migrate; must be 'available'.
    :param src_pool: source ONTAP volume name.
    :param src_vserver: source vserver name.
    :param dest_pool: destination ONTAP volume name.
    :param dest_vserver: destination vserver name.
    :param dest_backend_name: destination backend name.
    :returns: empty dict (no model updates required).
    :raises InvalidVolume: if the volume is not in 'available' status.
    """
    LOG.info('Migrating volume %(vol)s from '
             '%(src_vserver)s:%(src_ontap_vol)s to '
             '%(dest_vserver)s:%(dest_ontap_vol)s.',
             {'vol': volume.id, 'src_vserver': src_vserver,
              'src_ontap_vol': src_pool, 'dest_vserver': dest_vserver,
              'dest_ontap_vol': dest_pool})
    # NOTE(sfernand): Migrating to a different vserver relies on copying
    # operations which are always disruptive, as it requires the
    # destination volume to be added as a new block device to the Nova
    # instance. This differs from migrating volumes in a same vserver,
    # since we can make use of a LUN move operation without the
    # need of changing the iSCSI target.
    if volume.status != fields.VolumeStatus.AVAILABLE:
        msg = _("Volume status must be 'available' in order to "
                "migrate volume to another vserver.")
        LOG.error(msg)
        raise exception.InvalidVolume(reason=msg)
    # A vserver peer relationship with the 'lun_copy' application is
    # required before LUNs can be copied across vservers.
    vserver_peer_application = 'lun_copy'
    self.create_vserver_peer(src_vserver, self.backend_name, dest_vserver,
                             [vserver_peer_application])
    # cancel_on_error keeps the destination clean if the copy fails.
    self._copy_lun(volume, src_pool, src_vserver, dest_pool,
                   dest_vserver, dest_backend_name=dest_backend_name,
                   cancel_on_error=True)
    self._finish_migrate_volume_to_vserver(volume)
    LOG.info('Successfully migrated volume %(vol)s from '
             '%(src_vserver)s:%(src_ontap_vol)s '
             'to %(dest_vserver)s:%(dest_ontap_vol)s.',
             {'vol': volume.id, 'src_vserver': src_vserver,
              'src_ontap_vol': src_pool, 'dest_vserver': dest_vserver,
              'dest_ontap_vol': dest_pool})
    # No model updates are necessary, so return empty dict
    return {}
def _finish_migrate_volume_to_pool(self, src_volume, dest_pool):
"""Finish volume migration to another pool within the same vserver."""
# LUN cache must be updated with new path and volume information.
lun = self._get_lun_from_table(src_volume.name)
new_lun_path = '/vol/%s/%s' % (dest_pool, src_volume.name)
lun.metadata['Path'] = new_lun_path
lun.metadata['Volume'] = dest_pool
def _migrate_volume_to_pool(self, volume, src_pool, dest_pool, vserver,
                            dest_backend_name):
    """Migrate volume to another Cinder Pool within the same vserver.

    :param volume: Cinder volume being migrated.
    :param src_pool: source ONTAP volume (Cinder pool) name.
    :param dest_pool: destination ONTAP volume (Cinder pool) name.
    :param vserver: vserver containing both pools.
    :param dest_backend_name: destination backend name (kept for
        interface symmetry with the cross-vserver migration path).
    :returns: dict of model updates; contains a 'status' entry when the
        move timed out and the volume was set to maintenance.
    :raises NetAppDriverException: if the LUN move fails outright.
    """
    LOG.info('Migrating volume %(vol)s from pool %(src)s to '
             '%(dest)s within vserver %(vserver)s.',
             {'vol': volume.id, 'src': src_pool, 'dest': dest_pool,
              'vserver': vserver})
    updates = {}
    try:
        self._move_lun(volume, src_pool, dest_pool)
    except na_utils.NetAppDriverTimeout:
        # The trailing space after 'migration.' matters: without it the
        # concatenated warning read "migration.Volume status".
        error_msg = (_('Timeout waiting volume %s to complete migration. '
                       'Volume status is set to maintenance to prevent '
                       'performing operations with this volume. Check the '
                       'migration status on the storage side and set '
                       'volume status manually if migration succeeded.'))
        LOG.warning(error_msg, volume.id)
        updates['status'] = fields.VolumeStatus.MAINTENANCE
    except na_utils.NetAppDriverException as e:
        error_msg = (_('Failed to migrate volume %(vol)s from pool '
                       '%(src)s to %(dest)s. %(err)s'))
        raise na_utils.NetAppDriverException(
            error_msg % {'vol': volume.id, 'src': src_pool,
                         'dest': dest_pool, 'err': e})
    self._finish_migrate_volume_to_pool(volume, dest_pool)
    LOG.info('Successfully migrated volume %(vol)s from pool %(src)s '
             'to %(dest)s within vserver %(vserver)s.',
             {'vol': volume.id, 'src': src_pool, 'dest': dest_pool,
              'vserver': vserver})
    return updates
def migrate_volume(self, context, volume, host):
    """Driver entry point for migrating a Cinder volume.

    Delegates to the ONTAP-assisted migration helper, targeting either
    another pool or another vserver depending on *host*.
    """
    backend = self.backend_name
    vserver = self.configuration.netapp_vserver
    return self.migrate_volume_ontap_assisted(volume, host, backend,
                                              vserver)
def revert_to_snapshot(self, volume, snapshot):
    """Driver entry point for reverting volume to snapshot.

    :raises VolumeBackendAPIException: if the revert fails for any
        reason; the root cause is logged before being wrapped instead of
        being silently discarded as before.
    """
    try:
        self._revert_to_snapshot(volume, snapshot)
    except Exception:
        # Keep the original traceback in the logs; callers only see the
        # generic backend failure below.
        LOG.exception("Revert snapshot failed.")
        raise exception.VolumeBackendAPIException(
            "Revert snapshot failed.")
def _revert_to_snapshot(self, volume, snapshot):
    """Sets up all required resources for _swap_luns.

    Clones a new LUN from the snapshot and swaps it with the volume's
    current LUN. If _swap_luns fails, the cloned LUN is destroyed.

    :param volume: volume mapping, read by key "name".
    :param snapshot: snapshot mapping, read by key "name".
    """
    new_lun_name = self._clone_snapshot(snapshot["name"])
    LOG.debug("Cloned from snapshot: %s.", new_lun_name)
    lun = self._get_lun_from_table(volume["name"])
    volume_path = lun.metadata["Path"]
    # Path has the form /vol/<flexvol>/<lun>.
    seg = volume_path.split("/")
    lun_name = seg[-1]
    flexvol_name = seg[2]
    try:
        self._swap_luns(lun_name, new_lun_name, flexvol_name)
    except Exception:
        LOG.error("Swapping LUN from %s to %s failed.", lun_name,
                  new_lun_name)
        with excutils.save_and_reraise_exception():
            # Best-effort cleanup of the clone; the original error is
            # re-raised regardless of whether the deletion succeeds.
            try:
                LOG.debug("Deleting temporary reverted LUN %s.",
                          new_lun_name)
                new_lun_path = "/vol/%s/%s" % (flexvol_name, new_lun_name)
                self.zapi_client.destroy_lun(new_lun_path)
            except Exception:
                LOG.error("Failure deleting temporary reverted LUN %s. "
                          "A manual deletion is required.", new_lun_name)
def _clone_snapshot(self, snapshot_name):
    """Returns the name of the LUN cloned from snapshot.

    Creates a LUN with same metadata as original LUN and then clones
    from snapshot. If clone operation fails, the new LUN is deleted.

    :param snapshot_name: name of the snapshot-backed LUN to clone.
    :returns: name of the newly cloned LUN ("new-<snapshot_name>").
    :raises VolumeBackendAPIException: if the snapshot LUN has no blocks.
    """
    snapshot_lun = self._get_lun_from_table(snapshot_name)
    snapshot_path = snapshot_lun.metadata["Path"]
    # Path has the form /vol/<flexvol>/<lun>.
    lun_name = snapshot_path.split("/")[-1]
    flexvol_name = snapshot_path.split("/")[2]
    LOG.info("Cloning LUN %s from snapshot %s in volume %s.", lun_name,
             snapshot_name, flexvol_name)
    metadata = snapshot_lun.metadata
    block_count = self._get_lun_block_count(snapshot_path)
    # A zero block count means there is nothing to clone from.
    if block_count == 0:
        msg = _("%s cannot be reverted using clone operation"
                " as it contains no blocks.")
        raise exception.VolumeBackendAPIException(data=msg % snapshot_name)
    new_snap_name = "new-%s" % snapshot_name
    self.zapi_client.create_lun(
        flexvol_name, new_snap_name,
        six.text_type(snapshot_lun.size), metadata)
    try:
        self._clone_lun(snapshot_name, new_snap_name,
                        block_count=block_count)
        return new_snap_name
    except Exception:
        with excutils.save_and_reraise_exception():
            # Best-effort cleanup of the partially created LUN.
            try:
                new_lun_path = "/vol/%s/%s" % (flexvol_name, new_snap_name)
                self.zapi_client.destroy_lun(new_lun_path)
            except Exception:
                LOG.error("Failure deleting temporary reverted LUN %s. "
                          "A manual deletion is required.", new_snap_name)
def _swap_luns(self, original_lun, new_lun, flexvol_name):
    """Swaps cloned and original LUNs using a temporary LUN.

    Moves the original LUN to a temporary path, then moves the cloned LUN
    to the original path (if this fails, moves the temporary LUN back as
    original LUN) and finally destroys the LUN with temporary path.

    :param original_lun: name of the LUN currently backing the volume.
    :param new_lun: name of the LUN cloned from the snapshot.
    :param flexvol_name: flexvol containing both LUNs.
    :raises VolumeBackendAPIException: if either move operation fails.
    """
    tmp_lun = "tmp-%s" % original_lun
    original_path = "/vol/%s/%s" % (flexvol_name, original_lun)
    tmp_path = "/vol/%s/%s" % (flexvol_name, tmp_lun)
    new_path = "/vol/%s/%s" % (flexvol_name, new_lun)
    LOG.debug("Original Path: %s.", original_path)
    LOG.debug("Temporary Path: %s.", tmp_path)
    LOG.debug("New Path %s.", new_path)
    try:
        self.zapi_client.move_lun(original_path, tmp_path)
    except Exception:
        # Interpolate AFTER the _() lookup: interpolating inside _()
        # defeats the translation catalog, and was inconsistent with the
        # handling of the second move below.
        msg = _("Failure moving original LUN from %s to %s.")
        raise exception.VolumeBackendAPIException(
            data=msg % (original_path, tmp_path))
    try:
        self.zapi_client.move_lun(new_path, original_path)
    except Exception:
        LOG.debug("Move temporary reverted LUN failed. Moving back "
                  "original LUN to original path.")
        # Best-effort rollback so the volume keeps its original LUN.
        try:
            self.zapi_client.move_lun(tmp_path, original_path)
        except Exception:
            LOG.error("Could not move original LUN path from %s to %s. "
                      "Cinder may lose the volume management. Please, you "
                      "should move it back manually.",
                      tmp_path, original_path)
        msg = _("Failure moving temporary reverted LUN from %s to %s.")
        raise exception.VolumeBackendAPIException(
            data=msg % (new_path, original_path))
    # The old LUN is no longer referenced; failing to remove it only
    # leaves garbage behind, so do not fail the revert because of it.
    try:
        self.zapi_client.destroy_lun(tmp_path)
    except Exception:
        LOG.error("Failure deleting old LUN %s. A manual deletion "
                  "is required.", tmp_lun)
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fake LDAP server for test harness.
This class does very little error checking, and knows nothing about ldap
class definitions. It implements the minimum emulation of the python ldap
library to work with compute.
"""
import fnmatch
from oslo_serialization import jsonutils
import six
from six.moves import range
class Store(object):
    """Singleton holder for the fake LDAP backing storage."""

    def __init__(self):
        # Direct construction is forbidden once the shared instance exists.
        if hasattr(type(self), '_instance'):
            raise Exception('Attempted to instantiate singleton')

    @classmethod
    def instance(cls):
        """Return the shared _StorageDict, creating it on first access."""
        if not hasattr(cls, '_instance'):
            cls._instance = _StorageDict()
        return cls._instance
class _StorageDict(dict):
    """Dict exposing a minimal redis-like API used by the fake LDAP."""

    def keys(self, pat=None):
        """Return all keys, optionally filtered by glob pattern *pat*."""
        found = super(_StorageDict, self).keys()
        return found if pat is None else fnmatch.filter(found, pat)

    def delete(self, key):
        """Remove *key* if present; missing keys are silently ignored."""
        self.pop(key, None)

    def flushdb(self):
        """Drop every stored key."""
        self.clear()

    def hgetall(self, key):
        """Return the hash stored at *key*, creating it when absent."""
        return self.setdefault(key, {})

    def hget(self, key, field):
        """Return *field* of the hash at *key*, creating it when absent."""
        return self.hgetall(key).setdefault(field, {})

    def hset(self, key, field, val):
        """Store *val* under *field* in the hash at *key*."""
        self.hgetall(key)[field] = val

    def hmset(self, key, value_dict):
        """Merge every field of *value_dict* into the hash at *key*."""
        self.hgetall(key).update(value_dict)
# Search scope constants, mirroring the python-ldap module's values.
SCOPE_BASE = 0
SCOPE_ONELEVEL = 1  # Not implemented
SCOPE_SUBTREE = 2
# Modify-operation constants, mirroring the python-ldap module's values.
MOD_ADD = 0
MOD_DELETE = 1
MOD_REPLACE = 2
class NO_SUCH_OBJECT(Exception):
    """Duplicate exception class from real LDAP module.

    Raised when a search or rename targets a dn with no stored object.
    """
    pass
class OBJECT_CLASS_VIOLATION(Exception):
    """Duplicate exception class from real LDAP module.

    Declared for API compatibility; not raised by this fake.
    """
    pass
class SERVER_DOWN(Exception):
    """Duplicate exception class from real LDAP module.

    Raised by every FakeLDAP method while the module-level server_fail
    flag is True.
    """
    pass
def initialize(_uri):
    """Opens a fake connection with an LDAP server.

    The uri argument is accepted for API compatibility but ignored.
    """
    return FakeLDAP()
def _match_query(query, attrs):
    """Match an ldap query to an attribute dictionary.

    The characters &, |, and ! are supported in the query. No syntax checking
    is performed, so malformed queries will not work correctly.

    :param query: parenthesized LDAP filter string, e.g. "(&(a=1)(b=2))".
    :param attrs: attribute dictionary to match against.
    """
    # cut off the parentheses
    inner = query[1:-1]
    if inner.startswith('&'):
        # cut off the & and recurse into both parenthesized operands
        l, r = _paren_groups(inner[1:])
        return _match_query(l, attrs) and _match_query(r, attrs)
    if inner.startswith('|'):
        # cut off the |
        l, r = _paren_groups(inner[1:])
        return _match_query(l, attrs) or _match_query(r, attrs)
    if inner.startswith('!'):
        # cut off the ! and the nested parentheses
        return not _match_query(query[2:-1], attrs)
    # Base case: a single key=value comparison.
    (k, _sep, v) = inner.partition('=')
    return _match(k, v, attrs)
def _paren_groups(source):
    """Split *source* into its top-level parenthesized groups."""
    depth = 0
    group_start = 0
    groups = []
    for idx, ch in enumerate(source):
        if ch == '(':
            if depth == 0:
                group_start = idx
            depth += 1
        if ch == ')':
            depth -= 1
            if depth == 0:
                groups.append(source[group_start:idx + 1])
    return groups
def _match(key, value, attrs):
    """Match a single key/value filter against an attribute dictionary."""
    if key not in attrs:
        return False
    # Wildcard search is all-or-nothing: any present attribute matches.
    if value == "*":
        return True
    if key == "objectclass":
        # An objectclass matches when the value or any known subclass of
        # it appears in the attribute list.
        return any(sub in attrs[key] for sub in _subs(value))
    return value in attrs[key]
def _subs(value):
    """Returns a list of subclass strings.

    The strings represent the ldap object class plus any subclasses that
    inherit from it. Fakeldap doesn't know about the ldap object structure,
    so subclasses need to be defined manually in the dictionary below.
    """
    known_subclasses = {'groupOfNames': ['novaProject']}
    return [value] + known_subclasses.get(value, [])
def _from_json(encoded):
    """Convert attribute values from json representation.

    Args:
    encoded -- a json encoded string (expected to hold a list)

    Returns a list of strings
    """
    return [str(x) for x in jsonutils.loads(encoded)]
def _to_json(unencoded):
    """Convert attribute values into json representation.

    Args:
    unencoded -- an unencoded string or list of strings. If it
    is a single string, it will be converted into a list.

    Returns a json string
    """
    # list() turns a bare string into a list of its characters, so a
    # single string round-trips as a list of one-character strings.
    return jsonutils.dumps(list(unencoded))
# Module-level failure switch: while True, every FakeLDAP method raises
# SERVER_DOWN, letting tests simulate an unreachable LDAP server.
server_fail = False
class FakeLDAP(object):
    """Fake LDAP connection.

    Objects are persisted in the process-wide Store singleton under keys
    of the form 'ldap:<dn>', with attribute values json-encoded.
    """

    def simple_bind_s(self, dn, password):
        """This method is ignored, but provided for compatibility."""
        if server_fail:
            raise SERVER_DOWN()
        pass

    def unbind_s(self):
        """This method is ignored, but provided for compatibility."""
        if server_fail:
            raise SERVER_DOWN()
        pass

    def add_s(self, dn, attr):
        """Add an object with the specified attributes at dn.

        attr is a list of (attribute, values) pairs; each values entry is
        stored json-encoded.
        """
        if server_fail:
            raise SERVER_DOWN()
        key = "%s%s" % (self.__prefix, dn)
        value_dict = {k: _to_json(v) for k, v in attr}
        Store.instance().hmset(key, value_dict)

    def delete_s(self, dn):
        """Remove the ldap object at specified dn."""
        if server_fail:
            raise SERVER_DOWN()
        Store.instance().delete("%s%s" % (self.__prefix, dn))

    def modify_s(self, dn, attrs):
        """Modify the object at dn using the attribute list.

        :param dn: a dn
        :param attrs: a list of tuples in the following form::

            ([MOD_ADD | MOD_DELETE | MOD_REPLACE], attribute, value)
        """
        if server_fail:
            raise SERVER_DOWN()
        store = Store.instance()
        key = "%s%s" % (self.__prefix, dn)
        for cmd, k, v in attrs:
            values = _from_json(store.hget(key, k))
            if cmd == MOD_ADD:
                values.append(v)
            elif cmd == MOD_REPLACE:
                values = [v]
            else:
                # MOD_DELETE: drop the first occurrence of the value.
                values.remove(v)
            store.hset(key, k, _to_json(values))

    def modrdn_s(self, dn, newrdn):
        """Rename the object at dn to newrdn, keeping its attributes."""
        oldobj = self.search_s(dn, SCOPE_BASE)
        if not oldobj:
            raise NO_SUCH_OBJECT()
        # Keep everything after the first RDN component of the old dn.
        newdn = "%s,%s" % (newrdn, dn.partition(',')[2])
        newattrs = oldobj[0][1]
        modlist = []
        for attrtype in newattrs.keys():
            modlist.append((attrtype, newattrs[attrtype]))
        self.add_s(newdn, modlist)
        self.delete_s(dn)

    def search_s(self, dn, scope, query=None, fields=None):
        """Search for all matching objects under dn using the query.

        Args:
        dn -- dn to search under
        scope -- only SCOPE_BASE and SCOPE_SUBTREE are supported
        query -- query to filter objects by
        fields -- fields to return. Returns all fields if not specified

        Raises NO_SUCH_OBJECT when nothing is stored under dn.
        """
        if server_fail:
            raise SERVER_DOWN()
        if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
            raise NotImplementedError(str(scope))
        store = Store.instance()
        if scope == SCOPE_BASE:
            pattern = "%s%s" % (self.__prefix, dn)
            keys = store.keys(pattern)
        else:
            # Subtree search: match any stored dn that ends with dn.
            keys = store.keys("%s*%s" % (self.__prefix, dn))
        if not keys:
            raise NO_SUCH_OBJECT()
        objects = []
        for key in keys:
            # get the attributes from the store
            attrs = store.hgetall(key)
            # turn the values from the store into lists
            attrs = {k: _from_json(v) for k, v in six.iteritems(attrs)}
            # filter the objects by query
            if not query or _match_query(query, attrs):
                # filter the attributes by fields
                attrs = {k: v for k, v in six.iteritems(attrs)
                         if not fields or k in fields}
                objects.append((key[len(self.__prefix):], attrs))
        return objects

    @property
    def __prefix(self):
        """Get the prefix to use for all keys."""
        return 'ldap:'
| |
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test making many calls and immediately cancelling most of them."""
import threading
import unittest
from grpc._cython import cygrpc
from grpc.framework.foundation import logging_pool
from tests.unit.framework.common import test_constants
# Deadline meaning "never expires" for client calls.
_INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
_EMPTY_FLAGS = 0
_EMPTY_METADATA = ()
# Completion-queue tags identifying each kind of batch event.
_SERVER_SHUTDOWN_TAG = 'server_shutdown'
_REQUEST_CALL_TAG = 'request_call'
_RECEIVE_CLOSE_ON_SERVER_TAG = 'receive_close_on_server'
_RECEIVE_MESSAGE_TAG = 'receive_message'
_SERVER_COMPLETE_CALL_TAG = 'server_complete_call'
# Fraction of RPCs allowed to complete before the rest are cancelled.
_SUCCESS_CALL_FRACTION = 1.0 / 8.0
class _State(object):
    """Mutable state shared between the test, server loop and handlers."""

    def __init__(self):
        # Guards all fields below and is used for cross-thread signalling.
        self.condition = threading.Condition()
        # Set True once the test releases all parked handler threads.
        self.handlers_released = False
        # Count of handler threads currently parked on the condition.
        self.parked_handlers = 0
        # Count of RPCs accepted by the serving loop so far.
        self.handled_rpcs = 0
def _is_cancellation_event(event):
    """Report whether *event* is the server-side receipt of a client cancel."""
    if event.tag is not _RECEIVE_CLOSE_ON_SERVER_TAG:
        return False
    return event.batch_operations[0].received_cancelled
class _Handler(object):
    """Server-side handler for one RPC, executed on a thread-pool thread."""

    def __init__(self, state, completion_queue, rpc_event):
        self._state = state
        self._lock = threading.Lock()
        self._completion_queue = completion_queue
        self._call = rpc_event.operation_call

    def __call__(self):
        # Park until the test has accepted every RPC and releases all
        # handler threads at once.
        with self._state.condition:
            self._state.parked_handlers += 1
            if self._state.parked_handlers == test_constants.THREAD_CONCURRENCY:
                self._state.condition.notify_all()
            while not self._state.handlers_released:
                self._state.condition.wait()

        with self._lock:
            self._call.start_server_batch(
                (cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),),
                _RECEIVE_CLOSE_ON_SERVER_TAG)
            self._call.start_server_batch(
                (cygrpc.operation_receive_message(_EMPTY_FLAGS),),
                _RECEIVE_MESSAGE_TAG)
        first_event = self._completion_queue.poll()
        if _is_cancellation_event(first_event):
            # Client cancelled: drain the remaining batch event.
            self._completion_queue.poll()
        else:
            # Client did not cancel: complete the call normally and drain
            # both outstanding events.
            with self._lock:
                operations = (
                    cygrpc.operation_send_initial_metadata(_EMPTY_METADATA,
                                                           _EMPTY_FLAGS),
                    cygrpc.operation_send_message(b'\x79\x57', _EMPTY_FLAGS),
                    cygrpc.operation_send_status_from_server(
                        _EMPTY_METADATA, cygrpc.StatusCode.ok, b'test details!',
                        _EMPTY_FLAGS),)
                self._call.start_server_batch(operations,
                                              _SERVER_COMPLETE_CALL_TAG)
            self._completion_queue.poll()
            self._completion_queue.poll()
def _serve(state, server, server_completion_queue, thread_pool):
    """Accept RPC_CONCURRENCY calls, handing each to a _Handler thread.

    Updates state.handled_rpcs under the shared condition so the test can
    wait until every call has been accepted, then blocks until the server
    shutdown event arrives on the completion queue.
    """
    for _ in range(test_constants.RPC_CONCURRENCY):
        call_completion_queue = cygrpc.CompletionQueue()
        server.request_call(call_completion_queue, server_completion_queue,
                            _REQUEST_CALL_TAG)
        rpc_event = server_completion_queue.poll()
        thread_pool.submit(_Handler(state, call_completion_queue, rpc_event))
        with state.condition:
            state.handled_rpcs += 1
            if test_constants.RPC_CONCURRENCY <= state.handled_rpcs:
                state.condition.notify_all()
    # Wait for the shutdown tag queued by the test's server.shutdown().
    server_completion_queue.poll()
class _QueueDriver(object):
    """Drains a completion queue on a background thread.

    Each polled event is appended to an internal list, its tag is removed
    from the *due* set, and waiters on *condition* are notified. The
    drain thread exits once every expected tag has been observed.
    """

    def __init__(self, condition, completion_queue, due):
        self._condition = condition
        self._completion_queue = completion_queue
        self._due = due
        self._events = []
        self._returned = False

    def start(self):
        """Spawn the background drain thread."""
        def drain():
            while True:
                event = self._completion_queue.poll()
                with self._condition:
                    self._events.append(event)
                    self._due.remove(event.tag)
                    self._condition.notify_all()
                    if not self._due:
                        self._returned = True
                        return
        threading.Thread(target=drain).start()

    def events(self, at_least):
        """Block until *at_least* events were collected; return them all."""
        with self._condition:
            while len(self._events) < at_least:
                self._condition.wait()
            return tuple(self._events)
class CancelManyCallsTest(unittest.TestCase):
    """Starts many concurrent RPCs and cancels most of them client-side."""

    def testCancelManyCalls(self):
        server_thread_pool = logging_pool.pool(
            test_constants.THREAD_CONCURRENCY)

        # Bring up an insecure server on an OS-assigned port.
        server_completion_queue = cygrpc.CompletionQueue()
        server = cygrpc.Server(cygrpc.ChannelArgs([]))
        server.register_completion_queue(server_completion_queue)
        port = server.add_http2_port(b'[::]:0')
        server.start()
        channel = cygrpc.Channel('localhost:{}'.format(port).encode(),
                                 cygrpc.ChannelArgs([]))

        state = _State()

        server_thread_args = (state, server, server_completion_queue,
                              server_thread_pool,)
        server_thread = threading.Thread(target=_serve, args=server_thread_args)
        server_thread.start()

        client_condition = threading.Condition()
        client_due = set()
        client_completion_queue = cygrpc.CompletionQueue()
        client_driver = _QueueDriver(client_condition, client_completion_queue,
                                     client_due)
        client_driver.start()

        # Start every client call up-front; each gets a unique tag so the
        # queue driver can track completion.
        with client_condition:
            client_calls = []
            for index in range(test_constants.RPC_CONCURRENCY):
                client_call = channel.create_call(
                    None, _EMPTY_FLAGS, client_completion_queue, b'/twinkies',
                    None, _INFINITE_FUTURE)
                operations = (
                    cygrpc.operation_send_initial_metadata(_EMPTY_METADATA,
                                                           _EMPTY_FLAGS),
                    cygrpc.operation_send_message(b'\x45\x56', _EMPTY_FLAGS),
                    cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
                    cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
                    cygrpc.operation_receive_message(_EMPTY_FLAGS),
                    cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
                tag = 'client_complete_call_{0:04d}_tag'.format(index)
                client_call.start_client_batch(operations, tag)
                client_due.add(tag)
                client_calls.append(client_call)

        # Release the parked server handlers only after every RPC has
        # been accepted and every handler thread is parked.
        with state.condition:
            while True:
                if state.parked_handlers < test_constants.THREAD_CONCURRENCY:
                    state.condition.wait()
                elif state.handled_rpcs < test_constants.RPC_CONCURRENCY:
                    state.condition.wait()
                else:
                    state.handlers_released = True
                    state.condition.notify_all()
                    break

        # Let a fraction of the calls finish, then cancel everything.
        client_driver.events(test_constants.RPC_CONCURRENCY *
                             _SUCCESS_CALL_FRACTION)
        with client_condition:
            for client_call in client_calls:
                client_call.cancel()

        with state.condition:
            server.shutdown(server_completion_queue, _SERVER_SHUTDOWN_TAG)
# Allow running this test module directly, outside the test runner.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| |
# ---------------------------------------------------------------------
#
# Copyright (c) 2012 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, --INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# ---------------------------------------------------------------------
# $Id: NetUtils.py 1047 2009-01-15 14:48:58Z graham $
#
# Network utilities
#
import logging
from string import split
from Functions import all, formatIntList, formatInt
def ipAdrStrToInt( adrStr ):
    """
    Convert a dotted ip address string (e.g. "10.0.0.1") to a 32 bit integer.
    """
    # Use the str.split method instead of the long-deprecated
    # string.split function (removed in Python 3).
    adrParts = adrStr.split(".", 3)
    return (int(adrParts[0]) << 24) + (int(adrParts[1]) << 16) + (int(adrParts[2]) << 8) + int(adrParts[3])
def addBroadcastBits( iAdr, bitCount ):
    """
    Set the host (broadcast) bits of a 32-bit integer address.

    iAdr is a 32 bit integer address; bitCount is the number of leading
    network bits, so the low (32-bitCount) bits are forced to one.
    """
    hostBits = 32 - bitCount
    if hostBits <= 0:
        # Nothing to set when the mask covers the whole address.
        return iAdr
    return iAdr | ((1 << hostBits) - 1)
def getBroadcastAddressI( adrStr, bitStr ):
    """
    Return the broadcast address, as a 32-bit integer, for the network
    whose address is adrStr (dotted decimal) and whose netmask has
    int(bitStr) leading one-bits.
    """
    iAdr = ipAdrStrToInt( adrStr ) # integer address
    bAdr = addBroadcastBits( iAdr, int( bitStr ) )
    return bAdr
def getBroadcastAddress( adrStr ):
    """
    Convert an ip address in form nn.nn.nn.nn/bb into its broadcast address format.
    /bb is optional and assumes caller knows what they are doing.
    """
    # Use the str.split method instead of the deprecated string.split
    # function (removed in Python 3).
    netParts = adrStr.split( "/", 1 )
    if ( len(netParts) == 1 ):
        # No netmask suffix: nothing to compute.
        return adrStr
    # else has netmask part: compute the integer broadcast address and
    # format it back to dotted decimal.  (The old code also computed an
    # unused ipAdrStrToInt value here; that redundant call is removed.)
    bAdr = getBroadcastAddressI( netParts[0], netParts[1] )
    return "%i.%i.%i.%i" % ( ((bAdr>>24)&0xFF), ((bAdr>>16)&0xFF), ((bAdr>>8)&0xFF), (bAdr&0xFF) )
# Helper functions for processing IP addresses as lists of int values
# (I think this representation will be easier to migrate to also support IPv6 - GK)
def parseIpAdrs(ipadrs):
    """
    Parse IP address in dotted decimal form, and return a sequence of 4 numbers

    Any trailing "/netmask" or ":port" suffix is ignored.
    """
    # Strip off any port and/or netmask bits before splitting the octets.
    bare = ipadrs
    for sep in ('/', ':'):
        bare = bare.split(sep)[0]
    return map(int, bare.split('.'))
def parseNetAdrs(netadrs):
    """
    Parse network address specification (e.g. "10.0.0.0/8"), returning a pair of:
    (a) IP address bytes sequence
    (b) number of '1' bits in netmask
    """
    adrpart, maskpart = netadrs.split('/')
    return (parseIpAdrs(adrpart), int(maskpart))
def formatIpAdrs(ipbytes):
    """
    Format dotted-decimal IP address string from IP address bytes
    """
    # Equivalent to "%d.%d.%d.%d" % tuple(ipbytes)
    return formatIntList(ipbytes,".")
def formatNetAdrs(ipbytes,maskbits):
    """
    Format network address string from IP address bytes and mask bit count
    """
    return "%s/%d" % (formatIpAdrs(ipbytes), maskbits)
def mkNetMask(ipbytes,maskbits=None):
    """
    Make a network mask value as a sequence of IP address bytes
    May be called with 1 or 2 arguments:
      if 1 argument, it is a pair of (netbytes,maskbits)
      if 2 arguments, the first is just netbytes, and the second is maskbits
    """
    # Test for None explicitly: the previous "if not maskbits" check
    # wrongly treated an explicit maskbits of 0 as "argument omitted"
    # and then crashed trying to unpack the byte sequence.
    if maskbits is None:
        (ipbytes, maskbits) = ipbytes
    netmask = []
    for _b in ipbytes:
        if maskbits >= 8:
            m = 255
        elif maskbits > 0:
            # Top 'maskbits' bits of the byte set, e.g. 3 -> 0b11100000.
            m = (0xFF00 >> maskbits) & 0xFF
        else:
            m = 0
        netmask.append(m)
        maskbits -= 8
    return netmask
def mkBroadcastAddress(netbytes,maskbits=None):
    """
    Make broadcast address for a given network
    May be called with 1 or 2 arguments:
      if 1 argument, it is a pair of (netbytes,maskbits)
      if 2 arguments, the first is just netbytes, and the second is maskbits
    """
    # Broadcast byte = network byte with all host bits forced to one.
    def makeadrbyte(m, a): return (~m | a) & 0xFF
    # Test for None explicitly so an explicit maskbits of 0 is honoured
    # (the previous "if not maskbits" check mis-detected it as omitted).
    if maskbits is None:
        (netbytes, maskbits) = netbytes
    netmask = mkNetMask(netbytes, maskbits)
    return map(makeadrbyte, netmask, netbytes)
def ipInNetwork(ipbytes, netbytes, maskbits=None):
    """
    Test if IP address is part of given network
    May be called with 2 or 3 arguments:
      if 2 arguments, the second is a pair of (netbytes,maskbits)
      if 3 arguments, the second is just netbytes, and the third is maskbits
    """
    def testadrbyte(m, n, a): return (m & a) == (m & n)
    # Test for None explicitly so an explicit maskbits of 0 is honoured
    # (the previous "if not maskbits" check mis-detected it as omitted).
    if maskbits is None:
        (netbytes, maskbits) = netbytes
    netmask = mkNetMask(netbytes, maskbits)
    # NOTE: 'all' here is the map-style helper imported from Functions
    # at the top of this module, not the Python builtin.
    return all(testadrbyte, netmask, netbytes, ipbytes)
def getHostIpsAndMask():
    """
    Helper function returns list of IP networks connected to the
    current host.

    Each value is in the form address/maskbits, e.g.
    10.0.0.0/8

    The netmask is guessed from the classful (A/B/C) range of each
    address, not read from the interface configuration.
    """
    result = list()
    from socket import gethostbyname_ex, gethostname
    try:
        hosts = gethostbyname_ex( gethostname( ) )
        for addr in hosts[2]:
            # convert to a sequence of int octets
            byts = parseIpAdrs(addr)
            if byts[0] >= 192:
                # class C
                result.append( "%i.%i.%i.0/24" % (byts[0],byts[1],byts[2]) )
            elif byts[0] >= 128:
                # class B
                result.append( "%i.%i.0.0/16" % (byts[0],byts[1]) )
            else:
                # class A
                result.append( "%i.0.0.0/8" % (byts[0]) )
    except Exception, ex :
        # Python 2 exception syntax; log the failure and fall through to
        # return whatever was collected so far.
        _log = logging.getLogger('WebBrickLibs.MiscLib.NetUtils')
        _log.exception(ex)
    return result
# Helper functions for processing MAC addresses as lists of integers
def parseMacAdrs(macadrs):
    """
    Parse MAC address in colon-hexadecimal form, and return a sequence of 6 numbers
    """
    # Named hexToInt rather than hex to avoid shadowing the builtin hex().
    def hexToInt(h): return int(h, 16)
    return map(hexToInt, macadrs.split(':'))
def formatMacAdrs(macbytes,sep=":"):
    """
    Format MAC address as separator-joined two-digit uppercase hexadecimal
    values (default separator ":"), as used by webBrick commands.
    """
    return formatIntList(macbytes, sep, formatInt("%02X"))
# test cases
def _test():
    """Self-checks for the address helpers (executed at import time)."""
    i = parseIpAdrs("193.123.216.121")
    x = parseIpAdrs("193.123.216.200")
    n = parseNetAdrs("193.123.216.64/26")
    b = mkBroadcastAddress(*n)
    assert formatIpAdrs(b) == "193.123.216.127"
    assert ipInNetwork(i,*n)
    assert ipInNetwork(b,*n)
    assert not ipInNetwork(x,*n)
    assert parseMacAdrs("01:34:67:9a:BC:eF") == [1,52,103,154,188,239]
    assert formatMacAdrs([1,52,103,154,188,239],sep='-') == "01-34-67-9A-BC-EF"

# Run the self-checks whenever the module is imported.
_test()
# End $Id: NetUtils.py 1047 2009-01-15 14:48:58Z graham $
| |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from polyaxon import settings
from polyaxon.proxies.schemas.gateway.api import get_api_location_config
from polyaxon.proxies.schemas.gateway.auth import (
get_auth_config,
get_auth_location_config,
)
from polyaxon.proxies.schemas.gateway.dns import get_dns_config, get_resolver
from polyaxon.proxies.schemas.gateway.redirect import get_redirect_config
from polyaxon.proxies.schemas.gateway.services import (
get_plugins_location_config,
get_services_location_config,
)
from polyaxon.proxies.schemas.gateway.ssl import get_ssl_config
from polyaxon.proxies.schemas.gateway.streams import get_streams_location_config
from polyaxon.utils.test_utils import BaseTestCase
@pytest.mark.proxies_mark
class TestGatewaySchemas(BaseTestCase):
    """Checks the nginx snippets rendered for the gateway proxy."""

    # Ensure proxy settings are (re)initialized for every test.
    SET_PROXIES_SETTINGS = True

    def test_ssl(self):
        # Default ssl_path points under /etc/ssl/polyaxon.
        expected = r"""
# SSL
ssl_session_timeout 1d;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;
# intermediate configuration
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers on;
# OCSP Stapling
ssl_stapling on;
ssl_stapling_verify on;
resolver 1.1.1.1 1.0.0.1 [2606:4700:4700::1111] [2606:4700:4700::1001] 8.8.8.8 8.8.4.4 [2001:4860:4860::8888] [2001:4860:4860::8844] 208.67.222.222 208.67.220.220 [2620:119:35::35] [2620:119:53::53] valid=60s;
resolver_timeout 2s;
ssl_certificate /etc/ssl/polyaxon/polyaxon.com.crt;
ssl_certificate_key /etc/ssl/polyaxon/polyaxon.com.key;
"""  # noqa
        assert get_ssl_config() == expected
        # Changing ssl_path must be reflected in the certificate paths.
        expected = r"""
# SSL
ssl_session_timeout 1d;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;
# intermediate configuration
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers on;
# OCSP Stapling
ssl_stapling on;
ssl_stapling_verify on;
resolver 1.1.1.1 1.0.0.1 [2606:4700:4700::1111] [2606:4700:4700::1001] 8.8.8.8 8.8.4.4 [2001:4860:4860::8888] [2001:4860:4860::8844] 208.67.222.222 208.67.220.220 [2620:119:35::35] [2620:119:53::53] valid=60s;
resolver_timeout 2s;
ssl_certificate /foo/polyaxon.com.crt;
ssl_certificate_key /foo/polyaxon.com.key;
"""  # noqa
        settings.PROXIES_CONFIG.ssl_path = "/foo"
        assert get_ssl_config() == expected

    def test_redirect_config(self):
        expected = r"""
server {
    listen 80;
    return 301 https://$host$request_uri;
}
"""  # noqa
        # The HTTP->HTTPS redirect block is only emitted when SSL is on.
        settings.PROXIES_CONFIG.ssl_enabled = False
        assert get_redirect_config() == ""
        settings.PROXIES_CONFIG.ssl_enabled = True
        assert get_redirect_config() == expected
@pytest.mark.proxies_mark
class TestGatewayServicesSchemas(BaseTestCase):
    """Nginx `location` generation for `/services/v1/...` run routes.

    Each test compares the rendered location block (byte-for-byte) against an
    expected nginx snippet, varying the DNS resolver / prefix / custom cluster
    settings and whether auth is enabled.
    """

    SET_PROXIES_SETTINGS = True

    def test_service_dns_resolver(self):
        """Resolver line appears only when dns_use_resolver is enabled."""
        settings.PROXIES_CONFIG.auth_enabled = False
        # No `resolver` directive expected while dns_use_resolver is False.
        expected = r"""
location ~ /services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_use_resolver = False
        resolver = get_resolver()
        assert (
            get_services_location_config(resolver=resolver, auth="", rewrite=False)
            == expected
        )
        # With auth + resolver enabled the block gains auth_request and
        # resolver directives and the proxy_pass domain uses the custom cluster.
        expected = r"""
location ~ /services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver kube-dns.kube-system.svc.new-dns valid=5s;
proxy_pass http://plx-operation-$4.$1.svc.new-dns;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.kube-system"
        settings.PROXIES_CONFIG.dns_use_resolver = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=False
            )
            == expected
        )

    def test_services_dns_backend(self):
        """Custom cluster name flows into both resolver and proxy_pass domains."""
        settings.PROXIES_CONFIG.auth_enabled = False
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
resolver kube-dns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "kube-dns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            get_services_location_config(resolver=resolver, auth="", rewrite=False)
            == expected
        )
        expected = r"""
location ~ /services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver kube-dns.kube-system.svc.new-dns valid=5s;
proxy_pass http://plx-operation-$4.$1.svc.new-dns;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.kube-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=False
            )
            == expected
        )

    def test_services_dns_prefix(self):
        """dns_prefix replaces the default `kube-dns.kube-system` resolver host."""
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_prefix = "coredns.kube-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "coredns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=False
            )
            == expected
        )
        expected = r"""
location ~ /services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver kube-dns.new-system.svc.new-dns valid=5s;
proxy_pass http://plx-operation-$4.$1.svc.new-dns;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.new-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.new-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=False
            )
            == expected
        )
@pytest.mark.proxies_mark
class TestGatewayRewriteServicesSchemas(BaseTestCase):
    """Same scenarios as the services tests but with `rewrite=True`.

    The rewrite variant serves `/rewrite-services/v1/...` and strips the
    route prefix via a `rewrite ... /$5 break;` directive before proxying.
    """

    SET_PROXIES_SETTINGS = True

    def test_service_dns_resolver(self):
        """Resolver line appears only when dns_use_resolver is enabled."""
        settings.PROXIES_CONFIG.auth_enabled = False
        expected = r"""
location ~ /rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
rewrite_log on;
rewrite ^/rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_use_resolver = False
        resolver = get_resolver()
        assert (
            get_services_location_config(resolver=resolver, auth="", rewrite=True)
            == expected
        )
        expected = r"""
location ~ /rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver kube-dns.kube-system.svc.new-dns valid=5s;
rewrite_log on;
rewrite ^/rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4.$1.svc.new-dns;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.kube-system"
        settings.PROXIES_CONFIG.dns_use_resolver = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=True
            )
            == expected
        )

    def test_services_dns_backend(self):
        """Custom cluster name flows into both resolver and proxy_pass domains."""
        settings.PROXIES_CONFIG.auth_enabled = False
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
resolver kube-dns.kube-system.svc.cluster.local valid=5s;
rewrite_log on;
rewrite ^/rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "kube-dns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            get_services_location_config(resolver=resolver, auth="", rewrite=True)
            == expected
        )
        expected = r"""
location ~ /rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver kube-dns.kube-system.svc.new-dns valid=5s;
rewrite_log on;
rewrite ^/rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4.$1.svc.new-dns;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.kube-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=True
            )
            == expected
        )

    def test_services_dns_prefix(self):
        """dns_prefix replaces the default `kube-dns.kube-system` resolver host."""
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver coredns.kube-system.svc.cluster.local valid=5s;
rewrite_log on;
rewrite ^/rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_prefix = "coredns.kube-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "coredns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=True
            )
            == expected
        )
        expected = r"""
location ~ /rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver kube-dns.new-system.svc.new-dns valid=5s;
rewrite_log on;
rewrite ^/rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4.$1.svc.new-dns;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.new-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.new-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=True
            )
            == expected
        )
@pytest.mark.proxies_mark
class TestGatewayExternalSchemas(BaseTestCase):
    """Location generation for `/external/v1/...` run routes (`external=True`).

    External services proxy to `plx-operation-$4-ext` backends and — per the
    expected snippets below — never include `auth_request` directives, even
    when auth is enabled and an auth config is passed.
    """

    SET_PROXIES_SETTINGS = True

    def test_external_dns_resolver(self):
        """Resolver line appears only when dns_use_resolver is enabled."""
        settings.PROXIES_CONFIG.auth_enabled = False
        expected = r"""
location ~ /external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
proxy_pass http://plx-operation-$4-ext.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_use_resolver = False
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth="", rewrite=False, external=True
            )
            == expected
        )
        # NOTE: even with auth enabled below, the expected external config
        # carries no auth_request directive.
        expected = r"""
location ~ /external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
resolver kube-dns.kube-system.svc.new-dns valid=5s;
proxy_pass http://plx-operation-$4-ext.$1.svc.new-dns;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.kube-system"
        settings.PROXIES_CONFIG.dns_use_resolver = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=False, external=True
            )
            == expected
        )

    def test_external_dns_backend(self):
        """Custom cluster name flows into both resolver and proxy_pass domains."""
        settings.PROXIES_CONFIG.auth_enabled = False
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
resolver kube-dns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://plx-operation-$4-ext.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "kube-dns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth="", rewrite=False, external=True
            )
            == expected
        )
        expected = r"""
location ~ /external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
resolver kube-dns.kube-system.svc.new-dns valid=5s;
proxy_pass http://plx-operation-$4-ext.$1.svc.new-dns;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.kube-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=False, external=True
            )
            == expected
        )

    def test_external_dns_prefix(self):
        """dns_prefix replaces the default `kube-dns.kube-system` resolver host."""
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://plx-operation-$4-ext.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_prefix = "coredns.kube-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "coredns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver,
                auth=get_auth_config(),
                rewrite=False,
                external=True,
            )
            == expected
        )
        expected = r"""
location ~ /external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
resolver kube-dns.new-system.svc.new-dns valid=5s;
proxy_pass http://plx-operation-$4-ext.$1.svc.new-dns;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.new-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.new-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=False, external=True
            )
            == expected
        )
@pytest.mark.proxies_mark
class TestGatewayRewriteExternalSchemas(BaseTestCase):
    """Location generation for `/rewrite-external/v1/...` (`rewrite` + `external`).

    Combines the external backend (`plx-operation-$4-ext`) with the prefix
    stripping `rewrite ... /$5 break;` directive; like the plain external
    tests, the expected snippets contain no `auth_request` directives.
    """

    SET_PROXIES_SETTINGS = True

    def test_external_dns_resolver(self):
        """Resolver line appears only when dns_use_resolver is enabled."""
        settings.PROXIES_CONFIG.auth_enabled = False
        expected = r"""
location ~ /rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
rewrite_log on;
rewrite ^/rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4-ext.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_use_resolver = False
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth="", rewrite=True, external=True
            )
            == expected
        )
        expected = r"""
location ~ /rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
resolver kube-dns.kube-system.svc.new-dns valid=5s;
rewrite_log on;
rewrite ^/rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4-ext.$1.svc.new-dns;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.kube-system"
        settings.PROXIES_CONFIG.dns_use_resolver = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=True, external=True
            )
            == expected
        )

    def test_external_dns_backend(self):
        """Custom cluster name flows into both resolver and proxy_pass domains."""
        settings.PROXIES_CONFIG.auth_enabled = False
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
resolver kube-dns.kube-system.svc.cluster.local valid=5s;
rewrite_log on;
rewrite ^/rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4-ext.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "kube-dns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth="", rewrite=True, external=True
            )
            == expected
        )
        expected = r"""
location ~ /rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
resolver kube-dns.kube-system.svc.new-dns valid=5s;
rewrite_log on;
rewrite ^/rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4-ext.$1.svc.new-dns;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.kube-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver,
                auth=get_auth_config(),
                rewrite=True,
                external=True,
            )
            == expected
        )

    def test_external_dns_prefix(self):
        """dns_prefix replaces the default `kube-dns.kube-system` resolver host."""
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
resolver coredns.kube-system.svc.cluster.local valid=5s;
rewrite_log on;
rewrite ^/rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4-ext.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_prefix = "coredns.kube-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "coredns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=True, external=True
            )
            == expected
        )
        expected = r"""
location ~ /rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
resolver kube-dns.new-system.svc.new-dns valid=5s;
rewrite_log on;
rewrite ^/rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4-ext.$1.svc.new-dns;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.new-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.new-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            get_services_location_config(
                resolver=resolver, auth=get_auth_config(), rewrite=True, external=True
            )
            == expected
        )
@pytest.mark.proxies_mark
class TestGatewayPluginsSchemas(BaseTestCase):
    """Per-plugin `location` generation (tensorboard/notebook proxy routes).

    `get_plugins_location_config` returns one rendered location block per
    entry in `proxy_services`; tests join them with newlines for comparison.
    """

    SET_PROXIES_SETTINGS = True

    def test_no_plugins(self):
        """No proxy_services configured -> no location blocks."""
        assert get_plugins_location_config(resolver="", auth="") == []

    def test_plugins(self):
        """One location block is produced per configured plugin service."""
        proxy_services = {"tensorboard": {"port": 6006}, "notebook": {"port": 8888}}
        assert (
            len(
                get_plugins_location_config(
                    resolver="", auth="", proxy_services=proxy_services
                )
            )
            == 2
        )

    def test_plugins_dns_resolver(self):
        """Resolver/auth directives appear in each plugin block when enabled."""
        settings.PROXIES_CONFIG.auth_enabled = False
        proxy_services = {"tensorboard": {"port": 6006}, "notebook": {"port": 8888}}
        expected = r"""
location ~ /tensorboard/proxy/([-_.:\w]+)/(.*) {
rewrite_log on;
rewrite ^/tensorboard/proxy/([-_.:\w]+)/(.*) /tensorboard/proxy/$1/$2 break;
proxy_pass http://$1:6006;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_buffering off;
}

location ~ /notebook/proxy/([-_.:\w]+)/(.*) {
rewrite_log on;
rewrite ^/notebook/proxy/([-_.:\w]+)/(.*) /notebook/proxy/$1/$2 break;
proxy_pass http://$1:8888;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_use_resolver = False
        resolver = get_resolver()
        assert (
            "\n".join(
                get_plugins_location_config(
                    resolver=resolver, auth="", proxy_services=proxy_services
                )
            )
            == expected
        )
        expected = r"""
location ~ /tensorboard/proxy/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver kube-dns.kube-system.svc.new-dns valid=5s;
rewrite_log on;
rewrite ^/tensorboard/proxy/([-_.:\w]+)/(.*) /tensorboard/proxy/$1/$2 break;
proxy_pass http://$1:6006;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_buffering off;
}

location ~ /notebook/proxy/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver kube-dns.kube-system.svc.new-dns valid=5s;
rewrite_log on;
rewrite ^/notebook/proxy/([-_.:\w]+)/(.*) /notebook/proxy/$1/$2 break;
proxy_pass http://$1:8888;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.kube-system"
        settings.PROXIES_CONFIG.dns_use_resolver = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        resolver = get_resolver()
        assert (
            "\n".join(
                get_plugins_location_config(
                    resolver=resolver,
                    auth=get_auth_config(),
                    proxy_services=proxy_services,
                )
            )
            == expected
        )

    def test_plugins_dns_backend(self):
        """Custom cluster name flows into each plugin block's resolver line."""
        proxy_services = {"tensorboard": {"port": 6006}, "notebook": {"port": 8888}}
        settings.PROXIES_CONFIG.auth_enabled = False
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /tensorboard/proxy/([-_.:\w]+)/(.*) {
resolver kube-dns.kube-system.svc.cluster.local valid=5s;
rewrite_log on;
rewrite ^/tensorboard/proxy/([-_.:\w]+)/(.*) /tensorboard/proxy/$1/$2 break;
proxy_pass http://$1:6006;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_buffering off;
}

location ~ /notebook/proxy/([-_.:\w]+)/(.*) {
resolver kube-dns.kube-system.svc.cluster.local valid=5s;
rewrite_log on;
rewrite ^/notebook/proxy/([-_.:\w]+)/(.*) /notebook/proxy/$1/$2 break;
proxy_pass http://$1:8888;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "kube-dns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            "\n".join(
                get_plugins_location_config(
                    resolver=resolver, auth="", proxy_services=proxy_services
                )
            )
            == expected
        )
        expected = r"""
location ~ /tensorboard/proxy/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver kube-dns.kube-system.svc.new-dns valid=5s;
rewrite_log on;
rewrite ^/tensorboard/proxy/([-_.:\w]+)/(.*) /tensorboard/proxy/$1/$2 break;
proxy_pass http://$1:6006;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_buffering off;
}

location ~ /notebook/proxy/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver kube-dns.kube-system.svc.new-dns valid=5s;
rewrite_log on;
rewrite ^/notebook/proxy/([-_.:\w]+)/(.*) /notebook/proxy/$1/$2 break;
proxy_pass http://$1:8888;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.kube-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            "\n".join(
                get_plugins_location_config(
                    resolver=resolver,
                    auth=get_auth_config(),
                    proxy_services=proxy_services,
                )
            )
            == expected
        )

    def test_plugins_dns_prefix(self):
        """dns_prefix replaces the default resolver host in each plugin block."""
        proxy_services = {"tensorboard": {"port": 6006}, "notebook": {"port": 8888}}
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location ~ /tensorboard/proxy/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver coredns.kube-system.svc.cluster.local valid=5s;
rewrite_log on;
rewrite ^/tensorboard/proxy/([-_.:\w]+)/(.*) /tensorboard/proxy/$1/$2 break;
proxy_pass http://$1:6006;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_buffering off;
}

location ~ /notebook/proxy/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver coredns.kube-system.svc.cluster.local valid=5s;
rewrite_log on;
rewrite ^/notebook/proxy/([-_.:\w]+)/(.*) /notebook/proxy/$1/$2 break;
proxy_pass http://$1:8888;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_prefix = "coredns.kube-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "coredns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert (
            "\n".join(
                get_plugins_location_config(
                    resolver=resolver,
                    auth=get_auth_config(),
                    proxy_services=proxy_services,
                )
            )
            == expected
        )
        expected = r"""
location ~ /tensorboard/proxy/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver kube-dns.new-system.svc.new-dns valid=5s;
rewrite_log on;
rewrite ^/tensorboard/proxy/([-_.:\w]+)/(.*) /tensorboard/proxy/$1/$2 break;
proxy_pass http://$1:6006;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_buffering off;
}

location ~ /notebook/proxy/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver kube-dns.new-system.svc.new-dns valid=5s;
rewrite_log on;
rewrite ^/notebook/proxy/([-_.:\w]+)/(.*) /notebook/proxy/$1/$2 break;
proxy_pass http://$1:8888;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.new-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.new-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            "\n".join(
                get_plugins_location_config(
                    resolver=resolver,
                    auth=get_auth_config(),
                    proxy_services=proxy_services,
                )
            )
            == expected
        )
@pytest.mark.proxies_mark
class TestGatewayStreamsSchemas(BaseTestCase):
    """`/streams/` location generation for the streams service.

    Default upstream is `polyaxon-polyaxon-streams`; a custom host/port pair
    from PROXIES_CONFIG replaces it, and auth/resolver directives are added
    when the corresponding settings are enabled.
    """

    SET_PROXIES_SETTINGS = True

    def test_streams_location_with_auth_config(self):
        """Custom streams host/port and auth_request directives are honored."""
        expected = r"""
location /streams/ {
proxy_pass http://polyaxon-polyaxon-streams;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        assert get_streams_location_config(resolver="", auth="") == expected
        # Overriding host/port switches the upstream to `foo:8888`.
        settings.PROXIES_CONFIG.streams_port = 8888
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.streams_host = "foo"
        expected = r"""
location /streams/ {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
proxy_pass http://foo:8888;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        assert (
            get_streams_location_config(resolver="", auth=get_auth_config()) == expected
        )

    def test_streams_location_with_dns_prefix(self):
        """Resolver line uses the configured dns_prefix and custom cluster."""
        settings.PROXIES_CONFIG.auth_enabled = False
        settings.PROXIES_CONFIG.dns_use_resolver = True
        expected = r"""
location /streams/ {
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://polyaxon-polyaxon-streams;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.dns_prefix = "coredns.kube-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "coredns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        assert get_streams_location_config(resolver=resolver, auth="") == expected
        expected = r"""
location /streams/ {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver kube-dns.new-system.svc.new-dns valid=5s;
proxy_pass http://polyaxon-polyaxon-streams;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
""" # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.dns_prefix = "kube-dns.new-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "new-dns"
        assert get_dns_config() == "kube-dns.new-system.svc.new-dns"
        resolver = get_resolver()
        assert (
            get_streams_location_config(resolver=resolver, auth=get_auth_config())
            == expected
        )
@pytest.mark.proxies_mark
class TestGatewayApiSchemas(BaseTestCase):
    """Checks the rendered nginx blocks for the API service and the auth subrequest."""

    SET_PROXIES_SETTINGS = True

    def test_api_location_config(self):
        """API locations are rendered for every exposed path across upstream variants."""
        # Default: plain in-cluster service upstream for all five locations.
        expected = r"""
location = / {
    proxy_pass http://polyaxon-polyaxon-api;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
location /api/v1/ {
    proxy_pass http://polyaxon-polyaxon-api;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
location /ui/ {
    proxy_pass http://polyaxon-polyaxon-api;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
location /sso/ {
    proxy_pass http://polyaxon-polyaxon-api;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
location /static/ {
    proxy_pass http://polyaxon-polyaxon-api;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
"""  # noqa
        assert get_api_location_config(resolver="") == expected
        settings.PROXIES_CONFIG.api_port = 8888
        settings.PROXIES_CONFIG.api_host = "foo"
        # Custom in-cluster host and port.
        expected = r"""
location = / {
    proxy_pass http://foo:8888;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
location /api/v1/ {
    proxy_pass http://foo:8888;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
location /ui/ {
    proxy_pass http://foo:8888;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
location /sso/ {
    proxy_pass http://foo:8888;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
location /static/ {
    proxy_pass http://foo:8888;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $http_host;
    proxy_buffering off;
}
"""  # noqa
        assert get_api_location_config(resolver="") == expected
        settings.PROXIES_CONFIG.api_port = 443
        settings.PROXIES_CONFIG.api_host = "polyaxon.foo.com"
        # External https host: SNI is enabled and the Host header is pinned.
        expected = r"""
location = / {
    proxy_ssl_server_name on;
    proxy_pass https://polyaxon.foo.com;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
location /api/v1/ {
    proxy_ssl_server_name on;
    proxy_pass https://polyaxon.foo.com;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
location /ui/ {
    proxy_ssl_server_name on;
    proxy_pass https://polyaxon.foo.com;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
location /sso/ {
    proxy_ssl_server_name on;
    proxy_pass https://polyaxon.foo.com;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
location /static/ {
    proxy_ssl_server_name on;
    proxy_pass https://polyaxon.foo.com;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
"""  # noqa
        assert get_api_location_config(resolver="") == expected
        # Add proxy
        settings.PROXIES_CONFIG.has_forward_proxy = True
        settings.PROXIES_CONFIG.forward_proxy_port = 443
        settings.PROXIES_CONFIG.forward_proxy_host = "moo.foo.com"
        # With a forward proxy, traffic targets the local tunnel (127.0.0.1:8443)
        # while the Host header still carries the real external host.
        expected = r"""
location = / {
    proxy_ssl_server_name on;
    proxy_pass https://127.0.0.1:8443;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
location /api/v1/ {
    proxy_ssl_server_name on;
    proxy_pass https://127.0.0.1:8443;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
location /ui/ {
    proxy_ssl_server_name on;
    proxy_pass https://127.0.0.1:8443;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
location /sso/ {
    proxy_ssl_server_name on;
    proxy_pass https://127.0.0.1:8443;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
location /static/ {
    proxy_ssl_server_name on;
    proxy_pass https://127.0.0.1:8443;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Origin "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host polyaxon.foo.com;
    proxy_buffering off;
}
"""  # noqa
        assert get_api_location_config(resolver="") == expected

    def test_auth_config(self):
        """Auth snippet is the subrequest pair when enabled, empty string otherwise."""
        settings.PROXIES_CONFIG.auth_enabled = True
        expected = r"""
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
"""  # noqa
        assert get_auth_config() == expected
        settings.PROXIES_CONFIG.auth_enabled = False
        assert get_auth_config() == ""

    def test_auth_location_config(self):
        """Internal auth endpoint proxies to the API, optionally with a resolver line."""
        settings.PROXIES_CONFIG.auth_use_resolver = False
        settings.PROXIES_CONFIG.dns_use_resolver = False
        settings.PROXIES_CONFIG.auth_enabled = True
        expected = r"""
location = /auth/v1/ {
    proxy_pass http://polyaxon-polyaxon-api;
    proxy_pass_request_body off;
    proxy_set_header Content-Length "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_set_header X-Origin-URI $request_uri;
    proxy_set_header X-Origin-Method $request_method;
    proxy_set_header Host $http_host;
    internal;
}
"""  # noqa
        assert get_auth_location_config(resolver="") == expected
        # Use resolver but do not enable it for auth
        settings.PROXIES_CONFIG.dns_use_resolver = True
        settings.PROXIES_CONFIG.dns_prefix = "coredns.kube-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "coredns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        expected = r"""
location = /auth/v1/ {
    proxy_pass http://polyaxon-polyaxon-api;
    proxy_pass_request_body off;
    proxy_set_header Content-Length "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_set_header X-Origin-URI $request_uri;
    proxy_set_header X-Origin-Method $request_method;
    proxy_set_header Host $http_host;
    internal;
}
"""  # noqa
        assert get_auth_location_config(resolver=resolver) == expected
        # Enable resolver for auth
        settings.PROXIES_CONFIG.auth_use_resolver = True
        expected = r"""
location = /auth/v1/ {
    resolver coredns.kube-system.svc.cluster.local valid=5s;
    proxy_pass http://polyaxon-polyaxon-api;
    proxy_pass_request_body off;
    proxy_set_header Content-Length "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_set_header X-Origin-URI $request_uri;
    proxy_set_header X-Origin-Method $request_method;
    proxy_set_header Host $http_host;
    internal;
}
"""  # noqa
        assert get_auth_location_config(resolver=resolver) == expected

    def test_external_auth_location_config(self):
        """External auth service: https upstream, pinned Host, optional forward proxy."""
        settings.PROXIES_CONFIG.auth_use_resolver = False
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.auth_external = "https://cloud.polyaxon.com"
        expected = r"""
location = /auth/v1/ {
    proxy_ssl_server_name on;
    proxy_pass https://cloud.polyaxon.com;
    proxy_pass_request_body off;
    proxy_set_header Content-Length "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_set_header X-Origin-URI $request_uri;
    proxy_set_header X-Origin-Method $request_method;
    proxy_set_header Host cloud.polyaxon.com;
    internal;
}
"""  # noqa
        assert get_auth_location_config(resolver="") == expected
        # Add proxy
        settings.PROXIES_CONFIG.has_forward_proxy = True
        settings.PROXIES_CONFIG.forward_proxy_port = 443
        settings.PROXIES_CONFIG.forward_proxy_host = "123.123.123.123"
        expected = r"""
location = /auth/v1/ {
    proxy_ssl_server_name on;
    proxy_pass https://127.0.0.1:8443;
    proxy_pass_request_body off;
    proxy_set_header Content-Length "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_set_header X-Origin-URI $request_uri;
    proxy_set_header X-Origin-Method $request_method;
    proxy_set_header Host cloud.polyaxon.com;
    internal;
}
"""  # noqa
        assert get_auth_location_config(resolver="") == expected
        # Use resolver but do not enable it for auth
        settings.PROXIES_CONFIG.has_forward_proxy = False
        settings.PROXIES_CONFIG.dns_use_resolver = True
        settings.PROXIES_CONFIG.dns_prefix = "coredns.kube-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_dns_config() == "coredns.kube-system.svc.cluster.local"
        resolver = get_resolver()
        expected = r"""
location = /auth/v1/ {
    proxy_ssl_server_name on;
    proxy_pass https://cloud.polyaxon.com;
    proxy_pass_request_body off;
    proxy_set_header Content-Length "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_set_header X-Origin-URI $request_uri;
    proxy_set_header X-Origin-Method $request_method;
    proxy_set_header Host cloud.polyaxon.com;
    internal;
}
"""  # noqa
        assert get_auth_location_config(resolver=resolver) == expected
        # Add proxy
        settings.PROXIES_CONFIG.has_forward_proxy = True
        expected = r"""
location = /auth/v1/ {
    proxy_ssl_server_name on;
    proxy_pass https://127.0.0.1:8443;
    proxy_pass_request_body off;
    proxy_set_header Content-Length "";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_set_header X-Origin-URI $request_uri;
    proxy_set_header X-Origin-Method $request_method;
    proxy_set_header Host cloud.polyaxon.com;
    internal;
}
"""  # noqa
        assert get_auth_location_config(resolver="") == expected
| |
# Copyright 2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .common import BaseTest, functional
# datapipeline is not available in us-east-2 where we run our functional tests,
# so we force an override here; all functional tests below pass this region
# explicitly to replay_flight_data / load_policy.
REGION = "us-west-2"
class DataPipelineTest(BaseTest):
    """Tests for the Cloud Custodian ``datapipeline`` resource: reporting,
    delete action, and the tag/mark/unmark family of tag actions."""

    def test_reporting(self):
        """A pipeline created with a tag is matched by a ``tag:`` filter and
        its description fields are flattened onto the resource dict."""
        factory = self.replay_flight_data("test_datapipeline_reporting")
        session = factory()
        client = session.client("datapipeline")
        pipeline = client.create_pipeline(name="PipelinesFTW", uniqueId="PipelinesFTW")
        pipe_id = pipeline["pipelineId"]
        # Minimal runnable definition: default object, hourly schedule, and a
        # trivial shell activity, so the pipeline can be activated.
        client.put_pipeline_definition(
            pipelineId=pipe_id,
            pipelineObjects=[
                {
                    "id": "Default",
                    "name": "Default",
                    "fields": [{"key": "workerGroup", "stringValue": "workerGroup"}],
                },
                {
                    "id": "Schedule",
                    "name": "Schedule",
                    "fields": [
                        {"key": "startDateTime", "stringValue": "2012-12-12T00:00:00"},
                        {"key": "type", "stringValue": "Schedule"},
                        {"key": "period", "stringValue": "1 hour"},
                        {"key": "endDateTime", "stringValue": "2012-12-21T18:00:00"},
                    ],
                },
                {
                    "id": "SayHello",
                    "name": "SayHello",
                    "fields": [
                        {"key": "type", "stringValue": "ShellCommandActivity"},
                        {"key": "command", "stringValue": "echo hello"},
                        {"key": "parent", "refValue": "Default"},
                        {"key": "schedule", "refValue": "Schedule"},
                    ],
                },
            ],
        )
        client.add_tags(pipelineId=pipe_id, tags=[{"key": "foo", "value": "bar"}])
        client.activate_pipeline(pipelineId=pipe_id)
        self.addCleanup(client.delete_pipeline, pipelineId=pipe_id)
        p = self.load_policy(
            {
                "name": "datapipeline-report",
                "resource": "datapipeline",
                "filters": [{"tag:foo": "bar"}],
            },
            config={"region": REGION},
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        resource = resources[0]
        # Tags are normalized to the AWS-standard Key/Value casing, and the
        # pipeline description fields are exposed as top-level keys.
        self.assertEqual(resource["name"], "PipelinesFTW")
        self.assertEqual(resource["Tags"], [{"Key": "foo", "Value": "bar"}])
        self.assertEqual(resource["lastActivationTime"], "2017-03-13T11:37:36")
        self.assertEqual(resource["creationTime"], "2017-03-13T11:37:34")
        self.assertEqual(resource["sphere"], "PIPELINE")
        self.assertEqual(resource["version"], "1")
        self.assertEqual(resource["id"], "df-0993359USAD6HT96D2W")
        self.assertEqual(resource["pipelineState"], "SCHEDULING")
        self.assertEqual(resource["accountId"], "644160558196")
        self.assertEqual(resource["userId"], "AIDAIXI7ULG2SDYI3RBNM")
        self.assertEqual(resource["firstActivationTime"], "2017-03-13T11:37:36")

    def test_delete_datapipeline(self):
        """The ``delete`` action moves the pipeline into the DELETING state."""
        factory = self.replay_flight_data("test_datapipeline_delete")
        p = self.load_policy(
            {
                "name": "delete-datapipeline",
                "resource": "datapipeline",
                "filters": [{"name": "test-delete-pipeline"}],
                "actions": ["delete"],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["name"], "test-delete-pipeline")
        client = factory().client("datapipeline")
        removed = client.describe_pipelines(pipelineIds=[resources[0]["id"]])
        # fields[12] is @pipelineState in the recorded flight data.
        self.assertEqual(
            removed["pipelineDescriptionList"][0]["fields"][12]["stringValue"],
            "DELETING",
        )

    @functional
    def test_tag_datapipeline(self):
        """The ``tag`` action adds the requested tag to a live pipeline."""
        factory = self.replay_flight_data("test_datapipeline_tag", region=REGION)
        session = factory()
        client = session.client("datapipeline")
        pipeline = client.create_pipeline(
            name="PipelineTagTest", uniqueId="PipelineTagTest1"
        )
        pipe_id = pipeline["pipelineId"]
        self.addCleanup(client.delete_pipeline, pipelineId=pipe_id)
        p = self.load_policy(
            {
                "name": "datapipeline-tag-test",
                "resource": "datapipeline",
                "filters": [{"name": "PipelineTagTest"}],
                "actions": [{"type": "tag", "key": "key1", "value": "value1"}],
            },
            session_factory=factory,
        )
        p.run()
        response = client.describe_pipelines(pipelineIds=[pipe_id])
        self.assertEqual(
            response["pipelineDescriptionList"][0]["tags"],
            [{"key": "key1", "value": "value1"}],
        )

    @functional
    def test_mark_datapipeline(self):
        """``mark-for-op`` writes the op message into the configured mark tag."""
        factory = self.replay_flight_data("test_datapipeline_mark", region=REGION)
        session = factory()
        client = session.client("datapipeline")
        pipeline = client.create_pipeline(
            name="PipelineMarkTest", uniqueId="PipelineMarkTest1"
        )
        pipe_id = pipeline["pipelineId"]
        self.addCleanup(client.delete_pipeline, pipelineId=pipe_id)
        p = self.load_policy(
            {
                "name": "datapipeline-mark-test",
                "resource": "datapipeline",
                "filters": [{"name": "PipelineMarkTest"}],
                "actions": [
                    {
                        "type": "mark-for-op",
                        "tag": "custodian_mark",
                        "op": "delete",
                        "msg": "marked for op with no date",
                        "days": 7,
                    }
                ],
            },
            session_factory=factory,
        )
        p.run()
        response = client.describe_pipelines(pipelineIds=[pipe_id])
        self.assertEqual(
            response["pipelineDescriptionList"][0]["tags"],
            [{"key": "custodian_mark", "value": "marked for op with no date"}],
        )

    @functional
    def test_remove_tag_datapipeline(self):
        """``remove-tag`` deletes exactly the named tag from the pipeline."""
        factory = self.replay_flight_data("test_datapipeline_remove_tag", region=REGION)
        session = factory()
        client = session.client("datapipeline")
        pipeline = client.create_pipeline(
            name="PipelineRemoveTagTest", uniqueId="PipelineRemoveTagTest1"
        )
        pipe_id = pipeline["pipelineId"]
        self.addCleanup(client.delete_pipeline, pipelineId=pipe_id)
        client.add_tags(
            pipelineId=pipe_id,
            tags=[{"key": "tag_to_remove", "value": "value of tag to remove"}],
        )
        # Capture the tag count before running the policy so the assertion
        # tolerates any pre-existing tags in the flight data.
        response1 = client.describe_pipelines(pipelineIds=[pipe_id])
        num_tags = len(response1["pipelineDescriptionList"][0]["tags"])
        p = self.load_policy(
            {
                "name": "datapipeline-remove-tag-test",
                "resource": "datapipeline",
                "filters": [{"name": "PipelineRemoveTagTest"}],
                "actions": [{"type": "remove-tag", "tags": ["tag_to_remove"]}],
            },
            session_factory=factory,
        )
        p.run()
        response2 = client.describe_pipelines(pipelineIds=[pipe_id])
        self.assertEqual(
            len(response2["pipelineDescriptionList"][0]["tags"]), num_tags - 1
        )

    @functional
    def test_marked_for_op_datapipeline(self):
        """``marked-for-op`` matches a pipeline whose mark tag date has passed."""
        factory = self.replay_flight_data(
            "test_datapipeline_marked_for_op", region=REGION
        )
        session = factory()
        client = session.client("datapipeline")
        pipeline = client.create_pipeline(
            name="PipelineMarkedForOpTest", uniqueId="PipelineMarkedForOpTest1"
        )
        pipe_id = pipeline["pipelineId"]
        self.addCleanup(client.delete_pipeline, pipelineId=pipe_id)
        client.add_tags(
            pipelineId=pipe_id,
            tags=[
                {
                    "key": "pipeline_marked_for_op",
                    "value": "Pipeline marked for op: delete@2017-12-01",
                }
            ],
        )
        p = self.load_policy(
            {
                "name": "datapipeline-marked-for-op-test",
                "resource": "datapipeline",
                "filters": [
                    {
                        "type": "marked-for-op",
                        "tag": "pipeline_marked_for_op",
                        "op": "delete",
                    }
                ],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
| |
#!/usr/bin/env python3
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script parses conformance test output to produce testgrid entries
#
# Assumptions:
# - there is one log file and one JUnit file (true for current conformance tests..)
# - the log file contains ginkgo's output (true for kubetest and sonobuoy..)
# - the ginkgo output will give us start / end time, and overall success
#
# - the start timestamp is suitable as a testgrid ID (unique, monotonic)
#
# - the test ran in the current year unless --year is provided
# - the timestamps are parsed on a machine with the same local time (zone)
# settings as the machine that produced the logs
#
# The log file is the source of truth for metadata, the JUnit will be consumed
# by testgrid / gubernator for individual test case results
#
# Usage: see README.md
import re
import sys
import time
import datetime
import argparse
import json
import subprocess
from os import path
import glob
import atexit
# logs often contain ANSI escape sequences
# https://stackoverflow.com/a/14693789
ANSI_ESCAPE_RE = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
# NOTE e2e logs use go's time.StampMilli ("Jan _2 15:04:05.000")
# Example log line with a timestamp:
# Jan 26 06:38:46.284: INFO: Running AfterSuite actions on all node
# the third ':' separates the date from the rest
E2E_LOG_TIMESTAMP_RE = re.compile(r'(... .\d \d\d:\d\d:\d\d\.\d\d\d):.*')
# Ginkgo gives a line like the following at the end of successful runs:
# SUCCESS! -- 123 Passed | 0 Failed | 0 Pending | 587 Skipped PASS
# we match this to detect overall success
E2E_LOG_SUCCESS_RE = re.compile(r'Test Suite Passed')
E2E_LOG_FAIL_RE = re.compile(r'Test Suite Failed')
def log_line_strip_escape_sequences(line):
    """Return *line* with any ANSI terminal escape sequences removed."""
    cleaned = ANSI_ESCAPE_RE.sub('', line)
    return cleaned
def parse_e2e_log_line_timestamp(line, year):
    """Extract the leading timestamp from a ginkgo e2e log line.

    Args:
        line (str): the log line
        year (str): 'YYYY'

    Returns:
        datetime.datetime for the timestamp, or None if the line has none
    """
    matched = E2E_LOG_TIMESTAMP_RE.match(line)
    if not matched:
        return None
    # The log timestamp carries no year, so prepend one to build a complete
    # datetime object.
    stamped = year + ' ' + matched.group(1)
    return datetime.datetime.strptime(stamped, '%Y %b %d %H:%M:%S.%f')
def parse_e2e_logfile(file_handle, year):
    """Scan an e2e logfile, assuming the log is from *year*.

    Args:
        file_handle (file): the log file, iterated for lines
        year (str): YYYY year the logfile is from

    Returns:
        (started, finished, passed): first and last timestamps seen
        (datetime.datetime or None), and overall pass/fail (bool or None)
    """
    started = None
    finished = None
    passed = None
    for raw_line in file_handle:
        line = log_line_strip_escape_sequences(raw_line)
        # The first timestamped line marks the start; every later one
        # overwrites the finish time, so the last line's stamp wins.
        stamp = parse_e2e_log_line_timestamp(line, year)
        if stamp is not None:
            if started is None:
                started = stamp
            else:
                finished = stamp
        if passed is False:
            # once a failure is found, ignore subsequent pass/fail markers
            continue
        if E2E_LOG_SUCCESS_RE.match(line):
            passed = True
        elif E2E_LOG_FAIL_RE.match(line):
            passed = False
    return started, finished, passed
def datetime_to_unix(datetime_obj):
    """Convert a (local-time) datetime.datetime to an integer unix timestamp."""
    as_struct = datetime_obj.timetuple()
    return int(time.mktime(as_struct))
def testgrid_started_json_contents(start_time):
    """Build the string contents of a testgrid started.json file.

    Args:
        start_time (datetime.datetime)

    Returns:
        str: JSON object with the unix 'timestamp'
    """
    payload = {'timestamp': datetime_to_unix(start_time)}
    return json.dumps(payload)
def testgrid_finished_json_contents(finish_time, passed, metadata):
    """Build the string contents of a testgrid finished.json file.

    Args:
        finish_time (datetime.datetime)
        passed (bool)
        metadata (str): optional JSON-encoded metadata dict ('' to omit)

    Returns:
        str: JSON object with 'timestamp', 'result', and optional 'metadata'
    """
    payload = {
        'timestamp': datetime_to_unix(finish_time),
        'result': 'SUCCESS' if passed else 'FAILURE',
    }
    if metadata:
        # metadata arrives as a JSON string; embed it as a real object
        payload['metadata'] = json.loads(metadata)
    return json.dumps(payload)
def upload_string(gcs_path, text, dry):
    """Upload *text* to gcs_path via gsutil; with dry=True only print the command.

    Raises:
        RuntimeError: if gsutil exits non-zero
    """
    cmd = ['gsutil', '-q', '-h', 'Content-Type:text/plain', 'cp', '-', gcs_path]
    print('Run:', cmd, 'stdin=%s' % text, file=sys.stderr)
    if dry:
        return
    # stream the text through gsutil's stdin ('cp -' reads from stdin)
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, encoding='utf8')
    proc.communicate(input=text)
    if proc.returncode:
        raise RuntimeError(
            "Failed to upload with exit code: %d" % proc.returncode)
def upload_file(gcs_path, file_path, dry):
    """Upload the file at file_path to gcs_path; with dry=True only print.

    Raises:
        RuntimeError: if gsutil exits non-zero
    """
    cmd = ['gsutil', '-q', '-h', 'Content-Type:text/plain',
           'cp', file_path, gcs_path]
    print('Run:', cmd, file=sys.stderr)
    if dry:
        return
    proc = subprocess.Popen(cmd)
    proc.communicate()
    if proc.returncode:
        raise RuntimeError(
            'Failed to upload with exit code: %d' % proc.returncode)
def get_current_account(dry_run):
    """Return the currently active gcloud account ('' in dry-run mode)."""
    cmd = ['gcloud', 'auth', 'list',
           '--filter=status:ACTIVE', '--format=value(account)']
    print('Run:', cmd, file=sys.stderr)
    if dry_run:
        return ""
    output = subprocess.check_output(cmd, encoding='utf-8')
    return output.strip('\n')
def set_current_account(account, dry_run):
    """Make *account* the active gcloud account (no-op when dry_run)."""
    cmd = ['gcloud', 'config', 'set', 'core/account', account]
    print('Run:', cmd, file=sys.stderr)
    if not dry_run:
        return subprocess.check_call(cmd)
    return None
def activate_service_account(key_file, dry_run):
    """Activate the GCP service account in *key_file* (no-op when dry_run)."""
    cmd = ['gcloud', 'auth', 'activate-service-account', '--key-file=' + key_file]
    print('Run:', cmd, file=sys.stderr)
    if not dry_run:
        subprocess.check_call(cmd)
def revoke_current_account(dry_run):
    """Log out of the currently active gcloud account (no-op when dry_run)."""
    cmd = ['gcloud', 'auth', 'revoke']
    print('Run:', cmd, file=sys.stderr)
    if not dry_run:
        return subprocess.check_call(cmd)
    return None
def parse_args(cli_args=None):
    """Build the CLI parser and parse *cli_args* (defaults to sys.argv[1:]).

    Returns:
        argparse.Namespace: bucket, year, junit, log, dry_run, metadata, key_file
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--bucket',
        help=('GCS bucket to upload the results to,'
              ' of the form \'gs://foo/bar\''),
        required=True,
    )
    arg_parser.add_argument(
        '--year',
        help=('the year in which the log is from, defaults to the current year.'
              ' format: YYYY'),
        default=str(datetime.datetime.now().year),
    )
    arg_parser.add_argument(
        '--junit',
        help='path or glob expression to the junit xml results file(s)',
        required=True,
    )
    arg_parser.add_argument(
        '--log',
        help='path to the test log file, should contain the ginkgo output',
        required=True,
    )
    arg_parser.add_argument(
        '--dry-run',
        help='if set, do not actually upload anything, only print actions',
        required=False,
        action='store_true',
    )
    arg_parser.add_argument(
        '--metadata',
        help='dictionary of additional key-value pairs that can be displayed to the user.',
        required=False,
        default=str(),
    )
    arg_parser.add_argument(
        '--key-file',
        help='path to GCP service account key file, which will be activated before '
             'uploading if provided, the account will be revoked and the active account reset '
             'on exit',
        required=False,
    )
    return arg_parser.parse_args(args=cli_args if cli_args is not None else sys.argv[1:])
def main(cli_args):
    """Entry point: parse flags, parse the e2e log, and upload a testgrid entry.

    Args:
        cli_args (list[str]): command line arguments, without the program name
    """
    args = parse_args(cli_args)
    # optionally activate a service account with upload credentials
    if args.key_file:
        # grab the currently active account if any, and if there is one
        # register a handler to set it active again on exit
        current_account = get_current_account(args.dry_run)
        if current_account:
            atexit.register(
                lambda: set_current_account(current_account, args.dry_run)
            )
        # login to the service account and register a handler to logout before exit
        # NOTE: atexit handlers are called in LIFO order, so the revoke runs
        # before the original account is restored
        activate_service_account(args.key_file, args.dry_run)
        atexit.register(lambda: revoke_current_account(args.dry_run))
    # find the matching junit files, there should be at least one for a useful
    # testgrid entry
    junits = glob.glob(args.junit)
    if not junits:
        print('No matching JUnit files found!')
        sys.exit(-1)
    # parse the e2e.log for start time, finish time, and success
    with open(args.log) as file_handle:
        started, finished, passed = parse_e2e_logfile(file_handle, args.year)
    # convert parsed results to testgrid json metadata blobs
    started_json = testgrid_started_json_contents(started)
    finished_json = testgrid_finished_json_contents(
        finished, passed, args.metadata)
    # use timestamp as build ID
    gcs_dir = args.bucket + '/' + str(datetime_to_unix(started))
    # upload metadata, log, junit to testgrid
    print('Uploading entry to: %s' % gcs_dir)
    upload_string(gcs_dir+'/started.json', started_json, args.dry_run)
    upload_string(gcs_dir+'/finished.json', finished_json, args.dry_run)
    upload_file(gcs_dir+'/build-log.txt', args.log, args.dry_run)
    for junit_file in junits:
        upload_file(gcs_dir+'/artifacts/' +
                    path.basename(junit_file), junit_file, args.dry_run)
    print('Done.')


if __name__ == '__main__':
    main(sys.argv[1:])
| |
import argparse
import json
from pprint import pprint
from sphinxarg.parser import parse_parser, parser_navigate
def test_parse_options():
    """Flag options are reported with name, default, and (possibly empty) help."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--foo', action='store_true', default=False, help='foo help')
    ap.add_argument('--bar', action='store_true', default=False)
    result = parse_parser(ap)
    expected = [
        {'name': ['--foo'], 'default': False, 'help': 'foo help'},
        {'name': ['--bar'], 'default': False, 'help': ''},
    ]
    assert result['options'] == expected
def test_parse_default():
    """A plain option's default value is carried into the parsed data."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--foo', default='123')
    result = parse_parser(ap)
    expected = [{'name': ['--foo'], 'default': '123', 'help': ''}]
    assert result['options'] == expected
def test_parse_arg_choices():
    """Positional arguments keep their 'choices' list in the parsed data."""
    ap = argparse.ArgumentParser()
    ap.add_argument('move', choices=['rock', 'paper', 'scissors'])
    result = parse_parser(ap)
    expected = [
        {
            'name': 'move',
            'help': '',
            'choices': ['rock', 'paper', 'scissors'],
            'metavar': None,
        }
    ]
    assert result['args'] == expected
def test_parse_opt_choices():
    """Options keep their 'choices' list in the parsed data."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--move', choices=['rock', 'paper', 'scissors'])
    result = parse_parser(ap)
    expected = [
        {
            'name': ['--move'],
            'default': None,
            'help': '',
            'choices': ['rock', 'paper', 'scissors'],
        }
    ]
    assert result['options'] == expected
def test_parse_default_skip_default():
    """With skip_default_values=True, defaults are replaced by the SUPPRESS marker."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--foo', default='123')
    result = parse_parser(ap, skip_default_values=True)
    expected = [{'name': ['--foo'], 'default': '==SUPPRESS==', 'help': ''}]
    assert result['options'] == expected
def test_parse_positional():
    """Positional arguments are listed under 'args' with name/help/metavar."""
    ap = argparse.ArgumentParser()
    ap.add_argument('foo', default=False, help='foo help')
    ap.add_argument('bar', default=False)
    result = parse_parser(ap)
    expected = [
        {'name': 'foo', 'help': 'foo help', 'metavar': None},
        {'name': 'bar', 'help': '', 'metavar': None},
    ]
    assert result['args'] == expected
def test_parse_description():
    """Parser description and epilog are surfaced alongside the args list."""
    ap = argparse.ArgumentParser(description='described', epilog='epilogged')
    ap.add_argument('foo', default=False, help='foo help')
    ap.add_argument('bar', default=False)
    result = parse_parser(ap)
    assert result['description'] == 'described'
    assert result['epilog'] == 'epilogged'
    expected_args = [
        {'name': 'foo', 'help': 'foo help', 'metavar': None},
        {'name': 'bar', 'help': '', 'metavar': None},
    ]
    assert result['args'] == expected_args
def test_parse_nested():
    """Subparsers surface as 'children', each with its own usage, args, options."""
    ap = argparse.ArgumentParser()
    ap.add_argument('foo', default=False, help='foo help')
    ap.add_argument('bar', default=False)
    install = ap.add_subparsers().add_parser('install', help='install help')
    install.add_argument('ref', type=str, help='foo1 help')
    install.add_argument('--upgrade', action='store_true', default=False, help='foo2 help')
    result = parse_parser(ap)
    assert result['args'] == [
        {'name': 'foo', 'help': 'foo help', 'metavar': None},
        {'name': 'bar', 'help': '', 'metavar': None},
    ]
    assert result['children'] == [{
        'name': 'install',
        'help': 'install help',
        'usage': 'usage: py.test install [-h] [--upgrade] ref',
        'bare_usage': 'py.test install [-h] [--upgrade] ref',
        'args': [
            {'name': 'ref', 'help': 'foo1 help', 'metavar': None},
        ],
        'options': [
            {'name': ['--upgrade'], 'default': False, 'help': 'foo2 help'},
        ],
    }]
def test_parse_nested_traversal():
    """parser_navigate walks a space-separated path of nested subcommand names."""
    ap = argparse.ArgumentParser()
    lvl1 = ap.add_subparsers().add_parser('level1')
    lvl2 = lvl1.add_subparsers().add_parser('level2')
    lvl3 = lvl2.add_subparsers().add_parser('level3')
    lvl3.add_argument('foo', help='foo help')
    lvl3.add_argument('bar')
    data = parse_parser(ap)
    assert parser_navigate(data, 'level1 level2 level3')['args'] == [
        {'name': 'foo', 'help': 'foo help', 'metavar': None},
        {'name': 'bar', 'help': '', 'metavar': None},
    ]
    assert parser_navigate(data, 'level1 level2')['children'] == [{
        'name': 'level3',
        'help': '',
        'usage': 'usage: py.test level1 level2 level3 [-h] foo bar',
        'bare_usage': 'py.test level1 level2 level3 [-h] foo bar',
        'args': [
            {'name': 'foo', 'help': 'foo help', 'metavar': None},
            {'name': 'bar', 'help': '', 'metavar': None},
        ],
    }]
    # An empty path returns the root data unchanged.
    assert parser_navigate(data, '') == data
| |
# encoding=utf8
"""
Tests for the API
"""
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.client import Client
from django.test import TestCase
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.test.utils import override_settings
from django.core.exceptions import ValidationError
from django.db import models, transaction
from django.db.models.signals import pre_delete, post_delete, post_init, post_save, m2m_changed
from mock import patch, MagicMock, PropertyMock
from mockldap import MockLdap
import unittest
import ldap
from ldap import modlist
from fum.ldap_helpers import test_user_ldap, ldap_cls, PoolLDAPBridge, LDAPBridge
from fum.models import (
Users, Servers, Groups, Projects, EMails, EMailAliases, BaseGroup,
Resource, SSHKey,
)
import datetime, json, sys, copy, os, time
from pprint import pprint as pp
from fum.api.changes import changes_save, rest_reverse
from fum.common.ldap_test_suite import LdapSuite, LdapTransactionSuite, random_ldap_password
from rest_framework.authtoken.models import Token
from rest_framework import status
# Raw python-ldap modlists used by RawLdapTestCase below.
# add_s takes (attribute, values) pairs; modify_s takes (op, attribute, values)
# tuples where, per python-ldap, MOD_ADD=0, MOD_DELETE=1, MOD_REPLACE=2.
create=[('objectClass', ['top', 'posixGroup', 'groupOfUniqueNames', 'mailRecipient', 'google', 'sambaGroupMapping', 'labeledURIObject']), ('sambaGroupType', '2'), ('gidNumber', '4001'), ('sambaSID', '{0}-9003'.format(settings.SAMBASID_BASE)), ('cn', 'TestGroup')]
# Replace every attribute with its current values (effectively a no-op replace).
update=[(2, 'sambaGroupType', '2'), (2, 'gidNumber', '4001'), (2, 'cn', 'TestGroup'), (2, 'objectClass', ['top', 'posixGroup', 'groupOfUniqueNames', 'mailRecipient', 'google', 'sambaGroupMapping', 'labeledURIObject']), (2, 'sambaSID', '{0}-9003'.format(settings.SAMBASID_BASE))]
# Replace of an attribute outside the schema; live LDAP rejects it (OBJECT_CLASS_VIOLATION).
update_nonexisting=[(2, 'nothere', '2'),]
add_empty=[(0, 'description', 'Blaa'),]
update_empty=[(2, 'description', 'Blaa'),]
# Membership edits against the group's uniqueMember attribute.
add_user=[(0, 'uniqueMember', 'uid=hhol,{0}'.format(settings.USER_DN)),]
add_user_two=[(0, 'uniqueMember', 'uid=mmal,{0}'.format(settings.USER_DN)),]
add_user_three=[(0, 'uniqueMember', 'uid=mvih,{0}'.format(settings.USER_DN)),]
replace_user=[(2, 'uniqueMember', 'uid=hhol,{0}'.format(settings.USER_DN)),]
replace_user_two=[(2, 'uniqueMember', 'uid=mmal,{0}'.format(settings.USER_DN)),]
delete_user=[(1, 'uniqueMember', 'uid=hhol,{0}'.format(settings.USER_DN)),]
class RawLdapTestCase(LdapSuite):
    """Exercises raw python-ldap add/modify/delete calls against the directory."""

    def test_mod(self):
        """Create the TestGroup entry, apply the module-level modlists, delete it.

        Fixes vs. original: Python-3-incompatible ``except X, e`` replaced with
        ``except X as e`` (valid since Python 2.6), ``print e`` parenthesized,
        and ``os.environ.get`` given a default so an unset
        DJANGO_SETTINGS_MODULE cannot raise TypeError on the ``in`` test.
        """
        l = ldap_cls(parent=None, LDAP_CLASS='fum.ldap_helpers.LDAPBridge')
        dn = 'cn=TestGroup,{0}'.format(settings.PROJECT_DN)
        # Start from a clean slate; the entry may linger from a previous run.
        try:
            l.connection.delete_s(dn)
        except ldap.NO_SUCH_OBJECT as e:
            print(e)
        try:
            l.connection.add_s(dn, create)
        except ldap.ALREADY_EXISTS as e:
            print(e)
        l.connection.modify_s(dn, update)
        # Schema violations are only enforced by a real LDAP server, not the mock.
        if 'test_live' in os.environ.get('DJANGO_SETTINGS_MODULE', ''):
            with self.assertRaises(ldap.OBJECT_CLASS_VIOLATION):
                l.connection.modify_s(dn, update_nonexisting)
        l.connection.modify_s(dn, update_empty)
        # Re-adding an identical value is rejected by the server.
        with self.assertRaises(ldap.TYPE_OR_VALUE_EXISTS):
            l.connection.modify_s(dn, add_empty)
        l.connection.modify_s(dn, add_user)
        l.connection.modify_s(dn, add_user_two)
        l.connection.modify_s(dn, add_user_three)
        l.connection.modify_s(dn, delete_user)
        l.connection.modify_s(dn, [(ldap.MOD_ADD, 'mail', 'me@mail.com'),])
        l.connection.modify_s(dn, [(ldap.MOD_ADD, 'mail', 'you@mail.com'),])
        l.connection.modify_s(dn, [(ldap.MOD_DELETE, 'mail', 'me@mail.com'),])
        #l.connection.modify_s(dn, [(ldap.MOD_DELETE, 'mail', None),])
        l.connection.delete_s(dn)
class ChangesTestCase(LdapSuite):
    # Verifies that model saves/deletes emit change-feed payloads through
    # fum.api.changes.send_data (patched with a MagicMock in each test).
    def tearDown(self):
        Resource.objects.all().delete()
        super(ChangesTestCase, self).tearDown()
    def test_user_create(self):
        with patch('fum.api.changes.send_data') as o:
            user = Users.objects.create(first_name="A", last_name="B", username="xxx", google_status=Users.ACTIVEPERSON)
            # send_data(payloads, ...): first positional arg, first payload dict.
            sent_data = list(o.call_args)[0][0][0]
            self.assertTrue(all(k in sent_data.keys() for k in ['objectUrl', 'operation', 'timestamp', 'objectId', 'objectType', 'attrs']))
            self.assertEqual(sent_data['operation'], 'create')
            self.assertEqual(sent_data['objectId'], user.name)
            self.assertEqual(sent_data['objectType'], 'user')
            self.assertEqual(sent_data['objectUrl'], rest_reverse('users-detail', args=[user]))
            user.delete()
    def test_user_update(self):
        user = Users.objects.create(first_name="A", last_name="B", username="xxx", google_status=Users.ACTIVEPERSON)
        with patch('fum.api.changes.send_data') as o:
            user.last_name = 'Dekkari'
            user.save()
            sent_data = list(o.call_args)[0][0][0]
            self.assertEqual(sent_data['operation'], 'update')
        user.delete()
    def test_resource_create(self):
        # Create and rename a Resource attached to self.user; only checks that
        # no exception is raised along the way.
        name = 'woot'
        r = Resource(name=name, url='http://woot.com')
        r.content_object = self.user
        r.save()
        r.name = 'woof woof'
        r.save()
    def test_resource_create_without_schema(self):
        # A bare hostname gets an implicit http:// scheme on save ...
        name = 'woot.com'
        r = Resource(name=name, url=name, content_object=self.user)
        r.save()
        self.assertEqual(r.url, 'http://%s'%name)
        # ... while a non-http scheme is rejected outright.
        name = 'spotify://woot.com'
        r = Resource(name=name, url=name, content_object=self.user)
        with self.assertRaises(ValidationError):
            r.save()
    def test_auditlog_internal(self):
        name = 'woot'
        with patch('fum.api.changes.send_data') as o:
            r = Resource(name=name, url='http://woot.com')
            r.content_object = self.user
            r.save()
            # NOTE(review): sent_data is captured but never asserted on; this
            # test only proves the save path reaches send_data without error.
            sent_data = list(o.call_args)[0][0][0]
class PermissionTestCase(LdapSuite):
    """Ownership / sudo-based permission checks on the user and group APIs.

    Fixes vs. original: Python-3-incompatible ``except X, e`` replaced with
    ``except X as e``; deprecated ``assertEquals`` alias replaced with
    ``assertEqual``.
    """
    def setUp(self):
        super(PermissionTestCase, self).setUp()
        # A second user that the logged-in test user does NOT own.
        self.ttes = self.save_safe(Users,
                kw=dict(first_name="Teemu", last_name="Testari", username="ttes", google_status=Users.ACTIVEPERSON),
                lookup=dict(username='ttes'))
    def tearDown(self):
        self.ttes.delete()
        super(PermissionTestCase, self).tearDown()
    def test_search(self):
        results = self.user.ldap.fetch(settings.USER_DN, filters='(ou=*)', scope=ldap.SCOPE_BASE)
        self.assertEqual(results['ou'], ['People'])
    def test_delete_bad_dn(self):
        ERROR_CODE = 105
        try:
            self.assertEqual(ERROR_CODE, self.ldap.delete(dn="uid=ylamummo,{0}".format(settings.USER_DN))[0]) # mock
        except ldap.NO_SUCH_OBJECT as e:
            self.assertTrue(1) # live
    def test_only_owner_can_edit(self):
        """Editing another user's fields without sudo is rejected with 400."""
        user = self.ttes
        new_name = 'Heikki'
        response = self.client.post("/api/users/%s/"%user.username, {
            "first_name": new_name,
            },
            HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Users.objects.get(username='ttes').first_name, user.first_name)
        response = self.client.post("/api/users/%s/"%user.username, {
            "phone1": '001235124',
            },
            HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(json.loads(response.content)['__all__'][0], 'Permission denied')
        self.assertEqual(response.status_code, 400)
    def test_sudoer_can_edit(self):
        """Membership in the IT team enables sudo, which permits the edit."""
        user = self.ttes
        new_name = 'Heikki'
        # Sudo fails while the user is not in the IT team.
        response = self.client.post('/sudo/',
                {'password':self.PASSWORD},
                HTTP_X_REQUESTED_WITH='XMLHttpRequest',
                REMOTE_USER=self.USERNAME)
        self.assertEqual(response.status_code, 401)
        # Add the test user to TeamIT.
        g = self.save_safe(Groups,
                kw=dict(name=settings.IT_TEAM),
                lookup=dict(name=settings.IT_TEAM))
        try:
            g.users.add(self.user)
        except ldap.TYPE_OR_VALUE_EXISTS as e: # live LDAP not cleaned
            pass
        response = self.client.post('/sudo/',
                {'password': self.PASSWORD},
                HTTP_X_REQUESTED_WITH='XMLHttpRequest',
                REMOTE_USER=self.USERNAME)
        self.assertEqual(response.status_code, 200)
        response = self.client.post("/api/users/%s/"%user.username, {
            "phone1": '001235124',
            },
            HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',
            REMOTE_USER=self.USERNAME)
        self.assertEqual(response.status_code, 200)
    def test_user_can_edit_non_restricted_fields(self):
        # NOTE(review): duplicates test_only_owner_can_edit; kept for parity
        # with the original suite.
        # can not edit other users without SUDO/ownership
        user = self.ttes
        new_name = 'Heikki'
        response = self.client.post("/api/users/%s/"%user.username, {
            "first_name": new_name,
            },
            HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Users.objects.get(username='ttes').first_name, user.first_name)
        response = self.client.post("/api/users/%s/"%user.username, {
            "phone1": '001235124',
            },
            HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(json.loads(response.content)['__all__'][0], 'Permission denied')
        self.assertEqual(response.status_code, 400)
    def test_user_can_not_m2m_protected_groups(self):
        """Protected groups reject create (400) and delete / member add (403)."""
        user = self.ttes
        name = 'Futurice'
        response = self.client.post("/api/groups/", {
            "name": name,
            })
        self.assertEqual(response.status_code, 400)
        futurice = self.save_safe(Groups,
                kw=dict(name=name),
                lookup=dict(name=name))
        response = self.client.delete("/api/groups/{0}/".format(name))
        self.assertEqual(response.status_code, 403)
        response = self.client.get("/api/groups/{0}/".format(name))
        self.assertEqual(response.status_code, 200)
        response = self.client.post("/api/groups/{0}/users/".format(name),
                {"items": [user.username]})
        self.assertEqual(response.status_code, 403)
class TokenPermissionTestCase(LdapSuite):
    """API access via DRF token authentication, including restricted fields.

    Fix vs. original: the detail-view field check asserted a non-empty list
    literal, which is always truthy (a vacuous assertion); it now checks the
    field name actually appears among the returned keys with ``any``.
    """
    def setUp(self):
        super(TokenPermissionTestCase, self).setUp()
        self.client = Client()
        user,_ = User.objects.get_or_create(username='Bob')
        self.token = Token.objects.create(user=user)
        self.token_signature = 'Token '+self.token.key
        self.ttes = self.save_safe(Users,
                kw=dict(first_name="Teemu", last_name="Testari", username="ttes", google_status=Users.ACTIVEPERSON),
                lookup=dict(username='ttes'))
    def test_token_auth(self):
        """A valid token grants access and permits edits of restricted fields."""
        # No credentials at all -> rejected.
        response = self.client.get("/api/")
        self.assertEqual(response.status_code, 403)
        response = self.client.get("/api/",
                {},
                HTTP_AUTHORIZATION=self.token_signature)
        self.assertEqual(response.status_code, 200)
        user = Users.objects.get(username=self.USERNAME)
        self.assertEqual(Users.objects.get(username=self.USERNAME).active_in_planmill, Users.PLANMILL_DISABLED)
        # Cycle active_in_planmill through ACTIVE -> INACTIVE -> ACTIVE.
        response = self.client.post("/api/users/%s/"%user.username, {
            "active_in_planmill": Users.PLANMILL_ACTIVE,
            },
            HTTP_AUTHORIZATION=self.token_signature,
            HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Users.objects.get(username=self.USERNAME).active_in_planmill, Users.PLANMILL_ACTIVE)
        response = self.client.post("/api/users/%s/"%user.username, {
            "active_in_planmill": str(Users.PLANMILL_INACTIVE),
            },
            HTTP_AUTHORIZATION=self.token_signature,
            HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Users.objects.get(username=self.USERNAME).active_in_planmill, Users.PLANMILL_INACTIVE)
        response = self.client.post("/api/users/%s/"%user.username, {
            "active_in_planmill": Users.PLANMILL_ACTIVE,
            },
            HTTP_AUTHORIZATION=self.token_signature,
            HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Users.objects.get(username=self.USERNAME).active_in_planmill, Users.PLANMILL_ACTIVE)
        # restricted records can be modified via API, with a valid token
        response = self.client.post("/api/users/%s/"%self.ttes.username, {
            "username": 'abc',
            },
            HTTP_AUTHORIZATION=self.token_signature,
            HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(response.status_code, 200)
        response = self.client.post("/api/users/%s/"%self.ttes.username, {
            "active_in_planmill": 1,
            },
            HTTP_AUTHORIZATION=self.token_signature,
            HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(response.status_code, 200)
    def test_token_access_planmill(self):
        """active_in_planmill is exposed on both detail and list endpoints."""
        response = self.client.get("/api/users/{0}/".format(self.ttes.username),
                {},
                HTTP_AUTHORIZATION=self.token_signature)
        self.assertEqual(response.status_code, 200)
        # BUG FIX: original asserted a non-empty list literal (always True).
        self.assertTrue(any('active_in_planmill' in k for k in json.loads(response.content)))
        response = self.client.get("/api/users/", {
            'fields': 'id,active_in_planmill',
            'limit': 0,
            },
            HTTP_AUTHORIZATION=self.token_signature)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(all(['active_in_planmill' in k for k in json.loads(response.content)]))
class LdapSanityCase(TestCase):
    # Test that mocks for LDAP are working throughout Django
    # Seed entries for the mockldap in-memory directory: (dn, attributes).
    data = [
        ("uid=fum3adm,ou=Administrators,ou=TopologyManagement,o=Netscaperoot", {"userPassword": ["njJc4RUWJVre"]}),
        ("uid=ttes,{0}".format(settings.USER_DN), {"userPassword": ["secret"], "uidNumber": ["2001"],}),
        ("uid=testuser,{0}".format(settings.USER_DN), {"userPassword": ["secret"], "uidNumber": ["2003"],}),
    ]
    directory = dict(data)
    def setUp(self):
        self.mockldap = MockLdap(self.directory)
        self.mockldap.start()
        self.ldap = ldap_cls(parent=None, uri='ldap://localhost', LDAP_CLASS='fum.ldap_helpers.LDAPBridge')
    def tearDown(self):
        self.mockldap.stop()
    def test_signals_mocked(self):
        # ReconnectingLDAPBridge re-uses initial connection
        option_count = len(settings.LDAP_CONNECTION_OPTIONS)
        # Expected mock call ledger for establishing the bridge connection.
        tls = ['initialize'] + ['set_option']*option_count + ['initialize']
        self.assertEquals(self.ldap.connection.methods_called(),
                tls +['start_tls_s', 'simple_bind_s'])
        # Model save/delete must NOT leak extra calls onto this connection.
        server = Servers.objects.create(name="Testiserveri222", description="Testiserverin kuvaus")
        methods_called = self.ldap.connection.methods_called()
        self.assertEquals(methods_called,
                tls +['start_tls_s', 'simple_bind_s'])
        #tls+['start_tls_s', 'simple_bind_s',] + tls + ['start_tls_s', 'simple_bind_s', 'add_s', 'add_s'])
        server.delete()
        methods_called = self.ldap.connection.methods_called()
        self.assertEquals(methods_called,
                tls +['start_tls_s', 'simple_bind_s'])
        #tls+['start_tls_s', 'simple_bind_s',] + tls + ['start_tls_s', 'simple_bind_s', 'add_s', 'add_s', 'delete_s', 'delete_s'])
    def test_down(self):
        # NOTE(review): deliberately disabled via early return; the code below
        # is dead and only documents the intended live-LDAP behavior.
        return
        if 'test_live' in os.environ.get('DJANGO_SETTINGS_MODULE'):
            lcon = ldap_cls(parent=None, uri='ldap://localhost', LDAP_CLASS='fum.ldap_helpers.ReconnectingLDAPBridge')
            with self.assertRaises(ldap.SERVER_DOWN):
                lcon.connection
class LdapTestCase(LdapSuite):
    """LDAP synchronisation tests: users, projects, servers, email and m2m
    relations are mirrored to the directory on save/delete.

    Fixes vs. original: Python-3-incompatible ``except X, e`` replaced with
    ``except X as e``, ``print e`` parenthesized, and the deprecated
    ``assertEquals`` alias replaced with ``assertEqual``.
    """
    def setUp(self):
        super(LdapTestCase, self).setUp()
        pname = 'PTestiProjekti'
        sname = 'TestiServer'
        self.user2 = self.save_safe(Users,
                kw=dict(first_name="TIina", last_name="Testaaja", username='tite', google_status=Users.ACTIVEPERSON),
                lookup=dict(username='tite'))
        self.project = self.save_safe(Projects,
                kw=dict(name=pname, description="Testiprojektin kuvaus"),
                lookup=dict(name=pname))
        self.server = self.save_safe(Servers,
                kw=dict(name=sname, description="Testiserverin kuvaus"),
                lookup=dict(name=sname))
    def tearDown(self):
        self.user2.delete()
        self.project.delete()
        self.server.delete()
        super(LdapTestCase, self).tearDown()
    def test_delete_relation(self):
        """Removing an email relation also removes 'mail' from the LDAP entry."""
        server_name = 'test_server_1'
        server = self.save_safe(Servers, kw=dict(name=server_name), lookup=dict(name=server_name))
        mail = '%s@futurice.com'%server_name
        email = EMails(address=mail, content_object=server)
        server.email.add(email)
        self.assertEqual(self.ldap_val('mail', server), [mail])
        server.email.remove(email)
        # ldap_val raises KeyError once the attribute is gone from the entry.
        with self.assertRaises(KeyError):
            self.ldap_val('mail', server)
    def test_save_sudoer(self):
        data = ('cn=it-team,{0}'.format(settings.SUDO_DN),
                {'sudoHost': ['ALL'],
                 'sudoUser': ['ileh', 'ojar',],
                 'cn': ['it-team']})
        server_name = 'it-team'
        server = self.save_safe(Servers, kw=dict(name=server_name), lookup=dict(name=server_name))
        for username in data[1]['sudoUser']:
            django_test_user, user = self.create_user(username)
            try:
                server.sudoers.add(user)
            except ldap.TYPE_OR_VALUE_EXISTS as e:
                # Live LDAP may already contain the member from an earlier run.
                pass
    def test_create_server(self):
        self.serverminion = self.save_safe(Servers, dict(name="Testiserveri222", description="Testiserverin kuvaus"), lookup=dict(name="Testiserveri222"))
        self.serverminion.delete()
    def test_create_without_saving_to_ldap(self):
        """DummyLdap keeps the save in the DB only; no entry lands in LDAP."""
        username = 'aankdbonly'
        user = Users(first_name="Aku", last_name="Ankka", username=username, google_status=Users.ACTIVEPERSON, LDAP_CLASS='fum.ldap_helpers.DummyLdap')
        user.save()
        user.ldap = ldap_cls(parent=user, LDAP_CLASS='fum.ldap_helpers.LDAPBridge')
        with self.assertRaises(KeyError):
            self.ldap_val('givenName', user)
        self.assertTrue(Users.objects.get(username=username))
    def test_create_without_saving_to_ldap_with_pk(self):
        username = 'aank'
        user = Users(id=3001, first_name="Aku", last_name="Ankka", username=username, google_status=Users.ACTIVEPERSON, LDAP_CLASS='fum.ldap_helpers.DummyLdap')
        user.save()
        self.assertTrue(Users.objects.get(username=username))
    def test_create_email_without_saving_to_ldap(self):
        username = 'aank3'
        user = self.save_safe(Users,
                kw=dict(id=3001, first_name="Aku", last_name="Ankka", username=username, google_status=Users.ACTIVEPERSON),
                lookup=dict(username=username),
                save_kw=dict(force_insert=True))
        with self.assertRaises(KeyError):
            self.ldap_val('mail', user)
        email = 'paddy@mock.com'
        # With the dummy LDAP class the email save must not touch the directory.
        with self.settings(LDAP_CLASS='fum.ldap_helpers.DummyLdap'):
            em = EMails(address=email, content_object=user)
            em.save()
        with self.assertRaises(KeyError):
            self.ldap_val('mail', user)
        user.delete()
    def test_create_user_with_email(self):
        username = 'aank2'
        # Clean any leftover entry from a previous (live) run.
        try:
            Users(username=username).ldap.delete()
        except ldap.NO_SUCH_OBJECT as e:
            print(e)
        user = self.save_safe(Users,
                kw=dict(first_name="Aku", last_name="Ankka", username=username, google_status=Users.ACTIVEPERSON),
                lookup=dict(username=username))
        with self.assertRaises(KeyError):
            self.ldap_val('mail', user)
        self.assertEqual(Users.objects.get(username=username).get_email(), None)
        email = 'paddy@mock.com'
        em = EMails(address=email, content_object=user)
        em.save()
        self.assertEqual(self.ldap_val('mail', user)[0], email)
        self.assertEqual(Users.objects.get(username=username).get_email().address, email)
        user.delete()
    def test_create_email(self):
        email = 'wizard@oz.com'
        em = EMails(address=email, content_object=self.user)
        em.save()
        self.assertEqual(self.ldap_val('mail', self.user)[0], email)
        self.assertEqual(Users.objects.get(username=self.user.username).get_email().address, email)
    def test_user_modify(self):
        username = 'beatlebug'
        user = self.save_safe(Users,
                kw=dict(first_name="Aku", last_name="Ankka", username=username, google_status=Users.ACTIVEPERSON),
                lookup=dict(username=username))
        #user = self.user
        # ^ really wierd Mixin behaviour, not resetting state on .save() when running full suite
        user.first_name = 'Heikki'
        user.save()
        self.assertEqual(self.ldap_val('givenName', user)[0], 'Heikki')
        user.first_name = 'Teemu'
        user.save()
        self.assertEqual(self.ldap_val('givenName', user)[0], 'Teemu')
    def test_project_m2m(self):
        self.project.users.add(self.user)
        self.assertEqual(self.project.users.all()[0], self.user)
        self.assertTrue(any("uid=%s"%self.user.username in user for user in self.ldap_val('uniqueMember', self.project)))
    def test_server_sudoers(self):
        """sudoUser grows by exactly one when a new sudoer is added."""
        self.server.users.add(self.user)
        self.server.sudoers.add(self.user2)
        sudoers = self.ldap_val('sudoUser', self.server.sudoers)
        sudoers_in_beginning = len(sudoers)
        self.assertTrue(len(sudoers) > 0)
        self.server.sudoers.add(self.user)
        sudoers = self.ldap_val('sudoUser', self.server.sudoers)
        self.assertEqual(len(sudoers), sudoers_in_beginning + 1)
        members = self.ldap_val('uniqueMember', instance=self.server)
        self.assertTrue(self.user.get_dn() in members)
        sudoers = self.ldap_val('sudoUser', self.server.sudoers)
        self.assertTrue(self.user.username in sudoers)
    def test_server_m2m_user(self):
        self.server.users.add(self.user)
        members = self.ldap_val('uniqueMember', instance=self.server)
        self.assertEqual([self.user.get_dn()], members)
        # TODO: dn=self.server should be enough to determine DN for 'sudoUser'
        dn = self.server.get_ldap_sudoers_dn()
        with self.assertRaises(KeyError):
            sudoers = self.ldap_val('sudoUser', self.server.sudoers)
    def test_server_m2m_sudoer(self):
        self.server.sudoers.add(self.user)
        sudoers = self.ldap_val('sudoUser', self.server.sudoers)
        self.assertEqual(sudoers, [self.user.get_ldap_id_value()])
    def test_server_m2m_functions(self):
        self.assertEqual(['uniqueMember', 'sudoUser'], [k.ldap_field for k in self.server.get_ldap_m2m_relations()])
        self.assertEqual(['users', 'sudoers'], [k.name for k in self.server.get_ldap_m2m_relations()])
        self.assertEqual([('users', 'uniqueMember'), ('sudoers', 'sudoUser')], [(k.name, k.ldap_field) for k in self.server.get_ldap_m2m_relations()])
    def test_set_value(self):
        from ldap import modlist
        modified_values = {'key': 'new'}
        # python-ldap would emit MOD_ADD (0); the bridge always uses MOD_REPLACE (2).
        self.assertEqual(modlist.modifyModlist({}, modified_values), [(0, 'key', 'new')])
        mlist = self.ldap.get_modify_modlist(modified_values)
        self.assertEqual(mlist, [(2, 'key', 'new')])
    def test_set_to_empty(self):
        from ldap import modlist
        modified_values = {'key': ''}
        self.assertEqual(modlist.modifyModlist({}, modified_values), [])
        self.assertEqual(modlist.modifyModlist({'key': 'old-value'}, {'key': ''}), [(1, 'key', None)])
        mlist = self.ldap.get_modify_modlist(modified_values)
        self.assertEqual(mlist, [(2, 'key', None)])
        modified_values = {'key': 'foo'}
        self.assertEqual(modlist.modifyModlist({'key': ''}, {'key': 'foo'}), [(0, 'key', 'foo')])
        mlist = self.ldap.get_modify_modlist(modified_values)
        self.assertEqual(mlist, [(2, 'key', 'foo')])
        self.user.title = 'Title'
        self.user.save()
        self.assertEqual(self.user.lval().get('title'), ['Title'])
        self.user.title = ''
        self.user.save()
        self.assertTrue(self.user.lval().get('title') in [None, []])# TODO: FIX: mockldap returns [], ldappool None
        self.user.title = 'NewTitle'
        self.user.save()
        self.assertEqual(self.user.lval().get('title'), ['NewTitle'])
class ApiTestCase(LdapTransactionSuite):
    """End-to-end REST API CRUD tests for users, groups, servers and projects.

    Fixes vs. original: Python-3-incompatible ``except X, e`` replaced with
    ``except X as e``, ``print e`` parenthesized, the deprecated
    ``assertEquals`` alias replaced with ``assertEqual``, a pointless
    ``copy.deepcopy`` of an immutable string removed, and an unused local
    dropped.
    """
    def setUp(self):
        super(ApiTestCase, self).setUp()
        # Create API user
        self.API_USERNAME = 'TestAPI'
        self.API_EMAIL = 'test@fum.futurice.com'
        self.API_PASSWORD = random_ldap_password()
        self.django_test_user, self.apiuser = self.create_user(self.API_USERNAME, email=self.API_EMAIL, password=self.API_PASSWORD)
        # Grant add/delete/change permissions for every exposed model.
        models = [Users, Servers, Groups, Projects]
        for model in models:
            content_type = ContentType.objects.get_for_model(model)
            permission_add = Permission.objects.get(content_type=content_type, codename='add_%s'%model.__name__.lower())
            permission_del = Permission.objects.get(content_type=content_type, codename='delete_%s'%model.__name__.lower())
            permission_edi = Permission.objects.get(content_type=content_type, codename='change_%s'%model.__name__.lower())
            self.django_test_user.user_permissions.add(permission_add, permission_del, permission_edi)
        self.c = Client()
        self.c.login(username=self.API_USERNAME, password=self.API_PASSWORD)
    def tearDown(self):
        # Delete objects that may have been saved to db/ldap and not deleted.
        try:
            g = Groups.objects.get(name="TestGroup")
            g.delete()
        except (ObjectDoesNotExist, ldap.NO_SUCH_OBJECT):
            pass
        try:
            s = Servers.objects.get(name="TestServer")
            s.delete()
        except (ObjectDoesNotExist, ldap.NO_SUCH_OBJECT):
            pass
        for k in ['PTestProject','PTestProjectX','PTestProjectX2']:
            try:
                p = Projects.objects.get(name=k)
                p.delete()
            except (ObjectDoesNotExist, ldap.NO_SUCH_OBJECT):
                pass
        self.apiuser.delete()
        super(ApiTestCase, self).tearDown()
    def test_status(self):
        url = rest_reverse('users-status', args=[self.apiuser.username])
        response = self.c.get(url)
        self.assertEqual(json.loads(response.content), {'status': 'active'})
        # Changing the status requires sudo mode.
        self.sudomode()
        response = self.c.post(url,
                {"status": Users.USER_DISABLED,},
                HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(response.status_code, 200)
        response = self.c.get(url)
        self.assertEqual(json.loads(response.content), {'status': 'disabled'})
    def test_email_integrity(self):
        """A project's email stays unique and in sync with the LDAP entry."""
        mail = 'pentti@futurice.com'
        self.user.email.add(EMails(address=mail, content_object=self.user))
        group_mail = u"test.Group1@futurice.com"
        project_name = 'PTestGroup'
        try:
            l = ldap_cls(parent=None)
            l.delete(dn=Projects(name=project_name).get_dn())
        except ldap.NO_SUCH_OBJECT as e:
            pass
        try:
            response = self.c.post("/api/projects/", {
                "name": project_name,
                "email": group_mail,
                })
            self.assertEqual(response.status_code, 201)
        except ldap.ALREADY_EXISTS:
            pass
        project = Projects.objects.get(name=project_name)
        self.assertEqual(project.get_email().address, group_mail)
        self.assertEqual(self.ldap_val('mail', project), [group_mail])
        # NOTE(review): the requests below use self.client (unauthenticated)
        # rather than self.c — preserved as in the original suite.
        response = self.client.post("/api/projects/%s/"%project.name,
                {"description": 'A project',},
                HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(project.get_email().address, group_mail)
        self.assertEqual(self.ldap_val('mail', project), [group_mail])
        # An address already owned by a user is rejected.
        with self.assertRaises(ValidationError):
            response = self.client.post("/api/projects/%s/"%project.name,
                    {"email": mail,},
                    HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(project.get_email().address, group_mail)
        self.assertEqual(self.ldap_val('mail', project), [group_mail])
        # Clearing the email removes it from both the model and LDAP.
        response = self.client.post("/api/projects/%s/"%project.name,
                {"email": '',},
                HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(project.get_email(), None)
        with self.assertRaises(KeyError):
            self.ldap_val('mail', project)
    def test_get_user_by_email(self):
        response = self.c.post("/api/users/%s/"%self.apiuser.username,
                {"email": self.API_EMAIL,},
                HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        response = self.c.get("/api/users/", {"email": self.API_EMAIL,},)
        self.assertEqual(json.loads(response.content)['results'][0]['username'], self.apiuser.username)
        self.client.delete("/api/users/%s"%self.apiuser.username)
    def test_api_root(self):
        response = self.c.get("/api/", {})
        self.assertContains(response, "users")
        self.assertContains(response, "groups")
        self.assertContains(response, "servers")
        self.assertContains(response, "projects")
    def test_user(self):
        """Full user lifecycle: create, list, patch email (as owner), delete."""
        name = "testusermikko"
        mail = "test.user@futurice.com"
        response = self.c.post("/api/users/", {
            "username": name,
            "first_name": "Test",
            "last_name": "User",
            "google_status": Users.ACTIVEPERSON,
            "email": mail
            })
        self.assertEqual(response.status_code, 201)
        user = Users.objects.get(username=name)
        # A matching Django auth user lets us act as the created user.
        django_user,_ = User.objects.get_or_create(username=name, is_active=True)
        django_user.set_password(name)
        django_user.save()
        self.djc = Client()
        self.djc.login(username=name, password=name)
        response = self.c.get("/api/users/", {})
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, mail)
        self.assertEqual(self.ldap_val('mail', user), [mail])
        response = self.c.get("/api/users/%s/"%name, {})
        self.assertContains(response, name)
        new_mail = "test.emailmikko@futurice.com"
        # Another account may not change the email ...
        response = self.c.post('/api/users/%s/'%name,
                {'email': new_mail},
                HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(response.status_code, 400)
        # ... but the owner may.
        response = self.djc.post('/api/users/%s/'%name,
                {'email': new_mail},
                HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(response.status_code, 200)
        response = self.c.get("/api/users/%s/"%name, {})
        self.assertContains(response, new_mail)
        self.assertEqual(self.ldap_val('mail', user), [new_mail])
        response = self.c.delete("/api/users/%s/"%name)
        self.assertEqual(response.status_code, 204)
        with self.assertRaises(KeyError):
            self.ldap_val('mail', user)
        response = self.c.get("/api/users/", {})
        self.assertNotContains(response, name)
    def test_group(self):
        """Group lifecycle, including editor_group changes that require sudo."""
        try:
            response = self.c.post("/api/groups/", {
                "name": "TestGroup",
                "description": "Test group1234",
                "email":"test.Group1@futurice.com"
                })
            self.assertEqual(response.status_code, 201)
        except ldap.ALREADY_EXISTS:
            pass
        response = self.c.get("/api/groups/", {})
        self.assertContains(response, "group1234")
        response = self.c.get("/api/groups/TestGroup/", {})
        self.assertContains(response, "TestGroup")
        self.assertContains(response, "test.Group1@futurice.com")
        response = self.c.post("/api/groups/TestGroup/",
                {'email': "test.groupm@futurice.com"},
                HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(response.status_code, 200)
        response = self.c.get("/api/groups/TestGroup/", {})
        self.assertContains(response, "test.groupm@futurice.com")
        g = self.save_safe(Groups,
                kw=dict(name=settings.IT_TEAM),
                lookup=dict(name=settings.IT_TEAM))
        # Setting editor_group without sudo is rejected.
        response = self.c.post("/api/groups/TestGroup/", {
            "editor_group": g.name,
            },
            HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(response.status_code, 400)
        g.users.add(self.apiuser)
        response = self.c.post('/sudo/',
                {'password': self.API_PASSWORD},
                HTTP_X_REQUESTED_WITH='XMLHttpRequest',
                REMOTE_USER=self.API_USERNAME)
        self.assertEqual(response.status_code, 200)
        response = self.c.post("/api/groups/TestGroup/", {
            "editor_group": g.name,
            },
            HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(response.status_code, 200)
        response = self.c.delete("/api/groups/TestGroup/", {})
        self.assertEqual(response.status_code, 204)
        response = self.c.get("/api/groups/", {})
        self.assertNotContains(response, "group1234")
    def test_server(self):
        response = self.c.post("/api/servers/", {
            "name": "TestServer",
            "description": "Test server1234",
            "email":"testServer1@futurice.com"
            })
        self.assertEqual(response.status_code, 201)
        response = self.c.get("/api/servers/", {})
        self.assertContains(response, "server1234")
        self.assertContains(response, "testServer1@futurice.com")
        response = self.c.get("/api/servers/TestServer/", {})
        self.assertContains(response, "TestServer")
        response = self.c.post("/api/servers/TestServer/",
                {'email': "test.serverm@futurice.com"},
                HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(response.status_code, 200)
        response = self.c.get("/api/servers/TestServer/", {})
        self.assertContains(response, "test.serverm@futurice.com")
        response = self.c.delete("/api/servers/TestServer/", {})
        self.assertEqual(response.status_code, 204)
        response = self.c.get("/api/servers/", {})
        self.assertNotContains(response, "server1234")
    def test_project_create(self):
        response = self.c.post("/api/projects/", {
            "name": "PTestProjectX",})
        self.assertEqual(response.status_code, 201)
        # An explicitly empty description must also be accepted.
        response = self.c.post("/api/projects/", {
            "name": "PTestProjectX2",
            "description": "",})
        self.assertEqual(response.status_code, 201)
    def test_project(self):
        pname = 'PTestProject'
        response = self.c.post("/api/projects/", {
            "name": pname,
            "description": "Test project1234",
            "email":"TestProject21@futurice.com"
            })
        self.assertEqual(response.status_code, 201)
        response = self.c.get("/api/projects/", {})
        self.assertContains(response, "project1234")
        self.assertContains(response, "TestProject21@futurice.com")
        response = self.c.get("/api/projects/%s/"%pname, {})
        self.assertContains(response, pname)
        response = self.c.post("/api/projects/%s/"%pname,
                {'email': "test.projectm@futurice.com"},
                HTTP_X_HTTP_METHOD_OVERRIDE='PATCH',)
        self.assertEqual(response.status_code, 200)
        response = self.c.get("/api/projects/%s/"%pname, {})
        self.assertContains(response, "test.projectm@futurice.com")
        response = self.c.delete("/api/projects/%s/"%pname, {})
        self.assertEqual(response.status_code, 204)
        response = self.c.get("/api/projects/", {})
        self.assertNotContains(response, "project1234")
    def test_set_homedir(self):
        response = self.c.post("/api/users/", {
            "username": "testhomediruser",
            "first_name": "Home",
            "last_name": "DirUser",
            "google_status": Users.ACTIVEPERSON,
            "home_directory": "/home/ci/testhomediruser",
            })
        self.assertEqual(response.status_code, 201)
        response = self.c.get("/api/users/", {})
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "testhomediruser")
        self.assertContains(response, "/home/ci/testhomediruser")
        u = Users.objects.get(username="testhomediruser")
        u.delete()
        response = self.c.get("/api/users/", {})
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, "testhomediruser")
    def test_del(self):
        """Delete requires the delete_* permission; without it the API returns 403."""
        models = [[Servers,None], [Groups,None], [Projects,'PProjectName']]
        for model, name in models:
            model_name = model.__name__.lower()
            name = name or model_name
            response = self.c.post("/api/%s/"%model_name, {
                "name": name
                })
            self.assertEqual(response.status_code, 201)
            content_type = ContentType.objects.get_for_model(model)
            permission_del = Permission.objects.get(content_type=content_type, codename='delete_%s'%model_name)
            self.django_test_user.user_permissions.remove(permission_del)
            url = "/api/%s/%s/"%(model_name, name)
            response = self.c.delete(url, {})
            self.assertEqual(response.status_code, 403)
            permission_del = Permission.objects.get(content_type=content_type, codename='delete_%s'%model_name)
            self.django_test_user.user_permissions.add(permission_del)
            response = self.c.delete("/api/%s/%s/"%(model_name, name),{})
            self.assertEqual(response.status_code, 204)
            try:
                model.objects.get(name=name).delete()
            except Exception as e:
                print(e)
class ApiLimitsTestCase(LdapSuite):
    """Which fields the API exposes by default versus with ?limit=0."""
    def setUp(self):
        super(ApiLimitsTestCase, self).setUp()
        # Create API user
        self.API_USERNAME = 'TestAPI'
        self.API_EMAIL = 'test@fum.futurice.com'
        self.API_PASSWORD = random_ldap_password()
        self.django_test_user, self.apiuser = self.create_user(self.API_USERNAME, email=self.API_EMAIL, password=self.API_PASSWORD)
        self.c = Client()
        self.c.login(username=self.API_USERNAME, password=self.API_PASSWORD)
        # One fixture of each listable model.
        self.group = self.save_safe(Groups, dict(name='ListTestGroup', description="Test"), lookup=dict(name='ListTestGroup'))
        self.server = self.save_safe(Servers, dict(name='ListTestServer', description="Test"), lookup=dict(name='ListTestServer'))
        self.project = self.save_safe(Projects, dict(name='PListTestProject', description="Test"), lookup=dict(name='PListTestProject'))
    def tearDown(self):
        for fixture in (self.apiuser, self.group, self.server, self.project):
            fixture.delete()
        super(ApiLimitsTestCase, self).tearDown()
    def _assert_fields(self, url, present, absent=()):
        """GET url and check which field names do / do not appear in the body."""
        response = self.c.get(url, {})
        for field in present:
            self.assertContains(response, field)
        for field in absent:
            self.assertNotContains(response, field)
    def test_groups_default(self):
        self._assert_fields("/api/groups/",
                ("name", "description", "email", "email_aliases",
                 "editor_group", "users"))
    def test_servers_default(self):
        self._assert_fields("/api/servers/",
                ("name", "description", "email", "email_aliases",
                 "editor_group", "users", "sudoers"))
    def test_projects_default(self):
        self._assert_fields("/api/projects/",
                ("name", "description", "email", "email_aliases",
                 "editor_group", "users"))
    def test_users_default(self):
        self._assert_fields("/api/users/",
                ("count", "next", "previous", "results",
                 "first_name", "last_name", "username", "title",
                 "phone1", "phone2", "email", "skype",
                 "google_status", "email_aliases"))
    def test_groups_limit(self):
        self._assert_fields("/api/groups/?limit=0",
                ("name", "description"),
                ("email", "email_aliases", "editor_group", "users"))
    def test_servers_limit(self):
        self._assert_fields("/api/servers/?limit=0",
                ("name", "description"),
                ("email", "email_aliases", "editor_group", "users", "sudoers"))
    def test_projects_limit(self):
        self._assert_fields("/api/projects/?limit=0",
                ("name", "description"),
                ("email", "email_aliases", "editor_group", "users"))
    def test_users_limit(self):
        self._assert_fields("/api/users/?limit=0",
                ("first_name", "last_name", "username", "email"),
                ("count", "next", "previous", "results",
                 "title", "phone1", "phone2", "skype",
                 "google_status", "email_aliases"))
class ChaosException(Exception):
    """Marker exception injected by the integrity tests to simulate arbitrary failures."""
class DataIntegrityTestCase(LdapTransactionSuite):
    """
    Transactional testing of the DB <-> LDAP write path.

    save()
    -> signals:
        pre_save
        post_save
            -> save_ldap()

    changes.send_data is called in signals.post_save.changes
    """
    def setUp(self):
        super(DataIntegrityTestCase, self).setUp()
        # Connected signals and created model instances; undone in tearDown.
        self.signals = []
        self.cleanup = []
    def tearDown(self):
        super(DataIntegrityTestCase, self).tearDown() # keep at top
        for signal in self.signals:
            self.rem_signal(signal['type'], **signal['kw'])
        # Group-like fixtures: detach their users so nothing lingers.
        for model in self.cleanup:
            if isinstance(model, BaseGroup):
                for k in model.users.all():
                    k.delete()
            # deleting against ldap not implemented (19/8)
            #model.delete()
    def add_signal(self, signal, **kwargs):
        """Connect a Django signal and remember it for disconnection in tearDown."""
        signal.connect(**kwargs)
        # disconnect() below is called without the receiver argument.
        kwargs.pop('receiver', None)
        self.signals.append({'type': signal, 'kw': kwargs})
    def rem_signal(self, signal, **kwargs):
        """Disconnect a signal previously registered via add_signal()."""
        signal.disconnect(**kwargs)
    def test_save_exception_in_changes(self):
        """A failure inside the changes handler rolls back both the DB row and LDAP."""
        name = 'Abe0'
        with patch('fum.common.signals.injection') as o:
            o.side_effect = ChaosException
            server_mock = Servers(name=name)
            try:
                server = self.save_safe(Servers, dict(name=name, description="Abradabradaa"), lookup=dict(name=name))
            except ChaosException, e:
                pass
            # Neither the database row nor the LDAP entry may exist afterwards.
            with self.assertRaises(Servers.DoesNotExist):
                Servers.objects.get(name=name)
            with self.assertRaises(KeyError):
                self.ldap_val('cn', server_mock)
    def test_save_exception_in_signal_postsave(self):
        """A failing post_save receiver rolls back the save entirely."""
        name = 'Abe1'
        with patch('fum.common.signals.injection') as o:
            self.add_signal(post_save, receiver=o, sender=Servers, dispatch_uid='test_mocked_handler')
            o.side_effect = ChaosException
            server_mock = Servers(name=name)
            with self.assertRaises(ChaosException):
                server = self.save_safe(Servers, dict(name=name, description="Abradabradaa"), lookup=dict(name=name))
            with self.assertRaises(Servers.DoesNotExist):
                Servers.objects.get(name=name)
            with self.assertRaises(KeyError):
                self.ldap_val('cn', server_mock)
            self.assertEqual(o.call_count, 1)
    def test_save(self):
        """A successful save writes the entry to both the database and LDAP."""
        name = 'Abe2'
        server_mock = Servers(name=name)
        server = self.save_safe(Servers, dict(name=name, description="Abradabradaa"), lookup=dict(name=name))
        self.cleanup.append(server)
        # .save -> signal -> .save_ldap()
        self.assertEqual(Servers.objects.get(name=name).name, name)
        self.assertEqual(self.ldap_val('cn', server_mock), [name])
    def test_sudoers(self):
        """The sudoers m2m is mirrored to LDAP and removed along with the server."""
        name = 'AbSudoServer'
        server_mock = Servers(name=name)
        server = self.save_safe(Servers, dict(name=name, description="AbSudo"), lookup=dict(name=name))
        server.users.add(self.user)
        self.assertEqual(self.ldap_val('uniqueMember', server.users), [self.user.get_dn()])
        server.sudoers.add(self.user)
        self.assertEqual(self.ldap_val('sudoUser', server.sudoers), [self.user.username])
        server.sudoers.remove(self.user)
        # After removal the attribute is either empty or missing entirely (KeyError).
        try:
            self.ldap_val('sudoUser', server.sudoers)
            self.assertEqual(self.ldap_val('sudoUser', server.sudoers), [])
        except KeyError:
            self.assertEqual(True, True);
        server.sudoers.add(self.user)
        self.assertEqual(self.ldap_val('sudoUser', server.sudoers), [self.user.username])
        server.delete()
        # The raw LDAP entry must be gone after the Django delete.
        try:
            self.ldap.fetch(dn='cn=AbSudoServer,{0}'.format(settings.SERVER_DN), scope=ldap.SCOPE_BASE, filters='(cn=*)')
            self.assertEqual(True, False)
        except ldap.NO_SUCH_OBJECT:
            self.assertEquals(True, True)
    def test_save_m2m_changes_exception(self):
        """An exception in the changes m2m handler must not block the m2m write itself."""
        name = 'Abe3'
        server_mock = Servers(name=name)
        server = self.save_safe(Servers, dict(name=name, description="Abradabradaa"), lookup=dict(name=name))
        self.cleanup.append(server)
        with patch('fum.common.signals.changes_m2m') as o:
            o.side_effect = ChaosException
            # changes_m2m wrapped in try/except
            #with self.assertNotRaises(ChaosException):
            server.users.add(self.user)
            self.assertEqual([k.name for k in server.users.all()], [self.user.name])
            self.assertEqual(self.ldap_val('uniqueMember', server.users), [self.user.get_dn()])
            self.assertEqual(o.call_count, 2) # TODO: magic number against code that is changing...
    def test_save_m2m_signal_exception(self):
        """ ChaosException should rollback transaction, and ensure nothing goes to LDAP """
        name = 'Abe4'
        with patch('fum.common.signals.ldap_m2m', autospec=True) as o:
            self.add_signal(m2m_changed, receiver=o, sender=Servers.users.through, dispatch_uid='test_mocked_handler_m2m')
            o.side_effect = ChaosException
            server_mock = Servers(name=name)
            server = self.save_safe(Servers, dict(name=name, description="Abradabradaa"), lookup=dict(name=name))
            self.cleanup.append(server)
            with self.assertRaises(ChaosException):
                server.users.add(self.user)
            # The relation must be rolled back in the DB and absent from LDAP.
            self.assertEqual(list(server.users.all()), [])
            with self.assertRaises(KeyError):
                self.ldap_val('uniqueMember', server.users)
            self.assertEqual(o.call_count, 1)
    def test_save_m2m_signal_exception_on_delete(self):
        """A failing m2m signal on remove() keeps the relation in both DB and LDAP."""
        name = 'Abe5'
        server_mock = Servers(name=name)
        server = self.save_safe(Servers, dict(name=name, description="Abradabradaa"), lookup=dict(name=name))
        self.cleanup.append(server)
        server.users.add(self.user)
        with patch('fum.common.signals.ldap_m2m', autospec=True) as o:
            self.add_signal(m2m_changed, receiver=o, sender=Servers.users.through, dispatch_uid='test_mocked_handler_m2m_on_delete')
            o.side_effect = ChaosException
            with self.assertRaises(ChaosException):
                server.users.remove(self.user)
            self.assertEqual([k.name for k in server.users.all()], [self.user.name])
            self.assertEqual(self.ldap_val('uniqueMember', server.users), [self.user.get_dn()])
            self.assertEqual(o.call_count, 1)
    def test_ldap_down_before_db_save(self):
        """If LDAP is down before the DB save, neither store is modified."""
        oldval = copy.deepcopy(self.user.first_name)
        with patch('fum.models.LDAPModel.save', autospec=True) as o:
            o.side_effect = ldap.SERVER_DOWN
            user = Users.objects.get(pk=self.user.pk)
            user.first_name = 'Jooseppi'
            with self.assertRaises(ldap.SERVER_DOWN):
                user.save()
            self.assertEqual(Users.objects.get(pk=self.user.pk).first_name, oldval)
            self.assertEqual(self.ldap_val('givenName', self.user), [oldval])
    def test_ldap_down_after_db_save(self):
        """If the LDAP write fails after the DB save, the DB change is rolled back."""
        oldval = copy.deepcopy(self.user.first_name)
        with patch('fum.ldap_helpers.LDAPBridge.save', autospec=True) as o:
            o.side_effect = ldap.SERVER_DOWN
            user = Users.objects.get(pk=self.user.pk)
            user.first_name = 'Jooseppi'
            with self.assertRaises(ldap.SERVER_DOWN):
                user.save()
            self.assertEqual(Users.objects.get(pk=self.user.pk).first_name, oldval)
            self.assertEqual(self.ldap_val('givenName', self.user), [oldval])
    @unittest.expectedFailure
    def test_ldap_down_after_db_and_ldap_save(self):
        """Known gap: failure after both stores committed cannot be rolled back."""
        oldval = copy.deepcopy(self.user.first_name)
        newval = 'Jooseppi'
        with patch('fum.ldap_helpers.LDAPBridge.for_testing') as o:
            o.side_effect = ChaosException
            user = Users.objects.get(pk=self.user.pk)
            user.first_name = copy.deepcopy(newval)
            with self.assertRaises(ChaosException):
                user.save()
            self.assertEqual(Users.objects.get(pk=self.user.pk).first_name, oldval)
            self.assertEqual(self.ldap_val('givenName', user), [oldval])
    def test_ldap_down_after_db_and_during_m2m_ldap_save(self):
        """SERVER_DOWN during the m2m LDAP write rolls back the DB relation."""
        name = 'Hessu'
        with patch('fum.common.signals.ldap_m2m', autospec=True) as o:
            self.add_signal(m2m_changed, receiver=o, sender=Servers.users.through, dispatch_uid='test_mocked_handler_m2m_failure')
            o.side_effect = ldap.SERVER_DOWN
            server_mock = Servers(name=name)
            server = self.save_safe(Servers, dict(name=name, description="Abradabradaa"), lookup=dict(name=name))
            self.cleanup.append(server)
            with self.assertRaises(ldap.SERVER_DOWN):
                server.users.add(self.user)
            self.assertEqual(list(server.users.all()), [])
            with self.assertRaises(KeyError):
                self.ldap_val('uniqueMember', server.users)
            self.assertEqual(o.call_count, 1)
    def test_ldap_timeout_after_db_and_during_m2m_ldap_save(self):
        """TIMEOUT during the m2m LDAP write rolls back the DB relation, too."""
        name = 'Hessu'
        with patch('fum.common.signals.ldap_m2m', autospec=True) as o:
            self.add_signal(m2m_changed, receiver=o, sender=Servers.users.through, dispatch_uid='test_mocked_handler_m2m_timeout_failure')
            o.side_effect = ldap.TIMEOUT
            server_mock = Servers(name=name)
            server = self.save_safe(Servers, dict(name=name, description="Abradabradaa"), lookup=dict(name=name))
            self.cleanup.append(server)
            with self.assertRaises(ldap.TIMEOUT):
                server.users.add(self.user)
            self.assertEqual(list(server.users.all()), [])
            with self.assertRaises(KeyError):
                self.ldap_val('uniqueMember', server.users)
            self.assertEqual(o.call_count, 1)
    def test_value_in_ldap_not_in_db_will_overwite_ldap(self):
        """A pre-existing LDAP entry with no DB row gets overwritten on save."""
        name = 'Nightmare'
        data = dict(name=name, description="Abradabradaa")
        server = Servers(**data)
        # Create the LDAP entry by hand, bypassing the Django model.
        new_attrs = {}
        new_attrs['objectClass'] = copy.deepcopy(server.ldap_object_classes)
        gu_id = 259
        new_attrs[server.ldap_id_number_field] = "%d"%gu_id
        new_attrs.update(data)
        del new_attrs['name']
        new_attrs['objectClass'].remove('sambaGroupMapping')
        mlist = modlist.addModlist(new_attrs)
        try:
            server.ldap.create_raw(server.get_dn(), mlist)
        except ldap.ALREADY_EXISTS, e:
            print e # no tearDown for this
        server = self.save_safe(Servers, data, lookup=dict(name=name))
        self.assertTrue(Servers.objects.get(name=name))
        server.description = 'Wonderland'
        server.save()
        # After the model save, LDAP must reflect the database value.
        self.assertEqual(self.ldap_val('description', server)[0], Servers.objects.get(name=name).description)
    def test_m2m_value_in_ldap_not_in_db_will_overwite_ldap(self):
        """Stale m2m values present only in LDAP are reconciled by model m2m adds."""
        name = 'Nightmare2'
        data = dict(name=name, description="Abradabradaa")
        server = Servers(**data)
        # Create the LDAP entry by hand, bypassing the Django model.
        new_attrs = {}
        new_attrs['objectClass'] = copy.deepcopy(server.ldap_object_classes)
        gu_id = 259
        new_attrs[server.ldap_id_number_field] = "%d"%gu_id
        new_attrs.update(data)
        del new_attrs['name']
        new_attrs['objectClass'].remove('sambaGroupMapping')
        mlist = modlist.addModlist(new_attrs)
        try:
            server.ldap.create_raw(server.get_dn(), mlist)
        except ldap.ALREADY_EXISTS, e:
            print e # no tearDown for this
        server = self.save_safe(Servers, data, lookup=dict(name=name))
        self.assertTrue(Servers.objects.get(name=name))
        # add related user to LDAP
        mlist = [(0, 'uniqueMember', ['uid=testuser,{0}'.format(settings.USER_DN)])]
        try:
            server.ldap.save_ext_raw(server.get_dn(), mlist)
        except ldap.TYPE_OR_VALUE_EXISTS, e:
            print e # no tearDown for this
        server.users.add(self.user)
        self.assertEqual([k.username for k in server.users.all()], [self.user.username])
        self.assertEqual([k.get_dn() for k in server.users.all()], self.ldap_val('uniqueMember', server.users))
        mlist = [(0, 'sudoUser', ['testuser'])]
        try:
            server.ldap.save_ext_raw(server.get_ldap_sudoers_dn(), mlist)
        except ldap.TYPE_OR_VALUE_EXISTS, e:
            print e # no tearDown for this
        server.sudoers.add(self.user)
        self.assertEqual([k.username for k in server.sudoers.all()], self.ldap_val('sudoUser', server.sudoers))
class ProjectTestCase(LdapTransactionSuite):
    """Validation rules for project names ('P' + capitalised CamelCase word)."""
    def test_create_name(self):
        """Run each candidate name through full_clean() in the original order and
        check whether validation accepts or rejects it."""
        project = Projects()
        project.name = 'PFoo'
        setattr(settings, 'FUM_LAUNCH_DAY', datetime.datetime.now() - datetime.timedelta(days=5))
        # (candidate name, expected to pass validation)
        cases = [
            ('PFoo', False),
            ('foo', False),
            ('PfooBar', False),
            ('PFooBar', True),
            ('P90Balloons', True),
            ('PCompanyPHP', True),
            ('PCOMPANYAndroid', False),
            ('P247entertainmentJukeW8', True),
        ]
        for candidate, is_valid in cases:
            project.name = candidate
            if is_valid:
                self.assertEqual(None, project.full_clean())
            else:
                with self.assertRaises(ValidationError):
                    project.full_clean()
class SSHKeyTestCase(LdapTransactionSuite):
    # Two well-formed RSA public keys used as fixtures throughout these tests.
    valid_ssh_key = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3uta/x/kAwbs2G7AOUQtRG7l1hjEws4mrvnTZmwICoGNi+TUwxerZgMbBBID7Kpza/ZSUqXpKX5gppRW9zECBsbJ+2D0ch/oVSZ408aUE6ePNzJilLA/2wtRct/bkHDZOVI+iwEEr1IunjceF+ZQxnylUv44C6SgZvrDj+38hz8z1Vf4BtW5jGOhHkddTadU7Nn4jQR3aFXMoheuu/vHYD2OyDJj/r6vh9x5ey8zFmwsGDtFCCzzLgcfPYfOdDxFIWhsopebnH3QHVcs/E0KqhocsEdFDRvcFgsDCKwmtHyZVAOKym2Pz9TfnEdGeb+eKrleZVsApFrGtSIfcf4pH user@host'
    valid_ssh_key_2 = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDI86o9MlbA7NI/lXbWk7OSJw2bEfOAJsdkqrGmU1FVUZwCRupmx+VnelinyEUDCC5fwycTMcPAkUk990xogN8iH3aHZkfpun89091U+DyeLrfYPwP1lGo5ubGdPseAxJYZ4nbNQcBCGamtAwMeHl9UUfEoLFNE6GK62Yo9MGBNl28AeOX/NNz3WniMImr45x2kuL7E/pugnKcUCc2i1a+xxQdm4aqOzek/RYZ9pAwl8KeVipEUHpFZWsldLlXM28agzIrdxAVURc7rUJyz2PtF5vBrPTNDVhqX0tG3fgZ2uLlyWfc3a97gQrlgXKqM13hQ2lK0h5dPYWRe4WTFrmQn user2@host2'
    def get_ldap_ssh_keys(self, user=None):
        """Return the raw SSH-key attribute values stored in LDAP for *user*.

        NOTE(review): the search always runs over ``self.user.ldap`` even when
        a different *user* is passed — presumably the bridge is shared across
        users; confirm whether this should be ``user.ldap``.
        """
        user = user or self.user
        data = self.user.ldap.op_search(user.get_dn(),
            ldap.SCOPE_BASE, '(uid=' + user.username + ')',
            [SSHKey.LDAP_ATTR])
        data = data[0][1]
        if SSHKey.LDAP_ATTR in data:
            return data[SSHKey.LDAP_ATTR]
        return []
    def assert_ldap_ssh_key_count(self, count, user=None):
        """Assert that *user* (default self.user) has *count* keys in LDAP."""
        self.assertEqual(len(self.get_ldap_ssh_keys(user)), count)
    def get_addsshkey_url(self, user=None):
        """URL of the add-SSH-key endpoint for *user* (default self.user)."""
        user = user or self.user
        return reverse('users-detail', args=[user.username]) + 'addsshkey/'
    def get_deletesshkey_url(self, user=None):
        """URL of the delete-SSH-key endpoint for *user* (default self.user)."""
        user = user or self.user
        return reverse('users-detail', args=[user.username]) + 'deletesshkey/'
    def test_key_validity(self):
        """Malformed keys are rejected; valid keys are added to and removed from LDAP."""
        self.assert_ldap_ssh_key_count(0)
        add_url = self.get_addsshkey_url()
        delete_url = self.get_deletesshkey_url()
        resp = self.client.post(add_url,
            {'title': 'bad key', 'key': 'invalid string'}, format='json')
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        resp = self.client.post(add_url,
            {'title': 'my key', 'key': self.valid_ssh_key}, format='json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assert_ldap_ssh_key_count(1)
        resp = self.client.post(add_url,
            {'title': 'my key 2', 'key': self.valid_ssh_key_2}, format='json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assert_ldap_ssh_key_count(2)
        # Deletion is addressed by fingerprint, via the DELETE method override.
        resp = self.client.post(delete_url, {'fingerprint':
            SSHKey.objects.filter(user=self.user)[0].fingerprint},
            format='json', HTTP_X_HTTP_METHOD_OVERRIDE='DELETE')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assert_ldap_ssh_key_count(1)
    def test_equal_fingerprints(self):
        """Two keys with the same fingerprint cannot both be added."""
        self.assert_ldap_ssh_key_count(0)
        url = self.get_addsshkey_url()
        resp = self.client.post(url,
            {'title': 'k1', 'key': self.valid_ssh_key}, format='json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        # key with the same fingerprint
        resp = self.client.post(url,
            {'title': 'k2', 'key': self.valid_ssh_key + '2'}, format='json')
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
    def test_transaction(self):
        """
        Ensure no key is in the FUM DB if there's an error adding it to LDAP.
        """
        self.assert_ldap_ssh_key_count(0)
        url = self.get_addsshkey_url()
        def mocked(*args, **kwargs):
            raise Exception('mock')
        # Monkey-patch the LDAP modify operation class-wide; restored in finally.
        cls = ldap_cls(parent=None).__class__
        original = cls.op_modify
        try:
            cls.op_modify = mocked
            with self.assertRaises(Exception):
                resp = self.client.post(url,
                    {'title': 'k1', 'key': self.valid_ssh_key}, format='json')
        finally:
            cls.op_modify = original
        self.assert_ldap_ssh_key_count(0)
        self.assertEqual(SSHKey.objects.filter(user=self.user).count(), 0)
    def test_permissions(self):
        """
        Only sudo users can set or delete other users' ssh keys.
        """
        self.assert_ldap_ssh_key_count(0)
        add_url = self.get_addsshkey_url()
        delete_url = self.get_deletesshkey_url()
        resp = self.client.post(add_url,
            {'title': 'my key', 'key': self.valid_ssh_key}, format='json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assert_ldap_ssh_key_count(1)
        pw = random_ldap_password()
        other_dj_user, other_user = self.create_user('test_perm_user',
            password=pw)
        self.assertTrue(self.client.login(username=other_user.username,
            password=pw))
        # normal users can't add or delete other users' keys
        resp = self.client.post(add_url,
            {'title': 'my key', 'key': self.valid_ssh_key_2}, format='json')
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
        self.assert_ldap_ssh_key_count(1)
        resp = self.client.post(delete_url, {'fingerprint':
            SSHKey.objects.get(user=self.user).fingerprint},
            format='json', HTTP_X_HTTP_METHOD_OVERRIDE='DELETE')
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
        self.assert_ldap_ssh_key_count(1)
        # superusers can add and delete others' ssh keys
        # add to TeamIT
        g = self.save_safe(Groups,
            kw=dict(name=settings.IT_TEAM),
            lookup=dict(name=settings.IT_TEAM))
        try:
            g.users.add(other_user)
        except ldap.TYPE_OR_VALUE_EXISTS, e: # live LDAP not cleaned
            pass
        # Elevate to sudo mode, then retry the cross-user operations.
        response = self.client.post('/sudo/', {'password': pw},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEquals(response.status_code, status.HTTP_200_OK)
        resp = self.client.post(add_url,
            {'title': 'my key', 'key': self.valid_ssh_key_2}, format='json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assert_ldap_ssh_key_count(2)
        for ssh_key in SSHKey.objects.filter(user=self.user):
            resp = self.client.post(delete_url,
                {'fingerprint': ssh_key.fingerprint},
                format='json', HTTP_X_HTTP_METHOD_OVERRIDE='DELETE')
            self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assert_ldap_ssh_key_count(0)
| |
class Stats(object):
    """
    Builds the query parameters for a global email statistics request.
    """
    def __init__(self, start_date=None):
        """Create a Stats object.

        :param start_date: Date of when stats should begin in YYYY-MM-DD format, defaults to None
        :type start_date: string, optional
        """
        # Every query parameter starts out unset; None means "omit from get()".
        self._start_date = None
        self._end_date = None
        self._aggregated_by = None
        self._sort_by_metric = None
        self._sort_by_direction = None
        self._limit = None
        self._offset = None
        # A start date is the minimum requirement for a stats request.
        if start_date:
            self.start_date = start_date

    def __str__(self):
        """Get a JSON-like string representation of this object.

        :rtype: string
        """
        return str(self.get())

    def get(self):
        """
        Get a JSON-ready representation of this Stats object.

        :returns: dict containing only the parameters that have been set
        :rtype: dict
        """
        param_names = ("start_date", "end_date", "aggregated_by",
                       "sort_by_metric", "sort_by_direction", "limit", "offset")
        return {name: getattr(self, name)
                for name in param_names
                if getattr(self, name) is not None}

    @property
    def start_date(self):
        """Date of when stats should begin, in YYYY-MM-DD format.

        :rtype: string
        """
        return self._start_date

    @start_date.setter
    def start_date(self, value):
        """Set the date stats should begin, in YYYY-MM-DD format.

        :type value: string
        """
        self._start_date = value

    @property
    def end_date(self):
        """Date of when stats should end, in YYYY-MM-DD format.

        :rtype: string
        """
        return self._end_date

    @end_date.setter
    def end_date(self, value):
        """Set the date stats should end, in YYYY-MM-DD format.

        :type value: string
        """
        self._end_date = value

    @property
    def aggregated_by(self):
        """Period (e.g. 'day', 'week', 'month') used to group the stats.

        :rtype: string
        """
        return self._aggregated_by

    @aggregated_by.setter
    def aggregated_by(self, value):
        """Set the period used to group the stats.

        :type value: string
        """
        self._aggregated_by = value

    @property
    def sort_by_metric(self):
        """Metric the stats are sorted by.

        :rtype: string
        """
        return self._sort_by_metric

    @sort_by_metric.setter
    def sort_by_metric(self, value):
        """Set the metric to sort stats by.

        :type value: string
        """
        self._sort_by_metric = value

    @property
    def sort_by_direction(self):
        """Sort direction, either 'asc' or 'desc'.

        :rtype: string
        """
        return self._sort_by_direction

    @sort_by_direction.setter
    def sort_by_direction(self, value):
        """Set the sort direction ('asc' or 'desc').

        :type value: string
        """
        self._sort_by_direction = value

    @property
    def limit(self):
        """Maximum number of results to be returned.

        :rtype: int
        """
        return self._limit

    @limit.setter
    def limit(self, value):
        """Set the maximum number of results.

        :type value: int
        """
        self._limit = value

    @property
    def offset(self):
        """Number of places the starting point of the data set moves.

        :rtype: int
        """
        return self._offset

    @offset.setter
    def offset(self, value):
        """Set the result-set offset.

        :type value: int
        """
        self._offset = value
class CategoryStats(Stats):
    """
    Builds the query params for a category statistics request.
    """
    def __init__(self, start_date=None, categories=None):
        """Create a CategoryStats object

        :param start_date: Date of when stats should begin in YYYY-MM-DD format, defaults to None
        :type start_date: string, optional
        :param categories: list of categories to get results of, defaults to None
        :type categories: list(string), optional
        """
        self._categories = None
        super(CategoryStats, self).__init__()
        # Minimum required for category stats: both a start date and categories.
        if start_date and categories:
            self.start_date = start_date
            for cat_name in categories:
                self.add_category(Category(cat_name))

    def get(self):
        """
        Get a JSON-ready representation of this CategoryStats.

        :return: response category stats dict
        """
        # Delegate the common fields to Stats.get() instead of duplicating the
        # whole serialization here (keeps the two classes from drifting apart).
        stats = super(CategoryStats, self).get()
        if self.categories is not None:
            stats['categories'] = [category.get() for category in
                                   self.categories]
        return stats

    @property
    def categories(self):
        """List of categories, or None if none were added.

        :rtype: list(Category)
        """
        return self._categories

    def add_category(self, category):
        """Appends a category to this object's category list

        :param category: Category to append to CategoryStats
        :type category: Category
        """
        # The list is created lazily on first append.
        if self._categories is None:
            self._categories = []
        self._categories.append(category)
class SubuserStats(Stats):
    """
    Builds the query params for a subuser statistics request.
    """
    def __init__(self, start_date=None, subusers=None):
        """Create a SubuserStats object

        :param start_date: Date of when stats should begin in YYYY-MM-DD format, defaults to None
        :type start_date: string, optional
        :param subusers: list of subusers to get results of, defaults to None
        :type subusers: list(string), optional
        """
        self._subusers = None
        super(SubuserStats, self).__init__()
        # Minimum required for subuser stats: both a start date and subusers.
        if start_date and subusers:
            self.start_date = start_date
            for subuser_name in subusers:
                self.add_subuser(Subuser(subuser_name))

    def get(self):
        """
        Get a JSON-ready representation of this SubuserStats.

        :return: response subuser stats dict
        """
        # Delegate the common fields to Stats.get() instead of duplicating the
        # whole serialization here (keeps the two classes from drifting apart).
        stats = super(SubuserStats, self).get()
        if self.subusers is not None:
            stats['subusers'] = [subuser.get() for subuser in
                                 self.subusers]
        return stats

    @property
    def subusers(self):
        """List of subusers, or None if none were added.

        :rtype: list(Subuser)
        """
        return self._subusers

    def add_subuser(self, subuser):
        """Appends a subuser to this object's subuser list

        :param subuser: Subuser to append to SubuserStats
        :type subuser: Subuser
        """
        # The list is created lazily on first append.
        if self._subusers is None:
            self._subusers = []
        self._subusers.append(subuser)
class Category(object):
    """
    A single named statistics category, used inside a CategoryStats object.
    """
    def __init__(self, name=None):
        """Create a Category.

        :param name: name of category, defaults to None
        :type name: string, optional
        """
        # None means "unnamed"; the property setter can fill it in later.
        self._name = name

    @property
    def name(self):
        """Name of the category.

        :rtype: string
        """
        return self._name

    @name.setter
    def name(self, value):
        """Set the category's name.

        :param value: name of the statistical category
        :type value: string
        """
        self._name = value

    def get(self):
        """Return the category's name (its wire representation).

        :return: string of the category's name
        """
        return self.name
class Subuser(object):
    """
    A single named subuser, used inside a SubuserStats object.
    """
    def __init__(self, name=None):
        """Create a Subuser.

        :param name: name of subuser, defaults to None
        :type name: string, optional
        """
        # None means "unnamed"; the property setter can fill it in later.
        self._name = name

    @property
    def name(self):
        """Name of the subuser.

        :rtype: string
        """
        return self._name

    @name.setter
    def name(self, value):
        """Set the subuser's name.

        :param value: name of the subuser
        :type value: string
        """
        self._name = value

    def get(self):
        """Return the subuser's name (its wire representation).

        :return: string of the subuser's name
        """
        return self.name
| |
from typing import Any, Dict
import tensorflow as tf
from keras import backend as K
from keras.regularizers import l1
from .masked_layer import MaskedLayer
class AdaptiveRecurrence:
    '''
    Recurrence strategy that performs an adaptive number of memory network steps,
    delegating the actual work to the AdaptiveStep layer. At every iteration a
    halting probability is computed from the memory representation (a dot product
    with a model parameter), implementing a single step of Adaptive Computation
    Time (Graves, 2016): https://arxiv.org/abs/1603.08983. Behaviour is governed
    by the epsilon, max_computation and ponder_cost entries of ``params``.

    The AdaptiveStep layer mixes raw tensorflow code into Keras, so this class
    requires the tensorflow backend (it raises on theano). Because the step count
    is not known ahead of time, the static computational graph can only be built
    for layers that share their weights across memory network steps: one copy of
    the weights is created and the graph loops over it. A side effect of building
    all step layers inside AdaptiveStep is that their parameters are attributed
    to that single layer, which makes per-layer debugging harder.
    '''
    def __init__(self, memory_network, params: Dict[str, Any]):
        # Tensorflow-only: AdaptiveStep embeds native tensorflow operations.
        if K.backend() == 'theano':
            raise Exception("You are trying to use an adaptive method for performing "
                            "memory network steps using Keras with Theano as its "
                            "backend. This recurrence method requires Keras to use "
                            "Tensorflow as it's backend, as it has native tensorflow "
                            "code mixed in which is not compatible with theano.")
        self.memory_network = memory_network
        self.adaptive_step_params = params

    def __call__(self, encoded_question, current_memory, encoded_knowledge):
        step_layer = AdaptiveStep(self.memory_network, self.adaptive_step_params)
        return step_layer([encoded_question, current_memory, encoded_knowledge])
class AdaptiveStep(MaskedLayer):
    '''
    This layer implements a single step of the halting component of the Adaptive Computation Time
    algorithm, generalised so that it can be applied to any arbitrary function. Here, that
    function is a single memory network step. This can be seen as a differentiable while loop,
    where the halting condition is an accumulated 'halting probability' which is computed at
    every memory network step using the following formula:

        halting_probability = sigmoid(W * memory_vector + b)

    where W, b are parameters of the network. This halting probability is then accumulated and
    when it increments over 1 - epsilon, we halt and return the current_memory and the
    attended_knowledge from the last step.

    The main machinery implemented here is to deal with doing this process with batched inputs.
    There is a subtlety here regarding the batch_size, as clearly we will have samples halting
    at different points in the batch. This is dealt with using logical masks to protect
    accumulated probabilities, states and outputs from a timestep t's contribution if they have
    already reached 1 - epsilon at a timestep s < t.
    '''
    def __init__(self, memory_network, layer_params: Dict[str, Any],
                 initialization='glorot_uniform', name='adaptive_layer', **kwargs):
        # Dictates the value at which we halt the memory network steps (1 - epsilon).
        # Necessary so that the network can learn to halt after one step. If we didn't have
        # this, the first halting value is < 1 in practise as it is the output of a sigmoid.
        self.epsilon = layer_params.pop("epsilon", 0.01)
        self.one_minus_epsilon = tf.constant(1.0 - self.epsilon, name='one_minus_epsilon')
        # Used to bound the number of memory network hops we do. Necessary to prevent
        # the network from learning that the loss it achieves can be minimised by
        # simply not stopping.
        self.max_val = layer_params.pop("max_computation", 10)
        self.max_computation = tf.constant(self.max_val, tf.float32, name='max_computation')
        # Regularisation coefficient for the ponder cost. In order to dictate how many steps you
        # want to take, we add |number of steps| to the training objective, in the same way as
        # you might add weight regularisation. This makes the model optimise performance whilst
        # moderating the number of steps it takes. This parameter is _extremely_ sensitive.
        # Consider as well that this parameter will affect the training time of your model, as
        # it will take more steps if it is small. Bear this in mind when doing grid searches
        # over this parameter.
        self.ponder_cost_strength = layer_params.pop("ponder_cost_strength", 0.05)
        self.memory_network = memory_network
        self.init = initialization
        self.name = name
        # Attributes to be defined when we build this layer.
        self.halting_weight = None
        self.halting_bias = None
        self.trainable_weights = []
        super(AdaptiveStep, self).__init__(**kwargs)

    def build(self, input_shape):
        # pylint: disable=protected-access
        '''
        The only weight that this layer requires is used in a simple dot product with the
        current_memory to generate the halting_probability. We define the weight shape with the
        2nd input to this layer, as this is the memory representation, which will dictate the
        required size. Note that this is actually building layers defined within the Memory
        Network. We alter the size of the shapes for each of the layers as they are done
        sequentially and would normally be built with the shape of the input. Here, we have to
        specify it manually as we want to build them in advance, rather than when they are
        called.
        '''
        input_dim = input_shape[1][-1]
        self.halting_weight = self.add_weight(((input_dim,) + (1,)),
                                              initializer=self.init,
                                              name='{}_halting_weight'.format(self.name))
        self.halting_bias = self.add_weight((),
                                            initializer=self.init,
                                            name='{}_halting_bias'.format(self.name))
        self.trainable_weights = [self.halting_weight, self.halting_bias]

        background_knowledge_shape = list(input_shape[2])
        knowledge_selector_input_shape = list(background_knowledge_shape)
        # Shape after appending the original question and the current memory to the
        # background knowledge (hence the +2 on the knowledge_length axis).
        knowledge_selector_input_shape[-2] += 2
        knowledge_selector = self.memory_network._get_knowledge_selector(0)
        knowledge_selector.build(tuple(knowledge_selector_input_shape))
        self.trainable_weights.extend(knowledge_selector.trainable_weights)

        knowledge_combiner_input_shape = list(background_knowledge_shape)
        # Shape after appending the attention mask to the background knowledge.
        knowledge_combiner_input_shape[-1] += 1
        knowledge_combiner = self.memory_network._get_knowledge_combiner(0)
        knowledge_combiner.build(tuple(knowledge_combiner_input_shape))
        self.trainable_weights.extend(knowledge_combiner.trainable_weights)

        memory_updater_input_shape = list(background_knowledge_shape)
        # Shape after removing the knowledge_length dimension (post knowledge_combiner)
        # and concatenating the original_question, current_memory and attended_knowledge.
        memory_updater_input_shape.pop(-2)
        memory_updater_input_shape[-1] *= 3
        memory_updater = self.memory_network._get_memory_updater(0)
        memory_updater.build(tuple(memory_updater_input_shape))
        self.trainable_weights.extend(memory_updater.trainable_weights)
        super(AdaptiveStep, self).build(input_shape)

    def call(self, inputs, mask=None):
        '''
        Run the adaptive while loop over memory network steps and return
        [final_memory, attended_knowledge], both shaped like the initial memory.
        '''
        encoded_question, current_memory, encoded_knowledge = inputs
        # We need to create a tensor which doesn't have the encoding_dim dimension. So that this
        # Layer is independent of the dimension of the input tensors, we just sum over the last
        # dimension to remove it. We only use this to create variables, nothing else.
        memory_cell = K.sum(current_memory, -1)
        # This is a boolean mask, holding whether a particular sample has halted.
        batch_mask = tf.cast(tf.ones_like(memory_cell, name='batch_mask'), tf.bool)
        # This counts the number of memory steps per sample.
        hop_counter = tf.zeros_like(memory_cell, name='hop_counter')
        # This accumulates the halting probabilities.
        halting_accumulator = tf.zeros_like(memory_cell, name='halting_accumulator')
        # This also accumulates the halting probabilities, with the difference being that if an
        # outputed probability causes a particular sample to go over 1 - epsilon, this
        # accumulates that value, but the halting_accumulator does not. This variable is _only_
        # used in the halting condition of the loop.
        halting_accumulator_for_comparison = tf.zeros_like(memory_cell,
                                                           name='halting_acc_for_comparision')
        # This accumulates the weighted memory vectors at each memory step. The memory is
        # weighted by the halting probability and added to this accumulator.
        memory_accumulator = tf.zeros_like(current_memory, name='memory_accumulator')
        # We need the attended_knowledge from the last memory network step, so we create a dummy
        # variable to input to the while_loop, as tensorflow requires the input signature to
        # match the output signature.
        attended_knowledge_loop_placeholder = tf.zeros_like(current_memory,
                                                            name='attended_knowledge_placeholder')

        # This actually does the computation of self.adaptive_memory_hop,
        # checking the condition at every step to see if it should stop.
        loop_outputs = tf.while_loop(cond=self.halting_condition, body=self.adaptive_memory_hop,
                                     loop_vars=[batch_mask,
                                                halting_accumulator,
                                                halting_accumulator_for_comparison,
                                                hop_counter,
                                                encoded_question,
                                                current_memory,
                                                encoded_knowledge,
                                                memory_accumulator,
                                                attended_knowledge_loop_placeholder
                                                ])
        # The while loop returns a final value for every loop variable; we need the final hop
        # counts (index 3) and the last two outputs (the accumulated memory and the attended
        # knowledge from the final step).
        hop_counter = loop_outputs[3]
        current_memory, attended_knowledge = loop_outputs[-2], loop_outputs[-1]

        # BUG FIX: previously the ponder cost was attached to the *initial* hop_counter tensor
        # (all zeros) before the while loop ran. Tensors are immutable in the static graph, so
        # the updated counter produced inside the loop was a different tensor and was discarded,
        # making the regularisation term a constant zero. Attaching the l1 penalty to the final
        # hop counts returned by the loop makes the ponder cost actually bias the network toward
        # taking fewer steps.
        ponder_cost = l1(self.ponder_cost_strength)
        self.add_loss(ponder_cost(hop_counter))

        return [current_memory, attended_knowledge]

    def halting_condition(self,
                          batch_mask,
                          halting_accumulator,
                          halting_accumulator_for_comparison,
                          hop_counter,
                          encoded_question,
                          current_memory,
                          encoded_knowledge,
                          memory_accumulator,
                          attended_knowledge_placeholder):
        '''
        Loop condition for the adaptive while loop: keep iterating while any sample in the
        batch has neither accumulated 1 - epsilon of halting probability nor exceeded the
        maximum number of steps.
        '''
        # Tensorflow requires that we use all of the variables used in the tf.while_loop as
        # inputs to the condition for halting the loop, even though we only actually make use
        # of two of them.
        # pylint: disable=unused-argument
        # This condition checks the batch elementwise to see if any of the accumulated halting
        # probabilities have gone over one_minus_epsilon in the previous iteration.
        probability_condition = tf.less(halting_accumulator_for_comparison, self.one_minus_epsilon)
        # This condition checks the batch elementwise to see if any have taken more steps than
        # the max allowed.
        max_computation_condition = tf.less(hop_counter, self.max_computation)
        # We only stop if both of the above conditions are true....
        combined_conditions = tf.logical_and(probability_condition, max_computation_condition)
        # ... for the entire batch.
        return tf.reduce_any(combined_conditions)

    def adaptive_memory_hop(self,
                            batch_mask,
                            halting_accumulator,
                            halting_accumulator_for_comparison,
                            hop_counter,
                            encoded_question,
                            previous_memory,
                            encoded_knowledge,
                            memory_accumulator,
                            attended_knowledge):
        '''
        In this method, we do a full step of the memory network and generate the probability of
        halting per example, followed by various updates to counters and masks which are used to
        control the halting mechanism for the batch of samples.
        '''
        # First things first: let's actually do a memory network step. This is exactly the same
        # as in the vanilla memory network.
        current_memory, attended_knowledge = self.memory_network.memory_step(
            encoded_question, previous_memory, encoded_knowledge)

        # Here, we are computing the probability that each sample in the batch will halt at this
        # iteration. This outputs a vector of probabilities of shape (samples, ), or
        # (samples, num_options) for memory networks with multiple answer options.
        with tf.variable_scope("halting_calculation"):
            halting_probability = tf.squeeze(tf.sigmoid(
                K.dot(current_memory, self.halting_weight) + self.halting_bias), [-1])

        # This is where the loop condition variables are controlled, which takes several steps.
        # First, we compute a new batch mask, which will be of size (samples, ). We want there
        # to be 0s where a given sample's adaptive loop should have halted. To check this, we
        # compare element-wise the halting_accumulator plus this iteration's halting
        # probabilities to see if they are less than 1 - epsilon. Additionally, if a given
        # sample had halted at the previous batch, we don't want these to accidentally start
        # again in this iteration, so we also compare to the previous batch_mask using logical
        # and.
        # Example of why we need to protect against the above scenario:
        # If we were at 0.8 and generated a probability of 0.3, which would take us over
        # 1 - epsilon. We then don't add this to the halting_accumulator, and then in the next
        # iteration, we generate 0.1, which would not take us over the limit, as the
        # halting_accumulator is still at 0.8. However, we don't want to consider this
        # contribution, as we have already halted.
        new_batch_mask = tf.logical_and(
            tf.less(halting_accumulator + halting_probability, self.one_minus_epsilon), batch_mask)

        # Next, we update the halting_accumulator by adding on the halting_probabilities from
        # this iteration, masked by the new_batch_mask. Note that this means that if the
        # halting_probability for a given sample has caused the accumulator to go over
        # 1 - epsilon, we DO NOT update this value in the halting_accumulator. Values in this
        # accumulator can never be over 1 - epsilon.
        new_float_mask = tf.cast(new_batch_mask, tf.float32)
        halting_accumulator += halting_probability * new_float_mask

        # Finally, we update the halting_accumulator_for_comparison, which is only used in
        # the halting condition in the while_loop. Note that here, we are adding on the halting
        # probabilities multiplied by the previous iteration's batch_mask, which means that we
        # DO update samples over 1 - epsilon. This means that we can check in the loop condition
        # to see if all samples are over 1 - epsilon, which means we should halt the while_loop.
        halting_accumulator_for_comparison += halting_probability * tf.cast(batch_mask, tf.float32)

        # This just counts the number of memory network steps we take for each sample.
        # We use this for regularisation - by adding this to the loss function, we can bias
        # the network to take fewer steps.
        hop_counter += new_float_mask
        # This condition checks whether a sample has gone over the permitted number of memory
        # steps.
        counter_condition = tf.less(hop_counter, self.max_computation)

        # If a given sample is under the max number of steps AND not yet halted, we use the
        # "use_probability" value in the conditional below. This option is just accumulating the
        # memory network state, as the output of this whole loop is a weighted sum of the memory
        # representations with respect to the halting probabilities at each step. Additionally,
        # we multiply by the previous batch mask so that if we have previously stopped for a
        # given batch, we never add any more on in a future timestep.
        # The second "use_remainder" option is taken when a given batch should halt, determined
        # by the final_iteration_condition. Instead of using the final halting_probability, we
        # use 1 - halting_accumulator, due to the 1 - epsilon halting condition, as the final
        # probability also needs to take into account this epsilon value.
        not_final_iteration_condition = tf.expand_dims(
            tf.logical_and(new_batch_mask, counter_condition), -1)

        use_probability = tf.expand_dims(halting_probability, -1)
        use_remainder = tf.expand_dims(1.0 - halting_accumulator, -1)

        memory_update_weight = tf.where(not_final_iteration_condition, use_probability, use_remainder)
        expanded_batch_mask = tf.expand_dims(tf.cast(batch_mask, tf.float32), -1)
        memory_accumulator += current_memory * memory_update_weight * expanded_batch_mask

        # We have to return all of these values as a requirement of the tf.while_loop. Some of
        # them, we haven't updated, such as the encoded_question and encoded_knowledge.
        return [new_batch_mask,
                halting_accumulator,
                halting_accumulator_for_comparison,
                hop_counter,
                encoded_question,
                current_memory,
                encoded_knowledge,
                memory_accumulator,
                attended_knowledge]

    def compute_mask(self, inputs, mask=None):  # pylint: disable=unused-argument
        # We don't want to mask either of the outputs here, so we return None for both of them.
        return [None, None]

    def compute_output_shape(self, input_shapes):
        # We output two tensors from this layer, the final memory representation and
        # the attended knowledge from the final memory network step. Both have the same
        # shape as the initial memory vector, likely (samples, encoding_dim), which is
        # passed in as the 2nd argument, so we return this shape twice.
        return [input_shapes[1], input_shapes[1]]

    def get_config(self):
        '''Serialise the layer's configuration (note the memory_network entry is a class
        object and will not round-trip through deserialisation).'''
        config = {
            # TODO: This won't work when we reload the model.
            'memory_network': self.memory_network.__class__,
            'name': self.name,
            'init': self.init,
            'ponder_cost_strength': self.ponder_cost_strength,
            'epsilon': self.epsilon,
            'max_computation': self.max_val
        }
        base_config = super(AdaptiveStep, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| |
import base64
import re
import struct
from hashlib import md5, sha1
from socket import error as socket_error
from urllib import quote
from gevent.pywsgi import WSGIHandler
from geventwebsocket.websocket import WebSocketHybi, WebSocketHixie
class WebSocketHandler(WSGIHandler):
    """Automatically upgrades the connection to websockets.

    To prevent the WebSocketHandler to call the underlying WSGI application,
    but only setup the WebSocket negotiations, do:

        mywebsockethandler.prevent_wsgi_call = True

    before calling handle_one_response(). This is useful if you want to do
    more things before calling the app, and want to off-load the WebSocket
    negotiations to this library. Socket.IO needs this for example, to
    send the 'ack' before yielding the control to your WSGI app.
    """
    # Magic GUID from RFC 6455 used to compute the Sec-WebSocket-Accept value.
    GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
    SUPPORTED_VERSIONS = ('13', '8', '7')

    def handle_one_response(self):
        """Dispatch to the websocket handshake when the client asked for an
        upgrade; otherwise fall through to normal WSGI handling."""
        self.pre_start()
        environ = self.environ
        upgrade = environ.get('HTTP_UPGRADE', '').lower()
        if upgrade == 'websocket':
            connection = environ.get('HTTP_CONNECTION', '').lower()
            if 'upgrade' in connection:
                return self._handle_websocket()
        return super(WebSocketHandler, self).handle_one_response()

    def pre_start(self):
        # Hook for subclasses to run code before the request is handled.
        pass

    def _fake_start_response(self, *args, **kwargs):
        # Swallows the app's start_response call: the 101 reply has already
        # been written by the handshake, so the WSGI status/headers are moot.
        return None

    def _handle_websocket(self):
        """Perform the appropriate (hybi or hixie) handshake, then invoke the
        application unless ``prevent_wsgi_call`` is set."""
        environ = self.environ
        # BUG FIX: 'result' was previously unbound (UnboundLocalError) when the
        # request had neither a Sec-WebSocket-Version nor an Origin header.
        result = None
        try:
            if environ.get("HTTP_SEC_WEBSOCKET_VERSION"):
                self.close_connection = True
                result = self._handle_hybi()
            elif environ.get("HTTP_ORIGIN"):
                self.close_connection = True
                result = self._handle_hixie()
            self.result = []
            if not result:
                # Handshake failed (an error response was already sent).
                return
            if not hasattr(self, 'prevent_wsgi_call'):
                self.application(environ, self._fake_start_response)
            return []
        finally:
            self.log_request()

    def _handle_hybi(self):
        """Handshake for hybi / RFC 6455 clients (Sec-WebSocket-Version 7/8/13).

        Returns True on success; on failure an error response is sent and
        None is returned.
        """
        environ = self.environ
        version = environ.get("HTTP_SEC_WEBSOCKET_VERSION")

        environ['wsgi.websocket_version'] = 'hybi-%s' % version
        if version not in self.SUPPORTED_VERSIONS:
            self.log_error('400: Unsupported Version: %r', version)
            self.respond(
                '400 Unsupported Version',
                [('Sec-WebSocket-Version', '13, 8, 7')]
            )
            return

        protocol, version = self.request_version.split("/")
        key = environ.get("HTTP_SEC_WEBSOCKET_KEY")

        # check client handshake for validity
        if not environ.get("REQUEST_METHOD") == "GET":
            # 5.2.1 (1)
            self.respond('400 Bad Request')
            return
        elif not protocol == "HTTP":
            # 5.2.1 (1)
            self.respond('400 Bad Request')
            return
        elif float(version) < 1.1:
            # 5.2.1 (1)
            self.respond('400 Bad Request')
            return
        # XXX: nobody seems to set SERVER_NAME correctly. check the spec
        #elif not environ.get("HTTP_HOST") == environ.get("SERVER_NAME"):
            # 5.2.1 (2)
            #self.respond('400 Bad Request')
            #return
        elif not key:
            # 5.2.1 (3)
            self.log_error('400: HTTP_SEC_WEBSOCKET_KEY is missing from request')
            self.respond('400 Bad Request')
            return
        elif len(base64.b64decode(key)) != 16:
            # 5.2.1 (3)
            self.log_error('400: Invalid key: %r', key)
            self.respond('400 Bad Request')
            return

        self.websocket = WebSocketHybi(self.socket, environ)
        environ['wsgi.websocket'] = self.websocket

        headers = [
            ("Upgrade", "websocket"),
            ("Connection", "Upgrade"),
            ("Sec-WebSocket-Accept", base64.b64encode(sha1(key + self.GUID).digest())),
        ]
        self._send_reply("101 Switching Protocols", headers)
        return True

    def _handle_hixie(self):
        """Handshake for legacy hixie-75/hixie-76 clients.

        Returns True on a successful hixie-76 handshake; hixie-75 sends its
        reply and returns None (callers treat a falsy return as 'do not call
        the application').
        """
        environ = self.environ
        assert "upgrade" in self.environ.get("HTTP_CONNECTION", "").lower()

        self.websocket = WebSocketHixie(self.socket, environ)
        environ['wsgi.websocket'] = self.websocket

        key1 = self.environ.get('HTTP_SEC_WEBSOCKET_KEY1')
        key2 = self.environ.get('HTTP_SEC_WEBSOCKET_KEY2')

        if key1 is not None:
            # Presence of KEY1 distinguishes hixie-76 from hixie-75.
            environ['wsgi.websocket_version'] = 'hixie-76'
            if not key1:
                self.log_error("400: SEC-WEBSOCKET-KEY1 header is empty")
                self.respond('400 Bad Request')
                return
            if not key2:
                self.log_error("400: SEC-WEBSOCKET-KEY2 header is missing or empty")
                self.respond('400 Bad Request')
                return

            part1 = self._get_key_value(key1)
            part2 = self._get_key_value(key2)
            if part1 is None or part2 is None:
                self.respond('400 Bad Request')
                return

            headers = [
                ("Upgrade", "WebSocket"),
                ("Connection", "Upgrade"),
                ("Sec-WebSocket-Location", reconstruct_url(environ)),
            ]
            if self.websocket.protocol is not None:
                headers.append(("Sec-WebSocket-Protocol", self.websocket.protocol))
            if self.websocket.origin:
                headers.append(("Sec-WebSocket-Origin", self.websocket.origin))
            self._send_reply("101 Web Socket Protocol Handshake", headers)

            # This request should have 8 bytes of data in the body
            key3 = self.rfile.read(8)
            challenge = md5(struct.pack("!II", part1, part2) + key3).digest()
            self.socket.sendall(challenge)
            return True
        else:
            environ['wsgi.websocket_version'] = 'hixie-75'
            headers = [
                ("Upgrade", "WebSocket"),
                ("Connection", "Upgrade"),
                ("WebSocket-Location", reconstruct_url(environ)),
            ]
            if self.websocket.protocol is not None:
                headers.append(("WebSocket-Protocol", self.websocket.protocol))
            if self.websocket.origin:
                headers.append(("WebSocket-Origin", self.websocket.origin))
            self._send_reply("101 Web Socket Protocol Handshake", headers)

    def _send_reply(self, status, headers):
        """Write a raw HTTP status line plus headers straight to the socket."""
        self.status = status
        towrite = []
        towrite.append('%s %s\r\n' % (self.request_version, self.status))
        for header in headers:
            towrite.append("%s: %s\r\n" % header)
        towrite.append("\r\n")
        msg = ''.join(towrite)
        self.socket.sendall(msg)
        self.headers_sent = True

    def respond(self, status, headers=None):
        """Send an error/status reply and close the connection.

        :param status: HTTP status line, e.g. '400 Bad Request'.
        :param headers: optional list of (name, value) header tuples.
        """
        # BUG FIX: 'headers=[]' was a mutable default argument shared across
        # calls; use None as the sentinel instead.
        if headers is None:
            headers = []
        self.close_connection = True
        self._send_reply(status, headers)

        if self.socket is not None:
            try:
                self.socket._sock.close()
                self.socket.close()
            except socket_error:
                pass

    def _get_key_value(self, key_value):
        """Decode one hixie-76 key header: the embedded number divided by the
        number of spaces. Returns None (after logging) for invalid keys."""
        key_number = int(re.sub("\\D", "", key_value))
        spaces = re.subn(" ", "", key_value)[1]
        # BUG FIX: a key containing no spaces previously raised
        # ZeroDivisionError; treat it as invalid instead.
        if spaces == 0:
            self.log_error("key value %r contains no spaces", key_value)
            return
        if key_number % spaces != 0:
            self.log_error("key_number %d is not an integral multiple of spaces %d", key_number, spaces)
        else:
            return key_number / spaces
def reconstruct_url(environ):
    """Rebuild the ws:// or wss:// URL of the request from a WSGI environ.

    The host comes from HTTP_HOST when present (falling back to SERVER_NAME),
    the port is appended only when it differs from the scheme's default, and
    SCRIPT_NAME/PATH_INFO are percent-quoted.
    """
    is_secure = environ['wsgi.url_scheme'] == 'https'
    scheme = 'wss://' if is_secure else 'ws://'
    host = environ.get('HTTP_HOST') or environ['SERVER_NAME']
    url = scheme + host

    # Only show the port when it is non-default for the scheme.
    default_port = '443' if is_secure else '80'
    port = environ['SERVER_PORT']
    if port != default_port:
        url += ':' + port

    url += quote(environ.get('SCRIPT_NAME', ''))
    url += quote(environ.get('PATH_INFO', ''))

    query = environ.get('QUERY_STRING')
    if query:
        url += '?' + query
    return url
| |
# NOTE: this region previously contained an unresolved git merge conflict
# (<<<<<<< / ======= / >>>>>>> markers wrapping THREE byte-identical copies of
# this test module). The conflict is resolved here by keeping a single copy.
from test import support
gdbm = support.import_module("dbm.gnu")  # skip these tests if dbm.gnu is unavailable
import unittest
import os
from test.support import verbose, TESTFN, unlink

filename = TESTFN


class TestGdbm(unittest.TestCase):
    def setUp(self):
        self.g = None

    def tearDown(self):
        if self.g is not None:
            self.g.close()
        unlink(filename)

    def test_key_methods(self):
        self.g = gdbm.open(filename, 'c')
        self.assertEqual(self.g.keys(), [])
        self.g['a'] = 'b'
        self.g['12345678910'] = '019237410982340912840198242'
        self.g[b'bytes'] = b'data'
        key_set = set(self.g.keys())
        self.assertEqual(key_set, set([b'a', b'bytes', b'12345678910']))
        self.assertIn('a', self.g)
        self.assertIn(b'a', self.g)
        self.assertEqual(self.g[b'bytes'], b'data')
        key = self.g.firstkey()
        while key:
            self.assertIn(key, key_set)
            key_set.remove(key)
            key = self.g.nextkey(key)
        self.assertRaises(KeyError, lambda: self.g['xxx'])
        # get() and setdefault() work as in the dict interface
        self.assertEqual(self.g.get(b'xxx', b'foo'), b'foo')
        self.assertEqual(self.g.setdefault(b'xxx', b'foo'), b'foo')
        self.assertEqual(self.g[b'xxx'], b'foo')

    def test_error_conditions(self):
        # Try to open a non-existent database.
        unlink(filename)
        self.assertRaises(gdbm.error, gdbm.open, filename, 'r')
        # Try to access a closed database.
        self.g = gdbm.open(filename, 'c')
        self.g.close()
        self.assertRaises(gdbm.error, lambda: self.g['a'])
        # try pass an invalid open flag
        self.assertRaises(gdbm.error, lambda: gdbm.open(filename, 'rx').close())

    def test_flags(self):
        # Test the flag parameter open() by trying all supported flag modes.
        all = set(gdbm.open_flags)
        # Test standard flags (presumably "crwn").
        modes = all - set('fsu')
        for mode in sorted(modes):  # put "c" mode first
            self.g = gdbm.open(filename, mode)
            self.g.close()
        # Test additional flags (presumably "fsu").
        flags = all - set('crwn')
        for mode in modes:
            for flag in flags:
                self.g = gdbm.open(filename, mode + flag)
                self.g.close()

    def test_reorganize(self):
        self.g = gdbm.open(filename, 'c')
        size0 = os.path.getsize(filename)

        self.g['x'] = 'x' * 10000
        size1 = os.path.getsize(filename)
        self.assertTrue(size0 < size1)

        del self.g['x']
        # 'size' is supposed to be the same even after deleting an entry.
        self.assertEqual(os.path.getsize(filename), size1)

        self.g.reorganize()
        size2 = os.path.getsize(filename)
        self.assertTrue(size1 > size2 >= size0)

    def test_context_manager(self):
        with gdbm.open(filename, 'c') as db:
            db["gdbm context manager"] = "context manager"

        with gdbm.open(filename, 'r') as db:
            self.assertEqual(list(db.keys()), [b"gdbm context manager"])

        with self.assertRaises(gdbm.error) as cm:
            db.keys()
        self.assertEqual(str(cm.exception),
                         "GDBM object has already been closed")


if __name__ == '__main__':
    unittest.main()
| |
#
# Copyright 2014, NICTA
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(NICTA_BSD)
#
'''This code manages the name mangling (and reversal of such) that needs to
happen in the templates and follow-on logic in the runner. E.g. based on the
name of a component instance, we need to construct a name of the control TCB.
The logic for performing that translation and (if necessary) reversing it later
is encapsulated here so it can more easily be modified.
Callers should only import and use the Perspective class. When instantiating
one of these, generally as much information as is known should be provided to
give Perspective the opportunity to spot internal inconsistencies. See the
comments in the class itself for further information.'''
from camkes.internal.dictutils import get_fields
import re
class Deriver(object):
    '''Abstract rule for constructing one symbol from one or more other
    symbols. Never instantiated for real use; it only defines the interface
    that the concrete derivers below implement.'''
    def inputs(self):
        raise NotImplementedError

    def output(self):
        raise NotImplementedError

    def derive(self, perspective):
        raise NotImplementedError
class ForwardDeriver(Deriver):
    '''Derives a symbol by interpolating the values of several other symbols
    into a static format string.'''
    def __init__(self, format, out):
        self.format, self.out = format, out

    def inputs(self):
        # The required inputs are exactly the fields named in the format string.
        return get_fields(self.format)

    def output(self):
        return self.out

    def derive(self, perspective):
        return self.format % perspective
class BackwardDeriver(Deriver):
    '''Derives a symbol by extracting a substring (the first regex capture
    group) from a single input symbol.'''
    def __init__(self, regex, input, out):
        self.regex = re.compile(regex)
        self.input = input
        self.out = out

    def inputs(self):
        return {self.input}

    def output(self):
        return self.out

    def derive(self, perspective):
        # No match means the input does not encode this symbol.
        match = self.regex.match(perspective[self.input])
        return match.group(1) if match is not None else None
# The remaining derivers are for specific symbols (or qualities) that are not
# strings. These each need slightly inflected logic.
class ControlDeriver(Deriver):
    '''Derives the boolean 'control' quality: whether the input symbol names
    the control thread (it matches the given regex).'''
    def __init__(self, regex, input):
        self.regex = re.compile(regex)
        self.input = input
    def inputs(self):
        return {self.input}
    def output(self):
        return 'control'
    def derive(self, perspective):
        # A regex hit means the symbol belongs to the control thread.
        return bool(self.regex.match(perspective[self.input]))
class PoolDeriver(Deriver):
    '''Derives the boolean 'pool' quality: whether the input symbol matches
    the thread-pool naming pattern.'''
    def __init__(self, regex, input):
        self.regex = re.compile(regex)
        self.input = input
    def inputs(self):
        return {self.input}
    def output(self):
        return 'pool'
    def derive(self, perspective):
        return bool(self.regex.match(perspective[self.input]))
class PoolIndexDeriver(Deriver):
    '''Derives the integer 'pool_index' by parsing the first regex group out
    of the input symbol.'''
    def __init__(self, regex, input):
        self.regex = re.compile(regex)
        self.input = input
    def inputs(self):
        return {self.input}
    def output(self):
        return 'pool_index'
    def derive(self, perspective):
        match = self.regex.match(perspective[self.input])
        # Group 1 is expected to be a run of digits when the regex matches.
        return None if match is None else int(match.group(1))
class FromControlDeriver(ForwardDeriver):
    '''ForwardDeriver variant that only fires when the perspective's
    'control' flag is truthy (i.e. the control thread's symbols).'''
    def derive(self, perspective):
        if perspective.get('control', False):
            return self.format % perspective
        # Not the control thread; this rule produces nothing.
        return None
class DMAFrameIndexDeriver(Deriver):
    '''Derives the integer 'dma_frame_index' by parsing the first regex group
    out of the input symbol.'''
    def __init__(self, regex, input):
        self.regex = re.compile(regex)
        self.input = input
    def inputs(self):
        return {self.input}
    def output(self):
        return 'dma_frame_index'
    def derive(self, perspective):
        match = self.regex.match(perspective[self.input])
        return None if match is None else int(match.group(1))
# Phases.
RUNNER, TEMPLATES, FILTERS = range(3)
# Instantiate the derivers to describe how name mangling happens in CAmkES. If
# you want to modify the name mangling scheme, this is the place to do it.
# Each phase maps to the list of derivation rules available during that phase;
# Perspective consults only the list for its phase. Note that rule order is
# significant only in that inference iterates until a fixed point.
DERIVATIONS = {
    RUNNER:[
        ForwardDeriver('pd_%(group)s_group_bin', 'pd'),
        ForwardDeriver('pd_%(elf_name)s', 'pd'),
        BackwardDeriver(r'^pd_(.+)$', 'pd', 'elf_name'),
        BackwardDeriver(r'^pd_(.+)_group_bin$', 'pd', 'group'),
        ForwardDeriver('cnode_%(group)s', 'cnode'),
        BackwardDeriver(r'^cnode_(.+)$', 'cnode', 'group'),
    ], TEMPLATES:[
        ForwardDeriver('dma_frame_%(dma_frame_index)04d', 'dma_frame_symbol'),
        DMAFrameIndexDeriver(r'^dma_frame_([0-9]+)$', 'dma_frame_symbol'),
        ForwardDeriver('_camkes_ipc_buffer_%(instance)s_%(interface)s', 'ipc_buffer_symbol'),
        FromControlDeriver('_camkes_ipc_buffer_%(instance)s_0_control', 'ipc_buffer_symbol'),
        ControlDeriver(r'^_camkes_ipc_buffer_.+_0_control$', 'ipc_buffer_symbol'),
        ForwardDeriver('_camkes_stack_%(instance)s_%(interface)s', 'stack_symbol'),
        FromControlDeriver('_camkes_stack_%(instance)s_0_control', 'stack_symbol'),
        ControlDeriver(r'^_camkes_stack_.+_0_control$', 'stack_symbol'),
        ForwardDeriver('%(dataport)s_data', 'dataport_symbol'),
        BackwardDeriver(r'^([^ ]+)_data$', 'dataport_symbol', 'dataport'),
        ForwardDeriver('%(to_interface)s_attributes', 'hardware_attribute'),
        BackwardDeriver(r'^(.+)_attributes', 'hardware_attribute', 'to_interface'),
        ForwardDeriver('%(to_interface)s_cached', 'hardware_cached'),
        BackwardDeriver(r'^(.+)_cached', 'hardware_cached', 'to_interface'),
        ForwardDeriver('%(group)s_group_bin', 'elf_name'),
        BackwardDeriver(r'^(.+)_group_bin', 'elf_name', 'group'),
        ForwardDeriver('%(instance)s_main', 'entry_symbol'),
        BackwardDeriver(r'^(.+)_main$', 'entry_symbol', 'instance'),
        ForwardDeriver('%(instance)s_tls_setup', 'tls_symbol'),
        BackwardDeriver(r'^(.+)_tls_setup$', 'tls_symbol', 'instance'),
        ForwardDeriver('camkes_dma_pool', 'dma_pool_symbol'),
        ControlDeriver(r'_passive$', 'passive_attribute'),
        FromControlDeriver('_passive', 'passive_attribute'),
        ForwardDeriver('%(interface)s_passive', 'passive_attribute'),
        BackwardDeriver(r'([^_].*)_passive$', 'passive_attribute', 'interface'),
    ], FILTERS:[
        ForwardDeriver('%(instance)s_tcb_%(interface)s', 'tcb'),
        FromControlDeriver('%(instance)s_tcb_0_control', 'tcb'),
        BackwardDeriver(r'^(.+)_tcb_.+$', 'tcb', 'instance'),
        BackwardDeriver(r'^.+_tcb_([a-zA-Z_]\w*)$', 'tcb', 'interface'),
        ControlDeriver(r'^.+_tcb_0_control$', 'tcb'),
        ForwardDeriver('%(instance)s_sc_%(interface)s', 'sc'),
        ForwardDeriver('%(instance)s_sc_%(interface)s__init', 'sc_init'),
        FromControlDeriver('%(instance)s_sc__control', 'sc'),
        BackwardDeriver(r'^(.+)_sc_.+$', 'sc', 'instance'),
        BackwardDeriver(r'^.+_sc_([^_].*)$', 'sc', 'interface'),
        ControlDeriver(r'^.+_sc__control$', 'sc'),
        ForwardDeriver('_camkes_ipc_buffer_%(instance)s_%(interface)s', 'ipc_buffer_symbol'),
        FromControlDeriver('_camkes_ipc_buffer_%(instance)s_0_control', 'ipc_buffer_symbol'),
        ControlDeriver(r'^_camkes_ipc_buffer_.+_0_control$', 'ipc_buffer_symbol'),
        ForwardDeriver('_camkes_stack_%(instance)s_%(interface)s', 'stack_symbol'),
        FromControlDeriver('_camkes_stack_%(instance)s_0_control', 'stack_symbol'),
        ControlDeriver(r'^_camkes_stack_.+_0_control$', 'stack_symbol'),
        ForwardDeriver('camkes %(instance)s_main', 'entry_symbol'),
        BackwardDeriver(r'^camkes (.+)_main$', 'entry_symbol', 'instance'),
        ForwardDeriver('camkes %(instance)s_tls_setup', 'tls_symbol'),
        BackwardDeriver(r'^camkes (.+)_tls_setup$', 'tls_symbol', 'instance'),
        ForwardDeriver('%(group)s_group_bin', 'elf_name'),
        BackwardDeriver(r'^(.+)_group_bin', 'elf_name', 'group'),
        PoolDeriver(r'.+_tcb_pool_[0-9]+$', 'tcb'),
        PoolIndexDeriver(r'.+_tcb_pool_([0-9]+)$', 'tcb'),
        ForwardDeriver('pd_%(group)s_group_bin', 'pd'),
        ForwardDeriver('pd_%(elf_name)s', 'pd'),
        BackwardDeriver(r'^pd_(.+)$', 'pd', 'elf_name'),
        BackwardDeriver(r'^pd_(.+)_group_bin$', 'pd', 'group'),
        ForwardDeriver('camkes %(instance)s %(dataport)s data', 'dataport_symbol'),
        BackwardDeriver(r'^camkes ([^ ]+) [^ ]+ data$', 'dataport_symbol', 'instance'),
        BackwardDeriver(r'^camkes [^ ]+ ([^ ]+) data$', 'dataport_symbol', 'dataport'),
        ForwardDeriver('%(to_interface)s_attributes', 'hardware_attribute'),
        BackwardDeriver(r'^(.+)_attributes', 'hardware_attribute', 'to_interface'),
        ForwardDeriver('%(to_interface)s_cached', 'hardware_cached'),
        BackwardDeriver(r'^(.+)_cached', 'hardware_cached', 'to_interface'),
        ForwardDeriver('camkes %(instance)s_dma_pool', 'dma_pool_symbol'),
        BackwardDeriver(r'^camkes (.+)_dma_pool$', 'dma_pool_symbol', 'instance'),
        ForwardDeriver('%(instance)s_dma_frame_%(dma_frame_index)04d', 'dma_frame_symbol'),
        BackwardDeriver(r'^(.+)_dma_frame_[0-9]+$', 'dma_frame_symbol', 'instance'),
        DMAFrameIndexDeriver(r'^.+_dma_frame_([0-9]+)$', 'dma_frame_symbol'),
        ControlDeriver(r'^_priority$', 'priority_attribute'),
        FromControlDeriver('_priority', 'priority_attribute'),
        ForwardDeriver('%(interface)s_priority', 'priority_attribute'),
        BackwardDeriver(r'^([a-zA-Z_]\w*)_priority$', 'priority_attribute', 'interface'),
        ControlDeriver(r'^_max_priority$', 'max_priority_attribute'),
        FromControlDeriver('_max_priority', 'max_priority_attribute'),
        ForwardDeriver('%(interface)s_max_priority', 'max_priority_attribute'),
        BackwardDeriver(r'^([^_].*)_max_priority$', 'max_priority_attribute', 'interface'),
        ControlDeriver(r'^_criticality$', 'criticality_attribute'),
        FromControlDeriver('_criticality', 'criticality_attribute'),
        ForwardDeriver('%(interface)s_criticality', 'criticality_attribute'),
        BackwardDeriver(r'^([^_].*)_criticality$', 'criticality_attribute', 'interface'),
        ControlDeriver(r'^_max_criticality$', 'max_criticality_attribute'),
        FromControlDeriver('_max_criticality', 'max_criticality_attribute'),
        ForwardDeriver('%(interface)s_max_criticality', 'max_criticality_attribute'),
        BackwardDeriver(r'^([^_].*)_max_criticality$', 'max_criticality_attribute', 'interface'),
        ControlDeriver(r'^_domain$', 'domain_attribute'),
        FromControlDeriver('_domain', 'domain_attribute'),
        ForwardDeriver('%(interface)s_domain', 'domain_attribute'),
        BackwardDeriver(r'^([a-zA-Z_]\w*)_domain$', 'domain_attribute', 'interface'),
        ControlDeriver(r'_passive$', 'passive_attribute'),
        FromControlDeriver('_passive', 'passive_attribute'),
        ForwardDeriver('%(interface)s_passive', 'passive_attribute'),
        BackwardDeriver(r'([^_].*)_passive$', 'passive_attribute', 'interface'),
        ControlDeriver(r'^_period$', 'period_attribute'),
        FromControlDeriver('_period', 'period_attribute'),
        ForwardDeriver('%(interface)s_period', 'period_attribute'),
        BackwardDeriver(r'^([^_].*)_period$', 'period_attribute', 'interface'),
        ControlDeriver(r'^_budget$', 'budget_attribute'),
        FromControlDeriver('_budget', 'budget_attribute'),
        ForwardDeriver('%(interface)s_budget', 'budget_attribute'),
        BackwardDeriver(r'^([^_].*)_budget$', 'budget_attribute', 'interface'),
        ControlDeriver(r'^_data$', 'data_attribute'),
        FromControlDeriver('_data', 'data_attribute'),
        ForwardDeriver('%(interface)s_data', 'data_attribute'),
        BackwardDeriver(r'^([^_].*)_data$', 'data_attribute', 'interface'),
        ForwardDeriver('cnode_%(group)s', 'cnode'),
        BackwardDeriver(r'^cnode_(.+)$', 'cnode', 'group'),
    ],
}
class Perspective(object):
    '''A partial state from which to mangle symbols. That may make no sense,
    but consider this as a collection of *some* of the symbols we need from
    which *all* the symbols we need can be derived. You need to pass some
    initial symbols in to the constructor. These may not be sufficient to
    derive all other known symbols, but they must be sufficient to derive any
    you need. The known symbols can be updated at any point via __setitem__. A
    more appropriate name for this class would be 'context', but I didn't want
    to cause confusion by introducing yet another 'context' into this code
    base.'''
    def __init__(self, phase=FILTERS, **kwargs):
        self.kwargs = kwargs
        self.derivations = DERIVATIONS[phase]
        # Cache the set of symbols the active derivations can produce. The
        # previous implementation rebuilt this via map() over a lambda on
        # every __getitem__/__setitem__ call, an O(len(derivations)) scan
        # per access for no benefit.
        self._outputs = frozenset(d.output() for d in self.derivations)
        if __debug__:
            # When optimisations are not enabled, infer everything possible
            # upfront (not lazily). This can catch some internal
            # inconsistencies though we will probably end up inferring things
            # we don't need.
            self._infer()
    def _infer(self, limit=None):
        '''Infer some or all possible unknown symbols. If the limit argument is
        given, inference stops when we know that symbol.'''
        prev_keys = set(self.kwargs.keys())
        # Iterate to a fixed point (or until `limit` becomes known).
        while limit is None or limit not in prev_keys:
            for d in self.derivations:
                if d.inputs() <= set(self.kwargs.keys()):
                    # We have enough information to use this derivation.
                    v = d.derive(self.kwargs)
                    if v is None:
                        # We could not derive this value.
                        continue
                    k = d.output()
                    if k in self.kwargs:
                        # We already knew this symbol. It had better have been
                        # the same as what we just derived for consistency.
                        assert self.kwargs[k] == v, \
                            'perspective is internally inconsistent for key: %s, kwargs: %s' % (k, self.kwargs)
                    else:
                        self.kwargs[k] = v
            next_keys = set(self.kwargs.keys())
            if prev_keys == next_keys:
                # We didn't learn anything new this time around.
                break
            prev_keys = next_keys
    def __setitem__(self, key, value):
        # Re-setting a known symbol is only allowed if the value is unchanged.
        assert key not in self.kwargs or self.kwargs[key] == value
        # The following assertion is conservative. In the future, it may make
        # sense to set some 'core' strings that we cannot infer.
        assert key in self._outputs, \
            'setting \'%s\' that is not inferrable' % key
        self.kwargs[key] = value
        if __debug__:
            self._infer()
    def __getitem__(self, key):
        # As for the assertion in __setitem__, this is conservative.
        assert key in self._outputs, \
            'getting \'%s\' that is not inferrable' % key
        if key not in self.kwargs:
            # Lazily infer just enough to answer this lookup.
            self._infer(key)
        assert key in self.kwargs, \
            'not enough information to infer attribute for key: %s, kwargs: %s' % (key, self.kwargs)
        return self.kwargs[key]
| |
from pgmagick import _pgmagick
__version__ = '0.5.5'
def __init():
    # Initialise the GraphicsMagick library exactly once at import time.
    # "./" stands in for the executable path argument InitializeMagick wants.
    _pgmagick.InitializeMagick("./")
__init()
class Blob(_pgmagick.Blob):
    """Python-friendly wrapper over the Magick++ Blob binding.

    Accepts either the native constructor arguments or a single ``str``,
    in which case an empty blob is created and filled via ``update``.
    """
    def __init__(self, *args):
        if len(args) == 1 and isinstance(args[0], str):
            # Convenience path: the native constructor does not take a
            # Python string directly, so build empty and update().
            _pgmagick.Blob.__init__(self)
            self.update(args[0])
        else:
            _pgmagick.Blob.__init__(self, *args)
    # Read through the module-level helper, write through Blob.update.
    data = property(_pgmagick.get_blob_data, _pgmagick.Blob.update)
# Thin re-exports of the C++ binding's colour/coordinate types. The empty
# subclasses exist so users can subclass/extend pure-Python types.
ChannelType = _pgmagick.ChannelType
class Color(_pgmagick.Color):
    pass
class ColorGray(_pgmagick.ColorGray):
    pass
class ColorHSL(_pgmagick.ColorHSL):
    pass
class ColorMono(_pgmagick.ColorMono):
    pass
class ColorRGB(_pgmagick.ColorRGB):
    pass
class ColorYUV(_pgmagick.ColorYUV):
    pass
ColorspaceType = _pgmagick.ColorspaceType
CompositeOperator = _pgmagick.CompositeOperator
CompressionType = _pgmagick.CompressionType
class Coordinate(_pgmagick.Coordinate):
    pass
class CoordinateList(_pgmagick.CoordinateList):
    pass
# Re-exports of the Drawable* vector-drawing primitives from the binding.
DecorationType = _pgmagick.DecorationType
class Drawable(_pgmagick.Drawable):
    pass
class DrawableAffine(_pgmagick.DrawableAffine):
    pass
class DrawableArc(_pgmagick.DrawableArc):
    pass
class DrawableBezier(_pgmagick.DrawableBezier):
    pass
class DrawableCircle(_pgmagick.DrawableCircle):
    pass
class DrawableClipPath(_pgmagick.DrawableClipPath):
    pass
class DrawableColor(_pgmagick.DrawableColor):
    pass
class DrawableCompositeImage(_pgmagick.DrawableCompositeImage):
    pass
class DrawableDashArray(_pgmagick.DrawableDashArray):
    pass
class DrawableDashOffset(_pgmagick.DrawableDashOffset):
    pass
class DrawableEllipse(_pgmagick.DrawableEllipse):
    pass
class DrawableFillColor(_pgmagick.DrawableFillColor):
    pass
class DrawableFillOpacity(_pgmagick.DrawableFillOpacity):
    pass
class DrawableFillRule(_pgmagick.DrawableFillRule):
    pass
class DrawableFont(_pgmagick.DrawableFont):
    pass
class DrawableGravity(_pgmagick.DrawableGravity):
    pass
class DrawableLine(_pgmagick.DrawableLine):
    pass
class DrawableList(_pgmagick.DrawableList):
    pass
class DrawableMatte(_pgmagick.DrawableMatte):
    pass
class DrawableMiterLimit(_pgmagick.DrawableMiterLimit):
    pass
class DrawablePath(_pgmagick.DrawablePath):
    pass
class DrawablePoint(_pgmagick.DrawablePoint):
    pass
class DrawablePointSize(_pgmagick.DrawablePointSize):
    pass
class DrawablePolygon(_pgmagick.DrawablePolygon):
    pass
class DrawablePolyline(_pgmagick.DrawablePolyline):
    pass
class DrawablePopClipPath(_pgmagick.DrawablePopClipPath):
    pass
class DrawablePopGraphicContext(_pgmagick.DrawablePopGraphicContext):
    pass
class DrawablePopPattern(_pgmagick.DrawablePopPattern):
    pass
class DrawablePushClipPath(_pgmagick.DrawablePushClipPath):
    pass
class DrawablePushGraphicContext(_pgmagick.DrawablePushGraphicContext):
    pass
class DrawablePushPattern(_pgmagick.DrawablePushPattern):
    pass
class DrawableRectangle(_pgmagick.DrawableRectangle):
    pass
class DrawableRotation(_pgmagick.DrawableRotation):
    pass
class DrawableRoundRectangle(_pgmagick.DrawableRoundRectangle):
    pass
class DrawableScaling(_pgmagick.DrawableScaling):
    pass
class DrawableSkewX(_pgmagick.DrawableSkewX):
    pass
class DrawableSkewY(_pgmagick.DrawableSkewY):
    pass
class DrawableStrokeAntialias(_pgmagick.DrawableStrokeAntialias):
    pass
class DrawableStrokeColor(_pgmagick.DrawableStrokeColor):
    pass
class DrawableStrokeLineCap(_pgmagick.DrawableStrokeLineCap):
    pass
class DrawableStrokeLineJoin(_pgmagick.DrawableStrokeLineJoin):
    pass
class DrawableStrokeOpacity(_pgmagick.DrawableStrokeOpacity):
    pass
class DrawableStrokeWidth(_pgmagick.DrawableStrokeWidth):
    pass
class DrawableText(_pgmagick.DrawableText):
    pass
class DrawableTextAntialias(_pgmagick.DrawableTextAntialias):
    pass
class DrawableTextDecoration(_pgmagick.DrawableTextDecoration):
    pass
class DrawableTextUnderColor(_pgmagick.DrawableTextUnderColor):
    pass
class DrawableTranslation(_pgmagick.DrawableTranslation):
    pass
class DrawableViewbox(_pgmagick.DrawableViewbox):
    pass
# Core image/geometry types and enum re-exports.
EndianType = _pgmagick.EndianType
class MagickException(_pgmagick.Exception):
    """Exception Class"""
    pass
FillRule = _pgmagick.FillRule
FilterTypes = _pgmagick.FilterTypes
class Geometry(_pgmagick.Geometry):
    pass
GravityType = _pgmagick.GravityType
class Image(_pgmagick.Image):
    pass
class ImageList(_pgmagick.ImageList):
    pass
ImageType = _pgmagick.ImageType
InterlaceType = _pgmagick.InterlaceType
LineCap = _pgmagick.LineCap
LineJoin = _pgmagick.LineJoin
class Montage(_pgmagick.Montage):
    pass
class MontageFramed(_pgmagick.MontageFramed):
    pass
NoiseType = _pgmagick.NoiseType
# Guarded: only present when the underlying GraphicsMagick build provides it.
if hasattr(_pgmagick, "OrientationType"):
    OrientationType = _pgmagick.OrientationType
PaintMethod = _pgmagick.PaintMethod
# Re-exports of the SVG-style Path* drawing primitives from the binding.
class PathArcAbs(_pgmagick.PathArcAbs):
    pass
class PathArcArgs(_pgmagick.PathArcArgs):
    pass
class PathArcArgsList(_pgmagick.PathArcArgsList):
    pass
class PathArcRel(_pgmagick.PathArcRel):
    pass
class PathClosePath(_pgmagick.PathClosePath):
    pass
class PathCurveToArgsList(_pgmagick.PathCurveToArgsList):
    pass
class PathCurvetoAbs(_pgmagick.PathCurvetoAbs):
    pass
class PathCurvetoArgs(_pgmagick.PathCurvetoArgs):
    pass
class PathCurvetoRel(_pgmagick.PathCurvetoRel):
    pass
class PathLinetoAbs(_pgmagick.PathLinetoAbs):
    pass
class PathLinetoHorizontalAbs(_pgmagick.PathLinetoHorizontalAbs):
    pass
class PathLinetoHorizontalRel(_pgmagick.PathLinetoHorizontalRel):
    pass
class PathLinetoRel(_pgmagick.PathLinetoRel):
    pass
class PathLinetoVerticalAbs(_pgmagick.PathLinetoVerticalAbs):
    pass
class PathLinetoVerticalRel(_pgmagick.PathLinetoVerticalRel):
    pass
class PathMovetoAbs(_pgmagick.PathMovetoAbs):
    pass
class PathMovetoRel(_pgmagick.PathMovetoRel):
    pass
class PathQuadraticCurvetoAbs(_pgmagick.PathQuadraticCurvetoAbs):
    pass
class PathQuadraticCurvetoArgs(_pgmagick.PathQuadraticCurvetoArgs):
    pass
class PathQuadraticCurvetoArgsList(_pgmagick.PathQuadraticCurvetoArgsList):
    pass
class PathQuadraticCurvetoRel(_pgmagick.PathQuadraticCurvetoRel):
    pass
class PathSmoothCurvetoAbs(_pgmagick.PathSmoothCurvetoAbs):
    pass
class PathSmoothCurvetoRel(_pgmagick.PathSmoothCurvetoRel):
    pass
class PathSmoothQuadraticCurvetoAbs(_pgmagick.PathSmoothQuadraticCurvetoAbs):
    pass
class PathSmoothQuadraticCurvetoRel(_pgmagick.PathSmoothQuadraticCurvetoRel):
    pass
class Pixels(_pgmagick.Pixels):
    pass
# Guarded re-exports: these names only exist when the underlying
# GraphicsMagick build exposes them.
if hasattr(_pgmagick, "SparseColorMethod"):
    SparseColorMethod = _pgmagick.SparseColorMethod
if hasattr(_pgmagick, "DistortImageMethod"):
    DistortImageMethod = _pgmagick.DistortImageMethod
if hasattr(_pgmagick, "QuantumOperator"):
    QuantumOperator = _pgmagick.QuantumOperator
QuantumType = _pgmagick.QuantumType
RenderingIntent = _pgmagick.RenderingIntent
ResolutionType = _pgmagick.ResolutionType
StorageType = _pgmagick.StorageType
StretchType = _pgmagick.StretchType
StyleType = _pgmagick.StyleType
class TypeMetric(_pgmagick.TypeMetric):
    pass
if hasattr(_pgmagick, "VirtualPixelMethod"):
    VirtualPixelMethod = _pgmagick.VirtualPixelMethod
class VPath(_pgmagick.VPath):
    pass
class VPathList(_pgmagick.VPathList):
    pass
class gminfo:
    # Build-time version/library info reported by the C++ bindings.
    version = _pgmagick.get_version()
    library = _pgmagick.get_library()
| |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonschema
from rally.common import logging
from rally import consts
from rally import exceptions as rally_exceptions
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils
from rally.plugins.openstack.scenarios.nova import utils
from rally.plugins.openstack.wrappers import network as network_wrapper
from rally.task import types
from rally.task import utils as task_utils
from rally.task import validation
LOG = logging.getLogger(__name__)
class NovaServers(utils.NovaScenario,
cinder_utils.CinderScenario):
"""Benchmark scenarios for Nova servers."""
    @types.convert(image={"type": "glance_image"},
                   flavor={"type": "nova_flavor"})
    @validation.image_valid_on_flavor("flavor", "image")
    @validation.required_services(consts.Service.NOVA)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["nova"]})
    def boot_and_list_server(self, image, flavor,
                             detailed=True, **kwargs):
        """Boot a server from an image and then list all servers.

        Measure the "nova list" command performance.

        If you have only 1 user in your context, you will
        add 1 server on every iteration. So you will have more
        and more servers and will be able to measure the
        performance of the "nova list" command depending on
        the number of servers owned by users.

        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param detailed: True if the server listing should contain
                         detailed information about all of them
        :param kwargs: Optional additional arguments for server creation
        """
        # The booted server is left for the "cleanup" context to delete; the
        # interesting metric is the list call as server count grows.
        self._boot_server(image, flavor, **kwargs)
        self._list_servers(detailed)
    @validation.required_services(consts.Service.NOVA)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["nova"]})
    def list_servers(self, detailed=True):
        """List all servers.

        This simple scenario tests the nova list command by listing
        all the servers.

        :param detailed: True if detailed information about servers
                         should be listed
        """
        self._list_servers(detailed)
    @types.convert(image={"type": "glance_image"},
                   flavor={"type": "nova_flavor"})
    @validation.image_valid_on_flavor("flavor", "image")
    @validation.required_services(consts.Service.NOVA)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["nova"]})
    def boot_and_delete_server(self, image, flavor,
                               min_sleep=0, max_sleep=0,
                               force_delete=False, **kwargs):
        """Boot and delete a server.

        Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
        to simulate a pause between server creation and deletion
        (of random duration from [min_sleep, max_sleep]).

        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param min_sleep: Minimum sleep time in seconds (non-negative)
        :param max_sleep: Maximum sleep time in seconds (non-negative)
        :param force_delete: True if force_delete should be used
        :param kwargs: Optional additional arguments for server creation
        """
        server = self._boot_server(image, flavor, **kwargs)
        self.sleep_between(min_sleep, max_sleep)
        self._delete_server(server, force=force_delete)
    @types.convert(image={"type": "glance_image"},
                   flavor={"type": "nova_flavor"})
    @validation.image_valid_on_flavor("flavor", "image")
    @validation.required_services(consts.Service.NOVA)
    @validation.required_openstack(admin=True, users=True)
    @scenario.configure(context={"cleanup": ["nova"]})
    def boot_and_delete_multiple_servers(self, image, flavor, count=2,
                                         min_sleep=0, max_sleep=0,
                                         force_delete=False, **kwargs):
        """Boot multiple servers in a single request and delete them.

        Deletion is done in parallel with one request per server, not
        with a single request for all servers.

        :param image: The image to boot from
        :param flavor: Flavor used to boot instance
        :param count: Number of instances to boot
        :param min_sleep: Minimum sleep time in seconds (non-negative)
        :param max_sleep: Maximum sleep time in seconds (non-negative)
        :param force_delete: True if force_delete should be used
        :param kwargs: Optional additional arguments for instance creation
        """
        # One boot request (the positional 1) carrying `count` instances —
        # presumably the first positional is the number of requests; confirm
        # against _boot_servers if this is changed.
        servers = self._boot_servers(image, flavor, 1, instances_amount=count,
                                     **kwargs)
        self.sleep_between(min_sleep, max_sleep)
        self._delete_servers(servers, force=force_delete)
    @types.convert(image={"type": "glance_image"},
                   flavor={"type": "nova_flavor"})
    @validation.image_valid_on_flavor("flavor", "image", validate_disk=False)
    @validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["nova", "cinder"]})
    def boot_server_from_volume_and_delete(self, image, flavor,
                                           volume_size,
                                           volume_type=None,
                                           min_sleep=0, max_sleep=0,
                                           force_delete=False, **kwargs):
        """Boot a server from volume and then delete it.

        The scenario first creates a volume and then a server.
        Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
        to simulate a pause between server creation and deletion
        (of random duration from [min_sleep, max_sleep]).

        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param volume_size: volume size (in GB)
        :param volume_type: specifies volume type when there are
                            multiple backends
        :param min_sleep: Minimum sleep time in seconds (non-negative)
        :param max_sleep: Maximum sleep time in seconds (non-negative)
        :param force_delete: True if force_delete should be used
        :param kwargs: Optional additional arguments for server creation
        """
        volume = self._create_volume(volume_size, imageRef=image,
                                     volume_type=volume_type)
        # "<volume_id>:::1" — presumably id:type:size:delete_on_termination,
        # so the volume is removed with the server; confirm against the nova
        # block_device_mapping format if changing.
        block_device_mapping = {"vda": "%s:::1" % volume.id}
        # image=None: the instance boots from the mapped volume instead.
        server = self._boot_server(None, flavor,
                                   block_device_mapping=block_device_mapping,
                                   **kwargs)
        self.sleep_between(min_sleep, max_sleep)
        self._delete_server(server, force=force_delete)
    @types.convert(image={"type": "glance_image"},
                   flavor={"type": "nova_flavor"})
    @validation.image_valid_on_flavor("flavor", "image")
    @validation.required_services(consts.Service.NOVA)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["nova"]})
    def boot_and_bounce_server(self, image, flavor,
                               force_delete=False, actions=None, **kwargs):
        """Boot a server and run specified actions against it.

        Actions should be passed into the actions parameter. Available actions
        are 'hard_reboot', 'soft_reboot', 'stop_start', 'rescue_unrescue',
        'pause_unpause', 'suspend_resume', 'lock_unlock' and 'shelve_unshelve'.
        Delete server after all actions were completed.

        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param force_delete: True if force_delete should be used
        :param actions: list of action dictionaries, where each action
                        dictionary specifies an action to be performed
                        in the following format:
                        {"action_name": <no_of_iterations>}
        :param kwargs: Optional additional arguments for server creation
        """
        action_builder = self._bind_actions()
        actions = actions or []
        # Validate the action list up front so a malformed config fails the
        # iteration before any server is booted.
        try:
            action_builder.validate(actions)
        except jsonschema.exceptions.ValidationError as error:
            raise rally_exceptions.InvalidConfigException(
                "Invalid server actions configuration \'%(actions)s\' due to: "
                "%(error)s" % {"actions": str(actions), "error": str(error)})
        server = self._boot_server(image, flavor, **kwargs)
        for action in action_builder.build_actions(actions, server):
            action()
        self._delete_server(server, force=force_delete)
    @types.convert(image={"type": "glance_image"},
                   flavor={"type": "nova_flavor"})
    @validation.image_valid_on_flavor("flavor", "image")
    @validation.required_services(consts.Service.NOVA)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["nova"]})
    def boot_lock_unlock_and_delete(self, image, flavor,
                                    min_sleep=0, max_sleep=0,
                                    force_delete=False,
                                    **kwargs):
        """Boot a server, lock it, then unlock and delete it.

        Optional 'min_sleep' and 'max_sleep' parameters allow the
        scenario to simulate a pause between locking and unlocking the
        server (of random duration from min_sleep to max_sleep).

        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param min_sleep: Minimum sleep time between locking and unlocking
                          in seconds
        :param max_sleep: Maximum sleep time between locking and unlocking
                          in seconds
        :param force_delete: True if force_delete should be used
        :param kwargs: Optional additional arguments for server creation
        """
        server = self._boot_server(image, flavor, **kwargs)
        self._lock_server(server)
        self.sleep_between(min_sleep, max_sleep)
        self._unlock_server(server)
        self._delete_server(server, force=force_delete)
    @types.convert(image={"type": "glance_image"},
                   flavor={"type": "nova_flavor"})
    @validation.image_valid_on_flavor("flavor", "image")
    @validation.required_services(consts.Service.NOVA, consts.Service.GLANCE)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["nova", "glance"]})
    def snapshot_server(self, image, flavor,
                        force_delete=False, **kwargs):
        """Boot a server, snapshot it, boot from the snapshot, delete all.

        Boots a server from the given image, creates a snapshot image of it,
        deletes the server, boots a second server from the snapshot, then
        deletes that server and the snapshot image.

        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param force_delete: True if force_delete should be used
        :param kwargs: Optional additional arguments for server creation
        """
        server = self._boot_server(image, flavor, **kwargs)
        # NOTE: `image` is rebound to the snapshot from here on.
        image = self._create_image(server)
        self._delete_server(server, force=force_delete)
        server = self._boot_server(image.id, flavor, **kwargs)
        self._delete_server(server, force=force_delete)
        self._delete_image(image)
    @types.convert(image={"type": "glance_image"},
                   flavor={"type": "nova_flavor"})
    @validation.image_valid_on_flavor("flavor", "image")
    @validation.required_services(consts.Service.NOVA)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["nova"]})
    def boot_server(self, image, flavor, auto_assign_nic=False, **kwargs):
        """Boot a server.

        Assumes that cleanup is done elsewhere (the "cleanup" context
        deletes the booted server after the iteration).

        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param auto_assign_nic: True if NICs should be assigned
        :param kwargs: Optional additional arguments for server creation
        """
        self._boot_server(image, flavor,
                          auto_assign_nic=auto_assign_nic, **kwargs)
    @types.convert(image={"type": "glance_image"},
                   flavor={"type": "nova_flavor"})
    @validation.image_valid_on_flavor("flavor", "image", validate_disk=False)
    @validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["nova", "cinder"]})
    def boot_server_from_volume(self, image, flavor, volume_size,
                                volume_type=None, auto_assign_nic=False,
                                **kwargs):
        """Boot a server from volume.

        The scenario first creates a volume and then a server.
        Assumes that cleanup is done elsewhere.

        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param volume_size: volume size (in GB)
        :param volume_type: specifies volume type when there are
                            multiple backends
        :param auto_assign_nic: True if NICs should be assigned
        :param kwargs: Optional additional arguments for server creation
        """
        volume = self._create_volume(volume_size, imageRef=image,
                                     volume_type=volume_type)
        # "<volume_id>:::1" — presumably id:type:size:delete_on_termination;
        # image=None makes nova boot from the mapped volume.
        block_device_mapping = {"vda": "%s:::1" % volume.id}
        self._boot_server(None, flavor, auto_assign_nic=auto_assign_nic,
                          block_device_mapping=block_device_mapping,
                          **kwargs)
def _bind_actions(self):
actions = ["hard_reboot", "soft_reboot", "stop_start",
"rescue_unrescue", "pause_unpause", "suspend_resume",
"lock_unlock", "shelve_unshelve"]
action_builder = task_utils.ActionBuilder(actions)
action_builder.bind_action("hard_reboot", self._reboot_server)
action_builder.bind_action("soft_reboot", self._soft_reboot_server)
action_builder.bind_action("stop_start",
self._stop_and_start_server)
action_builder.bind_action("rescue_unrescue",
self._rescue_and_unrescue_server)
action_builder.bind_action("pause_unpause",
self._pause_and_unpause_server)
action_builder.bind_action("suspend_resume",
self._suspend_and_resume_server)
action_builder.bind_action("lock_unlock",
self._lock_and_unlock_server)
action_builder.bind_action("shelve_unshelve",
self._shelve_and_unshelve_server)
return action_builder
    def _stop_and_start_server(self, server):
        """Stop and then start the given server.

        A stop will be issued on the given server upon which time
        this method will wait for the server to become 'SHUTOFF'.
        Once the server is SHUTOFF a start will be issued and this
        method will wait for the server to become 'ACTIVE' again.

        :param server: The server to stop and then start.
        """
        self._stop_server(server)
        self._start_server(server)
    def _rescue_and_unrescue_server(self, server):
        """Rescue and then unrescue the given server.

        A rescue will be issued on the given server upon which time
        this method will wait for the server to become 'RESCUE'.
        Once the server is RESCUE an unrescue will be issued and
        this method will wait for the server to become 'ACTIVE'
        again.

        :param server: The server to rescue and then unrescue.
        """
        self._rescue_server(server)
        self._unrescue_server(server)
def _pause_and_unpause_server(self, server):
"""Pause and then unpause the given server.
A pause will be issued on the given server upon which time
this method will wait for the server to become 'PAUSED'.
Once the server is PAUSED an unpause will be issued and
this method will wait for the server to become 'ACTIVE'
again.
:param server: The server to pause and then unpause.
"""
self._pause_server(server)
self._unpause_server(server)
def _suspend_and_resume_server(self, server):
"""Suspend and then resume the given server.
A suspend will be issued on the given server upon which time
this method will wait for the server to become 'SUSPENDED'.
Once the server is SUSPENDED an resume will be issued and
this method will wait for the server to become 'ACTIVE'
again.
:param server: The server to suspend and then resume.
"""
self._suspend_server(server)
self._resume_server(server)
def _lock_and_unlock_server(self, server):
"""Lock and then unlock the given server.
A lock will be issued on the given server upon which time
this method will wait for the server to become locked'.
Once the server is locked an unlock will be issued.
:param server: The server to lock and then unlock.
"""
self._lock_server(server)
self._unlock_server(server)
def _shelve_and_unshelve_server(self, server):
"""Shelve and then unshelve the given server.
A shelve will be issued on the given server upon which time
this method will wait for the server to become 'SHELVED'.
Once the server is SHELVED an unshelve will be issued and
this method will wait for the server to become 'ACTIVE'
again.
:param server: The server to shelve and then unshelve.
"""
self._shelve_server(server)
self._unshelve_server(server)
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"},
to_flavor={"type": "nova_flavor"})
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def resize_server(self, image, flavor, to_flavor,
force_delete=False, **kwargs):
"""Boot a server, then resize and delete it.
This test will confirm the resize by default,
or revert the resize if confirm is set to false.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param to_flavor: flavor to be used to resize the booted instance
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self._resize(server, to_flavor)
# by default we confirm
confirm = kwargs.get("confirm", True)
if confirm:
self._resize_confirm(server)
else:
self._resize_revert(server)
self._delete_server(server, force=force_delete)
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"},
to_flavor={"type": "nova_flavor"})
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder", "nova"]})
def boot_server_attach_created_volume_and_resize(
self, image, flavor, to_flavor, volume_size, min_sleep=0,
max_sleep=0, force_delete=False, confirm=True, do_delete=True,
boot_server_kwargs=None, create_volume_kwargs=None):
"""Create a VM from image, attach a volume to it and resize.
Simple test to create a VM and attach a volume, then resize the VM,
detach the volume then delete volume and VM.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between attaching a volume and running resize
(of random duration from range [min_sleep, max_sleep]).
:param image: Glance image name to use for the VM
:param flavor: VM flavor name
:param to_flavor: flavor to be used to resize the booted instance
:param volume_size: volume size (in GB)
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
:param force_delete: True if force_delete should be used
:param confirm: True if need to confirm resize else revert resize
:param do_delete: True if resources needs to be deleted explicitly
else use rally cleanup to remove resources
:param boot_server_kwargs: optional arguments for VM creation
:param create_volume_kwargs: optional arguments for volume creation
"""
boot_server_kwargs = boot_server_kwargs or {}
create_volume_kwargs = create_volume_kwargs or {}
server = self._boot_server(image, flavor, **boot_server_kwargs)
volume = self._create_volume(volume_size, **create_volume_kwargs)
attachment = self._attach_volume(server, volume)
self.sleep_between(min_sleep, max_sleep)
self._resize(server, to_flavor)
if confirm:
self._resize_confirm(server)
else:
self._resize_revert(server)
if do_delete:
self._detach_volume(server, volume, attachment)
self._delete_volume(volume)
self._delete_server(server, force=force_delete)
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"},
to_flavor={"type": "nova_flavor"})
@validation.image_valid_on_flavor("flavor", "image", validate_disk=False)
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova", "cinder"]})
def boot_server_from_volume_and_resize(
self, image, flavor, to_flavor, volume_size, min_sleep=0,
max_sleep=0, force_delete=False, confirm=True, do_delete=True,
boot_server_kwargs=None, create_volume_kwargs=None):
"""Boot a server from volume, then resize and delete it.
The scenario first creates a volume and then a server.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between volume creation and deletion
(of random duration from [min_sleep, max_sleep]).
This test will confirm the resize by default,
or revert the resize if confirm is set to false.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param to_flavor: flavor to be used to resize the booted instance
:param volume_size: volume size (in GB)
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
:param force_delete: True if force_delete should be used
:param confirm: True if need to confirm resize else revert resize
:param do_delete: True if resources needs to be deleted explicitly
else use rally cleanup to remove resources
:param boot_server_kwargs: optional arguments for VM creation
:param create_volume_kwargs: optional arguments for volume creation
"""
boot_server_kwargs = boot_server_kwargs or {}
create_volume_kwargs = create_volume_kwargs or {}
if boot_server_kwargs.get("block_device_mapping"):
LOG.warning("Using already existing volume is not permitted.")
volume = self._create_volume(volume_size, imageRef=image,
**create_volume_kwargs)
boot_server_kwargs["block_device_mapping"] = {
"vda": "%s:::1" % volume.id}
server = self._boot_server(None, flavor, **boot_server_kwargs)
self.sleep_between(min_sleep, max_sleep)
self._resize(server, to_flavor)
if confirm:
self._resize_confirm(server)
else:
self._resize_revert(server)
if do_delete:
self._delete_server(server, force=force_delete)
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"})
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def suspend_and_resume_server(self, image, flavor,
force_delete=False, **kwargs):
"""Create a server, suspend, resume and then delete it
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self._suspend_server(server)
self._resume_server(server)
self._delete_server(server, force=force_delete)
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"})
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def pause_and_unpause_server(self, image, flavor,
force_delete=False, **kwargs):
"""Create a server, pause, unpause and then delete it
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self._pause_server(server)
self._unpause_server(server)
self._delete_server(server, force=force_delete)
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"})
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def shelve_and_unshelve_server(self, image, flavor,
force_delete=False, **kwargs):
"""Create a server, shelve, unshelve and then delete it
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self._shelve_server(server)
self._unshelve_server(server)
self._delete_server(server, force=force_delete)
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"})
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True, users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def boot_and_live_migrate_server(self, image,
flavor, block_migration=False,
disk_over_commit=False, min_sleep=0,
max_sleep=0, **kwargs):
"""Live Migrate a server.
This scenario launches a VM on a compute node available in
the availability zone and then migrates the VM to another
compute node on the same availability zone.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between VM booting and running live migration
(of random duration from range [min_sleep, max_sleep]).
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param block_migration: Specifies the migration type
:param disk_over_commit: Specifies whether to allow overcommit
on migrated instance or not
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self.sleep_between(min_sleep, max_sleep)
new_host = self._find_host_to_migrate(server)
self._live_migrate(server, new_host,
block_migration, disk_over_commit)
self._delete_server(server)
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"})
@validation.image_valid_on_flavor("flavor", "image", validate_disk=False)
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(admin=True, users=True)
@scenario.configure(context={"cleanup": ["nova", "cinder"]})
def boot_server_from_volume_and_live_migrate(self, image, flavor,
volume_size,
volume_type=None,
block_migration=False,
disk_over_commit=False,
force_delete=False,
min_sleep=0, max_sleep=0,
**kwargs):
"""Boot a server from volume and then migrate it.
The scenario first creates a volume and a server booted from
the volume on a compute node available in the availability zone and
then migrates the VM to another compute node on the same availability
zone.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between VM booting and running live migration
(of random duration from range [min_sleep, max_sleep]).
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param volume_size: volume size (in GB)
:param volume_type: specifies volume type when there are
multiple backends
:param block_migration: Specifies the migration type
:param disk_over_commit: Specifies whether to allow overcommit
on migrated instance or not
:param force_delete: True if force_delete should be used
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
:param kwargs: Optional additional arguments for server creation
"""
volume = self._create_volume(volume_size, imageRef=image,
volume_type=volume_type)
block_device_mapping = {"vda": "%s:::1" % volume.id}
server = self._boot_server(None, flavor,
block_device_mapping=block_device_mapping,
**kwargs)
self.sleep_between(min_sleep, max_sleep)
new_host = self._find_host_to_migrate(server)
self._live_migrate(server, new_host,
block_migration, disk_over_commit)
self._delete_server(server, force=force_delete)
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"})
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(admin=True, users=True)
@scenario.configure(context={"cleanup": ["cinder", "nova"]})
def boot_server_attach_created_volume_and_live_migrate(
self,
image,
flavor,
size,
block_migration=False,
disk_over_commit=False,
boot_server_kwargs=None,
create_volume_kwargs=None,
min_sleep=0,
max_sleep=0):
"""Create a VM, attach a volume to it and live migrate.
Simple test to create a VM and attach a volume, then migrate the VM,
detach the volume and delete volume/VM.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between attaching a volume and running live
migration (of random duration from range [min_sleep, max_sleep]).
:param image: Glance image name to use for the VM
:param flavor: VM flavor name
:param size: volume size (in GB)
:param block_migration: Specifies the migration type
:param disk_over_commit: Specifies whether to allow overcommit
on migrated instance or not
:param boot_server_kwargs: optional arguments for VM creation
:param create_volume_kwargs: optional arguments for volume creation
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
"""
if boot_server_kwargs is None:
boot_server_kwargs = {}
if create_volume_kwargs is None:
create_volume_kwargs = {}
server = self._boot_server(image, flavor, **boot_server_kwargs)
volume = self._create_volume(size, **create_volume_kwargs)
attachment = self._attach_volume(server, volume)
self.sleep_between(min_sleep, max_sleep)
new_host = self._find_host_to_migrate(server)
self._live_migrate(server, new_host,
block_migration, disk_over_commit)
self._detach_volume(server, volume, attachment)
self._delete_volume(volume)
self._delete_server(server)
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"})
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True, users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def boot_and_migrate_server(self, image, flavor, **kwargs):
"""Migrate a server.
This scenario launches a VM on a compute node available in
the availability zone, and then migrates the VM
to another compute node on the same availability zone.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self._migrate(server)
# NOTE(wtakase): This is required because cold migration and resize
# share same code path.
confirm = kwargs.get("confirm", True)
if confirm:
self._resize_confirm(server, status="ACTIVE")
else:
self._resize_revert(server, status="ACTIVE")
self._delete_server(server)
@types.convert(from_image={"type": "glance_image"},
to_image={"type": "glance_image"},
flavor={"type": "nova_flavor"})
@validation.image_valid_on_flavor("flavor", "from_image")
@validation.image_valid_on_flavor("flavor", "to_image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True, users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def boot_and_rebuild_server(self, from_image, to_image, flavor, **kwargs):
"""Rebuild a server.
This scenario launches a VM, then rebuilds that VM with a
different image.
:param from_image: image to be used to boot an instance
:param to_image: image to be used to rebuild the instance
:param flavor: flavor to be used to boot an instance
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(from_image, flavor, **kwargs)
self._rebuild_server(server, to_image)
self._delete_server(server)
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"})
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@validation.required_contexts("network")
@scenario.configure(context={"cleanup": ["nova"]})
def boot_and_associate_floating_ip(self, image, flavor, **kwargs):
"""Boot a server and associate a floating IP to it.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
address = network_wrapper.wrap(self.clients, self).create_floating_ip(
tenant_id=server.tenant_id)
self._associate_floating_ip(server, address["ip"])
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"})
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def boot_and_show_server(self, image, flavor, **kwargs):
"""Show server details.
This simple scenario tests the nova show command by retrieving
the server details.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param kwargs: Optional additional arguments for server creation
:returns: Server details
"""
server = self._boot_server(image, flavor, **kwargs)
self._show_server(server)
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"})
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def boot_and_get_console_output(self, image, flavor,
length=None, **kwargs):
"""Get text console output from server.
This simple scenario tests the nova console-log command by retrieving
the text console log output.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param length: The number of tail log lines you would like to retrieve.
None (default value) or -1 means unlimited length.
:param kwargs: Optional additional arguments for server creation
:returns: Text console log output for server
"""
server = self._boot_server(image, flavor, **kwargs)
self._get_server_console_output(server, length)
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"})
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova"]})
def boot_and_update_server(self, image, flavor, description=None,
**kwargs):
"""Boot a server, then update its name and description.
The scenario first creates a server, then update it.
Assumes that cleanup is done elsewhere.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param description: update the server description
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self._update_server(server, description)
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"})
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["nova", "cinder"]})
def boot_server_from_volume_snapshot(self, image, flavor, volume_size,
volume_type=None,
auto_assign_nic=False, **kwargs):
"""Boot a server from a snapshot.
The scenario first creates a volume and creates a
snapshot from this volume, then boots a server from
the created snapshot.
Assumes that cleanup is done elsewhere.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param volume_size: volume size (in GB)
:param volume_type: specifies volume type when there are
multiple backends
:param auto_assign_nic: True if NICs should be assigned
:param kwargs: Optional additional arguments for server creation
"""
volume = self._create_volume(volume_size, imageRef=image,
volume_type=volume_type)
snapshot = self._create_snapshot(volume.id, False)
block_device_mapping = {"vda": "%s:snap::1" % snapshot.id}
self._boot_server(None, flavor, auto_assign_nic=auto_assign_nic,
block_device_mapping=block_device_mapping,
**kwargs)
| |
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Zheng Zhang
# --------------------------------------------------------
import numpy as np
import mxnet as mx
import random
import math
from mxnet.executor_manager import _split_input_slice
from utils.image import tensor_vstack
from segmentation.segmentation import get_segmentation_train_batch, get_segmentation_test_batch
from PIL import Image
from multiprocessing import Pool
class TestDataLoader(mx.io.DataIter):
    """mx.io.DataIter over a segmentation test set: yields data-only batches
    (labels are None) plus per-image ``im_info`` metadata."""
    def __init__(self, segdb, config, batch_size=1, shuffle=False):
        """
        :param segdb: list of per-image segmentation records
        :param config: experiment configuration object
        :param batch_size: number of images per batch
        :param shuffle: if True, reshuffle the visiting order on reset()
        """
        super(TestDataLoader, self).__init__()
        # save parameters as properties
        self.segdb = segdb
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.config = config
        # infer properties from roidb
        self.size = len(self.segdb)
        self.index = np.arange(self.size)
        # decide data and label names (only for training)
        self.data_name = ['data']
        self.label_name = None
        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.data = None
        self.label = []
        self.im_info = None
        # get first batch to fill in provide_data and provide_label
        self.reset()
        self.get_batch()
    @property
    def provide_data(self):
        # Per-device list of (name, shape) pairs describing the data blobs.
        return [[(k, v.shape) for k, v in zip(self.data_name, self.data[i])] for i in xrange(len(self.data))]
    @property
    def provide_label(self):
        # Test batches carry no labels.
        return [None for i in xrange(len(self.data))]
    @property
    def provide_data_single(self):
        # (name, shape) pairs for the first device slice only.
        return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]
    @property
    def provide_label_single(self):
        return None
    def reset(self):
        # Rewind the iterator; reshuffle the index order if enabled.
        self.cur = 0
        if self.shuffle:
            np.random.shuffle(self.index)
    def iter_next(self):
        # A partial final batch is still served (get_batch clips at self.size).
        return self.cur < self.size
    def next(self):
        # Python 2 iterator protocol: return the next DataBatch or stop.
        if self.iter_next():
            self.get_batch()
            self.cur += self.batch_size
            return mx.io.DataBatch(data=self.data, label=self.label,
                                   pad=self.getpad(), index=self.getindex(),
                                   provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration
    def getindex(self):
        # NOTE: Python 2 integer division (module also uses xrange).
        return self.cur / self.batch_size
    def getpad(self):
        # Number of padding entries when the final batch is short.
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0
    def get_batch(self):
        # Load the current segdb slice into self.data / self.im_info.
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        segdb = [self.segdb[self.index[i]] for i in range(cur_from, cur_to)]
        data, label, im_info = get_segmentation_test_batch(segdb, self.config)
        self.data = [[mx.nd.array(data[i][name]) for name in self.data_name] for i in xrange(len(data))]
        self.im_info = im_info
class TrainDataLoader(mx.io.DataIter):
    """mx.io.DataIter producing (optionally cropped) segmentation training
    batches, prefetched in parallel by a multiprocessing Pool with one
    worker slice per device context."""
    def __init__(self, sym, segdb, config, batch_size=1, crop_height = 768, crop_width = 1024, shuffle=False, ctx=None, work_load_list=None):
        """
        This Iter will provide seg data to Deeplab network
        :param sym: to infer shape
        :param segdb: must be preprocessed
        :param config: config file
        :param batch_size: must divide BATCH_SIZE(128)
        :param crop_height: the height of cropped image
        :param crop_width: the width of cropped image
        :param shuffle: bool
        :param ctx: list of contexts
        :param work_load_list: list of work load
        :return: DataLoader
        """
        super(TrainDataLoader, self).__init__()
        # save parameters as properties
        self.sym = sym
        self.segdb = segdb
        self.config = config
        self.batch_size = batch_size
        # Cropping is applied only when enabled in the config; otherwise the
        # crop dimensions are ignored by the fetch workers (None disables).
        if self.config.TRAIN.ENABLE_CROP:
            self.crop_height = crop_height
            self.crop_width = crop_width
        else:
            self.crop_height = None
            self.crop_width = None
        self.shuffle = shuffle
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list
        # infer properties from segdb
        self.size = len(segdb)
        self.index = np.arange(self.size)
        # decide data and label names
        self.data_name = ['data']
        self.label_name = ['label']
        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.batch = None
        self.data = None
        self.label = None
        # init multi-process pool
        self.pool = Pool(processes = len(self.ctx))
        # get first batch to fill in provide_data and provide_label
        self.reset()
        self.get_batch_parallel()
        random.seed()
    @property
    def provide_data(self):
        # Per-device list of (name, shape) pairs for the data blobs.
        return [[(k, v.shape) for k, v in zip(self.data_name, self.data[i])] for i in xrange(len(self.data))]
    @property
    def provide_label(self):
        # Per-device list of (name, shape) pairs for the label blobs.
        return [[(k, v.shape) for k, v in zip(self.label_name, self.label[i])] for i in xrange(len(self.data))]
    @property
    def provide_data_single(self):
        # (name, shape) pairs for the first device slice only.
        return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]
    @property
    def provide_label_single(self):
        return [(k, v.shape) for k, v in zip(self.label_name, self.label[0])]
    def reset(self):
        # Rewind to the first batch; reshuffle the order if enabled.
        self.cur = 0
        if self.shuffle:
            np.random.shuffle(self.index)
    def iter_next(self):
        # Unlike TestDataLoader, trailing records that cannot fill a whole
        # batch are dropped.
        return self.cur + self.batch_size <= self.size
    def next(self):
        # Python 2 iterator protocol: return the next DataBatch or stop.
        if self.iter_next():
            self.get_batch_parallel()
            self.cur += self.batch_size
            return mx.io.DataBatch(data=self.data, label=self.label,
                                   pad=self.getpad(), index=self.getindex(),
                                   provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration
    def getindex(self):
        # NOTE: Python 2 integer division.
        return self.cur / self.batch_size
    def getpad(self):
        # Number of padding entries when the final batch is short.
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0
    def infer_shape(self, max_data_shape=None, max_label_shape=None):
        """ Return maximum data and label shape for single gpu """
        if max_data_shape is None:
            max_data_shape = []
        if max_label_shape is None:
            max_label_shape = []
        max_shapes = dict(max_data_shape + max_label_shape)
        _, label_shape, _ = self.sym.infer_shape(**max_shapes)
        label_shape = [(self.label_name[0], label_shape)]
        return max_data_shape, label_shape
    def get_batch_parallel(self):
        # Fetch the current batch: slice segdb, split across contexts per
        # work_load_list, and load each slice in a pool worker via parfetch.
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        segdb = [self.segdb[self.index[i]] for i in range(cur_from, cur_to)]
        # decide multi device slice
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)
        multiprocess_results = []
        for idx, islice in enumerate(slices):
            isegdb = [segdb[i] for i in range(islice.start, islice.stop)]
            multiprocess_results.append(self.pool.apply_async(parfetch, (self.config, self.crop_width, self.crop_height, isegdb)))
        # Block until every worker slice is ready, preserving device order.
        rst = [multiprocess_result.get() for multiprocess_result in multiprocess_results]
        all_data = [_['data'] for _ in rst]
        all_label = [_['label'] for _ in rst]
        self.data = [[mx.nd.array(data[key]) for key in self.data_name] for data in all_data]
        self.label = [[mx.nd.array(label[key]) for key in self.label_name] for label in all_label]
def parfetch(config, crop_width, crop_height, isegdb):
    """Worker-side fetch of one training slice (runs inside the pool).

    Loads a training batch for the given segdb records and, when cropping
    is enabled, cuts a random crop_height x crop_width window out of both
    the image data and the label (NCHW layout).

    :param config: experiment configuration object
    :param crop_width: crop width in pixels (only used with TRAIN.ENABLE_CROP)
    :param crop_height: crop height in pixels
    :param isegdb: sub-list of segdb records assigned to this worker
    :return: dict with 'data' and 'label' batch dictionaries
    """
    data, label = get_segmentation_train_batch(isegdb, config)
    if config.TRAIN.ENABLE_CROP:
        data_internal = data['data']
        label_internal = label['label']
        # Pick a random top-left corner so the crop fits inside the image.
        # int(random.random() * n) == floor for non-negative values, which
        # replaces the original math.floor + C-style (int)(x) casts.
        max_sx = data_internal.shape[3] - crop_width + 1
        max_sy = data_internal.shape[2] - crop_height + 1
        sx = int(random.random() * max_sx)
        sy = int(random.random() * max_sy)
        assert 0 <= sx < max_sx
        assert 0 <= sy < max_sy
        # Crop image and label to the same window.
        data_internal = data_internal[:, :, sy:sy + crop_height, sx:sx + crop_width]
        label_internal = label_internal[:, :, sy:sy + crop_height, sx:sx + crop_width]
        data['data'] = data_internal
        label['label'] = label_internal
        assert (data['data'].shape[2] == crop_height) and (data['data'].shape[3] == crop_width)
        assert (label['label'].shape[2] == crop_height) and (label['label'].shape[3] == crop_width)
    return {'data': data, 'label': label}
| |
# Copyright 2012-2013 Ravello Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The API for using Ravello through Fabric is defined in the ``testmill.fabric``
module. The interface is a functional interface very similar to Fabric itself.
Configuration attributes that are needed by TestMill are stored in the Fabric
``env`` global environment. The following attributes are known:
==================== =========================================================
Attribute Description
==================== =========================================================
ravello_user The ssh user to use when connecting to VMs in Ravello
applications. The default value is ``'ravello'``. You
can set this to ``None`` to let Fabric determine the user
name.
ravello_api_user The user for the Ravello API. If not set, you will be
prompted for it when a connection is needed. This is the
same user name that you use to log on to the Ravello web
interface.
ravello_api_password The password for the Ravello API. If not set, you will be
prompted for it when a connection is needed. This is the
same password that you use to log on to the Ravello web
interface.
ravello_api_url The URL to the Ravello API. The default is ``None`` which
uses the default API entry point.
==================== =========================================================
Most functions need a connection to the Ravello API. This connection is created
automatically for you the first time it is needed. SSH keys for the virtual
machines in Ravello applications are automatically set up for you as well, and
added to ``env.key_filename``.
"""
from __future__ import absolute_import, print_function
import argparse
from functools import wraps
from testmill.state import env
from testmill import (cache, error, login, keypair, ravello, util,
application, main, compat, manifest, console)
from fabric import api as fab
import fabric.utils
# Public API of this module (see the module docstring for usage).
__all__ = ['new_application_name', 'get_application', 'get_applications',
           'create_application', 'start_application', 'stop_application',
           'remove_application', 'new_blueprint_name', 'get_blueprint',
           'get_blueprints', 'create_blueprint', 'remove_blueprint',
           'lookup', 'reverse_lookup', 'hosts', 'only_on']
def _setup_testmill():
    """Set up or re-sync a TestMill "env" from a Fabric "env".

    Also sets some useful defaults in the Fabric environment for using
    TestMill. In essence this is a bridge between Fabric and the TestMill
    package.
    """
    # Mirror Fabric's output settings into the TestMill environment.
    env.quiet = False
    env.verbose = fab.output.debug
    env.manifest = None
    env.debug = fab.output.debug
    env.always_confirm = False
    # Provide defaults for the Fabric-level configuration attributes that
    # TestMill reads (documented in the module docstring).
    if not hasattr(fab.env, 'ravello_user'):
        fab.env.ravello_user = 'ravello'
    if not hasattr(fab.env, 'ravello_api_user'):
        fab.env.ravello_api_user = None
    if not hasattr(fab.env, 'ravello_api_password'):
        fab.env.ravello_api_password = None
    if not hasattr(fab.env, 'ravello_api_url'):
        fab.env.ravello_api_url = None
    # Connect to the Ravello API the first time we are called. Try a cached
    # token first, fall back to an (interactive) username/password login.
    if not hasattr(env, 'api'):
        main.setup_logging()
        env.service_url = fab.env.ravello_api_url
        env.api = ravello.RavelloClient(service_url=env.service_url)
        try:
            login.token_login()
        except error.ProgramError:
            if not fab.env.ravello_api_user:
                msg = 'Please enter your Ravello username: '
                fab.env.ravello_api_user = console.prompt(msg)
            env.username = fab.env.ravello_api_user
            if not fab.env.ravello_api_password:
                msg = 'Please enter your Ravello password: '
                fab.env.ravello_api_password = console.getpass(msg)
            env.password = fab.env.ravello_api_password
            login.password_login()
            login.store_token()
    # Make sure the TestMill keypair exists and that its private key is part
    # of Fabric's ``key_filename`` setting.
    if not hasattr(env, 'public_key'):
        keypair.default_keypair()
        key_filename = env.private_key_file
        if fab.env.key_filename is None:
            fab.env.key_filename = key_filename
        # BUG FIX: these isinstance() checks used to test ``fab.env`` itself
        # instead of ``fab.env.key_filename``, so a pre-existing key_filename
        # was never extended with the TestMill key.
        elif isinstance(fab.env.key_filename, compat.str):
            fab.env.key_filename = [fab.env.key_filename, key_filename]
        elif isinstance(fab.env.key_filename, list):
            fab.env.key_filename.append(key_filename)
# NOTE: functions decorated with @with_fabric should repeat their signatures
# on the first line of the docstring, otherwise they will show up in Sphinx as
# having an xxx(*args, **kwargs) signature.
def with_fabric(func):
    """Decorator that calls _setup_testmill() before invoking *func*, and
    that captures TestMill errors and forwards them to Fabric.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        _setup_testmill()
        try:
            return func(*args, **kwargs)
        # The two error types used to have duplicated, identical handlers;
        # a single tuple clause behaves the same.
        except (ravello.RavelloError, error.ProgramError) as e:
            # In debug mode let the full traceback escape; otherwise report
            # through Fabric, which aborts the current task.
            if env.debug:
                raise
            fab.env.warn_only = False
            fabric.utils.error(str(e))
    return wrapper
@with_fabric
def new_application_name(template='fabric'):
    """new_application_name(template='fabric')
    Return a new unique application name.
    The base name is given by *template*; a unique numeric suffix is
    appended to it.
    """
    return application.new_application_name(template)
@with_fabric
def get_application(name):
    """get_application(name)
    Lookup the application *name*.
    Returns the application definition (a dictionary with string keys
    describing the application) if the application exists, and None
    otherwise.
    .. seealso::
        See :ref:`application-ref` for the possible keys in the application
        definition dictionary.
    """
    app = cache.get_application(name=name)
    return None if app is None else application.appdef_from_app(app)
@with_fabric
def get_applications():
    """get_applications()
    Return a list containing all applications.
    """
    # Fetch the full record for each application before converting it to an
    # application definition.
    return [application.appdef_from_app(cache.get_application(summary['id']))
            for summary in cache.get_applications()]
@with_fabric
def create_application(name=None, blueprint=None, vms=None, cloud=None,
                       region=None, wait=True, show_progress=True):
    """create_application(name=None, blueprint=None, vms=None, cloud=None, \
            region=None, wait=True, show_progress=True)
    Create a new application.
    If *blueprint* is specified, then it must be the name of a blueprint from
    which the application is created. If *blueprint* is not specified, then an
    application will be created from scratch in which case *vms* needs to be
    specified containing a list of the VM definitions. The VM definitions are
    dictionaries containing string keys describing the VM. The arguments
    *cloud* and *region* specify which cloud and region to publish the
    application to. If these are not specified, the application is published
    to the lowest cost cloud that fits the VM definitions. If *wait* is
    nonzero, then this function will wait until the application is started up
    and its VMs are accessible via ssh. If *show_progress* is nonzero, then a
    progress bar is shown.
    The return value of this function is the application definition of the
    application that was created. In case of an error, an exception is raised.
    .. seealso::
        See :ref:`vm-ref` for the possible keys in a VM definition dictionary.
    """
    # Resolve the base: either an existing blueprint, or from scratch.
    if blueprint:
        bp = cache.get_blueprint(name=blueprint)
        if bp is None:
            error.raise_error('Blueprint `{0}` not found.', blueprint)
        if name is None:
            name = new_application_name(bp['name'])
    else:
        if name is None:
            name = new_application_name()
    # Build a minimal one-application manifest so the regular manifest
    # validation / default-percolation machinery can be reused.
    appdef = { 'name': name }
    if vms:
        appdef['vms'] = vms
    if blueprint:
        appdef['blueprint'] = blueprint
    manif = { 'applications': [appdef],
              'defaults': { 'vms': { 'smp': 1, 'memory': 2048 } } }
    manifest.check_manifest(manif)
    manifest.percolate_defaults(manif)
    manifest.check_manifest_entities(manif)
    # Create and publish; publish_application picks the cheapest cloud when
    # cloud/region are None.
    app = application.create_new_application(appdef, False)
    app = application.publish_application(app, cloud, region)
    if wait:
        # Wait until every VM in the application is reachable over ssh.
        vms = set((vm['name'] for vm in app['vms']))
        with env.let(quiet=not show_progress):
            app = application.wait_for_application(app, vms)
    return application.appdef_from_app(app)
@with_fabric
def start_application(name, wait=True, show_progress=True, timeout=1200):
    """start_application(name, wait=True, show_progress=True, timeout=1200)
    Start up an application.
    The *name* argument must be the name of an application. If *wait* is
    nonzero, then this function will wait until the application is up and its
    VMs are accessible via ssh. If *show_progress* is nonzero then a progress
    bar is shown. The *timeout* argument specifies the timeout in seconds.
    The default timeout is 20 minutes. Application startup times vary greatly
    between clouds, and whether or not the application has already been
    published.
    This method will start all VMs in the application that are in the 'STOPPED'
    state. If *wait* is nonzero, then all VMs must either be in the 'STOPPED'
    state (in which case they will get started), in the 'STARTED' state (in
    which case there is nothing to do), or in a state that will eventually
    transition to 'STARTED' state (currently 'STARTING' and 'PUBLISHING'). If a
    VM is in another state, then no action is taken and an exception is raised,
    because this call would just timeout without the ability to complete.
    This function has no return value, and raises an exception in case of an
    error.
    """
    app = cache.get_application(name=name)
    if app is None:
        error.raise_error("Application `{0}` does not exist.", name)
    app = application.start_application(app)
    if wait:
        # Waiting only makes sense when the app is in (or headed towards) a
        # reusable state; otherwise the wait below could never complete.
        state = application.get_application_state(app)
        if state not in application.vm_reuse_states:
            error.raise_error("Cannot wait for app in state '{0}'.", state)
        vms = set((vm['name'] for vm in app['vms']))
        with env.let(quiet=not show_progress):
            application.wait_for_application(app, vms, timeout)
@with_fabric
def stop_application(name, wait=True, timeout=300):
    """stop_application(name, wait=True, timeout=300)
    Stop an application with name *name*.
    This method will stop all VMs in the application that are currently in
    the 'STARTED' state. VMs in other states are not touched.
    If *wait* is nonzero, wait up to *timeout* seconds for the application
    to reach the 'STOPPED' state.
    The application may not be fully stopped even after this call. For
    example, if a VM is in the 'STARTING' state, it will eventually
    transition to 'STARTED'. But a 'STARTING' VM cannot be stopped before it
    reaches the 'STARTED' state.
    """
    app = cache.get_application(name=name)
    if app is None:
        error.raise_error("Application `{0}` does not exist.", name)
    app = application.stop_application(app)
    if wait:
        state = application.get_application_state(app)
        if state not in ('STARTED', 'STOPPING', 'STOPPED'):
            # Fixed: the message used to end with a stray comma.
            error.raise_error("Cannot wait for app in state '{0}'.", state)
        application.wait_until_application_is_in_state(app, 'STOPPED', timeout)
@with_fabric
def remove_application(name):
    """remove_application(name)
    Delete the application with name *name*.
    Deleting an application that does not exist is not an error. An
    application can always be deleted, whatever state its VMs are in; any
    running VMs are shut down uncleanly. All data belonging to the
    application, including its VMs and their disks, is destroyed. This
    operation cannot be undone.
    """
    app = cache.get_application(name=name)
    if app is not None:
        application.remove_application(app)
@with_fabric
def new_blueprint_name(template='fabric'):
    """new_blueprint_name(template='fabric')
    Return a new unique blueprint name.
    The base name is given by *template*; a unique numeric suffix is
    appended to it.
    """
    return application.new_blueprint_name(template)
@with_fabric
def get_blueprint(name):
    """get_blueprint(name)
    Lookup the blueprint *name*.
    Returns the blueprint definition if the blueprint exists, and None
    otherwise. The format is the same as the application definition that
    created the blueprint, with some operational fields removed.
    """
    bp = cache.get_blueprint(name=name)
    return application.appdef_from_app(bp) if bp else None
@with_fabric
def get_blueprints():
    """get_blueprints()
    Return a list of all blueprints.
    """
    # Fetch the full record for each blueprint before converting it.
    return [application.appdef_from_app(cache.get_blueprint(summary['id']))
            for summary in cache.get_blueprints()]
@with_fabric
def create_blueprint(name, bpname=None, wait=True):
    """create_blueprint(name, bpname=None, wait=True)
    Create a blueprint from an application.
    The *name* argument must be an application whose VMs are either all in
    the STOPPED or in the STARTED state. The *bpname* argument is the name
    of the blueprint. If the blueprint name is not specified, a new unique
    name will be allocated. If *wait* is nonzero, wait until the blueprint
    reaches the 'DONE' state.
    The return value of this function is the definition of the blueprint
    that was created.
    """
    app = cache.get_application(name=name)
    if app is None:
        error.raise_error("Application `{0}` does not exist.", name)
    # Blueprints can only be taken from fully stopped or fully started apps.
    state = application.get_application_state(app)
    if state not in ('STOPPED', 'STARTED'):
        error.raise_error('Application `{0}` is currently in state {1}.\n'
                          'Can only save when STOPPED or STARTED.',
                          name, state)
    if bpname is None:
        bpname = new_blueprint_name('bp-{0}'.format(name))
    bp = application.create_blueprint(bpname, app)
    if wait:
        bp = application.wait_until_blueprint_is_in_state(bp, 'DONE')
    return application.appdef_from_app(bp)
@with_fabric
def remove_blueprint(name):
    """remove_blueprint(name)
    Delete a blueprint.
    Deleting a blueprint that does not exist is not an error. All data
    belonging to the blueprint, including its VMs and their disks, is
    destroyed. This operation cannot be undone.
    """
    bp = cache.get_blueprint(name=name)
    if bp:
        application.remove_blueprint(bp)
@with_fabric
def lookup(appname, *vms):
    """lookup(appname, *vms)
    Lookup the addresses for virtual machines in a Ravello application.
    The *appname* parameter must be the name of the application. After this
    parameter you may pass one or multiple positional arguments containing the
    VM names. If the VMs are not specified, all VMs in the application are
    selected. The first VM argument may also be a sequence, set or mapping
    containing VM names.
    The return value is a list of Fabric host strings, and may be directly
    assigned to *env.hosts*.
    """
    app = cache.get_application(name=appname)
    if app is None:
        error.raise_error("Application `{0}` does not exist.", appname)
    # *vms* is always a tuple here, so honor the documented contract that its
    # first element may itself be a sequence/set/mapping of VM names. (The
    # previous check ``isinstance(vms, compat.str)`` could never be true.)
    if vms and not isinstance(vms[0], compat.str):
        vms = set(vms[0]) | set(vms[1:])
    app = cache.get_application(app['id'])
    hosts = []
    for vm in app.get('vms', []):
        if vms and vm['name'] not in vms:
            continue
        host = vm['dynamicMetadata']['externalIp']
        if fab.env.ravello_user:
            host = '{0}@{1}'.format(fab.env.ravello_user, host)
        hosts.append(host)
    return hosts
@with_fabric
def reverse_lookup(host):
    """reverse_lookup(host)
    Reverse lookup an *env.host* identifier.
    This function returns an (appname, vmname) tuple. If the host is not found,
    a ``ValueError`` is raised. This function can be used inside a task to get
    the VM name you are executing on. For example::
        @task
        def mytask():
            appname, vmname = ravello.reverse_lookup(env.host)
            if vmname == 'web':
                # take actions for VM 'web'
            else:
                # take other actions
    This function is especially useful with ``@parallel``, as it allows you to
    run tasks on multiple VMs in parallel where each task can behave
    differently on different machines.
    """
    # NOTE: this depends on the IP address of the host being in the cache.
    # But without poking under the hood of this API, there is no way of
    # getting a host string without the application ending up in the cache.
    # So therefore this should be fine.
    for app in env._applications_byid.values():
        for vm in app.get('vms', []):
            addr = vm.get('dynamicMetadata', {}).get('externalIp')
            if addr == host:
                return (app['name'], vm['name'])
    raise ValueError('Not a Ravello host: {0}.'.format(host))
@with_fabric
def hosts(appname, vms=None):
    """hosts(appname, vms=None)
    Run a task on *vms* in application *appname*.
    This function should be used as a decorator on a Fabric task. For example::
        @ravello.hosts('production', 'web')
        def deploy():
            # deploy to VM 'web' here
    Which is identical to::
        @hosts(ravello.lookup('production', 'web'))
        def deploy():
            # deploy to VM 'web' here
    Note that this decorator will do the lookup at the time the decorator is
    called. This lookup requires a Ravello API connection. You must either have
    ``env.ravello_api_user`` and ``env.ravello_api_password`` set, or you
    will be prompted for the API username and password. In case you want to do
    the lookup later, use :func:`lookup` directly and assign the return value
    to ``env.hosts``.
    """
    # Fixed: ``appname`` was misspelled as ``apppname`` (NameError), and a
    # *vms* of None was forwarded to lookup() as if it were a VM name, which
    # selected no hosts at all.
    if vms is None:
        host_list = lookup(appname)
    else:
        host_list = lookup(appname, vms)
    return fab.hosts(*host_list)
def only_on(*names):
    """Run a Fabric task only on named VMs.
    You may pass in one or more VM names as positional arguments. The first
    element may also be a sequence, set or mapping containing names.
    This function should be used as a decorator on a task, for example::
        @ravello.only_on('db')
        def deploy():
            # deploy to VM 'db' here
    This function works as a filter and does not interfere with ``env.hosts``.
    The task will run for all hosts otherwise selected, but the task will be a
    no-op for VMs not specified.
    """
    if len(names) > 0:
        if not isinstance(names[0], compat.str):
            # Fixed: sets do not support '+'; use the union operator instead
            # (the old code raised TypeError for a sequence first argument).
            names = set(names[0]) | set(names[1:])
    def wrapper(func):
        @wraps(func)
        def invoke(*args, **kwargs):
            # Skip the task body entirely on VMs that were not named.
            appname, vmname = reverse_lookup(fab.env.host)
            if vmname not in names:
                return
            return func(*args, **kwargs)
        return invoke
    return wrapper
| |
#!/usr/bin/env python
import json
import netaddr
import os
import openstack
import subprocess
# Name of the provisioning (ctlplane) Neutron network managed by this script.
CTLPLANE_NETWORK_NAME = 'ctlplane'
# Deployment settings arrive as JSON in the 'config' environment variable;
# keys read below include physical_network, mtu, subnets, local_subnet,
# local_ip, nameservers, enable_routed_networks and cloud_name.
CONF = json.loads(os.environ['config'])
def _run_command(args, env=None, name=None):
"""Run the command defined by args and return its output
:param args: List of arguments for the command to be run.
:param env: Dict defining the environment variables. Pass None to use
the current environment.
:param name: User-friendly name for the command being run. A value of
None will cause args[0] to be used.
"""
if name is None:
name = args[0]
if env is None:
env = os.environ
env = env.copy()
# When running a localized python script, we need to tell it that we're
# using utf-8 for stdout, otherwise it can't tell because of the pipe.
env['PYTHONIOENCODING'] = 'utf8'
try:
return subprocess.check_output(args,
stderr=subprocess.STDOUT,
env=env).decode('utf-8')
except subprocess.CalledProcessError as ex:
print('ERROR: %s failed: %s' % (name, ex.output))
raise
def _ensure_neutron_network(sdk):
    """Create the ctlplane network if it is missing, otherwise update it.

    Returns the created/updated network object; re-raises on API failure.
    """
    try:
        network = list(sdk.network.networks(name=CTLPLANE_NETWORK_NAME))
        if not network:
            # Flat provider network on the configured physical network.
            network = sdk.network.create_network(
                name=CTLPLANE_NETWORK_NAME,
                provider_network_type='flat',
                provider_physical_network=CONF['physical_network'],
                mtu=CONF['mtu'])
            print('INFO: Network created %s' % network)
        else:
            # Only name and MTU are refreshed on an existing network.
            network = sdk.network.update_network(
                network[0].id,
                name=CTLPLANE_NETWORK_NAME,
                mtu=CONF['mtu'])
            print('INFO: Network updated %s' % network)
    except Exception:
        print('ERROR: Network create/update failed.')
        raise
    return network
def _neutron_subnet_create(sdk, network_id, cidr, gateway, host_routes,
                           allocation_pools, name, segment_id, dns_nameservers):
    """Create a ctlplane subnet.

    IPv6 subnets are created in dhcpv6-stateless mode and without host
    routes; IPv4 subnets get DHCP enabled and the supplied host routes.
    Returns the created subnet; re-raises on API failure.
    """
    try:
        if netaddr.IPNetwork(cidr).version == 6:
            subnet = sdk.network.create_subnet(
                name=name,
                cidr=cidr,
                gateway_ip=gateway,
                enable_dhcp=True,
                ip_version='6',
                ipv6_address_mode='dhcpv6-stateless',
                ipv6_ra_mode='dhcpv6-stateless',
                allocation_pools=allocation_pools,
                network_id=network_id,
                segment_id=segment_id,
                dns_nameservers=dns_nameservers)
        else:
            subnet = sdk.network.create_subnet(
                name=name,
                cidr=cidr,
                gateway_ip=gateway,
                host_routes=host_routes,
                enable_dhcp=True,
                ip_version='4',
                allocation_pools=allocation_pools,
                network_id=network_id,
                segment_id=segment_id,
                dns_nameservers=dns_nameservers)
        print('INFO: Subnet created %s' % subnet)
    except Exception:
        print('ERROR: Create subnet %s failed.' % name)
        raise
    return subnet
def _neutron_subnet_update(sdk, subnet_id, cidr, gateway, host_routes,
                           allocation_pools, name, dns_nameservers):
    """Update an existing ctlplane subnet.

    *cidr* is only used to pick the IPv4 or IPv6 update path; host routes
    are not set on IPv6 subnets (stateless addressing). Returns the updated
    subnet (added for consistency with _neutron_subnet_create); re-raises on
    API failure.
    """
    try:
        if netaddr.IPNetwork(cidr).version == 6:
            subnet = sdk.network.update_subnet(
                subnet_id,
                name=name,
                gateway_ip=gateway,
                allocation_pools=allocation_pools,
                dns_nameservers=dns_nameservers)
        else:
            subnet = sdk.network.update_subnet(
                subnet_id,
                name=name,
                gateway_ip=gateway,
                host_routes=host_routes,
                allocation_pools=allocation_pools,
                dns_nameservers=dns_nameservers)
        print('INFO: Subnet updated %s' % subnet)
    except Exception:
        print('ERROR: Update of subnet %s failed.' % name)
        raise
    return subnet
def _neutron_add_subnet_segment_association(sdk, subnet_id, segment_id):
try:
subnet = sdk.network.update_subnet(subnet_id, segment_id=segment_id)
print('INFO: Segment association added to Subnet %s' % subnet)
except Exception:
print('ERROR: Associationg segment with subnet %s failed.' % subnet_id)
raise
def _neutron_segment_create(sdk, name, network_id, phynet):
try:
segment = sdk.network.create_segment(
name=name,
network_id=network_id,
physical_network=phynet,
network_type='flat')
print('INFO: Neutron Segment created %s' % segment)
except Exception as ex:
print('ERROR: Neutron Segment %s create failed.' % name)
raise
return segment
def _neutron_segment_update(sdk, segment_id, name):
try:
segment = sdk.network.update_segment(segment_id, name=name)
print('INFO: Neutron Segment updated %s', segment)
except Exception:
print('ERROR: Neutron Segment %s update failed.' % name)
raise
def _ensure_neutron_router(sdk, name, subnet_id):
try:
router = sdk.network.create_router(name=name, admin_state_up='true')
sdk.network.add_interface_to_router(router.id, subnet_id=subnet_id)
except Exception:
print('ERROR: Create router for subnet %s failed.' % name)
raise
def _get_subnet(sdk, cidr, network_id):
try:
subnet = list(sdk.network.subnets(cidr=cidr, network_id=network_id))
except Exception as ex:
print('ERROR: Get subnet with cidr %s failed.' % cidr)
raise
return False if not subnet else subnet[0]
def _get_segment(sdk, phy, network_id):
try:
segment = list(sdk.network.segments(physical_network=phy,
network_id=network_id))
except Exception:
print('ERROR: Get segment for physical_network %s on network_id %s '
'failed.' % (phy, network_id))
raise
return False if not segment else segment[0]
def _set_network_tags(sdk, network, tags):
try:
sdk.network.set_tags(network, tags=tags)
print('INFO: Tags %s added to network %s.' % (tags, network.name))
except Exception:
print('ERROR: Setting tags %s on network %s failed.' %
(tags, network.name))
raise
def _local_neutron_segments_and_subnets(sdk, ctlplane_id, net_cidrs):
    """Create and update the ctlplane subnet on the segment that is local to
    the undercloud.

    Appends the subnet's CIDR to *net_cidrs* and returns the list.
    """
    s = CONF['subnets'][CONF['local_subnet']]
    name = CONF['local_subnet']
    subnet = _get_subnet(sdk, s['NetworkCidr'], ctlplane_id)
    segment = _get_segment(sdk, CONF['physical_network'], ctlplane_id)
    # Route the metadata service address via the undercloud itself.
    host_routes = [{'destination': '169.254.169.254/32',
                    'nexthop': CONF['local_ip']}]
    host_routes += s['HostRoutes']
    if subnet:
        # Fixed idiom: compare to None with 'is', not '=='.
        if CONF['enable_routed_networks'] and subnet.segment_id is None:
            # The subnet exists and does not have a segment association. Since
            # routed networks is enabled in the configuration, we need to
            # migrate the existing non-routed networks subnet to a routed
            # networks subnet by associating the network segment_id with the
            # subnet.
            _neutron_add_subnet_segment_association(sdk, subnet.id, segment.id)
        _neutron_subnet_update(
            sdk, subnet.id, s['NetworkCidr'], s['NetworkGateway'], host_routes,
            s.get('AllocationPools'), name, CONF['nameservers'])
    else:
        if CONF['enable_routed_networks']:
            segment_id = segment.id
        else:
            segment_id = None
        subnet = _neutron_subnet_create(
            sdk, ctlplane_id, s['NetworkCidr'], s['NetworkGateway'],
            host_routes, s.get('AllocationPools'), name, segment_id,
            CONF['nameservers'])
        # If the subnet is IPv6 we need to start a router so that router
        # advertisements are sent out for stateless IP addressing to work.
        if netaddr.IPNetwork(s['NetworkCidr']).version == 6:
            _ensure_neutron_router(sdk, name, subnet.id)
    net_cidrs.append(s['NetworkCidr'])
    return net_cidrs
def _remote_neutron_segments_and_subnets(sdk, ctlplane_id, net_cidrs):
    """Create and update the ctlplane subnet(s) on segments that are not
    local to the undercloud.

    Appends each subnet's CIDR to *net_cidrs* and returns the list.
    """
    for name in CONF['subnets']:
        s = CONF['subnets'][name]
        # The local subnet is handled by _local_neutron_segments_and_subnets.
        if name == CONF['local_subnet']:
            continue
        phynet = name
        # Remote segments reach the metadata service via the subnet gateway.
        metadata_nexthop = s['NetworkGateway']
        host_routes = [{'destination': '169.254.169.254/32',
                        'nexthop': metadata_nexthop}]
        host_routes += s['HostRoutes']
        subnet = _get_subnet(sdk, s['NetworkCidr'], ctlplane_id)
        segment = _get_segment(sdk, phynet, ctlplane_id)
        if subnet:
            _neutron_segment_update(sdk, subnet.segment_id, name)
            _neutron_subnet_update(
                sdk, subnet.id, s['NetworkCidr'], s['NetworkGateway'],
                host_routes, s.get('AllocationPools'), name,
                CONF['nameservers'])
        else:
            if segment:
                _neutron_segment_update(sdk, segment.id, name)
            else:
                segment = _neutron_segment_create(sdk, name, ctlplane_id,
                                                  phynet)
            subnet = _neutron_subnet_create(
                sdk, ctlplane_id, s['NetworkCidr'], s['NetworkGateway'],
                host_routes, s.get('AllocationPools'), name, segment.id,
                CONF['nameservers'])
        # If the subnet is IPv6 we need to start a router so that router
        # advertisements are sent out for stateless IP addressing to work.
        if netaddr.IPNetwork(s['NetworkCidr']).version == 6:
            _ensure_neutron_router(sdk, name, subnet.id)
        net_cidrs.append(s['NetworkCidr'])
    return net_cidrs
# Entry point: only configure the ctlplane network when the Neutron API is
# enabled on the undercloud (determined from hiera).
if 'true' not in _run_command(['hiera', 'neutron_api_enabled'],
                              name='hiera').lower():
    print('WARNING: UndercloudCtlplaneNetworkDeployment : The Neutron API '
          'is disabled. The ctlplane network cannot be configured.')
else:
    sdk = openstack.connect(CONF['cloud_name'])
    network = _ensure_neutron_network(sdk)
    net_cidrs = []
    # Always create/update the local_subnet first to ensure it can have the
    # subnet associated with a segment prior to creating the remote subnets if
    # the user enabled routed networks support on undercloud update.
    net_cidrs = _local_neutron_segments_and_subnets(sdk, network.id, net_cidrs)
    if CONF['enable_routed_networks']:
        net_cidrs = _remote_neutron_segments_and_subnets(sdk, network.id,
                                                         net_cidrs)
    # Set the cidrs for all ctlplane subnets as tags on the ctlplane network.
    # These tags are used for the NetCidrMapValue in tripleo-heat-templates.
    _set_network_tags(sdk, network, net_cidrs)
| |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import status_params
from resource_management.core.logger import Logger
import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.get_port_from_url import get_port_from_url
from resource_management.libraries.functions.get_stack_version import get_stack_version
from resource_management.libraries.functions.stack_tools import get_stack_name, get_stack_root
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script
from status_params import *
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import upgrade_summary
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions.constants import StackFeature
from resource_management.libraries.functions import is_empty
from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
# server configurations
config = Script.get_config()
# NOTE(review): stack root is hard-coded here rather than read from
# cluster-env; confirm this is intentional for this stack.
stack_root = '/usr/lib'
tmp_dir = Script.get_tmp_dir()
stack_name = status_params.stack_name
# Direction and target version of an in-flight stack upgrade, if any.
upgrade_direction = default("/commandParams/upgrade_direction", None)
version = default("/commandParams/version", None)
# E.g., 2.3.2.0
# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)
# Feature switches for this stack build.
stack_supports_ranger_kerberos = True
stack_supports_ranger_audit_db = False
stack_supports_core_site_for_ranger_plugin = False
# This is the version whose state is CURRENT. During an RU, this is the source version.
# DO NOT format it since we need the build number too.
upgrade_from_version = upgrade_summary.get_source_version()
# Stack the upgrade originates from; fall back to the KNOX service's
# upgrade summary when not passed in the command parameters.
source_stack = default("/commandParams/source_stack", None)
if source_stack is None:
    source_stack = upgrade_summary.get_source_stack("KNOX")
source_stack_name = get_stack_name(source_stack)
# Use the source stack's root during cross-stack upgrades, otherwise ours.
if source_stack_name is not None and source_stack_name != stack_name:
    source_stack_root = get_stack_root(source_stack_name, default('/configurations/cluster-env/stack_root', None))
else:
    source_stack_root = stack_root
# server configurations
# Default value used in HDP 2.3.0.0 and earlier.
knox_data_dir = '/var/lib/knox/data'
# Important, it has to be strictly greater than 2.3.0.0!!!
knox_master_secret_path = format('{knox_data_dir}/security/master')
knox_cert_store_path = format('{knox_data_dir}/security/keystores/gateway.jks')
knox_user = default("/configurations/knox-env/knox_user", "knox")
# server configurations
knox_data_dir = '/var/lib/knox/data'
knox_logs_dir = '/var/log/knox'
# default parameters
knox_bin = '/usr/lib/knox/bin/gateway.sh'
knox_conf_dir = '/etc/knox/conf'
ldap_bin = '/usr/lib/knox/bin/ldap.sh'
knox_client_bin = '/usr/lib/knox/bin/knoxcli.sh'
# HDP 2.2+ parameters
knox_group = default("/configurations/knox-env/knox_group", "knox")
# File mode for Knox files (octal literal; this module targets Python 2).
mode = 0644
# NameNode HA discovery: determine the nameservice, its NameNode ids, and
# which NameNode (if any) runs on this host.
dfs_ha_enabled = False
dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
if dfs_ha_nameservices is None:
    dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
namenode_rpc = None
if dfs_ha_namenode_ids:
    dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
    dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
    # HA requires at least two NameNode ids.
    if dfs_ha_namenode_ids_array_len > 1:
        dfs_ha_enabled = True
if dfs_ha_enabled:
    # Find the NameNode id / RPC address matching this host.
    # NOTE(review): `hostname` is assumed to come from
    # `from status_params import *` — confirm against status_params.
    for nn_id in dfs_ha_namemodes_ids_list:
        nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
        if hostname.lower() in nn_host.lower():
            namenode_id = nn_id
            namenode_rpc = nn_host
    # With HA enabled namenode_address is recomputed
    namenode_address = format('hdfs://{dfs_ha_nameservices}')
# Map of NameNode host -> HTTP port, used for the webhdfs URLs below.
namenode_port_map = {}
if dfs_ha_enabled:
    for nn_id in dfs_ha_namemodes_ids_list:
        nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.http-address.{dfs_ha_nameservices}.{nn_id}')]
        nn_host_parts = nn_host.split(':')
        namenode_port_map[nn_host_parts[0]] = nn_host_parts[1]
# Determine the (first) NameNode host; clusterHostInfo may provide either a
# list of hosts or a single host string.
namenode_hosts = default("/clusterHostInfo/namenode_host", None)
# Fixed idiom: isinstance() instead of type() comparison.
if isinstance(namenode_hosts, list):
    namenode_host = namenode_hosts[0]
else:
    namenode_host = namenode_hosts
# Fixed idiom: compare to None with 'is not'.
has_namenode = namenode_host is not None
# Default NameNode ports, overridden from hdfs-site when configured.
namenode_http_port = "50070"
namenode_https_port = "50470"
namenode_rpc_port = "8020"
if has_namenode:
    if 'dfs.namenode.http-address' in config['configurations']['hdfs-site']:
        namenode_http_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.http-address'])
    if 'dfs.namenode.https-address' in config['configurations']['hdfs-site']:
        namenode_https_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.https-address'])
    if dfs_ha_enabled and namenode_rpc:
        namenode_rpc_port = get_port_from_url(namenode_rpc)
    else:
        if 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
            namenode_rpc_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.rpc-address'])
# HDFS web scheme/port depend on dfs.http.policy (HTTPS_ONLY => https).
dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
hdfs_https_on = False
hdfs_scheme = 'http'
if dfs_http_policy is not None:
    hdfs_https_on = (dfs_http_policy.upper() == 'HTTPS_ONLY')
    hdfs_scheme = 'http' if not hdfs_https_on else 'https'
hdfs_port = str(namenode_http_port) if not hdfs_https_on else str(namenode_https_port)
namenode_http_port = hdfs_port
webhdfs_service_urls = ""
def buildUrlElement(protocol, hdfs_host, port, servicePath):
  """
  Render one Knox topology <url> element, e.g.
  "<url>http://host:50070/webhdfs</url>\n".

  Returns an empty string when hdfs_host or port is missing so callers
  can concatenate the result unconditionally.
  """
  if hdfs_host is None or port is None:
    return ""
  # Coerce host/port to str: ports read from cluster configs (e.g. zeppelin,
  # druid) can arrive as ints, which made the original "+" concatenation
  # raise TypeError.
  return "<url>" + protocol + "://" + str(hdfs_host) + ":" + str(port) + servicePath + "</url>" + "\n"
# Build one webhdfs <url> element per known NameNode; fall back to the single
# (non-HA) NameNode host/port when the HA port map is empty.
namenode_host_keys = namenode_port_map.keys();
if len(namenode_host_keys) > 0:
  for host in namenode_host_keys:
    webhdfs_service_urls += buildUrlElement("http", host, namenode_port_map[host], "/webhdfs")
else:
  webhdfs_service_urls = buildUrlElement("http", namenode_host, namenode_http_port, "/webhdfs")
# YARN scheme selection mirrors the HDFS policy handling above.
yarn_http_policy = default('/configurations/yarn-site/yarn.http.policy', None )
yarn_https_on = False
yarn_scheme = 'http'
if yarn_http_policy != None :
   yarn_https_on = ( yarn_http_policy.upper() == 'HTTPS_ONLY')
   yarn_scheme = 'http' if not yarn_https_on else 'https'
rm_hosts = default("/clusterHostInfo/rm_host", None)
if type(rm_hosts) is list:
  rm_host = rm_hosts[0]
else:
  rm_host = rm_hosts
has_rm = not rm_host == None
# Stock ResourceManager ports, overridden from yarn-site when present.
jt_rpc_port = "8050"
rm_port = "8080"
if has_rm:
  if 'yarn.resourcemanager.address' in config['configurations']['yarn-site']:
    jt_rpc_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.address'])
  if 'yarn.resourcemanager.webapp.address' in config['configurations']['yarn-site']:
    rm_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'])
# HiveServer2 HTTP transport endpoint.
hive_http_port = default('/configurations/hive-site/hive.server2.thrift.http.port', "10001")
hive_http_path = default('/configurations/hive-site/hive.server2.thrift.http.path', "cliservice")
hive_server_hosts = default("/clusterHostInfo/hive_server_host", None)
if type(hive_server_hosts) is list:
  hive_server_host = hive_server_hosts[0]
else:
  hive_server_host = hive_server_hosts
templeton_port = default('/configurations/webhcat-site/templeton.port', "50111")
webhcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", None)
if type(webhcat_server_hosts) is list:
  webhcat_server_host = webhcat_server_hosts[0]
else:
  webhcat_server_host = webhcat_server_hosts
hive_scheme = 'http'
webhcat_scheme = 'http'
hbase_master_scheme = 'http'
hbase_master_ui_port = default('/configurations/hbase-site/hbase.master.info.port', "16010");
hbase_master_port = default('/configurations/hbase-site/hbase.rest.port', "8080")
hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", None)
if type(hbase_master_hosts) is list:
  hbase_master_host = hbase_master_hosts[0]
else:
  hbase_master_host = hbase_master_hosts
#
# Oozie
#
oozie_https_port = None
oozie_scheme = 'http'
oozie_server_port = "11000"
oozie_server_hosts = default("/clusterHostInfo/oozie_server", None)
if type(oozie_server_hosts) is list:
  oozie_server_host = oozie_server_hosts[0]
else:
  oozie_server_host = oozie_server_hosts
oozie_scheme = 'http'
has_oozie = not oozie_server_host == None
if has_oozie:
  oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
  oozie_https_port = default("/configurations/oozie-site/oozie.https.port", None)
# A configured HTTPS port takes precedence over the port parsed from base.url.
if oozie_https_port is not None:
  oozie_scheme = 'https'
  oozie_server_port = oozie_https_port
#
# Falcon
#
falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", None)
if type(falcon_server_hosts) is list:
  falcon_server_host = falcon_server_hosts[0]
else:
  falcon_server_host = falcon_server_hosts
falcon_scheme = 'http'
has_falcon = not falcon_server_host == None
falcon_server_port = "15000"
if has_falcon:
  falcon_server_port = config['configurations']['falcon-env']['falcon_port']
#
# Solr
#
solr_scheme='http'
solr_server_hosts = default("/clusterHostInfo/solr_hosts", None)
if type(solr_server_hosts ) is list:
  solr_host = solr_server_hosts[0]
else:
  solr_host = solr_server_hosts
# NOTE(review): path starts with "/configuration" (singular) unlike every other
# lookup here ("/configurations"); likely always falls back to 8983 -- confirm.
solr_port=default("/configuration/solr/solr-env/solr_port","8983")
#
# Spark
#
spark_scheme = 'http'
spark_historyserver_hosts = default("/clusterHostInfo/spark_jobhistoryserver_hosts", None)
if type(spark_historyserver_hosts) is list:
  spark_historyserver_host = spark_historyserver_hosts[0]
else:
  spark_historyserver_host = spark_historyserver_hosts
spark_historyserver_ui_port = default("/configurations/spark-defaults/spark.history.ui.port", "18080")
#
# JobHistory mapreduce
#
mr_scheme='http'
mr_historyserver_address = default("/configurations/mapred-site/mapreduce.jobhistory.webapp.address", None)
#
# Yarn nodemanager
#
nodeui_scheme= 'http'
nodeui_port = "8042"
nm_hosts = default("/clusterHostInfo/nm_hosts", None)
if type(nm_hosts) is list:
  nm_host = nm_hosts[0]
else:
  nm_host = nm_hosts
# Truthy when a yarn-site config section exists at all.
has_yarn = default("/configurations/yarn-site", None )
if has_yarn and 'yarn.nodemanager.webapp.address' in config['configurations']['yarn-site']:
  nodeui_port = get_port_from_url(config['configurations']['yarn-site']['yarn.nodemanager.webapp.address'])
#
# Spark Thrift UI
#
spark_thriftserver_scheme = 'http'
# NOTE(review): int literal, unlike the string ports above -- confirm consumers.
spark_thriftserver_ui_port = 4039
spark_thriftserver_hosts = default("/clusterHostInfo/spark_thriftserver_hosts", None)
if type(spark_thriftserver_hosts) is list:
  spark_thriftserver_host = spark_thriftserver_hosts[0]
else:
  spark_thriftserver_host = spark_thriftserver_hosts
# Knox managed properties
knox_managed_pid_symlink= format('{stack_root}/knox/pids')
#knox log4j
knox_gateway_log_maxfilesize = default('/configurations/gateway-log4j/knox_gateway_log_maxfilesize',256)
knox_gateway_log_maxbackupindex = default('/configurations/gateway-log4j/knox_gateway_log_maxbackupindex',20)
knox_ldap_log_maxfilesize = default('/configurations/ldap-log4j/knox_ldap_log_maxfilesize',256)
knox_ldap_log_maxbackupindex = default('/configurations/ldap-log4j/knox_ldap_log_maxbackupindex',20)
# server configurations
knox_master_secret = config['configurations']['knox-env']['knox_master_secret']
# First (only) Knox gateway host reported by the cluster topology.
knox_host_name = config['clusterHostInfo']['knox_gateway_hosts'][0]
knox_host_name_in_cluster = config['hostname']
knox_host_port = config['configurations']['gateway-site']['gateway.port']
# Raw template bodies rendered into topology/log4j/ldif files elsewhere.
topology_template = config['configurations']['topology']['content']
admin_topology_template = config['configurations']['admin-topology']['content']
knoxsso_topology_template = config['configurations']['knoxsso-topology']['content']
gateway_log4j = config['configurations']['gateway-log4j']['content']
ldap_log4j = config['configurations']['ldap-log4j']['content']
users_ldif = config['configurations']['users-ldif']['content']
java_home = config['hostLevelParams']['java_home']
security_enabled = config['configurations']['cluster-env']['security_enabled']
smokeuser = config['configurations']['cluster-env']['smokeuser']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
if security_enabled:
  knox_keytab_path = config['configurations']['knox-env']['knox_keytab_path']
  _hostname_lowercase = config['hostname'].lower()
  # Kerberos principals use the _HOST placeholder; substitute this host.
  knox_principal_name = config['configurations']['knox-env']['knox_principal_name'].replace('_HOST',_hostname_lowercase)
# for curl command in ranger plugin to get db connector
jdk_location = config['hostLevelParams']['jdk_location']
# ranger knox plugin start section
# ranger host
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
# ranger support xml_configuration flag, instead of depending on ranger xml_configurations_supported/ranger-env, using stack feature
xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
# ambari-server hostname
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
# ranger knox plugin enabled property
enable_ranger_knox = default("/configurations/ranger-knox-plugin-properties/ranger-knox-plugin-enabled", "No")
enable_ranger_knox = True if enable_ranger_knox.lower() == 'yes' else False
# get ranger knox properties if enable_ranger_knox is True
if enable_ranger_knox:
  # get ranger policy url
  policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
  if xml_configurations_supported:
    policymgr_mgr_url = config['configurations']['ranger-knox-security']['ranger.plugin.knox.policy.rest.url']
  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
  # ranger audit db user
  xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
  # ranger knox service/repositry name
  repo_name = str(config['clusterName']) + '_knox'
  # An explicit service name overrides the derived one unless it is the
  # unexpanded "{{repo_name}}" template placeholder.
  repo_name_value = config['configurations']['ranger-knox-security']['ranger.plugin.knox.service.name']
  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
    repo_name = repo_name_value
  knox_home = config['configurations']['ranger-knox-plugin-properties']['KNOX_HOME']
  common_name_for_certificate = config['configurations']['ranger-knox-plugin-properties']['common.name.for.certificate']
  repo_config_username = config['configurations']['ranger-knox-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
  # ranger-env config
  ranger_env = config['configurations']['ranger-env']
  # create ranger-env config having external ranger credential properties
  if not has_ranger_admin and enable_ranger_knox:
    external_admin_username = default('/configurations/ranger-knox-plugin-properties/external_admin_username', 'admin')
    external_admin_password = default('/configurations/ranger-knox-plugin-properties/external_admin_password', 'admin')
    external_ranger_admin_username = default('/configurations/ranger-knox-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
    external_ranger_admin_password = default('/configurations/ranger-knox-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
    ranger_env = {}
    ranger_env['admin_username'] = external_admin_username
    ranger_env['admin_password'] = external_admin_password
    ranger_env['ranger_admin_username'] = external_ranger_admin_username
    ranger_env['ranger_admin_password'] = external_ranger_admin_password
  ranger_plugin_properties = config['configurations']['ranger-knox-plugin-properties']
  policy_user = config['configurations']['ranger-knox-plugin-properties']['policy_user']
  repo_config_password = config['configurations']['ranger-knox-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
  xa_audit_db_password = ''
  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
  downloaded_custom_connector = None
  previous_jdbc_jar_name = None
  driver_curl_source = None
  driver_curl_target = None
  previous_jdbc_jar = None
  # JDBC connector staging paths for Ranger audit-to-DB support.
  if has_ranger_admin and stack_supports_ranger_audit_db:
    xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
    jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
    downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
    driver_curl_target = format("{stack_root}/knox/ext/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
    previous_jdbc_jar = format("{stack_root}/knox/ext/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
  sql_connector_jar = ''
  knox_ranger_plugin_config = {
    'username': repo_config_username,
    'password': repo_config_password,
    'knox.url': format("https://{knox_host_name}:{knox_host_port}/gateway/admin/api/v1/topologies"),
    'commonNameForCertificate': common_name_for_certificate
  }
  knox_ranger_plugin_repo = {
    'isActive': 'true',
    'config': json.dumps(knox_ranger_plugin_config),
    'description': 'knox repo',
    'name': repo_name,
    'repositoryType': 'knox',
    'assetType': '5',
  }
  custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties)
  if len(custom_ranger_service_config) > 0:
    knox_ranger_plugin_config.update(custom_ranger_service_config)
  if stack_supports_ranger_kerberos and security_enabled:
    knox_ranger_plugin_config['policy.download.auth.users'] = knox_user
    knox_ranger_plugin_config['tag.download.auth.users'] = knox_user
  if stack_supports_ranger_kerberos:
    knox_ranger_plugin_config['ambari.service.check.user'] = policy_user
    # Kerberized stacks use the newer repo payload shape (configs/type keys).
    knox_ranger_plugin_repo = {
      'isEnabled': 'true',
      'configs': knox_ranger_plugin_config,
      'description': 'knox repo',
      'name': repo_name,
      'type': 'knox'
    }
  xa_audit_db_is_enabled = False
  if xml_configurations_supported and stack_supports_ranger_audit_db:
    xa_audit_db_is_enabled = config['configurations']['ranger-knox-audit']['xasecure.audit.destination.db']
  xa_audit_hdfs_is_enabled = config['configurations']['ranger-knox-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else False
  ssl_keystore_password = config['configurations']['ranger-knox-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
  ssl_truststore_password = config['configurations']['ranger-knox-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
  # for SQLA explicitly disable audit to DB for Ranger
  if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor == 'sqla':
    xa_audit_db_is_enabled = False
  # need this to capture cluster name from where ranger knox plugin is enabled
  cluster_name = config['clusterName']
  # ranger knox plugin end section
# HDFS access parameters; all None when the cluster has no NameNode.
hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if has_namenode else None
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
default_fs = config['configurations']['core-site']['fs.defaultFS'] if has_namenode else None
hadoop_bin_dir = stack_select.get_hadoop_dir("bin") if has_namenode else None
hadoop_conf_dir = conf_select.get_hadoop_conf_dir() if has_namenode else None
import functools
#create partial functions with common arguments for every HdfsResource call
#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
HdfsResource = functools.partial(
  HdfsResource,
  user=hdfs_user,
  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
  security_enabled = security_enabled,
  keytab = hdfs_user_keytab,
  kinit_path_local = kinit_path_local,
  hadoop_bin_dir = hadoop_bin_dir,
  hadoop_conf_dir = hadoop_conf_dir,
  principal_name = hdfs_principal_name,
  hdfs_site = hdfs_site,
  default_fs = default_fs,
  immutable_paths = get_not_managed_resources()
)
# Druid/Zeppelin service URL lists for the Knox topology; each entry is one
# "<url>...</url>\n" element built by buildUrlElement above.
druid_coordinator_urls = ""
if "druid-coordinator" in config['configurations']:
  port = config['configurations']['druid-coordinator']['druid.port']
  for host in config['clusterHostInfo']['druid_coordinator_hosts']:
    druid_coordinator_urls += buildUrlElement("http", host, port, "")
druid_overlord_urls = ""
if "druid-overlord" in config['configurations']:
  port = config['configurations']['druid-overlord']['druid.port']
  for host in config['clusterHostInfo']['druid_overlord_hosts']:
    druid_overlord_urls += buildUrlElement("http", host, port, "")
druid_broker_urls = ""
if "druid-broker" in config['configurations']:
  port = config['configurations']['druid-broker']['druid.port']
  for host in config['clusterHostInfo']['druid_broker_hosts']:
    druid_broker_urls += buildUrlElement("http", host, port, "")
druid_router_urls = ""
if "druid-router" in config['configurations']:
  port = config['configurations']['druid-router']['druid.port']
  for host in config['clusterHostInfo']['druid_router_hosts']:
    druid_router_urls += buildUrlElement("http", host, port, "")
zeppelin_ui_urls = ""
zeppelin_ws_urls = ""
websocket_support = "false"
if "zeppelin-config" in config['configurations']:
  port = config['configurations']['zeppelin-config']['zeppelin.server.port']
  protocol = "https" if config['configurations']['zeppelin-config']['zeppelin.ssl'] else "http"
  # Only the first Zeppelin master host is exposed through the gateway.
  host = config['clusterHostInfo']['zeppelin_master_hosts'][0]
  zeppelin_ui_urls += buildUrlElement(protocol, host, port, "")
  zeppelin_ws_urls += buildUrlElement("ws", host, port, "/ws")
  websocket_support = "true"
| |
#!/bin/env python
#-*-coding:utf-8-*-
# MSSQL monitoring helpers built on pymssql (Python 2 script).
import pymssql
import string
import sys
import datetime
import time
# Python 2 only: reload(sys) re-exposes setdefaultencoding (removed by
# site.py) so the process-wide default encoding can be forced to UTF-8.
reload(sys)
sys.setdefaultencoding('utf8')
# Python 2 stdlib name (configparser in Python 3); not used in the code
# visible here -- TODO confirm it is needed.
import ConfigParser
def get_item(data_dict, item):
    """Return data_dict[item], or None when the lookup fails.

    Preserves the original silent-failure contract but catches only
    lookup-related errors instead of a bare ``except: pass`` that
    swallowed every exception (including KeyboardInterrupt).
    """
    try:
        return data_dict[item]
    except (KeyError, IndexError, TypeError):
        return None
def get_variables(conn, var_name):
    """Return the value of the @@<var_name> server variable, or None on error.

    Fixes two crash paths in the original: ``curs`` was unbound in the
    ``finally`` block when conn.cursor() raised, and ``parameters`` was
    unbound on query failure, turning any DB error into a NameError.
    var_name is interpolated into the SQL text -- callers must pass a
    trusted identifier.
    """
    curs = None
    parameters = None
    try:
        curs = conn.cursor()
        curs.execute('select @@' + var_name)
        parameters = curs.fetchone()[0]
    except Exception as e:
        print(e)
    finally:
        if curs is not None:
            curs.close()
    return parameters
def get_version(conn):
    """Return the 4th whitespace-separated token of @@VERSION (the year,
    e.g. '2012' from 'Microsoft SQL Server 2012 ...'), or None on error.

    Guards against the original crash paths: unbound ``curs`` in
    ``finally`` and unbound ``result`` when the query fails.
    """
    curs = None
    result = None
    try:
        curs = conn.cursor()
        curs.execute("SELECT @@VERSION")
        result = curs.fetchone()[0].split(' ')[3]
    except Exception as e:
        print(e)
    finally:
        if curs is not None:
            curs.close()
    return result
def get_uptime(conn):
    """Return seconds since SQL Server started, or None on error.

    The query's third column is DATEDIFF(mi, start, now) -- minutes since
    restart (despite its 'days_since_restart' alias) -- so *60 converts
    to seconds.  Guards the unbound curs/result crash paths of the
    original.
    """
    curs = None
    result = None
    try:
        curs = conn.cursor()
        curs.execute("SELECT sqlserver_start_time as time_restart,GETDATE() AS time_now,DATEDIFF(mi,sqlserver_start_time,GETDATE()) AS days_since_restart FROM sys.dm_os_sys_info")
        result = int(curs.fetchone()[2] * 60)
    except Exception as e:
        print(e)
    finally:
        if curs is not None:
            curs.close()
    return result
def get_curr_time(conn):
    """Return the row ('YYYY-MM-DD HH:MM:SS',) holding the server's current
    time, or None on error.

    Note: like the original, this returns the whole fetched row (a
    1-tuple), not the bare string.  Guards the unbound curs/result
    crash paths of the original.
    """
    curs = None
    result = None
    try:
        curs = conn.cursor()
        curs.execute("SELECT CONVERT(varchar(100), GETDATE(), 120 ) AS time_now")
        result = curs.fetchone()
    except Exception as e:
        print(e)
    finally:
        if curs is not None:
            curs.close()
    return result
def get_snap_id(conn):
    """Return a snapshot id string: YYYYMMDD plus the hour (left 2 chars of
    the HH:MM:SS time), or None on error.

    Guards the unbound curs/result crash paths of the original.
    """
    curs = None
    result = None
    try:
        curs = conn.cursor()
        curs.execute("select CONVERT(varchar(100), GETDATE(), 112) + left(CONVERT(varchar(100), GETDATE(), 14),2) ")
        result = curs.fetchone()[0]
    except Exception as e:
        print(e)
    finally:
        if curs is not None:
            curs.close()
    return result
def get_buffer_cache_hit_rate(conn):
    """Return the buffer cache hit ratio as an integer-valued string
    (0-100), or None on error.

    Computed from the two perf counters 'Buffer cache hit ratio' and its
    'base'; the CASE handles default vs named instances.  Guards the
    unbound curs/result crash paths of the original.
    """
    curs = None
    result = None
    try:
        curs = conn.cursor()
        curs.execute("""SELECT CAST(CAST((a.cntr_value * 1.0 / b.cntr_value)*100 as int) AS VARCHAR(20)) as BufferCacheHitRatio
 FROM (
 SELECT * FROM sys.dm_os_performance_counters
 WHERE counter_name = 'Buffer cache hit ratio'
 AND object_name = CASE WHEN @@SERVICENAME = 'MSSQLSERVER'
 THEN 'SQLServer:Buffer Manager'
 ELSE 'MSSQL$' + rtrim(@@SERVICENAME) +
 ':Buffer Manager' END
 ) a
 CROSS JOIN
 (
 SELECT * from sys.dm_os_performance_counters
 WHERE counter_name = 'Buffer cache hit ratio base'
 and object_name = CASE WHEN @@SERVICENAME = 'MSSQLSERVER'
 THEN 'SQLServer:Buffer Manager'
 ELSE 'MSSQL$' + rtrim(@@SERVICENAME) +
 ':Buffer Manager' END
 ) b """)
        result = curs.fetchone()[0]
    except Exception as e:
        print(e)
    finally:
        if curs is not None:
            curs.close()
    return result
def get_logMegabyte(conn):
    """Return total log file size in MB (perf counter reports KB; /1024),
    or None on error.

    Guards the unbound curs/result crash paths of the original.
    """
    curs = None
    result = None
    try:
        curs = conn.cursor()
        curs.execute("""select cntr_value/1024
 from sys.dm_os_performance_counters
 where counter_name = 'Log File(s) Size (KB)'
 and instance_name = '_Total' """)
        result = curs.fetchone()[0]
    except Exception as e:
        print(e)
    finally:
        if curs is not None:
            curs.close()
    return result
def get_database(conn, field):
    """Return column *field* of the first row of v$database, or '' on error.

    NOTE(review): "v$database" is Oracle dictionary-view syntax, not
    MSSQL -- this looks copied from an Oracle script and may always hit
    the error path on SQL Server; confirm.  *field* is interpolated into
    the SQL text and must be a trusted identifier (it cannot be bound as
    a parameter).  Guards the unbound-curs crash path of the original;
    keeps the original '' fallback.
    """
    curs = None
    result = ''
    try:
        curs = conn.cursor()
        curs.execute("select %s from v$database" % (field))
        result = curs.fetchone()[0]
    except Exception as e:
        result = ''
        print(e)
    finally:
        if curs is not None:
            curs.close()
    return result
def ger_processes(conn):
    """Return the total number of sessions attached to known databases,
    or None on error.

    Fixes the original's ``return null`` (NameError: ``null`` is not a
    Python name), the unreachable ``print e`` placed after the return,
    and the unbound ``curs`` in ``finally`` when conn.cursor() raises.
    """
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("SELECT COUNT(*) FROM [master].[dbo].[sysprocesses] WHERE [DBID] IN ( SELECT [dbid] FROM [master].[dbo].[sysdatabases])")
        return curs.fetchone()[0]
    except Exception as e:
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def ger_processes_running(conn):
    """Return the number of non-sleeping, non-background sessions, or None
    on error.

    Same fixes as ger_processes: ``return null`` -> ``return None``,
    unreachable ``print e`` made reachable, guarded cursor close.
    """
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("SELECT COUNT(*) FROM [master].[dbo].[sysprocesses] WHERE [DBID] IN ( SELECT [dbid] FROM [master].[dbo].[sysdatabases]) AND status !='SLEEPING' AND status !='BACKGROUND'")
        return curs.fetchone()[0]
    except Exception as e:
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def ger_processes_waits(conn):
    """Return the number of suspended sessions waiting longer than 2ms,
    or None on error.

    Same fixes as ger_processes: ``return null`` -> ``return None``,
    unreachable ``print e`` made reachable, guarded cursor close.
    """
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("SELECT COUNT(*) FROM [master].[dbo].[sysprocesses] WHERE [DBID] IN ( SELECT [dbid] FROM [master].[dbo].[sysdatabases]) AND status ='SUSPENDED' AND waittime >2 ")
        return curs.fetchone()[0]
    except Exception as e:
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_mirror_info(conn, db_name):
    """Return the sys.database_mirroring row for *db_name* (joined with
    sys.databases), or None when there is no mirroring row or on error.

    The original interpolated db_name into the SQL with ``'%s' % db_name``;
    this version binds it as a pymssql query parameter instead, which
    quotes it safely.  Also guards the unbound-curs close path.
    """
    curs = None
    try:
        curs = conn.cursor()
        # pymssql uses %s placeholders; db_name is passed as a bound
        # parameter rather than spliced into the statement text.
        curs.execute("""select m.database_id,
 d.name,
 substring(mirroring_partner_name, 7, charindex(':',mirroring_partner_name,7)-7) as master_server,
 right(mirroring_partner_name, len(mirroring_partner_name) - charindex(':',mirroring_partner_name,7)) as master_port,
 m.mirroring_role,
 m.mirroring_state,
 m.mirroring_state_desc,
 m.mirroring_safety_level,
 m.mirroring_partner_name,
 m.mirroring_partner_instance,
 m.mirroring_failover_lsn,
 m.mirroring_connection_timeout,
 m.mirroring_redo_queue,
 m.mirroring_end_of_log_lsn,
 m.mirroring_replication_lsn
 from sys.database_mirroring m, sys.databases d
 where m.mirroring_guid is NOT NULL
 AND m.database_id = d.database_id
 and d.name = %s""", (db_name,))
        return curs.fetchone()
    except Exception as e:
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_logspace(conn):
    """Return all rows of DBCC SQLPERF(LOGSPACE) (per-database log usage),
    or None on error.

    Fixes the original's unreachable ``print e`` placed after ``return
    None``, stops shadowing the builtin ``list``, and guards the unbound
    ``curs`` in ``finally``.
    """
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""DBCC SQLPERF(LOGSPACE) """)
        return curs.fetchall()
    except Exception as e:
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
| |
#Just a copy of the version in python 2.5 to be used if it's not available in jython 2.1
"""Simple XML-RPC Server.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCServer
class.
It can also be used to handle XML-RPC requests in a CGI
environment using CGIXMLRPCRequestHandler.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the string functions available through
# string.func_name
import string
self.string = string
def _listMethods(self):
# implement this method so that system.listMethods
# knows to advertise the strings methods
return list_public_methods(self) + \
['string.' + method for method in list_public_methods(self.string)]
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _listMethods(self):
# this method must be present for system.listMethods
# to work
return ['add', 'pow']
def _methodHelp(self, method):
# this method must be present for system.methodHelp
# to work
if method == 'add':
return "add(2,3) => 5"
elif method == 'pow':
return "pow(x, y[, z]) => number"
else:
# By convention, return empty
# string if no help is available
return ""
def _dispatch(self, method, params):
if method == 'pow':
return pow(*params)
elif method == 'add':
return params[0] + params[1]
else:
raise 'bad method'
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCServer:
class MathServer(SimpleXMLRPCServer):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
def export_add(self, x, y):
return x + y
server = MathServer(("localhost", 8000))
server.serve_forever()
5. CGI script:
server = CGIXMLRPCRequestHandler()
server.register_function(pow)
server.handle_request()
"""
# Written by Brian Quinlan (brian@sweetapp.com).
# Based on code written by Fredrik Lundh.
from _pydev_imps import _pydev_xmlrpclib as xmlrpclib
from _pydev_imps._pydev_xmlrpclib import Fault
from _pydev_imps import _pydev_SocketServer as SocketServer
from _pydev_imps import _pydev_BaseHTTPServer as BaseHTTPServer
import sys
import os
try:
import fcntl
except ImportError:
fcntl = None
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
    """Look up *attr* on *obj*, following dots when allow_dotted_names is true.

    resolve_dotted_attribute(a, 'b.c.d') returns a.b.c.d.  Any path
    segment beginning with an underscore is treated as private and makes
    the lookup raise AttributeError.  With allow_dotted_names false the
    whole name is used verbatim, like getattr(obj, attr).
    """
    segments = attr.split('.') if allow_dotted_names else [attr]
    for segment in segments:
        # Refuse private names anywhere along the chain.
        if segment.startswith('_'):
            raise AttributeError(
                'attempt to access private attribute "%s"' % segment
                )
        obj = getattr(obj, segment)
    return obj
def list_public_methods(obj):
    """Return the names of *obj*'s callable attributes that do not start
    with an underscore (i.e. its public methods)."""
    names = []
    for name in dir(obj):
        if name.startswith('_'):
            continue
        if callable(getattr(obj, name)):
            names.append(name)
    return names
def remove_duplicates(lst):
    """remove_duplicates([2,2,2,1,3,3]) => [3,1,2]

    Return the distinct items of *lst*.  Every item must be hashable;
    the order of the result is unspecified.
    """
    # dict keys are unique, so building a dict keyed on the items and
    # returning its keys drops the duplicates in one pass.
    return dict.fromkeys(lst).keys()
class SimpleXMLRPCDispatcher:
    """Mix-in class that dispatches XML-RPC requests.
    This class is used to register XML-RPC method handlers
    and then to dispatch them. There should never be any
    reason to instantiate this class directly.
    """
    def __init__(self, allow_none, encoding):
        # funcs maps XML-RPC method names to plain callables; instance is an
        # optional fallback object searched when no registered function
        # matches (see register_instance/_dispatch below).
        self.funcs = {}
        self.instance = None
        self.allow_none = allow_none
        self.encoding = encoding
    def register_instance(self, instance, allow_dotted_names=False):
        """Registers an instance to respond to XML-RPC requests.
        Only one instance can be installed at a time.
        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))
        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called. Methods beginning with an '_'
        are considered private and will not be called by
        SimpleXMLRPCServer.
        If a registered function matches a XML-RPC request, then it
        will be called instead of the registered instance.
        If the optional allow_dotted_names argument is true and the
        instance does not have a _dispatch method, method names
        containing dots are supported and resolved, as long as none of
        the name segments start with an '_'.
        *** SECURITY WARNING: ***
        Enabling the allow_dotted_names options allows intruders
        to access your module's global variables and may allow
        intruders to execute arbitrary code on your machine. Only
        use this option on a secure, closed network.
        """
        self.instance = instance
        self.allow_dotted_names = allow_dotted_names
    def register_function(self, function, name=None):
        """Registers a function to respond to XML-RPC requests.
        The optional name argument can be used to set a Unicode name
        for the function.
        """
        if name is None:
            name = function.__name__
        self.funcs[name] = function
    def register_introspection_functions(self):
        """Registers the XML-RPC introspection methods in the system
        namespace.
        see http://xmlrpc.usefulinc.com/doc/reserved.html
        """
        self.funcs.update({'system.listMethods' : self.system_listMethods,
                      'system.methodSignature' : self.system_methodSignature,
                      'system.methodHelp' : self.system_methodHelp})
    def register_multicall_functions(self):
        """Registers the XML-RPC multicall method in the system
        namespace.
        see http://www.xmlrpc.com/discuss/msgReader$1208"""
        self.funcs.update({'system.multicall' : self.system_multicall})
    def _marshaled_dispatch(self, data, dispatch_method=None):
        """Dispatches an XML-RPC method from marshalled (XML) data.
        XML-RPC methods are dispatched from the marshalled (XML) data
        using the _dispatch method and the result is returned as
        marshalled data. For backwards compatibility, a dispatch
        function can be provided as an argument (see comment in
        SimpleXMLRPCRequestHandler.do_POST) but overriding the
        existing method through subclassing is the preferred means
        of changing method dispatch behavior.
        """
        try:
            params, method = xmlrpclib.loads(data)
            # generate response
            if dispatch_method is not None:
                response = dispatch_method(method, params)
            else:
                response = self._dispatch(method, params)
            # wrap response in a singleton tuple
            response = (response,)
            response = xmlrpclib.dumps(response, methodresponse=1,
                                       allow_none=self.allow_none, encoding=self.encoding)
        # NOTE: Python 2-only "except X, name" syntax is intentional -- this
        # module is a compatibility copy targeting old interpreters
        # (jython 2.1 per the file header).
        except Fault, fault:
            response = xmlrpclib.dumps(fault, allow_none=self.allow_none,
                                       encoding=self.encoding)
        except:
            # report exception back to server
            response = xmlrpclib.dumps(
                xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value)), #@UndefinedVariable exc_value only available when we actually have an exception
                encoding=self.encoding, allow_none=self.allow_none,
                )
        return response
    def system_listMethods(self):
        """system.listMethods() => ['add', 'subtract', 'multiple']
        Returns a list of the methods supported by the server."""
        methods = self.funcs.keys()
        if self.instance is not None:
            # Instance can implement _listMethod to return a list of
            # methods
            if hasattr(self.instance, '_listMethods'):
                methods = remove_duplicates(
                        methods + self.instance._listMethods()
                    )
            # if the instance has a _dispatch method then we
            # don't have enough information to provide a list
            # of methods
            elif not hasattr(self.instance, '_dispatch'):
                methods = remove_duplicates(
                        methods + list_public_methods(self.instance)
                    )
        methods.sort()
        return methods
    def system_methodSignature(self, method_name):
        """system.methodSignature('add') => [double, int, int]
        Returns a list describing the signature of the method. In the
        above example, the add method takes two integers as arguments
        and returns a double result.
        This server does NOT support system.methodSignature."""
        # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
        return 'signatures not supported'
    def system_methodHelp(self, method_name):
        """system.methodHelp('add') => "Adds two integers together"
        Returns a string containing documentation for the specified method."""
        method = None
        # has_key is Python 2 only; kept for the old interpreters this
        # compatibility copy targets.
        if self.funcs.has_key(method_name):
            method = self.funcs[method_name]
        elif self.instance is not None:
            # Instance can implement _methodHelp to return help for a method
            if hasattr(self.instance, '_methodHelp'):
                return self.instance._methodHelp(method_name)
            # if the instance has a _dispatch method then we
            # don't have enough information to provide help
            elif not hasattr(self.instance, '_dispatch'):
                try:
                    method = resolve_dotted_attribute(
                                self.instance,
                                method_name,
                                self.allow_dotted_names
                                )
                except AttributeError:
                    pass
        # Note that we aren't checking that the method actually
        # be a callable object of some kind
        if method is None:
            return ""
        else:
            try:
                import pydoc
            except ImportError:
                return "" #not there for jython
            else:
                return pydoc.getdoc(method)
    def system_multicall(self, call_list):
        """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]
        Allows the caller to package multiple XML-RPC calls into a single
        request.
        See http://www.xmlrpc.com/discuss/msgReader$1208
        """
        results = []
        for call in call_list:
            method_name = call['methodName']
            params = call['params']
            try:
                # XXX A marshalling error in any response will fail the entire
                # multicall. If someone cares they should fix this.
                results.append([self._dispatch(method_name, params)])
            except Fault, fault:
                results.append(
                    {'faultCode' : fault.faultCode,
                     'faultString' : fault.faultString}
                    )
            except:
                results.append(
                    {'faultCode' : 1,
                     'faultString' : "%s:%s" % (sys.exc_type, sys.exc_value)} #@UndefinedVariable exc_value only available when we actually have an exception
                    )
        return results
    def _dispatch(self, method, params):
        """Dispatches the XML-RPC method.
        XML-RPC calls are forwarded to a registered function that
        matches the called XML-RPC method name. If no such function
        exists then the call is forwarded to the registered instance,
        if available.
        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))
        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called.
        Methods beginning with an '_' are considered private and will
        not be called.
        """
        func = None
        try:
            # check to see if a matching function has been registered
            func = self.funcs[method]
        except KeyError:
            if self.instance is not None:
                # check for a _dispatch method
                if hasattr(self.instance, '_dispatch'):
                    return self.instance._dispatch(method, params)
                else:
                    # call instance method directly
                    try:
                        func = resolve_dotted_attribute(
                            self.instance,
                            method,
                            self.allow_dotted_names
                            )
                    except AttributeError:
                        pass
        if func is not None:
            return func(*params)
        else:
            raise Exception('method "%s" is not supported' % method)
class SimpleXMLRPCRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Simple XML-RPC request handler class.

    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    """

    # Class attribute listing the accessible path components;
    # paths not on this list will result in a 404 error.
    rpc_paths = ('/', '/RPC2')

    def is_rpc_path_valid(self):
        # Return True when this request's path is allowed to serve XML-RPC.
        if self.rpc_paths:
            return self.path in self.rpc_paths
        else:
            # If .rpc_paths is empty, just assume all paths are legal
            return True

    def do_POST(self):
        """Handles the HTTP POST request.

        Attempts to interpret all HTTP POST requests as XML-RPC calls,
        which are forwarded to the server's _dispatch method for handling.
        """

        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return

        try:
            # Get arguments by reading body of request.
            # We read this in chunks to avoid straining
            # socket.read(); around the 10 or 15Mb mark, some platforms
            # begin to have problems (bug #792570).
            max_chunk_size = 10 * 1024 * 1024
            size_remaining = int(self.headers["content-length"])
            L = []
            while size_remaining:
                chunk_size = min(size_remaining, max_chunk_size)
                L.append(self.rfile.read(chunk_size))
                # Accumulate by actual bytes read; a short read just loops.
                size_remaining -= len(L[-1])
            data = ''.join(L)

            # In previous versions of SimpleXMLRPCServer, _dispatch
            # could be overridden in this class, instead of in
            # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
            # check to see if a subclass implements _dispatch and dispatch
            # using that method if present.
            response = self.server._marshaled_dispatch(
                data, getattr(self, '_dispatch', None)
                )
        except:  # This should only happen if the module is buggy
            # internal error, report as HTTP server error
            self.send_response(500)
            self.end_headers()
        else:
            # got a valid XML RPC response
            self.send_response(200)
            self.send_header("Content-type", "text/xml")
            self.send_header("Content-length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)

            # shut down the connection
            self.wfile.flush()
            self.connection.shutdown(1)

    def report_404(self):
        # Report a 404 error with a tiny plain-text body.
        self.send_response(404)
        response = 'No such page'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
        # shut down the connection
        self.wfile.flush()
        self.connection.shutdown(1)

    def log_request(self, code='-', size='-'):
        """Selectively log an accepted request."""

        # Only log when the owning server was configured with logRequests.
        if self.server.logRequests:
            BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(SocketServer.TCPServer,
                         SimpleXMLRPCDispatcher):
    """Simple XML-RPC server.

    Simple XML-RPC server that allows functions and a single instance
    to be installed to handle requests. The default implementation
    attempts to dispatch XML-RPC calls to the functions or instance
    installed in the server. Override the _dispatch method inherited
    from SimpleXMLRPCDispatcher to change this behavior.
    """

    # Allow quick restarts of the server: avoids 'address already in
    # use' errors while the old socket sits in TIME_WAIT.
    allow_reuse_address = True

    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None):
        self.logRequests = logRequests

        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
        SocketServer.TCPServer.__init__(self, addr, requestHandler)

        # [Bug #1222790] If possible, set close-on-exec flag; if a
        # method spawns a subprocess, the subprocess shouldn't have
        # the listening socket open.
        if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
            flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
    """Simple handler for XML-RPC data passed through CGI."""

    def __init__(self, allow_none=False, encoding=None):
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)

    def handle_xmlrpc(self, request_text):
        """Handle a single XML-RPC request"""

        response = self._marshaled_dispatch(request_text)

        # Emit the CGI headers, a blank separator line, then the payload.
        out = sys.stdout
        out.write('Content-Type: text/xml\n')
        out.write('Content-Length: %d\n' % len(response))
        out.write('\n')
        out.write(response)

    def handle_get(self):
        """Handle a single HTTP GET request.

        Default implementation indicates an error because
        XML-RPC uses the POST method.
        """

        code = 400
        message, explain = \
            BaseHTTPServer.BaseHTTPRequestHandler.responses[code]
        response = BaseHTTPServer.DEFAULT_ERROR_MESSAGE % { #@UndefinedVariable
            'code' : code,
            'message' : message,
            'explain' : explain
            }
        out = sys.stdout
        out.write('Status: %d %s\n' % (code, message))
        out.write('Content-Type: text/html\n')
        out.write('Content-Length: %d\n' % len(response))
        out.write('\n')
        out.write(response)

    def handle_request(self, request_text=None):
        """Handle a single XML-RPC request passed through a CGI post method.

        If no XML data is given then it is read from stdin. The resulting
        XML-RPC response is printed to stdout along with the correct HTTP
        headers.
        """

        is_get = request_text is None and \
            os.environ.get('REQUEST_METHOD', None) == 'GET'
        if is_get:
            self.handle_get()
        else:
            # POST data is normally available through stdin
            if request_text is None:
                request_text = sys.stdin.read()
            self.handle_xmlrpc(request_text)
if __name__ == '__main__':
    # Demo server: exposes pow() and an 'add' lambda on localhost:8000
    # and serves until interrupted.
    sys.stdout.write('Running XML-RPC server on port 8000\n')
    server = SimpleXMLRPCServer(("localhost", 8000))
    server.register_function(pow)
    server.register_function(lambda x, y: x + y, 'add')
    server.serve_forever()
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_scatter
from lego.common.utils import list2tuple, tuple2list, eval_tuple
import numpy as np
import time
class MLP(nn.Module):
    """Two-layer perceptron: Linear -> LayerNorm -> ReLU -> Linear.

    Both linear layers are Xavier-uniform initialized.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        super(MLP, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        # Attribute names are kept stable so existing checkpoints load.
        self.layer1 = nn.Linear(input_dim, hidden_dim)
        self.layer2 = nn.Linear(hidden_dim, output_dim)
        self.layer1_ln = nn.LayerNorm(hidden_dim)
        for linear in (self.layer1, self.layer2):
            nn.init.xavier_uniform_(linear.weight)

    def forward(self, xs):
        """Map (..., input_dim) inputs to (..., output_dim) outputs."""
        hidden = F.relu(self.layer1_ln(self.layer1(xs)))
        return self.layer2(hidden)
class BatchPowersetParser(nn.Module):
    """Scores powerset branch choices and relation choices for LEGO-style
    query programs. Branch features are aggregated with scatter-max over
    powerset membership edges; relations are predicted from box embeddings
    concatenated with BERT question embeddings."""

    def __init__(self, center_dim, offset_dim, bert_dim, hidden_dim, nrelation, reduce='max', deep_arch='identity', max_y_score_len=15, requires_vf=False):
        super(BatchPowersetParser, self).__init__()
        self.hidden_dim = hidden_dim
        self.nrelation = nrelation
        # Scores one powerset element given its aggregated feature + BERT.
        self.getScore = MLP(hidden_dim+bert_dim, hidden_dim, 1) # hidden_dim + bert_dim = 1568
        # Predicts a relation distribution from box embedding + BERT.
        self.getRelation = MLP(center_dim+offset_dim+bert_dim, hidden_dim, nrelation) # center_dim + offset_dim + bert_dim = 2368
        self.requires_vf = requires_vf
        if requires_vf:
            # Optional value head (used by action_value).
            self.getValue = MLP(hidden_dim+bert_dim, hidden_dim, 1) # hidden_dim + bert_dim = 1568
        self.reduce = reduce
        self.deep_arch = deep_arch
        if deep_arch == 'identity':
            self.getFeature = nn.Identity()
            self.processFeature = nn.Identity()
        elif deep_arch == 'deepsets':
            # DeepSets-style encode/aggregate/process pipeline.
            self.getFeature = MLP(center_dim+offset_dim, hidden_dim, hidden_dim) # center_dim + offset_dim = 1600
            self.processFeature = MLP(hidden_dim, hidden_dim, hidden_dim) # hidden_dim = 800
        if reduce == 'max':
            self.gatherFeature = torch_scatter.scatter_max
        else:
            # Only max-aggregation is implemented.
            assert False
        # Constant -inf row, kept as a non-trainable parameter so it moves
        # with the module across devices.
        self.tfneginf = nn.Parameter(torch.Tensor(- np.ones((1, nrelation)) * float('inf')), requires_grad=False)
        # Wall-clock accounting buckets (seconds).
        self.t_data = 0
        self.t_fwd = 0
        self.t_loss = 0
        self.t_opt = 0

    '''
    x_scores: (n_data, center dim + offset dim)
    x_relations: (n_data, center dim + offset dim + bert dim)
    y_scores: (n_program, max_y_score_len)
    y_relations: (n_data, nrelation)
    mask_relations: (n_data)
    w_scores: (n_program)
    w_relations: (n_data)
    berts: (n_program, bert dim)
    edge_indices: (2, n_message_passing)
    softmax_edge_indices: (2, n_powerset)
    note that n_powerset != n_data * max_y_score_len.
    n_powerset = \sum_i 2^n_i (e.g. 2+8+4+16), n_data * max_y_score_len = 4*16,
    n_message_passing = \sum n_i * 2^(n_i - 1),
    n_program = args.batch_size
    '''

    def action_value(self, x_scores, x_relations, berts, edge_indices, softmax_edge_indices, value_edge_indices, n_program, max_y_score_len):
        # Same feature pipeline as forward(), plus a value estimate for the
        # full-set rows selected by value_edge_indices.
        feature = self.getFeature(x_scores)
        if self.reduce == 'max':
            # Sentinel zero row so empty sets gather a zero feature.
            feature = torch.cat([feature, torch.zeros(size=[1, feature.shape[1]]).to(feature.device)], dim=0)
        expanded_feature = feature[edge_indices[0]]
        gathered_feature, _ = self.gatherFeature(expanded_feature, edge_indices[1], dim_size=torch.max(edge_indices[1]).long()+1, dim=0)
        processed_feature = self.processFeature(gathered_feature)
        processed_feature = torch.cat([processed_feature, berts], dim=-1)
        full_set_processed_feature = processed_feature[value_edge_indices]
        value = self.getValue(full_set_processed_feature)
        score = self.getScore(processed_feature)
        # Zero is used below as the "unfilled slot" marker, so a genuine
        # zero score would be corrupted; guard against it.
        assert not (score == 0).any()
        score, _ = torch_scatter.scatter_max(score, softmax_edge_indices[1], dim_size=n_program * max_y_score_len, dim=0) #! to update here
        # Slots never written by scatter_max stay 0 -> mask them to -inf.
        score = torch.where(score != 0, score, torch.tensor(-float('inf')).to(score.device))
        score = torch.reshape(score, [n_program, max_y_score_len])
        relation = self.getRelation(x_relations)
        return score, relation, value

    def sample_action(self, score, relation, powersets, train_with_masking, query2box, x_scores, threshold, ent_out, mask_scores_class, transform, relation_selector, old=False, stochastic=True, epsilon=0., beam_size=1, base_ll=0., mask_mode='none'):
        # Sample (or beam-search) a branch choice and, when the choice is a
        # single branch, a relation to expand it with.
        transform_type, degree = eval_tuple(transform)
        if beam_size > 1:
            # Beam search is deterministic and single-program only.
            assert not stochastic
            assert mask_scores_class.shape[0] == 1
            assert not stochastic
        if stochastic:
            if np.random.rand() < epsilon:
                # Epsilon-greedy exploration: uniform over legal branches.
                random_score = torch.cat([torch.ones([1, len(powersets[0])]).to(score.device) / len(powersets[0]), score[:, len(powersets[0]):]], dim=1) #! update for batch
                random_score = torch.where(mask_scores_class, random_score, torch.tensor(-float('inf')).to(score.device))
                if old:
                    score = torch.where(mask_scores_class, score, torch.tensor(-float('inf')).to(score.device))
                    sampled_score = torch.distributions.categorical.Categorical(logits=score).sample()
                else:
                    sampled_score = torch.distributions.categorical.Categorical(logits=score).sample()
            else:
                score = torch.where(mask_scores_class, score, torch.tensor(-float('inf')).to(score.device))
                sampled_score = torch.distributions.categorical.Categorical(logits=score).sample()
            sampled_scores_1 = [sampled_score]
            updated_lls_1 = [0]
        else:
            # Deterministic top-k (beam) over legal branch choices.
            score = torch.where(mask_scores_class, score, torch.tensor(-float('inf')).to(score.device))
            assert score.shape[0] == 1
            n_legal_choices = torch.sum((score>-1000.).int()).item()
            sampled_scores_1 = torch.topk(score, min(n_legal_choices, beam_size))[1]
            ll = torch.log(F.softmax(score, dim=-1) + 1e-10)
            updated_lls_1 = [base_ll + ll[0][sampled_score].item() for sampled_score in sampled_scores_1[0]]
        sampled_scores, sampled_relations, branches_picked_list, mask_relations_class_list, updated_lls = [], [], [], [], []
        for i, (sampled_score, updated_ll) in enumerate(zip(sampled_scores_1, updated_lls_1)):
            branches_picked = powersets[0][sampled_score[0].item()] #! update for batch; (0,1) or (0) or ()
            if len(branches_picked) == 1:
                # Exactly one branch picked -> also sample a relation.
                if train_with_masking:
                    if mask_mode == 'rs':
                        # Restrict relations to the selector's top-k.
                        rel_logits = relation_selector(x_scores[branches_picked[0]].unsqueeze(0))
                        _, indices = torch.topk(rel_logits, threshold)
                        mask_relations_class = torch.zeros([len(x_scores), self.nrelation]).to(relation.device)
                        mask_relations_class[:, indices[0]] = 1
                        mask_relations_class = mask_relations_class.bool()
                        relation = torch.where(mask_relations_class, relation, torch.tensor(-float('inf')).to(relation.device))
                    else:
                        assert False, "%s not supported as mask_mode" % mask_mode
                else:
                    mask_relations_class = torch.ones([len(x_scores), self.nrelation]).bool().to(relation.device)
                if stochastic:
                    if np.random.rand() < epsilon:
                        # Uniform over the currently legal relations.
                        random_relation = mask_relations_class.float()
                        sampled_relation = torch.distributions.categorical.Categorical(logits=random_relation[branches_picked[0]].unsqueeze(0)).sample().cpu().numpy()
                    else:
                        sampled_relation = torch.distributions.categorical.Categorical(logits=relation[branches_picked[0]].unsqueeze(0)).sample().cpu().numpy()
                    updated_ll = 0
                    sampled_scores.append(sampled_score)
                    sampled_relations.append(sampled_relation)
                    branches_picked_list.append(branches_picked)
                    mask_relations_class_list.append(mask_relations_class)
                    updated_lls.append(updated_ll)
                else:
                    # Beam: expand this branch with each of the top-k relations.
                    n_legal_choices = torch.sum(mask_relations_class[branches_picked[0]].int()).item()
                    tmp_sampled_relations = torch.topk(relation[branches_picked[0]], min(n_legal_choices, beam_size))[1].unsqueeze(1).cpu().numpy().tolist()
                    ll = torch.log(F.softmax(relation[branches_picked[0]], dim=-1)+1e-10)
                    tmp_updated_lls = [updated_ll + ll[sampled_relation[0]].item() for sampled_relation in tmp_sampled_relations]
                    sampled_scores.extend([sampled_score]*len(tmp_sampled_relations))
                    sampled_relations.extend(tmp_sampled_relations)
                    branches_picked_list.extend([branches_picked]*len(tmp_sampled_relations))
                    mask_relations_class_list.extend([mask_relations_class]*len(tmp_sampled_relations))
                    updated_lls.extend(tmp_updated_lls)
            else:
                # Merge/stop actions need no relation.
                mask_relations_class = torch.ones_like(relation).bool()
                sampled_relation = None
                sampled_scores.append(sampled_score)
                sampled_relations.append(sampled_relation)
                branches_picked_list.append(branches_picked)
                mask_relations_class_list.append(mask_relations_class)
                updated_lls.append(updated_ll)
        return sampled_scores, sampled_relations, branches_picked_list, mask_relations_class_list, updated_lls

    def forward(self, x_scores, x_relations, berts, edge_indices, softmax_edge_indices, n_program, max_y_score_len):
        # Encode branch embeddings, scatter-max them into powerset-element
        # features, then score each element and every candidate relation.
        feature = self.getFeature(x_scores)
        if self.reduce == 'max':
            # Sentinel zero row so empty sets gather a zero feature.
            feature = torch.cat([feature, torch.zeros(size=[1, feature.shape[1]]).to(feature.device)], dim=0)
        expanded_feature = feature[edge_indices[0]]
        gathered_feature, _ = self.gatherFeature(expanded_feature, edge_indices[1], dim_size=torch.max(edge_indices[1]).long()+1, dim=0)
        processed_feature = self.processFeature(gathered_feature)
        processed_feature = torch.cat([processed_feature, berts], dim=-1)
        score = self.getScore(processed_feature)
        # Zero doubles as the "unfilled slot" marker below.
        assert not (score == 0).any()
        score, _ = torch_scatter.scatter_max(score, softmax_edge_indices[1], dim_size=n_program * max_y_score_len, dim=0) #! to update here
        score = torch.where(score != 0, score, torch.tensor(-float('inf')).to(score.device))
        score = torch.reshape(score, [n_program, max_y_score_len])
        relation = self.getRelation(x_relations)
        return score, relation

    @staticmethod
    def train_step(model, optimizer, train_iterator, args, step, writer):
        """One supervised step: per-step cross-entropy losses are summed per
        candidate program, the min-loss program per question is kept, and the
        mean over questions is backpropagated. Returns a metrics dict."""
        optimizer.zero_grad()
        x_scores, x_relations, y_scores, y_relations, mask_relations, w_scores, w_relations, berts, edge_indices, softmax_edge_indices, n_program, max_y_score_len, mask_relations_class, question_indices, step_indices, noisy_mask_relations = train_iterator.next_supervised()
        if args.cuda:
            x_scores = x_scores.cuda()
            x_relations = x_relations.cuda()
            y_scores = y_scores.cuda()
            y_relations = y_relations.cuda()
            mask_relations = mask_relations.cuda()
            w_scores = w_scores.cuda()
            w_relations = w_relations.cuda()
            berts = berts.cuda()
            edge_indices = edge_indices.cuda()
            softmax_edge_indices = softmax_edge_indices.cuda()
            mask_relations_class = mask_relations_class.cuda()
            question_indices = question_indices.cuda()
            step_indices = step_indices.cuda()
            noisy_mask_relations = noisy_mask_relations.cuda()
        scores, relations = model(x_scores, x_relations, berts, edge_indices, softmax_edge_indices, n_program, max_y_score_len)
        score_loss = torch.nn.CrossEntropyLoss(reduction='none')(scores, y_scores)
        if args.train_with_masking:
            # Illegal relations are pushed to -inf before the CE loss.
            relations = torch.where(mask_relations_class, relations, torch.tensor(-float('inf')).to(relations.device))
            relation_loss = torch.nn.CrossEntropyLoss(reduction='none')(relations, y_relations) * mask_relations
        else:
            relation_loss = torch.nn.CrossEntropyLoss(reduction='none')(relations, y_relations) * mask_relations
            relation_loss = relation_loss[noisy_mask_relations]
        all_loss = score_loss + args.relation_coeff * relation_loss
        # Sum per program, then keep the best (min) program per question.
        all_loss = torch_scatter.scatter_add(all_loss, step_indices[1], dim=0, dim_size=torch.max(step_indices[1])+1)
        loss, _ = torch_scatter.scatter_min(all_loss, question_indices[1], dim=0, dim_size=torch.max(question_indices[1])+1)
        loss = torch.mean(loss)
        # Same reduction applied separately for logging.
        score_loss = torch.mean(torch_scatter.scatter_min(torch_scatter.scatter_add(score_loss, step_indices[1], dim=0, dim_size=torch.max(step_indices[1])+1), question_indices[1], dim=0, dim_size=torch.max(question_indices[1])+1)[0])
        relation_loss = torch.mean(torch_scatter.scatter_min(torch_scatter.scatter_add(relation_loss, step_indices[1], dim=0, dim_size=torch.max(step_indices[1])+1), question_indices[1], dim=0, dim_size=torch.max(question_indices[1])+1)[0])
        loss.backward()
        optimizer.step()
        log = {
            'supervised_loss': loss.item(),
            'supervised_score_loss': score_loss.item(),
            'supervised_relation_loss': relation_loss.item(),
        }
        for metric in log:
            writer.add_scalar(metric, log[metric], step)
        return log
| |
#from SureteDuQuebec_PublicDataSystem import Exceptions
import Exceptions
from Exceptions import ErrorSureteDuQuebec
#from SureteDuQuebec_PublicDataSystem.Exceptions import ErrorSureteDuQuebec
#from Exceptions import ErrorSureteDuQuebec
class DecoratorSQ:
    """Grab-bag of decorators and class-level state for the Surete du
    Quebec public-data system.

    NOTE(review): this class has multiple apparent defects -- several
    @staticmethod bodies reference an undefined `self`, @classmethod
    signatures are missing their `cls` parameter, and names such as
    `iterpipes`, `nameTestType`, `ItemModuleName` and `DictReferencep`
    are never defined. Documented as-is; confirm intent before use.
    """

    # Global configuration table; 'Error' tracks an installed error handler.
    DictReference={
        'GlobalKeyNameAssertion':{ 'overwritting':True, 'error':ErrorSureteDuQuebec.WarnAttributeOverwriting },
        'Error':{
            'Handler':False,
            'Name':[ None ] } }
    # Saved builtins values, keyed by name, for later restoration.
    ReferenceTransfert={ }
    # Tri-state write status for DictReference entries.
    AttributeState=[ 'NotWrited','Same','Writed' ]
    DictReferenceValue=None
    ParentNode=None
    ChildNode=None

    @staticmethod
    def getDictReference( self ):
        # Property getter for the currently cached reference value.
        return self.DictReferenceValue

    @staticmethod
    def setDictReference( self, value ):
        # Property setter: value is a (parent, child) pair naming the entry.
        # NOTE(review): `DictReference` is used unqualified here; presumably
        # DecoratorSQ.DictReference was intended.
        self.ParentNode, self.ChildNode = value
        self.DictReferenceValue=DictReference[self.ParentNode][self.ChildNode]

    PropertyReadDR=property( getDictReference, setDictReferenceValue ) if False else property( getDictReference, setDictReference )

    OldDictReferenceValue=None
    # NOTE(review): DictReferenceValue is re-declared, shadowing the one above.
    DictReferenceValue=None
    DictAttributeStatement=None

    def getDictReferenceValue( self ):
        # Getter for the write-status of the last setDictReferenceValue call.
        return self.DictAttributeStatement

    @staticmethod
    def CompareAttributeStateWritable( self, AttrStatement=2 ):
        # Snapshot the entry, write it back, and record the given state.
        # NOTE(review): the write-back stores the value just read, so this
        # is effectively a no-op on DictReference.
        self.OldDictReferenceValue=DictReference[self.ParentNode][self.ChildNode]
        DictReference[self.ParentNode][self.ChildNode]=self.OldDictReferenceValue
        self.DictAttributeStatement=self.AttributeState[AttrStatement]

    @staticmethod
    def setDictReferenceValue( self, value ):
        # Accepts (value) / (child, value) / (parent, child, value) tuples.
        if len( value ) == 1:
            self.DictReferenceValue=value
        elif len( value ) == 2:
            self.ChildNode, self.DictReferenceValue = value
        elif len( value ) == 3:
            self.ParentNode, self.ChildNode, self.DictReferenceValue = value
        if DictReference[self.ParentNode][self.ChildNode] == self.DictReferenceValue:
            # Unchanged value -> 'Same'.
            self.DictAttributeStatement=self.AttributeState[1]
        else:
            self.CompareAttributeStateWritable()

    PropertyWriteDR=property( getDictReferenceValue, setDictReferenceValue )

    @classmethod
    def InnerVariableFromFuncModule( ModuleImport, defaultNodeImpl=__builtins__ ):
        # Copy function-typed attributes of a module into builtins.
        # NOTE(review): iterates dir(ModuleImport) but reads attributes from
        # the undefined `iterpipes`; `nameTestType` is also undefined.
        for item in dir( ModuleImport ):
            TypeItem=type(getattr( iterpipes, item))
            if TypeItem == type(nameTestType):
                setattr( defaultNodeImpl, item, getattr( iterpipes, item) )

    @classmethod
    def InnerModuleImport( ModuleImportName, DefaultListFunc=[], defaultNodeImpl=__builtins__, defaultImporter='__import__' ):
        # Import a module by name if it is not already in scope.
        # NOTE(review): tests/uses `ItemModuleName`, which is undefined;
        # `ModuleImportName` was presumably intended.
        if not ModuleImportName in vars():
            print "Module Name %s not in memory, importing-it" % ( ItemModuleName )
            getattr( defaultNodeImpl, defaultImporter )( ItemModuleName , {}, {}, DefaultListFunc , -1 )

    @classmethod
    def InnerCreateKeyName( ListName ):
        # Publish each key name into builtins, saving any previous value.
        for itemKey in ListName:
            if itemKey in vars():
                DecoratorSQ.ReferenceTransfert[itemKey]=getattr( __builtins__, 'eval' )( itemKey )
            setattr( __builtins__, itemKey, itemKey )

    ### Theoretical method: restores the global variables
    ### once the function is exiting, so callers can virtually
    ### use any key name.
    @classmethod
    def InnerDeleteKeyName( ListName ):
        # Undo InnerCreateKeyName: delete the key and restore saved values.
        for itemKey in ListName:
            getattr( __builtins__, 'eval')( "del %s" % itemKey )
            if itemKey in DecoratorSQ.ReferenceTransfert.keys():
                setattr( __builtins__, itemKey, DecoratorSQ.ReferenceTransfert[itemKey] )
                del DecoratorSQ.ReferenceTransfert[itemKey]

    @staticmethod
    def ImplementError( ErrorName ):
        # Install an error handler by name.
        # NOTE(review): references `self` inside a @staticmethod and the
        # undefined attribute `DictReferencep` -- this cannot run as written.
        self.DictReferencep[ 'Error' ][ 'Handler' ]= True
        self.DictReferencep[ 'Name' ][ 'Handler' ]=ErrorName

    @staticmethod
    def TimerImplement( ClassName, AttrNameProcHwnd ):
        """
        Decorator: records call timestamps for the wrapped function in the
        ClassName attribute named by AttrNameProcHwnd, and raises the
        installed error handler if one is configured.
        """
        def decorator(func):
            def inner(*args, **kwargs):
                # NOTE(review): `self` is undefined in this scope.
                if self.DictReference[ 'Error' ][ 'Handler' ] == True:
                    raise getattr( ClassName, self.DictReferencep[ 'Name' ][ 'Handler' ] )
                if not func.__name__ in getattr( ClassName , AttrNameProcHwnd ).keys():
                    getattr( ClassName, AttrNameProcHwnd )[func.__name__]=list()
                else:
                    getattr( ClassName, AttrNameProcHwnd )[func.__name__].append( time.time() )
                func( *args, **kwargs )
            return inner
        return decorator

    @staticmethod
    def VariableFromFuncModule( ModuleImport, defaultNodeImpl=__builtins__ ):
        """
        Decorator: before each call, copy the module's functions into
        builtins via VariableFromFuncModule.

        NOTE(review): inner() calls VariableFromFuncModule recursively;
        InnerVariableFromFuncModule was presumably intended.
        """
        def decorator(func):
            def inner(*args, **kwargs):
                DecoratorSQ.VariableFromFuncModule( ModuleImport, defaultNodeImpl )
                func( *args, **kwargs )
            return inner
        return decorator

    @staticmethod
    def ModuleImport( ModuleImportName, DefaultListFunc=[], defaultNodeImpl=__builtins__, defaultImporter='__import__' ):
        """
        Decorator: ensure the named module is imported (and optionally its
        functions exported to builtins) before each call of the wrapped
        function.
        """
        def decorator(func):
            def inner(*args, **kwargs):
                DecoratorSQ.InnerModuleImport( ModuleImportName, DefaultListFunc, defaultNodeImpl, defaultImporter )
                # NOTE(review): len() of a module object raises TypeError.
                if len( defaultNodeImpl ) > 0 :
                    DecoratorSQ.VariableFromFuncModule( getattr( __builtins__,ModuleImportName), DefaultListFunc )
                func( *args, **kwargs )
            return inner
        return decorator

    @staticmethod
    def ParseTheKargs( ClassName, ShelveObject, TypeObj ):
        """
        Decorator: copy each keyword argument of the call onto ClassName
        (or onto/into its ShelveObject attribute) before invoking the
        wrapped function.
        """
        def decorator(func):
            def inner(*args, **kwargs):
                for ItemKeyName in kwargs.keys():
                    if ShelveObject == None:
                        # Store directly on the class, without overwriting.
                        if not hasattr( ClassName, ItemKeyName ):
                            setattr( ClassName, ItemKeyName, kwargs[ItemKeyName] )
                    else:
                        if TypeObj == None:
                            # Store on the shelve attribute object.
                            if not hasattr( getattr( ClassName,ShelveObject ), ItemKeyName ):
                                setattr( getattr( ClassName,ShelveObject ), ItemKeyName, kwargs[ItemKeyName] )
                            else:
                                setattr( getattr( ClassName,ShelveObject ), ItemKeyName, kwargs[ItemKeyName] )
                        else:
                            if type(TypeObj) == type(dict()):
                                # NOTE(review): both branches replace the whole
                                # dict with a single-entry one -- likely a bug.
                                if not ItemKeyName in getattr( ClassName,ShelveObject ).keys():
                                    setattr( ClassName,ShelveObject, { ItemKeyName:kwargs[ItemKeyName] } )
                                else:
                                    setattr( ClassName,ShelveObject, { ItemKeyName:kwargs[ItemKeyName] } )
                func( *args, **kwargs )
            return inner
        return decorator

    @staticmethod
    def InstanceFuncMessage( MessageName ):
        """
        Decorator: print MessageName, tagged with the wrapped function's
        name, before each call.
        """
        def decorator(func):
            def inner(*args, **kwargs):
                print "From Def:%s\n\t%s." % ( func.__name__ , MessageName )
                func( *args, **kwargs )
            return inner
        return decorator

    @staticmethod
    def GlobalKeyNameAssertion( ListKey ):
        """
        Decorator: publish the listed key names into builtins for the
        duration of the wrapped call, restoring previous values afterwards.
        """
        def decorator(func):
            def inner(*args, **kwargs):
                DecoratorSQ.InnerCreateKeyName( ListKey )
                func( *args, **kwargs )
                DecoratorSQ.InnerDeleteKeyName( ListKey )
            return inner
        return decorator
| |
# Standard library
import calendar
import csv, codecs, cStringIO
import io
import json
import locale
import os
import re
import string
import urllib
import urllib2
import uuid
from datetime import (
    date,
    datetime,
    timedelta,
)
from email.utils import parseaddr
from random import choice
from string import (
    ascii_lowercase,
    ascii_uppercase,
    digits,
)
from types import (
    IntType,
    LongType,
)

# Third-party
import colander
import pytz
from pyramid.threadlocal import get_current_registry
# (value, label) choices for active/inactive status widgets.
STATUS = (
    (1, 'Aktif'),
    (0, 'Inaktif'),
)
# (value, label) choices for header/detail report granularity.
SUMMARIES = (
    (1, 'Header'),
    (0, 'Detail'),
)
################
# Phone number #
################
# Characters allowed in a raw MSISDN: the digits 0-9 plus '+'.
# NOTE(review): under Python 3 map() returns an iterator, so the list
# concatenation below would fail; this module is Python 2 code.
MSISDN_ALLOW_CHARS = map(lambda x: str(x), range(10)) + ['+']
# (month number, Indonesian month name) pairs.
# NOTE(review): shadowed by the string-keyed BULANS defined later in
# this module.
BULANS = ((1,'Januari'),
          (2,'Februari'),
          (3,'Maret'),
          (4,'April'),
          (5,'Mei'),
          (6,'Juni'),
          (7,'Juli'),
          (8,'Agustus'),
          (9,'September'),
          (10,'Oktober'),
          (11,'November'),
          (12,'Desember'),
         )
def email_validator(node, value):
    """Colander validator: reject values without a parseable '@' address."""
    address = parseaddr(value)[1]
    # parseaddr yields '' for unparseable input, which also fails this test.
    if '@' not in address:
        raise colander.Invalid(node, 'Invalid email format')
def get_msisdn(msisdn, country='+62'):
    """Normalize a phone number to international format.

    Returns None (implicitly) for invalid input: illegal characters,
    non-numeric text, zero, or fewer than 7 digits. Numbers already in
    '+...' form are returned unchanged; a leading '0' is replaced by
    *country* (default '+62', Indonesia).
    """
    for ch in msisdn:
        if ch not in MSISDN_ALLOW_CHARS:
            return
    # 'except ValueError:' replaces the Python-2-only 'except ValueError, err'
    # syntax; the bound exception object was never used.
    try:
        i = int(msisdn)
    except ValueError:
        return
    if not i:
        return
    if len(str(i)) < 7:
        return
    if re.compile(r'^\+').search(msisdn):
        return msisdn
    if re.compile(r'^0').search(msisdn):
        return '%s%s' % (country, msisdn.lstrip('0'))
################
# Money format #
################
def should_int(value):
    """Return *value* as an int when it is integral, otherwise unchanged.

    Fixes the old ``int_ == value and int_ or value`` idiom, which
    returned the original float for integral zero (e.g. 0.0) because
    int 0 is falsy.
    """
    int_ = int(value)
    return int_ if int_ == value else value
def thousand(value, float_count=None):
    # Format *value* with locale-aware thousands grouping.
    # float_count=None auto-detects: Python 2 int/long -> 0 decimals,
    # everything else -> 2 decimals.
    if float_count is None: # autodetection
        if type(value) in (IntType, LongType):
            float_count = 0
        else:
            float_count = 2
    # NOTE(review): locale.format() is deprecated (removed in py3.12 in
    # favour of locale.format_string); kept since this is Python 2 code.
    return locale.format('%%.%df' % float_count, value, True)
def money(value, float_count=None, currency=None):
    """Format *value* as currency text; negative amounts are wrapped
    in parentheses (accounting style)."""
    if currency is None:
        # Fall back to the active locale's currency symbol.
        currency = locale.localeconv()['currency_symbol']
    body = ' '.join([currency, thousand(abs(value), float_count)])
    return '(%s)' % body if value < 0 else body
###########
# Pyramid #
###########
def get_settings():
    """Return the settings dict of the active Pyramid registry."""
    registry = get_current_registry()
    return registry.settings
def get_timezone():
    """Return the application's configured pytz timezone."""
    return pytz.timezone(get_settings().timezone)
########
# Time #
########
# One second expressed as a timedelta (the argument is in days).
one_second = timedelta(1.0/24/60/60)
# Host timezone detection with an Indonesian fallback.
TimeZoneFile = '/etc/timezone'
if os.path.exists(TimeZoneFile):
    DefaultTimeZone = open(TimeZoneFile).read().strip()
else:
    DefaultTimeZone = 'Asia/Jakarta'
def as_timezone(tz_date):
    """Convert *tz_date* to the configured local timezone.

    Naive datetimes are first rebuilt as timezone-aware via
    create_datetime() before conversion.
    """
    if not tz_date.tzinfo:
        tz_date = create_datetime(tz_date.year, tz_date.month, tz_date.day,
                                  tz_date.hour, tz_date.minute,
                                  tz_date.second, tz_date.microsecond)
    return tz_date.astimezone(get_timezone())
def create_datetime(year, month, day, hour=0, minute=0, second=0,
                    microsecond=0):
    """Build a timezone-aware datetime in the configured local timezone.

    Fixes the previous default of minute=7 (an apparent typo), which made
    callers relying on defaults -- e.g. create_date() -- produce 00:07
    instead of midnight.

    NOTE(review): passing a pytz timezone via tzinfo= yields the zone's
    raw (often LMT) offset; pytz recommends tz.localize() instead --
    confirm whether that matters for this application.
    """
    tz = get_timezone()
    return datetime(year, month, day, hour, minute, second,
                    microsecond, tzinfo=tz)
def create_date(year, month, day):
    # Convenience wrapper: timezone-aware datetime for the given date,
    # relying on create_datetime's time-of-day defaults.
    return create_datetime(year, month, day)
def create_now():
    """Return the current moment as a timezone-aware datetime."""
    return datetime.now(get_timezone())
def date_from_str(value):
    """Parse a date string into a datetime.date.

    Accepts 'dd-mm-yyyy', 'dd/mm/yyyy', 'yyyy-mm-dd' (auto-detected by a
    4-digit first component) and compact 'yyyymmdd'; a trailing time part
    ('dd-mm-yyyy HH:MM:SS') is ignored.

    Fixes the original, which indexed the result of map() -- a list only
    on Python 2 -- and relied on a never-imported `date` name.
    """
    value = value.split()[0]  # drop the time part, if any
    separator = None
    for candidate in ('-', '/'):
        if candidate in value:
            separator = candidate
            break
    if separator:
        parts = [int(p) for p in value.split(separator)]
        y, m, d = parts[2], parts[1], parts[0]
        if d > 999:  # first component was the year: yyyy-mm-dd
            y, d = d, y
    else:  # compact yyyymmdd
        y, m, d = int(value[:4]), int(value[4:6]), int(value[6:])
    return date(y, m, d)
def dmy(tgl):
    """Format a date/datetime as 'dd-mm-yyyy'."""
    return format(tgl, '%d-%m-%Y')
def dmyhms(t):
    """Format a datetime as 'dd-mm-yyyy HH:MM:SS'."""
    return format(t, '%d-%m-%Y %H:%M:%S')
def next_month(year, month):
    """Return (year, month) advanced by one calendar month."""
    if month == 12:
        return year + 1, 1
    return year, month + 1
def best_date(year, month, day):
    """Return date(year, month, day); when *day* does not exist in that
    month, fall back to the month's last day."""
    try:
        return date(year, month, day)
    except ValueError:
        return date(year, month, calendar.monthrange(year, month)[1])
def next_month_day(year, month, day):
    """Return the same day-of-month in the following month, clamped to
    that month's length."""
    ny, nm = next_month(year, month)
    return best_date(ny, nm, day)
################
# Months #
################
# (zero-padded month code, Indonesian month name) pairs.
# NOTE(review): redefines (and shadows) the integer-keyed BULANS declared
# earlier in this module -- confirm which form callers expect.
BULANS = (
    ('01', 'Januari'),
    ('02', 'Februari'),
    ('03', 'Maret'),
    ('04', 'April'),
    ('05', 'Mei'),
    ('06', 'Juni'),
    ('07', 'Juli'),
    ('08', 'Agustus'),
    ('09', 'September'),
    ('10', 'Oktober'),
    ('11', 'November'),
    ('12', 'Desember'),
)
def get_months(request):
    # View helper returning the month choices; *request* is unused but
    # required by the caller's signature convention.
    return BULANS
def email_validator(node, value):
    # Colander validator: reject values without a parseable '@' address.
    # NOTE(review): exact duplicate of the email_validator defined earlier
    # in this module; this definition shadows the first one.
    name, email = parseaddr(value)
    if not email or email.find('@') < 0:
        raise colander.Invalid(node, 'Invalid email format')
def row2dict(row):
    """Map an SQLAlchemy model instance to {column name: str(value)}."""
    return dict(
        (column.name, str(getattr(row, column.name)))
        for column in row.__table__.columns
    )
def _upper(chain):
ret = chain.upper()
if ret:
return ret
else:
return chain
def clean(s):
    """Return *s* with all non-printable characters removed."""
    return ''.join(ch for ch in s if ch in string.printable)
def xls_reader(filename, sheet):
    """Read worksheet *sheet* of an Excel file into a list of rows,
    each row a list of string cell values.

    Rows whose first cell is 'Tanggal' (a header marker) are skipped;
    a row whose first cell is empty terminates the scan entirely.
    Requires the third-party xlrd package.
    """
    workbook = xlrd.open_workbook(filename)
    worksheet = workbook.sheet_by_name(sheet)
    num_rows = worksheet.nrows - 1
    num_cells = worksheet.ncols - 1
    curr_row = -1
    # NOTE(review): this local shadows the csv module inside the function.
    csv = []
    while curr_row < num_rows:
        curr_row += 1
        row = worksheet.row(curr_row)
        curr_cell = -1
        txt = []
        while curr_cell < num_cells:
            curr_cell += 1
            # Cell Types: 0=Empty, 1=Text, 2=Number, 3=Date, 4=Boolean, 5=Error, 6=Blank
            cell_type = worksheet.cell_type(curr_row, curr_cell)
            cell_value = worksheet.cell_value(curr_row, curr_cell)
            if cell_type==1 or cell_type==2:
                try:
                    cell_value = str(cell_value)
                except:
                    cell_value = '0'
            else:
                # Non-text/number cells are stripped of unprintables.
                cell_value = clean(cell_value)
            if curr_cell==0 and cell_value.strip()=="Tanggal":
                # Header row: skip the rest of this row's cells.
                curr_cell=num_cells
            elif curr_cell==0 and cell_value.strip()=="":
                # Empty leading cell: stop reading the sheet entirely.
                curr_cell = num_cells
                curr_row = num_rows
            else:
                txt.append(cell_value)
        if txt:
            csv.append(txt)
    return csv
class UTF8Recoder:
    """
    Iterator that reads an encoded stream and reencodes the input to UTF-8
    """
    def __init__(self, f, encoding):
        # Wrap the raw byte stream in a decoding reader for *encoding*.
        self.reader = codecs.getreader(encoding)(f)

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol: yield UTF-8 encoded byte strings.
        return self.reader.next().encode("utf-8")
class UnicodeReader:
    """
    A CSV reader which will iterate over lines in the CSV file "f",
    which is encoded in the given encoding.
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Recode the input to UTF-8 first: the Python 2 csv module cannot
        # consume arbitrary encodings directly.
        f = UTF8Recoder(f, encoding)
        self.reader = csv.reader(f, dialect=dialect, **kwds)

    def next(self):
        # Decode each parsed field back to unicode (Python 2 protocol).
        row = self.reader.next()
        return [unicode(s, "utf-8") for s in row]

    def __iter__(self):
        return self
class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue so each row can be re-encoded from
        # UTF-8 into the target encoding before reaching the stream.
        self.queue = cStringIO.StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        """Write one row of unicode values, re-encoded for the target stream."""
        self.writer.writerow([s.encode("utf-8") for s in row])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        # (removed a leftover debug 'print data' that echoed every row
        # to stdout)
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)

    def writerows(self, rows):
        """Write a sequence of rows via writerow()."""
        for row in rows:
            self.writerow(row)
class CSVRenderer(object):
    """Pyramid renderer: turns {'header': [...], 'rows': [...]} into CSV."""

    def __init__(self, info):
        # Renderer factory protocol; the info argument is unused.
        pass

    def __call__(self, value, system):
        """ Returns a plain CSV-encoded string with content-type
        ``text/csv``. The content-type may be overridden by
        setting ``request.response.content_type``."""
        request = system.get('request')
        if request is not None:
            response = request.response
            ct = response.content_type
            # Only set text/csv when the app has not overridden the type.
            if ct == response.default_content_type:
                response.content_type = 'text/csv'
        fout = io.BytesIO() #StringIO()
        fcsv = csv.writer(fout, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        #fcsv = UnicodeWriter(fout, delimiter=',', quotechar=',', quoting=csv.QUOTE_MINIMAL)
        #print value.get('header', [])
        fcsv.writerow(value.get('header', []))
        fcsv.writerows(value.get('rows', []))
        return fout.getvalue()
class SaveFile(object):
    """Persist raw content into ``dir_path`` under a randomized file name."""

    def __init__(self, dir_path):
        self.dir_path = dir_path

    # The file name prefix is randomized while the extension is kept
    # (original comment, Indonesian: "Awalan nama file diacak sedangkan
    # akhirannya tidak berubah").
    def create_fullpath(self, ext=''):
        # BUG FIX: previously returned an undefined local `fullpath`
        # (NameError on every call); build the randomized path as the
        # comment above describes.
        return os.path.join(self.dir_path, get_random_string() + ext)

    def save(self, content, filename=None):
        """Write ``content`` to a new randomized file; keep the extension of
        ``filename`` when given. Returns the full path written."""
        ext = get_ext(filename) if filename else ''
        # BUG FIX: was `create_fullpath()` without `self.` (NameError).
        fullpath = self.create_fullpath(ext)
        # `with` guarantees the handle is closed even if write() fails.
        with open(fullpath, 'wb') as f:
            f.write(content)
        return fullpath
def get_random_string():
    """Return a random 6-character ASCII alphanumeric string."""
    alphabet = ascii_uppercase + ascii_lowercase + digits
    return ''.join(choice(alphabet) for _ in range(6))
def get_ext(filename):
    """Return the extension of ``filename`` including the dot ('' if none)."""
    _, ext = os.path.splitext(filename)
    return ext
class Upload(SaveFile):
    """Save an uploaded file into the configured ``static_files`` directory."""

    def __init__(self):
        settings = get_settings()
        dir_path = os.path.realpath(settings['static_files'])
        SaveFile.__init__(self, dir_path)

    def save(self, file):
        """Copy the upload ``file`` (a dict with 'fp' and 'filename') into
        the static directory under a uuid4-based name; return that name.

        NOTE(review): intentionally overrides SaveFile.save with a different
        signature; callers appear to use the dict form — confirm.
        """
        input_file = file['fp']
        ext = get_ext(file['filename'])
        filename = '%s%s' % (uuid.uuid4(), ext)
        fullpath = os.path.join(self.dir_path, filename)
        input_file.seek(0)
        # BUG FIX: the output handle previously leaked if a read/write
        # raised; `with` closes it on every path.
        with open(fullpath, 'wb') as output_file:
            while True:
                chunk = input_file.read(2 << 16)  # 128 KiB chunks
                if not chunk:
                    break
                output_file.write(chunk)
        return filename
def to_str(v):
    """Normalize a value to its string form: dates via dmy()/dmyhms(),
    zero as '0', strings stripped, booleans as '1'/'0', falsy as ''."""
    typ = type(v)
    # BUG FIX: removed a leftover debug statement (`print typ, v`) that
    # wrote every converted value to stdout.
    if typ == DateType:
        return dmy(v)
    if typ == DateTimeType:
        return dmyhms(v)
    if v == 0:
        return '0'
    if typ in [UnicodeType, StringType]:
        return v.strip()
    elif typ is BooleanType:
        # Python 2 idiom for `'1' if v else '0'`.
        return v and '1' or '0'
    return v and str(v) or ''
def dict_to_str(d):
    """Return a copy of ``d`` with every value normalized through to_str()."""
    return dict((key, to_str(value)) for key, value in d.items())
# Data Tables
def _DTstrftime(chain):
ret = chain and datetime.strftime(chain, "%d-%m-%Y")
if ret:
return ret
else:
return chain
def _DTnumberformat(chain):
    """DataTables formatter: render an integer with Indonesian thousands
    grouping, passing the value through when formatting yields nothing.

    NOTE(review): setlocale() changes process-wide locale state and requires
    the id_ID.utf8 locale to be installed — confirm this is acceptable.
    """
    import locale
    locale.setlocale(locale.LC_ALL, 'id_ID.utf8')
    # BUG FIX: locale.format() was deprecated and removed in Python 3.12;
    # locale.format_string() is the documented equivalent for this usage
    # and exists on Python 2 as well.
    ret = locale.format_string("%d", chain, grouping=True)
    if ret:
        return ret
    return chain
def _DTactive(chain):
ret = chain==1 and 'Aktif' or 'Inaktif'
if ret:
return ret
else:
return chain
#Captcha Response
class RecaptchaResponse(object):
    """Outcome of a reCAPTCHA verification attempt."""

    def __init__(self, is_valid, error_code=None):
        # True when the captcha was accepted; otherwise error_code carries
        # the reason (from the API or from the local pre-check).
        self.is_valid = is_valid
        self.error_code = error_code
def captcha_submit(recaptcha_challenge_field,
                   recaptcha_response_field,
                   private_key,
                   remoteip):
    """
    Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
    for the request
    recaptcha_challenge_field -- The value of recaptcha_challenge_field from the form
    recaptcha_response_field -- The value of recaptcha_response_field from the form
    private_key -- your reCAPTCHA private key
    remoteip -- the user's ip address
    """
    # Reject empty submissions locally without a round trip to Google.
    if not (recaptcha_response_field and recaptcha_challenge_field and
            len(recaptcha_response_field) and len(recaptcha_challenge_field)):
        return RecaptchaResponse(is_valid=False, error_code='incorrect-captcha-sol')

    def encode_if_necessary(s):
        # urlencode on Python 2 needs byte strings.
        if isinstance(s, unicode):
            return s.encode('utf-8')
        return s

    # NOTE(review): siteverify only documents 'secret', 'response' and
    # 'remoteip'; the 'privatekey' key is kept for compatibility with the
    # legacy endpoint — confirm it can be dropped.
    params = urllib.urlencode({
        'privatekey': encode_if_necessary(private_key),
        'remoteip': encode_if_necessary(remoteip),
        'secret': encode_if_necessary(recaptcha_challenge_field),
        'response': encode_if_necessary(recaptcha_response_field),
    })
    request = urllib2.Request(
        url="https://www.google.com/recaptcha/api/siteverify",
        data=params,
        headers={
            "Content-type": "application/x-www-form-urlencoded",
            "User-agent": "reCAPTCHA Python"
        }
    )
    httpresp = urllib2.urlopen(request)
    try:
        # ROBUSTNESS: close the response even if the body is not valid JSON.
        return_values = json.loads(httpresp.read())
    finally:
        httpresp.close()
    if return_values['success'] == True:
        return RecaptchaResponse(is_valid=True)
    # BUG FIX: the siteverify API reports failures under "error-codes"
    # (a list); the old lookup of 'error_code' raised KeyError on every
    # failed verification.
    return RecaptchaResponse(is_valid=False,
                             error_code=return_values.get('error-codes'))
| |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V2beta2MetricSpec(object):
    """Specification for scaling on a single metric.

    NOTE: auto generated by OpenAPI Generator (https://openapi-generator.tech);
    normally not edited by hand.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'container_resource': 'V2beta2ContainerResourceMetricSource',
        'external': 'V2beta2ExternalMetricSource',
        'object': 'V2beta2ObjectMetricSource',
        'pods': 'V2beta2PodsMetricSource',
        'resource': 'V2beta2ResourceMetricSource',
        'type': 'str'
    }

    attribute_map = {
        'container_resource': 'containerResource',
        'external': 'external',
        'object': 'object',
        'pods': 'pods',
        'resource': 'resource',
        'type': 'type'
    }

    def __init__(self, container_resource=None, external=None, object=None, pods=None, resource=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V2beta2MetricSpec - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Private backing slots for the properties below.
        self._container_resource = None
        self._external = None
        self._object = None
        self._pods = None
        self._resource = None
        self._type = None
        self.discriminator = None

        # Optional metric sources are assigned only when explicitly provided.
        if container_resource is not None:
            self.container_resource = container_resource
        if external is not None:
            self.external = external
        if object is not None:
            self.object = object
        if pods is not None:
            self.pods = pods
        if resource is not None:
            self.resource = resource
        # 'type' is required; its setter validates against None.
        self.type = type

    @property
    def container_resource(self):
        """Container resource metric source (V2beta2ContainerResourceMetricSource)."""
        return self._container_resource

    @container_resource.setter
    def container_resource(self, value):
        self._container_resource = value

    @property
    def external(self):
        """External metric source (V2beta2ExternalMetricSource)."""
        return self._external

    @external.setter
    def external(self, value):
        self._external = value

    @property
    def object(self):
        """Object metric source (V2beta2ObjectMetricSource)."""
        return self._object

    @object.setter
    def object(self, value):
        self._object = value

    @property
    def pods(self):
        """Pods metric source (V2beta2PodsMetricSource)."""
        return self._pods

    @pods.setter
    def pods(self, value):
        self._pods = value

    @property
    def resource(self):
        """Resource metric source (V2beta2ResourceMetricSource)."""
        return self._resource

    @resource.setter
    def resource(self, value):
        self._resource = value

    @property
    def type(self):
        """Kind of metric source: "ContainerResource", "External", "Object",
        "Pods" or "Resource"; "ContainerResource" requires the
        HPAContainerMetrics feature gate."""
        return self._type

    @type.setter
    def type(self, value):
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        self._type = value

    def to_dict(self):
        """Returns the model properties as a dict, converting nested models
        (anything exposing to_dict) one level deep."""
        def _coerce(item):
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_coerce(element) for element in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: _coerce(val)
                                for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Two specs are equal when their dict representations match."""
        if not isinstance(other, V2beta2MetricSpec):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of __eq__, with the same type guard."""
        if not isinstance(other, V2beta2MetricSpec):
            return True
        return self.to_dict() != other.to_dict()
| |
#!/usr/bin/python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import subprocess
from vtdb import tablet3
from vtdb import topology
from zk import zkocc
import os
import utils
import tablet
# single shard / 2 tablets
# The two tablets that make up the single test shard.
shard_0_master = tablet.Tablet()
shard_0_slave = tablet.Tablet()
# All generated certificates and keys live under the test's temp root.
cert_dir = utils.tmp_root + "/certs"
# FIXME(alainjobart) use the mysql certs for mysql replication tests
# I have trouble generating the right my.cnf, may add some env. variables
# to find the templates, and generate a different one for this test.
def openssl(cmd):
    """Run an `openssl` subcommand; raise TestError on non-zero exit."""
    exit_code = subprocess.call(["openssl"] + cmd)
    if exit_code != 0:
        raise utils.TestError("OpenSSL command failed: %s" % " ".join(cmd))
def setup():
    """Bring up zookeeper, generate the full CA/server/client/vt certificate
    chain under cert_dir, and start mysql on both tablets with SSL enabled."""
    utils.zk_setup()
    utils.debug("Creating certificates")
    os.makedirs(cert_dir)
    # Create CA certificate
    ca_key = cert_dir + "/ca-key.pem"
    ca_cert = cert_dir + "/ca-cert.pem"
    # NOTE(review): reuses the literal path instead of ca_key — same value.
    openssl(["genrsa", "-out", cert_dir + "/ca-key.pem"])
    ca_config = cert_dir + "/ca.config"
    with open(ca_config, 'w') as fd:
        fd.write("""
[ req ]
default_bits = 1024
default_keyfile = keyfile.pem
distinguished_name = req_distinguished_name
attributes = req_attributes
prompt = no
output_password = mypass
[ req_distinguished_name ]
C = US
ST = California
L = Mountain View
O = Google
OU = Vitess
CN = Mysql CA
emailAddress = test@email.address
[ req_attributes ]
challengePassword = A challenge password
""")
    openssl(["req", "-new", "-x509", "-nodes", "-days", "3600", "-batch",
             "-config", ca_config,
             "-key", ca_key,
             "-out", ca_cert])
    # Create mysql server certificate, remove passphrase, and sign it
    server_key = cert_dir + "/server-key.pem"
    server_cert = cert_dir + "/server-cert.pem"
    server_req = cert_dir + "/server-req.pem"
    server_config = cert_dir + "/server.config"
    with open(server_config, 'w') as fd:
        fd.write("""
[ req ]
default_bits = 1024
default_keyfile = keyfile.pem
distinguished_name = req_distinguished_name
attributes = req_attributes
prompt = no
output_password = mypass
[ req_distinguished_name ]
C = US
ST = California
L = Mountain View
O = Google
OU = Vitess
CN = Mysql Server
emailAddress = test@email.address
[ req_attributes ]
challengePassword = A challenge password
""")
    openssl(["req", "-newkey", "rsa:2048", "-days", "3600", "-nodes", "-batch",
             "-config", server_config,
             "-keyout", server_key, "-out", server_req])
    # Strip the passphrase from the generated key.
    openssl(["rsa", "-in", server_key, "-out", server_key])
    openssl(["x509", "-req",
             "-in", server_req,
             "-days", "3600",
             "-CA", ca_cert,
             "-CAkey", ca_key,
             "-set_serial", "01",
             "-out", server_cert])
    # Create mysql client certificate, remove passphrase, and sign it
    client_key = cert_dir + "/client-key.pem"
    client_cert = cert_dir + "/client-cert.pem"
    client_req = cert_dir + "/client-req.pem"
    client_config = cert_dir + "/client.config"
    with open(client_config, 'w') as fd:
        fd.write("""
[ req ]
default_bits = 1024
default_keyfile = keyfile.pem
distinguished_name = req_distinguished_name
attributes = req_attributes
prompt = no
output_password = mypass
[ req_distinguished_name ]
C = US
ST = California
L = Mountain View
O = Google
OU = Vitess
CN = Mysql Client
emailAddress = test@email.address
[ req_attributes ]
challengePassword = A challenge password
""")
    openssl(["req", "-newkey", "rsa:2048", "-days", "3600", "-nodes", "-batch",
             "-config", client_config,
             "-keyout", client_key, "-out", client_req])
    openssl(["rsa", "-in", client_key, "-out", client_key])
    openssl(["x509", "-req",
             "-in", client_req,
             "-days", "3600",
             "-CA", ca_cert,
             "-CAkey", ca_key,
             "-set_serial", "02",
             "-out", client_cert])
    # Create vt server certificate, remove passphrase, and sign it
    vt_server_key = cert_dir + "/vt-server-key.pem"
    vt_server_cert = cert_dir + "/vt-server-cert.pem"
    vt_server_req = cert_dir + "/vt-server-req.pem"
    openssl(["req", "-newkey", "rsa:2048", "-days", "3600", "-nodes", "-batch",
             "-keyout", vt_server_key, "-out", vt_server_req])
    openssl(["rsa", "-in", vt_server_key, "-out", vt_server_key])
    openssl(["x509", "-req",
             "-in", vt_server_req,
             "-days", "3600",
             "-CA", ca_cert,
             "-CAkey", ca_key,
             "-set_serial", "03",
             "-out", vt_server_cert])
    # Extra my.cnf fragment pointing mysqld at the generated certificates.
    extra_my_cnf = cert_dir + "/secure.cnf"
    fd = open(extra_my_cnf, "w")
    fd.write("ssl-ca=" + ca_cert + "\n")
    fd.write("ssl-cert=" + server_cert + "\n")
    fd.write("ssl-key=" + server_key + "\n")
    fd.close()
    setup_procs = [
        shard_0_master.init_mysql(extra_my_cnf=extra_my_cnf),
        shard_0_slave.init_mysql(extra_my_cnf=extra_my_cnf),
    ]
    utils.wait_procs(setup_procs)
def teardown():
    """Stop mysql on both tablets and wipe all test state (zk, temp files,
    tablet directory trees)."""
    # Allow skipping teardown to debug a failed run post-mortem.
    if utils.options.skip_teardown:
        return
    teardown_procs = [
        shard_0_master.teardown_mysql(),
        shard_0_slave.teardown_mysql(),
    ]
    # Best-effort: do not fail the run if a teardown process errors out.
    utils.wait_procs(teardown_procs, raise_on_error=False)
    utils.zk_teardown()
    utils.kill_sub_processes()
    utils.remove_tmp_files()
    shard_0_master.remove_tree()
    shard_0_slave.remove_tree()
def run_test_secure():
    """End-to-end check that vttablet serves SSL connections: plain clients
    are rejected on the secure port, encrypted clients can query, and SSL
    timeouts raise the expected exception."""
    utils.run_vtctl('CreateKeyspace test_keyspace')
    shard_0_master.init_tablet('master', 'test_keyspace', '0')
    shard_0_slave.init_tablet('replica', 'test_keyspace', '0')
    utils.run_vtctl('RebuildShardGraph test_keyspace/0', auto_log=True)
    utils.run_vtctl('RebuildKeyspaceGraph test_keyspace', auto_log=True)
    zkocc_server = utils.zkocc_start()
    # create databases so vttablet can start behaving normally
    shard_0_master.create_db('vt_test_keyspace')
    shard_0_slave.create_db('vt_test_keyspace')
    # start the tablets
    shard_0_master.start_vttablet(cert=cert_dir + "/vt-server-cert.pem",
                                  key=cert_dir + "/vt-server-key.pem")
    # The replica additionally replicates over SSL (flags 2048 enables SSL
    # for mysql replication).
    shard_0_slave.start_vttablet(cert=cert_dir + "/vt-server-cert.pem",
                                 key=cert_dir + "/vt-server-key.pem",
                                 repl_extra_flags={
                                     'flags': 2048,
                                     'ssl_ca': cert_dir + "/ca-cert.pem",
                                     'ssl_cert': cert_dir + "/client-cert.pem",
                                     'ssl_key': cert_dir + "/client-key.pem",
                                 })
    # Reparent using SSL
    utils.run_vtctl('ReparentShard -force test_keyspace/0 ' + shard_0_master.tablet_alias, auto_log=True)
    # then get the topology and check it
    zkocc_client = zkocc.ZkOccConnection("localhost:%u" % utils.zkocc_port_base,
                                         "test_nj", 30.0)
    topology.read_keyspaces(zkocc_client)
    shard_0_master_addrs = topology.get_host_port_by_name(zkocc_client, "test_keyspace.0.master:_vts")
    if len(shard_0_master_addrs) != 1:
        raise utils.TestError('topology.get_host_port_by_name failed for "test_keyspace.0.master:_vts", got: %s' % " ".join(["%s:%u(%s)" % (h, p, str(e)) for (h, p, e) in shard_0_master_addrs]))
    if shard_0_master_addrs[0][2] != True:
        raise utils.TestError('topology.get_host_port_by_name failed for "test_keyspace.0.master:_vts" is not encrypted')
    utils.debug("shard 0 master addrs: %s" % " ".join(["%s:%u(%s)" % (h, p, str(e)) for (h, p, e) in shard_0_master_addrs]))
    # make sure asking for optionally secure connections works too
    auto_addrs = topology.get_host_port_by_name(zkocc_client, "test_keyspace.0.master:_vtocc", encrypted=True)
    if auto_addrs != shard_0_master_addrs:
        raise utils.TestError('topology.get_host_port_by_name doesn\'t resolve encrypted addresses properly: %s != %s' % (str(shard_0_master_addrs), str(auto_addrs)))
    # try to connect with regular client
    try:
        conn = tablet3.TabletConnection("%s:%u" % (shard_0_master_addrs[0][0], shard_0_master_addrs[0][1]),
                                        "test_keyspace", "0", 10.0)
        conn.dial()
        raise utils.TestError("No exception raised to secure port")
    except tablet3.FatalError as e:
        if not e.args[0][0].startswith('Unexpected EOF in handshake to'):
            raise utils.TestError("Unexpected exception: %s" % str(e))
    # connect to encrypted port
    conn = tablet3.TabletConnection("%s:%u" % (shard_0_master_addrs[0][0], shard_0_master_addrs[0][1]),
                                    "test_keyspace", "0", 5.0, encrypted=True)
    conn.dial()
    (results, rowcount, lastrowid, fields) = conn._execute("select 1 from dual", {})
    if (len(results) != 1 or \
        results[0][0] != 1):
        print "conn._execute returned:", results
        raise utils.TestError('wrong conn._execute output')
    # trigger a time out on a secure connection, see what exception we get
    try:
        conn._execute("select sleep(100) from dual", {})
        raise utils.TestError("No timeout exception")
    except tablet3.TimeoutError as e:
        utils.debug("Got the right exception for SSL timeout: %s" % str(e))
    # kill everything
    utils.kill_sub_process(zkocc_server)
    shard_0_master.kill_vttablet()
    shard_0_slave.kill_vttablet()
def run_all():
    """Run every test in this module."""
    run_test_secure()
def main():
    """Command-line entry point: run setup, then each named test function
    from the command line, and always tear down afterwards."""
    args = utils.get_args()
    try:
        if args[0] != 'teardown':
            setup()
            if args[0] != 'setup':
                # Each positional argument names a function in this module.
                for arg in args:
                    globals()[arg]()
                print "GREAT SUCCESS"
    except KeyboardInterrupt:
        pass
    except utils.Break:
        # Leave the environment running for inspection.
        utils.options.skip_teardown = True
    finally:
        teardown()


if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python3
"""
Script to upload 32 bits and 64 bits wheel packages for Python 3.3 on Windows.
Usage: "python release.py HG_TAG" where HG_TAG is a Mercurial tag, usually
a version number like "3.4.2".
Requirements:
- Python 3.3 and newer requires the Windows SDK 7.1 to build wheel packages
- Python 2.7 requires the Windows SDK 7.0
- the aiotest module is required to run aiotest tests
"""
import contextlib
import optparse
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
PROJECT = 'asyncio'
# Environment variable toggling asyncio's debug mode in the test runs.
DEBUG_ENV_VAR = 'PYTHONASYNCIODEBUG'
# (major, minor) CPython versions to build/test wheel packages for.
PYTHON_VERSIONS = (
    (3, 3),
)
PY3 = (sys.version_info >= (3,))
HG = 'hg'  # Mercurial executable
SDK_ROOT = r"C:\Program Files\Microsoft SDKs\Windows"
# Batch snippet: abort the script if the previous command failed.
BATCH_FAIL_ON_ERROR = "@IF %errorlevel% neq 0 exit /b %errorlevel%"
WINDOWS = (sys.platform == 'win32')
def get_architecture_bits():
    """Return the bitness (32 or 64) of the running interpreter."""
    return int(platform.architecture()[0][:2])
class PythonVersion:
    """A target CPython (major, minor, bitness) and its resolved executable."""

    def __init__(self, major, minor, bits):
        self.major = major
        self.minor = minor
        self.bits = bits
        self._executable = None

    @staticmethod
    def running():
        """Describe the interpreter currently running this script."""
        current = PythonVersion(sys.version_info.major,
                                sys.version_info.minor,
                                get_architecture_bits())
        current._executable = sys.executable
        return current

    def _get_executable_windows(self, app):
        # By convention, 32-bit installs live in a "_32bit"-suffixed directory.
        if self.bits == 32:
            template = 'c:\\Python%s%s_32bit\\python.exe'
        else:
            template = 'c:\\Python%s%s\\python.exe'
        executable = template % (self.major, self.minor)
        if not os.path.exists(executable):
            print("Unable to find python %s" % self)
            print("%s does not exists" % executable)
            sys.exit(1)
        return executable

    def _get_executable_unix(self, app):
        return 'python%s.%s' % (self.major, self.minor)

    def get_executable(self, app):
        """Resolve the executable for this version, probe that it really is
        the expected version/bitness, and cache the result."""
        if self._executable:
            return self._executable
        if WINDOWS:
            executable = self._get_executable_windows(app)
        else:
            executable = self._get_executable_unix(app)
        # Ask the candidate interpreter to print "major.minor bits".
        probe = (
            'import platform, sys; '
            'print("{ver.major}.{ver.minor} {bits}".format('
            'ver=sys.version_info, '
            'bits=platform.architecture()[0]))'
        )
        try:
            exitcode, stdout = app.get_output(executable, '-c', probe,
                                              ignore_stderr=True)
        except OSError as exc:
            print("Error while checking %s:" % self)
            print(str(exc))
            print("Executable: %s" % executable)
            sys.exit(1)
        else:
            stdout = stdout.rstrip()
            expected = "%s.%s %sbit" % (self.major, self.minor, self.bits)
            if stdout != expected:
                print("Python version or architecture doesn't match")
                print("got %r, expected %r" % (stdout, expected))
                print("Executable: %s" % executable)
                sys.exit(1)
        self._executable = executable
        return executable

    def __str__(self):
        return 'Python %s.%s (%s bits)' % (self.major, self.minor, self.bits)
class Release(object):
    """Driver that builds, tests and publishes asyncio release artifacts
    (sdist and wheels) across the configured Python versions."""

    def __init__(self):
        root = os.path.dirname(__file__)
        self.root = os.path.realpath(root)
        # Set these attributes to True to run also register sdist upload
        self.wheel = False
        self.test = False
        self.register = False
        self.sdist = False
        self.aiotest = False
        self.verbose = False
        self.upload = False
        # Release mode: enable more tests
        self.release = False
        self.python_versions = []
        # On Windows both bitnesses are built; elsewhere only the native one.
        if WINDOWS:
            supported_archs = (32, 64)
        else:
            bits = get_architecture_bits()
            supported_archs = (bits,)
        for major, minor in PYTHON_VERSIONS:
            for bits in supported_archs:
                pyver = PythonVersion(major, minor, bits)
                self.python_versions.append(pyver)

    @contextlib.contextmanager
    def _popen(self, args, **kw):
        """Context manager around subprocess.Popen: optionally echoes the
        command, and kills the child if the caller's block raises."""
        verbose = kw.pop('verbose', True)
        if self.verbose and verbose:
            print('+ ' + ' '.join(args))
        if PY3:
            kw['universal_newlines'] = True
        proc = subprocess.Popen(args, **kw)
        try:
            yield proc
        except:
            proc.kill()
            proc.wait()
            raise

    def get_output(self, *args, **kw):
        """Run a command and return (exitcode, stdout). With
        ignore_stderr=True, stderr goes to devnull; otherwise it is merged
        into stdout."""
        kw['stdout'] = subprocess.PIPE
        ignore_stderr = kw.pop('ignore_stderr', False)
        if ignore_stderr:
            devnull = open(os.path.devnull, 'wb')
            kw['stderr'] = devnull
        else:
            kw['stderr'] = subprocess.STDOUT
        try:
            with self._popen(args, **kw) as proc:
                stdout, stderr = proc.communicate()
                return proc.returncode, stdout
        finally:
            if ignore_stderr:
                devnull.close()

    def check_output(self, *args, **kw):
        """Like get_output() but exits the script (after dumping the output)
        when the command fails."""
        exitcode, output = self.get_output(*args, **kw)
        if exitcode:
            sys.stdout.write(output)
            sys.stdout.flush()
            sys.exit(1)
        return output

    def run_command(self, *args, **kw):
        """Run a command, inheriting stdio; exit with its code on failure."""
        with self._popen(args, **kw) as proc:
            exitcode = proc.wait()
        if exitcode:
            sys.exit(exitcode)

    def get_local_changes(self):
        """Return 'hg status' lines for modified tracked files (untracked
        '?' entries are ignored)."""
        status = self.check_output(HG, 'status')
        return [line for line in status.splitlines()
                if not line.startswith("?")]

    def remove_directory(self, name):
        """Delete directory *name* under the project root, if present."""
        path = os.path.join(self.root, name)
        if os.path.exists(path):
            if self.verbose:
                print("Remove directory: %s" % name)
            shutil.rmtree(path)

    def remove_file(self, name):
        """Delete file *name* under the project root, if present."""
        path = os.path.join(self.root, name)
        if os.path.exists(path):
            if self.verbose:
                print("Remove file: %s" % name)
            os.unlink(path)

    def windows_sdk_setenv(self, pyver):
        """Return (SetEnv command list, sdk version tuple) for the Windows
        SDK matching *pyver*; exits if the SDK is not installed."""
        # Python 3.3+ is built with SDK 7.1, older versions with 7.0.
        if (pyver.major, pyver.minor) >= (3, 3):
            path = "v7.1"
            sdkver = (7, 1)
        else:
            path = "v7.0"
            sdkver = (7, 0)
        setenv = os.path.join(SDK_ROOT, path, 'Bin', 'SetEnv.cmd')
        if not os.path.exists(setenv):
            print("Unable to find Windows SDK %s.%s for %s"
                  % (sdkver[0], sdkver[1], pyver))
            print("Please download and install it")
            print("%s does not exists" % setenv)
            sys.exit(1)
        if pyver.bits == 64:
            arch = '/x64'
        else:
            arch = '/x86'
        cmd = ["CALL", setenv, "/release", arch]
        return (cmd, sdkver)

    def quote(self, arg):
        """Quote a command-line argument for a batch file when needed."""
        if not re.search("[ '\"]", arg):
            return arg
        # FIXME: should we escape "?
        return '"%s"' % arg

    def quote_args(self, args):
        """Quote and join a command argument list into one string."""
        return ' '.join(self.quote(arg) for arg in args)

    def cleanup(self):
        """Remove build artifacts (build/, dist/, compiled extensions)."""
        if self.verbose:
            print("Cleanup")
        self.remove_directory('build')
        self.remove_directory('dist')
        self.remove_file('_overlapped.pyd')
        self.remove_file(os.path.join(PROJECT, '_overlapped.pyd'))

    def sdist_upload(self):
        """Build and upload a source distribution with the running Python."""
        self.cleanup()
        self.run_command(sys.executable, 'setup.py', 'sdist', 'upload')

    def build_inplace(self, pyver):
        """Build the project for *pyver*; on Windows, copy the compiled
        _overlapped.pyd extension into the package directory."""
        print("Build for %s" % pyver)
        self.build(pyver, 'build')
        if WINDOWS:
            if pyver.bits == 64:
                arch = 'win-amd64'
            else:
                arch = 'win32'
            build_dir = 'lib.%s-%s.%s' % (arch, pyver.major, pyver.minor)
            src = os.path.join(self.root, 'build', build_dir,
                               PROJECT, '_overlapped.pyd')
            dst = os.path.join(self.root, PROJECT, '_overlapped.pyd')
            shutil.copyfile(src, dst)

    def runtests(self, pyver):
        """Run the test suite for *pyver* in debug mode (and also release
        mode when self.release is set), plus aiotest when enabled."""
        print("Run tests on %s" % pyver)
        if WINDOWS and not self.options.no_compile:
            self.build_inplace(pyver)
        # Prepare environments with the asyncio debug flag off and on.
        release_env = dict(os.environ)
        release_env.pop(DEBUG_ENV_VAR, None)
        dbg_env = dict(os.environ)
        dbg_env[DEBUG_ENV_VAR] = '1'
        python = pyver.get_executable(self)
        args = (python, 'runtests.py', '-r')
        if self.release:
            print("Run runtests.py in release mode on %s" % pyver)
            self.run_command(*args, env=release_env)
        print("Run runtests.py in debug mode on %s" % pyver)
        self.run_command(*args, env=dbg_env)
        if self.aiotest:
            args = (python, 'run_aiotest.py')
            if self.release:
                print("Run aiotest in release mode on %s" % pyver)
                self.run_command(*args, env=release_env)
            print("Run aiotest in debug mode on %s" % pyver)
            self.run_command(*args, env=dbg_env)
        print("")

    def _build_windows(self, pyver, cmd):
        """Run *cmd* from a temporary batch file with the Windows SDK
        environment configured for *pyver*."""
        setenv, sdkver = self.windows_sdk_setenv(pyver)
        temp = tempfile.NamedTemporaryFile(mode="w", suffix=".bat",
                                           delete=False)
        with temp:
            temp.write("SETLOCAL EnableDelayedExpansion\n")
            temp.write(self.quote_args(setenv) + "\n")
            temp.write(BATCH_FAIL_ON_ERROR + "\n")
            # Restore console colors: lightgrey on black
            temp.write("COLOR 07\n")
            temp.write("\n")
            temp.write("SET DISTUTILS_USE_SDK=1\n")
            temp.write("SET MSSDK=1\n")
            temp.write("CD %s\n" % self.quote(self.root))
            temp.write(self.quote_args(cmd) + "\n")
            temp.write(BATCH_FAIL_ON_ERROR + "\n")
        try:
            if self.verbose:
                print("Setup Windows SDK %s.%s" % sdkver)
                print("+ " + ' '.join(cmd))
            # SDK 7.1 uses the COLOR command which makes SetEnv.cmd failing
            # if the stdout is not a TTY (if we redirect stdout into a file)
            if self.verbose or sdkver >= (7, 1):
                self.run_command(temp.name, verbose=False)
            else:
                self.check_output(temp.name, verbose=False)
        finally:
            os.unlink(temp.name)

    def _build_unix(self, pyver, cmd):
        """Run the build command directly (no SDK setup needed on Unix)."""
        self.check_output(*cmd)

    def build(self, pyver, *cmds):
        """Clean, then run `setup.py *cmds*` with the interpreter of *pyver*."""
        self.cleanup()
        python = pyver.get_executable(self)
        cmd = [python, 'setup.py'] + list(cmds)
        if WINDOWS:
            self._build_windows(pyver, cmd)
        else:
            self._build_unix(pyver, cmd)

    def test_wheel(self, pyver):
        """Build a wheel for *pyver* without uploading it."""
        print("Test building wheel package for %s" % pyver)
        self.build(pyver, 'bdist_wheel')

    def publish_wheel(self, pyver):
        """Build a wheel for *pyver* and upload it to PyPI."""
        print("Build and publish wheel package for %s" % pyver)
        self.build(pyver, 'bdist_wheel', 'upload')

    def parse_options(self):
        """Parse command-line options and the single command argument,
        setting the corresponding mode flags on self."""
        parser = optparse.OptionParser(
            description="Run all unittests.",
            usage="%prog [options] command")
        parser.add_option(
            '-v', '--verbose', action="store_true", dest='verbose',
            default=0, help='verbose')
        parser.add_option(
            '-t', '--tag', type="str",
            help='Mercurial tag or revision, required to release')
        parser.add_option(
            '-p', '--python', type="str",
            help='Only build/test one specific Python version, ex: "2.7:32"')
        parser.add_option(
            '-C', "--no-compile", action="store_true",
            help="Don't compile the module, this options implies --running",
            default=False)
        parser.add_option(
            '-r', "--running", action="store_true",
            help='Only use the running Python version',
            default=False)
        parser.add_option(
            '--ignore', action="store_true",
            help='Ignore local changes',
            default=False)
        self.options, args = parser.parse_args()
        if len(args) == 1:
            command = args[0]
        else:
            command = None
        if self.options.no_compile:
            self.options.running = True
        # Map the command to the mode flags used by main().
        if command == 'clean':
            self.options.verbose = True
        elif command == 'build':
            self.options.running = True
        elif command == 'test_wheel':
            self.wheel = True
        elif command == 'test':
            self.test = True
        elif command == 'release':
            if not self.options.tag:
                print("The release command requires the --tag option")
                sys.exit(1)
            self.release = True
            self.wheel = True
            self.test = True
            self.upload = True
        else:
            if command:
                print("Invalid command: %s" % command)
            else:
                parser.print_help()
            print("")
            print("Available commands:")
            print("- build: build asyncio in place, imply --running")
            print("- test: run tests")
            print("- test_wheel: test building wheel packages")
            print("- release: run tests and publish wheel packages,")
            print("  require the --tag option")
            print("- clean: cleanup the project")
            sys.exit(1)
        if self.options.python and self.options.running:
            print("--python and --running options are exclusive")
            sys.exit(1)
        python = self.options.python
        if python:
            # NOTE(review): the help text shows "2.7:32" but the accepted
            # format is "x.y/bits" — confirm which is intended.
            match = re.match("^([23])\.([0-9])/(32|64)$", python)
            if not match:
                print("Invalid Python version: %s" % python)
                print('Format of a Python version: "x.y/bits"')
                print("Example: 2.7/32")
                sys.exit(1)
            major = int(match.group(1))
            minor = int(match.group(2))
            bits = int(match.group(3))
            self.python_versions = [PythonVersion(major, minor, bits)]
        if self.options.running:
            self.python_versions = [PythonVersion.running()]
        self.verbose = self.options.verbose
        self.command = command

    def main(self):
        """Execute the parsed command end to end."""
        self.parse_options()
        print("Directory: %s" % self.root)
        os.chdir(self.root)
        if self.command == "clean":
            self.cleanup()
            # NOTE(review): exits with status 1 even on a successful clean —
            # confirm this is intentional.
            sys.exit(1)
        if self.command == "build":
            if len(self.python_versions) != 1:
                print("build command requires one specific Python version")
                print("Use the --python command line option")
                sys.exit(1)
            pyver = self.python_versions[0]
            self.build_inplace(pyver)
        # Refuse to register/upload from a dirty working copy (unless --ignore).
        if (self.register or self.upload) and (not self.options.ignore):
            lines = self.get_local_changes()
        else:
            lines = ()
        if lines:
            print("ERROR: Found local changes")
            for line in lines:
                print(line)
            print("")
            print("Revert local changes")
            print("or use the --ignore command line option")
            sys.exit(1)
        hg_tag = self.options.tag
        if hg_tag:
            print("Update repository to revision %s" % hg_tag)
            self.check_output(HG, 'update', hg_tag)
        hg_rev = self.check_output(HG, 'id').rstrip()
        if self.wheel:
            for pyver in self.python_versions:
                self.test_wheel(pyver)
        if self.test:
            for pyver in self.python_versions:
                self.runtests(pyver)
        if self.register:
            self.run_command(sys.executable, 'setup.py', 'register')
        if self.sdist:
            self.sdist_upload()
        if self.upload:
            for pyver in self.python_versions:
                self.publish_wheel(pyver)
        # Ensure the repository did not move while building/uploading.
        hg_rev2 = self.check_output(HG, 'id').rstrip()
        if hg_rev != hg_rev2:
            print("ERROR: The Mercurial revision changed")
            print("Before: %s" % hg_rev)
            print("After: %s" % hg_rev2)
            sys.exit(1)
        print("")
        print("Mercurial revision: %s" % hg_rev)
        if self.command == 'build':
            print("Inplace compilation done")
        if self.wheel:
            print("Compilation of wheel packages succeeded")
        if self.test:
            print("Tests succeeded")
        if self.register:
            print("Project registered on the Python cheeseshop (PyPI)")
        if self.sdist:
            print("Project source code uploaded to the Python "
                  "cheeseshop (PyPI)")
        if self.upload:
            print("Wheel packages uploaded to the Python cheeseshop (PyPI)")
            for pyver in self.python_versions:
                print("- %s" % pyver)
if __name__ == "__main__":
    # Script entry point: parse options and run the requested command.
    Release().main()
| |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import json
import frappe.utils
from frappe import _
# Raised when a user attempts to sign up while signup is disabled in
# Website Settings.
class SignupDisabledError(frappe.PermissionError): pass

# The login page depends on session state, so it must never be cached.
no_cache = True
def get_context(context):
	"""Populate the login page context with signup and social-login settings."""
	context["title"] = "Login"
	context["disable_signup"] = frappe.utils.cint(
		frappe.db.get_value("Website Settings", "Website Settings", "disable_signup"))
	for provider in ("google", "github", "facebook"):
		# Only advertise providers that have OAuth credentials configured.
		if not get_oauth_keys(provider):
			continue
		context["{provider}_login".format(provider=provider)] = get_oauth2_authorize_url(provider)
		context["social_login"] = True
	return context
# Static OAuth2 configuration per supported identity provider.
# "flow_params" feed rauth.OAuth2Service; "redirect_uri" is site-relative and
# made absolute by get_redirect_uri(); "api_endpoint" is relative to each
# provider's base_url and returns the authenticated user's profile.
oauth2_providers = {
	"google": {
		"flow_params": {
			"name": "google",
			"authorize_url": "https://accounts.google.com/o/oauth2/auth",
			"access_token_url": "https://accounts.google.com/o/oauth2/token",
			"base_url": "https://www.googleapis.com",
		},
		"redirect_uri": "/api/method/frappe.templates.pages.login.login_via_google",
		"auth_url_data": {
			# Profile + email scopes (space-separated, per Google's OAuth2 docs).
			"scope": ("https://www.googleapis.com/auth/userinfo.profile "
				"https://www.googleapis.com/auth/userinfo.email"),
			"response_type": "code",
		},
		"api_endpoint": "oauth2/v2/userinfo",
	},
	"github": {
		"flow_params": {
			"name": "github",
			"authorize_url": "https://github.com/login/oauth/authorize",
			"access_token_url": "https://github.com/login/oauth/access_token",
			"base_url": "https://api.github.com/",
		},
		"redirect_uri": "/api/method/frappe.templates.pages.login.login_via_github",
		# GitHub needs no extra authorize-URL parameters.
		"api_endpoint": "user",
	},
	"facebook": {
		"flow_params": {
			"name": "facebook",
			"authorize_url": "https://www.facebook.com/dialog/oauth",
			"access_token_url": "https://graph.facebook.com/oauth/access_token",
			"base_url": "https://graph.facebook.com",
		},
		"redirect_uri": "/api/method/frappe.templates.pages.login.login_via_facebook",
		"auth_url_data": {
			"display": "page",
			"response_type": "code",
			"scope": "email,public_profile",
		},
		"api_endpoint": "me",
	},
}
def get_oauth_keys(provider):
	"""Return {"client_id": ..., "client_secret": ...} for *provider*.

	Site config key "<provider>_login" takes precedence; otherwise the values
	are read from the Social Login Keys doctype. A falsy value is returned
	when either credential is missing.
	"""
	keys = frappe.conf.get("{provider}_login".format(provider=provider))
	if keys:
		return keys
	# Fall back to credentials stored in the database.
	social = frappe.get_doc("Social Login Keys", "Social Login Keys")
	keys = {}
	for fieldname in ("client_id", "client_secret"):
		value = social.get("{provider}_{fieldname}".format(provider=provider, fieldname=fieldname))
		if not value:
			# Incomplete credentials count as "not configured".
			return {}
		keys[fieldname] = value
	return keys
def get_oauth2_authorize_url(provider):
	"""Build the provider's authorization URL pointing back at our redirect URI."""
	flow = get_oauth2_flow(provider)
	data = {"redirect_uri": get_redirect_uri(provider)}
	# Merge provider-specific extras (scope, response_type, ...), if any.
	data.update(oauth2_providers[provider].get("auth_url_data", {}))
	return flow.get_authorize_url(**data)
def get_oauth2_flow(provider):
	"""Create a rauth OAuth2Service configured for *provider*."""
	# Imported lazily so the page works even when rauth is not installed
	# and no social login is attempted.
	from rauth import OAuth2Service
	# client_id / client_secret plus the provider's static flow parameters.
	params = get_oauth_keys(provider)
	params.update(oauth2_providers[provider]["flow_params"])
	return OAuth2Service(**params)
def get_redirect_uri(provider):
	"""Return the absolute OAuth2 redirect URI registered for *provider*."""
	return frappe.utils.get_url(oauth2_providers[provider]["redirect_uri"])
@frappe.whitelist(allow_guest=True)
def login_via_google(code):
	"""OAuth2 callback for Google; token responses are JSON-decoded."""
	login_via_oauth2("google", code, decoder=json.loads)
@frappe.whitelist(allow_guest=True)
def login_via_github(code):
	"""OAuth2 callback for GitHub."""
	login_via_oauth2("github", code)
@frappe.whitelist(allow_guest=True)
def login_via_facebook(code):
	"""OAuth2 callback for Facebook."""
	login_via_oauth2("facebook", code)
def login_via_oauth2(provider, code, decoder=None):
	"""Exchange the OAuth2 *code* for a session and log the user in.

	Args:
		provider: key into oauth2_providers ("google", "github", "facebook").
		code: authorization code returned by the provider.
		decoder: optional response decoder passed to rauth (e.g. json.loads).
	"""
	flow = get_oauth2_flow(provider)
	args = {
		"data": {
			"code": code,
			"redirect_uri": get_redirect_uri(provider),
			"grant_type": "authorization_code"
		}
	}
	if decoder:
		args["decoder"] = decoder
	session = flow.get_auth_session(**args)
	# Fetch the authenticated user's profile from the provider's API.
	api_endpoint = oauth2_providers[provider].get("api_endpoint")
	info = session.get(api_endpoint).json()
	if "verified_email" in info and not info.get("verified_email"):
		# BUGFIX: the placeholder was {1}, which raises IndexError with a
		# single positional .format() argument; use {0}.
		frappe.throw(_("Email not verified with {0}").format(provider.title()))
	login_oauth_user(info, provider=provider)
@frappe.whitelist(allow_guest=True)
def login_oauth_user(data=None, provider=None, email_id=None, key=None):
	"""Log a user in (creating the account if needed) from OAuth profile data.

	If the provider supplied no email, the profile is stashed in a temp record
	and the browser is redirected to /complete_signup; that flow re-enters
	here with email_id and key set.
	"""
	if email_id and key:
		# Returning from /complete_signup: restore the stashed profile and
		# attach the user-supplied email address.
		data = json.loads(frappe.db.get_temp(key))
		data["email"] = email_id
	elif not "email" in data:
		# ask for user email
		key = frappe.db.set_temp(json.dumps(data))
		frappe.db.commit()
		frappe.local.response["type"] = "redirect"
		frappe.local.response["location"] = "/complete_signup?key=" + key
		return
	user = data["email"]
	try:
		update_oauth_user(user, data, provider)
	except SignupDisabledError:
		return frappe.respond_as_web_page("Signup is Disabled", "Sorry. Signup from Website is disabled.",
			success=False, http_status_code=403)
	frappe.local.login_manager.user = user
	frappe.local.login_manager.post_login()
	# redirect!
	frappe.local.response["type"] = "redirect"
	# the #desktop is added to prevent a facebook redirect bug
	frappe.local.response["location"] = "/desk#desktop" if frappe.local.response.get('message') == 'Logged In' else "/"
	# because of a GET request!
	frappe.db.commit()
@frappe.whitelist()
def save_demo_user_id(user):
	"""Persist the email id of a demo user.

	Args:
		user: email id string submitted by the demo user.
	"""
	# BUGFIX: SQL parameters must be a tuple; "(user)" is just a parenthesized
	# expression, not a 1-tuple.
	frappe.db.sql("""insert into `tabDemo User Email IDs` values(%s);""", (user,))
def update_oauth_user(user, data, provider):
	"""Create or update the User document for an OAuth login.

	Raises SignupDisabledError when the user does not exist yet and signup is
	disabled in Website Settings. Saves the document only when something
	actually changed.
	"""
	# Facebook sends location as a dict; keep only its display name.
	if isinstance(data.get("location"), dict):
		data["location"] = data.get("location").get("name")
	save = False
	if not frappe.db.exists("User", user):
		# is signup disabled?
		if frappe.utils.cint(frappe.db.get_single_value("Website Settings", "disable_signup")):
			raise SignupDisabledError
		save = True
		user = frappe.new_doc("User")
		user.update({
			"doctype":"User",
			# Providers disagree on field names; try each known variant.
			"first_name": data.get("first_name") or data.get("given_name") or data.get("name"),
			"last_name": data.get("last_name") or data.get("family_name"),
			"email": data["email"],
			"gender": (data.get("gender") or "").title(),
			"enabled": 1,
			# A random password: the account is only used via OAuth.
			"new_password": frappe.generate_hash(data["email"]),
			"location": data.get("location"),
			"user_type": "Website User",
			"user_image": data.get("picture") or data.get("avatar_url")
		})
	else:
		user = frappe.get_doc("User", user)
	# Record provider-specific ids the first time each provider is used.
	if provider=="facebook" and not user.get("fb_userid"):
		save = True
		user.update({
			"fb_username": data.get("username"),
			"fb_userid": data["id"],
			"user_image": "https://graph.facebook.com/{id}/picture".format(id=data["id"])
		})
	elif provider=="google" and not user.get("google_userid"):
		save = True
		user.google_userid = data["id"]
	elif provider=="github" and not user.get("github_userid"):
		save = True
		user.github_userid = data["id"]
		user.github_username = data["login"]
	if save:
		# System-triggered save: skip permission checks and the welcome mail.
		user.ignore_permissions = True
		user.no_welcome_mail = True
		user.save()
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for searching symbolic forms of density functionals."""
import time
from absl import logging
from jax.interpreters import xla
import numpy as np
from symbolic_functionals.syfes.dataset import dataset
from symbolic_functionals.syfes.symbolic import enhancement_factors
from symbolic_functionals.syfes.symbolic import evaluators
from symbolic_functionals.syfes.symbolic import mutators
from symbolic_functionals.syfes.symbolic import xc_functionals
def make_mutator(instruction_pool,
                 mutation_pool,
                 max_num_instructions,
                 max_num_bound_parameters,
                 num_fixed_instructions,
                 component_mutation_probabilities=None,
                 seed=None):
  """Constructs mutator for functional forms.

  Args:
    instruction_pool: Dict, the pool of possible instructions.
    mutation_pool: Dict, the pool of possible mutation rules.
    max_num_instructions: Integer, the maximum number of instructions.
    max_num_bound_parameters: Integer, the maximum number of bound parameters.
    num_fixed_instructions: Integer, the number of fixed instructions.
    component_mutation_probabilities: Sequence of 3 floats, the probabilities
      for mutating exchange, same-spin or opposite-spin component of the
      functional.
    seed: Integer, random seed.

  Returns:
    Instance of mutators.XCFunctionalMutator, the resulting mutator.
  """
  def _component_mutator():
    # All three components (x, css, cos) use identical mutation settings;
    # build a fresh, independent mutator instance for each.
    return mutators.EnhancementFactorMutator(
        instruction_pool=instruction_pool,
        mutation_pool=mutation_pool,
        max_num_instructions=max_num_instructions,
        num_fixed_instructions=num_fixed_instructions,
        max_num_bound_parameters=max_num_bound_parameters)

  return mutators.XCFunctionalMutator(
      mutator_x=_component_mutator(),
      mutator_css=_component_mutator(),
      mutator_cos=_component_mutator(),
      component_mutation_probabilities=component_mutation_probabilities,
      seed=seed)
def make_evaluators_with_mgcdb84_partitioning(
    dataset_directory,
    feature_names_x,
    feature_names_css,
    feature_names_cos,
    spin_singlet=False,
    targets='mgcdb84_ref',
    num_targets=None,
    omega=0.3,
    alpha=1.0,
    beta=-0.85,
    eval_modes=('jit', 'onp', 'onp')):
  """Builds one evaluator per MGCDB84 partition (train/validation/test).

  Args:
    dataset_directory: String, the directory to dataset.
    feature_names_x: List of strings, features for the exchange enhancement
      factor.
    feature_names_css: List of strings, features for the same-spin correlation
      enhancement factor.
    feature_names_cos: List of strings, features for the opposite-spin
      correlation enhancement factor.
    spin_singlet: Boolean, if True only spin-unpolarized molecules are used.
    targets: String, target quantity for WRMSD evaluation ('mgcdb84_ref',
      'B97X' or 'B97').
    num_targets: Integer, number of targets per partition; None uses all.
    omega: Float, RSH parameter of the functional used in SCF calculations.
    alpha: Float, RSH parameter of the functional used in SCF calculations.
    beta: Float, RSH parameter of the functional used in SCF calculations.
    eval_modes: Sequence of 3 strings, evaluation modes ('onp', 'jnp' or
      'jit') for the train, validation and test evaluators.

  Returns:
    List of 3 instances of evaluators.Evaluator, for training, validation
    and test losses.
  """
  evaluator_list = []
  for partition_index, partition in enumerate(('train', 'validation', 'test')):
    # Each MGCDB84 partition is loaded straight from the dataset directory.
    subset = dataset.Dataset.load_mcgdb84_subset(
        dataset_directory=dataset_directory,
        mgcdb84_set=partition,
        spin_singlet=spin_singlet,
        nrow_property=num_targets)
    evaluator = evaluators.Evaluator.from_dataset(
        subset=subset,
        feature_names_x=feature_names_x,
        feature_names_css=feature_names_css,
        feature_names_cos=feature_names_cos,
        targets=targets,
        omega=omega,
        alpha=alpha,
        beta=beta,
        eval_mode=eval_modes[partition_index])
    logging.info('Evaluator on %s set constructed: %s', partition, evaluator)
    evaluator_list.append(evaluator)
  return evaluator_list
def make_evaluators_with_mgcdb84_type(
    dataset_directory,
    mcgdb84_types,
    feature_names_x,
    feature_names_css,
    feature_names_cos,
    train_validation_test_split=(0.6, 0.2, 0.2),
    spin_singlet=False,
    targets='mgcdb84_ref',
    num_targets=None,
    omega=0.3,
    alpha=1.0,
    beta=-0.85,
    eval_modes=('jit', 'onp', 'onp')):
  """Builds evaluators from selected MGCDB84 data types.

  All data of the requested types is pooled, shuffled deterministically and
  split into training, validation and test subsets according to
  train_validation_test_split, yielding one evaluator per subset.

  Args:
    dataset_directory: String, the directory to dataset.
    mcgdb84_types: List of strings, the mgcdb84 types.
    feature_names_x: List of strings, features for the exchange enhancement
      factor.
    feature_names_css: List of strings, features for the same-spin correlation
      enhancement factor.
    feature_names_cos: List of strings, features for the opposite-spin
      correlation enhancement factor.
    train_validation_test_split: Sequence of 3 floats, the fractions of the
      training, validation and test sets.
    spin_singlet: Boolean, if True only spin-unpolarized molecules are used.
    targets: String, target quantity for WRMSD evaluation ('mgcdb84_ref'
      or 'B97X').
    num_targets: Integer, total number of targets used; None uses all targets
      with the specified data types.
    omega: Float, RSH parameter of the functional used in SCF calculations.
    alpha: Float, RSH parameter of the functional used in SCF calculations.
    beta: Float, RSH parameter of the functional used in SCF calculations.
    eval_modes: Sequence of 3 strings, evaluation modes ('onp', 'jnp' or
      'jit') for the train, validation and test evaluators.

  Returns:
    List of 3 instances of evaluators.Evaluator, for training, validation
    and test losses.

  Raises:
    ValueError: if train_validation_test_split does not contain 3 nonnegative
      fractions summing to 1.
  """
  split = train_validation_test_split
  if (len(split) != 3
      or any(frac < 0. for frac in split)
      or abs(sum(split) - 1.) > 1e-8):
    raise ValueError(
        'Invalid train_validation_test_split: ', split)
  subset = dataset.Dataset.load_mcgdb84_subset(
      dataset_directory=dataset_directory,
      mgcdb84_types=mcgdb84_types,
      spin_singlet=spin_singlet,
      nrow_property=num_targets)
  # Deterministic shuffle, then split at the cumulative fraction boundaries.
  shuffled_df = subset.property_df.sample(frac=1, random_state=0)
  boundaries = [
      int(split[0] * subset.nrow_property),
      int(sum(split[:2]) * subset.nrow_property),
  ]
  property_dfs = np.split(shuffled_df, boundaries)
  evaluator_list = []
  for partition_index, partition in enumerate(('train', 'validation', 'test')):
    evaluator = evaluators.Evaluator.from_dataset(
        subset=subset.get_subset(
            property_df_subset=property_dfs[partition_index]),
        feature_names_x=feature_names_x,
        feature_names_css=feature_names_css,
        feature_names_cos=feature_names_cos,
        targets=targets,
        omega=omega,
        alpha=alpha,
        beta=beta,
        eval_mode=eval_modes[partition_index])
    logging.info('Evaluator on %s set constructed: %s', partition, evaluator)
    evaluator_list.append(evaluator)
  return evaluator_list
def make_grid_evaluators(
    features,
    weights,
    targets,
    e_lda_x,
    e_lda_css,
    e_lda_cos,
    signature,
    train_validation_test_split=(0.6, 0.2, 0.2),
    eval_modes=('jit', 'onp', 'onp')):
  """Constructs train/validation/test GridEvaluators from raw grid data.

  Raises:
    ValueError: if train_validation_test_split does not contain 3 nonnegative
      fractions summing to 1.
  """
  split = train_validation_test_split
  if (len(split) != 3
      or any(frac < 0. for frac in split)
      or abs(sum(split) - 1.) > 1e-8):
    raise ValueError(
        'Invalid train_validation_test_split: ', split)
  features = {feature_name: np.array(feature)
              for feature_name, feature in features.items()}
  weights = np.array(weights)
  targets = np.array(targets)
  e_lda_x = np.array(e_lda_x)
  e_lda_css = np.array(e_lda_css)
  e_lda_cos = np.array(e_lda_cos)
  num_grids = len(weights)
  # Deterministic permutation, split at cumulative fraction boundaries.
  grid_indices_partition = np.split(
      np.random.RandomState(0).permutation(num_grids), [
          int(split[0] * num_grids),
          int(sum(split[:2]) * num_grids)
      ])
  evaluator_list = []
  for partition_index, partition in enumerate(('train', 'validation', 'test')):
    grid_indices = grid_indices_partition[partition_index]
    evaluator = evaluators.GridEvaluator(
        # copies keep each partition's arrays contiguous in memory
        features={feature_name: feature[grid_indices].copy()
                  for feature_name, feature in features.items()},
        weights=weights[grid_indices].copy(),
        targets=targets[grid_indices].copy(),
        e_lda_x=e_lda_x[grid_indices].copy(),
        e_lda_css=e_lda_css[grid_indices].copy(),
        e_lda_cos=e_lda_cos[grid_indices].copy(),
        signature=signature,
        eval_mode=eval_modes[partition_index])
    logging.info(
        'GridEvaluator on %s set constructed: %s', partition, evaluator)
    evaluator_list.append(evaluator)
  return evaluator_list
def make_random_functional(
    mutator,
    feature_names_x,
    feature_names_css,
    feature_names_cos,
    num_shared_parameters,
    num_variables,
    num_instructions):
  """Makes a random functional with given specifications.

  Args:
    mutator: Instance of mutators.XCFunctionalMutator, used to generate the
      random instruction lists.
    feature_names_x: Sequence of strings, feature names for the exchange
      enhancement factor.
    feature_names_css: Sequence of strings, feature names for the same-spin
      correlation enhancement factor.
    feature_names_cos: Sequence of strings, feature names for the
      opposite-spin correlation enhancement factor.
    num_shared_parameters: Integer, None, or sequence of 3 values: the number
      of shared parameters per enhancement factor (a scalar applies to all
      three; None keeps the current count).
    num_variables: Integer, None, or sequence of 3 values: the number of
      variables per enhancement factor (same broadcasting as above).
    num_instructions: Integer, the number of instructions per enhancement
      factor.

  Returns:
    Instance of xc_functionals.XCFunctional, the random functional.
  """
  def _spread3(value):
    # Broadcast a scalar (or None) to the three components; otherwise the
    # caller supplied exactly one value per component.
    if value is None or isinstance(value, int):
      return value, value, value
    value_x, value_css, value_cos = value
    return value_x, value_css, value_cos

  shared_x, shared_css, shared_cos = _spread3(num_shared_parameters)
  variables_x, variables_css, variables_cos = _spread3(num_variables)
  # Empty base factors fix the feature/parameter/variable naming first.
  f_x_base = enhancement_factors.f_empty.make_isomorphic_copy(
      feature_names=feature_names_x,
      num_shared_parameters=shared_x,
      num_variables=variables_x)
  f_css_base = enhancement_factors.f_empty.make_isomorphic_copy(
      feature_names=feature_names_css,
      num_shared_parameters=shared_css,
      num_variables=variables_css)
  f_cos_base = enhancement_factors.f_empty.make_isomorphic_copy(
      feature_names=feature_names_cos,
      num_shared_parameters=shared_cos,
      num_variables=variables_cos)

  def _randomized(base, component_mutator):
    # Populate a base factor with a random instruction list drawn from the
    # corresponding component mutator.
    return enhancement_factors.EnhancementFactor(
        feature_names=base.feature_names,
        shared_parameter_names=base.shared_parameter_names,
        variable_names=base.variable_names,
        instruction_list=component_mutator.randomize_instruction_list(
            base, num_instructions=num_instructions)[0])

  # Keyword arguments are evaluated in x, css, cos order, preserving the
  # order in which each mutator's RNG is consumed.
  return xc_functionals.XCFunctional(
      f_x=_randomized(f_x_base, mutator.mutator_x),
      f_css=_randomized(f_css_base, mutator.mutator_css),
      f_cos=_randomized(f_cos_base, mutator.mutator_cos),
  )
def train_functional(functional,
                     optimizer,
                     num_opt_trials,
                     parameters_init=None,
                     evaluator_validation=None,
                     evaluator_test=None,
                     clear_xla_cache=True):
  """Trains a given functional form.
  Args:
    functional: Instance of xc_functionals.XCFunctional, the functional form
      with parameters to be determined by optimization.
    optimizer: Instance of optimizers.CMAESOptimizer, the optimizer.
    num_opt_trials: Integer, the number of trials for optimization. The final
      results will be determined by the trial with minimum training loss.
    parameters_init: Dict, initial parameters. If not specified, random initial
      parameters will be used.
    evaluator_validation: Instance of evaluators.Evaluator, the evaluator
      of validation loss. If present, the validation loss will be computed.
    evaluator_test: Instance of evaluators.Evaluator, the evaluator
      of test loss. If present, the test loss will be computed.
    clear_xla_cache: Boolean, if True, the XLA cache will be cleared. Only
      relevant if jax.jit is used for evaluations.
  Returns:
    Dict, the results of optimization, see CMAESOptimizer.run_optimization.
    If evaluator_validation is specified, the dict will include an additional
    key 'validation_loss' for validation loss.
  """
  if clear_xla_cache:
    # NOTE(htm): jax.jit will cache compiled functions with different static
    # arguments. Currently, one has to call the following protected function
    # to clear jit cache.
    xla._xla_callable.cache_clear()  # pylint: disable=protected-access
  # optimize the functional form with training set
  start = time.time()
  results = optimizer.run_optimization(
      functional,
      num_trials=num_opt_trials,
      parameters_init=parameters_init)
  # Rename the optimizer's best-objective key to the clearer 'train_loss'.
  results['train_loss'] = results.pop('fbest')
  results['train_time'] = time.time() - start
  # Optionally evaluate the optimized parameters on held-out sets.
  evaluator_list = []
  if evaluator_validation is not None:
    evaluator_list.append(('validation', evaluator_validation))
  if evaluator_test is not None:
    evaluator_list.append(('test', evaluator_test))
  for prefix, evaluator in evaluator_list:
    start = time.time()
    if results['parameters'] is not None:
      loss = evaluator.get_eval_wrmsd(functional)(**results['parameters'])
    else:
      # Optimization produced no parameters; record NaN for this loss.
      loss = np.nan
    results[f'{prefix}_loss'] = loss
    results[f'{prefix}_time'] = time.time() - start
    logging.info('%s WRMSD (kcal/mol): %s', prefix.capitalize(), loss)
    logging.info('Evaluation time for %s WRMSD: %s',
                 prefix, results[f'{prefix}_time'])
  return results
| |
# --- Script setup: imports, command-line arguments and data configuration ---
from common import find_mxnet
import numpy as np
import os, sys
# Debug aid; BUGFIX: use the py2/py3-compatible print() form
# (was the py2-only statement "print sys.path").
print(sys.path)
import mxnet as mx
import argparse
parser = argparse.ArgumentParser(description="Train RNN on Penn Tree Bank",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data-dir', type=str, help='the input data directory')
parser.add_argument('--sequence-lens', type=str, default="32", help='the sequence lengths, e.g "8,16,32,64,128"')
parser.add_argument('--num-examples', type=str, help='Flag for consistancy, no use in rnn')
parser.add_argument('--test', default=False, action='store_true',
                    help='whether to do testing instead of training')
parser.add_argument('--model-prefix', type=str, default=None,
                    help='path to save/load model')
parser.add_argument('--load-epoch', type=int, default=0,
                    help='load from epoch')
parser.add_argument('--num-layers', type=int, default=2,
                    help='number of stacked RNN layers')
parser.add_argument('--num-hidden', type=int, default=256,
                    help='hidden layer size')
parser.add_argument('--num-embed', type=int, default=256,
                    help='embedding layer size')
# NOTE(review): argparse's type=bool treats ANY non-empty string (even
# "False") as True; an action='store_true' flag would be safer, but the
# existing CLI is preserved here.
parser.add_argument('--bidirectional', type=bool, default=False,
                    help='whether to use bidirectional layers')
parser.add_argument('--gpus', type=str,
                    help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu. ' \
                    'Increase batch size when using multiple gpus for best performance.')
parser.add_argument('--kv-store', type=str, default='device',
                    help='key-value store type')
parser.add_argument('--num-epochs', type=int, default=25,
                    help='max num of epochs')
parser.add_argument('--lr', type=float, default=0.01,
                    help='initial learning rate')
parser.add_argument('--optimizer', type=str, default='sgd',
                    help='the optimizer type')
parser.add_argument('--mom', type=float, default=0.0,
                    help='momentum for sgd')
parser.add_argument('--wd', type=float, default=0.00001,
                    help='weight decay for sgd')
parser.add_argument('--batch-size', type=int, default=128,
                    help='the batch size.')
parser.add_argument('--disp-batches', type=int, default=50,
                    help='show progress for every n batches')
# NOTE(review): without action='store_true' this option requires a value and
# any non-empty value is truthy; kept as-is to preserve the existing CLI.
parser.add_argument('--stack-rnn', default=False,
                    help='stack fused RNN cells to reduce communication overhead')
# Cleanup: default was the string '0.0' (argparse applied type=float to it,
# so behavior is unchanged); a float literal is clearer.
parser.add_argument('--dropout', type=float, default=0.0,
                    help='dropout probability (1.0 - keep probability)')
args = parser.parse_args()
#buckets = [64]
# Bucket sizes for the bucketed iterators, parsed from --sequence-lens.
buckets = [int(i) for i in args.sequence_lens.split(',')]
start_label = 1
invalid_label = 0
# Default PTB location under $HOME unless --data-dir is given.
data_dir = os.environ["HOME"] + "/data/mxnet/ptb/" if args.data_dir is None else args.data_dir
def tokenize_text(fname, vocab=None, invalid_label=-1, start_label=0):
    """Read *fname*, split each line on spaces, and encode words as int ids.

    Returns:
        (sentences, vocab): encoded sentences and the (possibly extended)
        vocabulary mapping produced by mx.rnn.encode_sentences.
    """
    # BUGFIX: close the file instead of leaking the handle.
    with open(fname) as f:
        lines = f.readlines()
    # Drop empty tokens; a list comprehension behaves identically on py2 and
    # py3 (py3's filter() would return a lazy iterator, not a list).
    lines = [[w for w in line.split(' ') if w] for line in lines]
    sentences, vocab = mx.rnn.encode_sentences(lines, vocab=vocab, invalid_label=invalid_label,
                                               start_label=start_label)
    return sentences, vocab
def get_data(layout):
    """Tokenize the PTB train/test files and wrap them in bucketed iterators."""
    train_sent, vocab = tokenize_text(data_dir + "ptb.train.txt", start_label=start_label,
                                      invalid_label=invalid_label)
    # Validation shares the training vocabulary.
    val_sent, _ = tokenize_text(data_dir + "ptb.test.txt", vocab=vocab, start_label=start_label,
                                invalid_label=invalid_label)
    def _make_iter(sentences):
        return mx.rnn.BucketSentenceIter(sentences, args.batch_size, buckets=buckets,
                                         invalid_label=invalid_label, layout=layout)
    return _make_iter(train_sent), _make_iter(val_sent), vocab
def train(args):
    """Train the bucketed LSTM language model described by *args*."""
    data_train, data_val, vocab = get_data('TN')
    # Total number of training sentences across all buckets.
    sample_size = sum(len(x) for x in data_train.data)
    print("len of data train===================== " + str(sample_size))
    if args.stack_rnn:
        # One fused single-layer cell per layer, with dropout cells between
        # layers (but not after the last one).
        cell = mx.rnn.SequentialRNNCell()
        for i in range(args.num_layers):
            cell.add(mx.rnn.FusedRNNCell(args.num_hidden, num_layers=1,
                                         mode='lstm', prefix='lstm_l%d'%i,
                                         bidirectional=args.bidirectional))
            if args.dropout > 0 and i < args.num_layers - 1:
                cell.add(mx.rnn.DropoutCell(args.dropout, prefix='lstm_d%d'%i))
    else:
        cell = mx.rnn.FusedRNNCell(args.num_hidden, num_layers=args.num_layers, dropout=args.dropout,
                                   mode='lstm', bidirectional=args.bidirectional)
    def sym_gen(seq_len):
        # Build the unrolled training symbol for one bucket length.
        data = mx.sym.Variable('data')
        label = mx.sym.Variable('softmax_label')
        embed = mx.sym.Embedding(data=data, input_dim=len(vocab), output_dim=args.num_embed, name='embed')
        output, _ = cell.unroll(seq_len, inputs=embed, merge_outputs=True, layout='TNC')
        pred = mx.sym.Reshape(output,
                              shape=(-1, args.num_hidden*(1+args.bidirectional)))
        pred = mx.sym.FullyConnected(data=pred, num_hidden=len(vocab), name='pred')
        label = mx.sym.Reshape(label, shape=(-1,))
        pred = mx.sym.SoftmaxOutput(data=pred, label=label, name='softmax')
        return pred, ('data',), ('softmax_label',)
    if args.gpus:
        contexts = [mx.gpu(int(i)) for i in args.gpus.split(',')]
    else:
        contexts = mx.cpu(0)
    model = mx.mod.BucketingModule(
        sym_gen = sym_gen,
        default_bucket_key = data_train.default_bucket_key,
        context = contexts)
    if args.load_epoch:
        _, arg_params, aux_params = mx.rnn.load_rnn_checkpoint(
            cell, args.model_prefix, args.load_epoch)
    else:
        arg_params = None
        aux_params = None
    opt_params = {
        'learning_rate': args.lr,
        'wd': args.wd,
        'clip_gradient': 5.0
    }
    # Momentum only applies to SGD-family optimizers.
    if args.optimizer not in ['adadelta', 'adagrad', 'adam', 'rmsprop']:
        opt_params['momentum'] = args.mom
    # BUGFIX: use the py2/py3-compatible print() form (was the py2-only
    # statement "print str(...)"); the printed value is unchanged.
    print(str(int((sample_size-args.batch_size)/args.batch_size)))
    model.fit(
        train_data = data_train,
        eval_data = data_val,
        eval_metric = mx.metric.Perplexity(invalid_label),
        kvstore = args.kv_store,
        optimizer = args.optimizer,
        optimizer_params = opt_params,
        initializer = mx.init.Uniform(scale=0.1),
        arg_params = arg_params,
        aux_params = aux_params,
        begin_epoch = args.load_epoch,
        num_epoch = args.num_epochs,
        # Report speed once per epoch (one batch before the epoch ends).
        batch_end_callback = mx.callback.Speedometer(args.batch_size, int((sample_size-args.batch_size)/args.batch_size) - 1),
        epoch_end_callback = mx.rnn.do_rnn_checkpoint(cell, args.model_prefix, 1)
                             if args.model_prefix else None)
def test(args):
    """Score a saved checkpoint on the PTB test set using unfused cells."""
    assert args.model_prefix, "Must specifiy path to load from"
    _, data_val, vocab = get_data('NT')
    if not args.stack_rnn:
        # Unfuse the CuDNN cell so the checkpoint can be scored without CuDNN.
        stack = mx.rnn.FusedRNNCell(args.num_hidden, num_layers=args.num_layers,
                mode='lstm', bidirectional=args.bidirectional).unfuse()
    else:
        stack = mx.rnn.SequentialRNNCell()
        for i in range(args.num_layers):
            cell = mx.rnn.LSTMCell(num_hidden=args.num_hidden, prefix='lstm_%dl0_'%i)
            if args.bidirectional:
                cell = mx.rnn.BidirectionalCell(
                    cell,
                    mx.rnn.LSTMCell(num_hidden=args.num_hidden, prefix='lstm_%dr0_'%i),
                    output_prefix='bi_lstm_%d'%i)
            stack.add(cell)
    def sym_gen(seq_len):
        # Build the unrolled scoring symbol for one bucket length.
        data = mx.sym.Variable('data')
        label = mx.sym.Variable('softmax_label')
        embed = mx.sym.Embedding(data=data, input_dim=len(vocab),
                output_dim=args.num_embed, name='embed')
        stack.reset()
        outputs, states = stack.unroll(seq_len, inputs=embed, merge_outputs=True)
        pred = mx.sym.Reshape(outputs,
                shape=(-1, args.num_hidden*(1+args.bidirectional)))
        pred = mx.sym.FullyConnected(data=pred, num_hidden=len(vocab), name='pred')
        label = mx.sym.Reshape(label, shape=(-1,))
        pred = mx.sym.SoftmaxOutput(data=pred, label=label, name='softmax')
        return pred, ('data',), ('softmax_label',)
    if args.gpus:
        contexts = [mx.gpu(int(i)) for i in args.gpus.split(',')]
    else:
        contexts = mx.cpu(0)
    model = mx.mod.BucketingModule(
        sym_gen = sym_gen,
        default_bucket_key = data_val.default_bucket_key,
        context = contexts)
    model.bind(data_val.provide_data, data_val.provide_label, for_training=False)
    # note here we load using SequentialRNNCell instead of FusedRNNCell.
    _, arg_params, aux_params = mx.rnn.load_rnn_checkpoint(stack, args.model_prefix, args.load_epoch)
    model.set_params(arg_params, aux_params)
    model.score(data_val, mx.metric.Perplexity(invalid_label),
        batch_end_callback=mx.callback.Speedometer(args.batch_size, 5))
if __name__ == '__main__':
    import logging
    head = '%(asctime)-15s %(message)s'
    logging.basicConfig(level=logging.DEBUG, format=head)
    # BUGFIX: args.gpus defaults to None; calling .split on it crashed this
    # warning check whenever num_layers >= 4 and --gpus was not given.
    if (args.num_layers >= 4 and args.gpus
            and len(args.gpus.split(',')) >= 4 and not args.stack_rnn):
        print('WARNING: stack-rnn is recommended to train complex model on multiple GPUs')
    if args.test:
        # Demonstrates how to load a model trained with CuDNN RNN and predict
        # with non-fused MXNet symbol
        test(args)
    else:
        train(args)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet.event
import eventlet.queue
import eventlet.timeout
import mock
import testtools
from tacker.agent.linux import async_process
from tacker.agent.linux import utils
from tacker.tests import base
_marker = ()
class TestAsyncProcess(base.BaseTestCase):
    def setUp(self):
        """Build a fresh AsyncProcess around a dummy command for each test."""
        super(TestAsyncProcess, self).setUp()
        self.proc = async_process.AsyncProcess(['fake'])
    def test_construtor_raises_exception_for_negative_respawn_interval(self):
        """A negative respawn_interval must be rejected at construction."""
        # NOTE(review): "construtor" is a typo for "constructor"; left as-is
        # since unittest discovers tests by the existing name.
        with testtools.ExpectedException(ValueError):
            async_process.AsyncProcess(['fake'], respawn_interval=-1)
def test__spawn(self):
expected_process = 'Foo'
proc = self.proc
with mock.patch.object(utils, 'create_process') as mock_create_process:
mock_create_process.return_value = [expected_process, None]
with mock.patch('eventlet.spawn') as mock_spawn:
proc._spawn()
self.assertIsInstance(proc._kill_event, eventlet.event.Event)
self.assertEqual(proc._process, expected_process)
mock_spawn.assert_has_calls([
mock.call(proc._watch_process,
proc._read_stdout,
proc._kill_event),
mock.call(proc._watch_process,
proc._read_stderr,
proc._kill_event),
])
self.assertEqual(len(proc._watchers), 2)
def test__handle_process_error_kills_with_respawn(self):
with mock.patch.object(self.proc, '_kill') as kill:
self.proc._handle_process_error()
kill.assert_has_calls(mock.call(respawning=False))
def test__handle_process_error_kills_without_respawn(self):
self.proc.respawn_interval = 1
with mock.patch.object(self.proc, '_kill') as kill:
with mock.patch.object(self.proc, '_spawn') as spawn:
with mock.patch('eventlet.sleep') as sleep:
self.proc._handle_process_error()
kill.assert_has_calls(mock.call(respawning=True))
sleep.assert_has_calls(mock.call(self.proc.respawn_interval))
spawn.assert_called_once_with()
def _test__watch_process(self, callback, kill_event):
self.proc._kill_event = kill_event
# Ensure the test times out eventually if the watcher loops endlessly
with eventlet.timeout.Timeout(5):
with mock.patch.object(self.proc,
'_handle_process_error') as func:
self.proc._watch_process(callback, kill_event)
if not kill_event.ready():
func.assert_called_once_with()
def test__watch_process_exits_on_callback_failure(self):
self._test__watch_process(lambda: False, eventlet.event.Event())
def test__watch_process_exits_on_exception(self):
def foo():
raise Exception('Error!')
self._test__watch_process(foo, eventlet.event.Event())
def test__watch_process_exits_on_sent_kill_event(self):
kill_event = eventlet.event.Event()
kill_event.send()
self._test__watch_process(None, kill_event)
def _test_read_output_queues_and_returns_result(self, output):
queue = eventlet.queue.LightQueue()
mock_stream = mock.Mock()
with mock.patch.object(mock_stream, 'readline') as mock_readline:
mock_readline.return_value = output
result = self.proc._read(mock_stream, queue)
if output:
self.assertEqual(output, result)
self.assertEqual(output, queue.get_nowait())
else:
self.assertFalse(result)
self.assertTrue(queue.empty())
def test__read_queues_and_returns_output(self):
self._test_read_output_queues_and_returns_result('foo')
def test__read_returns_none_for_missing_output(self):
self._test_read_output_queues_and_returns_result('')
def test_start_raises_exception_if_process_already_started(self):
self.proc._kill_event = True
with testtools.ExpectedException(async_process.AsyncProcessException):
self.proc.start()
def test_start_invokes__spawn(self):
with mock.patch.object(self.proc, '_spawn') as mock_start:
self.proc.start()
mock_start.assert_called_once_with()
def test__iter_queue_returns_empty_list_for_empty_queue(self):
result = list(self.proc._iter_queue(eventlet.queue.LightQueue()))
self.assertEqual(result, [])
def test__iter_queue_returns_queued_data(self):
queue = eventlet.queue.LightQueue()
queue.put('foo')
result = list(self.proc._iter_queue(queue))
self.assertEqual(result, ['foo'])
def _test_iter_output_calls_iter_queue_on_output_queue(self, output_type):
expected_value = 'foo'
with mock.patch.object(self.proc, '_iter_queue') as mock_iter_queue:
mock_iter_queue.return_value = expected_value
target_func = getattr(self.proc, 'iter_%s' % output_type, None)
value = target_func()
self.assertEqual(value, expected_value)
queue = getattr(self.proc, '_%s_lines' % output_type, None)
mock_iter_queue.assert_called_with(queue)
def test_iter_stdout(self):
self._test_iter_output_calls_iter_queue_on_output_queue('stdout')
def test_iter_stderr(self):
self._test_iter_output_calls_iter_queue_on_output_queue('stderr')
def _test__kill(self, respawning, pid=None):
with mock.patch.object(self.proc, '_kill_event') as mock_kill_event:
with mock.patch.object(self.proc, '_get_pid_to_kill',
return_value=pid):
with mock.patch.object(self.proc,
'_kill_process') as mock_kill_process:
self.proc._kill(respawning)
if respawning:
self.assertIsNotNone(self.proc._kill_event)
else:
self.assertIsNone(self.proc._kill_event)
mock_kill_event.send.assert_called_once_with()
if pid:
mock_kill_process.assert_called_once_with(pid)
def test__kill_when_respawning_does_not_clear_kill_event(self):
self._test__kill(True)
def test__kill_when_not_respawning_clears_kill_event(self):
self._test__kill(False)
def test__kill_targets_process_for_pid(self):
self._test__kill(False, pid='1')
def _test__get_pid_to_kill(self, expected=_marker,
root_helper=None, pids=None):
def _find_child_pids(x):
if not pids:
return []
pids.pop(0)
return pids
if root_helper:
self.proc.root_helper = root_helper
with mock.patch.object(self.proc, '_process') as mock_process:
with mock.patch.object(mock_process, 'pid') as mock_pid:
with mock.patch.object(utils, 'find_child_pids',
side_effect=_find_child_pids):
actual = self.proc._get_pid_to_kill()
if expected is _marker:
expected = mock_pid
self.assertEqual(expected, actual)
def test__get_pid_to_kill_returns_process_pid_without_root_helper(self):
self._test__get_pid_to_kill()
def test__get_pid_to_kill_returns_child_pid_with_root_helper(self):
self._test__get_pid_to_kill(expected='2', pids=['1', '2'],
root_helper='a')
def test__get_pid_to_kill_returns_last_child_pid_with_root_Helper(self):
self._test__get_pid_to_kill(expected='3', pids=['1', '2', '3'],
root_helper='a')
def test__get_pid_to_kill_returns_none_with_root_helper(self):
self._test__get_pid_to_kill(expected=None, root_helper='a')
def _test__kill_process(self, pid, expected, exception_message=None):
self.proc.root_helper = 'foo'
if exception_message:
exc = RuntimeError(exception_message)
else:
exc = None
with mock.patch.object(utils, 'execute',
side_effect=exc) as mock_execute:
actual = self.proc._kill_process(pid)
self.assertEqual(expected, actual)
mock_execute.assert_called_with(['kill', '-9', pid],
root_helper=self.proc.root_helper)
def test__kill_process_returns_true_for_valid_pid(self):
self._test__kill_process('1', True)
def test__kill_process_returns_true_for_stale_pid(self):
self._test__kill_process('1', True, 'No such process')
def test__kill_process_returns_false_for_execute_exception(self):
self._test__kill_process('1', False, 'Invalid')
def test_stop_calls_kill(self):
self.proc._kill_event = True
with mock.patch.object(self.proc, '_kill') as mock_kill:
self.proc.stop()
mock_kill.assert_called_once_with()
def test_stop_raises_exception_if_already_started(self):
with testtools.ExpectedException(async_process.AsyncProcessException):
self.proc.stop()
| |
"""
Minecraft server instance representation
"""
import asyncio
import fileinput
import glob
import hashlib
import json
import logging
import os
import os.path
import re
import shlex
import requests
from . import errors
class Server(object):
    """
    A Minecraft server instance.

    Models an existing server installation rooted at a directory: reads the
    vanilla ``server.properties`` file and the MyMCAdmin
    ``mymcadmin.settings`` file, builds the launch command line, and provides
    class-level helpers for listing/downloading official server versions.
    """

    PROPERTIES_FILE = 'server.properties'
    PROPERTIES_REGEX = re.compile(r'^([a-zA-Z0-9\-]+)=([^#]*)( *#.*)?$')
    PROPERTIES_BOOL_REGEX = re.compile(r'^(true|false)$', re.IGNORECASE)
    PROPERTIES_INT_REGEX = re.compile(r'^([0-9]+)$')
    SETTINGS_FILE = 'mymcadmin.settings'
    VERSION_URL = 'https://launchermeta.mojang.com/mc/game/version_manifest.json'

    def __init__(self, path):
        """
        Create an instance of the Minecraft server at the given file path.
        This does not create a new Minecraft server, instead its used to model
        a server.
        """
        self._path = path
        self._cache = {}
        self._properties_file = os.path.join(path, Server.PROPERTIES_FILE)
        # None means "not loaded yet"; an empty dict is a valid loaded state.
        self._properties = None
        self._settings_file = os.path.join(path, Server.SETTINGS_FILE)
        self._settings = None

    @property
    def path(self):
        """
        Get the file path of the server
        """
        return self._path

    @property
    def server_id(self):
        """
        Get the server ID (the base name of the server's directory)
        """
        return os.path.basename(self._path)

    @property
    def java(self):
        """
        Get the Java binary to use (settings key 'java', default 'java')
        """
        if 'java' not in self._cache:
            self._cache['java'] = self.settings.get('java', 'java')
        return self._cache['java']

    @property
    def jar(self):
        """
        Get the server Jar to run.

        Prefers the 'jar' entry in the settings file; otherwise falls back to
        the single *.jar file in the server directory.

        Raises:
            errors.ServerError: if no jar, or more than one jar, is found
        """
        if 'jar' not in self._cache and 'jar' in self.settings:
            self._cache['jar'] = self.settings['jar']
        if 'jar' not in self._cache:
            jars = glob.glob(os.path.join(self._path, '*.jar'))
            if len(jars) == 0:
                raise errors.ServerError('No server jar could be found')
            elif len(jars) > 1:
                # Ambiguous: refuse to guess between multiple jars.
                raise errors.ServerError('Unable to determine server jar')
            self._cache['jar'] = jars[0]
        return self._cache['jar']

    @property
    def command_args(self):
        """
        Get the command line arguments for starting the server:
        java [jvm_args...] -jar <jar> [args...]
        """
        command_args = [self.java]
        command_args += [
            shlex.quote(arg)
            for arg in self.settings.get('jvm_args', [])
        ]
        command_args += ['-jar', shlex.quote(self.jar)]
        command_args += [
            shlex.quote(arg)
            for arg in self.settings.get('args', [])
        ]
        return command_args

    @property
    def properties(self):
        """
        Get the Minecraft server properties defined in the server.properties
        file

        Raises:
            errors.ServerError: if the properties file does not exist yet
        """
        # Compare against the None sentinel, not truthiness: a properties
        # file with no parseable entries yields {} and must not be re-read
        # from disk on every access.
        if self._properties is None:
            try:
                with open(self._properties_file, 'r') as props_file:
                    props = props_file.readlines()
            except FileNotFoundError:
                raise errors.ServerError(
                    'Server properties file could not be found. ' +
                    'Try starting the server first to generate one.'
                )
            self._properties = {}
            for line in props:
                match = Server.PROPERTIES_REGEX.match(line.strip())
                if not match:
                    # Skip comments, blanks and malformed lines.
                    continue
                name, value, _ = match.groups()
                self._properties[name] = Server._convert_property_value(value)
        return self._properties

    @property
    def settings(self):
        """
        Get the MyMCAdmin settings for this server that are defined in the
        mymcadmin.settings file

        Raises:
            errors.ServerSettingsError: if the settings file does not exist
        """
        # None sentinel for the same reason as in `properties`: an empty
        # settings object ({}) is a valid cached value.
        if self._settings is None:
            try:
                with open(self._settings_file, 'r') as settings_file:
                    self._settings = json.load(settings_file)
            except FileNotFoundError:
                raise errors.ServerSettingsError(
                    'Server settings file (mymcadmin.settings) could not be ' +
                    'found.'
                )
        return self._settings

    def start(self):
        """
        Start the Minecraft server.

        Returns the coroutine from asyncio.create_subprocess_exec; the caller
        is expected to await it to obtain the Process object.
        """
        command_args = self.command_args
        logging.info('Starting server with: %s', command_args)
        return asyncio.create_subprocess_exec(
            *command_args,
            cwd    = self.path,
            stdin  = asyncio.subprocess.PIPE,
            stdout = asyncio.subprocess.PIPE,
            stderr = asyncio.subprocess.PIPE,
        )

    def save_settings(self):
        """
        Save any changes to the server settings to disk
        """
        logging.info('Saving settings for %s to disk', self.server_id)
        # Write to a temp file and atomically swap it in so a crash mid-write
        # cannot corrupt the existing settings file.
        tmp_file = self._settings_file + '.tmp'
        with open(tmp_file, 'w') as file_handle:
            json.dump(
                self.settings,
                file_handle,
                indent = '\t',
            )
        os.replace(tmp_file, self._settings_file)
        logging.info('Settings successfully saved')

    @classmethod
    def list_versions(
            cls,
            snapshots = True,
            releases  = True,
            betas     = True,
            alphas    = True):
        """
        List all available server versions, optionally filtering out
        snapshot/release/old_beta/old_alpha version types.

        Raises:
            errors.MyMCAdminError: if the version manifest cannot be fetched
        """
        def type_filter(version_filter, versions):
            """
            Filter out versions of a specific type
            """
            return [
                v for v in versions
                if v.get('type') != version_filter
            ]

        resp = requests.get(cls.VERSION_URL)
        if not resp.ok:
            raise errors.MyMCAdminError('Unable to retrieve version list')

        versions     = resp.json()
        latest       = versions['latest']
        all_versions = versions['versions']

        if not snapshots:
            del latest['snapshot']
            all_versions = type_filter('snapshot', all_versions)
        if not releases:
            del latest['release']
            all_versions = type_filter('release', all_versions)
        if not betas:
            all_versions = type_filter('old_beta', all_versions)
        if not alphas:
            all_versions = type_filter('old_alpha', all_versions)

        return {
            'latest':   latest,
            'versions': all_versions,
        }

    @classmethod
    def get_version_info(cls, version = None):
        """
        Get information about a specific Minecraft server version. Defaults
        to the latest release when no version is given.

        Raises:
            errors.VersionDoesNotExistError: if the version is unknown
            errors.MyMCAdminError: if the version metadata cannot be fetched
        """
        versions = cls.list_versions()
        if version is None:
            version = versions['latest']['release']

        versions = [
            v
            for v in versions['versions']
            if v['id'] == version
        ]
        if len(versions) == 0:
            raise errors.VersionDoesNotExistError(version)
        version = versions[0]

        version_url = version['url']
        resp = requests.get(version_url)
        if not resp.ok:
            raise errors.MyMCAdminError(
                'Unable to retrieve version information for {}',
                version,
            )
        return resp.json()

    @classmethod
    def download_server_jar(cls, version_id = None, path = None):
        """
        Download a server Jar based on its version ID and verify its SHA1.

        Returns the path of the downloaded jar.
        """
        if path is None:
            path = os.getcwd()

        version = cls.get_version_info(version_id)
        if version_id is None:
            version_id = version['id']

        jar_path = os.path.join(
            path,
            'minecraft_server_{}.jar'.format(version_id),
        )

        downloads = version['downloads']
        if 'server' not in downloads:
            raise errors.MyMCAdminError('Version does not support multiplayer')

        dl_info = downloads['server']
        dl_url  = dl_info['url']
        dl_sha1 = dl_info['sha1']

        jar_resp = requests.get(dl_url, stream = True)
        if not jar_resp.ok:
            raise errors.MyMCAdminError('Unable to download server jar')

        # Stream to disk while hashing so large jars are never fully buffered.
        sha1 = hashlib.sha1()
        with open(jar_path, 'wb') as jar_file:
            for chunk in jar_resp.iter_content(chunk_size = 1024):
                # Ignore keep-alive chunks
                if not chunk:
                    continue
                jar_file.write(chunk)
                sha1.update(chunk)

        jar_sha1 = sha1.hexdigest()
        if jar_sha1 != dl_sha1:
            raise errors.MyMCAdminError(
                'Downloaded server jar\'s sha1 did not match the expected value. ' +
                'Was {}, should be {}.',
                jar_sha1,
                dl_sha1,
            )
        return jar_path

    @classmethod
    def agree_to_eula(cls, path = None):
        """
        Accepts Mojang's EULA by rewriting FALSE -> TRUE in eula.txt
        (a .bak backup of the original file is kept).
        """
        if path is None:
            path = 'eula.txt'
        else:
            path = os.path.join(path, 'eula.txt')

        with fileinput.FileInput(path, inplace = True, backup = '.bak') as file_handle:
            for line in file_handle:
                print(
                    re.sub(
                        r'FALSE',
                        'TRUE',
                        line,
                        flags = re.IGNORECASE,
                    ),
                    end = '',
                )

    @classmethod
    def generate_default_settings(cls, path = None, jar = None):
        """
        Generates a default settings file for a server
        """
        if path is None:
            path = 'mymcadmin.settings'
        else:
            path = os.path.join(path, 'mymcadmin.settings')

        default_settings = {
            'java':      'java',
            'jvm_args':  [],
            'args':      ['nogui'],
            'autostart': True,
        }
        if jar is not None:
            default_settings['jar'] = jar

        with open(path, 'w') as file_handle:
            json.dump(
                default_settings,
                file_handle,
                indent = '\t',
            )

    @classmethod
    def _convert_property_value(cls, value):
        """
        Convert a value from the properties value to its correct type. IE
        integers are converted to ints, true/false to boolean, etc.
        Empty strings convert to None.
        """
        if value == '':
            return None
        elif cls.PROPERTIES_BOOL_REGEX.match(value):
            return value.lower() == 'true'
        elif cls.PROPERTIES_INT_REGEX.match(value):
            return int(value)
        else:
            return value
| |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test module configuration script."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import cStringIO
import logging
import os
import shutil
import sys
import tempfile
import traceback
import unittest
import appengine_config
from common import yaml_files
from scripts import modules as module_config
class TestWithTempDir(unittest.TestCase):
    """Base test case providing a scratch directory and file helpers.

    Python 2 module: uses print statements and os.path.walk.
    """
    def setUp(self):
        super(TestWithTempDir, self).setUp()
        # Per-test scratch directory, removed (best-effort) in tearDown.
        self._tmpdir = tempfile.mkdtemp()
    def tearDown(self):
        super(TestWithTempDir, self).tearDown()
        shutil.rmtree(self._tmpdir, ignore_errors=True)
    def _dump_dir(self, _, dirname, names):
        # os.path.walk visitor: print path and content of each regular file.
        for name in names:
            path = os.path.join(dirname, name)
            if not os.path.isdir(path):
                print '-------------------------', path
                with open(path) as fp:
                    print fp.read()
    def _dump_tree(self):
        # Debugging aid: dump the whole temp tree to stdout on failure.
        os.path.walk(self._tmpdir, self._dump_dir, None)
    def _write_content(self, path, content):
        # Write the given iterable of lines verbatim to path.
        with open(path, 'w') as fp:
            for line in content:
                fp.write(line)
    def _assert_content_equals(self, path, expected_lines):
        # Assert the file at path contains exactly expected_lines.
        with open(path) as fp:
            actual_lines = fp.readlines()
        self.assertEquals(expected_lines, actual_lines)
class ManipulateAppYamlFileTest(TestWithTempDir):
    """Tests for yaml_files.AppYamlFile round-tripping and edits."""
    def setUp(self):
        super(ManipulateAppYamlFileTest, self).setUp()
        self._yaml_path = os.path.join(self._tmpdir, 'app.yaml')
        # Golden fixture: smallest app.yaml with one library and two env vars.
        self._minimal_content = [
            '\n',
            '\n',
            'libraries:\n',
            '- name: jinja2\n',
            '  version: "2.6"\n',
            '\n',
            'env_variables:\n',
            '  FOO: bar\n',
            '  BAR: bleep\n',
        ]
    def test_read_write_unchanged(self):
        # Parsing then writing with no edits must reproduce the file exactly.
        parsed_content = (
            '\n'
            '\n'
            'libraries:\n'
            '- name: jinja2\n'
            '  version: "2.6"\n'
            '\n'
            'env_variables:\n'
            '  FOO: bar\n'
            'scalar_str: "string"\n'
            'scalar_int: 123\n'
            'scalar_bool: true\n'
            'dict:\n'
            '  foo1: bar\n'
            '  foo2: 123\n'
            '  foo3: true\n'
            'list:\n'
            '- name: blah\n'
            '  value: 123\n'
            '- name: blahblah\n'
            '- value: 999\n'
        )
        with open(self._yaml_path, 'w') as fp:
            fp.write(parsed_content)
        app_yaml = yaml_files.AppYamlFile(self._yaml_path)
        app_yaml.write()
        with open(self._yaml_path) as fp:
            written_content = fp.read()
        self.assertEquals(parsed_content, written_content)
    def test_get_env_var(self):
        self._write_content(self._yaml_path, self._minimal_content)
        app_yaml = yaml_files.AppYamlFile(self._yaml_path)
        self.assertEquals('bar', app_yaml.get_env('FOO'))
        self.assertEquals('bleep', app_yaml.get_env('BAR'))
    def test_add_env_var(self):
        self._write_content(self._yaml_path, self._minimal_content)
        app_yaml = yaml_files.AppYamlFile(self._yaml_path)
        app_yaml.set_env('BAZ', 'bar')
        self.assertEquals('bar', app_yaml.get_env('BAZ'))
        app_yaml.write()
        # New env var is appended at the end of the env_variables block.
        expected = self._minimal_content + ['  BAZ: bar\n']
        self._assert_content_equals(self._yaml_path, expected)
    def test_overwrite_env_var(self):
        self._write_content(self._yaml_path, self._minimal_content)
        app_yaml = yaml_files.AppYamlFile(self._yaml_path)
        app_yaml.set_env('FOO', 'foo')
        self.assertEquals('foo', app_yaml.get_env('FOO'))
        app_yaml.write()
        # Existing env var line is replaced in place.
        expected = (
            self._minimal_content[:7] +
            ['  FOO: foo\n'] +
            self._minimal_content[8:])
        self._assert_content_equals(self._yaml_path, expected)
    def test_clear_env_var(self):
        self._write_content(self._yaml_path, self._minimal_content)
        app_yaml = yaml_files.AppYamlFile(self._yaml_path)
        # Setting an env var to the empty string removes it.
        app_yaml.set_env('BAR', '')
        self.assertIsNone(app_yaml.get_env('BAR'))
        app_yaml.write()
        expected = self._minimal_content[:-1]
        self._assert_content_equals(self._yaml_path, expected)
    def test_require_existing_library(self):
        self._write_content(self._yaml_path, self._minimal_content)
        app_yaml = yaml_files.AppYamlFile(self._yaml_path)
        # Requiring an already-present library at the same version is a no-op.
        app_yaml.require_library('jinja2', '2.6')
        app_yaml.write()
        expected = self._minimal_content
        self._assert_content_equals(self._yaml_path, expected)
    def test_require_new_library(self):
        self._write_content(self._yaml_path, self._minimal_content)
        app_yaml = yaml_files.AppYamlFile(self._yaml_path)
        app_yaml.require_library('frammis', '1.2')
        app_yaml.write()
        # New library entry is inserted at the head of the libraries list.
        expected = (
            self._minimal_content[:3] +
            ['- name: frammis\n',
             '  version: "1.2"\n'] +
            self._minimal_content[3:])
        self._assert_content_equals(self._yaml_path, expected)
    def test_require_different_version_of_library(self):
        self._write_content(self._yaml_path, self._minimal_content)
        app_yaml = yaml_files.AppYamlFile(self._yaml_path)
        # Conflicting version requirements must be rejected.
        with self.assertRaises(ValueError):
            app_yaml.require_library('jinja2', '2.1')
class ModuleManifestTest(TestWithTempDir):
    """Validation of yaml_files.ModuleManifest parsing and version checks."""
    def setUp(self):
        super(ModuleManifestTest, self).setUp()
        self._manifest_path = os.path.join(self._tmpdir,
                                           module_config._MANIFEST_NAME)
    def test_manifest_must_contain_module(self):
        with open(self._manifest_path, 'w') as fp:
            fp.write('foo: bar\n')
        with self.assertRaises(KeyError):
            # pylint: disable=expression-not-assigned
            yaml_files.ModuleManifest(self._manifest_path).module_name
    def test_module_name_must_name_full_python_module(self):
        # A bare name with no dotted path is rejected.
        with open(self._manifest_path, 'w') as fp:
            fp.write('module_name: bar\n')
        with self.assertRaises(ValueError):
            # pylint: disable=expression-not-assigned
            yaml_files.ModuleManifest(self._manifest_path).module_name
    def test_module_name_must_start_with_modules(self):
        # Dotted path not rooted at "modules." is rejected.
        with open(self._manifest_path, 'w') as fp:
            fp.write('module_name: bar.baz\n')
        with self.assertRaises(ValueError):
            # pylint: disable=expression-not-assigned
            yaml_files.ModuleManifest(self._manifest_path).module_name
    def test_manifest_must_have_container_version(self):
        with open(self._manifest_path, 'w') as fp:
            fp.write('module_name: modules.bar.bar_module\n')
        with self.assertRaises(KeyError):
            # pylint: disable=expression-not-assigned
            yaml_files.ModuleManifest(self._manifest_path).module_name
    def test_manifest_must_have_tests(self):
        with open(self._manifest_path, 'w') as fp:
            fp.write(
                'module_name: modules.bar.bar_module\n'
                'container_version: 1.3\n'
            )
        with self.assertRaises(KeyError):
            # pylint: disable=expression-not-assigned
            yaml_files.ModuleManifest(self._manifest_path).module_name
    def test_minimal_manifest(self):
        # Smallest valid manifest: name, container version, and tests map.
        with open(self._manifest_path, 'w') as fp:
            fp.write(
                'module_name: modules.foo.foo_module\n'
                'container_version: 1.2.3\n'
                'tests:\n'
                '  this: 1\n'
                '  that: 2\n')
        manifest = yaml_files.ModuleManifest(self._manifest_path)
        self.assertEquals(manifest.module_name, 'foo')
        self.assertEquals(manifest.main_module, 'modules.foo.foo_module')
        self.assertEquals(manifest.third_party_libraries, {})
        self.assertEquals(manifest.appengine_libraries, {})
        self.assertEquals(manifest.tests, {'this': 1, 'that': 2})
    def test_version_compatibility(self):
        with open(self._manifest_path, 'w') as fp:
            fp.write(
                'module_name: modules.foo.foo_module\n'
                'container_version: 1.2.3\n'
                'tests:\n'
                '  this: 1\n'
                '  that: 2\n')
        manifest = yaml_files.ModuleManifest(self._manifest_path)
        # Container versions at or above the manifest's version are accepted.
        manifest.assert_version_compatibility('1.2.3')
        manifest.assert_version_compatibility('1.2.4')
        manifest.assert_version_compatibility('1.3.0')
        manifest.assert_version_compatibility('2.0.0')
        # Anything older is rejected.
        with self.assertRaises(ValueError):
            manifest.assert_version_compatibility('1.2.2')
        with self.assertRaises(ValueError):
            manifest.assert_version_compatibility('1.1.9')
        with self.assertRaises(ValueError):
            manifest.assert_version_compatibility('0.9.9')
class ModuleIncorporationTest(TestWithTempDir):
def _make_module(self, module_dir, module_name):
with open(os.path.join(module_dir, '__init__.py'), 'w'):
pass
with open(os.path.join(module_dir, module_name), 'w') as fp:
fp.write(
'from models import custom_modules\n'
'def register_module():\n'
' return custom_modules.Module("x", "x", [], [])'
)
def setUp(self):
super(ModuleIncorporationTest, self).setUp()
self.foo_dir = os.path.join(self._tmpdir, 'foo')
self.foo_src_dir = os.path.join(self.foo_dir, 'src')
self.foo_scripts_dir = os.path.join(self.foo_dir, 'scripts')
self.bar_dir = os.path.join(self._tmpdir, 'bar')
self.bar_src_dir = os.path.join(self.bar_dir, 'src')
self.bar_scripts_dir = os.path.join(self.bar_dir, 'scripts')
self.cb_dir = os.path.join(self._tmpdir, 'coursebuilder')
self.cb_modules_dir = os.path.join(self.cb_dir, 'modules')
self.scripts_dir = os.path.join(self.cb_dir, 'scripts')
self.lib_dir = os.path.join(self.cb_dir, 'lib')
self.modules_dir = os.path.join(self._tmpdir,
'coursebuilder_resources', 'modules')
for dirname in (self.foo_dir, self.foo_src_dir, self.foo_scripts_dir,
self.bar_dir, self.bar_src_dir, self.bar_scripts_dir,
self.cb_dir, self.scripts_dir, self.lib_dir,
self.cb_modules_dir, self.modules_dir):
os.makedirs(dirname)
foo_manifest_path = os.path.join(self.foo_dir,
module_config._MANIFEST_NAME)
with open(foo_manifest_path, 'w') as fp:
fp.write(
'module_name: modules.foo.foo_module\n'
'container_version: 1.6.0\n'
'tests:\n'
' tests.ext.foo.foo_tests.FooTest: 1\n'
'third_party_libraries:\n'
'- name: foo_stuff.zip\n')
self._make_module(self.foo_src_dir, 'foo_module.py')
foo_installer_path = os.path.join(self.foo_dir, 'scripts', 'setup.sh')
with open(foo_installer_path, 'w') as fp:
fp.write(
'#!/bin/bash\n'
'ln -s $(pwd)/src $2/modules/foo\n'
'touch $2/lib/foo_stuff.zip\n'
)
bar_manifest_path = os.path.join(self.bar_dir,
module_config._MANIFEST_NAME)
with open(bar_manifest_path, 'w') as fp:
fp.write(
'module_name: modules.bar.bar_module\n'
'container_version: 1.6.0\n'
'tests:\n'
' tests.ext.bar.bar_tests.BarTest: 1\n'
'appengine_libraries:\n'
'- name: endpoints\n'
' version: "1.0"\n')
self._make_module(self.bar_src_dir, 'bar_module.py')
bar_installer_path = os.path.join(self.bar_dir, 'scripts', 'setup.sh')
with open(bar_installer_path, 'w') as fp:
fp.write(
'#!/bin/bash\n'
'ln -s $(pwd)/src $2/modules/bar\n'
)
self.initial_app_yaml = [
'application: mycourse\n',
'runtime: python27\n',
'api_version: 1\n',
'threadsafe: false\n',
'\n',
'env_variables:\n',
' GCB_PRODUCT_VERSION: "1.6.0"\n',
'\n',
'libraries:\n',
'- name: jinja2\n',
' version: "2.6"\n',
]
self.app_yaml_path = os.path.join(self.cb_dir, 'app.yaml')
self._write_content(self.app_yaml_path, self.initial_app_yaml)
self.third_party_tests_path = os.path.join(
self.scripts_dir, 'third_party_tests.yaml')
with open(os.path.join(self.cb_modules_dir, '__init__.py'), 'w'):
pass
self.log_stream = cStringIO.StringIO()
self.old_log_handlers = list(module_config._LOG.handlers)
module_config._LOG.handlers = [logging.StreamHandler(self.log_stream)]
self.save_bundle_root = appengine_config.BUNDLE_ROOT
appengine_config.BUNDLE_ROOT = self.cb_dir
self.save_sys_path = sys.path
sys.path.insert(0, self.cb_dir)
self.save_modules = sys.modules.pop('modules')
def tearDown(self):
module_config._LOG.handlers = self.old_log_handlers
appengine_config.BUNDLE_ROOT = self.save_bundle_root
sys.path = self.save_sys_path
sys.modules['modules'] = self.save_modules
super(ModuleIncorporationTest, self).tearDown()
def _install(self, modules_arg):
if modules_arg:
args = module_config.PARSER.parse_args([modules_arg])
else:
args = module_config.PARSER.parse_args([])
try:
module_config.main(args, self.cb_dir, self.modules_dir)
except Exception, ex:
self._dump_tree()
traceback.print_exc()
raise ex
def _get_log(self):
self.log_stream.flush()
ret = self.log_stream.getvalue()
self.log_stream.reset()
return ret
def _expect_logs(self, expected_lines):
actual_lines = self._get_log().split('\n')
for expected, actual in zip(expected_lines, actual_lines):
self.assertIn(expected, actual)
def test_install_foo(self):
self._install('--targets=foo@%s' % self.foo_dir)
expected = (
self.initial_app_yaml[:7] +
[' GCB_THIRD_PARTY_LIBRARIES: foo_stuff.zip\n',
' GCB_THIRD_PARTY_MODULES: modules.foo.foo_module\n'] +
self.initial_app_yaml[7:]
)
self._assert_content_equals(self.app_yaml_path, expected)
expected = [
'tests:\n',
' tests.ext.foo.foo_tests.FooTest: 1\n',
]
self._assert_content_equals(self.third_party_tests_path, expected)
expected = [
'Downloading module foo',
'Installing module foo',
'Updating scripts/third_party_tests.yaml',
'Updating app.yaml',
'You should change this from its default',
]
self._expect_logs(expected)
def test_install_with_file_localhost_url(self):
self._install('--targets=foo@file://localhost%s' %
os.path.abspath(self.foo_dir))
expected = [
'Downloading module foo',
'Installing module foo',
'Updating scripts/third_party_tests.yaml',
'Updating app.yaml',
'You should change this from its default',
]
self._expect_logs(expected)
def test_install_with_file_nohost_url(self):
self._install('--targets=foo@file://%s' %
os.path.abspath(self.foo_dir))
expected = [
'Downloading module foo',
'Installing module foo',
'Updating scripts/third_party_tests.yaml',
'Updating app.yaml',
'You should change this from its default',
]
self._expect_logs(expected)
def test_install_with_relative_path_url(self):
self._install('--targets=foo@file://%s' %
os.path.relpath(self.foo_dir))
expected = [
'Downloading module foo',
'Installing module foo',
'Updating scripts/third_party_tests.yaml',
'Updating app.yaml',
'You should change this from its default',
]
self._expect_logs(expected)
def test_install_with_relative_path(self):
self._install('--targets=foo@%s' % os.path.relpath(self.foo_dir))
expected = [
'Downloading module foo',
'Installing module foo',
'Updating scripts/third_party_tests.yaml',
'Updating app.yaml',
'You should change this from its default',
]
self._expect_logs(expected)
def test_install_both(self):
self._install('--targets=foo@%s,bar@%s' % (self.foo_dir, self.bar_dir))
expected = (
self.initial_app_yaml[:7] +
[' GCB_THIRD_PARTY_LIBRARIES: foo_stuff.zip\n',
' GCB_THIRD_PARTY_MODULES:\n',
' modules.foo.foo_module\n',
' modules.bar.bar_module\n'] +
self.initial_app_yaml[7:9] +
['- name: endpoints\n',
' version: "1.0"\n',] +
self.initial_app_yaml[9:]
)
self._assert_content_equals(self.app_yaml_path, expected)
expected = [
'tests:\n',
' tests.ext.bar.bar_tests.BarTest: 1\n',
' tests.ext.foo.foo_tests.FooTest: 1\n',
]
self._assert_content_equals(self.third_party_tests_path, expected)
expected = [
'Downloading module foo',
'Installing module foo',
'Downloading module bar',
'Installing module bar',
'Updating scripts/third_party_tests.yaml',
'Updating app.yaml',
'You should change this from its default',
]
self._expect_logs(expected)
def test_reinstall_both(self):
self._install('--targets=foo@%s,bar@%s' % (self.foo_dir, self.bar_dir))
self._get_log()
self._install('--targets=foo@%s,bar@%s' % (self.foo_dir, self.bar_dir))
expected = (
self.initial_app_yaml[:7] +
[' GCB_THIRD_PARTY_LIBRARIES: foo_stuff.zip\n',
' GCB_THIRD_PARTY_MODULES:\n',
' modules.foo.foo_module\n',
' modules.bar.bar_module\n'] +
self.initial_app_yaml[7:9] +
['- name: endpoints\n',
' version: "1.0"\n',] +
self.initial_app_yaml[9:]
)
self._assert_content_equals(self.app_yaml_path, expected)
expected = [
'tests:\n',
' tests.ext.bar.bar_tests.BarTest: 1\n',
' tests.ext.foo.foo_tests.FooTest: 1\n',
]
self._assert_content_equals(self.third_party_tests_path, expected)
expected = [
'Updating scripts/third_party_tests.yaml',
'Updating app.yaml',
'You should change this from its default',
]
self._expect_logs(expected)
def test_reinstall_both_after_manual_removal(self):
self._install('--targets=foo@%s,bar@%s' % (self.foo_dir, self.bar_dir))
self._get_log()
os.unlink(os.path.join(self.cb_dir, 'modules', 'foo'))
os.unlink(os.path.join(self.cb_dir, 'modules', 'bar'))
self._install('--targets=foo@%s,bar@%s' % (self.foo_dir, self.bar_dir))
expected = (
self.initial_app_yaml[:7] +
[' GCB_THIRD_PARTY_LIBRARIES: foo_stuff.zip\n',
' GCB_THIRD_PARTY_MODULES:\n',
' modules.foo.foo_module\n',
' modules.bar.bar_module\n'] +
self.initial_app_yaml[7:9] +
['- name: endpoints\n',
' version: "1.0"\n',] +
self.initial_app_yaml[9:]
)
self._assert_content_equals(self.app_yaml_path, expected)
expected = [
'tests:\n',
' tests.ext.bar.bar_tests.BarTest: 1\n',
' tests.ext.foo.foo_tests.FooTest: 1\n',
]
self._assert_content_equals(self.third_party_tests_path, expected)
expected = [
'Installing module foo',
'Installing module bar',
'Updating scripts/third_party_tests.yaml',
'Updating app.yaml',
'You should change this from its default',
]
self._expect_logs(expected)
def test_install_both_then_reinstall_foo(self):
self._install('--targets=foo@%s,bar@%s' % (self.foo_dir, self.bar_dir))
self._get_log()
self._install('--targets=foo@%s' % self.foo_dir)
expected = (
self.initial_app_yaml[:7] +
[' GCB_THIRD_PARTY_LIBRARIES: foo_stuff.zip\n',
' GCB_THIRD_PARTY_MODULES: modules.foo.foo_module\n'] +
self.initial_app_yaml[7:9] +
['- name: endpoints\n', # Note that AE lib requirement stays.
' version: "1.0"\n',] +
self.initial_app_yaml[9:]
)
self._assert_content_equals(self.app_yaml_path, expected)
expected = [
'tests:\n',
' tests.ext.foo.foo_tests.FooTest: 1\n',
]
self._assert_content_equals(self.third_party_tests_path, expected)
expected = [
'Updating scripts/third_party_tests.yaml',
'Updating app.yaml',
'You should change this from its default',
]
self._expect_logs(expected)
def test_install_both_then_reinstall_bar(self):
self._install('--targets=foo@%s,bar@%s' % (self.foo_dir, self.bar_dir))
self._get_log()
self._install('--targets=bar@%s' % self.bar_dir)
expected = (
self.initial_app_yaml[:7] +
[' GCB_THIRD_PARTY_MODULES: modules.bar.bar_module\n'] +
self.initial_app_yaml[7:9] +
['- name: endpoints\n',
' version: "1.0"\n',] +
self.initial_app_yaml[9:]
)
self._assert_content_equals(self.app_yaml_path, expected)
expected = [
'tests:\n',
' tests.ext.bar.bar_tests.BarTest: 1\n',
]
self._assert_content_equals(self.third_party_tests_path, expected)
expected = [
'Updating scripts/third_party_tests.yaml',
'Updating app.yaml',
'You should change this from its default',
]
self._expect_logs(expected)
def test_install_both_then_reinstall_none(self):
    """Reinstalling with no targets removes all module config.

    app.yaml reverts to its original content (plus the AppEngine library
    entry, which is retained) and the third-party test registry file is
    deleted outright.
    """
    self._install('--targets=foo@%s,bar@%s' % (self.foo_dir, self.bar_dir))
    self._get_log()  # drain logs from the first install
    self._install(None)
    expected = (
        self.initial_app_yaml[:9] +
        ['- name: endpoints\n',  # Note that AE lib requirement stays.
         ' version: "1.0"\n', ] +
        self.initial_app_yaml[9:]
    )
    self._assert_content_equals(self.app_yaml_path, expected)
    # With no modules installed the registry file should not exist at all.
    self.assertFalse(os.path.exists(self.third_party_tests_path))
    expected = [
        'Updating app.yaml',
        'You should change this from its default',
    ]
    self._expect_logs(expected)
def test_appengine_config(self):
    """Smoke test: appengine_config can import the installed modules.

    Installs foo+bar, exports the generated app.yaml env vars, then runs
    the appengine_config bootstrap; the test passes if nothing raises.
    """
    self._install('--targets=foo@%s,bar@%s' % (self.foo_dir, self.bar_dir))
    yaml = yaml_files.AppYamlFile(self.app_yaml_path)
    os.environ.update(yaml.get_all_env())
    # Touch into place the hard-coded expected set of libs so we don't
    # get spurious errors.
    for lib in appengine_config.ALL_LIBS:
        with open(lib.file_path, 'w'):
            pass
    # Just looking for no crash.
    appengine_config._import_and_enable_modules('GCB_THIRD_PARTY_MODULES',
                                                reraise=True)
    appengine_config.gcb_init_third_party()
| |
# -*- coding: utf-8 -*-
# Copyright (C) 2017, 2018
""" Library used for backup and restore operations
"""
import subprocess
import time
import os
import asyncio
import json
from enum import IntEnum
from foglamp.common import logger
from foglamp.common.storage_client import payload_builder
from foglamp.common.storage_client.storage_client import StorageClientAsync
from foglamp.common.configuration_manager import ConfigurationManager
import foglamp.plugins.storage.postgres.backup_restore.exceptions as exceptions
__author__ = "Stefano Simonelli"
__copyright__ = "Copyright (c) 2017, 2018 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_MODULE_NAME = "foglamp_backup_postgres_library"

# Keys are message codes: iNNNNNN = informational, eNNNNNN = warning/error.
# Placeholders ({0}, {1}) are filled with str.format at the call site.
_MESSAGES_LIST = {

    # Information messages
    "i000000": "Information",

    # Warning / Error messages
    "e000000": "general error",
    "e000001": "semaphore file deleted because it was already in existence - file |{0}|",
    "e000002": "semaphore file deleted because it existed even if the corresponding process was not running "
               "- file |{0}| - pid |{1}|",
    "e000003": "ERROR - the library cannot be executed directly."
}
""" Messages used for Information, Warning and Error notice """

# Prepended to external commands by exec_wait when a timeout is requested;
# SIGKILL (-9) guarantees the child cannot outlive the timeout.
_CMD_TIMEOUT = " timeout --signal=9 "
""" Every external commands will be launched using timeout to avoid endless executions """

# Module-level collaborators: assigned by the importing script before any
# function in this module is called (see the __main__ guard at file end).
_logger = None
_storage = None
"""" Objects references assigned by the caller """
def exec_wait(_cmd, _output_capture=False, _timeout=0):
    """ Executes an external/shell command and waits for its completion.

    Args:
        _cmd: command to execute
        _output_capture: if the output of the command should be captured or not
        _timeout: 0 no timeout or the timeout in seconds for the execution of the command
    Returns:
        _exit_code: exit status of the command
        _output: output of the command ("" when _output_capture is False)
    Raises:
    """
    _output = ""

    if _timeout != 0:
        # Bound the execution time by prefixing the shell command with 'timeout'
        _cmd = _CMD_TIMEOUT + str(_timeout) + " " + _cmd
        _logger.debug("{func} - Executing command using the timeout |{timeout}| ".format(
            func="exec_wait",
            timeout=_timeout))

    _logger.debug("{func} - cmd |{cmd}| ".format(func="exec_wait",
                                                 cmd=_cmd))
    if _output_capture:
        process = subprocess.Popen(_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # communicate() drains stdout while waiting for the child to exit.
        # The previous wait()-then-read() sequence can deadlock: the child
        # blocks once the OS pipe buffer fills, while the parent blocks in
        # wait() — see the subprocess documentation warning on Popen.wait().
        output_step1, _ = process.communicate()
        _exit_code = process.returncode
        _output = output_step1.decode("utf-8")
    else:
        process = subprocess.Popen(_cmd, shell=True)
        _exit_code = process.wait()

    _logger.debug("{func} - Executed command - cmd |{cmd}| - exit_code |{exit_code}| - output |{output}| ".format(
        func="exec_wait",
        cmd=_cmd,
        exit_code=_exit_code,
        output=_output))

    return _exit_code, _output
def exec_wait_retry(cmd, output_capture=False, exit_code_ok=0, max_retry=3, write_error=True, sleep_time=1, timeout=0):
    """ Runs an external command, retrying until it returns the wanted exit status.

    Args:
        cmd: command to execute
        output_capture: if the output of the command should be captured or not
        exit_code_ok: exit status to achieve
        max_retry: maximum number of retries to achieve the desired exit status
        write_error: if a message should be generated for each retry
        sleep_time: seconds to sleep between each retry
        timeout: 0= no timeout, or the timeout in seconds for the execution of the external command
    Returns:
        _exit_code: exit status of the command
        _output: output of the command
    Raises:
    """
    global _logger

    _logger.debug("{func} - cmd |{cmd}| ".format(func="exec_wait_retry",
                                                 cmd=cmd))

    _exit_code = 0
    _output = ""

    # The command runs at most max_retry + 1 times in total.
    attempt = 1
    while True:
        _exit_code, _output = exec_wait(cmd, output_capture, timeout)

        if _exit_code == exit_code_ok or attempt > max_retry:
            break

        if write_error:
            short_output = _output[0:50]
            _logger.debug("{func} - cmd |{cmd}| - N retry |{retry}| - message |{msg}| ".format(
                func="exec_wait_retry",
                cmd=cmd,
                retry=attempt,
                msg=short_output)
            )

        time.sleep(sleep_time)
        attempt += 1

    return _exit_code, _output
def cr_strip(text):
    """ Removes every newline and carriage-return character from the string.

    Args:
        text: string to clean
    Returns:
        the input text without any '\\n' or '\\r' characters
    Raises:
    """
    return text.replace("\n", "").replace("\r", "")
class BackupType (IntEnum):
    """ Supported backup types """

    # FULL dumps the whole database; INCREMENTAL is reserved for future use.
    FULL = 1
    INCREMENTAL = 2
class SortOrder (object):
    """ Define the order used to present information """

    # Values are SQL keywords passed straight into ORDER BY clauses.
    ASC = 'ASC'
    DESC = 'DESC'
class BackupStatus (object):
    """ Backup status

    Numeric codes stored in the 'status' column of the backups table;
    `text` maps each code to its human-readable label.
    """

    UNDEFINED = -1
    RUNNING = 1
    COMPLETED = 2
    CANCELLED = 3
    INTERRUPTED = 4
    FAILED = 5
    RESTORED = 6
    ALL = 999  # pseudo-status used when querying regardless of status

    text = {
        UNDEFINED: "undefined",
        RUNNING: "running",
        COMPLETED: "completed",
        CANCELLED: "cancelled",
        INTERRUPTED: "interrupted",
        FAILED: "failed",
        RESTORED: "restored",
        ALL: "all"
    }
class BackupRestoreLib(object):
    """ Library of functionalities for the backup restore operations that requires information/state to be stored """

    # Path of the storage-service executable, relative to FOGLAMP_ROOT.
    STORAGE_EXE = "/services/foglamp.services.storage"

    MAX_NUMBER_OF_BACKUPS_TO_RETRIEVE = 9999
    """" Maximum number of backup information to retrieve from the storage layer"""

    STORAGE_TABLE_BACKUPS = "backups"
    """ Table name containing the backups information"""

    JOB_SEM_FILE_PATH = "/tmp"
    """ Updated by the caller to the proper value """

    JOB_SEM_FILE_BACKUP = ".backup.sem"
    JOB_SEM_FILE_RESTORE = ".restore.sem"
    """" Semaphores information for the handling of the backup/restore synchronization """

    # Postgres commands
    PG_COMMAND_DUMP = "pg_dump"
    PG_COMMAND_RESTORE = "pg_restore"
    PG_COMMAND_PSQL = "psql"

    # Values start as None and are filled in by _check_command_identification
    # with the full path of each usable executable.
    PG_COMMANDS = {PG_COMMAND_DUMP: None,
                   PG_COMMAND_RESTORE: None,
                   PG_COMMAND_PSQL: None
                   }
    """List of Postgres commands to check/validate if they are available and usable
       and the actual Postgres commands to use """

    _MESSAGES_LIST = {

        # Information messages
        "i000001": "Execution started.",
        "i000002": "Execution completed.",

        # Warning / Error messages
        "e000000": "general error",
        "e000001": "cannot initialize the logger - error details |{0}|",
        "e000002": "cannot retrieve the configuration from the manager, trying retrieving from file "
                   "- error details |{0}|",
        "e000003": "cannot retrieve the configuration from file - error details |{0}|",
        "e000004": "...",
        "e000005": "...",
        "e000006": "...",
        "e000007": "backup failed.",
        "e000008": "cannot execute the backup, either a backup or a restore is already running - pid |{0}|",
        "e000009": "...",
        "e000010": "directory used to store backups doesn't exist - dir |{0}|",
        "e000011": "directory used to store semaphores for backup/restore synchronization doesn't exist - dir |{0}|",
        "e000012": "cannot create the configuration cache file, neither FOGLAMP_DATA nor FOGLAMP_ROOT are defined.",
        "e000013": "cannot create the configuration cache file, provided path is not a directory - dir |{0}|",
        "e000014": "the identified path of backups doesn't exists, creation was tried "
                   "- dir |{0}| - error details |{1}|",
        "e000015": "The command is not available neither using the unmanaged approach"
                   " - command |{0}|",
        "e000016": "Postgres command is not executable - command |{0}|",
        "e000017": "The execution of the Postgres command using the -V option produce an error"
                   " - command |{0}| - output |{1}|",
        "e000018": "It is not possible to read data from Postgres"
                   " - command |{0}| - exit code |{1}| - output |{2}|",
        "e000019": "The command is not available using the managed approach"
                   " - command |{0}| - full command |{1}|",
        "e000020": "It is not possible to evaluate if the storage is managed or unmanaged"
                   " - storage plugin |{0}|",
        "e000021": "the SQL command generates an error - error details |{0}| - full command |{1}|",
    }
    """ Messages used for Information, Warning and Error notice """

    _DIR_MANAGED_FOGLAMP_PG_COMMANDS = "plugins/storage/postgres/pgsql/bin"
    """Directory for Postgres commands in a managed configuration"""

    _DB_CONNECTION_STRING = "dbname='{db}'"

    _DEFAULT_FOGLAMP_ROOT = "/usr/local/foglamp"
    """ Default value to use for the FOGLAMP_ROOT if the environment $FOGLAMP_ROOT is not defined """

    _BACKUP_FILE_NAME_PREFIX = "foglamp_backup_"
    """ Prefix used to generate a backup file name """

    _CONFIG_CACHE_FILE = "backup_postgres_configuration_cache.json"
    """ Stores a configuration cache in case the configuration Manager is not available"""

    # Configuration retrieved from the Configuration Manager
    _CONFIG_CATEGORY_NAME = 'BACK_REST'
    _CONFIG_CATEGORY_DESCRIPTION = 'Backup and Restore'

    # Default configuration items; all values are strings as required by the
    # configuration manager and converted to int where applicable when decoded.
    _CONFIG_DEFAULT = {
        "host": {
            "description": "Host server",
            "type": "string",
            "default": "localhost"
        },
        "port": {
            "description": "PostgreSQL port",
            "type": "integer",
            "default": "5432"
        },
        "database": {
            "description": "Database to backup/restore",
            "type": "string",
            "default": "foglamp"
        },
        "schema": {
            "description": "Schema",
            "type": "string",
            "default": "foglamp"
        },
        "backup-dir": {
            "description": "Directory where backups will be created, "
                           "it uses backup-dir if it is specified "
                           "or FOGLAMP_BACKUP if defined or FOGLAMP_DATA/backup as the last resort",
            "type": "string",
            "default": "none"
        },
        "semaphores-dir": {
            "description": "Directory for semaphores for backup/restore synchronization, "
                           "if not specified, backup-dir will be used",
            "type": "string",
            "default": "none"
        },
        "retention": {
            "description": "Number of backups to maintain. Old backups will be deleted",
            "type": "integer",
            "default": "5"
        },
        "max_retry": {
            "description": "Number of retries",
            "type": "integer",
            "default": "5"
        },
        "timeout": {
            "description": "Timeout in seconds for execution of external commands",
            "type": "integer",
            "default": "1200"
        },
        "restart-max-retries": {
            "description": "Maximum number of retries at restarting Foglamp",
            "type": "integer",
            "default": "10"
        },
        "restart-sleep": {
            "description": "Sleep time between each check of the status at the restart of Foglamp "
                           "to ensure it is started successfully",
            "type": "integer",
            "default": "5"
        },
    }

    # Instance-level copies; shadowed in __init__.
    config = {}
    _storage = None
    _logger = None

    def __init__(self, _storage, _logger):
        """ Stores the collaborators and initializes the directory attributes.

        Args:
            _storage: storage-layer client used for all backups-table access
            _logger: logger instance used for diagnostics
        """
        self._storage = _storage
        self._logger = _logger

        self.config = {}

        # FogLAMP directories - populated by evaluate_paths()
        self.dir_foglamp_root = ""
        self.dir_foglamp_data = ""
        self.dir_foglamp_data_etc = ""
        self.dir_foglamp_backup = ""
        self.dir_backups = ""
        self.dir_semaphores = ""

    def sl_backup_status_create(self, _file_name, _type, _status):
        """ Logs the creation of the backup in the Storage layer

        Args:
            _file_name: file_name used for the backup as a full path
            _type: backup type {BackupType.FULL|BackupType.INCREMENTAL}
            _status: backup status, usually BackupStatus.RUNNING
        Returns:
        Raises:
        """
        # NOTE(review): uses the module-level _logger (not self._logger) —
        # confirm this is intended.
        _logger.debug("{func} - file name |{file}| ".format(func="sl_backup_status_create", file=_file_name))

        payload = payload_builder.PayloadBuilder() \
            .INSERT(file_name=_file_name,
                    ts="now()",
                    type=_type,
                    status=_status,
                    exit_code=0) \
            .payload()

        asyncio.get_event_loop().run_until_complete(self._storage.insert_into_tbl(self.STORAGE_TABLE_BACKUPS, payload))

    def sl_backup_status_update(self, _id, _status, _exit_code):
        """ Updates the status of the backup using the Storage layer

        Args:
            _id: Backup's Id to update
            _status: status of the backup, e.g. BackupStatus.COMPLETED or BackupStatus.RESTORED
            _exit_code: exit status of the backup/restore execution
        Returns:
        Raises:
        """
        _logger.debug("{func} - id |{file}| ".format(func="sl_backup_status_update", file=_id))

        payload = payload_builder.PayloadBuilder() \
            .SET(status=_status,
                 ts="now()",
                 exit_code=_exit_code) \
            .WHERE(['id', '=', _id]) \
            .payload()

        asyncio.get_event_loop().run_until_complete(self._storage.update_tbl(self.STORAGE_TABLE_BACKUPS, payload))

    def sl_get_backup_details_from_file_name(self, _file_name):
        """ Retrieves backup information from file name

        Args:
            _file_name: file name to search in the Storage layer
        Returns:
            backup_information: Backup information related to the file name
        Raises:
            exceptions.DoesNotExist
            exceptions.NotUniqueBackup
        """
        payload = payload_builder.PayloadBuilder() \
            .WHERE(['file_name', '=', _file_name]) \
            .payload()

        backups_from_storage = asyncio.get_event_loop().run_until_complete(self._storage.query_tbl_with_payload(self.STORAGE_TABLE_BACKUPS, payload))

        # file_name must identify exactly one backup row
        if backups_from_storage['count'] == 1:
            backup_information = backups_from_storage['rows'][0]
        elif backups_from_storage['count'] == 0:
            raise exceptions.DoesNotExist
        else:
            raise exceptions.NotUniqueBackup

        return backup_information

    def check_for_execution_restore(self):
        """ Executes all the checks to ensure the prerequisites to execute the restore are met

        Args:
        Returns:
        Raises:
        """
        self._check_commands()

    def check_for_execution_backup(self):
        """ Executes all the checks to ensure the prerequisites to execute the backup are met

        Args:
        Returns:
        Raises:
        """
        self._check_commands()
        self._check_db()

    def _check_db(self):
        """ Checks if the database is working properly reading a sample row from the backups table

        Args:
        Returns:
        Raises:
            exceptions.CannotReadPostgres
        """
        cmd_psql = self.PG_COMMANDS[self.PG_COMMAND_PSQL]

        cmd = '{psql} -d {db} -t -c "SELECT id FROM {schema}.{table} LIMIT 1;"'.format(
            psql=cmd_psql,
            db=self.config['database'],
            schema=self.config['schema'],
            table=self.STORAGE_TABLE_BACKUPS)

        _exit_code, output = exec_wait(
            _cmd=cmd,
            _output_capture=True,
            _timeout=self.config['timeout']
        )
        self._logger.debug("{func} - cmd |{cmd}| - exit_code |{exit_code}| output |{output}| ".format(
            func="_check_db",
            cmd=cmd,
            exit_code=_exit_code,
            output=cr_strip(output)))

        if _exit_code != 0:
            _message = self._MESSAGES_LIST["e000018"].format(cmd, _exit_code, output)
            self._logger.error("{0}".format(_message))

            raise exceptions.CannotReadPostgres(_message)

    def _check_commands(self):
        """ Identify and checks the Postgres commands

        Args:
        Returns:
        Raises:
        """
        for cmd in self.PG_COMMANDS:
            cmd_identified = self._check_command_identification(cmd)
            self._check_command_test(cmd_identified)

    def _check_command_identification(self, cmd_to_identify):
        """ Identifies the proper Postgres command to use, 2 possible cases :

        Managed    - command is available in $FOGLAMP_ROOT/plugins/storage/postgres/pgsql/bin
        Unmanaged  - checks using the path and it identifies the used command through 'command -v'

        Args:
            cmd_to_identify: str - command to identify
        Returns:
            cmd_identified: str - actual identified command to use
        Raises:
            exceptions.PgCommandUnAvailable
        """
        is_managed = self._is_plugin_managed("postgres")

        if is_managed:
            # Checks for Managed
            cmd_managed = "{root}/{path}/{cmd}".format(
                root=self.dir_foglamp_root,
                path=self._DIR_MANAGED_FOGLAMP_PG_COMMANDS,
                cmd=cmd_to_identify)

            if os.path.exists(cmd_managed):
                cmd_identified = cmd_managed
            else:
                _message = self._MESSAGES_LIST["e000019"].format(cmd_to_identify, cmd_managed)
                self._logger.error("{0}".format(_message))

                raise exceptions.PgCommandUnAvailable(_message)
        else:
            # Checks for Unmanaged
            cmd = "command -v " + cmd_to_identify

            # The timeout command can't be used with 'command'
            # noinspection PyArgumentEqualDefault
            _exit_code, output = exec_wait(
                _cmd=cmd,
                _output_capture=True,
                _timeout=0
            )
            self._logger.debug("{func} - cmd |{cmd}| - exit_code |{exit_code}| output |{output}| ".format(
                func="_check_command_identification",
                cmd=cmd,
                exit_code=_exit_code,
                output=output))

            if _exit_code == 0:
                cmd_identified = cr_strip(output)
            else:
                _message = self._MESSAGES_LIST["e000015"].format(cmd)
                self._logger.error("{0}".format(_message))

                raise exceptions.PgCommandUnAvailable(_message)

        # Cache the resolved path so later calls can use it directly
        self.PG_COMMANDS[cmd_to_identify] = cmd_identified

        return cmd_identified

    def _is_plugin_managed(self, plugin_to_identify):
        """ Identifies the type of plugin, Managed or not, inquiring the storage executable

        Args:
            plugin_to_identify: str - plugin to evaluate if it is managed or not
        Returns:
            type: boolean - True if it is a managed plugin
        Raises:
        """
        plugin_type = False

        # The storage executable requires the environment FOGLAMP_DATA, so it ensures it is valued
        cmd = "export FOGLAMP_DATA={data_dir};".format(data_dir=self.dir_foglamp_data)

        # Inquires the storage
        file_full_path = self.dir_foglamp_root + self.STORAGE_EXE
        cmd += file_full_path + " --plugin"

        # noinspection PyArgumentEqualDefault
        _exit_code, output = exec_wait(
            _cmd=cmd,
            _output_capture=True,
            _timeout=0
        )
        self._logger.debug("{func} - cmd |{cmd}| - exit_code |{exit_code}| output |{output}| ".format(
            func="_is_plugin_managed",
            cmd=cmd,
            exit_code=_exit_code,
            output=output))

        # Evaluates the storage answer: managed unless the output says "false"
        if plugin_to_identify in output:
            if "false" in output:
                plugin_type = False
            else:
                plugin_type = True
        else:
            _message = self._MESSAGES_LIST["e000020"].format(plugin_to_identify)
            self._logger.error("{0}".format(_message))

            raise exceptions.UndefinedStorage(_message)

        return plugin_type

    def _check_command_test(self, cmd_to_test):
        """ Tests if the Postgres command could be successfully launched/used

        Args:
            cmd_to_test: str -  Command to test
        Returns:
        Raises:
            exceptions.PgCommandUnAvailable
            exceptions.PgCommandNotExecutable
        """
        if os.access(cmd_to_test, os.X_OK):
            cmd = cmd_to_test + " -V"

            _exit_code, output = exec_wait(
                _cmd=cmd,
                _output_capture=True,
                _timeout=self.config['timeout']
            )
            self._logger.debug("{func} - cmd |{cmd}| - exit_code |{exit_code}| output |{output}| ".format(
                func="_check_command_test",
                cmd=cmd,
                exit_code=_exit_code,
                output=output))

            if _exit_code != 0:
                _message = self._MESSAGES_LIST["e000017"].format(cmd, output)
                self._logger.error("{0}".format(_message))

                raise exceptions.PgCommandUnAvailable(_message)
        else:
            _message = self._MESSAGES_LIST["e000016"].format(cmd_to_test)
            self._logger.error("{0}".format(_message))

            raise exceptions.PgCommandNotExecutable(_message)

    def sl_get_backup_details(self, backup_id: int) -> dict:
        """ Returns the details of a backup

        Args:
            backup_id: int - the id of the backup to return
        Returns:
            backup_information: all the information available related to the requested backup_id
        Raises:
            exceptions.DoesNotExist
            exceptions.NotUniqueBackup
        """
        payload = payload_builder.PayloadBuilder().SELECT("id", "status", "ts", "file_name", "type")\
            .ALIAS("return", ("ts", 'ts')).FORMAT("return", ("ts", "YYYY-MM-DD HH24:MI:SS.MS"))\
            .WHERE(['id', '=', backup_id]).payload()

        backup_from_storage = asyncio.get_event_loop().run_until_complete(self._storage.query_tbl_with_payload(self.STORAGE_TABLE_BACKUPS, payload))

        if backup_from_storage['count'] == 0:
            raise exceptions.DoesNotExist
        elif backup_from_storage['count'] == 1:
            backup_information = backup_from_storage['rows'][0]
        else:
            raise exceptions.NotUniqueBackup

        return backup_information

    def psql_cmd(self, sql_cmd):
        """ Execute a sql command and return the results using the psql command line tool

        Args:
            sql_cmd - the SQL Command
        Returns:
            result_data - data returned from the execution, split on '|' column separators
        Raises:
            exceptions.SQLCommandExecutionError
        """
        cmd_psql = self.PG_COMMANDS[self.PG_COMMAND_PSQL]

        cmd = '{psql} -qt -d {db} -c "{sql}"'.format(
            psql=cmd_psql,
            db=self.config['database'],
            sql=sql_cmd)

        result_code, output_1 = exec_wait(cmd, True)

        output_2 = output_1.replace("\n", "")
        result_data = output_2.split('|')

        # Error handling required
        if result_code != 0:
            raise exceptions.SQLCommandExecutionError(self._MESSAGES_LIST["e000021"].format(output_1, cmd))

        return result_data

    def backup_status_update(self, backup_id, status):
        """ Updates the status of the backup in the Storage layer

        Args:
            backup_id: int -
            status: BackupStatus -
        Returns:
        Raises:
        """
        # NOTE(review): uses the module-level _logger, and the SQL is built by
        # string formatting — acceptable only while backup_id/status come from
        # trusted internal callers.
        _logger.debug("{func} - backup id |{id}| ".format(func="backup_status_update",
                                                          id=backup_id))

        sql_cmd = """
            UPDATE foglamp.backups SET status={status} WHERE id='{id}';
        """.format(status=status,
                   id=backup_id, )

        self.psql_cmd(sql_cmd)

    def retrieve_configuration(self):
        """  Retrieves the configuration either from the manager or from a local file.
        the local configuration file is used if the configuration manager is not available
        and updated with the values retrieved from the manager when feasible.

        Args:
        Returns:
        Raises:
            exceptions.ConfigRetrievalError
        """
        # Rebinds the module-level JOB_SEM_FILE_PATH consumed by the Job class
        global JOB_SEM_FILE_PATH

        try:
            self._retrieve_configuration_from_manager()

        except Exception as _ex:
            _message = self._MESSAGES_LIST["e000002"].format(_ex)
            self._logger.warning(_message)

            try:
                self._retrieve_configuration_from_file()

            except Exception as _ex:
                _message = self._MESSAGES_LIST["e000003"].format(_ex)
                self._logger.error(_message)

                raise exceptions.ConfigRetrievalError(_message)
        else:
            # Manager reachable: refresh the local cache file
            self._update_configuration_file()

        # Identifies the directory of backups and checks its existence
        if self.config['backup-dir'] != "none":

            self.dir_backups = self.config['backup-dir']
        else:
            self.dir_backups = self.dir_foglamp_backup

        self._check_create_path(self.dir_backups)

        # Identifies the directory for the semaphores and checks its existence
        # Stores semaphores in the _backups_dir if semaphores-dir is not defined
        if self.config['semaphores-dir'] != "none":

            self.dir_semaphores = self.config['semaphores-dir']
        else:
            self.dir_semaphores = self.dir_backups
            JOB_SEM_FILE_PATH = self.dir_semaphores

        self._check_create_path(self.dir_semaphores)

    def evaluate_paths(self):
        """  Evaluates paths in relation to the environment variables
             FOGLAMP_ROOT, FOGLAMP_DATA and FOGLAMP_BACKUP

        Args:
        Returns:
        Raises:
        """
        # Evaluates FOGLAMP_ROOT
        if "FOGLAMP_ROOT" in os.environ:
            self.dir_foglamp_root = os.getenv("FOGLAMP_ROOT")
        else:
            self.dir_foglamp_root = self._DEFAULT_FOGLAMP_ROOT

        # Evaluates FOGLAMP_DATA
        if "FOGLAMP_DATA" in os.environ:
            self.dir_foglamp_data = os.getenv("FOGLAMP_DATA")
        else:
            self.dir_foglamp_data = self.dir_foglamp_root + "/data"

        # Evaluates FOGLAMP_BACKUP
        if "FOGLAMP_BACKUP" in os.environ:
            self.dir_foglamp_backup = os.getenv("FOGLAMP_BACKUP")
        else:
            self.dir_foglamp_backup = self.dir_foglamp_data + "/backup"

        # Evaluates etc directory
        self.dir_foglamp_data_etc = self.dir_foglamp_data + "/etc"

        self._check_create_path(self.dir_foglamp_backup)
        self._check_create_path(self.dir_foglamp_data_etc)

    def _check_create_path(self, path):
        """  Checks path existences and creates it if needed

        Args:
            path: directory to check/create
        Returns:
        Raises:
            exceptions.InvalidPath
        """
        # Check the path existence
        if not os.path.isdir(path):

            # The path doesn't exists, tries to create it
            try:
                os.makedirs(path)

            except OSError as _ex:
                _message = self._MESSAGES_LIST["e000014"].format(path, _ex)
                self._logger.error("{0}".format(_message))

                raise exceptions.InvalidPath(_message)

    def _retrieve_configuration_from_manager(self):
        """" Retrieves the configuration from the configuration manager

        Args:
        Returns:
        Raises:
        """
        _event_loop = asyncio.get_event_loop()
        cfg_manager = ConfigurationManager(self._storage)

        # Ensures the category exists before reading it back
        _event_loop.run_until_complete(cfg_manager.create_category(
            self._CONFIG_CATEGORY_NAME,
            self._CONFIG_DEFAULT,
            self._CONFIG_CATEGORY_DESCRIPTION))
        self._config_from_manager = _event_loop.run_until_complete(cfg_manager.get_category_all_items
                                                                   (self._CONFIG_CATEGORY_NAME))
        self._decode_configuration_from_manager(self._config_from_manager)

    def _decode_configuration_from_manager(self, _config_from_manager):
        """" Decodes a json configuration as generated by the configuration manager

        Args:
            _config_from_manager: Json configuration to decode
        Returns:
        Raises:
        """
        # Integer-typed items are stored as strings by the manager: convert here.
        self.config['host'] = _config_from_manager['host']['value']

        self.config['port'] = int(_config_from_manager['port']['value'])
        self.config['database'] = _config_from_manager['database']['value']
        self.config['schema'] = _config_from_manager['schema']['value']
        self.config['backup-dir'] = _config_from_manager['backup-dir']['value']
        self.config['semaphores-dir'] = _config_from_manager['semaphores-dir']['value']
        self.config['retention'] = int(_config_from_manager['retention']['value'])
        self.config['max_retry'] = int(_config_from_manager['max_retry']['value'])
        self.config['timeout'] = int(_config_from_manager['timeout']['value'])
        self.config['restart-max-retries'] = int(_config_from_manager['restart-max-retries']['value'])
        self.config['restart-sleep'] = int(_config_from_manager['restart-sleep']['value'])

    def _retrieve_configuration_from_file(self):
        """" Retrieves the configuration from the local file

        Args:
        Returns:
        Raises:
        """
        file_full_path = self._identify_configuration_file_path()

        with open(file_full_path) as file:
            self._config_from_manager = json.load(file)

        self._decode_configuration_from_manager(self._config_from_manager)

    def _update_configuration_file(self):
        """ Updates the configuration file with the values retrieved from tha manager.

        Args:
        Returns:
        Raises:
        """
        file_full_path = self._identify_configuration_file_path()

        with open(file_full_path, 'w') as file:
            json.dump(self._config_from_manager, file)

    def _identify_configuration_file_path(self):
        """ Identifies the path of the configuration cache file,

        Args:
        Returns:
            file_full_path: full path of the configuration cache file
        Raises:
        """
        file_full_path = self.dir_foglamp_data_etc + "/" + self._CONFIG_CACHE_FILE

        return file_full_path
class Job:
    """Synchronizes backup and restore jobs through PID semaphore files."""

    @classmethod
    def _pid_file_retrieve(cls, file_name):
        """ Reads the PID stored inside a semaphore file.

        Args:
            file_name: full path of the semaphore file
        Returns:
            pid: pid retrieved from the semaphore file
        Raises:
        """
        with open(file_name) as semaphore_file:
            return int(semaphore_file.read())

    @classmethod
    def _pid_file_create(cls, file_name, pid):
        """ Writes the given PID into a newly created semaphore file.

        Args:
            file_name: full path of the semaphore file
            pid: pid to store into the semaphore file
        Returns:
        Raises:
        """
        with open(file_name, "w") as semaphore_file:
            semaphore_file.write(str(pid))

    @classmethod
    def _check_semaphore_file(cls, file_name):
        """ Evaluates if a specific either backup or restore operation is in execution.

        Args:
            file_name: semaphore file, full path
        Returns:
            pid: 0= no operation is in execution or the pid retrieved from the semaphore file
        Raises:
        """
        _logger.debug("{func}".format(func="check_semaphore_file"))

        if not os.path.exists(file_name):
            return 0

        pid = cls._pid_file_retrieve(file_name)

        # A semaphore can survive a crash: verify the process is really alive
        try:
            os.getpgid(pid)
        except ProcessLookupError:
            # Stale semaphore - drop it and report no running operation
            os.remove(file_name)

            _message = _MESSAGES_LIST["e000002"].format(file_name, pid)
            _logger.warning("{0}".format(_message))

            pid = 0

        return pid

    @classmethod
    def is_running(cls):
        """ Evaluates if another either backup or restore job is already running.

        Args:
        Returns:
            pid: 0= no operation is in execution or the pid retrieved from the semaphore file
        Raises:
        """
        _logger.debug("{func}".format(func="is_running"))

        # Backup semaphore first; restore is checked only when no backup runs
        backup_sem = JOB_SEM_FILE_PATH + "/" + BackupRestoreLib.JOB_SEM_FILE_BACKUP
        pid = cls._check_semaphore_file(backup_sem)

        if pid == 0:
            restore_sem = JOB_SEM_FILE_PATH + "/" + BackupRestoreLib.JOB_SEM_FILE_RESTORE
            pid = cls._check_semaphore_file(restore_sem)

        return pid

    @classmethod
    def set_as_running(cls, file_name, pid):
        """ Marks a job as running by creating its semaphore file.

        Args:
            file_name: semaphore file either fot backup or restore
            pid: pid of the process to be stored within the semaphore file
        Returns:
        Raises:
        """
        _logger.debug("{func}".format(func="set_as_running"))

        full_path = JOB_SEM_FILE_PATH + "/" + file_name

        # A leftover semaphore is replaced, with a warning
        if os.path.exists(full_path):
            os.remove(full_path)

            _message = _MESSAGES_LIST["e000001"].format(full_path)
            _logger.warning("{0}".format(_message))

        cls._pid_file_create(full_path, pid)

    @classmethod
    def set_as_completed(cls, file_name):
        """ Marks a job as completed by removing its semaphore file.

        Args:
            file_name: semaphore file either for backup or restore operations
        Returns:
        Raises:
        """
        _logger.debug("{func}".format(func="set_as_completed"))

        full_path = JOB_SEM_FILE_PATH + "/" + file_name

        if os.path.exists(full_path):
            os.remove(full_path)
if __name__ == "__main__":

    # This module is a library: direct execution only prints an error.
    message = _MESSAGES_LIST["e000003"]
    print(message)

    if False:
        # Used to assign the proper objects type without actually executing them
        _storage = StorageClientAsync("127.0.0.1", "0")
        _logger = logger.setup(_MODULE_NAME)
| |
import sys
import socket
import select
import collections
import json
import errno
import os.path
try:
import ssl
assert ssl
except ImportError:
ssl = False
try:
from ... import editor
from .. import api, cert, msg, shared as G, utils
from ..exc_fmt import str_e
from . import base, proxy
assert cert and G and msg and proxy and utils
except (ImportError, ValueError):
from floo import editor
from floo.common import api, cert, msg, shared as G, utils
from floo.common.exc_fmt import str_e
import base
import proxy
try:
    # Windows: the WSAE* constants exist only on win32 builds of Python
    connect_errno = (errno.WSAEWOULDBLOCK, errno.WSAEALREADY, errno.WSAEINVAL)
    iscon_errno = errno.WSAEISCONN
    write_again_errno = (errno.EWOULDBLOCK, errno.EAGAIN) + connect_errno
except Exception:
    # POSIX equivalents when the WSAE* attributes raise AttributeError
    connect_errno = (errno.EINPROGRESS, errno.EALREADY)
    iscon_errno = errno.EISCONN
    write_again_errno = (errno.EWOULDBLOCK, errno.EAGAIN) + connect_errno

# True when running under Python 2.x
PY2 = sys.version_info < (3, 0)
def sock_debug(*args, **kwargs):
    """Forward the arguments to msg.log, but only when socket debugging is enabled."""
    if not G.SOCK_DEBUG:
        return
    msg.log(*args, **kwargs)
class FlooProtocol(base.BaseProtocol):
''' Base FD Interface'''
MAX_RETRIES = 12
INITIAL_RECONNECT_DELAY = 500
def __init__(self, host, port, secure=True):
    """Set up connection state; may switch into SSL-proxy mode.

    Args:
        host: server hostname
        port: server port
        secure: whether to use SSL for the connection
    """
    super(FlooProtocol, self).__init__(host, port, secure)
    self._handling = False              # re-entrancy guard for _handle()
    self.connected = False
    self._needs_handshake = bool(secure)  # SSL handshake still pending?
    self._sock = None
    self._q = collections.deque()       # queued outbound items
    self._slice = bytes()
    self._buf_in = bytes()              # unparsed inbound bytes
    self._buf_out = bytes()             # partially-written outbound bytes
    self._reconnect_delay = self.INITIAL_RECONNECT_DELAY
    self._retries = self.MAX_RETRIES
    self._empty_reads = 0
    self._reconnect_timeout = None
    self._cert_path = os.path.join(G.BASE_DIR, 'startssl-ca.pem')
    self.req_id = 0

    # _host/_port/_secure are the *actual* endpoint used on the wire; they
    # can diverge from host/port when a local SSL proxy is interposed.
    self._host = host
    self._port = port
    self._secure = secure
    self._proc = None
    self.proxy = False
    # Sublime Text has a busted SSL module on Linux. Spawn a proxy using OS Python.
    if secure and ssl is False:
        self.proxy = True
        self._host = '127.0.0.1'
        self._port = None
        self._secure = False
def start_proxy(self, host, port):
    """Launch the local SSL proxy subprocess and record the local port it serves.

    Returns the local proxy port, or None in debug mode (G.PROXY_PORT set).
    """
    if G.PROXY_PORT:
        # Debug override: assume a proxy is already listening on this port
        self._port = int(G.PROXY_PORT)
        msg.log('SSL proxy in debug mode: Port is set to %s' % self._port)
        return

    proxy_args = ('python', '-m', 'floo.proxy', '--host=%s' % host, '--port=%s' % str(port), '--ssl=%s' % str(bool(self.secure)))
    self._proc = proxy.ProxyProtocol()
    # If the proxy dies, treat it like a dropped connection
    self._proc.once('stop', self.reconnect)
    self._port = self._proc.connect(proxy_args)
    return self._port
def _handle(self, data):
    """Buffer inbound bytes and dispatch each complete newline-delimited
    JSON message as a 'data' event.

    Args:
        data: bytes just read from the socket
    """
    self._buf_in += data
    # Re-entrancy guard: emitting 'data' can synchronously trigger more
    # reads, which would re-enter this method.
    if self._handling:
        return
    self._handling = True
    while True:
        before, sep, after = self._buf_in.partition(b'\n')
        if not sep:
            # No complete message buffered yet
            break
        try:
            # Node.js sends invalid utf8 even though we're calling write(string, "utf8")
            # Python 2 can figure it out, but python 3 hates it and will die here with some byte sequences
            # Instead of crashing the plugin, we drop the data. Yes, this is horrible.
            before = before.decode('utf-8', 'ignore')
            data = json.loads(before)
        except Exception as e:
            msg.error('Unable to parse json: ', str_e(e))
            msg.error('Data: ', before)
            # XXXX: THIS LOSES DATA
            self._buf_in = after
            continue
        name = data.get('name')
        # Consume the line *before* emitting, so handlers see a consistent buffer
        self._buf_in = after
        try:
            msg.debug('got data ' + (name or 'no name'))
            self.emit('data', name, data)
        except Exception as e:
            api.send_error('Error handling %s event.' % name, str_e(e))
            if name == 'room_info':
                # Failing to join a workspace is fatal for this connection
                editor.error_message('Error joining workspace: %s' % str_e(e))
                self.stop()
    self._handling = False
def _connect(self, host, port, attempts=0):
    """Drive a non-blocking connect, retrying while the socket reports
    'in progress'; on success, optionally SSL-wrap and emit 'connect'.

    Args:
        host: endpoint host to connect to
        port: endpoint port
        attempts: retry counter incremented by the rescheduled calls
    """
    # The original expression `self.proxy and 500 or 500` evaluated to 500
    # on both branches, so the retry cap is just a constant.
    if attempts > 500:
        msg.error('Connection attempt timed out.')
        return self.reconnect()
    if not self._sock:
        msg.debug('_connect: No socket')
        return
    try:
        self._sock.connect((host, port))
        select.select([self._sock], [self._sock], [], 0)
    except socket.error as e:
        if e.errno == iscon_errno:
            # Already connected - fall through to the success path
            pass
        elif e.errno in connect_errno:
            # Connect still in progress: poll again in 20ms
            msg.debug('connect_errno: ', str_e(e))
            return utils.set_timeout(self._connect, 20, host, port, attempts + 1)
        else:
            msg.error('Error connecting: ', str_e(e))
            return self.reconnect()
    if self._secure:
        sock_debug('SSL-wrapping socket')
        # NOTE(review): ssl.wrap_socket is deprecated (removed in Python
        # 3.12); migrating to ssl.SSLContext.wrap_socket would be needed
        # for modern interpreters.
        self._sock = ssl.wrap_socket(self._sock, ca_certs=self._cert_path, cert_reqs=ssl.CERT_REQUIRED, do_handshake_on_connect=False)

    # Fresh connection: discard any stale outbound state
    self._q.clear()
    self._buf_out = bytes()
    self.emit('connect')
    self.connected = True
    def __len__(self):
        # Number of queued outgoing messages (not bytes).
        return len(self._q)
def fileno(self):
return self._sock and self._sock.fileno()
def fd_set(self, readable, writeable, errorable):
if not self.connected:
return
fileno = self.fileno()
errorable.append(fileno)
if self._needs_handshake:
return writeable.append(fileno)
elif len(self) > 0 or self._buf_out:
writeable.append(fileno)
readable.append(fileno)
    def connect(self, conn=None):
        """(Re)establish the client socket, optionally routing through the
        local proxy process and/or the outbound filter proxy.

        `conn` is accepted for interface compatibility but unused here.
        """
        utils.cancel_timeout(self._reconnect_timeout)
        self._reconnect_timeout = None
        self.cleanup()

        host = self._host
        port = self._port
        self._empty_selects = 0

        # Only use proxy.floobits.com if we're trying to connect to floobits.com
        G.OUTBOUND_FILTERING = G.OUTBOUND_FILTERING and self.host == 'floobits.com'

        # TODO: Horrible code here
        if self.proxy:
            if G.OUTBOUND_FILTERING:
                port = self.start_proxy(G.OUTBOUND_FILTER_PROXY_HOST, G.OUTBOUND_FILTER_PROXY_PORT)
            else:
                port = self.start_proxy(self.host, self.port)
        elif G.OUTBOUND_FILTERING:
            host = G.OUTBOUND_FILTER_PROXY_HOST
            port = G.OUTBOUND_FILTER_PROXY_PORT

        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._sock.setblocking(False)

        if self._secure:
            # Write the bundled CA cert out so ssl.wrap_socket can verify
            # against it later.
            with open(self._cert_path, 'wb') as cert_fd:
                cert_fd.write(cert.CA_CERT.encode('utf-8'))

        conn_msg = '%s:%s: Connecting...' % (self.host, self.port)
        # NOTE(review): both of the following checks can fire and append
        # the proxy suffix twice; looks like leftover duplication --
        # confirm which of the two is intended.
        if self.port != self._port or self.host != self._host:
            conn_msg += ' (proxying through %s:%s)' % (self._host, self._port)
        if host != self._host:
            conn_msg += ' (proxying through %s:%s)' % (host, port)
        msg.log(conn_msg)
        editor.status_message(conn_msg)
        self._connect(host, port)
    def cleanup(self, *args, **kwargs):
        """Best-effort teardown of socket, proxy process, and buffers.

        Leaves the object ready for a fresh connect(). Extra args are
        accepted and ignored so this can be used directly as an event
        callback.
        """
        try:
            self._sock.shutdown(2)
        except Exception:
            pass
        try:
            self._sock.close()
        except Exception:
            pass
        try:
            self._proc.cleanup()
        except Exception:
            pass
        self._slice = bytes()
        self._buf_in = bytes()
        self._buf_out = bytes()
        self._sock = None
        # A secure connection will need a fresh SSL handshake next time.
        self._needs_handshake = self._secure
        self.connected = False
        self._proc = None
        self.emit('cleanup')
    def _do_ssl_handshake(self):
        """Advance the non-blocking SSL handshake.

        Returns True once the handshake completes, False if it needs more
        I/O or failed. Failure paths stop or reconnect as a side effect.
        """
        try:
            sock_debug('Doing SSL handshake')
            self._sock.do_handshake()
        except ssl.SSLError as e:
            sock_debug('Floobits: ssl.SSLError. This is expected sometimes.')
            if e.args[0] in [ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE]:
                # Not an error: the handshake just needs another
                # read/write cycle.
                return False
            self.stop()
            editor.error_message('Floobits SSL handshake error: %s' % str(e))
            sock_debug('SSLError args: %s' % ''.join([str(a) for a in e.args]))
        except Exception as e:
            msg.error('Error in SSL handshake: ', str_e(e))
        else:
            sock_debug('Successful handshake')
            self._needs_handshake = False
            editor.status_message('%s:%s: SSL handshake completed' % (self.host, self.port))
            return True
        # Any handled error above falls through to a reconnect.
        self.reconnect()
        return False
    def write(self):
        """Flush queued messages to the socket in 64KB slices.

        Pops strings off self._q, encodes them, and sends until the queue
        is empty or the socket would block. Partial progress is kept in
        self._buf_out / self._slice for the next call.
        """
        sock_debug('Socket is writeable')
        if self._needs_handshake and not self._do_ssl_handshake():
            return
        total = 0
        if not self._slice:
            self._slice = self._buf_out[total:total + 65536]
        try:
            while True:
                if total < len(self._buf_out) or self._slice:
                    sent = self._sock.send(self._slice)
                    sock_debug('Sent %s bytes. Last 10 bytes were %s' % (sent, self._slice[-10:]))
                    if not sent:
                        # Zero-byte send: bail out via the IndexError path.
                        raise IndexError('LOL')
                    total += sent
                    self._slice = self._buf_out[total:total + 65536]
                else:
                    # Current buffer fully sent; pop the next queued message.
                    # popleft() on an empty deque raises IndexError, which
                    # (deliberately) ends the loop.
                    self._buf_out = self._q.popleft().encode('utf-8')
                    total = 0
                    self._slice = self._buf_out[total:total + 65536]
        except IndexError:
            pass
        except socket.error as e:
            if e.errno not in write_again_errno:
                raise
        # Keep whatever didn't make it onto the wire.
        self._buf_out = self._buf_out[total:]
        sock_debug('Done writing for now')
    def read(self):
        """Drain the socket and hand any received bytes to _handle().

        Reconnects if the socket vanished mid-read or if too many
        consecutive reads come back empty.
        """
        sock_debug('Socket is readable')
        if self._needs_handshake and not self._do_ssl_handshake():
            return
        buf = ''.encode('utf-8')
        while True:
            try:
                d = self._sock.recv(65536)
                if not d:
                    break
                buf += d
                # ST2 on Windows with Package Control 3 support!
                # (socket.recv blocks for some damn reason)
                if G.SOCK_SINGLE_READ:
                    break
            except AttributeError:
                # self._sock became None under us (cleanup raced the read).
                sock_debug('_sock is None')
                return self.reconnect()
            except (socket.error, TypeError) as e:
                sock_debug('Socket error:', e)
                break
        if buf:
            self._empty_reads = 0
            # sock_debug('read data')
            return self._handle(buf)
        # sock_debug('empty select')
        self._empty_reads += 1
        # Roughly 3 seconds of consecutive empty reads triggers a reconnect.
        if self._empty_reads > (3000 / G.TICK_TIME):
            msg.error('No data from sock.recv() {0} times.'.format(self._empty_reads))
            return self.reconnect()
def error(self):
raise NotImplementedError('error not implemented.')
    def stop(self):
        """Permanently disconnect: disable further retries, cancel any
        pending reconnect, tear everything down, and emit 'stop'."""
        # -1 retries means reconnect() will never reschedule.
        self._retries = -1
        utils.cancel_timeout(self._reconnect_timeout)
        self._reconnect_timeout = None
        self.cleanup()
        self.emit('stop')
        msg.log('Disconnected.')
    def reconnect(self):
        """Schedule a reconnect with exponential backoff (x1.5, capped at
        10 seconds), decrementing the retry budget each time.

        No-op when a reconnect is already pending; reports failure once
        the budget hits zero.
        """
        if self._reconnect_timeout:
            return
        self.cleanup()
        self._reconnect_delay = min(10000, int(1.5 * self._reconnect_delay))
        if self._retries > 0:
            msg.log('Floobits: Reconnecting in %sms' % self._reconnect_delay)
            self._reconnect_timeout = utils.set_timeout(self.connect, self._reconnect_delay)
        elif self._retries == 0:
            editor.error_message('Floobits Error! Too many reconnect failures. Giving up.')
        # Only use proxy.floobits.com if we're trying to connect to floobits.com
        # (re-enabled every 4th retry).
        G.OUTBOUND_FILTERING = self.host == 'floobits.com' and self._retries % 4 == 0
        self._retries -= 1
def reset_retries(self):
self._reconnect_delay = self.INITIAL_RECONNECT_DELAY
self._retries = self.MAX_RETRIES
    def put(self, item):
        """Assign the next req_id to `item`, queue it as a JSON line for
        write() to send, and return the req_id (None for falsy items)."""
        if not item:
            return
        self.req_id += 1
        item['req_id'] = self.req_id
        msg.debug('writing ', item.get('name', 'NO NAME'),
                  ' req_id ', self.req_id,
                  ' qsize ', len(self))
        # Wire protocol is newline-delimited JSON.
        self._q.append(json.dumps(item) + '\n')
        return self.req_id
| |
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""This is the interface for managing the foreman."""
import json
import time
from grr.gui import renderers
from grr.gui.plugins import flow_management
from grr.lib import aff4
from grr.lib import flow
from grr.lib import rdfvalue
class ManageForeman(renderers.UserLabelCheckMixin, renderers.Splitter2Way):
  """Manages class based flow creation."""
  description = "Automated flows"
  behaviours = frozenset(["General"])
  # Only users labeled admin may open this view.
  AUTHORIZED_LABELS = ["admin"]

  # Top pane lists the rules; bottom pane is filled on selection.
  top_renderer = "ReadOnlyForemanRuleTable"
  bottom_renderer = "EmptyRenderer"
class RegexRuleArray(renderers.RDFValueArrayRenderer):
  """Nicely render all the rules."""
  # Renders the repeated `regex_rules` field of a foreman rule.
  proxy_field = "regex_rules"
class ActionRuleArray(renderers.RDFValueArrayRenderer):
  """Nicely render all the actions for a rule."""
  proxy_field = "actions"
  # Render each action's argv proto-dict with the generic ProtoDict renderer.
  translator = dict(argv=renderers.RDFProtoRenderer.ProtoDict)
class ReadOnlyForemanRuleTable(renderers.TableRenderer):
  """Show all the foreman rules."""

  def __init__(self, **kwargs):
    super(ReadOnlyForemanRuleTable, self).__init__(**kwargs)
    self.AddColumn(renderers.RDFValueColumn("Created"))
    self.AddColumn(renderers.RDFValueColumn("Expires"))
    self.AddColumn(renderers.RDFValueColumn("Description"))
    self.AddColumn(renderers.RDFValueColumn(
        "Rules", renderer=RegexRuleArray, width="100%"))

  def RenderAjax(self, request, response):
    """Renders the table."""
    fd = aff4.FACTORY.Open("aff4:/foreman", token=request.token)
    # Default to an empty list when the foreman has no RULES attribute yet.
    rules = fd.Get(fd.Schema.RULES, [])
    for rule in rules:
      # NOTE(review): an "Actions" value is supplied here but no Actions
      # column is declared in __init__ -- confirm whether it is silently
      # dropped or consumed by a subclass.
      self.AddRow(Created=rule.created,
                  Expires=rule.expires,
                  Description=rule.description,
                  Rules=rule,
                  Actions=rule)

    # Call our baseclass to actually do the rendering
    return super(ReadOnlyForemanRuleTable, self).RenderAjax(request, response)
class ForemanRuleTable(ReadOnlyForemanRuleTable, renderers.UserLabelCheckMixin):
  """Show all existing rules and allow for editing."""
  # Queue other renderers can subscribe to for row-selection events.
  selection_publish_queue = "rule_select"
  AUTHORIZED_LABELS = ["admin"]

  # Selecting a row loads the AddForemanRule editor into the bottom pane.
  layout_template = renderers.TableRenderer.layout_template + """
<script>
//Receive the selection event and emit the rule creation time.
grr.subscribe("select_table_{{ id|escapejs }}", function(node) {
if (node) {
var row_id = node.attr("row_id");
grr.layout("AddForemanRule", "main_bottomPane", {rule_id: row_id});
grr.publish("{{ this.selection_publish_queue|escapejs }}", row_id);
};
}, '{{ unique|escapejs }}');
</script>
"""

  def Layout(self, request, response):
    # First render the toolbar.
    ForemanToolbar().Layout(request, response)
    return super(ForemanRuleTable, self).Layout(request, response)
class ForemanToolbar(renderers.TemplateRenderer):
  """Renders the toolbar."""

  # Clicking "Add Rule" loads the AddForemanRule form into the bottom pane.
  layout_template = renderers.Template("""
<button id="add_rule" title="Add a new rule." class="grr-button">
Add Rule
</button>
<script>
$("#add_rule").button().click(function () {
grr.layout("AddForemanRule", "main_bottomPane");
});
</script>
""")
class AddForemanRule(flow_management.FlowInformation):
  """Present a form to add a new rule."""

  layout_template = renderers.Template("".join((
      # This is the toolbar for manipulating the rule
      """
<div class="toolbar">
<button title="Add Condition" id="AddCondition" class="grr-button">
Add Condition
</button>
<button title="Add Action" id="AddAction" class="grr-button">
Add Action
</button>
<button title="Delete Rule" id="DeleteRule" class="grr-button">
Delete Rule
</button>
</div>
""",
      # Scripts to add new rules based on jquery templates
      """<div id="form_{{unique|escapejs}}" class="FormBody">
<script id="addRuleTemplate" type="text/x-jquery-tmpl">
<tbody id="condition_row_${rule_number}">
<tr><td colspan=3 class="grr_aff4_type_header"><b>Regex Condition</b>
<a href="#" title="Remove condition"
onclick="$('#condition_row_${rule_number}').html('');">
<img src="/static/images/window-close.png" class="toolbar_icon">
</a>
</td></tr>
<tr><td class="proto_key">Path in client</td><td class="proto_value">
<input name="path_${rule_number}" type=text size=40 /></td></tr>
<tr><td class="proto_key">Attribute</td><td class="proto_value">
<select name="attribute_name_${rule_number}" type=text size=1>
{% for option in this.attributes %}
<option>{{option|escape}}</option>
{% endfor %}
</select>
</td> </tr>
<tr><td class="proto_key">Regex</td><td class="proto_value">
<input name="attribute_regex_${rule_number}" type=text size=40 /></td>
</tr>
</tbody>
</script>""",
      # Scripts to add a new action based on jquery templates
      """<script id="addActionTemplate" type="text/x-jquery-tmpl">
<tbody id="action_row_${rule_number}">
<tr><td colspan=3 class="grr_aff4_type_header"><b>Action</b>
<a href="#" title="Remove Action"
onclick="$('#action_row_${rule_number}').html('');">
<img src="/static/images/window-close.png" class="toolbar_icon">
</a>
</td></tr>
<tr><td class="proto_key">Flow Name</td><td class="proto_value">
<select name="flow_name_${rule_number}" type=text size=1
onchange="grr.layout('RenderFlowForm', 'flow_form_${rule_number}',
{rule_id: ${rule_id}, flow: this.value,
action_id: ${rule_number}});">
<option>Select a Flow</option>
{% for option in this.flows %}
<option>{{option|escape}}</option>
{% endfor %}
</select>
</td></tr>
</tbody>
<tbody id="flow_form_${rule_number}"></tbody>
</script>""",
      # Rendering the actual form
      """<h1>Add a new automated rule.</h1>
<form id="form">
<input type="hidden" name="rule_id" />
<table id="ForemanFormBody" class="form_table">
<tbody>
<tr><td class="proto_key">Created On</td>
<td class="proto_value">
<input type=text name="created_text" disabled="disabled"/></td>
</tr>
<tr><td class="proto_key">Expires On</td><td class="proto_value">
<input type=text size=20 name="expires_text"/>
</td></tr>
<tr><td class="proto_key">Description</td><td class="proto_value">
<input type=text size=20 name="description"/></td></tr>
</tbody>
</table>
<table id="ForemanFormRuleBody" class="form_table"></table>
<table id="ForemanFormActionBody" class="form_table"></table>
<input id="submit" type="submit" value="Launch"/>
</form>
</div>""",
      # Initialize the form - adds actions to toolbar items
      """<script>
var defaults = {{ this.defaults|safe }};
// Submit button
$('#submit').button().click(function () {
return grr.submit('AddForemanRuleAction', 'form',
'form_{{unique|escapejs}}', false, grr.layout);
});
$('#AddAction').button().click(function () {
grr.foreman.add_action({rule_id: defaults.rule_id});
});
$('#AddCondition').button().click(function () {
grr.foreman.add_condition({rule_id: defaults.rule_id});
});
$('#DeleteRule').button().click(function () {
grr.layout('DeleteRule', 'form_{{unique|escapejs}}',
{rule_id: defaults.rule_id});
});
$("[name='expires_text']").datepicker(
{dateFormat: 'yy-mm-dd', numberOfMonths: 3});
// Place the first condition
grr.foreman.regex_rules = 0;
for (i=0; i<defaults.rule_count; i++) {
grr.foreman.add_condition(defaults);
};
grr.foreman.action_rules = 0;
for (i=0; i<defaults.action_count; i++) {
grr.foreman.add_action(defaults);
};
grr.update_form('form', defaults);
</script>
""")))

  def Layout(self, request, response):
    """Render the AddForemanRule form."""
    self.defaults = json.dumps(self.BuildDefaults(request))

    # All flows that belong to a category, sorted for the dropdown.
    self.flows = [x for x, cls in flow.GRRFlow.classes.items()
                  if cls.category]
    self.flows.sort()

    # All known AFF4 attribute names for the condition dropdown.
    self.attributes = [x.name for x in aff4.Attribute.NAMES.values()]
    self.attributes.sort()

    return renderers.TemplateRenderer.Layout(self, request, response)

  def BuildDefaults(self, request):
    """Prepopulate defaults from old entry.

    Returns a dict of form defaults; when rule_id selects an existing
    rule its stored values are expanded into per-index form fields.
    """
    rule_id = request.REQ.get("rule_id")
    result = dict(created=int(time.time() * 1e6),
                  expires=int(time.time() + 60 * 60 * 24) * 1e6,
                  rule_count=1, action_count=1, rule_id=-1)

    if rule_id is not None:
      result["rule_id"] = int(rule_id)
      fd = aff4.FACTORY.Open("aff4:/foreman", token=request.token)
      rules = fd.Get(fd.Schema.RULES)
      if rules is not None:
        rule = rules[result["rule_id"]]

        # Make up the get parameters
        result.update(dict(created=rule.created, expires=rule.expires,
                           description=rule.description))

        for i, regex_rule in enumerate(rule.regex_rules):
          for field_desc, value in regex_rule.ListFields():
            result["%s_%s" % (field_desc.name, i)] = str(value)

        # BUG FIX: the original read `i + 1` after the loop above, which
        # raised NameError when the rule had no regex conditions.
        result["rule_count"] = len(rule.regex_rules)

        for i, action_rule in enumerate(rule.actions):
          result["flow_name_%s" % i] = action_rule.flow_name

        result["action_count"] = len(rule.actions)

    # Expand the human readable defaults
    result["created_text"] = str(rdfvalue.RDFDatetime(result["created"]))
    result["expires_text"] = str(rdfvalue.RDFDatetime(result["expires"]))
    return result
class RenderFlowForm(AddForemanRule, flow_management.FlowForm):
  """Render a customized form for a foreman action."""

  layout_template = renderers.Template("""
{% for row in this.form_elements %}
<tr> {{ row|escape }} </tr>
{% endfor %}
<script>
// Fixup checkboxes so they return values even if unchecked.
$(".FormBody").find("input[type=checkbox]").change(function() {
$(this).attr("value", $(this).attr("checked") ? "True" : "False");
});
</script>
""")

  def Layout(self, request, response):
    """Fill in the form with the specific fields for the flow requested."""
    response = renderers.Renderer.Layout(self, request, response)
    rule_id = request.REQ.get("rule_id")
    requested_flow_name = request.REQ.get("flow", "ListDirectory")
    rule_number = int(request.REQ.get("action_id", 0))
    args = []

    if rule_id is not None and int(rule_id) != -1:
      rule_id = int(rule_id)
      fd = aff4.FACTORY.Open("aff4:/foreman", token=request.token)
      rules = fd.Get(fd.Schema.RULES)
      if rules is not None:
        try:
          rule = rules[rule_id]
          action = rule.actions[rule_number]
          flow_name = action.flow_name

          # User has not changed the existing flow
          if flow_name == requested_flow_name:
            action_argv = action.argv.ToDict()
            flow_class = flow.GRRFlow.classes[flow_name]
            args = self.GetArgs(flow_class, request,
                                arg_template="v_%%s_%s" % rule_number)
            # Substitute the stored argv value for each field's default.
            # NOTE(review): action_argv[desc] raises KeyError (not caught
            # below) if the stored argv lacks a field -- confirm upstream
            # guarantees all fields are present.
            fields = []
            for desc, field, _, default in args:
              fields.append((desc, field, action_argv[desc], default))
            args = fields
        except IndexError:
          # Rule or action index out of range -- fall back to fresh args.
          pass

    # User changed the flow - do not count existing values
    if not args:
      flow_class = flow.GRRFlow.classes[requested_flow_name]
      args = self.GetArgs(flow_class, request,
                          arg_template="v_%%s_%s" % rule_number)

    self.form_elements = self.RenderFormElements(args)
    return renderers.TemplateRenderer.Layout(self, request, response)
class AddForemanRuleAction(flow_management.FlowFormAction,
                           renderers.UserLabelCheckMixin):
  """Receive the parameters."""
  AUTHORIZED_LABELS = ["admin"]

  layout_template = renderers.Template("""
Created a new automatic rule:
<pre> {{ this.foreman_rule|escape }}</pre>
<script>
grr.publish("grr_messages", "Created Foreman Rule");
</script>
""")

  error_template = renderers.Template("""
Error: {{ message|escape }}
""")

  def ParseRegexRules(self, request, foreman_rule):
    """Parse out the request and fill in foreman rules."""
    # These should be more than enough
    for i in range(100):
      try:
        foreman_rule.regex_rules.add(
            path=request.REQ["path_%s" % i],
            attribute_name=request.REQ["attribute_name_%s" % i],
            attribute_regex=request.REQ["attribute_regex_%s" % i])
      except KeyError:
        # Index i missing: the condition was removed in the UI. Keep
        # scanning -- later indices may still exist.
        pass

  def ParseActionRules(self, request, foreman_rule):
    """Parse and add actions to foreman rule."""
    for i in range(100):
      flow_name = request.REQ.get("flow_name_%s" % i)
      if not flow_name: continue

      flow_class = flow.GRRFlow.classes[flow_name]
      arg_list = self.GetArgs(flow_class, request,
                              arg_template="v_%%s_%s" % i)
      # Keep only (name, value) pairs from the 5-tuples GetArgs returns.
      args = dict([(k, v) for (k, _, _, v, _) in arg_list])
      foreman_rule.actions.add(flow_name=flow_name,
                               argv=rdfvalue.ProtoDict(args))

  def AddRuleToForeman(self, foreman_rule, token):
    """Add the rule to the foreman."""
    fd = aff4.FACTORY.Create("aff4:/foreman", "GRRForeman",
                             mode="rw", token=token)
    rules = fd.Get(fd.Schema.RULES)
    if rules is None: rules = fd.Schema.RULES()
    rules.Append(foreman_rule)
    fd.Set(fd.Schema.RULES, rules)
    fd.Close()

  @renderers.ErrorHandler()
  def Layout(self, request, response):
    """Process the form action and add a new rule."""
    expire_date = rdfvalue.RDFDatetime.ParseFromHumanReadable(
        request.REQ["expires_text"])
    self.foreman_rule = rdfvalue.ForemanRule(
        description=request.REQ.get("description", ""),
        created=rdfvalue.RDFDatetime().Now(),
        expires=expire_date)

    # Check for sanity
    if self.foreman_rule.expires < self.foreman_rule.created:
      return self.RenderFromTemplate(self.error_template, response,
                                     message="Rule already expired?")
    self.ParseRegexRules(request, self.foreman_rule)
    self.ParseActionRules(request, self.foreman_rule)
    self.AddRuleToForeman(self.foreman_rule, request.token)
    return renderers.TemplateRenderer.Layout(self, request, response)
class DeleteRule(renderers.TemplateRenderer, renderers.UserLabelCheckMixin):
  """Remove the specified rule from the foreman."""
  AUTHORIZED_LABELS = ["admin"]

  layout_template = renderers.Template("""
<h1> Removed rule {{this.rule_id|escape}} </h1>
""")

  def Layout(self, request, response):
    """Remove the rule from the foreman."""
    self.rule_id = int(request.REQ.get("rule_id", -1))

    fd = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=request.token)
    existing = fd.Get(fd.Schema.RULES)
    kept = fd.Schema.RULES()
    if self.rule_id >= 0 and existing is not None:
      # Copy every rule except the one being deleted.
      for index, rule in enumerate(existing):
        if index != self.rule_id:
          kept.Append(rule)

    # Replace the rules with the new ones
    fd.Set(fd.Schema.RULES, kept)
    fd.Close()
    return renderers.TemplateRenderer.Layout(self, request, response)
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Christopher Lenz
# Copyright (C) 2011 Matthew Good
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Mapping from raw JSON data structures to Python objects and vice versa.
To define a document mapping, you declare a Python class inherited from
`Mapping`, and add any number of `Field` attributes:
>>> from jsonmapper import TextField, IntegerField, DateField
>>> class Person(Mapping):
... name = TextField()
... age = IntegerField()
... added = DateTimeField(default=datetime.now)
>>> person = Person(name='John Doe', age=42)
>>> person #doctest: +ELLIPSIS
<Person ...>
>>> person.age
42
"""
import copy
from calendar import timegm
from datetime import date, datetime, time
from decimal import Decimal
from time import strptime, struct_time
__all__ = ['Mapping', 'Field', 'TextField', 'FloatField',
'IntegerField', 'LongField', 'BooleanField', 'DecimalField',
'DateField', 'DateTimeField', 'TimeField', 'DictField', 'ListField',
'TypedField',
]
__docformat__ = 'restructuredtext en'
DEFAULT = object()
class Field(object):
    """Basic unit for mapping a piece of data between Python and JSON.

    Instances of this class can be added to subclasses of `Mapping` to
    describe the mapping of a document. Acts as a data descriptor:
    reads convert stored JSON values via `_to_python`, writes convert
    via `_to_json`.
    """

    def __init__(self, name=None, default=None):
        self.name = name
        self.default = default

    def __get__(self, instance, owner):
        # Class-level access returns the field object itself.
        if instance is None:
            return self
        raw = instance._data.get(self.name)
        if raw is not None:
            return self._to_python(raw)
        fallback = self.default
        if fallback is None:
            return None
        # Callable defaults are factories, invoked per access.
        return fallback() if callable(fallback) else fallback

    def __set__(self, instance, value):
        # None is stored as-is (explicit null); everything else is
        # converted to its JSON representation first.
        if value is not None:
            value = self._to_json(value)
        instance._data[self.name] = value

    def _to_python(self, value):
        return unicode(value)

    def _to_json(self, value):
        return self._to_python(value)
class MappingMeta(type):
    """Metaclass for `Mapping` classes.

    Gathers inherited `_fields` dicts from all bases, then registers any
    `Field` attributes declared on the class itself (naming unnamed
    fields after their attribute name), and stores the combined result
    as the `_fields` class attribute.
    """

    def __new__(cls, name, bases, d):
        collected = {}
        for base in bases:
            collected.update(getattr(base, '_fields', {}))
        for key, value in d.items():
            if isinstance(value, Field):
                if not value.name:
                    value.name = key
                collected[key] = value
        d['_fields'] = collected
        return type.__new__(cls, name, bases, d)
class Mapping(object):
    # Python 2 metaclass hook: MappingMeta collects Field attributes into
    # the `_fields` class dict used below.
    __metaclass__ = MappingMeta

    def __init__(self, **values):
        """Initialize from keyword values; fields not supplied get their
        defaults materialized into the raw `_data` dict."""
        self._data = {}
        for attrname, field in self._fields.items():
            if attrname in values:
                setattr(self, attrname, values.pop(attrname))
            else:
                # Re-assigning the current (default) value pushes it
                # through the field's _to_json conversion into _data.
                setattr(self, attrname, getattr(self, attrname))

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._data)

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data or ())

    def __delitem__(self, name):
        del self._data[name]

    def __getitem__(self, name):
        # Raw (JSON-side) access, bypassing field conversion.
        return self._data[name]

    def __setitem__(self, name, value):
        self._data[name] = value

    def get(self, name, default=None):
        return self._data.get(name, default)

    def setdefault(self, name, default):
        return self._data.setdefault(name, default)

    def unwrap(self):
        """Return the underlying raw data dict."""
        return self._data

    @classmethod
    def build(cls, **d):
        """Build an anonymous Mapping subclass from the given fields."""
        fields = {}
        for attrname, attrval in d.items():
            if not attrval.name:
                attrval.name = attrname
            fields[attrname] = attrval
        d['_fields'] = fields
        return type('AnonymousStruct', (cls,), d)

    @classmethod
    def wrap(cls, data):
        """Wrap an existing raw dict without copying or validating it."""
        instance = cls()
        instance._data = data
        return instance

    def _to_python(self, value):
        return self.wrap(value)

    def _to_json(self, value):
        # NOTE(review): `value` is ignored and self's own data returned --
        # presumably only ever invoked with value is self; confirm.
        return self.unwrap()

    def items(self):
        """Return the fields as a list of ``(name, value)`` tuples.

        This method is provided to enable easy conversion to native dictionary
        objects, for example to allow use of `Mapping` instances with
        `client.Database.update`.

        >>> class Post(Mapping):
        ...     id = TextField()
        ...     title = TextField()
        ...     author = TextField()
        >>> post = Post(id='foo-bar', title='Foo bar', author='Joe')
        >>> sorted(post.items())
        [('author', u'Joe'), ('id', u'foo-bar'), ('title', u'Foo bar')]

        :return: a list of ``(name, value)`` tuples
        """
        return self._data.items()
class TextField(Field):
    """Mapping field for string values."""
    # A builtin stored on the class is not bound as a method, so
    # self._to_python(v) simply calls unicode(v).
    _to_python = unicode
class FloatField(Field):
    """Mapping field for float values."""
    # Builtin class attribute: self._to_python(v) calls float(v).
    _to_python = float
class IntegerField(Field):
    """Mapping field for integer values."""
    # Builtin class attribute: self._to_python(v) calls int(v).
    _to_python = int
class LongField(Field):
    """Mapping field for long integer values."""
    # Python 2 only: `long` does not exist in Python 3.
    _to_python = long
class BooleanField(Field):
    """Mapping field for boolean values."""
    # Truthiness conversion; JSON booleans already decode to bool.
    _to_python = bool
class DecimalField(Field):
    """Mapping field for decimal values."""

    def _to_python(self, value):
        # The JSON side carries the decimal as a string, avoiding float
        # rounding on round trips.
        return Decimal(value)

    def _to_json(self, value):
        return unicode(value)
class DateField(Field):
    """Mapping field for storing dates.

    >>> field = DateField()
    >>> field._to_python('2007-04-01')
    datetime.date(2007, 4, 1)
    >>> field._to_json(date(2007, 4, 1))
    '2007-04-01'
    >>> field._to_json(datetime(2007, 4, 1, 15, 30))
    '2007-04-01'
    """

    def _to_python(self, value):
        # Non-string values are passed through unchanged.
        if isinstance(value, basestring):
            try:
                value = date(*strptime(value, '%Y-%m-%d')[:3])
            except ValueError:
                raise ValueError('Invalid ISO date %r' % value)
        return value

    def _to_json(self, value):
        if isinstance(value, datetime):
            # Datetimes are truncated to their date component.
            value = value.date()
        return value.isoformat()
class DateTimeField(Field):
    """Mapping field for storing date/time values.

    >>> field = DateTimeField()
    >>> field._to_python('2007-04-01T15:30:00Z')
    datetime.datetime(2007, 4, 1, 15, 30)
    >>> field._to_json(datetime(2007, 4, 1, 15, 30, 0, 9876))
    '2007-04-01T15:30:00Z'
    >>> field._to_json(date(2007, 4, 1))
    '2007-04-01T00:00:00Z'
    """

    def _to_python(self, value):
        if isinstance(value, basestring):
            try:
                value = value.split('.', 1)[0]  # strip out microseconds
                value = value.rstrip('Z')  # remove timezone separator
                value = datetime(*strptime(value, '%Y-%m-%dT%H:%M:%S')[:6])
            except ValueError:
                raise ValueError('Invalid ISO date/time %r' % value)
        return value

    def _to_json(self, value):
        if isinstance(value, struct_time):
            # struct_time is interpreted as UTC (timegm).
            value = datetime.utcfromtimestamp(timegm(value))
        elif not isinstance(value, datetime):
            # Plain dates are promoted to midnight.
            value = datetime.combine(value, time(0))
        return value.replace(microsecond=0).isoformat() + 'Z'
class TimeField(Field):
    """Mapping field for storing times.

    >>> field = TimeField()
    >>> field._to_python('15:30:00')
    datetime.time(15, 30)
    >>> field._to_json(time(15, 30))
    '15:30:00'
    >>> field._to_json(datetime(2007, 4, 1, 15, 30))
    '15:30:00'
    """

    def _to_python(self, value):
        if isinstance(value, basestring):
            try:
                value = value.split('.', 1)[0]  # strip out microseconds
                value = time(*strptime(value, '%H:%M:%S')[3:6])
            except ValueError:
                raise ValueError('Invalid ISO time %r' % value)
        return value

    def _to_json(self, value):
        if isinstance(value, datetime):
            # Datetimes are reduced to their time component.
            value = value.time()
        return value.replace(microsecond=0).isoformat()
class DictField(Field):
    """Field type for nested dictionaries.

    >>> class Post(Mapping):
    ...     title = TextField()
    ...     content = TextField()
    ...     author = DictField(Mapping.build(
    ...         name = TextField(),
    ...         email = TextField()
    ...     ))
    ...     extra = DictField()
    >>> post = Post(
    ...     title='Foo bar',
    ...     author=dict(name='John Doe',
    ...                 email='john@doe.com'),
    ...     extra=dict(foo='bar'),
    ... )
    >>> post #doctest: +ELLIPSIS
    <Post ...>
    >>> post.author.name
    u'John Doe'
    >>> post.author.email
    u'john@doe.com'
    >>> post.extra
    {'foo': 'bar'}
    >>> class Blog(Mapping):
    ...     post = DictField(Post)
    >>> blog = Blog.wrap({'post': {'title': 'Foo', 'author': {'name': 'Jane Doe', 'email': 'jane@doe.com'}, 'extra': {}}})
    >>> blog.post.title
    u'Foo'
    >>> blog = Blog(post=post)
    >>> blog.post.author.name
    u'John Doe'
    """

    def __init__(self, mapping=None, name=None, default=None):
        # Copy the default dict per document so instances don't share it.
        default = default or {}
        Field.__init__(self, name=name, default=lambda: default.copy())
        self.mapping = mapping

    def _to_python(self, value):
        # With no mapping, expose the raw dict; otherwise wrap it.
        if self.mapping is None:
            return value
        else:
            return self.mapping.wrap(value)

    def _to_json(self, value):
        if self.mapping is None:
            return value
        if not isinstance(value, Mapping):
            # Plain dicts are validated through the mapping's constructor.
            value = self.mapping(**value)
        return value.unwrap()
class TypedField(Field):
    """Chooses the mapping based on a "type" field for polymorphic data mapping.

    >>> class Foo(Mapping):
    ...     x = TextField()
    >>> class Bar(Mapping):
    ...     y = TextField()
    >>> class Baz(Mapping):
    ...     z = TypedField({'foo': Foo, 'bar': Bar})
    >>> Baz.wrap({'z': {'type': 'foo', 'x': 'hello'}}).z
    <Foo {'x': 'hello', 'type': 'foo'}>
    >>> Baz.wrap({'z': {'type': 'bar', 'y': 'world'}}).z
    <Bar {'y': 'world', 'type': 'bar'}>
    """

    def __init__(self, mappings, type_key='type', name=None, default=None):
        """:param mappings: dict mapping type-tag values to Mapping classes
        :param type_key: dict key whose value selects the mapping
        :param default: optional default dict, copied per access
        """
        if default is not None:
            # BUG FIX: bind the dict as a lambda default argument. The
            # original `lambda: default.copy()` closed over the *name*
            # `default`, which this assignment rebinds to the lambda
            # itself, so calling the stored default raised AttributeError
            # instead of copying the dict.
            default = lambda d=default: d.copy()
        Field.__init__(self, name=name, default=default)
        self.type_key = type_key
        self.mappings = mappings

    def _to_python(self, value):
        # Dispatch on the embedded type tag.
        mapping = self.mappings[value[self.type_key]]
        return mapping.wrap(value)

    def _to_json(self, value):
        if isinstance(value, Mapping):
            # Reverse-lookup the type tag from the value's class.
            for value_type, mapping in self.mappings.iteritems():
                if isinstance(value, mapping):
                    break
            else:
                # FIXME better error message
                raise ValueError('Unknown value type')
        else:
            # Raw dict: validate it through the tagged mapping's constructor.
            value_type = value[self.type_key]
            mapping = self.mappings[value_type]
            value = mapping(**value)
        value = value.unwrap()
        value[self.type_key] = value_type
        return value
class ListField(Field):
    """Field type for sequences of other fields.

    >>> class Post(Mapping):
    ...     title = TextField()
    ...     content = TextField()
    ...     pubdate = DateTimeField(default=datetime.now)
    ...     comments = ListField(DictField(Mapping.build(
    ...         author = TextField(),
    ...         content = TextField(),
    ...         time = DateTimeField()
    ...     )))
    >>> post = Post(title='Foo bar')
    >>> post.comments.append(author='myself', content='Bla bla',
    ...                      time=datetime.now())
    >>> len(post.comments)
    1
    >>> post #doctest: +ELLIPSIS
    <Post ...>
    >>> comment = post.comments[0]
    >>> comment['author']
    u'myself'
    >>> comment['content']
    u'Bla bla'
    >>> comment['time'] #doctest: +ELLIPSIS
    '...T...Z'
    """

    def __init__(self, field, name=None, default=None):
        # Copy the default list per document so instances don't share it.
        default = default or []
        Field.__init__(self, name=name, default=lambda: copy.copy(default))
        if type(field) is type:
            # Allow passing a Field or Mapping *class* instead of an
            # instance; instantiate/wrap it appropriately.
            if issubclass(field, Field):
                field = field()
            elif issubclass(field, Mapping):
                field = DictField(field)
        self.field = field

    def _to_python(self, value):
        return self.Proxy(value, self.field)

    def _to_json(self, value):
        return [self.field._to_json(item) for item in value]

    class Proxy(list):
        """Mutable view over the raw JSON list: reads convert items via
        field._to_python, writes via field._to_json.

        NOTE: __getslice__/__setslice__/__delslice__ and __nonzero__ are
        Python 2-only protocols.
        """

        def __init__(self, list, field):
            self.list = list
            self.field = field

        def __lt__(self, other):
            return self.list < other

        def __le__(self, other):
            return self.list <= other

        def __eq__(self, other):
            return self.list == other

        def __ne__(self, other):
            return self.list != other

        def __gt__(self, other):
            return self.list > other

        def __ge__(self, other):
            return self.list >= other

        def __repr__(self):
            return repr(self.list)

        def __str__(self):
            return str(self.list)

        def __unicode__(self):
            return unicode(self.list)

        def __delitem__(self, index):
            del self.list[index]

        def __getitem__(self, index):
            return self.field._to_python(self.list[index])

        def __setitem__(self, index, value):
            self.list[index] = self.field._to_json(value)

        def __delslice__(self, i, j):
            del self.list[i:j]

        def __getslice__(self, i, j):
            return ListField.Proxy(self.list[i:j], self.field)

        def __setslice__(self, i, j, seq):
            self.list[i:j] = (self.field._to_json(v) for v in seq)

        def __contains__(self, value):
            # Compare in Python space so converted equality is used.
            for item in self.list:
                if self.field._to_python(item) == value:
                    return True
            return False

        def __iter__(self):
            for index in range(len(self)):
                yield self[index]

        def __len__(self):
            return len(self.list)

        def __nonzero__(self):
            return bool(self.list)

        def append(self, *args, **kwargs):
            # DictField-backed lists support append(**fields) as a shortcut.
            if args or not isinstance(self.field, DictField):
                if len(args) != 1:
                    raise TypeError('append() takes exactly one argument '
                                    '(%s given)' % len(args))
                value = args[0]
            else:
                value = kwargs
            self.list.append(self.field._to_json(value))

        def count(self, value):
            # Count in Python space so converted equality is used.
            return [i for i in self].count(value)

        def extend(self, list):
            for item in list:
                self.append(item)

        def index(self, value):
            return self.list.index(self.field._to_json(value))

        def insert(self, idx, *args, **kwargs):
            # Mirrors append(): DictField lists allow insert(idx, **fields).
            if args or not isinstance(self.field, DictField):
                if len(args) != 1:
                    raise TypeError('insert() takes exactly 2 arguments '
                                    '(%s given)' % len(args))
                value = args[0]
            else:
                value = kwargs
            self.list.insert(idx, self.field._to_json(value))

        def remove(self, value):
            return self.list.remove(self.field._to_json(value))

        def pop(self, *args):
            return self.field._to_python(self.list.pop(*args))
| |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import inspect
import re
import shlex
import sre_constants
import os
__developer_api__ = True
class CfgType(object):
    """A config value type wrapper.

    Gives a config value a conversion to and from a string, a way to copy
    values, and a way to print the string for display (if different from
    converting to a string).

    NOTE: most subclasses probably don't have to implement all of these
    methods; for most it is enough to implement parseString. List- or
    dict-like subclasses should derive from CfgDict so that parseString
    remains the only override needed.
    """

    # Subclasses that don't declare a default fall back to None.
    default = None

    def copy(self, val):
        """Return a deep copy of the given value."""
        return copy.deepcopy(val)

    def parseString(self, str):
        """Parse the given string into a config value.

        The base type is the identity conversion; the return value is
        what gets assigned to a configuration item.
        """
        return str

    def updateFromString(self, val, str):
        """Parse `str` and merge the result into the current value `val`.

        For simple items a second assignment just overwrites the first,
        so `val` may be ignored (as it is here). Modifying `val` in place
        is also acceptable for subclasses.
        """
        return self.parseString(str)

    def setFromString(self, val, str):
        """Parse `str` as a wholesale replacement for `val`.

        Generally identical to parseString, except in odd cases such as
        CfgCallback.
        """
        return self.parseString(str)

    def set(self, curVal, newVal):
        """Replace `curVal` with a copy of `newVal`."""
        return self.copy(newVal)

    def getDefault(self, default=None):
        """Return a copy of `default`, or of the class default when the
        argument is omitted (None)."""
        source = self.default if default is None else default
        return self.copy(source)

    def format(self, val, displayOptions=None):
        """Return a display string for `val`; `displayOptions` is unused
        by the base type."""
        return str(val)

    def toStrings(self, val, displayOptions=None):
        """Return the display form of `val` as a list of strings.

        None renders as the literal ['None'].
        """
        if val is None:
            return ['None']
        if displayOptions is None:
            displayOptions = {}
        return [self.format(val, displayOptions)]
#---------- simple configuration item types
# A configuration type converts from string -> ConfigValue and from
# ConfigValue -> string, and may store information about how to make that
# change, but does NOT contain actual configuration values.
# Plain strings need no conversion, so the base type doubles as CfgString.
CfgString = CfgType
def _pathIsAbsolute(path):
if path in ['stdin', 'stdout', 'stderr', ':memory:']:
return True
if '$' in path or '~' in path:
return False
return os.path.isabs(path)
# Cache of interned Path objects, keyed by the original (unexpanded) string.
_pathCache = {}
def Path(path):
    # Factory: returns a _Path (taken literally) or an _ExpandedPath (with
    # ~ and $VAR expansion applied), reusing cached instances where possible.
    cached = _pathCache.get(path)
    absolute = _pathIsAbsolute(path)
    if absolute:
        if cached:
            return cached
        p = _Path(path)
    else:
        try:
            p = _ExpandedPath(path)
        except OSError:
            # Expansion can fail (e.g. '~nosuchuser'); fall back to the
            # literal path.
            p = _Path(path)
    if cached == p:
        # Expanded to the same value as before -- reuse the cached object.
        p = cached
    else:
        _pathCache[path] = p
    return p
class _Path(str):
__slots__ = []
def _getUnexpanded(self):
return self
def __repr__(self):
return "<Path '%s'>" % self
class _ExpandedPath(_Path):

    """A path stored in expanded form that remembers its original string."""

    def __new__(cls, origString):
        expanded = os.path.abspath(
            os.path.expanduser(os.path.expandvars(origString)))
        return str.__new__(cls, expanded)

    def __init__(self, origString):
        self._origString = origString

    def _getUnexpanded(self):
        # Report the path exactly as the user wrote it.
        return self._origString

    def __repr__(self):
        return "<Path '%s'>" % (self,)
class CfgPath(CfgType):

    """
    String configuration option holding a filesystem path; '~' stands in
    for $HOME and environment variables are expanded.
    """

    def parseString(self, str):
        return Path(str)

    def getDefault(self, default=None):
        val = CfgType.getDefault(self, default)
        if not val:
            return val
        return Path(val)

    def format(self, val, displayOptions=None):
        if displayOptions is None:
            displayOptions = {}
        # Show the original (unexpanded) form unless explicitly asked not to.
        wantExpanded = displayOptions.get('expandPaths', False)
        if hasattr(val, '_getUnexpanded') and not wantExpanded:
            return val._getUnexpanded()
        return str(val)
class CfgInt(CfgType):

    """Integer configuration value."""

    def parseString(self, val):
        """Parse *val* as an integer; raise ParseError on bad input."""
        try:
            return int(val)
        except ValueError:
            # Call-style raise (Python 2.6+/3 compatible) matching the
            # style CfgBytes already uses; the caught message was unused.
            raise ParseError('expected integer')
class CfgBool(CfgType):

    """Boolean configuration value; accepts 0/1/true/false (any case)."""

    default = False

    def parseString(self, val):
        # Lowercase once instead of per-branch; call-style raise is
        # Python 2.6+/3 compatible (matches CfgBytes' style).
        lowered = val.lower()
        if lowered in ('0', 'false'):
            return False
        if lowered in ('1', 'true'):
            return True
        raise ParseError("expected True or False")
class CfgRegExp(CfgType):

    """ RegularExpression type.

    Values are stored as an (original string, compiled pattern) tuple.
    """

    def copy(self, val):
        # Recompile from the source string rather than deep-copying the
        # compiled pattern object.
        return (val[0], re.compile(val[0]))

    def parseString(self, val):
        """Compile *val*; raise ParseError if it is not a valid regexp."""
        try:
            return (val, re.compile(val))
        except re.error as e:
            # re.error is the public alias of sre_constants.error; using it
            # avoids the deprecated sre_constants module.  Call-style raise
            # is Python 2.6+/3 compatible.
            raise ParseError(str(e))

    def format(self, val, displayOptions=None):
        return val[0]
class CfgSignedRegExp(CfgRegExp):

    """SignedRegularExpression type.

    Patterns are prefixed with '+' (positive match) or '-' (negative
    match) and stored as an (original string, sense, compiled) tuple.
    """

    def copy(self, val):
        return (val[0], val[1], re.compile(val[0]))

    def parseString(self, val):
        """Parse a '+regex' or '-regex' string into (val, sense, compiled)."""
        # val[:1] instead of val[0]: an empty string now yields a clean
        # ParseError instead of an IndexError.
        if val[:1] == "+":
            sense = 1
        elif val[:1] == "-":
            sense = -1
        else:
            raise ParseError(
                "regexp value '%s' needs to start with + or -" % (val,))
        try:
            return (val, sense, re.compile(val[1:]))
        except re.error as e:
            # re.error aliases the deprecated sre_constants.error.
            raise ParseError(
                "regexp '%s' parse error\n" % (val[1:],) + str(e))

    def format(self, val, displayOptions=None):
        # NOTE(review): val[0] already carries the +/- prefix as parsed, so
        # this prepends a second sign character; preserved as-is pending
        # confirmation of the intended display format.
        return "%s%s" % ("- +"[val[1] + 1], val[0])
class CfgEnum(CfgType):

    """ Enumerated value type.

    Input strings are matched case-insensitively against validValues.
    validValues may be declared as a list or a dict; __init__ normalizes
    it to a dict mapping lowercased input -> stored value, and origName
    maps stored value -> original display name.
    """

    validValues = {}
    origName = {}

    def __init__(self):
        CfgType.__init__(self)
        if isinstance(self.validValues, list):
            self.origName = dict((x, x) for x in self.validValues)
            self.validValues = dict((x.lower(), x) for x in self.validValues)
        else:
            # items() instead of the Python-2-only iteritems(); note that
            # origName must be built before validValues is rebound.
            self.origName = dict((value, key)
                                 for (key, value) in self.validValues.items())
            self.validValues = dict((key.lower(), value)
                                    for (key, value) in self.validValues.items())

    def checkEntry(self, val):
        """Raise ParseError unless *val* is one of the allowed inputs."""
        if val.lower() not in self.validValues:
            raise ParseError('%s not in (case insensitive): %s' % (
                str(val), '|'.join(self.validValues)))

    def parseString(self, val):
        self.checkEntry(val)
        return self.validValues[val.lower()]

    def format(self, val, displayOptions):
        if val not in self.origName:
            raise ParseError("%s not in: %s" % (
                str(val), '|'.join(str(x) for x in self.origName)))
        return self.origName[val]
class CfgCallBack(CfgType):

    """ Type that forwards every assignment to a callback function.

    The callback is invoked as callBackFn(value, *params); the config
    machinery never stores a value itself.
    """

    def __init__(self, callBackFn, *params):
        self.callBackFn = callBackFn
        self.params = params

    def setFromString(self, curVal, str):
        self.callBack(str)

    def updateFromString(self, curVal, str):
        self.callBack(str)

    def callBack(self, val):
        args = (val,) + self.params
        self.callBackFn(*args)
# ---- configuration structures
# Below here are more complicated configuration structures.
# They allow you to go from string -> container
# The abstract containers can all be modified to change their container
# type, and their item type.
class CfgLineList(CfgType):

    """ List type whose string form holds every item on one line, split
    on `separator` (any whitespace when separator is None).
    """

    def __init__(self, valueType, separator=None, listType=list,
                 default=None):
        if inspect.isclass(valueType) and issubclass(valueType, CfgType):
            valueType = valueType()
        self.listType = listType
        self.separator = separator
        self.valueType = valueType
        # default=None instead of default=[]: a mutable default argument is
        # shared across calls; each instance now gets its own empty list.
        self.default = [] if default is None else default

    def parseString(self, val):
        if val == '[]':
            return self.listType()
        return self.listType(self.valueType.parseString(x)
                             for x in val.split(self.separator) if x)

    def updateFromString(self, val, str):
        # Line lists overwrite on reassignment rather than accumulating.
        return self.parseString(str)

    def getDefault(self, default=None):
        if default is None:
            default = self.default
        return [self.valueType.getDefault(x) for x in default]

    def copy(self, val):
        return self.listType(self.valueType.copy(x) for x in val)

    def toStrings(self, value, displayOptions=None):
        if value:
            separator = self.separator
            if separator is None:
                separator = ' '
            yield separator.join(
                self.valueType.format(x, displayOptions) for x in value)
class CfgQuotedLineList(CfgLineList):

    """ Line list whose items are shell-quoted and parsed with shlex. """

    def __init__(self, valueType, listType=list, default=None):
        # default=None avoids the shared mutable default argument; the
        # empty list is created freshly per instance.
        if default is None:
            default = []
        CfgLineList.__init__(self, valueType=valueType, listType=listType,
                             default=default)

    def parseString(self, val):
        return self.listType(self.valueType.parseString(x)
                             for x in shlex.split(val) if x)

    def toStrings(self, value, displayOptions=None):
        if displayOptions is None:
            displayOptions = {}
        if value:
            yield "'" + "' '".join(
                [self.valueType.format(x, displayOptions) for x in value]) + "'"
class CfgList(CfgType):

    """ List type built up one item per configuration line; assigning
    '[]' resets the accumulated list.
    """

    def __init__(self, valueType, listType=list, default=None):
        if inspect.isclass(valueType) and issubclass(valueType, CfgType):
            valueType = valueType()
        self.valueType = valueType
        self.listType = listType
        # default=None instead of default=[]: mutable default arguments are
        # shared across all instances; create a fresh list per instance.
        self.default = [] if default is None else default

    def parseString(self, val):
        if val == '[]':
            return self.listType()
        return self.listType([self.valueType.parseString(val)])

    def updateFromString(self, val, str):
        # '[]' resets the accumulated list; anything else appends to it.
        if str == '[]':
            return self.listType()
        val.extend(self.parseString(str))
        return val

    def getDefault(self, default=None):
        if default is None:
            default = self.default
        return self.listType(self.valueType.getDefault(x) for x in default)

    def copy(self, val):
        return self.listType(self.valueType.copy(x) for x in val)

    def toStrings(self, value, displayOptions=None):
        if displayOptions is None:
            displayOptions = {}
        if not value:
            yield '[]'
        else:
            for item in value:
                # loop variable renamed: the original shadowed builtin 'str'
                for text in self.valueType.toStrings(item, displayOptions):
                    yield text
class CfgDict(CfgType):

    """ Dict type; each configuration line is '<key> <value string>'. """

    def __init__(self, valueType, dictType=dict, default=None):
        if inspect.isclass(valueType) and issubclass(valueType, CfgType):
            valueType = valueType()
        self.valueType = valueType
        self.dictType = dictType
        # default=None instead of default={}: a mutable default argument is
        # shared across calls; create a fresh dict per instance instead.
        self.default = {} if default is None else default

    def setFromString(self, val, str):
        return self.updateFromString(self.dictType(), str)

    def set(self, curVal, newVal):
        curVal.update(newVal)
        return curVal

    def updateFromString(self, val, str):
        # update the dict value -- don't just overwrite it, it might be
        # that the dict value is a list, so we call updateFromString
        # on the item type for keys that already exist.
        strs = str.split(None, 1)
        if len(strs) == 1:
            dkey, dvalue = strs[0], ''
        else:
            (dkey, dvalue) = strs
        if dkey in val:
            val[dkey] = self.valueType.updateFromString(val[dkey], dvalue)
        else:
            val[dkey] = self.parseValueString(dkey, dvalue)
        return val

    def parseString(self, val):
        return self.updateFromString({}, val)

    def parseValueString(self, key, value):
        # Hook for subclasses that need key-dependent parsing.
        return self.valueType.parseString(value)

    def getDefault(self, default=None):
        if default is None:
            default = self.default
        # items() replaces the Python-2-only iteritems().
        return self.dictType((key, self.valueType.getDefault(value))
                             for (key, value) in default.items())

    def toStrings(self, value, displayOptions):
        # sorted(value) replaces the Python-2-only sorted(value.iterkeys()).
        for key in sorted(value):
            val = value[key]
            for item in self.valueType.toStrings(val, displayOptions):
                if displayOptions and displayOptions.get('prettyPrint', False):
                    key = '%-25s' % key
                yield ' '.join((key, item))

    def copy(self, val):
        return dict((k, self.valueType.copy(v)) for k, v in val.items())
class CfgEnumDict(CfgDict):

    """ Dict type whose keys and values are restricted to validValues
    (a mapping of allowed key -> iterable of allowed values).
    """

    validValues = {}

    def __init__(self, valueType=CfgString, default=None):
        # default=None avoids the shared mutable default argument.
        if default is None:
            default = {}
        CfgDict.__init__(self, valueType, default=default)

    def checkEntry(self, val):
        """Raise ParseError unless the 'key value' pair is allowed."""
        k, v = val.split(None, 1)
        k = k.lower()
        v = v.lower()
        if k not in self.validValues:
            raise ParseError('invalid key "%s" not in "%s"' % (
                k, '|'.join(self.validValues.keys())))
        if v not in self.validValues[k]:
            raise ParseError('invalid value "%s" for key %s not in "%s"' % (
                v, k, '|'.join(self.validValues[k])))

    def parseString(self, val):
        self.checkEntry(val)
        return CfgDict.parseString(self, val)
class RegularExpressionList(list):

    """ The actual configuration *value* (not a config type) produced by
    CfgRegExpList: a list of (pattern string, compiled pattern) tuples.
    """

    def __repr__(self):
        return 'RegularExpressionList(%s)' % list.__repr__(self)

    def __deepcopy__(self, memo):
        # Entries are immutable tuples, so a shallow rebuild suffices.
        return RegularExpressionList(self)

    def addExp(self, val):
        list.append(self, CfgRegExp().parseString(val))

    def match(self, s):
        """Return True when any stored pattern matches the start of *s*."""
        for entry in self:
            if entry[1].match(s):
                return True
        return False
class SignedRegularExpressionList(list):

    """
    Like RegularExpressionList, but every entry carries a +/- sense tag.
    match() returns 1 for a positive hit, -1 for a negative hit and 0
    when nothing matches; the first matching pattern wins.
    """

    def __repr__(self):
        return "SignedRegularExpressionList(%s)" % list.__repr__(self)

    def addExp(self, val):
        list.append(self, CfgSignedRegExp().parseString(val))

    def match(self, s):
        for entry in self:
            if entry[2].match(s):
                return entry[1]
        return 0
class CfgRegExpList(CfgList):

    """ List type holding regular expressions; each line may carry several
    whitespace-separated patterns.
    """

    listType = RegularExpressionList
    valueType = CfgRegExp

    def __init__(self, default=None):
        # The original default=listType() built ONE shared list instance at
        # class-definition time; build a fresh one per instance instead.
        if default is None:
            default = self.listType()
        CfgList.__init__(self, valueType=self.valueType,
                         listType=self.listType, default=default)

    def parseString(self, val):
        if val == '[]':
            return self.listType()
        return self.listType([self.valueType.parseString(x)
                              for x in val.split()])
class CfgSignedRegExpList(CfgRegExpList):
    # Same line syntax as CfgRegExpList, but each pattern carries a +/-
    # sign and matches with positive/negative sense.
    listType = SignedRegularExpressionList
    valueType = CfgSignedRegExp
# Ready-made type: a colon-separated list of paths ($PATH-style).
CfgPathList = CfgLineList(CfgPath, ':')
class CfgBytes(CfgType):

    """
    Scalar type that holds either a number of bytes or, with
    perSecond=True, a rate in bytes per second.  Accepts SI (k/M/G) and
    binary (Ki/Mi/Gi) suffixes, e.g. '123 MB' or '10 KiB/s'.
    """

    # Ordered largest-first so format() picks the biggest exact divisor.
    scales = [
        ('G', 1000000000),
        ('Gi', 1024*1024*1024),
        ('M', 1000000),
        ('Mi', 1024*1024),
        ('k', 1000),
        ('K', 1000),
        ('ki', 1024),
        ('Ki', 1024),
        ('', 1),
    ]
    scales_d = dict(scales)
    # Raw string: '\d' in a plain literal is an invalid escape sequence
    # (SyntaxWarning/DeprecationWarning on modern Python).
    pattern = r'^(\d+) *([kKMG]?i?)([bB]?)[/p]?s?$'

    def __init__(self, defaultScale='', perSecond=False):
        assert defaultScale in self.scales_d
        self.perSecond = perSecond
        self.defaultScale = defaultScale

    def parseString(self, text):
        """Parse '<number>[ ][scale][B][/s]' into a plain byte count."""
        m = re.search(self.pattern, text)
        if not m:
            if self.perSecond:
                raise ParseError("invalid rate '%s'. Example: 123 kB/s" %
                        (text,))
            else:
                raise ParseError("invalid byte value '%s'. Example: 123 MB" %
                        (text,))
        value, suffix, isBytes = m.groups()
        # A bare number (no scale suffix and no 'B') is interpreted in the
        # configured default scale.
        if not suffix and not isBytes:
            suffix = self.defaultScale
        value = int(value) * self.scales_d[suffix]
        return value

    def format(self, val, displayOptions=None):
        if not val:
            # Zero formats in the default scale.
            suffix = self.defaultScale
        else:
            for suffix, scale in self.scales:
                if val % scale == 0:
                    val //= scale
                    break
        return '%d %sB%s' % (val, suffix, '/s' if self.perSecond else '')
# --- errors
class CfgError(Exception):

    """
    Base class for every exception raised by the cfg module.
    """
class ParseError(CfgError):

    """
    Raised when a configuration file value cannot be parsed.
    """

    def __init__(self, val):
        self.val = str(val)

    def __str__(self):
        return self.val
class CfgEnvironmentError(CfgError):

    """
    Raised when a configuration file cannot be read from the filesystem.
    """

    def __init__(self, path, msg):
        self.msg = msg
        self.path = path

    def __str__(self):
        return "Error reading config file %s: %s" % (self.path, self.msg)
| |
import unittest
import numpy as np
import os
from fipy.tools import dump
from scarce.testing import tools
from scarce import constant
from scarce import fields, geometry
class TestFields(unittest.TestCase):

    """Checks numeric weighting field/potential solvers of the `scarce`
    package against analytic solutions for planar sensors.

    NOTE(review): several index expressions use `nx / 2`, which is only an
    integer under Python 2 division; under Python 3 these would need
    `nx // 2` — confirm the supported interpreter version.
    """

    @classmethod
    def tearDownClass(cls):
        # Remove the mesh file shared by the planar-sensor tests.
        os.remove('planar_mesh_tmp_2.msh')
    def test_w_potential_analytic(self):
        ''' Check analytic weighting potential calculation
        for planar sensors.
        '''
        self.assertTrue(tools.check_with_fixture(
            fields.get_weighting_potential_analytic,
            x=np.linspace(-200, 200, 200),
            y=np.linspace(0, 250, 200),
            D=[100, 150, 200, 250],
            S=[5, 50, 100, 150, 250],
            is_planar=True))
    def test_w_field_analytic(self):
        ''' Check analytic weighting field calculation
        for planar sensors.
        '''
        self.assertTrue(tools.check_with_fixture(
            fields.get_weighting_field_analytic,
            x=np.linspace(-200, 200, 200),
            y=np.linspace(0, 250, 200),
            D=[100, 150, 200, 250],
            S=[5, 50, 100, 150, 250],
            is_planar=True))
    def test_w_pot_field_analytic(self):
        ''' Check weighting potential/field of planar sensor.
        Check if grad(-Phi) = E_w_x, E_w_y
        '''
        for width in [5., 50, 250]:
            for thickness in [50., 100., 200.]:
                x = np.linspace(-width * 2, width * 2, 1000)
                y = np.linspace(0, thickness, 1000)
                xx, yy = np.meshgrid(x, y, sparse=True)
                E_w_x, E_w_y = fields.get_weighting_field_analytic(
                    xx, yy, D=thickness, S=width, is_planar=True)
                Phi_w = fields.get_weighting_potential_analytic(
                    xx, yy, D=thickness, S=width, is_planar=True)
                # Check for constant gradient (uniform grid spacing), so a
                # single scalar step can be passed to np.gradient below.
                assert np.allclose(np.gradient(x), np.gradient(x)[0])
                assert np.allclose(np.gradient(y), np.gradient(y)[0])
                E_w_y_2, E_w_x_2 = np.gradient(-Phi_w, np.gradient(y)[0],
                                               np.gradient(x)[0])
                array_1 = np.array([E_w_x, E_w_y])
                array_2 = np.array([E_w_x_2, E_w_y_2])
                # Assert that less than 1 % of the field poinst have an error >
                # 1%
                self.assertLess(tools.compare_arrays(array_1,
                                                     array_2,
                                                     threshold=0.01), 0.01)
    def test_weighting_potential_planar(self):
        ''' Compares estimated weighting potential to analytical solution.
        '''
        # Influences how correct the field for the center pixel(s) is
        # due to more far away infinite boundary condition
        n_pixel = 11
        for i, width in enumerate([50., 200.]):
            # FIXME: 50 um thichness does not work
            for j, thickness in enumerate([50., 250.]):
                # Analytical solution only existing for pixel width = readout
                # pitch (100% fill factor)
                pitch = width
                # Tune resolution properly for time/accuracy trade off.
                # NOTE(review): the 'continue' statements skip three of the
                # four (i, j) combinations entirely; only i==1, j==1 runs.
                if i == 0 and j == 0:
                    resolution = 200
                    continue
                elif i == 0 and j == 1:
                    resolution = 100
                    continue
                elif i == 1 and j == 0:
                    resolution = 600
                    continue  # FIXME: 50 thichness / 200 width does not work
                elif i == 1 and j == 1:
                    resolution = 200
                else:
                    raise RuntimeError('Loop index unknown')
                mesh = geometry.mesh_planar_sensor(
                    n_pixel=n_pixel,
                    width=width,
                    thickness=thickness,
                    resolution=resolution,
                    filename='planar_mesh_tmp_2.msh')
                potential = fields.calculate_planar_sensor_w_potential(
                    mesh=mesh,
                    width=width,
                    pitch=pitch,
                    n_pixel=n_pixel,
                    thickness=thickness)
                # Field/potential domain spanning all pixels.
                min_x, max_x = -width * float(n_pixel), width * float(n_pixel)
                min_y, max_y = 0., thickness
                nx, ny = 1000, 1000
                potential_description = fields.Description(potential,
                                                           min_x=min_x,
                                                           max_x=max_x,
                                                           min_y=min_y,
                                                           max_y=max_y,
                                                           nx=nx,
                                                           ny=ny,
                                                           smoothing=0.1)
                def potential_analytic(x, y):
                    return fields.get_weighting_potential_analytic(
                        x, y,
                        D=thickness,
                        S=width,
                        is_planar=True)
                # Create x,y grid
                x = np.linspace(min_x, max_x, nx)
                y = np.linspace(min_y, max_y, ny)
                xx, yy = np.meshgrid(x, y, sparse=True)
                # Evaluate potential on a grid
                pot_analytic = potential_analytic(xx, yy)
                pot_numeric = potential_description.get_potential(xx, yy)
                # import matplotlib.pyplot as plt
                # for pos_x in [0, 10, 15, 30, 45]:
                #     plt.plot(y, pot_analytic.T[nx / 2 + pos_x, :],
                #              label='Analytic')
                # for pos_x in [0, 10, 15, 30, 45]:
                #     plt.plot(y, pot_numeric.T[nx / 2 + pos_x, :],
                #              label='Numeric')
                # plt.legend(loc=0)
                # plt.show()
                # Check only at center pixel, edge pixel are not interessting
                for pos_x in [-45, -30, -15, -10, 0, 10, 15, 30, 45]:
                    # NOTE(review): nx / 2 is integer-valued only under
                    # Python 2 division.
                    sel = pot_analytic.T[nx / 2 + pos_x, :] > 0.01
                    # Check with very tiny and tuned error allowance
                    self.assertTrue(np.allclose(
                        pot_analytic.T[nx / 2 + pos_x, sel],
                        pot_numeric.T[nx / 2 + pos_x, sel],
                        rtol=0.01, atol=0.005))
    def test_weighting_field_planar(self):
        ''' Compare weighting field to numerical solution.
        '''
        width = 50.
        # Analytical solution only existing for pixel width = readout pitch
        # (100 % fill factor)
        pitch = width
        thickness = 200.
        n_pixel = 11
        mesh = geometry.mesh_planar_sensor(
            n_pixel=n_pixel,
            width=width,
            thickness=thickness,
            resolution=200,
            filename='planar_mesh_tmp_2.msh')
        potential = fields.calculate_planar_sensor_w_potential(
            mesh=mesh,
            width=width,
            pitch=pitch,
            n_pixel=n_pixel,
            thickness=thickness)
        # Define field/potential domain
        min_x, max_x = -width * float(n_pixel), width * float(n_pixel)
        min_y, max_y = 0., thickness
        # Create x,y grid
        nx, ny = 1000, 1000
        x = np.linspace(min_x, max_x, nx)
        y = np.linspace(min_y, max_y, ny)
        xx, yy = np.meshgrid(x, y, sparse=True)
        field_description = fields.Description(potential,
                                               min_x=min_x,
                                               max_x=max_x,
                                               min_y=min_y,
                                               max_y=max_y,
                                               nx=nx,
                                               ny=ny,
                                               smoothing=0.2)
        def field_analytic(x, y):
            return fields.get_weighting_field_analytic(x, y, D=thickness,
                                                       S=width, is_planar=True)
        # Evaluate field on a grid
        f_analytic_x, f_analytic_y = field_analytic(xx, yy)
        f_numeric_x, f_numeric_y = field_description.get_field(xx, yy)
        # Check only at center pixel, edge pixel are not interessting
        for pox_x in [-45, -30, -15, -10, 0, 10, 15, 30, 45]:
            # NOTE(review): nx / 2 assumes Python 2 integer division.
            self.assertTrue(np.allclose(
                f_analytic_x.T[
                    nx / 2 + pox_x, :], f_numeric_x.T[nx / 2 + pox_x, :],
                rtol=0.01, atol=0.01))
            self.assertTrue(np.allclose(
                f_analytic_y.T[
                    nx / 2 + pox_x, :], f_numeric_y.T[nx / 2 + pox_x, :],
                rtol=0.01, atol=0.01))
    def test_potential_smoothing(self):
        ''' Checks the smoothing of the potential to be independent of the
            potential values.
        '''
        n_pixel = 11
        width = 50.
        thickness = 50.
        # Create x,y grid
        min_x, max_x = -width * float(n_pixel), width * float(n_pixel)
        min_y, max_y = 0., thickness
        nx, ny = 1000, 1000
        x = np.linspace(min_x, max_x, nx)
        y = np.linspace(min_y, max_y, ny)
        xx, yy = np.meshgrid(x, y, sparse=True)
        # Load potential solution to save time
        potential = dump.read(
            filename=os.path.join(constant.FIXTURE_FOLDER, 'potential.sc'))
        def upcale_potential(potential, V_readout, V_bias):
            ''' Scales potential to [V_bias, V_readout] to simulate other bias settings
            '''
            return ((potential - np.nanmin(potential)) /
                    (np.nanmax(potential) - np.nanmin(potential))) * \
                (V_readout - V_bias) + V_readout
        def downscale_potential(potential):
            ''' Scales potential to [0, 1] to make the smoothing result comparible
            '''
            return (potential - np.nanmin(potential)) / (np.nanmax(potential) -
                                                         np.nanmin(potential))
        potential_descr = fields.Description(potential,
                                             min_x=min_x,
                                             max_x=max_x,
                                             min_y=min_y,
                                             max_y=max_y,
                                             nx=nx,
                                             ny=ny)
        # Expected result for the std. smoothing value and a potential between
        # 0 and 1
        pot_numeric = downscale_potential(
            potential_descr.get_potential_smooth(xx, yy))
        for V_bias in [-100, -1000]:
            for V_readout in [50, 0, -50]:
                # Create fake data with different bias by upscaling
                potential_scaled = upcale_potential(
                    potential, V_readout, V_bias)
                # Describe upscaled data
                potential_descr_scaled = fields.Description(potential_scaled,
                                                            min_x=min_x,
                                                            max_x=max_x,
                                                            min_y=min_y,
                                                            max_y=max_y,
                                                            nx=nx,
                                                            ny=ny)
                # Downscale smoothed potential for comparison
                pot_numeric_2 = downscale_potential(
                    potential_descr_scaled.get_potential_smooth(xx, yy))
                self.assertTrue(
                    np.allclose(pot_numeric, pot_numeric_2, equal_nan=True))
if __name__ == "__main__":
    # Configure basic console logging before handing over to unittest.
    import logging
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s %(levelname)s %(message)s",
                        datefmt="%Y-%m-%d %H:%M:%S")
    unittest.main()
| |
import re
import sys
import copy
import socket
from datetime import datetime
from decimal import Decimal
from collections import Mapping, Container
# Python 2/3 compatibility aliases used by the type checks below.
if sys.version_info[0] == 3:
    _str_type = str
    _int_types = (int,)
else:
    # On Python 2 text may be str or unicode, and ints may be int or long.
    _str_type = basestring
    _int_types = (int, long)
class SchemaError(ValueError):

    """ Raised when the schema definition itself is malformed
    (subclass of :class:`ValueError`) """
class ValidationError(ValueError):

    """ Raised for errors found while validating data against a schema
    (subclass of :class:`ValueError`) """
class FieldValidationError(ValidationError):

    """
    Validation error tied to one field; the offending `fieldname` and
    `value` are kept as attributes.
    """

    def __init__(self, message, fieldname, value):
        detail = "Value {!r} for field '{}' {}".format(value, fieldname, message)
        super(FieldValidationError, self).__init__(detail)
        self.fieldname = fieldname
        self.value = value
class DependencyValidationError(ValidationError):

    """
    Raised when a field required by another field's dependency
    declaration is absent.
    """

    def __init__(self, message):
        super(DependencyValidationError, self).__init__(message)
class RequiredFieldValidationError(ValidationError):

    """
    Raised when a field the schema marks as required is missing.
    """

    def __init__(self, message):
        super(RequiredFieldValidationError, self).__init__(message)
class MultipleValidationError(ValidationError):

    """Aggregate of several validation errors (collected when the
    validator runs with fail_fast=False)."""

    def __init__(self, errors):
        summary = '\n'.join(str(e) for e in errors)
        msg = "{} validation errors:\n{}".format(len(errors), summary)
        super(MultipleValidationError, self).__init__(msg)
        self.errors = errors
def _generate_datetime_validator(format_option, dateformat_string):
def validate_format_datetime(validator, fieldname, value, format_option):
try:
datetime.strptime(value, dateformat_string)
except:
msg = "is not in '{format_option}' format"
raise FieldValidationError(msg.format(format_option=format_option), fieldname, value)
return validate_format_datetime
# Ready-made validators for the standard draft-3 date/time formats.
validate_format_date_time = _generate_datetime_validator('date-time', '%Y-%m-%dT%H:%M:%SZ')
validate_format_date = _generate_datetime_validator('date', '%Y-%m-%d')
validate_format_time = _generate_datetime_validator('time', '%H:%M:%S')
def validate_format_utc_millisec(validator, fieldname, value, format_option):
    """Accept any positive number; note that 0 itself is rejected."""
    is_number = isinstance(value, _int_types + (float, Decimal))
    if not is_number or value <= 0:
        msg = "is not a positive number"
        raise FieldValidationError(msg, fieldname, value)
def validate_format_ip_address(validator, fieldname, value, format_option):
    """Validate a dotted-quad IPv4 address string."""
    try:
        # inet_aton() accepts short forms ("1" -> "0.0.0.1"), so also
        # require exactly four dot-separated parts.
        socket.inet_aton(value)
        ip = len(value.split('.')) == 4
    except Exception:
        # Narrowed from a bare 'except:' so system-exiting exceptions
        # (KeyboardInterrupt, SystemExit) are no longer swallowed; kept
        # broad otherwise since non-string values raise varied errors.
        ip = False
    if not ip:
        msg = "is not a ip-address"
        raise FieldValidationError(msg, fieldname, value)
# Format name -> validator callable, used whenever a schema specifies
# "format": "<name>"; custom validators may extend/override this set.
DEFAULT_FORMAT_VALIDATORS = {
    'date-time': validate_format_date_time,
    'date': validate_format_date,
    'time': validate_format_time,
    'utc-millisec': validate_format_utc_millisec,
    'ip-address': validate_format_ip_address,
}
class SchemaValidator(object):
'''
Validator largely based upon the JSON Schema proposal but useful for
validating arbitrary python data structures.
:param format_validators: optional dictionary of custom format validators
:param required_by_default: defaults to True, set to False to make
``required`` schema attribute False by default.
:param blank_by_default: defaults to False, set to True to make ``blank``
schema attribute True by default.
:param disallow_unknown_properties: defaults to False, set to True to
disallow properties not listed in the schema definition
:param apply_default_to_data: defaults to False, set to True to modify the
data in case the schema definition includes a "default" property
'''
    def __init__(self, format_validators=None, required_by_default=True,
                 blank_by_default=False, disallow_unknown_properties=False,
                 apply_default_to_data=False, fail_fast=True):
        # Per-instance registries; _errors only accumulates entries when
        # fail_fast is False (see _error()).
        self._format_validators = {}
        self._errors = []
        # add the default format validators
        for key, value in DEFAULT_FORMAT_VALIDATORS.items():
            self.register_format_validator(key, value)
        # register any custom format validators if they were provided
        # (custom entries override same-named defaults registered above)
        if format_validators:
            for key, value in format_validators.items():
                self.register_format_validator(key, value)
        self.required_by_default = required_by_default
        self.blank_by_default = blank_by_default
        self.disallow_unknown_properties = disallow_unknown_properties
        self.apply_default_to_data = apply_default_to_data
        self.fail_fast = fail_fast
    def register_format_validator(self, format_name, format_validator_fun):
        # Register (or override) the callable used for a "format" name.
        self._format_validators[format_name] = format_validator_fun
    # --- "type" checkers: validate_type_<name> is looked up dynamically by
    # validate_type() from the schema's type string.
    def validate_type_string(self, val):
        return isinstance(val, _str_type)
    def validate_type_integer(self, val):
        # type() rather than isinstance() so that bool (a subclass of int)
        # does not count as an integer.
        return type(val) in _int_types
    def validate_type_number(self, val):
        # type() rather than isinstance(): bool must not count as a number.
        return type(val) in _int_types + (float, Decimal,)
    def validate_type_boolean(self, val):
        return type(val) == bool
    def validate_type_object(self, val):
        # Accept Mappings and anything sufficiently dict-like.
        return isinstance(val, Mapping) or (hasattr(val, 'keys') and hasattr(val, 'items'))
    def validate_type_array(self, val):
        return isinstance(val, (list, tuple))
    def validate_type_null(self, val):
        return val is None
    def validate_type_any(self, val):
        return True
    def _error(self, desc, value, fieldname, exctype=FieldValidationError, **params):
        # Central error funnel: formats the message, then either raises
        # immediately (fail_fast) or collects the error for a later
        # MultipleValidationError.
        params['value'] = value
        params['fieldname'] = fieldname
        message = desc.format(**params)
        if exctype == FieldValidationError:
            err = FieldValidationError(message, fieldname, value)
        elif exctype == DependencyValidationError:
            err = DependencyValidationError(message)
        elif exctype == RequiredFieldValidationError:
            err = RequiredFieldValidationError(message)
        # NOTE(review): an exctype outside these three leaves 'err' unbound
        # and would raise NameError below.
        if self.fail_fast:
            raise err
        else:
            self._errors.append(err)
def _validate_unknown_properties(self, schema, data, fieldname):
schema_properties = set(schema)
data_properties = set(data)
delta = data_properties - schema_properties
if delta:
unknowns = ', '.join(['"{}"'.format(x) for x in delta])
raise SchemaError('Unknown properties for field "{fieldname}": {unknowns}'.format(
fieldname=fieldname, unknowns=unknowns))
    def validate_type(self, x, fieldname, schema, path, fieldtype=None):
        ''' Validates that the fieldtype specified is correct for the given data '''
        # We need to know if the field exists or if it's just Null
        fieldexists = True
        try:
            value = x[fieldname]
        except KeyError:
            fieldexists = False
            value = None
        if fieldtype and fieldexists:
            if isinstance(fieldtype, (list, tuple)):
                # Match if type matches any one of the types in the list
                datavalid = False
                errorlist = []
                for eachtype in fieldtype:
                    try:
                        self.validate_type(x, fieldname, eachtype, path, eachtype)
                        datavalid = True
                        break
                    except ValidationError as err:
                        errorlist.append(err)
                if not datavalid:
                    self._error("doesn't match any of {numsubtypes} subtypes in {fieldtype}; "
                                "errorlist = {errorlist!r}",
                                value, fieldname, numsubtypes=len(fieldtype), fieldtype=fieldtype,
                                errorlist=errorlist)
            elif isinstance(fieldtype, dict):
                # Nested schema: recurse through the private dispatcher
                # (defined later in this class).
                try:
                    self.__validate(fieldname, x, fieldtype, path)
                except ValueError as e:
                    raise e
            else:
                # Scalar type name: resolve to a validate_type_<name> method.
                try:
                    type_checker = getattr(self, 'validate_type_' + fieldtype)
                except AttributeError:
                    raise SchemaError("Field type '{}' is not supported.".format(fieldtype))
                if not type_checker(value):
                    self._error("is not of type {fieldtype}", value, fieldname,
                                fieldtype=fieldtype)
    def validate_properties(self, x, fieldname, schema, path, properties=None):
        ''' Validates properties of a JSON object by processing the object's schema recursively '''
        value = x.get(fieldname)
        if value is not None:
            # Only dict values carry properties; other types are ignored here.
            if isinstance(value, dict):
                if isinstance(properties, dict):
                    if self.disallow_unknown_properties:
                        self._validate_unknown_properties(properties, value, fieldname)
                    # Validate each declared property against its subschema.
                    for eachProp in properties:
                        self.__validate(eachProp, value, properties.get(eachProp), path)
                else:
                    raise SchemaError("Properties definition of field '{}' is not an object"
                                      .format(fieldname))
    def validate_items(self, x, fieldname, schema, path, items=None):
        '''
        Validates that all items in the list for the given field match the given schema
        '''
        if x.get(fieldname) is not None:
            value = x.get(fieldname)
            if isinstance(value, (list, tuple)):
                if isinstance(items, (list, tuple)):
                    # Positional form: each index has its own subschema, and
                    # lengths must agree unless additionalItems is declared.
                    if 'additionalItems' not in schema and len(items) != len(value):
                        self._error("is not of same length as schema list", value, fieldname)
                    else:
                        for itemIndex in range(len(items)):
                            try:
                                self.__validate("_data", {"_data": value[itemIndex]},
                                                items[itemIndex], path)
                            except FieldValidationError as e:
                                # Re-raise with context about which list failed.
                                raise type(e)("Failed to validate field '%s' list schema: %s" %
                                              (fieldname, e), fieldname, e.value)
                elif isinstance(items, dict):
                    # Uniform form: every element validates against one schema.
                    for eachItem in value:
                        if self.disallow_unknown_properties and 'properties' in items:
                            self._validate_unknown_properties(items['properties'], eachItem,
                                                              fieldname)
                        self.__validate("[list item]", {"[list item]": eachItem}, items, path)
                else:
                    raise SchemaError("Properties definition of field '{}' is "
                                      "not a list or an object".format(fieldname))
def validate_required(self, x, fieldname, schema, path, required):
''' Validates that the given field is present if required is True '''
# Make sure the field is present
if fieldname not in x and required:
self._error("Required field '{fieldname}' is missing", None, fieldname,
exctype=RequiredFieldValidationError)
def validate_blank(self, x, fieldname, schema, path, blank=False):
''' Validates that the given field is not blank if blank=False '''
value = x.get(fieldname)
if isinstance(value, _str_type) and not blank and not value:
self._error("cannot be blank'", value, fieldname)
def validate_patternProperties(self, x, fieldname, schema, path, patternproperties=None):
if patternproperties is None:
patternproperties = {}
value_obj = x.get(fieldname, {})
for pattern, schema in patternproperties.items():
for key, value in value_obj.items():
if re.match(pattern, key):
self.__validate("_data", {"_data": value}, schema, path)
    def validate_additionalItems(self, x, fieldname, schema, path, additionalItems=False):
        # Only meaningful for array values.
        value = x.get(fieldname)
        if not isinstance(value, (list, tuple)):
            return
        if isinstance(additionalItems, bool):
            if additionalItems or 'items' not in schema:
                # True (or no positional 'items' list): anything goes.
                return
            elif len(value) != len(schema['items']):
                # additionalItems is False: the value must match the
                # positional 'items' list exactly.
                self._error("is not of same length as schema list", value, fieldname)
        # Items beyond the positional 'items' list are validated against the
        # additionalItems schema.  NOTE(review): this is also reached when
        # additionalItems is False and _error() did not raise
        # (fail_fast=False), in which case {"items": False} is passed on.
        remaining = value[len(schema['items']):]
        if len(remaining) > 0:
            self.__validate("_data", {"_data": remaining}, {"items": additionalItems}, path)
def validate_additionalProperties(self, x, fieldname, schema, path, additionalProperties=None):
'''
Validates additional properties of a JSON object that were not
specifically defined by the properties property
'''
# Shouldn't be validating additionalProperties on non-dicts
value = x.get(fieldname)
if not isinstance(value, dict):
return
# If additionalProperties is the boolean value True then we accept
# any additional properties.
if isinstance(additionalProperties, bool) and additionalProperties:
return
value = x.get(fieldname)
if isinstance(additionalProperties, (dict, bool)):
properties = schema.get("properties")
patterns = schema["patternProperties"].keys() if 'patternProperties' in schema else []
if properties is None:
properties = {}
if value is None:
value = {}
for eachProperty in value:
if (eachProperty not in properties and not
any(re.match(p, eachProperty) for p in patterns)):
# If additionalProperties is the boolean value False
# then we don't accept any additional properties.
if additionalProperties is False:
self._error("contains additional property '{prop}' not defined by "
"'properties' or 'patternProperties' and additionalProperties "
" is False", value, fieldname, prop=eachProperty)
self.__validate(eachProperty, value, additionalProperties, path)
else:
raise SchemaError("additionalProperties schema definition for "
"field '{}' is not an object".format(fieldname))
    def validate_dependencies(self, x, fieldname, schema, path, dependencies=None):
        ''' Validates that fields required by the presence of this field
        (draft-3 "dependencies") are also present in the instance '''
        if x.get(fieldname) is not None:

            # handle cases where dependencies is a string or list of strings
            if isinstance(dependencies, _str_type):
                dependencies = [dependencies]
            if isinstance(dependencies, (list, tuple)):
                for dependency in dependencies:
                    if dependency not in x:
                        self._error("Field '{dependency}' is required by field '{fieldname}'",
                                    None, fieldname, dependency=dependency,
                                    exctype=DependencyValidationError)
            elif isinstance(dependencies, dict):
                # NOTE: the version 3 spec is really unclear on what this means
                # based on the meta-schema I'm assuming that it should check
                # that if a key exists, the appropriate value exists
                for k, v in dependencies.items():
                    if k in x and v not in x:
                        self._error("Field '{k}' is required by field '{v}'", None, fieldname,
                                    k=k, v=v, exctype=DependencyValidationError)
            else:
                raise SchemaError("'dependencies' must be a string, list of strings, or dict")
def validate_minimum(self, x, fieldname, schema, path, minimum=None):
''' Validates that the field is longer than or equal to the minimum length if specified '''
exclusive = schema.get('exclusiveMinimum', False)
if x.get(fieldname) is not None:
value = x.get(fieldname)
if value is not None:
if (type(value) in (int, float) and
(not exclusive and value < minimum) or
(exclusive and value <= minimum)):
self._error("is less than minimum value: {minimum}", value, fieldname,
minimum=minimum)
def validate_maximum(self, x, fieldname, schema, path, maximum=None):
'''
Validates that the field is shorter than or equal to the maximum length if specified.
'''
exclusive = schema.get('exclusiveMaximum', False)
if x.get(fieldname) is not None:
value = x.get(fieldname)
if value is not None:
if (type(value) in (int, float) and
(not exclusive and value > maximum) or
(exclusive and value >= maximum)):
self._error("is greater than maximum value: {maximum}", value, fieldname,
maximum=maximum)
def validate_maxLength(self, x, fieldname, schema, path, length=None):
'''
Validates that the value of the given field is shorter than or equal
to the specified length
'''
value = x.get(fieldname)
if isinstance(value, (_str_type, list, tuple)) and len(value) > length:
self._error("must have length less than or equal to {length}", value, fieldname,
length=length)
def validate_minLength(self, x, fieldname, schema, path, length=None):
'''
Validates that the value of the given field is longer than or equal to the specified length
'''
value = x.get(fieldname)
if isinstance(value, (_str_type, list, tuple)) and len(value) < length:
self._error("must have length greater than or equal to {length}", value, fieldname,
length=length)
validate_minItems = validate_minLength
validate_maxItems = validate_maxLength
    def validate_format(self, x, fieldname, schema, path, format_option=None):
        '''
        Validates the format of primitive data types
        '''
        value = x.get(fieldname)

        # Unknown formats are silently ignored (see TODO below); falsy
        # values (None, "", 0) are never format-checked.
        format_validator = self._format_validators.get(format_option, None)

        if format_validator and value:
            try:
                format_validator(self, fieldname, value, format_option)
            except FieldValidationError as fve:
                if self.fail_fast:
                    raise
                else:
                    # accumulate the error and keep validating the rest
                    self._errors.append(fve)

        # TODO: warn about unsupported format ?
def validate_pattern(self, x, fieldname, schema, path, pattern=None):
'''
Validates that the given field, if a string, matches the given regular expression.
'''
value = x.get(fieldname)
if isinstance(value, _str_type) and not re.match(pattern, value):
self._error("does not match regular expression '{pattern}'", value, fieldname,
pattern=pattern)
    def validate_uniqueItems(self, x, fieldname, schema, path, uniqueItems=False):
        '''
        Validates that all items in an array instance MUST be unique
        (contains no two identical values).
        '''
        # uniqueItems False (the default) disables the check entirely.
        # (The original comment here was copy-pasted from
        # additionalProperties and did not apply.)
        if isinstance(uniqueItems, bool) and not uniqueItems:
            return
        values = x.get(fieldname)
        if not isinstance(values, (list, tuple)):
            return
        # Unhashable items (lists/dicts) fall back to a linear-scan list;
        # everything else uses a set for O(1) duplicate detection.
        hashables = set()
        unhashables = []

        for value in values:
            if isinstance(value, (list, dict)):
                container, add = unhashables, unhashables.append
            else:
                container, add = hashables, hashables.add

            if value in container:
                self._error("is not unique", value, fieldname)
            else:
                add(value)
    def validate_enum(self, x, fieldname, schema, path, options=None):
        '''
        Validates that the value of the field is equal to one of the specified option values
        '''
        value = x.get(fieldname)
        if value is not None:
            # options may be a callable deriving the allowed values from the
            # instance currently being validated.
            if callable(options):
                options = options(x)
            if not isinstance(options, Container):
                raise SchemaError("Enumeration {!r} for field '{}' must be a container".format(
                                  options, fieldname))
            if value not in options:
                self._error("is not in the enumeration: {options!r}", value, fieldname,
                            options=options)
def validate_title(self, x, fieldname, schema, path, title=None):
if not isinstance(title, (_str_type, type(None))):
raise SchemaError("The title for field '{}' must be a string".format(fieldname))
def validate_description(self, x, fieldname, schema, path, description=None):
if not isinstance(description, (_str_type, type(None))):
raise SchemaError("The description for field '{}' must be a string".format(fieldname))
def validate_divisibleBy(self, x, fieldname, schema, path, divisibleBy=None):
value = x.get(fieldname)
if not self.validate_type_number(value):
return
if divisibleBy == 0:
raise SchemaError("'{!r}' <- divisibleBy can not be 0".format(schema))
if value % divisibleBy != 0:
self._error("is not divisible by '{divisibleBy}'.", x.get(fieldname), fieldname,
divisibleBy=divisibleBy)
    def validate_disallow(self, x, fieldname, schema, path, disallow=None):
        '''
        Validates that the value of the given field does not match the disallowed type.
        '''
        # Reuses validate_type: if it raises, the value is NOT of the
        # disallowed type, which is exactly what we want here.
        try:
            self.validate_type(x, fieldname, schema, path, disallow)
        except ValidationError:
            return
        self._error("is disallowed for field '{fieldname}'", x.get(fieldname), fieldname,
                    disallow=disallow)
    def validate(self, data, schema):
        '''
        Validates a piece of json data against the provided json-schema.

        Raises MultipleValidationError when field errors were accumulated
        (non-fail_fast mode); individual validators may raise earlier when
        fail_fast is enabled.
        '''
        self.__validate("data", {"data": data}, schema, ())
        if self._errors:
            raise MultipleValidationError(self._errors)
    def __validate(self, fieldname, data, schema, path):
        ''' Validate a single field of ``data`` against ``schema``,
        dispatching to the validate_* method for every schema property '''
        if schema is not None:
            if not isinstance(schema, dict):
                raise SchemaError("Type for field '%s' must be 'dict', got: '%s'" %
                                  (fieldname, type(schema).__name__))

            # Copy before injecting the implicit required/blank defaults so
            # the caller's schema is never mutated.
            newschema = copy.copy(schema)

            if 'required' not in schema:
                newschema['required'] = self.required_by_default
            if 'blank' not in schema:
                newschema['blank'] = self.blank_by_default

            # iterate over schema and call all validators
            for schemaprop in newschema:
                validatorname = "validate_" + schemaprop
                validator = getattr(self, validatorname, None)
                if validator:
                    validator(data, fieldname, schema, path, newschema.get(schemaprop))

            if self.apply_default_to_data and 'default' in schema:
                # The default itself must satisfy the schema's declared type
                # before we inject it into missing fields.
                try:
                    self.validate_type(x={'_ds': schema['default']}, fieldname='_ds',
                                       schema=schema,
                                       fieldtype=schema['type'] if 'type' in schema else None,
                                       path=path)
                except FieldValidationError as exc:
                    raise SchemaError(exc)

                if fieldname not in data:
                    data[fieldname] = schema['default']

        return data
| |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Module containing comment model and comment related mixins."""
from sqlalchemy import case
from sqlalchemy import orm
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import validates
from werkzeug.exceptions import BadRequest
from ggrc import db
from ggrc.models.computed_property import computed_property
from ggrc.models.deferred import deferred
from ggrc.models.revision import Revision
from ggrc.models.mixins import Base
from ggrc.models.mixins import Described
from ggrc.models.object_owner import Ownable
from ggrc.models.relationship import Relatable
class Commentable(object):
  """Mixin for commentable objects.

  This is a mixin for adding default options to objects on which people can
  comment.

  recipients is used for setting who gets notified (Verifier, Requester, ...).

  send_by_default should be used for setting the "send notification" flag in
  the comment modal.
  """
  # pylint: disable=too-few-public-methods

  # Assignee-type names that are allowed to appear in ``recipients``.
  VALID_RECIPIENTS = frozenset([
      "Assessor",
      "Assignee",
      "Creator",
      "Requester",
      "Verifier",
  ])

  @validates("recipients")
  def validate_recipients(self, key, value):
    """
    Validate recipients list

    Args:
      value (string): Can be either empty, or
                      list of comma separated `VALID_RECIPIENTS`
    """
    # pylint: disable=unused-argument
    if value:
      # Drop empty entries produced by stray commas before validating.
      value = set(name for name in value.split(",") if name)

    if value and value.issubset(self.VALID_RECIPIENTS):
      # The validator is a bit more smart and also makes some filtering of the
      # given data - this is intended.
      return ",".join(value)
    elif not value:
      return ""
    else:
      raise ValueError(value,
                       'Value should be either empty ' +
                       'or comma separated list of ' +
                       ', '.join(sorted(self.VALID_RECIPIENTS))
                       )

  # Comma-separated subset of VALID_RECIPIENTS; NULL/empty means nobody.
  recipients = db.Column(db.String, nullable=True)
  # Default state of the "send notification" flag in the comment modal.
  send_by_default = db.Column(db.Boolean)

  _publish_attrs = [
      "recipients",
      "send_by_default",
  ]
  _aliases = {
      "recipients": "Recipients",
      "send_by_default": "Send by default",
  }

  @declared_attr
  def comments(self):
    """Comments related to self via Relationship table."""
    from ggrc.models.relationship import Relationship

    # A Relationship row may reference the Comment from either end; these
    # CASE expressions normalize which column holds the comment id and
    # which holds the commentable object's id.
    comment_id = case(
        [(Relationship.destination_type == "Comment",
          Relationship.destination_id)],
        else_=Relationship.source_id,
    )
    commentable_id = case(
        [(Relationship.destination_type == "Comment",
          Relationship.source_id)],
        else_=Relationship.destination_id,
    )

    return db.relationship(
        Comment,
        primaryjoin=lambda: self.id == commentable_id,
        secondary=Relationship.__table__,
        secondaryjoin=lambda: Comment.id == comment_id,
        viewonly=True,
    )
class Comment(Relatable, Described, Ownable, Base, db.Model):
  """Basic comment model."""
  __tablename__ = "comments"

  # Role of the person the comment is addressed to (e.g. "Assessor").
  assignee_type = db.Column(db.String)
  # Revision of the CustomAttributeValue this comment refers to; SET NULL
  # keeps the comment alive if the revision row disappears.
  revision_id = deferred(db.Column(
      db.Integer,
      db.ForeignKey('revisions.id', ondelete='SET NULL'),
      nullable=True,
  ), 'Comment')
  revision = db.relationship(
      'Revision',
      uselist=False,
  )
  custom_attribute_definition_id = deferred(db.Column(
      db.Integer,
      db.ForeignKey('custom_attribute_definitions.id', ondelete='SET NULL'),
      nullable=True,
  ), 'Comment')
  custom_attribute_definition = db.relationship(
      'CustomAttributeDefinition',
      uselist=False,
  )

  # REST properties
  _publish_attrs = [
      "assignee_type",
      "custom_attribute_revision",
  ]
  _update_attrs = [
      "assignee_type",
      "custom_attribute_revision_upd",
  ]
  _sanitize_html = [
      "description",
  ]

  @classmethod
  def eager_query(cls):
    """Eagerly load the revision and CA definition used by the REST view."""
    query = super(Comment, cls).eager_query()
    return query.options(
        orm.joinedload('revision'),
        orm.joinedload('custom_attribute_definition')
        .undefer_group('CustomAttributeDefinition_complete'),
    )

  @computed_property
  def custom_attribute_revision(self):
    """Get the historical value of the relevant CA value."""
    if not self.revision:
      return None
    revision = self.revision.content
    cav_stored_value = revision['attribute_value']
    cad = self.custom_attribute_definition
    return {
        'custom_attribute': {
            'id': cad.id if cad else None,
            'title': cad.title if cad else 'DELETED DEFINITION',
        },
        'custom_attribute_stored_value': cav_stored_value,
    }

  def custom_attribute_revision_upd(self, value):
    """Create a Comment-CA mapping with current CA value stored."""
    ca_revision_dict = value.get('custom_attribute_revision_upd')
    if not ca_revision_dict:
      return
    ca_val_dict = self._get_ca_value(ca_revision_dict)
    ca_val_id = ca_val_dict['id']
    # Pin the latest revision of the CA value at the time of commenting.
    ca_val_revision = Revision.query.filter_by(
        resource_type='CustomAttributeValue',
        resource_id=ca_val_id,
    ).order_by(
        Revision.created_at.desc(),
    ).limit(1).first()
    if not ca_val_revision:
      raise BadRequest("No Revision found for CA value with id provided under "
                       "'custom_attribute_value': {}"
                       .format(ca_val_dict))
    self.revision_id = ca_val_revision.id
    self.custom_attribute_definition_id = ca_val_revision.content.get(
        'custom_attribute_id',
    )

  @staticmethod
  def _get_ca_value(ca_revision_dict):
    """Get CA value dict from json and do a basic validation."""
    ca_val_dict = ca_revision_dict.get('custom_attribute_value')
    if not ca_val_dict:
      raise ValueError("CA value expected under "
                       "'custom_attribute_value': {}"
                       .format(ca_revision_dict))
    if not ca_val_dict.get('id'):
      raise ValueError("CA value id expected under 'id': {}"
                       .format(ca_val_dict))
    return ca_val_dict
| |
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import datetime
import mock
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import timeutils
import testscenarios
from neutron.common import constants
from neutron.common import topics
from neutron import context
from neutron.db import agents_db
from neutron.db import agentschedulers_db as sched_db
from neutron.db import models_v2
from neutron.extensions import dhcpagentscheduler
from neutron.scheduler import dhcp_agent_scheduler
from neutron.tests.unit import testlib_api
# Required to generate tests from scenarios (hooks into the unittest
# load_tests protocol). Not compatible with nose.
load_tests = testscenarios.load_tests_apply_scenarios
class TestDhcpSchedulerBaseTestCase(testlib_api.SqlTestCase):
    """Shared fixtures: DHCP agents, networks and a bind-and-verify helper."""

    def setUp(self):
        super(TestDhcpSchedulerBaseTestCase, self).setUp()
        self.ctx = context.get_admin_context()
        self.network = {'id': 'foo_network_id'}
        self.network_id = 'foo_network_id'
        self._save_networks([self.network_id])

    def _get_agents(self, hosts):
        """Build (unsaved) alive DHCP agent records, one per host."""
        return [
            agents_db.Agent(
                binary='neutron-dhcp-agent',
                host=host,
                topic=topics.DHCP_AGENT,
                configurations="",
                agent_type=constants.AGENT_TYPE_DHCP,
                created_at=timeutils.utcnow(),
                started_at=timeutils.utcnow(),
                heartbeat_timestamp=timeutils.utcnow())
            for host in hosts
        ]

    def _save_agents(self, agents):
        for agent in agents:
            with self.ctx.session.begin(subtransactions=True):
                self.ctx.session.add(agent)

    def _create_and_set_agents_down(self, hosts, down_agent_count=0, **kwargs):
        """Persist agents for ``hosts``; the first ``down_agent_count`` get
        an hour-old heartbeat so they count as dead."""
        dhcp_agents = self._get_agents(hosts)
        # bring down the specified agents
        for agent in dhcp_agents[:down_agent_count]:
            old_time = agent['heartbeat_timestamp']
            hour_old = old_time - datetime.timedelta(hours=1)
            agent['heartbeat_timestamp'] = hour_old
            agent['started_at'] = hour_old
        for agent in dhcp_agents:
            agent.update(kwargs)
        self._save_agents(dhcp_agents)
        return dhcp_agents

    def _save_networks(self, networks):
        for network_id in networks:
            with self.ctx.session.begin(subtransactions=True):
                self.ctx.session.add(models_v2.Network(id=network_id))

    def _test_schedule_bind_network(self, agents, network_id):
        """Bind ``network_id`` to ``agents`` and assert bindings were made."""
        scheduler = dhcp_agent_scheduler.ChanceScheduler()
        scheduler.resource_filter.bind(self.ctx, agents, network_id)
        results = self.ctx.session.query(
            sched_db.NetworkDhcpAgentBinding).filter_by(
            network_id=network_id).all()
        self.assertEqual(len(agents), len(results))
        for result in results:
            self.assertEqual(network_id, result.network_id)
class TestDhcpScheduler(TestDhcpSchedulerBaseTestCase):
    """ChanceScheduler network binding behavior."""

    def test_schedule_bind_network_single_agent(self):
        agents = self._create_and_set_agents_down(['host-a'])
        self._test_schedule_bind_network(agents, self.network_id)

    def test_schedule_bind_network_multi_agents(self):
        agents = self._create_and_set_agents_down(['host-a', 'host-b'])
        self._test_schedule_bind_network(agents, self.network_id)

    def test_schedule_bind_network_multi_agent_fail_one(self):
        agents = self._create_and_set_agents_down(['host-a'])
        self._test_schedule_bind_network(agents, self.network_id)
        # Rebinding the same agent is tolerated but logged once.
        with mock.patch.object(dhcp_agent_scheduler.LOG, 'info') as fake_log:
            self._test_schedule_bind_network(agents, self.network_id)
            self.assertEqual(1, fake_log.call_count)
class TestAutoScheduleNetworks(TestDhcpSchedulerBaseTestCase):
    """Unit test scenarios for ChanceScheduler.auto_schedule_networks.

    network_present
        Network is present or not

    enable_dhcp
        Dhcp is enabled or disabled in the subnet of the network

    scheduled_already
        Network is already scheduled to the agent or not

    agent_down
        Dhcp agent is down or alive

    valid_host
        If true, then an valid host is passed to schedule the network,
        else an invalid host is passed.
    """
    scenarios = [
        ('Network present',
         dict(network_present=True,
              enable_dhcp=True,
              scheduled_already=False,
              agent_down=False,
              valid_host=True)),

        ('No network',
         dict(network_present=False,
              enable_dhcp=False,
              scheduled_already=False,
              agent_down=False,
              valid_host=True)),

        ('Network already scheduled',
         dict(network_present=True,
              enable_dhcp=True,
              scheduled_already=True,
              agent_down=False,
              valid_host=True)),

        # NOTE(review): despite the name, agent_down is False here, making
        # this a duplicate of 'Network present' -- confirm whether it
        # should read agent_down=True.
        ('Agent down',
         dict(network_present=True,
              enable_dhcp=True,
              scheduled_already=False,
              agent_down=False,
              valid_host=True)),

        ('dhcp disabled',
         dict(network_present=True,
              enable_dhcp=False,
              scheduled_already=False,
              agent_down=False,
              valid_host=False)),

        ('Invalid host',
         dict(network_present=True,
              enable_dhcp=True,
              scheduled_already=False,
              agent_down=False,
              valid_host=False)),
    ]

    def test_auto_schedule_network(self):
        plugin = mock.MagicMock()
        # Subnet list drives whether the scheduler sees a schedulable network.
        plugin.get_subnets.return_value = (
            [{"network_id": self.network_id, "enable_dhcp": self.enable_dhcp}]
            if self.network_present else [])
        scheduler = dhcp_agent_scheduler.ChanceScheduler()
        if self.network_present:
            down_agent_count = 1 if self.agent_down else 0
            agents = self._create_and_set_agents_down(
                ['host-a'], down_agent_count=down_agent_count)
            if self.scheduled_already:
                self._test_schedule_bind_network(agents, self.network_id)

        expected_result = (self.network_present and self.enable_dhcp)
        expected_hosted_agents = (1 if expected_result and
                                  self.valid_host else 0)
        host = "host-a" if self.valid_host else "host-b"
        observed_ret_value = scheduler.auto_schedule_networks(
            plugin, self.ctx, host)
        self.assertEqual(expected_result, observed_ret_value)
        hosted_agents = self.ctx.session.query(
            sched_db.NetworkDhcpAgentBinding).all()
        self.assertEqual(expected_hosted_agents, len(hosted_agents))
class TestNetworksFailover(TestDhcpSchedulerBaseTestCase,
                           sched_db.DhcpAgentSchedulerDbMixin):
    """Rescheduling of networks away from dead DHCP agents."""

    def test_reschedule_network_from_down_agent(self):
        agents = self._create_and_set_agents_down(['host-a', 'host-b'], 1)
        self._test_schedule_bind_network([agents[0]], self.network_id)
        self._save_networks(["foo-network-2"])
        self._test_schedule_bind_network([agents[1]], "foo-network-2")
        # NOTE: contextlib.nested is Python 2 only.
        with contextlib.nested(
            mock.patch.object(self, 'remove_network_from_dhcp_agent'),
            mock.patch.object(self, 'schedule_network',
                              return_value=[agents[1]]),
            mock.patch.object(self, 'get_network', create=True,
                              return_value={'id': self.network_id})
        ) as (rn, sch, getn):
            notifier = mock.MagicMock()
            self.agent_notifiers[constants.AGENT_TYPE_DHCP] = notifier
            self.remove_networks_from_down_agents()
            rn.assert_called_with(mock.ANY, agents[0].id, self.network_id,
                                  notify=False)
            sch.assert_called_with(mock.ANY, {'id': self.network_id})
            notifier.network_added_to_agent.assert_called_with(
                mock.ANY, self.network_id, agents[1].host)

    def _test_failed_rescheduling(self, rn_side_effect=None):
        """Common path: removal succeeds/raises, but no new agent is found."""
        agents = self._create_and_set_agents_down(['host-a'], 1)
        self._test_schedule_bind_network([agents[0]], self.network_id)
        with contextlib.nested(
            mock.patch.object(
                self, 'remove_network_from_dhcp_agent',
                side_effect=rn_side_effect),
            mock.patch.object(self, 'schedule_network',
                              return_value=None),
            mock.patch.object(self, 'get_network', create=True,
                              return_value={'id': self.network_id})
        ) as (rn, sch, getn):
            notifier = mock.MagicMock()
            self.agent_notifiers[constants.AGENT_TYPE_DHCP] = notifier
            self.remove_networks_from_down_agents()
            rn.assert_called_with(mock.ANY, agents[0].id, self.network_id,
                                  notify=False)
            sch.assert_called_with(mock.ANY, {'id': self.network_id})
            # no agent to move to, so no notification either
            self.assertFalse(notifier.network_added_to_agent.called)

    def test_reschedule_network_from_down_agent_failed(self):
        self._test_failed_rescheduling()

    def test_reschedule_network_from_down_agent_concurrent_removal(self):
        self._test_failed_rescheduling(
            rn_side_effect=dhcpagentscheduler.NetworkNotHostedByDhcpAgent(
                network_id='foo', agent_id='bar'))

    def test_filter_bindings(self):
        bindings = [
            sched_db.NetworkDhcpAgentBinding(network_id='foo1',
                                             dhcp_agent={'id': 'id1'}),
            sched_db.NetworkDhcpAgentBinding(network_id='foo2',
                                             dhcp_agent={'id': 'id1'}),
            sched_db.NetworkDhcpAgentBinding(network_id='foo3',
                                             dhcp_agent={'id': 'id2'}),
            sched_db.NetworkDhcpAgentBinding(network_id='foo4',
                                             dhcp_agent={'id': 'id2'})]
        with mock.patch.object(self, 'agent_starting_up',
                               side_effect=[True, False]):
            res = [b for b in self._filter_bindings(None, bindings)]
            # once per each agent id1 and id2
            self.assertEqual(2, len(res))
            res_ids = [b.network_id for b in res]
            self.assertIn('foo3', res_ids)
            self.assertIn('foo4', res_ids)
class DHCPAgentWeightSchedulerTestCase(TestDhcpSchedulerBaseTestCase):
    """Unit test scenarios for WeightScheduler.schedule."""

    # report_state payloads: host-c starts with 0 hosted networks,
    # host-d with 1, so host-c should win the first scheduling round.
    hostc = {
        'binary': 'neutron-dhcp-agent',
        'host': 'host-c',
        'topic': 'DHCP_AGENT',
        'configurations': {'dhcp_driver': 'dhcp_driver',
                           'networks': 0,
                           'use_namespaces': True,
                           },
        'agent_type': constants.AGENT_TYPE_DHCP}
    hostd = {
        'binary': 'neutron-dhcp-agent',
        'host': 'host-d',
        'topic': 'DHCP_AGENT',
        'configurations': {'dhcp_driver': 'dhcp_driver',
                           'networks': 1,
                           'use_namespaces': True,
                           },
        'agent_type': constants.AGENT_TYPE_DHCP}

    def setUp(self):
        super(DHCPAgentWeightSchedulerTestCase, self).setUp()
        DB_PLUGIN_KLASS = 'neutron.plugins.ml2.plugin.Ml2Plugin'
        self.setup_coreplugin(DB_PLUGIN_KLASS)
        cfg.CONF.set_override("network_scheduler_driver",
            'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler')
        # Disable the periodic agent-status check so tests stay deterministic.
        self.dhcp_periodic_p = mock.patch(
            'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.'
            'start_periodic_dhcp_agent_status_check')
        self.patched_dhcp_periodic = self.dhcp_periodic_p.start()
        self.plugin = importutils.import_object('neutron.plugins.ml2.plugin.'
                                                'Ml2Plugin')
        self.assertEqual(1, self.patched_dhcp_periodic.call_count)
        self.plugin.network_scheduler = importutils.import_object(
            'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler'
        )
        cfg.CONF.set_override('dhcp_agents_per_network', 1)
        cfg.CONF.set_override("dhcp_load_type", "networks")

    def test_scheduler_one_agents_per_network(self):
        cfg.CONF.set_override('dhcp_agents_per_network', 1)
        self._save_networks(['1111'])
        agents = self._get_agents(['host-c', 'host-d'])
        self._save_agents(agents)
        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
                                               {'id': '1111'})
        agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
                                                              ['1111'])
        self.assertEqual(1, len(agents))

    def test_scheduler_two_agents_per_network(self):
        cfg.CONF.set_override('dhcp_agents_per_network', 2)
        self._save_networks(['1111'])
        agents = self._get_agents(['host-c', 'host-d'])
        self._save_agents(agents)
        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
                                               {'id': '1111'})
        agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
                                                              ['1111'])
        self.assertEqual(2, len(agents))

    def test_scheduler_no_active_agents(self):
        self._save_networks(['1111'])
        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
                                               {'id': '1111'})
        agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
                                                              ['1111'])
        self.assertEqual(0, len(agents))

    def test_scheduler_equal_distribution(self):
        cfg.CONF.set_override('dhcp_agents_per_network', 1)
        self._save_networks(['1111', '2222', '3333'])
        agents = self._get_agents(['host-c', 'host-d'])
        self._save_agents(agents)
        callback = agents_db.AgentExtRpcCallback()
        # Initial loads: host-c=0, host-d=1 -> '1111' goes to host-c.
        callback.report_state(self.ctx,
                              agent_state={'agent_state': self.hostc},
                              time=timeutils.strtime())
        callback.report_state(self.ctx,
                              agent_state={'agent_state': self.hostd},
                              time=timeutils.strtime())
        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
                                               {'id': '1111'})
        agent1 = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
                                                              ['1111'])
        # host-d now reports load 2 -> '2222' still goes to host-c.
        self.hostd['configurations']['networks'] = 2
        callback.report_state(self.ctx,
                              agent_state={'agent_state': self.hostd},
                              time=timeutils.strtime())
        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
                                               {'id': '2222'})
        agent2 = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
                                                              ['2222'])
        # host-c now reports load 4 -> '3333' goes to host-d.
        self.hostc['configurations']['networks'] = 4
        callback.report_state(self.ctx,
                              agent_state={'agent_state': self.hostc},
                              time=timeutils.strtime())
        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
                                               {'id': '3333'})
        agent3 = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
                                                              ['3333'])
        self.assertEqual('host-c', agent1[0]['host'])
        self.assertEqual('host-c', agent2[0]['host'])
        self.assertEqual('host-d', agent3[0]['host'])
class TestDhcpSchedulerFilter(TestDhcpSchedulerBaseTestCase,
                              sched_db.DhcpAgentSchedulerDbMixin):
    """Filtering of hosting agents by liveness and admin state."""

    def _test_get_dhcp_agents_hosting_networks(self, expected, **kwargs):
        # Four agents bound to the network: a/b admin-up, c/d admin-down;
        # within each pair the first one is dead (stale heartbeat).
        agents = self._create_and_set_agents_down(['host-a', 'host-b'], 1)
        agents += self._create_and_set_agents_down(['host-c', 'host-d'], 1,
                                                   admin_state_up=False)
        self._test_schedule_bind_network(agents, self.network_id)
        agents = self.get_dhcp_agents_hosting_networks(self.ctx,
                                                       [self.network_id],
                                                       **kwargs)
        host_ids = set(a['host'] for a in agents)
        self.assertEqual(expected, host_ids)

    def test_get_dhcp_agents_hosting_networks_default(self):
        self._test_get_dhcp_agents_hosting_networks({'host-a', 'host-b',
                                                     'host-c', 'host-d'})

    def test_get_dhcp_agents_hosting_networks_active(self):
        self._test_get_dhcp_agents_hosting_networks({'host-b', 'host-d'},
                                                    active=True)

    def test_get_dhcp_agents_hosting_networks_admin_up(self):
        self._test_get_dhcp_agents_hosting_networks({'host-a', 'host-b'},
                                                    admin_state_up=True)

    def test_get_dhcp_agents_hosting_networks_active_admin_up(self):
        self._test_get_dhcp_agents_hosting_networks({'host-b'},
                                                    active=True,
                                                    admin_state_up=True)

    def test_get_dhcp_agents_hosting_networks_admin_down(self):
        self._test_get_dhcp_agents_hosting_networks({'host-c', 'host-d'},
                                                    admin_state_up=False)

    def test_get_dhcp_agents_hosting_networks_active_admin_down(self):
        self._test_get_dhcp_agents_hosting_networks({'host-d'},
                                                    active=True,
                                                    admin_state_up=False)
| |
#!/usr/bin/env python3
# 556D_fug.py - Codeforces.com/problemset/problem/556/D Fug quiz by Sergey 2015
# Standard modules
import unittest
import sys
import re
# Additional modules
import bisect
###############################################################################
# Fastlist Class
###############################################################################
class Fastlist(object):
""" Fastlist representation """
def __init__(self, l=[], load=5000, sorted=0):
self._load = load
self._sorted = sorted
self._lists = []
self._starts = []
self._mins = []
self._insert_list()
self._irev = 0
self._ii = 0
self._il = 0
self.extend(l)
    def _index_location(self, index):
        """Map a flat ``index`` to (sublist index, offset) coordinates.

        Sorted lists only allow 0 and -1; anything else raises
        RuntimeError, since arbitrary indexing is not tracked for them.
        """
        if len(self._lists[0]) == 0:
            raise IndexError("List index out of range")
        # Fast paths for both ends (also the only legal sorted accesses).
        if index == 0:
            return (0, 0)
        if index == -1:
            return (len(self._lists) - 1, len(self._lists[-1]) - 1)
        if self._sorted:
            raise RuntimeError("No index access to the sorted list, exc 0, -1")
        length = len(self)
        if index < 0:
            index = length + index
        if index >= length:
            raise IndexError("List index out of range")
        # Binary-search for the sublist whose start offset covers ``index``.
        il = bisect.bisect_right(self._starts, index) - 1
        return (il, index - self._starts[il])
    def _insert_list(self, il=None):
        """Insert an empty sublist at position ``il`` (default: append),
        seeding its bookkeeping entry (_mins if sorted, _starts if not)."""
        if il is None:
            il = len(self._lists)
        self._lists.insert(il, [])
        if self._sorted:
            if il == 0:
                # no data yet; append() fills this in on first use
                self._mins.insert(il, None)
            else:
                # new sublist inherits the predecessor's last (largest) value
                self._mins.insert(il, self._lists[il-1][-1])
        else:
            if il == 0:
                self._starts.insert(il, 0)
            else:
                start = self._starts[il-1] + len(self._lists[il-1])
                self._starts.insert(il, start)
def _del_list(self, il):
del self._lists[il]
if self._sorted:
del self._mins[il]
else:
del self._starts[il]
    def _rebalance(self, il):
        """Keep sublist sizes near the load factor: split sublist ``il``
        when it reaches 2x load, merge/redistribute with a neighbour when
        it drops to 20% of load."""
        illen = len(self._lists[il])
        if illen >= self._load * 2:
            self._insert_list(il)
            self._even_lists(il)
        if illen <= self._load * 0.2:
            if il != 0:
                # even out with the left neighbour
                self._even_lists(il-1)
            elif len(self._lists) > 1:
                # leftmost sublist: even out with the right neighbour
                self._even_lists(il)
    def _even_lists(self, il):
        """Merge sublists ``il`` and ``il+1`` when their total is small,
        otherwise split the combined contents evenly between them,
        keeping _mins (sorted) or _starts (unsorted) consistent."""
        tot = len(self._lists[il]) + len(self._lists[il+1])
        if tot < self._load * 1:
            # merge into one sublist
            self._lists[il] += self._lists[il+1]
            self._del_list(il+1)
            if self._sorted:
                self._mins[il] = self._lists[il][0]
        else:
            # redistribute: first half stays, second half moves right
            half = tot//2
            ltot = self._lists[il] + self._lists[il+1]
            self._lists[il] = ltot[:half]
            self._lists[il+1] = ltot[half:]
            if self._sorted:
                self._mins[il] = self._lists[il][0]
                self._mins[il+1] = self._lists[il+1][0]
            else:
                self._starts[il+1] = self._starts[il] + len(self._lists[il])
def _obj_location(self, obj, l=0):
if not self._sorted:
raise RuntimeError("No by-value access to an unserted list")
il = 0
if len(self._mins) > 1 and obj > self._mins[0]:
if l:
il = bisect.bisect_left(self._mins, obj) - 1
else:
il = bisect.bisect_right(self._mins, obj) - 1
if l:
ii = bisect.bisect_left(self._lists[il], obj)
else:
ii = bisect.bisect_right(self._lists[il], obj)
return (il, ii)
    def insert(self, index, obj):
        """Insert ``obj`` before position ``index`` (unsorted lists only;
        _index_location rejects arbitrary indexes on sorted lists)."""
        (il, ii) = self._index_location(index)
        self._lists[il].insert(ii, obj)
        # shift the start offsets of every following sublist
        for j in range(il + 1, len(self._starts)):
            self._starts[j] += 1
        self._rebalance(il)
    def append(self, obj):
        """Append ``obj`` to the tail, opening a fresh sublist when the
        last one has reached capacity.

        NOTE(review): for sorted lists only the very first append seeds
        ``_mins[0]``; callers are expected to use add()/insort() to keep
        order -- confirm intended usage.
        """
        if len(self._lists[-1]) >= self._load:
            self._insert_list()
        self._lists[-1].append(obj)
        if self._sorted and self._mins[0] is None:
            self._mins[0] = self._lists[0][0]
def extend(self, iter):
for n in iter:
self.append(n)
    def pop(self, index=None):
        """Remove and return the item at ``index`` (default: the last one)."""
        if index is None:
            index = -1
        (il, ii) = self._index_location(index)
        item = self._lists[il].pop(ii)
        if self._sorted:
            # keep this sublist's cached minimum accurate
            if ii == 0 and len(self._lists[il]) > 0:
                self._mins[il] = self._lists[il][0]
        else:
            # shift the following start offsets down and rebalance
            for j in range(il + 1, len(self._starts)):
                self._starts[j] -= 1
            self._rebalance(il)
        return item
def clear(self):
self._lists.clear()
self._starts.clear()
self._mins.clear()
self._insert_list()
def as_list(self):
return sum(self._lists, [])
    def insort(self, obj, l=0):
        """Insert ``obj`` keeping sort order; ``l`` picks left/right bisect
        placement among equal values."""
        (il, ii) = self._obj_location(obj, l)
        self._lists[il].insert(ii, obj)
        if ii == 0:
            # new smallest element of this sublist
            self._mins[il] = obj
        self._rebalance(il)
def insort_left(self, obj):
self.insort(obj, l=1)
def add(self, obj):
if self._sorted:
self.insort(obj)
else:
self.append(obj)
def __str__(self):
return str(self.as_list())
    def __setitem__(self, index, obj):
        """Replace the item at an integer ``index``; slice assignment is
        explicitly unsupported."""
        if isinstance(index, int):
            (il, ii) = self._index_location(index)
            self._lists[il][ii] = obj
        elif isinstance(index, slice):
            raise RuntimeError("Slice assignment is not supported")
    def __getitem__(self, index):
        """Item or slice access.  A full [:] slice is served by as_list();
        any other slice is materialized element by element."""
        if isinstance(index, int):
            (il, ii) = self._index_location(index)
            return self._lists[il][ii]
        elif isinstance(index, slice):
            rg = index.indices(len(self))
            if rg[0] == 0 and rg[1] == len(self) and rg[2] == 1:
                return self.as_list()
            # note: the comprehension variable deliberately rebinds ``index``
            return [self.__getitem__(index) for index in range(*rg)]
def __iadd__(self, obj):
if self._sorted:
[self.insort(n) for n in obj]
else:
[self.append(n) for n in obj]
return self
def __delitem__(self, index):
if isinstance(index, int):
self.pop(index)
elif isinstance(index, slice):
rg = index.indices(len(self))
[self.__delitem__(rg[0]) for i in range(*rg)]
def __len__(self):
if self._sorted:
return sum([len(l) for l in self._lists])
return self._starts[-1] + len(self._lists[-1])
def __contains__(self, obj):
if self._sorted:
it = self.lower_bound(obj)
return not it.iter_end() and obj == it.iter_getitem()
else:
for n in self:
if obj == n:
return True
return False
def __bool__(self):
return len(self._lists[0]) != 0
    def __iter__(self):
        """Reset the single shared internal cursor and return self.

        NOTE(review): ``_irev`` is not cleared here, so after a completed
        ``reversed()`` pass a later plain iteration would still run
        backwards — confirm callers never interleave forward and reverse
        iteration on the same instance.
        """
        if not self._irev:
            self._il = self._ii = 0
        else:
            # Reverse mode starts at the last element of the last sublist.
            self._il = len(self._lists) - 1
            self._ii = len(self._lists[self._il]) - 1
        return self
    def __reversed__(self):
        """Switch the shared cursor into reverse mode and position it at the end."""
        self._irev = 1
        # __iter__ reads _irev and positions the cursor accordingly.
        self.__iter__()
        return self
    def _iter_fix(self):
        """Hop the cursor across a sublist boundary after an advance."""
        if not self._irev:
            # Walked past the end of the current sublist: jump to next head.
            if (self._il != len(self._lists) - 1 and
                    self._ii == len(self._lists[self._il])):
                self._il += 1
                self._ii = 0
        else:
            # Walked past the start: jump to the previous sublist's tail.
            if self._il != 0 and self._ii == -1:
                self._il -= 1
                self._ii = len(self._lists[self._il]) - 1
    def __next__(self):
        """Return the item under the cursor, then advance one step."""
        # iter_getitem raises StopIteration at the end, ending the for-loop.
        item = self.iter_getitem()
        if not self._irev:
            self._ii += 1
        else:
            self._ii -= 1
        return item
    def iter_end(self):
        """True when the cursor has walked past the last (forward mode) or
        first (reverse mode) element."""
        if not self._irev:
            return (self._il == len(self._lists) - 1 and
                    self._ii == len(self._lists[self._il]))
        else:
            return (self._il == 0 and self._ii == -1)
    def iter_getitem(self):
        """Return the item under the cursor.

        Raises StopIteration when exhausted or empty — this doubles as the
        iterator-protocol stop signal for :meth:`__next__`.
        """
        if self.iter_end() or len(self._lists[0]) == 0:
            raise StopIteration("Iteration stopped")
        # Normalize the cursor if the last advance crossed a sublist edge.
        self._iter_fix()
        return self._lists[self._il][self._ii]
    def iter_del(self):
        """Delete and return the item under the cursor, fixing bookkeeping
        (mirrors :meth:`pop` but at the current cursor position)."""
        item = self._lists[self._il].pop(self._ii)
        if self._sorted:
            # Keep the cached minimum in sync when the sublist head went away.
            if self._ii == 0 and len(self._lists[self._il]) > 0:
                self._mins[self._il] = self._lists[self._il][0]
        else:
            # Unsorted mode: shift the start offsets of all later sublists.
            for j in range(self._il + 1, len(self._starts)):
                self._starts[j] -= 1
        self._rebalance(self._il)
        return item
    def lower_bound(self, obj):
        """Position the shared cursor at the first element >= *obj*
        (sorted mode) and return self for chaining."""
        (self._il, self._ii) = self._obj_location(obj, l=1)
        return self
    def upper_bound(self, obj):
        """Position the shared cursor at the first element > *obj*
        (sorted mode) and return self for chaining."""
        (self._il, self._ii) = self._obj_location(obj)
        return self
###############################################################################
# Fug Class
###############################################################################
class Fug:
    """ Fug representation """
    def __init__(self, args):
        """ Default constructor """
        # args = [gaps sorted by (max, min, original-index),
        #         bridges sorted as (length, original-index), gap count]
        self.gsrt = args[0]
        self.asrt = args[1]
        self.gn = args[2]
        # result[i] will hold the 1-based bridge number assigned to gap i
        self.result = [0]*self.gn
        # Bridges kept in a sorted Fastlist for binary search + deletion
        self.a = Fastlist(self.asrt, load=500, sorted=1)
    def calculate(self):
        """ Main calculation function of the class """
        # Greedy matching: for each gap (in ascending max-span order) take
        # the shortest remaining bridge that still covers the min span.
        for i in range(self.gn):
            g = self.gsrt[i]
            it = self.a.lower_bound((g[1], 0))
            if not it.iter_end():
                alb = it.iter_getitem()
                if alb[0] > g[0]:
                    # The shortest viable bridge is already too long.
                    return "No"
                self.result[g[2]] = alb[1]+1
                it.iter_del()
            else:
                # No bridge long enough remains.
                return "No"
        answer = "Yes\n" + " ".join(map(str, self.result))
        return answer
###############################################################################
# Executable code
###############################################################################
def get_inputs(test_inputs=None):
    """Read the problem input from *test_inputs* (a newline-separated string,
    used by the unit tests) or from stdin when it is None.

    Returns ``[sorted gap tuples (max, min, index),
    sorted (length, index) bridge tuples, gap count]``.
    """
    it = iter(test_inputs.split("\n")) if test_inputs else None
    def uinput():
        """ Unit-testable input function wrapper """
        if it:
            return next(it)
        else:
            return sys.stdin.readline()
    # Getting string inputs. Place all uinput() calls here
    num = list(map(int, uinput().split()))
    gaps = []
    prevli = list(map(int, uinput().split()))
    for i in range(num[0] - 1):
        li = list(map(int, uinput().split()))
        # Shortest/longest bridge able to span the gap between islands i
        # and i+1 (renamed from min/max to stop shadowing the builtins).
        shortest = li[0] - prevli[1]
        longest = li[1] - prevli[0]
        gaps.append((longest, shortest, i))
        prevli = li
    a = list(map(int, uinput().split()))
    alist = [(n, i) for i, n in enumerate(a)]
    # Decoding inputs into a list
    inputs = [sorted(gaps), sorted(alist), num[0] - 1]
    return inputs
def calculate(test_inputs=None):
    """ Wrapper: parse the input, run Fug's calculation, return its answer. """
    return Fug(get_inputs(test_inputs)).calculate()
###############################################################################
# Unit Tests
###############################################################################
class unitTests(unittest.TestCase):
    # Self-checks runnable with "-ut"; they exercise calculate()/get_inputs().
    def test_sample_tests(self):
        """ Quiz sample tests. Add \n to separate lines """
        # Sample test 1
        test = "4 4\n1 4\n7 8\n9 10\n12 14\n4 5 3 8"
        self.assertEqual(calculate(test), "Yes\n2 3 1")
        self.assertEqual(
            get_inputs(test),
            [[(3, 1, 1), (5, 2, 2), (7, 3, 0)],
             [(3, 2), (4, 0), (5, 1), (8, 3)], 3])
        # My tests
        test = "5 5\n1 1\n2 7\n8 8\n10 10\n16 16\n1 1 5 6 2"
        self.assertEqual(calculate(test), "Yes\n1 2 5 4")
        # Other tests
        test = "2 2\n11 14\n17 18\n2 9"
        self.assertEqual(calculate(test), "No")
        # Other tests
        test = (
            "2 1\n1 1\n1000000000000000000 1000000000000000000" +
            "\n999999999999999999")
        self.assertEqual(calculate(test), "Yes\n1")
        test = ("5 9\n1 2\n3 3\n5 7\n11 13\n14 20\n2 3 4 10 6 2 6 9 5")
        self.assertEqual(calculate(test), "Yes\n1 6 3 2")
        # Large generated stress input: only check the answer starts "Yes".
        size = 2000
        test = str(size) + " " + str(size) + "\n"
        x = size*1000
        for i in range(size):
            test += str(x) + " " + str(x + i + 1) + "\n"
            x += 2 * (i + 1)
        for i in reversed(range(size)):
            test += str(i) + " "
        self.assertEqual(calculate(test)[0], "Y")
    def test_Fug_class__basic_functions(self):
        """ Fug class basic functions testing """
        # Constructor test
        d = Fug([[(1, 3, 1), (2, 5, 2), (3, 7, 0)],
                 [(3, 2), (4, 0), (5, 1), (8, 3)], 3])
        # Sort bridges
        self.assertEqual(d.asrt[0], (3, 2))
        # Sort Gaps
        self.assertEqual(d.gsrt[0], (1, 3, 1))
if __name__ == "__main__":
    # Avoiding recursion limitations
    sys.setrecursionlimit(100000)
    # "-ut" runs the embedded unit tests instead of solving from stdin.
    if sys.argv[-1] == "-ut":
        unittest.main(argv=[" "])
    # Print the result string
    sys.stdout.write(calculate())
| |
"""PyPI and direct package downloading"""
import sys
import os
import re
import shutil
import socket
import base64
import hashlib
import itertools
from functools import wraps
from setuptools.extern import six
from setuptools.extern.six.moves import urllib, http_client, configparser, map
import setuptools
from pkg_resources import (
CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
Environment, find_distributions, safe_name, safe_version,
to_filename, Requirement, DEVELOP_DIST, EGG_DIST,
)
from setuptools import ssl_support
from distutils import log
from distutils.errors import DistutilsError
from fnmatch import translate
from setuptools.py27compat import get_all_headers
from setuptools.py33compat import unescape
from setuptools.wheel import Wheel
__metaclass__ = type  # new-style classes for all module classes on Python 2
# Matches an "egg=name" URL fragment identifying a distribution.
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$')
# Extracts href targets from HTML anchors (case-insensitive).
HREF = re.compile(r"""href\s*=\s*['"]?([^'"> ]+)""", re.I)
# Legacy PyPI listing markup pairing a download link with its MD5 digest.
PYPI_MD5 = re.compile(
    r'<a href="([^"#]+)">([^<]+)</a>\n\s+\(<a (?:title="MD5 hash"\n\s+)'
    r'href="[^?]+\?:action=show_md5&digest=([0-9a-f]{32})">md5</a>\)'
)
# Recognizes a URL scheme prefix such as "https:" or "svn+ssh:".
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match
EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
__all__ = [
    'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
    'interpret_distro_name',
]
_SOCKET_TIMEOUT = 15
# User-Agent header sent with all index/download requests.
_tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}"
user_agent = _tmpl.format(py_major=sys.version[:3], setuptools=setuptools)
def parse_requirement_arg(spec):
    """Parse *spec* as a Requirement, converting a parse failure into a
    user-facing DistutilsError."""
    try:
        return Requirement.parse(spec)
    except ValueError:
        raise DistutilsError(
            "Not a URL, existing file, or requirement spec: %r" % (spec,)
        )
def parse_bdist_wininst(name):
    """Return (base,pyversion) or (None,None) for possible .exe name"""
    lower = name.lower()
    base = py_ver = plat = None
    if lower.endswith('.exe'):
        # Two installer flavors, each with and without an embedded "-pyX.Y".
        if lower.endswith('.win32.exe'):
            base, plat = name[:-10], 'win32'
        elif lower.startswith('.win32-py', -16):
            base, py_ver, plat = name[:-16], name[-7:-4], 'win32'
        elif lower.endswith('.win-amd64.exe'):
            base, plat = name[:-14], 'win-amd64'
        elif lower.startswith('.win-amd64-py', -20):
            base, py_ver, plat = name[:-20], name[-7:-4], 'win-amd64'
    return base, py_ver, plat
def egg_info_for_url(url):
    """Split *url* into its (basename, fragment) pair for egg detection."""
    scheme, server, path, parameters, query, fragment = urllib.parse.urlparse(url)
    base = urllib.parse.unquote(path.split('/')[-1])
    if server == 'sourceforge.net' and base == 'download':  # XXX Yuck
        # SourceForge download URLs end in ".../download"; the real file
        # name is the previous path component.
        base = urllib.parse.unquote(path.split('/')[-2])
    if '#' in base:
        base, fragment = base.split('#', 1)
    return base, fragment
def distros_for_url(url, metadata=None):
    """Yield egg or source distribution objects that might be found at a URL"""
    base, fragment = egg_info_for_url(url)
    for dist in distros_for_location(url, base, metadata):
        yield dist
    if fragment:
        # An explicit "#egg=name-version" fragment also identifies a
        # checkout-precedence distribution at this URL.
        match = EGG_FRAGMENT.match(fragment)
        if match:
            for dist in interpret_distro_name(
                url, match.group(1), metadata, precedence=CHECKOUT_DIST
            ):
                yield dist
def distros_for_location(location, basename, metadata=None):
    """Yield egg or source distribution objects based on basename"""
    if basename.endswith('.egg.zip'):
        basename = basename[:-4]  # strip the .zip
    if basename.endswith('.egg') and '-' in basename:
        # only one, unambiguous interpretation
        return [Distribution.from_location(location, basename, metadata)]
    if basename.endswith('.whl') and '-' in basename:
        wheel = Wheel(basename)
        if not wheel.is_compatible():
            return []
        return [Distribution(
            location=location,
            project_name=wheel.project_name,
            version=wheel.version,
            # Increase priority over eggs.
            precedence=EGG_DIST + 1,
        )]
    if basename.endswith('.exe'):
        # A bdist_wininst installer doubles as a binary distribution.
        win_base, py_ver, platform = parse_bdist_wininst(basename)
        if win_base is not None:
            return interpret_distro_name(
                location, win_base, metadata, py_ver, BINARY_DIST, platform
            )
    # Try source distro extensions (.zip, .tgz, etc.)
    #
    for ext in EXTENSIONS:
        if basename.endswith(ext):
            basename = basename[:-len(ext)]
            return interpret_distro_name(location, basename, metadata)
    return []  # no extension matched
def distros_for_filename(filename, metadata=None):
    """Yield possible egg or source distribution objects based on a filename"""
    # Normalize the path so repeated interpretations stay stable.
    return distros_for_location(
        normalize_path(filename), os.path.basename(filename), metadata
    )
def interpret_distro_name(
        location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
        platform=None
):
    """Generate alternative interpretations of a source distro name
    Note: if `location` is a filesystem filename, you should call
    ``pkg_resources.normalize_path()`` on it before passing it to this
    routine!
    """
    # Generate alternative interpretations of a source distro name
    # Because some packages are ambiguous as to name/versions split
    # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
    # So, we generate each possible interepretation (e.g. "adns, python-1.1.0"
    # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
    # the spurious interpretations should be ignored, because in the event
    # there's also an "adns" package, the spurious "python-1.1.0" version will
    # compare lower than any numeric version number, and is therefore unlikely
    # to match a request for it. It's still a potential problem, though, and
    # in the long run PyPI and the distutils should go for "safe" names and
    # versions in distribution archive names (sdist and bdist).
    parts = basename.split('-')
    if not py_version and any(re.match(r'py\d\.\d$', p) for p in parts[2:]):
        # it is a bdist_dumb, not an sdist -- bail out
        return
    # Try every possible split point between the name and version parts.
    for p in range(1, len(parts) + 1):
        yield Distribution(
            location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
            py_version=py_version, precedence=precedence,
            platform=platform
        )
# From Python 2.7 docs
def unique_everseen(iterable, key=None):
    "List unique elements, preserving order. Remember all elements ever seen."
    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
    # unique_everseen('ABBCcAD', str.lower) --> A B C D
    seen = set()
    for element in iterable:
        # With no key the element itself is the dedup marker.
        marker = element if key is None else key(element)
        if marker not in seen:
            seen.add(marker)
            yield element
def unique_values(func):
    """
    Wrap a function returning an iterable such that the resulting iterable
    only ever yields unique items.
    """
    @wraps(func)
    def deduplicated(*args, **kwargs):
        return unique_everseen(func(*args, **kwargs))
    return deduplicated
# Matches an HTML tag carrying a rel="..." attribute (captures tag body, rel).
REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
# this line is here to fix emacs' cruddy broken syntax highlighting
@unique_values
def find_external_links(url, page):
    """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
    for match in REL.finditer(page):
        tag, rel = match.groups()
        rels = set(map(str.strip, rel.lower().split(',')))
        if 'homepage' in rels or 'download' in rels:
            for match in HREF.finditer(tag):
                yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
    # Legacy PyPI table rows carry the same information as rel links.
    for tag in ("<th>Home Page", "<th>Download URL"):
        pos = page.find(tag)
        if pos != -1:
            match = HREF.search(page, pos)
            if match:
                yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
class ContentChecker:
    """
    A null content checker that defines the interface for checking content
    """
    def feed(self, block):
        """
        Feed a block of data to the hash.
        """
        # Null checker: nothing to accumulate.
        return
    def is_valid(self):
        """
        Check the hash. Return False if validation fails.
        """
        # The null checker accepts any content unconditionally.
        return True
    def report(self, reporter, template):
        """
        Call reporter with information about the checker (hash name)
        substituted into the template.
        """
        # Nothing to report for the null checker.
        return
class HashChecker(ContentChecker):
    """Validates downloaded content against a "hashname=hexdigest" URL fragment."""
    # Recognized digest algorithms and their expected hex digest.
    pattern = re.compile(
        r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
        r'(?P<expected>[a-f0-9]+)'
    )
    def __init__(self, hash_name, expected):
        self.hash_name = hash_name
        self.hash = hashlib.new(hash_name)
        self.expected = expected
    @classmethod
    def from_url(cls, url):
        "Construct a (possibly null) ContentChecker from a URL"
        fragment = urllib.parse.urlparse(url)[-1]
        if not fragment:
            return ContentChecker()
        match = cls.pattern.search(fragment)
        if not match:
            return ContentChecker()
        return cls(**match.groupdict())
    def feed(self, block):
        """Fold *block* into the running digest."""
        self.hash.update(block)
    def is_valid(self):
        """True when the accumulated digest matches the expected hex digest."""
        return self.hash.hexdigest() == self.expected
    def report(self, reporter, template):
        """Report which hash algorithm is being validated."""
        msg = template % self.hash_name
        return reporter(msg)
class PackageIndex(Environment):
    """A distribution index that scans web pages for download URLs"""
    def __init__(
            self, index_url="https://pypi.org/simple/", hosts=('*',),
            ca_bundle=None, verify_ssl=True, *args, **kw
    ):
        Environment.__init__(self, *args, **kw)
        # Normalize to exactly one trailing slash.
        self.index_url = index_url + "/" [:not index_url.endswith('/')]
        self.scanned_urls = {}
        self.fetched_urls = {}
        self.package_pages = {}
        # Host allow-list compiled from shell-style patterns in *hosts*.
        self.allows = re.compile('|'.join(map(translate, hosts))).match
        self.to_scan = []
        use_ssl = (
            verify_ssl
            and ssl_support.is_available
            and (ca_bundle or ssl_support.find_ca_bundle())
        )
        if use_ssl:
            self.opener = ssl_support.opener_for(ca_bundle)
        else:
            self.opener = urllib.request.urlopen
    def process_url(self, url, retrieve=False):
        """Evaluate a URL as a possible download, and maybe retrieve it"""
        # NOTE: vendored conda patch — downloads are forbidden during builds.
        if os.getenv("CONDA_BUILD"):
            raise RuntimeError("Setuptools downloading is disabled in conda build. "
                               "Be sure to add all dependencies in the meta.yaml  url=%s" % url)
        if url in self.scanned_urls and not retrieve:
            return
        self.scanned_urls[url] = True
        if not URL_SCHEME(url):
            self.process_filename(url)
            return
        else:
            dists = list(distros_for_url(url))
            if dists:
                if not self.url_ok(url):
                    return
                self.debug("Found link: %s", url)
        if dists or not retrieve or url in self.fetched_urls:
            list(map(self.add, dists))
            return  # don't need the actual page
        if not self.url_ok(url):
            self.fetched_urls[url] = True
            return
        self.info("Reading %s", url)
        self.fetched_urls[url] = True  # prevent multiple fetch attempts
        tmpl = "Download error on %s: %%s -- Some packages may not be found!"
        f = self.open_url(url, tmpl % url)
        if f is None:
            return
        self.fetched_urls[f.url] = True
        if 'html' not in f.headers.get('content-type', '').lower():
            f.close()  # not html, we can't process it
            return
        base = f.url  # handle redirects
        page = f.read()
        if not isinstance(page, str):
            # In Python 3 and got bytes but want str.
            if isinstance(f, urllib.error.HTTPError):
                # Errors have no charset, assume latin1:
                charset = 'latin-1'
            else:
                charset = f.headers.get_param('charset') or 'latin-1'
            page = page.decode(charset, "ignore")
        f.close()
        # Recursively process every link found on the page.
        for match in HREF.finditer(page):
            link = urllib.parse.urljoin(base, htmldecode(match.group(1)))
            self.process_url(link)
        if url.startswith(self.index_url) and getattr(f, 'code', None) != 404:
            page = self.process_index(url, page)
    def process_filename(self, fn, nested=False):
        """Add distributions found in a local file or (one level of) directory."""
        # process filenames or directories
        if not os.path.exists(fn):
            self.warn("Not found: %s", fn)
            return
        if os.path.isdir(fn) and not nested:
            path = os.path.realpath(fn)
            for item in os.listdir(path):
                self.process_filename(os.path.join(path, item), True)
        dists = distros_for_filename(fn)
        if dists:
            self.debug("Found: %s", fn)
            list(map(self.add, dists))
    def url_ok(self, url, fatal=False):
        """True if *url*'s host is allowed; otherwise warn (or raise if fatal)."""
        s = URL_SCHEME(url)
        is_file = s and s.group(1).lower() == 'file'
        if is_file or self.allows(urllib.parse.urlparse(url)[1]):
            return True
        msg = (
            "\nNote: Bypassing %s (disallowed host; see "
            "http://bit.ly/2hrImnY for details).\n")
        if fatal:
            raise DistutilsError(msg % url)
        else:
            self.warn(msg, url)
    def scan_egg_links(self, search_path):
        """Scan every *.egg-link file found in the directories of *search_path*."""
        dirs = filter(os.path.isdir, search_path)
        egg_links = (
            (path, entry)
            for path in dirs
            for entry in os.listdir(path)
            if entry.endswith('.egg-link')
        )
        list(itertools.starmap(self.scan_egg_link, egg_links))
    def scan_egg_link(self, path, entry):
        """Register the development distribution an .egg-link file points at."""
        with open(os.path.join(path, entry)) as raw_lines:
            # filter non-empty lines
            lines = list(filter(None, map(str.strip, raw_lines)))
        if len(lines) != 2:
            # format is not recognized; punt
            return
        egg_path, setup_path = lines
        for dist in find_distributions(os.path.join(path, egg_path)):
            dist.location = os.path.join(path, *lines)
            dist.precedence = SOURCE_DIST
            self.add(dist)
    def process_index(self, url, page):
        """Process the contents of a PyPI page"""
        def scan(link):
            # Process a URL to see if it's for a package page
            if link.startswith(self.index_url):
                parts = list(map(
                    urllib.parse.unquote, link[len(self.index_url):].split('/')
                ))
                if len(parts) == 2 and '#' not in parts[1]:
                    # it's a package page, sanitize and index it
                    pkg = safe_name(parts[0])
                    ver = safe_version(parts[1])
                    self.package_pages.setdefault(pkg.lower(), {})[link] = True
                    return to_filename(pkg), to_filename(ver)
            return None, None
        # process an index page into the package-page index
        for match in HREF.finditer(page):
            try:
                scan(urllib.parse.urljoin(url, htmldecode(match.group(1))))
            except ValueError:
                pass
        pkg, ver = scan(url)  # ensure this page is in the page index
        if pkg:
            # process individual package page
            for new_url in find_external_links(url, page):
                # Process the found URL
                base, frag = egg_info_for_url(new_url)
                if base.endswith('.py') and not frag:
                    if ver:
                        new_url += '#egg=%s-%s' % (pkg, ver)
                    else:
                        self.need_version_info(url)
                self.scan_url(new_url)
            # Rewrite legacy md5-link markup into "#md5=" fragments.
            return PYPI_MD5.sub(
                lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
            )
        else:
            return ""  # no sense double-scanning non-package pages
    def need_version_info(self, url):
        """Force a full index scan when a .py link lacks version info."""
        self.scan_all(
            "Page at %s links to .py file(s) without version info; an index "
            "scan is required.", url
        )
    def scan_all(self, msg=None, *args):
        """Scan the top-level index page (at most once), warning first if asked."""
        if self.index_url not in self.fetched_urls:
            if msg:
                self.warn(msg, *args)
            self.info(
                "Scanning index of all packages (this may take a while)"
            )
        self.scan_url(self.index_url)
    def find_packages(self, requirement):
        """Scan index pages that may list distributions for *requirement*."""
        self.scan_url(self.index_url + requirement.unsafe_name + '/')
        if not self.package_pages.get(requirement.key):
            # Fall back to safe version of the name
            self.scan_url(self.index_url + requirement.project_name + '/')
        if not self.package_pages.get(requirement.key):
            # We couldn't find the target package, so search the index page too
            self.not_found_in_index(requirement)
        for url in list(self.package_pages.get(requirement.key, ())):
            # scan each page that might be related to the desired package
            self.scan_url(url)
    def obtain(self, requirement, installer=None):
        """Locate a distribution matching *requirement*, scanning the index."""
        self.prescan()
        self.find_packages(requirement)
        for dist in self[requirement.key]:
            if dist in requirement:
                return dist
            self.debug("%s does not match %s", requirement, dist)
        return super(PackageIndex, self).obtain(requirement, installer)
    def check_hash(self, checker, filename, tfp):
        """
        checker is a ContentChecker
        """
        checker.report(
            self.debug,
            "Validating %%s checksum for %s" % filename)
        if not checker.is_valid():
            # Remove the corrupt download before raising.
            tfp.close()
            os.unlink(filename)
            raise DistutilsError(
                "%s validation failed for %s; "
                "possible download problem?"
                % (checker.hash.name, os.path.basename(filename))
            )
    def add_find_links(self, urls):
        """Add `urls` to the list that will be prescanned for searches"""
        for url in urls:
            if (
                self.to_scan is None  # if we have already "gone online"
                or not URL_SCHEME(url)  # or it's a local file/directory
                or url.startswith('file:')
                or list(distros_for_url(url))  # or a direct package link
            ):
                # then go ahead and process it now
                self.scan_url(url)
            else:
                # otherwise, defer retrieval till later
                self.to_scan.append(url)
    def prescan(self):
        """Scan urls scheduled for prescanning (e.g. --find-links)"""
        if self.to_scan:
            list(map(self.scan_url, self.to_scan))
        self.to_scan = None  # from now on, go ahead and process immediately
    def not_found_in_index(self, requirement):
        """Warn appropriately and fall back to a full index scan."""
        if self[requirement.key]:  # we've seen at least one distro
            meth, msg = self.info, "Couldn't retrieve index page for %r"
        else:  # no distros seen for this name, might be misspelled
            meth, msg = (
                self.warn,
                "Couldn't find index page for %r (maybe misspelled?)")
        meth(msg, requirement.unsafe_name)
        self.scan_all()
    def download(self, spec, tmpdir):
        """Locate and/or download `spec` to `tmpdir`, returning a local path
        `spec` may be a ``Requirement`` object, or a string containing a URL,
        an existing local filename, or a project/version requirement spec
        (i.e. the string form of a ``Requirement`` object). If it is the URL
        of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
        that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
        automatically created alongside the downloaded file.
        If `spec` is a ``Requirement`` object or a string containing a
        project/version requirement spec, this method returns the location of
        a matching distribution (possibly after downloading it to `tmpdir`).
        If `spec` is a locally existing file or directory name, it is simply
        returned unchanged. If `spec` is a URL, it is downloaded to a subpath
        of `tmpdir`, and the local filename is returned. Various errors may be
        raised if a problem occurs during downloading.
        """
        if not isinstance(spec, Requirement):
            scheme = URL_SCHEME(spec)
            if scheme:
                # It's a url, download it to tmpdir
                found = self._download_url(scheme.group(1), spec, tmpdir)
                base, fragment = egg_info_for_url(spec)
                if base.endswith('.py'):
                    found = self.gen_setup(found, fragment, tmpdir)
                return found
            elif os.path.exists(spec):
                # Existing file or directory, just return it
                return spec
            else:
                spec = parse_requirement_arg(spec)
        return getattr(self.fetch_distribution(spec, tmpdir), 'location', None)
    def fetch_distribution(
            self, requirement, tmpdir, force_scan=False, source=False,
            develop_ok=False, local_index=None):
        """Obtain a distribution suitable for fulfilling `requirement`
        `requirement` must be a ``pkg_resources.Requirement`` instance.
        If necessary, or if the `force_scan` flag is set, the requirement is
        searched for in the (online) package index as well as the locally
        installed packages. If a distribution matching `requirement` is found,
        the returned distribution's ``location`` is the value you would have
        gotten from calling the ``download()`` method with the matching
        distribution's URL or filename. If no matching distribution is found,
        ``None`` is returned.
        If the `source` flag is set, only source distributions and source
        checkout links will be considered. Unless the `develop_ok` flag is
        set, development and system eggs (i.e., those using the ``.egg-info``
        format) will be ignored.
        """
        # process a Requirement
        self.info("Searching for %s", requirement)
        skipped = {}
        dist = None
        def find(req, env=None):
            if env is None:
                env = self
            # Find a matching distribution; may be called more than once
            for dist in env[req.key]:
                if dist.precedence == DEVELOP_DIST and not develop_ok:
                    if dist not in skipped:
                        self.warn(
                            "Skipping development or system egg: %s", dist,
                        )
                        skipped[dist] = 1
                    continue
                test = (
                    dist in req
                    and (dist.precedence <= SOURCE_DIST or not source)
                )
                if test:
                    loc = self.download(dist.location, tmpdir)
                    dist.download_location = loc
                    if os.path.exists(dist.download_location):
                        return dist
        if force_scan:
            self.prescan()
            self.find_packages(requirement)
            dist = find(requirement)
        if not dist and local_index is not None:
            dist = find(requirement, local_index)
        if dist is None:
            if self.to_scan is not None:
                self.prescan()
            dist = find(requirement)
        if dist is None and not force_scan:
            self.find_packages(requirement)
            dist = find(requirement)
        if dist is None:
            self.warn(
                "No local packages or working download links found for %s%s",
                (source and "a source distribution of " or ""),
                requirement,
            )
        else:
            self.info("Best match: %s", dist)
            return dist.clone(location=dist.download_location)
    def fetch(self, requirement, tmpdir, force_scan=False, source=False):
        """Obtain a file suitable for fulfilling `requirement`
        DEPRECATED; use the ``fetch_distribution()`` method now instead. For
        backward compatibility, this routine is identical but returns the
        ``location`` of the downloaded distribution instead of a distribution
        object.
        """
        dist = self.fetch_distribution(requirement, tmpdir, force_scan, source)
        if dist is not None:
            return dist.location
        return None
    def gen_setup(self, filename, fragment, tmpdir):
        """Generate a trivial setup.py next to a bare .py download, using the
        unambiguous "#egg=name-version" fragment for metadata."""
        match = EGG_FRAGMENT.match(fragment)
        dists = match and [
            d for d in
            interpret_distro_name(filename, match.group(1), None) if d.version
        ] or []
        if len(dists) == 1:  # unambiguous ``#egg`` fragment
            basename = os.path.basename(filename)
            # Make sure the file has been downloaded to the temp dir.
            if os.path.dirname(filename) != tmpdir:
                dst = os.path.join(tmpdir, basename)
                from setuptools.command.easy_install import samefile
                if not samefile(filename, dst):
                    shutil.copy2(filename, dst)
                    filename = dst
            with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
                file.write(
                    "from setuptools import setup\n"
                    "setup(name=%r, version=%r, py_modules=[%r])\n"
                    % (
                        dists[0].project_name, dists[0].version,
                        os.path.splitext(basename)[0]
                    )
                )
            return filename
        elif match:
            raise DistutilsError(
                "Can't unambiguously interpret project/version identifier %r; "
                "any dashes in the name or version should be escaped using "
                "underscores. %r" % (fragment, dists)
            )
        else:
            raise DistutilsError(
                "Can't process plain .py files without an '#egg=name-version'"
                " suffix to enable automatic setup script generation."
            )
    # Chunk size used when streaming downloads to disk.
    dl_blocksize = 8192
    def _download_to(self, url, filename):
        """Stream *url* to *filename*, feeding the hash checker as we go."""
        self.info("Downloading %s", url)
        # Download the file
        fp = None
        try:
            checker = HashChecker.from_url(url)
            fp = self.open_url(url)
            if isinstance(fp, urllib.error.HTTPError):
                raise DistutilsError(
                    "Can't download %s: %s %s" % (url, fp.code, fp.msg)
                )
            headers = fp.info()
            blocknum = 0
            bs = self.dl_blocksize
            size = -1
            if "content-length" in headers:
                # Some servers return multiple Content-Length headers :(
                sizes = get_all_headers(headers, 'Content-Length')
                size = max(map(int, sizes))
                self.reporthook(url, filename, blocknum, bs, size)
            with open(filename, 'wb') as tfp:
                while True:
                    block = fp.read(bs)
                    if block:
                        checker.feed(block)
                        tfp.write(block)
                        blocknum += 1
                        self.reporthook(url, filename, blocknum, bs, size)
                    else:
                        break
                self.check_hash(checker, filename, tfp)
            return headers
        finally:
            if fp:
                fp.close()
    def reporthook(self, url, filename, blocknum, blksize, size):
        """Progress callback hook; subclasses may override."""
        pass  # no-op
    def open_url(self, url, warning=None):
        """Open *url*, mapping transport errors to warnings or DistutilsError."""
        if url.startswith('file:'):
            return local_open(url)
        try:
            return open_with_auth(url, self.opener)
        except (ValueError, http_client.InvalidURL) as v:
            msg = ' '.join([str(arg) for arg in v.args])
            if warning:
                self.warn(warning, msg)
            else:
                raise DistutilsError('%s %s' % (url, msg))
        except urllib.error.HTTPError as v:
            # HTTP errors are returned to the caller for inspection.
            return v
        except urllib.error.URLError as v:
            if warning:
                self.warn(warning, v.reason)
            else:
                raise DistutilsError("Download error for %s: %s"
                                     % (url, v.reason))
        except http_client.BadStatusLine as v:
            if warning:
                self.warn(warning, v.line)
            else:
                raise DistutilsError(
                    '%s returned a bad status line. The server might be '
                    'down, %s' %
                    (url, v.line)
                )
        except (http_client.HTTPException, socket.error) as v:
            if warning:
                self.warn(warning, v)
            else:
                raise DistutilsError("Download error for %s: %s"
                                     % (url, v))
    def _download_url(self, scheme, url, tmpdir):
        """Dispatch the download by URL scheme (svn/git/hg/file/http...)."""
        # Determine download filename
        #
        name, fragment = egg_info_for_url(url)
        if name:
            while '..' in name:
                name = name.replace('..', '.').replace('\\', '_')
        else:
            name = "__downloaded__"  # default if URL has no path contents
        if name.endswith('.egg.zip'):
            name = name[:-4]  # strip the extra .zip before download
        filename = os.path.join(tmpdir, name)
        # Download the file
        #
        if scheme == 'svn' or scheme.startswith('svn+'):
            return self._download_svn(url, filename)
        elif scheme == 'git' or scheme.startswith('git+'):
            return self._download_git(url, filename)
        elif scheme.startswith('hg+'):
            return self._download_hg(url, filename)
        elif scheme == 'file':
            return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
        else:
            self.url_ok(url, True)  # raises error if not allowed
            return self._attempt_download(url, filename)
    def scan_url(self, url):
        """Process *url*, retrieving the page if needed."""
        self.process_url(url, True)
    def _attempt_download(self, url, filename):
        """Download *url*; if it turns out to be HTML, try to interpret it."""
        headers = self._download_to(url, filename)
        if 'html' in headers.get('content-type', '').lower():
            return self._download_html(url, headers, filename)
        else:
            return filename
    def _download_html(self, url, headers, filename):
        """Handle an HTML response: only a Subversion index page is usable."""
        file = open(filename)
        for line in file:
            if line.strip():
                # Check for a subversion index page
                if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
                    # it's a subversion index page:
                    file.close()
                    os.unlink(filename)
                    return self._download_svn(url, filename)
                break  # not an index page
        file.close()
        os.unlink(filename)
        raise DistutilsError("Unexpected HTML page found at " + url)
    def _download_svn(self, url, filename):
        """Check out *url* with the svn command-line client."""
        url = url.split('#', 1)[0]  # remove any fragment for svn's sake
        creds = ''
        if url.lower().startswith('svn:') and '@' in url:
            scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
            if not netloc and path.startswith('//') and '/' in path[2:]:
                netloc, path = path[2:].split('/', 1)
                auth, host = urllib.parse.splituser(netloc)
                if auth:
                    if ':' in auth:
                        user, pw = auth.split(':', 1)
                        creds = " --username=%s --password=%s" % (user, pw)
                    else:
                        creds = " --username=" + auth
                    netloc = host
                    parts = scheme, netloc, url, p, q, f
                    url = urllib.parse.urlunparse(parts)
        self.info("Doing subversion checkout from %s to %s", url, filename)
        os.system("svn checkout%s -q %s %s" % (creds, url, filename))
        return filename
    @staticmethod
    def _vcs_split_rev_from_url(url, pop_prefix=False):
        """Split a "scheme+real-scheme://...@rev" VCS URL into (url, rev)."""
        scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
        scheme = scheme.split('+', 1)[-1]
        # Some fragment identification fails
        path = path.split('#', 1)[0]
        rev = None
        if '@' in path:
            path, rev = path.rsplit('@', 1)
        # Also, discard fragment
        url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))
        return url, rev
    def _download_git(self, url, filename):
        """Clone *url* with git, checking out the pinned revision if given."""
        filename = filename.split('#', 1)[0]
        url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
        self.info("Doing git clone from %s to %s", url, filename)
        os.system("git clone --quiet %s %s" % (url, filename))
        if rev is not None:
            self.info("Checking out %s", rev)
            os.system("(cd %s && git checkout --quiet %s)" % (
                filename,
                rev,
            ))
        return filename
    def _download_hg(self, url, filename):
        """Clone *url* with mercurial, updating to the pinned revision if given."""
        filename = filename.split('#', 1)[0]
        url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
        self.info("Doing hg clone from %s to %s", url, filename)
        os.system("hg clone --quiet %s %s" % (url, filename))
        if rev is not None:
            self.info("Updating to %s", rev)
            os.system("(cd %s && hg up -C -r %s -q)" % (
                filename,
                rev,
            ))
        return filename
    # Logging helpers delegating to distutils' log module.
    def debug(self, msg, *args):
        log.debug(msg, *args)
    def info(self, msg, *args):
        log.info(msg, *args)
    def warn(self, msg, *args):
        log.warn(msg, *args)
# This pattern matches a character entity reference (a decimal numeric
# references, a hexadecimal numeric reference, or a named reference).
# Pre-bound .sub so htmldecode() substitutes every entity in one pass.
entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
def decode_entity(match):
    """Decode one matched HTML character entity into its text equivalent."""
    what = match.group(0)
    return unescape(what)
def htmldecode(text):
    """
    Decode HTML entities in the given text.
    >>> htmldecode(
    ...     'https://../package_name-0.1.2.tar.gz'
    ...     '?tokena=A&tokenb=B">package_name-0.1.2.tar.gz')
    'https://../package_name-0.1.2.tar.gz?tokena=A&tokenb=B">package_name-0.1.2.tar.gz'
    """
    # Each entity matched by entity_sub is translated by decode_entity,
    # presumably via html.unescape -- confirm the 'unescape' import above.
    return entity_sub(decode_entity, text)
def socket_timeout(timeout=15):
    """
    Decorator factory: while the wrapped callable runs, the global default
    socket timeout is set to *timeout* seconds; the previous default is
    restored afterwards, even if the call raises.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            saved = socket.getdefaulttimeout()
            socket.setdefaulttimeout(timeout)
            try:
                return func(*args, **kwargs)
            finally:
                socket.setdefaulttimeout(saved)
        return wrapper
    return decorator
def _encode_auth(auth):
"""
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False
"""
auth_s = urllib.parse.unquote(auth)
# convert to bytes
auth_bytes = auth_s.encode()
# use the legacy interface for Python 2.3 support
encoded_bytes = base64.encodestring(auth_bytes)
# convert back to a string
encoded = encoded_bytes.decode()
# strip the trailing carriage return
return encoded.replace('\n', '')
class Credential:
    """
    A username/password pair. Use like a namedtuple.
    """

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __iter__(self):
        # Unpacks as (username, password), namedtuple-style.
        return iter((self.username, self.password))

    def __str__(self):
        return '%(username)s:%(password)s' % vars(self)
class PyPIConfig(configparser.RawConfigParser):
    """Expose credentials from ~/.pypirc keyed by repository URL."""

    def __init__(self):
        """
        Load from ~/.pypirc
        """
        defaults = dict.fromkeys(['username', 'password', 'repository'], '')
        configparser.RawConfigParser.__init__(self, defaults)

        rc = os.path.join(os.path.expanduser('~'), '.pypirc')
        if os.path.exists(rc):
            self.read(rc)

    @property
    def creds_by_repository(self):
        # Only sections that declare a repository URL are considered.
        return dict(
            self._get_repo_cred(section)
            for section in self.sections()
            if self.get(section, 'repository').strip()
        )

    def _get_repo_cred(self, section):
        repo = self.get(section, 'repository').strip()
        username = self.get(section, 'username').strip()
        password = self.get(section, 'password').strip()
        return repo, Credential(username, password)

    def find_credential(self, url):
        """
        If the URL indicated appears to be a repository defined in this
        config, return the credential for that repository.
        """
        for repository, cred in self.creds_by_repository.items():
            if url.startswith(repository):
                return cred
def open_with_auth(url, opener=urllib.request.urlopen):
    """Open a urllib2 request, handling HTTP authentication"""
    scheme, netloc, path, params, query, frag = urllib.parse.urlparse(url)
    # Double scheme does not raise on Mac OS X as revealed by a
    # failing test. We would expect "nonnumeric port". Refs #20.
    if netloc.endswith(':'):
        raise http_client.InvalidURL("nonnumeric port: ''")
    # Only try to extract user:password@ credentials for HTTP(S) URLs.
    # NOTE(review): 'host' is only bound on this branch; if a non-HTTP(S)
    # URL ever matches a .pypirc credential below, the 'parts' line in the
    # auth branch would raise NameError -- confirm that path is unreachable.
    # Also, urllib.parse.splituser is deprecated in modern Python.
    if scheme in ('http', 'https'):
        auth, host = urllib.parse.splituser(netloc)
    else:
        auth = None
    if not auth:
        # No credentials in the URL itself: fall back to ~/.pypirc entries
        # whose repository URL is a prefix of this URL.
        cred = PyPIConfig().find_credential(url)
        if cred:
            auth = str(cred)
            info = cred.username, url
            log.info('Authenticating as %s for %s (from .pypirc)', *info)
    if auth:
        auth = "Basic " + _encode_auth(auth)
        # Strip the credentials from the URL; they travel in the
        # Authorization header instead.
        parts = scheme, host, path, params, query, frag
        new_url = urllib.parse.urlunparse(parts)
        request = urllib.request.Request(new_url)
        request.add_header("Authorization", auth)
    else:
        request = urllib.request.Request(url)
    request.add_header('User-Agent', user_agent)
    fp = opener(request)
    if auth:
        # Put authentication info back into request URL if same host,
        # so that links found on the page will work
        s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url)
        if s2 == scheme and h2 == host:
            parts = s2, netloc, path2, param2, query2, frag2
            fp.url = urllib.parse.urlunparse(parts)
    return fp
# adding a timeout to avoid freezing package_index
# (rebinds the module-level name so every caller gets the wrapped version)
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
def fix_sf_url(url):
    """Return *url* unchanged; retained only for backward compatibility."""
    return url
def local_open(url):
    """Read a local path, with special support for directories"""
    scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
    filename = urllib.request.url2pathname(path)
    if os.path.isfile(filename):
        return urllib.request.urlopen(url)
    elif path.endswith('/') and os.path.isdir(filename):
        files = []
        for f in os.listdir(filename):
            filepath = os.path.join(filename, f)
            if f == 'index.html':
                # An index.html in the directory wins outright; the break
                # also skips the for/else listing body below.
                with open(filepath, 'r') as fp:
                    body = fp.read()
                break
            elif os.path.isdir(filepath):
                f += '/'
            files.append('<a href="{name}">{name}</a>'.format(name=f))
        else:
            # No index.html found: synthesize a minimal directory listing.
            tmpl = (
                "<html><head><title>{url}</title>"
                "</head><body>{files}</body></html>")
            body = tmpl.format(url=url, files='\n'.join(files))
        status, message = 200, "OK"
    else:
        status, message, body = 404, "Path not found", "Not found"
    headers = {'content-type': 'text/html'}
    body_stream = six.StringIO(body)
    # HTTPError doubles as a file-like response object here; it is
    # deliberately *returned* (even for status 200), not raised.
    return urllib.error.HTTPError(url, status, message, headers, body_stream)
| |
from datetime import datetime, timedelta
import json
from rdr_service.clock import FakeClock
from rdr_service import clock
from rdr_service.code_constants import *
from rdr_service.dao.biobank_order_dao import BiobankOrderDao
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.dao.physical_measurements_dao import PhysicalMeasurementsDao
from rdr_service.model.biobank_order import BiobankOrder, BiobankOrderIdentifier, BiobankOrderedSample
from rdr_service.model.biobank_stored_sample import BiobankStoredSample
from rdr_service.model.hpo import HPO
from rdr_service.model.measurements import PhysicalMeasurements
from rdr_service.model.site import Site
from rdr_service.participant_enums import WithdrawalAIANCeremonyStatus
from tests.test_data import load_measurement_json
from tests.helpers.unittest_base import BaseTestCase, PDRGeneratorTestMixin
class BigQuerySyncDaoTest(BaseTestCase, PDRGeneratorTestMixin):
    """
    Exercises the PDR/BigQuery participant-summary generator against
    participants built up through the API: primary/EHR/DV-EHR/GROR consents,
    module responses, physical measurements and biobank orders.
    """
    TIME_1 = datetime(2018, 9, 20, 5, 49, 11)
    TIME_2 = datetime(2018, 9, 24, 14, 21, 1)
    TIME_3 = datetime(2018, 9, 25, 12, 25, 30)

    # Fixtures shared across tests; populated in setUp and the helpers.
    site = None
    hpo = None
    summary = None
    pm_json = None
    pm = None
    bio_order = None

    # Questionnaire ids, created lazily (and cached) by the _submit_*
    # helpers so each questionnaire is only built once.
    qn_thebasics_id = None
    qn_ehrconsent_id = None
    qn_dvehrconsent_id = None
    qn_lifestyle_id = None
    qn_overall_health_id = None
    qn_gror_id = None

    def setUp(self):
        """Create a participant paired to the PITT HPO at TIME_1."""
        super(BigQuerySyncDaoTest, self).setUp(with_consent_codes=True)

        self.dao = ParticipantDao()
        with self.dao.session() as session:
            self.site = session.query(Site).filter(Site.googleGroup == 'hpo-site-monroeville').first()
            self.hpo = session.query(HPO).filter(HPO.name == 'PITT').first()
            self.provider_link = {
                "primary": True, "organization": {"display": None, "reference": "Organization/PITT"}}

        with clock.FakeClock(self.TIME_1):
            self.participant = self.create_participant(self.provider_link)
            self.participant_id = int(self.participant['participantId'].replace('P', ''))
            self.biobank_id = int(self.participant['biobankId'].replace('Z', ''))

    def create_participant(self, provider_link=None):
        """POST a new participant, optionally paired via *provider_link*."""
        if provider_link:
            provider_link = {"providerLink": [provider_link]}
        else:
            provider_link = {}
        response = self.send_post("Participant", provider_link)
        return response

    def _submit_ehrconsent(self, participant_id, response_code=CONSENT_PERMISSION_YES_CODE, response_time=None):
        """ Submit the EHRConsent questionnaire """
        if not self.qn_ehrconsent_id:
            self.qn_ehrconsent_id = self.create_questionnaire("ehr_consent_questionnaire.json")

        code_answers = list()
        code_answers.append(self.make_code_answer('ehrConsent', response_code))

        qr = self.make_questionnaire_response_json(participant_id, self.qn_ehrconsent_id,
                                                   code_answers=code_answers)
        with FakeClock(response_time or self.TIME_1):
            self.send_post(f"Participant/P{participant_id}/QuestionnaireResponse", qr)

    def _submit_ehrconsent_expired(self, participant_id, response_code=CONSENT_PERMISSION_NO_CODE, response_time=None):
        """ Submit the EHRConsent questionnaire with the expiration answer set """
        if not self.qn_ehrconsent_id:
            self.qn_ehrconsent_id = self.create_questionnaire("ehr_consent_questionnaire.json")

        code_answers = []
        code_answers.append(self.make_code_answer('ehrConsent', response_code))

        qr_json = self.make_questionnaire_response_json(
            participant_id,
            self.qn_ehrconsent_id,
            string_answers=[['ehrConsentExpired', 'EHRConsentPII_ConsentExpired_Yes']],
            code_answers=code_answers,
            authored=response_time if response_time else self.TIME_1
        )
        with FakeClock(response_time or self.TIME_1):
            self.send_post(f"Participant/P{participant_id}/QuestionnaireResponse", qr_json)

    def _submit_dvehrconsent(self, participant_id, response_code=DVEHRSHARING_CONSENT_CODE_YES, response_time=None):
        """ Submit the DVEHRConsent questionnaire """
        if not self.qn_dvehrconsent_id:
            self.qn_dvehrconsent_id = self.create_questionnaire("dv_ehr_share_consent_questionnaire.json")

        code_answers = list()
        code_answers.append(self.make_code_answer(DVEHR_SHARING_QUESTION_CODE, response_code))

        # Fixed: build the response for the participant passed in (this
        # previously used self.participant_id, ignoring the argument).
        qr = self.make_questionnaire_response_json(participant_id, self.qn_dvehrconsent_id,
                                                   code_answers=code_answers)

        with FakeClock(response_time or self.TIME_1):
            self.send_post(f"Participant/P{participant_id}/QuestionnaireResponse", qr)

    def _submit_thebasics(self, participant_id):
        """ Submit the TheBasics questionnaire """
        if not self.qn_thebasics_id:
            self.qn_thebasics_id = self.create_questionnaire("questionnaire3.json")

        string_answers = list()
        string_answers.append(('firstName', 'John'))
        string_answers.append(('lastName', 'Doe'))

        # Fixed: use the participant_id argument, not self.participant_id.
        qr = self.make_questionnaire_response_json(participant_id, self.qn_thebasics_id,
                                                   string_answers=string_answers)

        with FakeClock(self.TIME_1):
            self.send_post(f"Participant/P{participant_id}/QuestionnaireResponse", qr)

    def _submit_lifestyle(self, participant_id):
        """ Submit the LifeStyle questionnaire """
        if not self.qn_lifestyle_id:
            self.qn_lifestyle_id = self.create_questionnaire("questionnaire4.json")

        code_answers = list()
        code_answers.append(self.make_code_answer('state', UNSET))

        # Fixed: use the participant_id argument, not self.participant_id.
        qr = self.make_questionnaire_response_json(participant_id, self.qn_lifestyle_id,
                                                   code_answers=code_answers)

        with FakeClock(self.TIME_1):
            self.send_post(f"Participant/P{participant_id}/QuestionnaireResponse", qr)

    def _submit_overall_health(self, participant_id):
        """ Submit the OverallHealth questionnaire """
        if not self.qn_overall_health_id:
            self.qn_overall_health_id = self.create_questionnaire("questionnaire_overall_health.json")

        code_answers = list()
        code_answers.append(self.make_code_answer('physicalHealth', UNSET))

        # Fixed: use the participant_id argument, not self.participant_id.
        qr = self.make_questionnaire_response_json(participant_id, self.qn_overall_health_id,
                                                   code_answers=code_answers)

        with FakeClock(self.TIME_1):
            self.send_post(f"Participant/P{participant_id}/QuestionnaireResponse", qr)

    def _submit_genomics_ror(self, participant_id, consent_response=CONSENT_GROR_YES_CODE, response_time=None):
        """ Submit the Genomics ROR questionnaire """
        if not self.qn_gror_id:
            self.qn_gror_id = self.create_questionnaire("consent_for_genomic_ror_question.json")

        code_answers = list()
        code_answers.append(self.make_code_answer('genomic_consent', consent_response))

        # Fixed: use the participant_id argument, not self.participant_id.
        qr = self.make_questionnaire_response_json(participant_id, self.qn_gror_id, code_answers=code_answers)

        with FakeClock(response_time or self.TIME_1):
            self.send_post(f"Participant/P{participant_id}/QuestionnaireResponse", qr)

    def _make_physical_measurements(self, **kwargs):
        """Makes a new PhysicalMeasurements (same values every time) with valid/complete defaults.
        Kwargs pass through to PM constructor, overriding defaults.
        """
        for k, default_value in (
                ('physicalMeasurementsId', 1),
                ('participantId', self.participant_id),

                ('createdSiteId', self.site.siteId),
                ('finalizedSiteId', self.site.siteId)):
            if k not in kwargs:
                kwargs[k] = default_value

        record = PhysicalMeasurements(**kwargs)
        PhysicalMeasurementsDao.store_record_fhir_doc(record, self.pm_json)
        return record

    def _make_biobank_order(self, **kwargs):
        """Makes a new BiobankOrder (same values every time) with valid/complete defaults.
        Kwargs pass through to BiobankOrder constructor, overriding defaults.

        Also inserts a matching BiobankStoredSample so the participant has a
        confirmed stored sample on record.
        """
        for k, default_value in (
                ('biobankOrderId', '1'),
                ('created', clock.CLOCK.now()),
                ('participantId', self.participant_id),
                ('sourceSiteId', 1),
                ('sourceUsername', 'fred@pmi-ops.org'),
                ('collectedSiteId', 1),
                ('collectedUsername', 'joe@pmi-ops.org'),
                ('processedSiteId', 1),
                ('processedUsername', 'sue@pmi-ops.org'),
                ('finalizedSiteId', 2),
                ('finalizedUsername', 'bob@pmi-ops.org'),
                ('identifiers', [BiobankOrderIdentifier(system='https://www.pmi-ops.org', value='123456789')]),
                ('samples', [BiobankOrderedSample(
                    biobankOrderId='1',
                    test='1ED04',
                    description=u'description',
                    finalized=self.TIME_1,
                    processingRequired=True)])):
            if k not in kwargs:
                kwargs[k] = default_value

        biobank_order = BiobankOrder(**kwargs)

        bss = BiobankStoredSample()
        bss.biobankId = self.biobank_id
        bss.test = '1ED04'
        bss.biobankOrderIdentifier = '123456789'
        bss.confirmed = self.TIME_2
        bss.created = self.TIME_2
        bss.biobankStoredSampleId = 'I11111111'
        bss.family_id = 'F11111111'

        with self.dao.session() as session:
            session.add(bss)

        # NOTE: the order itself is returned un-inserted; callers insert it
        # through BiobankOrderDao.
        return biobank_order

    def test_registered_participant_gen(self):
        """ Test a BigQuery after initial participant creation """
        ps_json = self.make_bq_participant_summary(self.participant_id)
        self.assertIsNotNone(ps_json)
        self.assertEqual(ps_json['enrollment_status'], 'REGISTERED')

    def test_interested_participant_gen(self):
        """ Basic Participant Creation Test"""
        self.send_consent(self.participant_id)

        ps_json = self.make_bq_participant_summary(self.participant_id)

        self.assertIsNotNone(ps_json)
        self.assertEqual(ps_json.get('sign_up_time', None),
                         self.TIME_1.strftime("%Y-%m-%dT%H:%M:%S"))
        self.assertEqual(ps_json.get('suspension_status', None), 'NOT_SUSPENDED')
        # Fixed: the original call passed 'NOT_WITHDRAWN' as the assertEqual
        # *message* argument, making the assertion vacuous.
        # TODO confirm generator key name ('withdrawal_status' vs 'withdrawn_status')
        self.assertEqual(ps_json.get('withdrawal_status', None), 'NOT_WITHDRAWN')
        self.assertEqual(ps_json.get('enrollment_status', None), 'PARTICIPANT')

    def test_member_participant_status(self):
        """ Member Participant Test"""
        # set up questionnaires to hit the calculate_max_core_sample_time in participant summary
        self.send_consent(self.participant_id)
        self._submit_ehrconsent(self.participant_id)

        ps_json = self.make_bq_participant_summary(self.participant_id)

        self.assertIsNotNone(ps_json)
        self.assertEqual(ps_json['enrollment_status'], 'FULLY_CONSENTED')

    def _set_up_participant_data(self, fake_time=None, skip_ehr=False):
        """Advance the test participant through consents, modules, physical
        measurements and a biobank order (Core-eligible unless skip_ehr)."""
        # set up questionnaires to hit the calculate_max_core_sample_time in participant summary
        with clock.FakeClock(fake_time or self.TIME_1):
            self.send_consent(self.participant_id)
            if not skip_ehr:
                self._submit_ehrconsent(self.participant_id)
            self._submit_lifestyle(self.participant_id)
            self._submit_thebasics(self.participant_id)
            self._submit_overall_health(self.participant_id)

            self.pm_json = json.dumps(load_measurement_json(self.participant_id, self.TIME_2.isoformat()))
            self.pm = PhysicalMeasurementsDao().insert(self._make_physical_measurements())

            self.dao = BiobankOrderDao()
            self.bio_order = BiobankOrderDao().insert(
                self._make_biobank_order(participantId=self.participant_id))

    def test_full_participant_status(self):
        """ Full Participant Test"""
        self._set_up_participant_data()

        ps_json = self.make_bq_participant_summary(self.participant_id)

        self.assertIsNotNone(ps_json)
        self.assertEqual('COHORT_2', ps_json['consent_cohort'], 'Test is built assuming cohort 2')
        self.assertEqual(ps_json['pm'][0]['pm_finalized_site'], 'hpo-site-monroeville')
        self.assertEqual(ps_json['pm'][0]['pm_status'], 'COMPLETED')
        self.assertEqual(ps_json['enrollment_status'], 'CORE_PARTICIPANT')

    def test_ehr_consent_expired_for_full_consent_participant(self):
        """EHR consent expiration should downgrade FULLY_CONSENTED to PARTICIPANT."""
        p_response = self.create_participant(self.provider_link)
        p_id = int(p_response['participantId'].replace('P', ''))
        self.send_consent(p_id, authored=self.TIME_1)
        self._submit_ehrconsent(p_id, response_time=self.TIME_1)

        ps_json = self.make_bq_participant_summary(p_id)
        self.assertIsNotNone(ps_json)
        self.assertEqual(ps_json['enrollment_status'], 'FULLY_CONSENTED')

        # send ehr consent expired response
        self._submit_ehrconsent_expired(p_id, response_time=self.TIME_2)
        ps_json = self.make_bq_participant_summary(p_id)
        self.assertIsNotNone(ps_json)
        # downgrade FULLY_CONSENTED to PARTICIPANT
        self.assertEqual(ps_json['enrollment_status'], 'PARTICIPANT')

    def test_ehr_consent_expired_for_core_participant(self):
        """EHR consent expiration must not revoke CORE status."""
        self._set_up_participant_data(fake_time=self.TIME_1)

        ps_json = self.make_bq_participant_summary(self.participant_id)
        self.assertIsNotNone(ps_json)
        self.assertEqual('COHORT_2', ps_json['consent_cohort'], 'Test is built assuming cohort 2')
        self.assertEqual(ps_json['pm'][0]['pm_finalized_site'], 'hpo-site-monroeville')
        self.assertEqual(ps_json['pm'][0]['pm_status'], 'COMPLETED')
        self.assertEqual(ps_json['enrollment_status'], 'CORE_PARTICIPANT')

        # send ehr consent expired response
        self._submit_ehrconsent_expired(self.participant_id, response_time=self.TIME_3)
        ps_json = self.make_bq_participant_summary(self.participant_id)
        self.assertIsNotNone(ps_json)
        # once CORE, always CORE
        self.assertEqual(ps_json['enrollment_status'], 'CORE_PARTICIPANT')

    def test_cohort_3_without_gror(self):
        """Cohort 3 participants need GROR consent to reach CORE."""
        self._set_up_participant_data(fake_time=datetime(2020, 6, 1))

        ps_json = self.make_bq_participant_summary(self.participant_id)

        self.assertIsNotNone(ps_json)
        self.assertEqual('COHORT_3', ps_json['consent_cohort'], 'Test is built assuming cohort 3')
        self.assertEqual('FULLY_CONSENTED', ps_json['enrollment_status'])

    def test_cohort_3_with_gror(self):
        """Cohort 3 participants with GROR consent reach CORE."""
        self._set_up_participant_data(fake_time=datetime(2020, 6, 1))
        self._submit_genomics_ror(self.participant_id)

        ps_json = self.make_bq_participant_summary(self.participant_id)

        self.assertIsNotNone(ps_json)
        self.assertEqual('COHORT_3', ps_json['consent_cohort'], 'Test is built assuming cohort 3')
        self.assertEqual('CORE_PARTICIPANT', ps_json['enrollment_status'])

    def test_participant_stays_core(self):
        """Revoking GROR consent must not revoke CORE status."""
        self._set_up_participant_data(fake_time=datetime(2020, 5, 1))
        self._submit_genomics_ror(self.participant_id,
                                  consent_response=CONSENT_GROR_YES_CODE,
                                  response_time=datetime(2020, 7, 1))

        ps_json = self.make_bq_participant_summary(self.participant_id)
        self.assertEqual('COHORT_3', ps_json['consent_cohort'], 'Test is built assuming cohort 3')
        self.assertEqual('CORE_PARTICIPANT', ps_json['enrollment_status'],
                         'Test is built assuming participant starts as core')

        # Send an update to remove GROR consent and make sure participant is still CORE
        self._submit_genomics_ror(self.participant_id,
                                  consent_response=CONSENT_GROR_NO_CODE,
                                  response_time=datetime(2020, 9, 1))

        ps_json = self.make_bq_participant_summary(self.participant_id)
        self.assertEqual('CORE_PARTICIPANT', ps_json['enrollment_status'])
        # This verifies the module submitted status from the participant generator data for each of the GROR modules
        # Also checks that an external id key/value pair exists (but value likely None for test data modules)
        gror_modules = self.get_generated_items(ps_json['modules'], item_key='mod_module', item_value='GROR',
                                                sort_key='mod_authored')
        self.assertIn('mod_external_id', gror_modules[0])
        self.assertEqual('SUBMITTED', gror_modules[0]['mod_status'])
        self.assertEqual('SUBMITTED_NO_CONSENT', gror_modules[1]['mod_status'])

    def test_previous_ehr_and_dv_ehr_reverted(self):
        # Scenario: a participant previously reached core participant status with EHR and DV EHR consent both YES
        # If EHR consent is changed to No, they should remain Core
        self._set_up_participant_data(skip_ehr=True)
        ps_json = self.make_bq_participant_summary(self.participant_id)
        self.assertEqual('COHORT_2', ps_json['consent_cohort'],
                         'Test is built assuming cohort 2 (and that GROR consent is not required for Core status')
        self.assertNotEqual('CORE_PARTICIPANT', ps_json['enrollment_status'],
                            'Test is built assuming participant does not initialize as Core')

        # Get Core status through EHR consents
        self._submit_ehrconsent(self.participant_id,
                                response_code=CONSENT_PERMISSION_YES_CODE,
                                response_time=datetime(2019, 2, 14))
        self._submit_dvehrconsent(self.participant_id, response_time=datetime(2019, 4, 1))
        ps_json = self.make_bq_participant_summary(self.participant_id)
        self.assertEqual('CORE_PARTICIPANT', ps_json['enrollment_status'],
                         'Test is built assuming participant achieves Core status')

        # Send an update to remove EHR consent and make sure participant is still CORE
        self._submit_ehrconsent(self.participant_id,
                                response_code=CONSENT_PERMISSION_NO_CODE,
                                response_time=datetime(2019, 7, 1))
        ps_json = self.make_bq_participant_summary(self.participant_id)
        self.assertEqual('CORE_PARTICIPANT', ps_json['enrollment_status'])
        # This verifies the module submitted status from the participant generator data for ehr modules
        # Also checks that an external id key/value pair exists (but value likely None for test data modules)
        ehr_modules = self.get_generated_items(ps_json['modules'], item_key='mod_module', item_value='EHRConsentPII',
                                               sort_key="mod_authored")
        self.assertIn('mod_external_id', ehr_modules[0])
        self.assertEqual('SUBMITTED', ehr_modules[0]['mod_status'])
        self.assertEqual('SUBMITTED_NO_CONSENT', ehr_modules[1]['mod_status'])

    def test_no_on_ehr_overrides_yes_on_dv(self):
        # Scenario: a participant has had DV_EHR yes, but previously had a no on EHR.
        # No on EHR should supersede a yes on DV_EHR.
        self._set_up_participant_data(skip_ehr=True)
        ps_json = self.make_bq_participant_summary(self.participant_id)
        self.assertEqual('COHORT_2', ps_json['consent_cohort'],
                         'Test is built assuming cohort 2 (and that GROR consent is not required for Core status')

        self._submit_ehrconsent(self.participant_id,
                                response_code=CONSENT_PERMISSION_NO_CODE,
                                response_time=datetime(2019, 2, 14))
        self._submit_dvehrconsent(self.participant_id, response_time=datetime(2019, 4, 1))
        ps_json = self.make_bq_participant_summary(self.participant_id)
        self.assertEqual('PARTICIPANT', ps_json['enrollment_status'])

    def test_ehr_consent_expired_and_renewed(self):
        """Verify consent_expired tracking across expire/renew responses."""
        self._set_up_participant_data(fake_time=self.TIME_1)
        # send ehr consent expired response
        self._submit_ehrconsent_expired(self.participant_id, response_time=self.TIME_2)
        # send a new ehr consent (renewal/reconsent)
        self._submit_ehrconsent(self.participant_id,
                                response_code=CONSENT_PERMISSION_YES_CODE,
                                response_time=self.TIME_3)
        ps_json = self.make_bq_participant_summary(self.participant_id)
        self.assertIsNotNone(ps_json)
        ehr_consents = self.get_generated_items(ps_json['consents'], item_key='consent_module',
                                                item_value='EHRConsentPII', sort_key='consent_module_authored')
        # Confirm a total of three EHR Consent responses
        self.assertEqual(len(ehr_consents), 3)
        # Verify the initial EHR consent details (sent by _set_up_participant_data)
        self.assertEqual(ehr_consents[0].get('consent_module_authored', None),
                         self.TIME_1.strftime("%Y-%m-%dT%H:%M:%S"))
        self.assertEqual(ehr_consents[0].get('consent_value', None), CONSENT_PERMISSION_YES_CODE)
        # This field should be None for consent payloads that don't contain the expiration hidden question code
        self.assertIsNone(ehr_consents[0].get('consent_expired', ''))
        # Verify the expired consent response details (contains the hidden expiration question code / answer value)
        self.assertEqual(ehr_consents[1].get('consent_module_authored', None),
                         self.TIME_2.strftime("%Y-%m-%dT%H:%M:%S"))
        self.assertEqual(ehr_consents[1].get('consent_value', None), CONSENT_PERMISSION_NO_CODE)
        self.assertEqual(ehr_consents[1].get('consent_expired', None), EHR_CONSENT_EXPIRED_YES)
        # Verify the last EHR consent renewal; 'consent_expired' value should not be carried forward from last consent
        self.assertEqual(ehr_consents[2].get('consent_module_authored', None),
                         self.TIME_3.strftime("%Y-%m-%dT%H:%M:%S"))
        self.assertEqual(ehr_consents[2].get('consent_value', None), CONSENT_PERMISSION_YES_CODE)
        # This field should be None for consent payloads that don't contain the expiration hidden question code
        self.assertIsNone(ehr_consents[2].get('consent_expired', ''))

    def test_ceremony_decision_fields(self):
        """Verify AIAN withdrawal ceremony status fields for withdrawn participants."""
        # Set up data for different scenarios of withdrawn participants
        # Clearing microseconds to avoid rounding time up in database and causing test to fail
        two_days_ago = datetime.today().replace(microsecond=0) - timedelta(days=2)
        withdrawal_reason_justification = 'testing withdrawal'
        no_ceremony_native_american_participant = self.data_generator.create_withdrawn_participant(
            withdrawal_reason_justification=withdrawal_reason_justification,
            is_native_american=True,
            requests_ceremony=WithdrawalAIANCeremonyStatus.DECLINED,
            withdrawal_time=two_days_ago
        )
        ceremony_native_american_participant = self.data_generator.create_withdrawn_participant(
            withdrawal_reason_justification=withdrawal_reason_justification,
            is_native_american=True,
            requests_ceremony=WithdrawalAIANCeremonyStatus.REQUESTED,
            withdrawal_time=two_days_ago
        )
        # Non-AIAN should not have been presented with a ceremony choice
        non_native_american_participant = self.data_generator.create_withdrawn_participant(
            withdrawal_reason_justification=withdrawal_reason_justification,
            is_native_american=False,
            requests_ceremony=None,
            withdrawal_time=two_days_ago
        )

        ps_bqs_data = self.make_bq_participant_summary(no_ceremony_native_american_participant.participantId)
        self.assertEqual(ps_bqs_data.get('withdrawal_aian_ceremony_status'),
                         str(WithdrawalAIANCeremonyStatus.DECLINED))
        self.assertEqual(ps_bqs_data.get('withdrawal_aian_ceremony_status_id'),
                         int(WithdrawalAIANCeremonyStatus.DECLINED))

        ps_bqs_data = self.make_bq_participant_summary(ceremony_native_american_participant.participantId)
        self.assertEqual(ps_bqs_data.get('withdrawal_aian_ceremony_status'),
                         str(WithdrawalAIANCeremonyStatus.REQUESTED))
        self.assertEqual(ps_bqs_data.get('withdrawal_aian_ceremony_status_id'),
                         int(WithdrawalAIANCeremonyStatus.REQUESTED))

        ps_bqs_data = self.make_bq_participant_summary(non_native_american_participant.participantId)
        self.assertEqual(ps_bqs_data.get('withdrawal_aian_ceremony_status'),
                         str(WithdrawalAIANCeremonyStatus.UNSET))
        self.assertEqual(ps_bqs_data.get('withdrawal_aian_ceremony_status_id'),
                         int(WithdrawalAIANCeremonyStatus.UNSET))
| |
# -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module is responsible to capture the compiler invocation of any
build process. The result of that should be a compilation database.
This implementation is using the LD_PRELOAD or DYLD_INSERT_LIBRARIES
mechanisms provided by the dynamic linker. The related library is implemented
in C language and can be found under 'libear' directory.
The 'libear' library is capturing all child process creation and logging the
relevant information about it into separate files in a specified directory.
The parameter of this process is the output directory name, where the report
files shall be placed. This parameter is passed as an environment variable.
The module also implements compiler wrappers to intercept the compiler calls.
The module implements the build command execution and the post-processing of
the output files, which are condensed into a compilation database. """
import sys
import os
import os.path
import re
import itertools
import json
import glob
import argparse
import logging
import subprocess
from libear import build_libear, TemporaryDirectory
from libscanbuild import command_entry_point
from libscanbuild import duplicate_check, tempdir, initialize_logging
from libscanbuild.compilation import split_command
from libscanbuild.shell import encode, decode
__all__ = ['capture', 'intercept_build_main', 'intercept_build_wrapper']

# ASCII group / record / unit separator control characters. They delimit,
# respectively: whole exec records, fields within a record, and argv
# elements within a record's command field in the exec-trace files.
GS = chr(0x1d)
RS = chr(0x1e)
US = chr(0x1f)

# Basenames of the compiler wrapper executables used when library
# preloading is unavailable (see setup_environment / intercept_build_wrapper).
COMPILER_WRAPPER_CC = 'intercept-cc'
COMPILER_WRAPPER_CXX = 'intercept-c++'
@command_entry_point
def intercept_build_main(bin_dir):
    """ Entry point for 'intercept-build' command. """

    parser = create_parser()
    args = parser.parse_args()

    initialize_logging(args.verbose)
    logging.debug('Parsed arguments: %s', args)

    if args.build:
        return capture(args, bin_dir)

    # No build command was given: show usage and exit successfully.
    parser.print_help()
    return 0
def capture(args, bin_dir):
    """ The entry point of build command interception.

    Runs the build command in an instrumented environment, collects the
    intercepted exec traces and writes the compilation database (args.cdb).
    Returns the build command's exit code. """
    def post_processing(commands):
        """ To make a compilation database, it needs to filter out commands
        which are not compiler calls. Needs to find the source file name
        from the arguments. And do shell escaping on the command.
        To support incremental builds, it is desired to read elements from
        an existing compilation database from a previous run. These elements
        shall be merged with the new elements. """
        # create entries from the current run
        current = itertools.chain.from_iterable(
            # creates a sequence of entry generators from an exec,
            format_entry(command) for command in commands)
        # read entries from previous run
        # ('x in args' tests attribute presence on the argparse Namespace)
        if 'append' in args and args.append and os.path.isfile(args.cdb):
            with open(args.cdb) as handle:
                previous = iter(json.load(handle))
        else:
            previous = iter([])
        # filter out duplicate entries from both
        # (entry_hash is defined elsewhere in this module)
        duplicate = duplicate_check(entry_hash)
        return (entry
                for entry in itertools.chain(previous, current)
                if os.path.exists(entry['file']) and not duplicate(entry))

    with TemporaryDirectory(prefix='intercept-', dir=tempdir()) as tmp_dir:
        # run the build command
        environment = setup_environment(args, tmp_dir, bin_dir)
        logging.debug('run build in environment: %s', environment)
        exit_code = subprocess.call(args.build, env=environment)
        logging.info('build finished with exit code: %d', exit_code)
        # read the intercepted exec calls
        exec_traces = itertools.chain.from_iterable(
            parse_exec_trace(os.path.join(tmp_dir, filename))
            for filename in sorted(glob.iglob(os.path.join(tmp_dir, '*.cmd'))))
        # do post processing only if that was requested
        if 'raw_entries' not in args or not args.raw_entries:
            entries = post_processing(exec_traces)
        else:
            entries = exec_traces
        # dump the compilation database
        with open(args.cdb, 'w+') as handle:
            json.dump(list(entries), handle, sort_keys=True, indent=4)
    return exit_code
def setup_environment(args, destination, bin_dir):
    """ Sets up the environment for the build command.
    It sets the required environment variables and execute the given command.
    The exec calls will be logged by the 'libear' preloaded library or by the
    'wrapper' programs. """
    c_compiler = args.cc if 'cc' in args else 'cc'
    cxx_compiler = args.cxx if 'cxx' in args else 'c++'

    # Preloading is skipped when explicitly overridden or when the platform's
    # security mechanism (SIP / SELinux) would silently block it.
    wrappers_only = args.override_compiler or is_preload_disabled(sys.platform)
    libear_path = None if wrappers_only else build_libear(c_compiler, destination)

    environment = dict(os.environ)
    environment['INTERCEPT_BUILD_TARGET_DIR'] = destination

    if not libear_path:
        logging.debug('intercept gonna use compiler wrappers')
        environment['CC'] = os.path.join(bin_dir, COMPILER_WRAPPER_CC)
        environment['CXX'] = os.path.join(bin_dir, COMPILER_WRAPPER_CXX)
        environment['INTERCEPT_BUILD_CC'] = c_compiler
        environment['INTERCEPT_BUILD_CXX'] = cxx_compiler
        environment['INTERCEPT_BUILD_VERBOSE'] = \
            'DEBUG' if args.verbose > 2 else 'INFO'
    elif sys.platform == 'darwin':
        logging.debug('intercept gonna preload libear on OSX')
        environment['DYLD_INSERT_LIBRARIES'] = libear_path
        environment['DYLD_FORCE_FLAT_NAMESPACE'] = '1'
    else:
        logging.debug('intercept gonna preload libear on UNIX')
        environment['LD_PRELOAD'] = libear_path

    return environment
def intercept_build_wrapper(cplusplus):
    """ Entry point for `intercept-cc` and `intercept-c++` compiler wrappers.
    It does generate execution report into target directory. And execute
    the wrapped compilation with the real compiler. The parameters for
    report and execution are from environment variables.
    Those parameters which for 'libear' library can't have meaningful
    values are faked.

    :param cplusplus: True for the C++ wrapper, False for the C wrapper.
    :return: the real compiler's exit code. """
    # initialize wrapper logging
    logging.basicConfig(format='intercept: %(levelname)s: %(message)s',
                        level=os.getenv('INTERCEPT_BUILD_VERBOSE', 'INFO'))
    # write report
    try:
        target_dir = os.getenv('INTERCEPT_BUILD_TARGET_DIR')
        if not target_dir:
            raise UserWarning('exec report target directory not found')
        pid = str(os.getpid())
        target_file = os.path.join(target_dir, pid + '.cmd')
        logging.debug('writing exec report to: %s', target_file)
        with open(target_file, 'ab') as handler:
            working_dir = os.getcwd()
            command = US.join(sys.argv) + US
            # pid doubles as the ppid field: the wrapper cannot know the
            # real parent pid the way libear does ("faked" values).
            content = RS.join([pid, pid, 'wrapper', working_dir, command]) + GS
            handler.write(content.encode('utf-8'))
    except IOError:
        logging.exception('writing exec report failed')
    except UserWarning as warning:
        logging.warning(warning)
    # execute with real compiler
    compiler = os.getenv('INTERCEPT_BUILD_CXX', 'c++') if cplusplus \
        else os.getenv('INTERCEPT_BUILD_CC', 'cc')
    compilation = [compiler] + sys.argv[1:]
    logging.debug('execute compiler: %s', compilation)
    return subprocess.call(compilation)
def parse_exec_trace(filename):
    """ Parse a file generated by the 'libear' preloaded library.

    The given filename points to a file which contains the basic report
    produced by the interception library or the wrapper command.  A single
    report file _might_ describe multiple process creations. """

    logging.debug('parse exec trace file: %s', filename)
    keys = ('pid', 'ppid', 'function', 'directory')
    with open(filename, 'r') as trace:
        content = trace.read()
        # Records are GS-separated; fields within a record are RS-separated.
        for chunk in filter(bool, content.split(GS)):
            fields = chunk.split(RS)
            entry = dict(zip(keys, fields))
            # The command is US-separated and carries a trailing separator,
            # hence the [:-1] to drop the empty last element.
            entry['command'] = fields[4].split(US)[:-1]
            yield entry
def format_entry(exec_trace):
    """ Generate compilation database entries from a single exec trace. """

    def make_absolute(cwd, name):
        """ Normalized absolute path for a possibly relative filename. """
        candidate = name if os.path.isabs(name) else os.path.join(cwd, name)
        return os.path.normpath(candidate)

    logging.debug('format this command: %s', exec_trace['command'])
    compilation = split_command(exec_trace['command'])
    if compilation:
        # One database entry per source file of the compilation.
        for source in compilation.files:
            compiler = 'c++' if compilation.compiler == 'c++' else 'cc'
            command = [compiler, '-c'] + compilation.flags + [source]
            logging.debug('formated as: %s', command)
            yield {
                'directory': exec_trace['directory'],
                'command': encode(command),
                'file': make_absolute(exec_trace['directory'], source)
            }
def is_preload_disabled(platform):
    """ Return True when library-based interposition cannot work.

    Library-based interposition will fail silently if SIP is enabled, so
    this should be detected.  On Darwin, SIP is detected by (1) finding a
    'csrutil' binary on the path and (2) checking whether 'csrutil status'
    reports 'System Integrity Protection status: enabled'.  The same
    problem exists on Linux with SELinux: the query program is 'sestatus'
    and the enabled output is 'SELinux status: enabled'.

    :param platform: sys.platform style identifier ('darwin', 'linux', ...)
    :return: True when preload-based interception is known to be blocked.
    """
    if platform == 'darwin':
        pattern = re.compile(r'System Integrity Protection status:\s+enabled')
        command = ['csrutil', 'status']
    elif platform in {'linux', 'linux2'}:
        pattern = re.compile(r'SELinux status:\s+enabled')
        command = ['sestatus']
    else:
        # No known preload blocker on other platforms.
        return False
    try:
        lines = subprocess.check_output(command).decode('utf-8')
        return any(pattern.match(line) for line in lines.splitlines())
    # Narrowed from a bare `except:`: only treat "status tool missing,
    # failed, or produced undecodable output" as "not disabled"; do not
    # swallow KeyboardInterrupt/SystemExit.
    except (OSError, subprocess.CalledProcessError, UnicodeDecodeError):
        return False
def entry_hash(entry):
    """ Unique hash for a compilation database entry, used to de-duplicate. """
    # File and directory strings are reversed to speed up set lookups
    # (paths share long common prefixes but differ at the end).
    reversed_file = entry['file'][::-1]
    reversed_dir = entry['directory'][::-1]
    # On OS X 'cc' and 'c++' are wrappers for 'clang', so both calls would
    # be logged; dropping the first word of the command makes such pairs
    # hash identically.
    command_tail = ' '.join(decode(entry['command'])[1:])
    return '<>'.join([reversed_file, reversed_dir, command_tail])
def create_parser():
    """ Command line argument parser factory method. """
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        '--verbose', '-v',
        action='count',
        default=0,
        help="""Enable verbose output from '%(prog)s'. A second and third
        flag increases verbosity.""")
    arg_parser.add_argument(
        '--cdb',
        metavar='<file>',
        default="compile_commands.json",
        help="""The JSON compilation database.""")
    # Appending to the database and dumping raw exec entries shape the
    # output differently, so they cannot be combined.
    exclusive = arg_parser.add_mutually_exclusive_group()
    exclusive.add_argument(
        '--append',
        action='store_true',
        help="""Append new entries to existing compilation database.""")
    exclusive.add_argument(
        '--disable-filter', '-n',
        dest='raw_entries',
        action='store_true',
        help="""Intercepted child process creation calls (exec calls) are all
        logged to the output. The output is not a compilation database.
        This flag is for debug purposes.""")
    advanced = arg_parser.add_argument_group('advanced options')
    advanced.add_argument(
        '--override-compiler',
        action='store_true',
        help="""Always resort to the compiler wrapper even when better
        intercept methods are available.""")
    advanced.add_argument(
        '--use-cc',
        metavar='<path>',
        dest='cc',
        default='cc',
        help="""When '%(prog)s' analyzes a project by interposing a compiler
        wrapper, which executes a real compiler for compilation and
        do other tasks (record the compiler invocation). Because of
        this interposing, '%(prog)s' does not know what compiler your
        project normally uses. Instead, it simply overrides the CC
        environment variable, and guesses your default compiler.

        If you need '%(prog)s' to use a specific compiler for
        *compilation* then you can use this option to specify a path
        to that compiler.""")
    advanced.add_argument(
        '--use-c++',
        metavar='<path>',
        dest='cxx',
        default='c++',
        help="""This is the same as "--use-cc" but for C++ code.""")
    # Everything after the recognised flags is the build command itself.
    arg_parser.add_argument(
        dest='build',
        nargs=argparse.REMAINDER,
        help="""Command to run.""")
    return arg_parser
| |
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import inspect
import IECore
import Gaffer
import GafferTest
class ContextVariablesTest( GafferTest.TestCase ) :
	"""Tests for Gaffer.ContextVariables : variable substitution, dirty
	propagation, serialisation, the extraVariables plug and the enabled plug."""

	def test( self ) :
		# A variable added to the node is substituted into the upstream
		# "$a" expression when evaluating the output.

		n = GafferTest.StringInOutNode()
		self.assertHashesValid( n )

		c = Gaffer.ContextVariables()
		c.setup( Gaffer.StringPlug() )
		c["in"].setInput( n["out"] )

		n["in"].setValue( "$a" )
		# No variable "a" defined yet, so the substitution yields "".
		self.assertEqual( c["out"].getValue(), "" )

		c["variables"].addChild( Gaffer.NameValuePlug( "a", IECore.StringData( "A" ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
		self.assertEqual( c["out"].getValue(), "A" )

	def testDirtyPropagation( self ) :
		# Adding, modifying and removing a variable must each dirty "out".

		n = GafferTest.StringInOutNode()

		c = Gaffer.ContextVariables()
		c.setup( Gaffer.StringPlug() )
		c["in"].setInput( n["out"] )

		# adding a variable should dirty the output:
		dirtied = GafferTest.CapturingSlot( c.plugDirtiedSignal() )
		c["variables"].addChild( Gaffer.NameValuePlug( "a", IECore.StringData( "A" ), "member1", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
		self.assertIn( c["out"], [ p[0] for p in dirtied ] )

		# modifying the variable should dirty the output:
		dirtied = GafferTest.CapturingSlot( c.plugDirtiedSignal() )
		c["variables"]["member1"]["value"].setValue("b")
		self.assertIn( c["out"], [ p[0] for p in dirtied ] )

		# removing the variable should also dirty the output:
		dirtied = GafferTest.CapturingSlot( c.plugDirtiedSignal() )
		c["variables"].removeChild(c["variables"]["member1"])
		self.assertIn( c["out"], [ p[0] for p in dirtied ] )

	def testSerialisation( self ) :
		# A script containing a ContextVariables node round-trips through
		# serialise()/execute() with the same plugs and output value.

		s = Gaffer.ScriptNode()

		s["n"] = GafferTest.StringInOutNode()
		s["c"] = Gaffer.ContextVariables()
		s["c"].setup( Gaffer.StringPlug() )
		s["c"]["in"].setInput( s["n"]["out"] )

		s["n"]["in"].setValue( "$a" )
		self.assertEqual( s["c"]["out"].getValue(), "" )

		s["c"]["variables"].addChild( Gaffer.NameValuePlug( "a", IECore.StringData( "A" ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
		self.assertEqual( s["c"]["out"].getValue(), "A" )

		s2 = Gaffer.ScriptNode()
		s2.execute( s.serialise() )

		self.assertEqual( s2["c"].keys(), s["c"].keys() )
		self.assertEqual( s2["c"]["out"].getValue(), "A" )

	def testExtraVariables( self ) :
		# extraVariables contributes variables like "variables" does, and
		# wins when both define the same name.

		s = Gaffer.ScriptNode()

		s["n"] = GafferTest.StringInOutNode()
		s["c"] = Gaffer.ContextVariables()
		s["c"].setup( Gaffer.StringPlug() )
		s["c"]["in"].setInput( s["n"]["out"] )

		s["n"]["in"].setValue( "$a" )
		self.assertEqual( s["c"]["out"].getValue(), "" )

		dirtied = GafferTest.CapturingSlot( s["c"].plugDirtiedSignal() )
		s["c"]["extraVariables"].setValue( IECore.CompoundData( { "a" : "A" } ) )
		self.assertIn( s["c"]["out"], { p[0] for p in dirtied } )
		self.assertEqual( s["c"]["out"].getValue(), "A" )

		# Extra variables trump regular variables of the same name
		s["c"]["variables"].addChild( Gaffer.NameValuePlug( "a", IECore.StringData( "B" ) ) )
		self.assertEqual( s["c"]["out"].getValue(), "A" )

		s2 = Gaffer.ScriptNode()
		s2.execute( s.serialise() )
		self.assertEqual( s2["c"]["out"].getValue(), "A" )

	def testExtraVariablesExpression( self ) :
		# extraVariables driven by an expression varies with the context
		# (here, the frame number).

		s = Gaffer.ScriptNode()

		s["n"] = GafferTest.StringInOutNode()
		s["c"] = Gaffer.ContextVariables()
		s["c"].setup( Gaffer.StringPlug() )
		s["c"]["in"].setInput( s["n"]["out"] )

		s["n"]["in"].setValue( "$a$b$c" )
		self.assertEqual( s["c"]["out"].getValue(), "" )

		s["e"] = Gaffer.Expression()
		s["e"].setExpression( inspect.cleandoc(
			"""
			result = IECore.CompoundData()
			if context.getFrame() > 1 :
				result["a"] = "A"
			if context.getFrame() > 2 :
				result["b"] = "B"
			if context.getFrame() > 3 :
				result["c"] = "C"
			parent["c"]["extraVariables"] = result
			"""
		) )

		with Gaffer.Context() as c :
			self.assertEqual( s["c"]["out"].getValue(), "" )
			c.setFrame( 2 )
			self.assertEqual( s["c"]["out"].getValue(), "A" )
			c.setFrame( 3 )
			self.assertEqual( s["c"]["out"].getValue(), "AB" )
			c.setFrame( 4 )
			self.assertEqual( s["c"]["out"].getValue(), "ABC" )

	def testEnabledPlugAffectsOutput( self ) :
		# Toggling "enabled" dirties exactly the enabled plug and the output.

		c = Gaffer.ContextVariables()
		c.setup( Gaffer.StringPlug() )

		cs = GafferTest.CapturingSlot( c.plugDirtiedSignal() )
		c["enabled"].setValue( False )
		self.assertEqual( len( cs ), 2 )
		self.assertEqual( { x[0] for x in cs }, { c["enabled"], c["out"] } )

	def testSerialisationUsesSetup( self ) :
		# Serialisation should reconstruct the node via setup(), not via
		# dynamic plug addChild/setInput calls.

		s1 = Gaffer.ScriptNode()
		s1["c"] = Gaffer.ContextVariables()
		s1["c"].setup( Gaffer.IntPlug() )

		ss = s1.serialise()
		self.assertIn( "setup", ss )
		self.assertEqual( ss.count( "addChild" ), 1 )
		self.assertNotIn( "Dynamic", ss )
		self.assertNotIn( "setInput", ss )

		s2 = Gaffer.ScriptNode()
		s2.execute( ss )
		self.assertIn( "in", s2["c"] )
		self.assertIn( "out", s2["c"] )
		self.assertIsInstance( s2["c"]["in"], Gaffer.IntPlug )
		self.assertIsInstance( s2["c"]["out"], Gaffer.IntPlug )
# Allow running this test module directly.
if __name__ == "__main__":
	unittest.main()
| |
# -*- coding: utf-8 -*-
'''
FanFilm Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,sys,urllib2,HTMLParser, urllib, urlparse
import xbmc, random, time, cookielib
from resources.lib.libraries import cache
from resources.lib.libraries import control
def shrink_host(url):
    """Return the last two labels of *url*'s host (e.g. 'example.com'),
    utf-8 encoded.  Raises IndexError for single-label hosts, as before."""
    labels = urlparse.urlparse(url)[1].split('.')
    domain = '%s.%s' % (labels[-2], labels[-1])
    return domain.encode('utf-8')
# Canned User-Agent strings for sites that require a specific browser family.
IE_USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko'
FF_USER_AGENT = 'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
OPERA_USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36 OPR/34.0.2036.50'
IOS_USER_AGENT = 'Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25'
ANDROID_USER_AGENT = 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36'
#SMU_USER_AGENT = 'URLResolver for Kodi/%s' % (addon_version)
def request(url, close=True, redirect=True, error=False, proxy=None, post=None, headers=None, mobile=False, limit=None, referer=None, cookie=None, output='', timeout='30'):
    """Fetch *url* with urllib2 (Python 2) and return data shaped by *output*.

    Parameters:
        url      -- address to fetch.
        close    -- close the response after reading when True.
        redirect -- follow HTTP redirects when True.
        error    -- controls whether HTTP error responses abort with None.
        proxy    -- optional HTTP proxy spec.
        post     -- optional POST payload (presence makes this a POST).
        headers  -- optional dict of extra request headers.
        mobile   -- send an iPhone User-Agent when True.
        limit    -- read at most limit KB of the body ('0' means 224 KB).
        referer  -- explicit Referer header; derived from url when None.
        cookie   -- explicit Cookie header value.
        output   -- return shape: '' (body), 'cookie', 'response', 'chunk',
                    'extended', 'geturl' or 'headers'.
        timeout  -- socket timeout in seconds, as a string.

    Returns the requested data, or None on failure.  Handles Cloudflare
    503 browser-verification challenges via cfcookie(), and 307 redirects
    manually.
    """
    try:
        #control.log('@@@@@@@@@@@@@@ - URL:%s' % url)
        handlers = []

        if not proxy == None:
            handlers += [urllib2.ProxyHandler({'http':'%s' % (proxy)}), urllib2.HTTPHandler]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        # A cookie jar is needed whenever cookies must be returned or the
        # connection is kept open.  Note 'cookie2' enables the jar but has
        # no branch in the output switch below.
        if output == 'cookie2' or output == 'cookie' or output == 'extended' or not close == True:
            cookies = cookielib.LWPCookieJar()
            handlers += [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        # Disable certificate verification on Python >= 2.7.9 (which
        # introduced default cert checks that break many of these hosts).
        try:
            if sys.version_info < (2, 7, 9): raise Exception()
            import ssl; ssl_context = ssl.create_default_context()
            ssl_context.check_hostname = False
            ssl_context.verify_mode = ssl.CERT_NONE
            handlers += [urllib2.HTTPSHandler(context=ssl_context)]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)
        except:
            pass

        # Normalise headers to a dict (the update() is a no-op self-merge;
        # it only serves to raise when headers is None).
        try: headers.update(headers)
        except: headers = {}

        if 'User-Agent' in headers:
            pass
        elif not mobile == True:
            #headers['User-Agent'] = agent()
            headers['User-Agent'] = cache.get(randomagent, 1)
        else:
            headers['User-Agent'] = 'Apple-iPhone/701.341'

        if 'Referer' in headers:
            pass
        elif referer == None:
            headers['Referer'] = '%s://%s/' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
        else:
            headers['Referer'] = referer

        if not 'Accept-Language' in headers:
            headers['Accept-Language'] = 'en-US'

        if 'Cookie' in headers:
            pass
        elif not cookie == None:
            headers['Cookie'] = cookie

        if redirect == False:
            # Swallow 3xx responses instead of following them.
            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response): return response

            opener = urllib2.build_opener(NoRedirection)
            opener = urllib2.install_opener(opener)

            try: del headers['Referer']
            except: pass

        request = urllib2.Request(url, data=post, headers=headers)
        #print request

        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:
            control.log("AAAA- CODE %s|%s " % (url, response.code))
            if response.code == 503:
                # Cloudflare "I'm Under Attack Mode": solve the JS challenge
                # and retry with the resulting cookie.
                if 'cf-browser-verification' in response.read(5242880):
                    control.log("CF-OK")
                    netloc = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
                    cf = cache.get(cfcookie, 168, netloc, headers['User-Agent'], timeout)
                    headers['Cookie'] = cf
                    request = urllib2.Request(url, data=post, headers=headers)
                    response = urllib2.urlopen(request, timeout=int(timeout))
                elif error == False:
                    return
            elif response.code == 307:
                # Manual follow of a temporary redirect, re-sending cookies.
                control.log("AAAA- Response read: %s" % response.read(5242880))
                control.log("AAAA- Location: %s" % (response.headers['Location'].rstrip()))
                cookie = ''
                try: cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
                except: pass
                headers['Cookie'] = cookie
                request = urllib2.Request(response.headers['Location'], data=post, headers=headers)
                response = urllib2.urlopen(request, timeout=int(timeout))
                #control.log("AAAA- BBBBBBB %s" % response.code)
            elif error == False:
                print ("Response code",response.code, response.msg,url)
                return

        if output == 'cookie':
            try: result = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except: pass
            try: result = cf
            except: pass
        elif output == 'response':
            if limit == '0':
                result = (str(response.code), response.read(224 * 1024))
            elif not limit == None:
                result = (str(response.code), response.read(int(limit) * 1024))
            else:
                result = (str(response.code), response.read(5242880))
        elif output == 'chunk':
            # Probe mode: only return a small chunk of sufficiently large files.
            try: content = int(response.headers['Content-Length'])
            except: content = (2049 * 1024)
            #control.log('CHUNK %s|%s' % (url,content))
            if content < (2048 * 1024):return
            result = response.read(16 * 1024)
            if close == True: response.close()
            return result
        elif output == 'extended':
            try: cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except: pass
            try: cookie = cf
            except: pass
            content = response.headers
            result = response.read(5242880)
            return (result, headers, content, cookie)
        elif output == 'geturl':
            result = response.geturl()
        elif output == 'headers':
            content = response.headers
            return content
        else:
            if limit == '0':
                result = response.read(224 * 1024)
            elif not limit == None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

        if close == True:
            response.close()

        return result
    except Exception as e:
        # NOTE(review): one %s with a two-element tuple -- this line itself
        # raises TypeError, so the intended error message is never logged.
        control.log('Client ERR %s, url:' % (e,url))
        return
def source(url, close=True, error=False, proxy=None, post=None, headers=None, mobile=False, safe=False, referer=None, cookie=None, output='', timeout='30'):
    """Thin wrapper around request().

    Bug fix: the old positional call shifted every argument against
    request()'s signature (error landed in `redirect`, proxy in `error`,
    headers in `post`, etc.).  Arguments are now passed by keyword so each
    reaches its intended parameter.  `safe` is accepted for backward
    compatibility but has no counterpart in request() and is ignored.
    """
    return request(url, close=close, error=error, proxy=proxy, post=post,
                   headers=headers, mobile=mobile, referer=referer,
                   cookie=cookie, output=output, timeout=timeout)
def parseDOM(html, name=u"", attrs={}, ret=False):
    """Extract tag contents (or a tag attribute) from an HTML string.

    html  -- str/unicode, or a list of them, to search.
    name  -- tag name to match (required; empty name returns u"").
    attrs -- dict of attribute-name -> regex fragment the tag must carry;
             all given attributes must match.
    ret   -- False returns the text between the tag and its closing tag;
             an attribute name string returns that attribute's value instead.

    Returns a list of matches (u"" on unusable input).
    """
    # Copyright (C) 2010-2011 Tobias Ussing And Henrik Mosgaard Jensen
    # NOTE(review): `attrs={}` is a mutable default argument; harmless here
    # because it is only iterated, never mutated.

    if isinstance(html, str):
        try:
            html = [html.decode("utf-8")] # Replace with chardet thingy
        except:
            html = [html]
    elif isinstance(html, unicode):
        html = [html]
    elif not isinstance(html, list):
        return u""

    if not name.strip():
        return u""

    ret_lst = []
    for item in html:
        # Flatten tags that span multiple lines so the regexes below work.
        temp_item = re.compile('(<[^>]*?\n[^>]*?>)').findall(item)
        for match in temp_item:
            item = item.replace(match, match.replace("\n", " "))

        # Collect opening tags that carry every requested attribute;
        # lst is intersected with each attribute's candidate list in turn.
        lst = []
        for key in attrs:
            lst2 = re.compile('(<' + name + '[^>]*?(?:' + key + '=[\'"]' + attrs[key] + '[\'"].*?>))', re.M | re.S).findall(item)
            if len(lst2) == 0 and attrs[key].find(" ") == -1: # Try matching without quotation marks
                lst2 = re.compile('(<' + name + '[^>]*?(?:' + key + '=' + attrs[key] + '.*?>))', re.M | re.S).findall(item)

            if len(lst) == 0:
                lst = lst2
                lst2 = []
            else:
                test = range(len(lst))
                test.reverse()
                for i in test: # Delete anything missing from the next list.
                    if not lst[i] in lst2:
                        del(lst[i])

        if len(lst) == 0 and attrs == {}:
            lst = re.compile('(<' + name + '>)', re.M | re.S).findall(item)
            if len(lst) == 0:
                lst = re.compile('(<' + name + ' .*?>)', re.M | re.S).findall(item)

        if isinstance(ret, str):
            # Attribute mode: pull the requested attribute out of each tag.
            lst2 = []
            for match in lst:
                attr_lst = re.compile('<' + name + '.*?' + ret + '=([\'"].[^>]*?[\'"])>', re.M | re.S).findall(match)
                if len(attr_lst) == 0:
                    attr_lst = re.compile('<' + name + '.*?' + ret + '=(.[^>]*?)>', re.M | re.S).findall(match)
                for tmp in attr_lst:
                    cont_char = tmp[0]
                    if cont_char in "'\"":
                        # Limit down to next variable.
                        if tmp.find('=' + cont_char, tmp.find(cont_char, 1)) > -1:
                            tmp = tmp[:tmp.find('=' + cont_char, tmp.find(cont_char, 1))]

                        # Limit to the last quotation mark
                        if tmp.rfind(cont_char, 1) > -1:
                            tmp = tmp[1:tmp.rfind(cont_char)]
                    else:
                        if tmp.find(" ") > 0:
                            tmp = tmp[:tmp.find(" ")]
                        elif tmp.find("/") > 0:
                            tmp = tmp[:tmp.find("/")]
                        elif tmp.find(">") > 0:
                            tmp = tmp[:tmp.find(">")]

                    lst2.append(tmp.strip())
            lst = lst2
        else:
            # Content mode: find each tag's matching close tag, skipping
            # over same-name nested tags.
            lst2 = []
            for match in lst:
                endstr = u"</" + name

                start = item.find(match)
                end = item.find(endstr, start)
                pos = item.find("<" + name, start + 1 )

                while pos < end and pos != -1:
                    tend = item.find(endstr, end + len(endstr))
                    if tend != -1:
                        end = tend
                    pos = item.find("<" + name, pos + 1)

                if start == -1 and end == -1:
                    temp = u""
                elif start > -1 and end > -1:
                    temp = item[start + len(match):end]
                elif end > -1:
                    temp = item[:end]
                elif start > -1:
                    temp = item[start + len(match):]

                if ret:
                    endstr = item[end:item.find(">", item.find(endstr)) + 1]
                    temp = match + temp + endstr

                item = item[item.find(temp, item.find(match)) + len(temp):]
                lst2.append(temp)
            lst = lst2
        ret_lst += lst

    return ret_lst
def replaceHTMLCodes(txt):
    """Decode HTML entities in *txt*, first repairing numeric entities
    that are missing their trailing semicolon."""
    txt = re.sub("(&#[0-9]+)([^;^0-9]+)", "\\1;\\2", txt)
    # NOTE(review): the two literals below are reconstructed as the
    # entities '&quot;'/'&amp;' -- the extracted source showed them
    # already decoded; confirm against upstream.
    txt = HTMLParser.HTMLParser().unescape(txt)
    txt = txt.replace("&quot;", "\"").replace("&amp;", "&")
    return txt
def cleanHTMLCodes(txt):
    """Decode HTML entities in *txt* like replaceHTMLCodes, additionally
    dropping single-quote characters first."""
    txt = txt.replace("'", "")
    txt = re.sub("(&#[0-9]+)([^;^0-9]+)", "\\1;\\2", txt)
    # NOTE(review): the two literals below are reconstructed as the
    # entities '&quot;'/'&amp;' -- the extracted source showed them
    # already decoded; confirm against upstream.
    txt = HTMLParser.HTMLParser().unescape(txt)
    txt = txt.replace("&quot;", "\"").replace("&amp;", "&")
    return txt
def agent():
    """Backwards-compatible alias for randomagent()."""
    return randomagent()
def randomagent():
    """Return a randomly generated desktop browser User-Agent string."""
    # Version pools, index-aligned with RAND_UAS: Firefox, Chrome, IE.
    BR_VERS = [
        ['%s.0' % i for i in xrange(18, 43)],
        ['37.0.2062.103', '37.0.2062.120', '37.0.2062.124', '38.0.2125.101', '38.0.2125.104', '38.0.2125.111', '39.0.2171.71', '39.0.2171.95', '39.0.2171.99', '40.0.2214.93', '40.0.2214.111',
         '40.0.2214.115', '42.0.2311.90', '42.0.2311.135', '42.0.2311.152', '43.0.2357.81', '43.0.2357.124', '44.0.2403.155', '44.0.2403.157', '45.0.2454.101', '45.0.2454.85', '46.0.2490.71',
         '46.0.2490.80', '46.0.2490.86', '47.0.2526.73', '47.0.2526.80'],
        ['11.0']]
    WIN_VERS = ['Windows NT 10.0', 'Windows NT 7.0', 'Windows NT 6.3', 'Windows NT 6.2', 'Windows NT 6.1', 'Windows NT 6.0', 'Windows NT 5.1', 'Windows NT 5.0']
    FEATURES = ['; WOW64', '; Win64; IA64', '; Win64; x64', '']
    RAND_UAS = ['Mozilla/5.0 ({win_ver}{feature}; rv:{br_ver}) Gecko/20100101 Firefox/{br_ver}',
                'Mozilla/5.0 ({win_ver}{feature}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{br_ver} Safari/537.36',
                'Mozilla/5.0 ({win_ver}{feature}; Trident/7.0; rv:{br_ver}) like Gecko']
    # Pick a template first so the browser version list matches it.
    choice = random.randrange(len(RAND_UAS))
    template = RAND_UAS[choice]
    return template.format(win_ver=random.choice(WIN_VERS),
                           feature=random.choice(FEATURES),
                           br_ver=random.choice(BR_VERS[choice]))
def googletag(url):
    """Map a Google video stream URL to a quality label via its itag.

    Returns [{'quality': ..., 'url': url}] for known itags, else []."""
    tags = re.compile('itag=(\d*)').findall(url)
    tags += re.compile('=m(\d*)$').findall(url)
    if not tags:
        return []
    tag = tags[0]
    #control.log('<><><><><><><><><><><><> %s <><><><><><><><><>' % quality)
    if tag in ('37', '137', '299', '96', '248', '303', '46'):
        label = '1080p'
    elif tag in ('22', '84', '136', '298', '120', '95', '247', '302', '45', '102'):
        label = 'HD'
    elif tag in ('35', '44', '135', '244', '94', '59',
                 '18', '34', '43', '82', '100', '101', '134', '243', '93',
                 '5', '6', '36', '83', '133', '242', '92', '132'):
        label = 'SD'
    else:
        return []
    return [{'quality': label, 'url': url}]
def file_quality_openload(url):
    """Guess an openload stream's quality from markers in its URL.

    Defaults to SD; the except branch additionally carries the url key,
    preserved for backward compatibility."""
    try:
        for marker, label in (('1080', '1080p'), ('720', 'HD')):
            if marker in url:
                return {'quality': label}
        return {'quality': 'SD'}
    except:
        return {'quality': 'SD', 'url': url}
def cfcookie(netloc, ua, timeout):
    """Solve a Cloudflare IUAM javascript challenge for *netloc* and return
    the resulting cookie header string (or None on failure).

    NOTE(review): this definition is immediately shadowed by a
    byte-identical redefinition of cfcookie below -- this copy is dead
    code and a candidate for removal.
    """
    try:
        headers = {'User-Agent': ua}

        request = urllib2.Request(netloc, headers=headers)

        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:
            result = response.read(5242880)

        # If no HTTPError occurred, `result` is unbound and the NameError
        # below is swallowed by the outer bare except (returns None).
        jschl = re.findall('name="jschl_vc" value="(.+?)"/>', result)[0]

        init = re.findall('setTimeout\(function\(\){\s*.*?.*:(.*?)};', result)[-1]

        builder = re.findall(r"challenge-form\'\);\s*(.*)a.v", result)[0]

        # Evaluate the obfuscated arithmetic the challenge page performs.
        decryptVal = parseJSString(init)

        lines = builder.split(';')

        for line in lines:
            if len(line) > 0 and '=' in line:
                sections=line.split('=')
                line_val = parseJSString(sections[1])
                decryptVal = int(eval(str(decryptVal)+sections[0][-1]+str(line_val)))

        # The expected answer is the computed value plus the host length.
        answer = decryptVal + len(urlparse.urlparse(netloc).netloc)

        query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (netloc, jschl, answer)

        if 'type="hidden" name="pass"' in result:
            passval = re.findall('name="pass" value="(.*?)"', result)[0]
            query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (netloc, urllib.quote_plus(passval), jschl, answer)

        # Cloudflare refuses answers submitted too quickly.
        time.sleep(5)

        cookies = cookielib.LWPCookieJar()
        handlers = [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
        opener = urllib2.build_opener(*handlers)
        opener = urllib2.install_opener(opener)

        try:
            request = urllib2.Request(query, headers=headers)
            response = urllib2.urlopen(request, timeout=int(timeout))
        except:
            pass

        cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])

        return cookie
    except:
        pass
def cfcookie(netloc, ua, timeout):
    """Solve a Cloudflare IUAM javascript challenge for *netloc* and return
    the resulting cookie header string (or None on failure).

    NOTE(review): byte-identical duplicate of the cfcookie defined above;
    this later definition is the one actually bound at import time.
    """
    try:
        headers = {'User-Agent': ua}

        request = urllib2.Request(netloc, headers=headers)

        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:
            result = response.read(5242880)

        # If no HTTPError occurred, `result` is unbound and the NameError
        # below is swallowed by the outer bare except (returns None).
        jschl = re.findall('name="jschl_vc" value="(.+?)"/>', result)[0]

        init = re.findall('setTimeout\(function\(\){\s*.*?.*:(.*?)};', result)[-1]

        builder = re.findall(r"challenge-form\'\);\s*(.*)a.v", result)[0]

        # Evaluate the obfuscated arithmetic the challenge page performs.
        decryptVal = parseJSString(init)

        lines = builder.split(';')

        for line in lines:
            if len(line) > 0 and '=' in line:
                sections=line.split('=')
                line_val = parseJSString(sections[1])
                decryptVal = int(eval(str(decryptVal)+sections[0][-1]+str(line_val)))

        # The expected answer is the computed value plus the host length.
        answer = decryptVal + len(urlparse.urlparse(netloc).netloc)

        query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (netloc, jschl, answer)

        if 'type="hidden" name="pass"' in result:
            passval = re.findall('name="pass" value="(.*?)"', result)[0]
            query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (netloc, urllib.quote_plus(passval), jschl, answer)

        # Cloudflare refuses answers submitted too quickly.
        time.sleep(5)

        cookies = cookielib.LWPCookieJar()
        handlers = [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
        opener = urllib2.build_opener(*handlers)
        opener = urllib2.install_opener(opener)

        try:
            request = urllib2.Request(query, headers=headers)
            response = urllib2.urlopen(request, timeout=int(timeout))
        except:
            pass

        cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])

        return cookie
    except:
        pass
def parseJSString(s):
    """Evaluate a Cloudflare-style obfuscated JS arithmetic string to an int.

    Returns None on any failure (bare except preserved from the original)."""
    try:
        # A leading '+' is a unary prefix the original stripped after
        # substitution; no replacement pattern can consume it, so stripping
        # it first is equivalent.
        trimmed = s[1:] if s[0] == '+' else s
        expr = (trimmed.replace('!+[]', '1')
                       .replace('!![]', '1')
                       .replace('[]', '0')
                       .replace('(', 'str('))
        return int(eval(expr))
    except:
        pass
def googlepass(url):
    """Resolve a google video url (possibly carrying '|'-appended headers)
    to its final location, normalising the scheme per 'requiressl'."""
    try:
        # Optional request headers ride after a '|' separator in the url.
        try:
            extra_headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
        except:
            extra_headers = None
        url = request(url.split('|')[0], headers=extra_headers, output='geturl')
        if 'requiressl=yes' in url:
            url = url.replace('http://', 'https://')
        else:
            url = url.replace('https://', 'http://')
        if extra_headers:
            url += '|%s' % urllib.urlencode(extra_headers)
        return url
    except:
        return
def cleanhtmltags(raw_html):
    """Strip every HTML/XML tag from *raw_html* and return the plain text."""
    return re.sub('<.*?>', '', raw_html)
def byteify(input):
    """Recursively convert unicode strings inside dicts/lists to utf-8
    encoded byte strings (Python 2 JSON post-processing helper).

    The parameter name shadows the `input` builtin but is kept for
    interface compatibility."""
    if isinstance(input, dict):
        return {byteify(key): byteify(value) for key, value in input.iteritems()}
    if isinstance(input, list):
        return [byteify(element) for element in input]
    if isinstance(input, unicode):
        return input.encode('utf-8')
    return input
| |
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test compact blocks (BIP 152).
Version 1 compact blocks are pre-segwit (txids)
Version 2 compact blocks are post-segwit (wtxids)
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.script import CScript, OP_TRUE, OP_DROP
# TestP2PConn: A peer we use to send messages to bitcoind, and store responses.
class TestP2PConn(P2PInterface):
    """Test peer that records sendcmpct messages and block announcements
    (inv / headers / cmpctblock) received from bitcoind."""

    def __init__(self):
        super().__init__()
        # Every sendcmpct message received, in order.
        self.last_sendcmpct = []
        # Set when any form of block announcement arrives.
        self.block_announced = False
        # Store the hashes of blocks we've seen announced.
        # This is for synchronizing the p2p message traffic,
        # so we can eg wait until a particular block is announced.
        self.announced_blockhashes = set()

    def on_sendcmpct(self, message):
        self.last_sendcmpct.append(message)

    def on_cmpctblock(self, message):
        self.block_announced = True
        self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
        self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)

    def on_headers(self, message):
        self.block_announced = True
        for x in self.last_message["headers"].headers:
            x.calc_sha256()
            self.announced_blockhashes.add(x.sha256)

    def on_inv(self, message):
        for x in self.last_message["inv"].inv:
            # type 2 == MSG_BLOCK
            if x.type == 2:
                self.block_announced = True
                self.announced_blockhashes.add(x.hash)

    # Requires caller to hold mininode_lock
    def received_block_announcement(self):
        return self.block_announced

    def clear_block_announcement(self):
        # Reset announcement state and drop any stale announcement messages.
        with mininode_lock:
            self.block_announced = False
            self.last_message.pop("inv", None)
            self.last_message.pop("headers", None)
            self.last_message.pop("cmpctblock", None)

    def get_headers(self, locator, hashstop):
        # Send a getheaders request for the given locator/stop hash.
        msg = msg_getheaders()
        msg.locator.vHave = locator
        msg.hashstop = hashstop
        self.send_message(msg)

    def send_header_for_blocks(self, new_blocks):
        # Announce the given blocks to the node via a headers message.
        headers_message = msg_headers()
        headers_message.headers = [CBlockHeader(b) for b in new_blocks]
        self.send_message(headers_message)

    def request_headers_and_sync(self, locator, hashstop=0):
        # Request headers and wait for the resulting announcement, leaving
        # the announcement state cleared afterwards.
        self.clear_block_announcement()
        self.get_headers(locator, hashstop)
        wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock)
        self.clear_block_announcement()

    # Block until a block announcement for a particular block hash is
    # received.
    def wait_for_block_announcement(self, block_hash, timeout=30):
        def received_hash():
            return (block_hash in self.announced_blockhashes)
        wait_until(received_hash, timeout=timeout, lock=mininode_lock)

    def send_await_disconnect(self, message, timeout=30):
        """Sends a message to the node and wait for disconnect.

        This is used when we want to send a message into the node that we expect
        will get us disconnected, eg an invalid block."""
        self.send_message(message)
        wait_until(lambda: not self.is_connected, timeout=timeout, lock=mininode_lock)
class CompactBlocksTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
# Node0 = pre-segwit, node1 = segwit-aware
self.num_nodes = 2
# This test was written assuming SegWit is activated using BIP9 at height 432 (3x confirmation window).
# TODO: Rewrite this test to support SegWit being always active.
self.extra_args = [["-vbparams=segwit:0:0"], ["-vbparams=segwit:0:999999999999", "-txindex", "-deprecatedrpc=addwitnessaddress"]]
self.utxos = []
def build_block_on_tip(self, node, segwit=False):
height = node.getblockcount()
tip = node.getbestblockhash()
mtp = node.getblockheader(tip)['mediantime']
block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
block.nVersion = 4
if segwit:
add_witness_commitment(block)
block.solve()
return block
# Create 10 more anyone-can-spend utxo's for testing.
def make_utxos(self):
# Doesn't matter which node we use, just use node0.
block = self.build_block_on_tip(self.nodes[0])
self.test_node.send_and_ping(msg_block(block))
assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256)
self.nodes[0].generate(100)
total_value = block.vtx[0].vout[0].nValue
out_value = total_value // 10
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
for i in range(10):
tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
tx.rehash()
block2 = self.build_block_on_tip(self.nodes[0])
block2.vtx.append(tx)
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.solve()
self.test_node.send_and_ping(msg_block(block2))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
return
# Test "sendcmpct" (between peers preferring the same version):
# - No compact block announcements unless sendcmpct is sent.
# - If sendcmpct is sent with version > preferred_version, the message is ignored.
# - If sendcmpct is sent with boolean 0, then block announcements are not
#   made with compact blocks.
# - If sendcmpct is then sent with boolean 1, then new block announcements
#   are made with compact blocks.
# If old_node is passed in, request compact blocks with version=preferred-1
# and verify that it receives block announcements via compact block.
def test_sendcmpct(self, node, test_node, preferred_version, old_node=None):
    """Exercise the SENDCMPCT handshake/announcement behavior described above."""
    # Make sure we get a SENDCMPCT message from our peer
    def received_sendcmpct():
        return (len(test_node.last_sendcmpct) > 0)
    wait_until(received_sendcmpct, timeout=30, lock=mininode_lock)
    with mininode_lock:
        # Check that the first version received is the preferred one
        assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
        # And that we receive versions down to 1.
        assert_equal(test_node.last_sendcmpct[-1].version, 1)
        test_node.last_sendcmpct = []

    tip = int(node.getbestblockhash(), 16)

    def check_announcement_of_new_block(node, peer, predicate):
        # Mine one block and assert `predicate(peer)` about how it was
        # announced; checked under mininode_lock for thread safety.
        peer.clear_block_announcement()
        block_hash = int(node.generate(1)[0], 16)
        peer.wait_for_block_announcement(block_hash, timeout=30)
        assert(peer.block_announced)

        with mininode_lock:
            assert predicate(peer), (
                "block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
                    block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))

    # We shouldn't get any block announcements via cmpctblock yet.
    check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)

    # Try one more time, this time after requesting headers.
    test_node.request_headers_and_sync(locator=[tip])
    check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message)

    # Test a few ways of using sendcmpct that should NOT
    # result in compact block announcements.
    # Before each test, sync the headers chain.
    test_node.request_headers_and_sync(locator=[tip])

    # Now try a SENDCMPCT message with too-high version
    sendcmpct = msg_sendcmpct()
    sendcmpct.version = preferred_version+1
    sendcmpct.announce = True
    test_node.send_and_ping(sendcmpct)
    check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)

    # Headers sync before next test.
    test_node.request_headers_and_sync(locator=[tip])

    # Now try a SENDCMPCT message with valid version, but announce=False
    sendcmpct.version = preferred_version
    sendcmpct.announce = False
    test_node.send_and_ping(sendcmpct)
    check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)

    # Headers sync before next test.
    test_node.request_headers_and_sync(locator=[tip])

    # Finally, try a SENDCMPCT message with announce=True
    sendcmpct.version = preferred_version
    sendcmpct.announce = True
    test_node.send_and_ping(sendcmpct)
    check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)

    # Try one more time (no headers sync should be needed!)
    check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)

    # Try one more time, after turning on sendheaders
    test_node.send_and_ping(msg_sendheaders())
    check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)

    # Try one more time, after sending a version-1, announce=false message.
    sendcmpct.version = preferred_version-1
    sendcmpct.announce = False
    test_node.send_and_ping(sendcmpct)
    check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)

    # Now turn off announcements
    sendcmpct.version = preferred_version
    sendcmpct.announce = False
    test_node.send_and_ping(sendcmpct)
    check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message)

    if old_node is not None:
        # Verify that a peer using an older protocol version can receive
        # announcements from this node.
        sendcmpct.version = preferred_version-1
        sendcmpct.announce = True
        old_node.send_and_ping(sendcmpct)
        # Header sync
        old_node.request_headers_and_sync(locator=[tip])
        check_announcement_of_new_block(node, old_node, lambda p: "cmpctblock" in p.last_message)
# This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
def test_invalid_cmpctblock_message(self):
    """Send a cmpctblock with an out-of-range prefilled-transaction index
    and verify the node disconnects us without accepting the block."""
    self.nodes[0].generate(101)
    block = self.build_block_on_tip(self.nodes[0])

    cmpct_block = P2PHeaderAndShortIDs()
    cmpct_block.header = CBlockHeader(block)
    cmpct_block.prefilled_txn_length = 1
    # This index will be too high
    prefilled_txn = PrefilledTransaction(1, block.vtx[0])
    cmpct_block.prefilled_txn = [prefilled_txn]
    self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block))
    # Tip must not have advanced past the block's parent.
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
# Compare the generated shortids to what we expect based on BIP 152, given
# bitcoind's choice of nonce.
def test_compactblock_construction(self, node, test_node, version, use_witness_address):
    """Mine a block full of wallet transactions and verify the compact
    block the node produces for it (both the fast-announce copy and one
    fetched via getdata) matches BIP 152 expectations.

    version: compact block protocol version (1 = pre-segwit, 2 = witness).
    use_witness_address: send funds to a witness address first so the
    block contains at least one segwit spend.
    """
    # Generate a bunch of transactions.
    node.generate(101)
    num_transactions = 25
    address = node.getnewaddress()
    if use_witness_address:
        # Want at least one segwit spend, so move all funds to
        # a witness address.
        address = node.addwitnessaddress(address)
        value_to_send = node.getbalance()
        node.sendtoaddress(address, satoshi_round(value_to_send-Decimal(0.1)))
        node.generate(1)

    segwit_tx_generated = False
    for i in range(num_transactions):
        txid = node.sendtoaddress(address, 0.1)
        hex_tx = node.gettransaction(txid)["hex"]
        tx = FromHex(CTransaction(), hex_tx)
        if not tx.wit.is_null():
            segwit_tx_generated = True

    if use_witness_address:
        assert(segwit_tx_generated)  # check that our test is not broken

    # Wait until we've seen the block announcement for the resulting tip
    tip = int(node.getbestblockhash(), 16)
    test_node.wait_for_block_announcement(tip)

    # Make sure we will receive a fast-announce compact block
    self.request_cb_announcements(test_node, node, version)

    # Now mine a block, and look at the resulting compact block.
    test_node.clear_block_announcement()
    block_hash = int(node.generate(1)[0], 16)

    # Store the raw block in our internal format.
    # NOTE: use "%064x" so the hash is zero-padded to the full 64 hex
    # digits. The previous "%02x" dropped leading zero nibbles (which
    # proof-of-work hashes routinely have), producing a short hash that
    # strict RPC hash validation rejects.
    block = FromHex(CBlock(), node.getblock("%064x" % block_hash, False))
    for tx in block.vtx:
        tx.calc_sha256()
    block.rehash()

    # Wait until the block was announced (via compact blocks)
    wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)

    # Now fetch and check the compact block
    header_and_shortids = None
    with mininode_lock:
        assert("cmpctblock" in test_node.last_message)
        # Convert the on-the-wire representation to absolute indexes
        header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
    self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)

    # Now fetch the compact block using a normal non-announce getdata
    with mininode_lock:
        test_node.clear_block_announcement()
        inv = CInv(4, block_hash)  # 4 == "CompactBlock"
        test_node.send_message(msg_getdata([inv]))

    wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)

    # Now fetch and check the compact block
    header_and_shortids = None
    with mininode_lock:
        assert("cmpctblock" in test_node.last_message)
        # Convert the on-the-wire representation to absolute indexes
        header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
    self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
    """Verify a received compact block against the actual block.

    Checks the header hash, that the coinbase is prefilled, that every
    prefilled transaction matches the block (including witness data for
    version 2), and that each remaining shortid matches the siphash-based
    shortid computed per BIP 152.

    NOTE: this consumes header_and_shortids (prefilled_txn and shortids
    lists are popped destructively).
    """
    # Check that we got the right block!
    header_and_shortids.header.calc_sha256()
    assert_equal(header_and_shortids.header.sha256, block_hash)

    # Make sure the prefilled_txn appears to have included the coinbase
    assert(len(header_and_shortids.prefilled_txn) >= 1)
    assert_equal(header_and_shortids.prefilled_txn[0].index, 0)

    # Check that all prefilled_txn entries match what's in the block.
    for entry in header_and_shortids.prefilled_txn:
        entry.tx.calc_sha256()
        # This checks the non-witness parts of the tx agree
        assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)

        # And this checks the witness
        wtxid = entry.tx.calc_sha256(True)
        if version == 2:
            assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
        else:
            # Shouldn't have received a witness
            assert(entry.tx.wit.is_null())

    # Check that the cmpctblock message announced all the transactions.
    assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))

    # And now check that all the shortids are as expected as well.
    # Determine the siphash keys to use.
    [k0, k1] = header_and_shortids.get_siphash_keys()

    # Walk the block's transactions in order, consuming either the next
    # prefilled entry or the next shortid for each one.
    index = 0
    while index < len(block.vtx):
        if (len(header_and_shortids.prefilled_txn) > 0 and
                header_and_shortids.prefilled_txn[0].index == index):
            # Already checked prefilled transactions above
            header_and_shortids.prefilled_txn.pop(0)
        else:
            tx_hash = block.vtx[index].sha256
            if version == 2:
                tx_hash = block.vtx[index].calc_sha256(True)
            shortid = calculate_shortid(k0, k1, tx_hash)
            assert_equal(shortid, header_and_shortids.shortids[0])
            header_and_shortids.shortids.pop(0)
        index += 1
# Test that bitcoind requests compact blocks when we announce new blocks
# via header or inv, and that responding to getblocktxn causes the block
# to be successfully reconstructed.
# Post-segwit: upgraded nodes would only make this request of cb-version-2,
# NODE_WITNESS peers. Unupgraded nodes would still make this request of
# any cb-version-1-supporting peer.
def test_compactblock_requests(self, node, test_node, version, segwit):
    """Announce blocks via inv/header, answer the resulting compact block
    request with one that omits the coinbase, and verify the node's
    getblocktxn/blocktxn round trip reconstructs the block."""
    # Try announcing a block with an inv or header, expect a compactblock
    # request
    for announce in ["inv", "header"]:
        block = self.build_block_on_tip(node, segwit=segwit)
        with mininode_lock:
            test_node.last_message.pop("getdata", None)

        if announce == "inv":
            test_node.send_message(msg_inv([CInv(2, block.sha256)]))
            # An inv for an unknown block triggers a getheaders first.
            wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock)
            test_node.send_header_for_blocks([block])
        else:
            test_node.send_header_for_blocks([block])
        wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock)
        assert_equal(len(test_node.last_message["getdata"].inv), 1)
        # inv type 4 == MSG_CMPCT_BLOCK
        assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
        assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)

        # Send back a compactblock message that omits the coinbase
        comp_block = HeaderAndShortIDs()
        comp_block.header = CBlockHeader(block)
        comp_block.nonce = 0
        [k0, k1] = comp_block.get_siphash_keys()
        coinbase_hash = block.vtx[0].sha256
        if version == 2:
            coinbase_hash = block.vtx[0].calc_sha256(True)
        comp_block.shortids = [
            calculate_shortid(k0, k1, coinbase_hash) ]
        test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
        # Block can't connect yet: the coinbase is still missing.
        assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
        # Expect a getblocktxn message.
        with mininode_lock:
            assert("getblocktxn" in test_node.last_message)
            absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
        assert_equal(absolute_indexes, [0])  # should be a coinbase request

        # Send the coinbase, and verify that the tip advances.
        if version == 2:
            msg = msg_witness_blocktxn()
        else:
            msg = msg_blocktxn()
        msg.block_transactions.blockhash = block.sha256
        msg.block_transactions.transactions = [block.vtx[0]]
        test_node.send_and_ping(msg)
        assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def build_block_with_transactions(self, node, utxo, num_transactions):
    """Build and solve a block on the node's tip containing a chain of
    num_transactions transactions, each spending the previous one's
    output, seeded from the given [txid, vout, value] utxo."""
    new_block = self.build_block_on_tip(node)
    spend = utxo
    for _ in range(num_transactions):
        next_tx = CTransaction()
        next_tx.vin.append(CTxIn(COutPoint(spend[0], spend[1]), b''))
        out_script = CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])
        next_tx.vout.append(CTxOut(spend[2] - 1000, out_script))
        next_tx.rehash()
        new_block.vtx.append(next_tx)
        spend = [next_tx.sha256, 0, next_tx.vout[0].nValue]
    new_block.hashMerkleRoot = new_block.calc_merkle_root()
    new_block.solve()
    return new_block
# Test that we only receive getblocktxn requests for transactions that the
# node needs, and that responding to them causes the block to be
# reconstructed.
def test_getblocktxn_requests(self, node, test_node, version):
    """Send compact blocks with varying prefill sets / mempool contents
    and verify the node requests exactly the missing transactions."""
    with_witness = (version==2)

    def test_getblocktxn_response(compact_block, peer, expected_result):
        # Send the compact block and check the absolute indexes the node
        # requests back via getblocktxn.
        msg = msg_cmpctblock(compact_block.to_p2p())
        peer.send_and_ping(msg)
        with mininode_lock:
            assert("getblocktxn" in peer.last_message)
            absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute()
        assert_equal(absolute_indexes, expected_result)

    def test_tip_after_message(node, peer, msg, tip):
        # Deliver msg and assert the node's tip equals the given hash.
        peer.send_and_ping(msg)
        assert_equal(int(node.getbestblockhash(), 16), tip)

    # First try announcing compactblocks that won't reconstruct, and verify
    # that we receive getblocktxn messages back.
    utxo = self.utxos.pop(0)

    block = self.build_block_with_transactions(node, utxo, 5)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block, use_witness=with_witness)

    test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])

    msg_bt = msg_blocktxn()
    if with_witness:
        msg_bt = msg_witness_blocktxn()  # serialize with witnesses
    msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
    test_tip_after_message(node, test_node, msg_bt, block.sha256)

    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 5)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])

    # Now try interspersing the prefilled transactions
    comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
    test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
    msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
    test_tip_after_message(node, test_node, msg_bt, block.sha256)

    # Now try giving one transaction ahead of time.
    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 5)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    test_node.send_and_ping(msg_tx(block.vtx[1]))
    assert(block.vtx[1].hash in node.getrawmempool())

    # Prefill 4 out of the 6 transactions, and verify that only the one
    # that was not in the mempool is requested.
    comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
    test_getblocktxn_response(comp_block, test_node, [5])

    msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
    test_tip_after_message(node, test_node, msg_bt, block.sha256)

    # Now provide all transactions to the node before the block is
    # announced and verify reconstruction happens immediately.
    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 10)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    for tx in block.vtx[1:]:
        test_node.send_message(msg_tx(tx))
    test_node.sync_with_ping()
    # Make sure all transactions were accepted.
    mempool = node.getrawmempool()
    for tx in block.vtx[1:]:
        assert(tx.hash in mempool)

    # Clear out last request.
    with mininode_lock:
        test_node.last_message.pop("getblocktxn", None)

    # Send compact block
    comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
    test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
    with mininode_lock:
        # Shouldn't have gotten a request for any transaction
        assert("getblocktxn" not in test_node.last_message)
# Incorrectly responding to a getblocktxn shouldn't cause the block to be
# permanently failed.
def test_incorrect_blocktxn_response(self, node, test_node, version):
    """Answer a getblocktxn with the wrong transactions, then verify the
    node falls back to a full-block getdata and still accepts the block."""
    if (len(self.utxos) == 0):
        self.make_utxos()
    utxo = self.utxos.pop(0)

    block = self.build_block_with_transactions(node, utxo, 10)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    # Relay the first 5 transactions from the block in advance
    for tx in block.vtx[1:6]:
        test_node.send_message(msg_tx(tx))
    test_node.sync_with_ping()
    # Make sure all transactions were accepted.
    mempool = node.getrawmempool()
    for tx in block.vtx[1:6]:
        assert(tx.hash in mempool)

    # Send compact block
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
    test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
    absolute_indexes = []
    with mininode_lock:
        assert("getblocktxn" in test_node.last_message)
        absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
    # Only the 5 transactions not already in the mempool are requested.
    assert_equal(absolute_indexes, [6, 7, 8, 9, 10])

    # Now give an incorrect response.
    # Note that it's possible for bitcoind to be smart enough to know we're
    # lying, since it could check to see if the shortid matches what we're
    # sending, and eg disconnect us for misbehavior.  If that behavior
    # change was made, we could just modify this test by having a
    # different peer provide the block further down, so that we're still
    # verifying that the block isn't marked bad permanently. This is good
    # enough for now.
    msg = msg_blocktxn()
    if version==2:
        msg = msg_witness_blocktxn()
    # Deliberately wrong: repeats vtx[5] and skips vtx[6].
    msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
    test_node.send_and_ping(msg)

    # Tip should not have updated
    assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)

    # We should receive a getdata request
    wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock)
    assert_equal(len(test_node.last_message["getdata"].inv), 1)
    # Expect a full-block request (type 2 == MSG_BLOCK, optionally with
    # the witness flag set).
    assert(test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2|MSG_WITNESS_FLAG)
    assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)

    # Deliver the block
    if version==2:
        test_node.send_and_ping(msg_witness_block(block))
    else:
        test_node.send_and_ping(msg_block(block))
    assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_getblocktxn_handler(self, node, test_node, version):
    """Check the node's getblocktxn responses: blocktxn replies (with or
    without witnesses depending on version) for recent blocks, and a full
    block instead for blocks deeper than the allowed depth."""
    # bitcoind will not send blocktxn responses for blocks whose height is
    # more than 10 blocks deep.
    MAX_GETBLOCKTXN_DEPTH = 10
    chain_height = node.getblockcount()
    current_height = chain_height
    while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
        block_hash = node.getblockhash(current_height)
        block = FromHex(CBlock(), node.getblock(block_hash, False))

        # Request a random subset of the block's transactions.
        msg = msg_getblocktxn()
        msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
        num_to_request = random.randint(1, len(block.vtx))
        msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
        test_node.send_message(msg)
        wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock)

        [tx.calc_sha256() for tx in block.vtx]

        with mininode_lock:
            assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16))
            all_indices = msg.block_txn_request.to_absolute()
            for index in all_indices:
                tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0)
                tx.calc_sha256()
                assert_equal(tx.sha256, block.vtx[index].sha256)
                if version == 1:
                    # Witnesses should have been stripped
                    assert(tx.wit.is_null())
                else:
                    # Check that the witness matches
                    assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
            test_node.last_message.pop("blocktxn", None)
        current_height -= 1

    # Next request should send a full block response, as we're past the
    # allowed depth for a blocktxn response.
    block_hash = node.getblockhash(current_height)
    msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
    with mininode_lock:
        test_node.last_message.pop("block", None)
        test_node.last_message.pop("blocktxn", None)
    test_node.send_and_ping(msg)
    with mininode_lock:
        test_node.last_message["block"].block.calc_sha256()
        assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
        assert "blocktxn" not in test_node.last_message
def test_compactblocks_not_at_tip(self, node, test_node):
    """Verify compact blocks are only served near the tip: old blocks come
    back as full blocks, stale compact block announcements are stored as
    headers-only, and getblocktxn for them silently fails."""
    # Test that requesting old compactblocks doesn't work.
    MAX_CMPCTBLOCK_DEPTH = 5
    new_blocks = []
    for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
        test_node.clear_block_announcement()
        new_blocks.append(node.generate(1)[0])
        wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)

    # At exactly MAX_CMPCTBLOCK_DEPTH deep, a compact block is still served.
    test_node.clear_block_announcement()
    test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
    wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)

    test_node.clear_block_announcement()
    node.generate(1)
    wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
    test_node.clear_block_announcement()
    with mininode_lock:
        test_node.last_message.pop("block", None)
    # One block deeper: the same request now returns a full block instead.
    test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
    wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock)
    with mininode_lock:
        test_node.last_message["block"].block.calc_sha256()
        assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))

    # Generate an old compactblock, and verify that it's not accepted.
    cur_height = node.getblockcount()
    hashPrevBlock = int(node.getblockhash(cur_height-5), 16)
    block = self.build_block_on_tip(node)
    block.hashPrevBlock = hashPrevBlock
    block.solve()

    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block)
    test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))

    tips = node.getchaintips()
    found = False
    for x in tips:
        if x["hash"] == block.hash:
            assert_equal(x["status"], "headers-only")
            found = True
            break
    assert(found)

    # Requesting this block via getblocktxn should silently fail
    # (to avoid fingerprinting attacks).
    msg = msg_getblocktxn()
    msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
    with mininode_lock:
        test_node.last_message.pop("blocktxn", None)
    test_node.send_and_ping(msg)
    with mininode_lock:
        assert "blocktxn" not in test_node.last_message
def activate_segwit(self, node):
    """Mine enough blocks (three 144-block regtest periods) to activate
    the BIP9 segwit deployment, then assert it reports 'active'."""
    blocks_needed = 144 * 3
    node.generate(blocks_needed)
    assert_equal(get_bip9_status(node, "segwit")["status"], 'active')
def test_end_to_end_block_relay(self, node, listeners):
    """Submit a block to the node via RPC and verify every listening peer
    is announced the block with a compact block whose header hashes to
    the submitted block."""
    utxo = self.utxos.pop(0)

    block = self.build_block_with_transactions(node, utxo, 10)

    # Clear stale announcements first. (Idiom fix: use a plain loop
    # rather than a list comprehension executed only for side effects.)
    for l in listeners:
        l.clear_block_announcement()

    # ToHex() won't serialize with witness, but this block has no witnesses
    # anyway. TODO: repeat this test with witness tx's to a segwit node.
    node.submitblock(ToHex(block))

    for l in listeners:
        wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock)
    with mininode_lock:
        for l in listeners:
            assert "cmpctblock" in l.last_message
            l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
            assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256)
# Test that we don't get disconnected if we relay a compact block with valid header,
# but invalid transactions.
def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit):
    """Send a compact block whose header and merkle root are valid but
    whose transaction set is invalid (a mid-chain tx was deleted, so a
    later tx spends a missing parent); the tip must not advance and the
    peer must stay connected."""
    assert(len(self.utxos))
    utxo = self.utxos[0]

    block = self.build_block_with_transactions(node, utxo, 5)
    # Drop a middle transaction: vtx[4] now spends an output that is not
    # in the block, making the block invalid despite a correct merkle root.
    del block.vtx[3]
    block.hashMerkleRoot = block.calc_merkle_root()
    if use_segwit:
        # If we're testing with segwit, also drop the coinbase witness,
        # but include the witness commitment.
        add_witness_commitment(block)
        block.vtx[0].wit.vtxinwit = []
    block.solve()

    # Now send the compact block with all transactions prefilled, and
    # verify that we don't get disconnected.
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
    msg = msg_cmpctblock(comp_block.to_p2p())
    test_node.send_and_ping(msg)

    # Check that the tip didn't advance.
    # Bug fix: the original used `is not`, which compares object identity
    # and is vacuously true for two distinct int objects, so the check
    # could never fail. Compare values with != instead.
    assert(int(node.getbestblockhash(), 16) != block.sha256)
    test_node.sync_with_ping()
# Helper for enabling cb announcements
# Send the sendcmpct request and sync headers
def request_cb_announcements(self, peer, node, version):
    """Sync the peer's headers to the node's tip, then ask the node to
    announce new blocks as compact blocks at the given protocol version."""
    best_hash = node.getbestblockhash()
    peer.get_headers(locator=[int(best_hash, 16)], hashstop=0)
    announce_msg = msg_sendcmpct()
    announce_msg.version = version
    announce_msg.announce = True
    peer.send_and_ping(announce_msg)
def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer):
    """Verify block reconstruction works when different peers supply the
    compact block and the transactions, and that a corrupted compact
    block from one peer doesn't prevent reconstruction via another."""
    assert(len(self.utxos))

    def announce_cmpct_block(node, peer):
        # Build a block, announce it to `peer` as a compact block with no
        # mempool help, and confirm the node asks for the missing txs.
        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, 5)

        cmpct_block = HeaderAndShortIDs()
        cmpct_block.initialize_from_block(block)
        msg = msg_cmpctblock(cmpct_block.to_p2p())
        peer.send_and_ping(msg)
        with mininode_lock:
            assert "getblocktxn" in peer.last_message
        return block, cmpct_block

    # stalling_peer announces but never answers; delivery_peer supplies
    # the transactions and re-sends the compact block to complete it.
    block, cmpct_block = announce_cmpct_block(node, stalling_peer)

    for tx in block.vtx[1:]:
        delivery_peer.send_message(msg_tx(tx))
    delivery_peer.sync_with_ping()
    mempool = node.getrawmempool()
    for tx in block.vtx[1:]:
        assert(tx.hash in mempool)

    delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
    assert_equal(int(node.getbestblockhash(), 16), block.sha256)

    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])

    # Now test that delivering an invalid compact block won't break relay
    block, cmpct_block = announce_cmpct_block(node, stalling_peer)
    for tx in block.vtx[1:]:
        delivery_peer.send_message(msg_tx(tx))
    delivery_peer.sync_with_ping()

    # Corrupt the compact block with a bogus coinbase witness so the
    # delivery peer's copy fails to reconstruct.
    cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [ CTxInWitness() ]
    cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
    cmpct_block.use_witness = True
    delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
    assert(int(node.getbestblockhash(), 16) != block.sha256)

    # The stalling peer can still complete the block via blocktxn.
    msg = msg_blocktxn()
    msg.block_transactions.blockhash = block.sha256
    msg.block_transactions.transactions = block.vtx[1:]
    stalling_peer.send_and_ping(msg)
    assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def run_test(self):
    """Drive the full compact-blocks test sequence: run every sub-test
    pre-segwit-activation against both nodes and all three peer types,
    then activate segwit on node1 and repeat the relevant tests."""
    # Setup the p2p connections and start up the network thread.
    self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn())
    self.segwit_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK|NODE_WITNESS)
    self.old_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)

    network_thread_start()

    self.test_node.wait_for_verack()

    # We will need UTXOs to construct transactions in later tests.
    self.make_utxos()

    self.log.info("Running tests, pre-segwit activation:")

    self.log.info("Testing SENDCMPCT p2p message... ")
    self.test_sendcmpct(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_sendcmpct(self.nodes[1], self.segwit_node, 2, old_node=self.old_node)
    sync_blocks(self.nodes)

    self.log.info("Testing compactblock construction...")
    self.test_compactblock_construction(self.nodes[0], self.test_node, 1, False)
    sync_blocks(self.nodes)
    self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, False)
    sync_blocks(self.nodes)

    self.log.info("Testing compactblock requests... ")
    self.test_compactblock_requests(self.nodes[0], self.test_node, 1, False)
    sync_blocks(self.nodes)
    self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, False)
    sync_blocks(self.nodes)

    self.log.info("Testing getblocktxn requests...")
    self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
    sync_blocks(self.nodes)

    self.log.info("Testing getblocktxn handler...")
    self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
    self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
    sync_blocks(self.nodes)

    self.log.info("Testing compactblock requests/announcements not at chain tip...")
    self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
    sync_blocks(self.nodes)
    self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node)
    self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
    sync_blocks(self.nodes)

    self.log.info("Testing handling of incorrect blocktxn responses...")
    self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node, 2)
    sync_blocks(self.nodes)

    # End-to-end block relay tests
    self.log.info("Testing end-to-end block relay...")
    self.request_cb_announcements(self.test_node, self.nodes[0], 1)
    self.request_cb_announcements(self.old_node, self.nodes[1], 1)
    self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
    self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node])
    self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])

    self.log.info("Testing handling of invalid compact blocks...")
    self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, False)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, False)

    self.log.info("Testing reconstructing compact blocks from all peers...")
    self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.segwit_node, self.old_node)
    sync_blocks(self.nodes)

    # Advance to segwit activation
    self.log.info("Advancing to segwit activation")
    self.activate_segwit(self.nodes[1])

    self.log.info("Running tests, post-segwit activation...")

    self.log.info("Testing compactblock construction...")
    self.test_compactblock_construction(self.nodes[1], self.old_node, 1, True)
    self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, True)
    sync_blocks(self.nodes)

    self.log.info("Testing compactblock requests (unupgraded node)... ")
    self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True)

    self.log.info("Testing getblocktxn requests (unupgraded node)...")
    self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)

    # Need to manually sync node0 and node1, because post-segwit activation,
    # node1 will not download blocks from node0.
    self.log.info("Syncing nodes...")
    assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash())
    while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()):
        block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount()+1)
        self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False))
    assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())

    self.log.info("Testing compactblock requests (segwit node)... ")
    self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, True)

    self.log.info("Testing getblocktxn requests (segwit node)...")
    self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
    sync_blocks(self.nodes)

    self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...")
    self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
    self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)

    # Test that if we submitblock to node1, we'll get a compact block
    # announcement to all peers.
    # (Post-segwit activation, blocks won't propagate from node0 to node1
    # automatically, so don't bother testing a block announced to node0.)
    self.log.info("Testing end-to-end block relay...")
    self.request_cb_announcements(self.test_node, self.nodes[0], 1)
    self.request_cb_announcements(self.old_node, self.nodes[1], 1)
    self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
    self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])

    self.log.info("Testing handling of invalid compact blocks...")
    self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, True)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, True)

    self.log.info("Testing invalid index in cmpctblock message...")
    self.test_invalid_cmpctblock_message()
# Script entry point: run the test suite when executed directly.
if __name__ == '__main__':
    CompactBlocksTest().main()
# (Non-code residue from an export/scrape; commented out so the module parses.)
# |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.