gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
from jedi.inference.cache import inference_state_method_cache
from jedi.inference.base_value import ValueSet, NO_VALUES, Value, \
iterator_to_value_set, LazyValueWrapper, ValueWrapper
from jedi.inference.compiled import builtin_from_name
from jedi.inference.value.klass import ClassFilter
from jedi.inference.value.klass import ClassMixin
from jedi.inference.utils import to_list
from jedi.inference.names import AbstractNameDefinition, ValueName
from jedi.inference.context import ClassContext
from jedi.inference.gradual.generics import TupleGenericManager
class _BoundTypeVarName(AbstractNameDefinition):
    """
    This type var was bound to a certain type, e.g. int.
    """
    def __init__(self, type_var, value_set):
        # The TypeVar definition this bound name was created from.
        self._type_var = type_var
        self.parent_context = type_var.parent_context
        # The concrete value(s) the type var was bound to.
        self._value_set = value_set

    def infer(self):
        def iter_():
            for value in self._value_set:
                # Replace any with the constraints if they are there.
                from jedi.inference.gradual.typing import Any
                if isinstance(value, Any):
                    for constraint in self._type_var.constraints:
                        yield constraint
                else:
                    yield value
        return ValueSet(iter_())

    def py__name__(self):
        # Named after the underlying type var, e.g. "T".
        return self._type_var.py__name__()

    def __repr__(self):
        return '<%s %s -> %s>' % (self.__class__.__name__, self.py__name__(), self._value_set)
class _TypeVarFilter(object):
"""
A filter for all given variables in a class.
A = TypeVar('A')
B = TypeVar('B')
class Foo(Mapping[A, B]):
...
In this example we would have two type vars given: A and B
"""
def __init__(self, generics, type_vars):
self._generics = generics
self._type_vars = type_vars
def get(self, name):
for i, type_var in enumerate(self._type_vars):
if type_var.py__name__() == name:
try:
return [_BoundTypeVarName(type_var, self._generics[i])]
except IndexError:
return [type_var.name]
return []
def values(self):
# The values are not relevant. If it's not searched exactly, the type
# vars are just global and should be looked up as that.
return []
class _AnnotatedClassContext(ClassContext):
    """Class context that additionally exposes the class' bound type vars."""
    def get_filters(self, *args, **kwargs):
        filters = super(_AnnotatedClassContext, self).get_filters(
            *args, **kwargs
        )
        for f in filters:
            yield f

        # The type vars can only be looked up if it's a global search and
        # not a direct lookup on the class.
        yield self._value.get_type_var_filter()
class DefineGenericBase(LazyValueWrapper):
    """
    Base class for values that carry generics, e.g. ``Foo[int]``.

    Subclasses provide ``_create_instance_with_generics`` (and the usual
    ``_get_wrapped_value`` from LazyValueWrapper).
    """
    def __init__(self, generics_manager):
        # Manager that knows how to enumerate this value's generics.
        self._generics_manager = generics_manager

    def _create_instance_with_generics(self, generics_manager):
        # Subclass responsibility: build the same kind of value with new
        # generics.
        raise NotImplementedError

    @inference_state_method_cache()
    def get_generics(self):
        # Tuple of ValueSets, one entry per generic slot.
        return self._generics_manager.to_tuple()

    def define_generics(self, type_var_dict):
        """
        Replace contained type vars according to ``type_var_dict`` and return
        a ValueSet containing the (possibly newly created) value.
        """
        from jedi.inference.gradual.type_var import TypeVar
        changed = False
        new_generics = []
        for generic_set in self.get_generics():
            values = NO_VALUES
            for generic in generic_set:
                if isinstance(generic, (DefineGenericBase, TypeVar)):
                    result = generic.define_generics(type_var_dict)
                    values |= result
                    if result != ValueSet({generic}):
                        changed = True
                else:
                    values |= ValueSet([generic])
            new_generics.append(values)

        if not changed:
            # There might not be any type vars that change. In that case just
            # return itself, because it does not make sense to potentially lose
            # cached results.
            return ValueSet([self])

        return ValueSet([self._create_instance_with_generics(
            TupleGenericManager(tuple(new_generics))
        )])

    def is_same_class(self, other):
        if not isinstance(other, DefineGenericBase):
            return False

        if self.tree_node != other.tree_node:
            # TODO not sure if this is nice.
            return False

        given_params1 = self.get_generics()
        given_params2 = other.get_generics()
        if len(given_params1) != len(given_params2):
            # If the amount of type vars doesn't match, the class doesn't
            # match.
            return False

        # Now compare generics
        return all(
            any(
                # TODO why is this ordering the correct one?
                cls2.is_same_class(cls1)
                for cls1 in class_set1
                for cls2 in class_set2
            ) for class_set1, class_set2 in zip(given_params1, given_params2)
        )

    def __repr__(self):
        return '<%s: %s%s>' % (
            self.__class__.__name__,
            self._wrapped_value,
            list(self.get_generics()),
        )
class GenericClass(ClassMixin, DefineGenericBase):
    """
    A class that is defined with generics, might be something simple like:

        class Foo(Generic[T]): ...
        my_foo_int_cls = Foo[int]
    """
    def __init__(self, class_value, generics_manager):
        super(GenericClass, self).__init__(generics_manager)
        # The plain (non-parameterized) class this generic class wraps.
        self._class_value = class_value

    def _get_wrapped_value(self):
        return self._class_value

    def get_type_hint(self, add_class_info=True):
        n = self.py__name__()
        # Not sure if this is the best way to do this, but all of these types
        # are a bit special in that they have type aliases and other ways to
        # become lower case. It's probably better to make them upper case,
        # because that's what you can use in annotations.
        n = dict(list="List", dict="Dict", set="Set", tuple="Tuple").get(n, n)
        s = n + self._generics_manager.get_type_hint()
        if add_class_info:
            return 'Type[%s]' % s
        return s

    def get_type_var_filter(self):
        return _TypeVarFilter(self.get_generics(), self.list_type_vars())

    def py__call__(self, arguments):
        # Instantiating a generic class yields an instance wrapper that keeps
        # access to the generics.
        instance, = super(GenericClass, self).py__call__(arguments)
        return ValueSet([_GenericInstanceWrapper(instance)])

    def _as_context(self):
        return _AnnotatedClassContext(self)

    @to_list
    def py__bases__(self):
        for base in self._wrapped_value.py__bases__():
            # Lazily remap this class' generics onto the base classes.
            yield _LazyGenericBaseClass(self, base)

    def _create_instance_with_generics(self, generics_manager):
        return GenericClass(self._class_value, generics_manager)

    def is_sub_class_of(self, class_value):
        if super(GenericClass, self).is_sub_class_of(class_value):
            return True
        return self._class_value.is_sub_class_of(class_value)

    def infer_type_vars(self, value_set, is_class_value=False):
        # Circular
        from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts

        annotation_name = self.py__name__()
        type_var_dict = {}
        if annotation_name == 'Iterable' and not is_class_value:
            annotation_generics = self.get_generics()
            if annotation_generics:
                return annotation_generics[0].infer_type_vars(
                    value_set.merge_types_of_iterate(),
                )
        else:
            # Note: we need to handle the MRO _in order_, so we need to extract
            # the elements from the set first, then handle them, even if we put
            # them back in a set afterwards.
            for py_class in value_set:
                if not is_class_value:
                    if py_class.is_instance() and not py_class.is_compiled():
                        py_class = py_class.get_annotated_class_object()
                    else:
                        continue

                if py_class.api_type != u'class':
                    # Functions & modules don't have an MRO and we're not
                    # expecting a Callable (those are handled separately within
                    # TypingClassValueWithIndex).
                    continue

                for parent_class in py_class.py__mro__():
                    class_name = parent_class.py__name__()
                    if annotation_name == class_name:
                        merge_type_var_dicts(
                            type_var_dict,
                            merge_pairwise_generics(self, parent_class),
                        )
                        # Only the first matching class in the MRO counts.
                        break

        return type_var_dict
class _LazyGenericBaseClass(object):
    """
    A base class of a GenericClass that is only computed on ``infer()``,
    with the subclass' bound type vars remapped onto the base's generics.
    """
    def __init__(self, class_value, lazy_base_class):
        self._class_value = class_value
        self._lazy_base_class = lazy_base_class

    @iterator_to_value_set
    def infer(self):
        for base in self._lazy_base_class.infer():
            if isinstance(base, GenericClass):
                # Here we have to recalculate the given types.
                yield GenericClass.create_cached(
                    base.inference_state,
                    base._wrapped_value,
                    TupleGenericManager(tuple(self._remap_type_vars(base))),
                )
            else:
                yield base

    def _remap_type_vars(self, base):
        """Yield one ValueSet per generic slot of *base*, with type vars
        replaced by whatever this class bound them to."""
        from jedi.inference.gradual.type_var import TypeVar
        filter = self._class_value.get_type_var_filter()
        for type_var_set in base.get_generics():
            new = NO_VALUES
            for type_var in type_var_set:
                if isinstance(type_var, TypeVar):
                    names = filter.get(type_var.py__name__())
                    new |= ValueSet.from_sets(
                        name.infer() for name in names
                    )
                else:
                    # Mostly will be type vars, except if in some cases
                    # a concrete type will already be there. In that
                    # case just add it to the value set.
                    new |= ValueSet([type_var])
            yield new
class _GenericInstanceWrapper(ValueWrapper):
    """Wraps an instance of a generic class to expose generic-aware APIs."""
    def py__stop_iteration_returns(self):
        # Walk the MRO to find the Generator/Iterator "return" type that is
        # produced when the iterator is exhausted.
        for cls in self._wrapped_value.class_value.py__mro__():
            if cls.py__name__() == 'Generator':
                generics = cls.get_generics()
                try:
                    # Generator[YieldType, SendType, ReturnType] -> index 2.
                    return generics[2].execute_annotation()
                except IndexError:
                    pass
            elif cls.py__name__() == 'Iterator':
                # A plain Iterator "returns" None on StopIteration.
                return ValueSet([builtin_from_name(self.inference_state, u'None')])
        return self._wrapped_value.py__stop_iteration_returns()

    def get_type_hint(self, add_class_info=True):
        return self._wrapped_value.class_value.get_type_hint(add_class_info=False)
class _PseudoTreeNameClass(Value):
    """
    In typeshed, some classes are defined like this:

        Tuple: _SpecialForm = ...

    Now this is not a real class, therefore we have to do some workarounds like
    this class. Essentially this class makes it possible to goto that `Tuple`
    name, without affecting anything else negatively.
    """
    def __init__(self, parent_context, tree_name):
        super(_PseudoTreeNameClass, self).__init__(
            parent_context.inference_state,
            parent_context
        )
        self._tree_name = tree_name

    @property
    def tree_node(self):
        # The name itself stands in for the (non-existent) class node.
        return self._tree_name

    def get_filters(self, *args, **kwargs):
        # TODO this is obviously wrong. Is it though?
        class EmptyFilter(ClassFilter):
            def __init__(self):
                pass

            def get(self, name, **kwargs):
                return []

            def values(self, **kwargs):
                return []
        yield EmptyFilter()

    def py__class__(self):
        # TODO this is obviously not correct, but at least gives us a class if
        # we have none. Some of these objects don't really have a base class in
        # typeshed.
        return builtin_from_name(self.inference_state, u'object')

    @property
    def name(self):
        return ValueName(self, self._tree_name)

    def get_qualified_names(self):
        return (self._tree_name.value,)

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self._tree_name.value)
class BaseTypingValue(LazyValueWrapper):
    """Base class for special ``typing`` module values (e.g. ``Tuple``)."""
    def __init__(self, parent_context, tree_name):
        self.inference_state = parent_context.inference_state
        self.parent_context = parent_context
        self._tree_name = tree_name

    @property
    def name(self):
        return ValueName(self, self._tree_name)

    def _get_wrapped_value(self):
        # These names are not real classes, so wrap them in a pseudo class.
        return _PseudoTreeNameClass(self.parent_context, self._tree_name)

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self._tree_name.value)
class BaseTypingValueWithGenerics(DefineGenericBase):
    """Like ``BaseTypingValue``, but additionally carries generics."""
    def __init__(self, parent_context, tree_name, generics_manager):
        super(BaseTypingValueWithGenerics, self).__init__(generics_manager)
        self.inference_state = parent_context.inference_state
        self.parent_context = parent_context
        self._tree_name = tree_name

    def _get_wrapped_value(self):
        # These names are not real classes, so wrap them in a pseudo class.
        return _PseudoTreeNameClass(self.parent_context, self._tree_name)

    def __repr__(self):
        return '%s(%s%s)' % (self.__class__.__name__, self._tree_name.value,
                             self._generics_manager)
|
|
from django.conf import settings
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import gettext_lazy as _
from django.views.generic import CreateView
from django.views.generic import DeleteView
from django.views.generic import DetailView
from django.views.generic import ListView
from django.views.generic import UpdateView
from django.views.generic import View
from reversion import revisions as reversion
from devices.forms import VIEWSORTING
from devices.forms import FilterForm
from devices.forms import ViewForm
from devices.models import Device
from devicetypes.forms import TypeForm
from devicetypes.models import Type
from devicetypes.models import TypeAttribute
from Lagerregal.utils import PaginationMixin
from users.mixins import PermissionRequiredMixin
class TypeList(PermissionRequiredMixin, PaginationMixin, ListView):
    """Paginated list of devicetypes with optional filtering and sorting."""
    model = Type
    context_object_name = 'type_list'
    permission_required = 'devicetypes.view_type'

    def get_queryset(self):
        '''method for query all devicetypes and present the results depending on existing filter'''
        devicetype = Type.objects.all()

        # filtering with existing filterstring
        # NOTE: self.filterstring / self.viewsorting are stored on the view so
        # get_context_data() (which Django calls after get_queryset()) can
        # reuse them.
        self.filterstring = self.request.GET.get("filter", None)
        if self.filterstring:
            devicetype = devicetype.filter(name__icontains=self.filterstring)

        # sort list of results (name or ID ascending or descending)
        # only accept sort keys from the whitelist to avoid invalid order_by
        self.viewsorting = self.request.GET.get("sorting", "name")
        if self.viewsorting in [s[0] for s in VIEWSORTING]:
            devicetype = devicetype.order_by(self.viewsorting)
        return devicetype

    def get_context_data(self, **kwargs):
        '''method for getting context data for filtering, viewsorting and breadcrumbs'''
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        context["breadcrumbs"] = [
            (reverse("type-list"), _("Devicetypes")), ]
        context["viewform"] = ViewForm(initial={"sorting": self.viewsorting})

        # filtering
        if self.filterstring:
            context["filterform"] = FilterForm(initial={"filter": self.filterstring})
        else:
            context["filterform"] = FilterForm()

        # show page number in breadcrumbs
        if context["is_paginated"] and context["page_obj"].number > 1:
            context["breadcrumbs"].append(["", context["page_obj"].number])
        return context
class TypeDetail(PermissionRequiredMixin, DetailView):
    """Detail page of one devicetype with related devices and attributes."""
    model = Type
    context_object_name = 'object'
    template_name = "devicetypes/type_detail.html"
    permission_required = 'devicetypes.view_type'

    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        # Add in a QuerySet of all the books
        # adds data of related devices and attributes
        # merge_list: candidate targets for the merge action (everything but
        # this type itself)
        context["merge_list"] = Type.objects.exclude(pk=context["object"].pk).order_by("name")
        context['device_list'] = Device.objects.filter(devicetype=context["object"], archived=None, trashed=None)
        context["attribute_list"] = TypeAttribute.objects.filter(devicetype=context["object"])

        # use given label template if existing
        # builds a JS snippet filling the label template with this object's
        # attribute values
        if "type" in settings.LABEL_TEMPLATES:
            context["label_js"] = ""
            for attribute in settings.LABEL_TEMPLATES["type"][1]:
                context["label_js"] += "\n" + "label.setObjectText('{0}', '{1}');".format(attribute,
                                                                                          getattr(context["object"],
                                                                                                  attribute))

        # show chosen type in breadcrumbs
        context["breadcrumbs"] = [
            (reverse("type-list"), _("Devicetypes")),
            (reverse("type-detail", kwargs={"pk": context["object"].pk}), context["object"])]
        return context
class TypeCreate(PermissionRequiredMixin, CreateView):
    """Create a new devicetype, including any extra attributes submitted."""
    form_class = TypeForm
    template_name = 'devicetypes/type_form.html'
    permission_required = 'devicetypes.add_type'

    def get_context_data(self, **kwargs):
        """Extend the context with action labels and breadcrumbs."""
        context = super().get_context_data(**kwargs)
        context['actionstring'] = _("Create new Devicetype")
        context['type'] = "type"
        # breadcrumb trail: list page -> "Create new Devicetype"
        context["breadcrumbs"] = [
            (reverse("type-list"), _("Devicetypes")),
            ("", _("Create new Devicetype"))]
        return context

    def form_valid(self, form):
        """Save the devicetype and create one TypeAttribute per filled-in
        extra field."""
        newobject = form.save()
        extra_values = (
            value for key, value in form.cleaned_data.items()
            if key.startswith("extra_field_") and value != ""
        )
        for value in extra_values:
            attribute = TypeAttribute()
            attribute.name = value
            attribute.devicetype = newobject
            attribute.save()
        return HttpResponseRedirect(newobject.get_absolute_url())
class TypeUpdate(PermissionRequiredMixin, UpdateView):
    """Edit an existing devicetype and manage its extra attributes."""
    form_class = TypeForm
    model = Type
    template_name = 'devicetypes/type_form.html'
    permission_required = 'devicetypes.change_type'

    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        # Add in a QuerySet of all the books
        context['actionstring'] = _("Update")
        context["attribute_list"] = TypeAttribute.objects.filter(devicetype=context["object"])
        # extra_field_0 is dropped on the edit form -- presumably the blank
        # "new attribute" field only used when creating; verify against
        # TypeForm.
        context["form"].fields.pop("extra_field_0")
        context["form"]["extra_fieldcount"].initial = context["attribute_list"].count()
        # adds "Edit" to breadcrumbs
        context["breadcrumbs"] = [
            (reverse("type-list"), _("Devicetypes")),
            (reverse("type-detail", kwargs={"pk": context["object"].pk}), context["object"]),
            ("", _("Edit"))]
        return context
class TypeDelete(PermissionRequiredMixin, DeleteView):
    """Confirmation page and deletion of a devicetype."""
    model = Type
    success_url = reverse_lazy('type-list')
    template_name = 'devices/base_delete.html'
    permission_required = 'devicetypes.delete_type'

    # !!!! there is no forwarding or loading so the code is never run
    # def get_context_data(self, **kwargs):
    #     # Call the base implementation first to get a context
    #     context = super().get_context_data(**kwargs)
    #
    #     # should add "Delete" to breadcrumbs
    #     context["breadcrumbs"] = [
    #         (reverse("type-list"), _("Devicetypes")),
    #         (reverse("type-detail", kwargs={"pk": context["object"].pk}), context["object"]),
    #         ("", _("Delete"))]
    #
    #     return context
class TypeMerge(PermissionRequiredMixin, View):
    """Merge one devicetype into another, moving its devices and attributes."""
    model = Type
    permission_required = 'devicetypes.change_type'

    def get(self, request, *args, **kwargs):
        """Render a confirmation page before merging."""
        context = {}
        context["oldobject"] = get_object_or_404(self.model, pk=kwargs["oldpk"])
        context["newobject"] = get_object_or_404(self.model, pk=kwargs["newpk"])
        # adds "Merge with devicetype name" to breadcrumbs
        context["breadcrumbs"] = [
            (reverse("type-list"), _("Devicetypes")),
            (reverse("type-detail", kwargs={"pk": context["oldobject"].pk}), context["oldobject"]),
            # Fix: translate first, then format. Formatting before gettext
            # produces a dynamic msgid that can never match the catalog.
            ("", _("Merge with {0}").format(context["newobject"]))]
        return render(request, 'devicetypes/type_merge.html', context)

    def post(self, request, *args, **kwargs):
        """Move all devices (and optionally attributes) of the old devicetype
        to the new one, then delete the old devicetype."""
        oldobject = get_object_or_404(self.model, pk=kwargs["oldpk"])
        newobject = get_object_or_404(self.model, pk=kwargs["newpk"])

        # adds all devices of old devicetype to new devicetype
        devices = Device.objects.filter(devicetype=oldobject)
        for device in devices:
            device.devicetype = newobject
            # Fix: translate first, then format (see note in get()).
            reversion.set_comment(_("Merged Devicetype {0} into {1}").format(oldobject, newobject))
            device.save()

        if request.POST.get("remove_attributes") == "on":
            # archive the old attribute values in each device's description
            for device in devices:
                attributes = device.typeattributevalue_set.all()
                if len(attributes) == 0:
                    continue
                if device.description is None:
                    device.description = ""
                device.description += "\n\n==== Archived Attributes"
                for attribute in attributes:
                    device.description += "\n{0}: {1}".format(attribute.typeattribute.name, attribute.value)
                device.save()
        else:
            # adds all attributes of old devicetype to new devicetype
            attributes = TypeAttribute.objects.filter(devicetype=oldobject)
            for attribute in attributes:
                attribute.devicetype = newobject
                attribute.save()

        oldobject.delete()
        return HttpResponseRedirect(newobject.get_absolute_url())
######################################################################################################################
# attribute related views #
######################################################################################################################
class TypeAttributeCreate(PermissionRequiredMixin, CreateView):
    """Generic create form for a single type attribute."""
    model = TypeAttribute
    template_name = 'devices/base_form.html'
    fields = '__all__'
    permission_required = 'devicetypes.change_type'
class TypeAttributeUpdate(PermissionRequiredMixin, UpdateView):
    """Edit a type attribute, then return to its devicetype's detail page."""
    model = TypeAttribute
    template_name = 'devices/base_form.html'
    fields = '__all__'
    permission_required = 'devicetypes.change_type'

    def post(self, request, *args, **kwargs):
        # The redirect target is derived from the submitted devicetype pk.
        self.success_url = reverse('type-detail',
                                   kwargs={'pk': request.POST['devicetype']})
        return super().post(request, *args, **kwargs)

    def get_success_url(self):
        return self.success_url
class TypeAttributeDelete(PermissionRequiredMixin, DeleteView):
    """Delete a type attribute and redirect back to the originating page."""
    model = TypeAttribute
    success_url = reverse_lazy('type-list')
    template_name = 'devices/base_delete.html'
    permission_required = 'devicetypes.change_type'

    def post(self, request, *args, **kwargs):
        # NOTE(review): "next" comes straight from the client and is used as
        # the redirect target -- consider validating it (e.g. with
        # url_has_allowed_host_and_scheme) to avoid an open redirect.
        self.next = request.POST["next"]
        # Fix: *args was previously dropped (`super().post(request,
        # **kwargs)`), breaking URLconfs that pass positional arguments; the
        # sibling TypeAttributeUpdate.post forwards both.
        return super().post(request, *args, **kwargs)

    def get_success_url(self):
        # Redirect back to wherever the delete was initiated from.
        return self.next
|
|
# Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The cgsnapshots api."""
from oslo_log import log as logging
import webob
from webob import exc
from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import cgsnapshots as cgsnapshot_views
from cinder.api import xmlutil
from cinder import consistencygroup as consistencygroupAPI
from cinder import exception
from cinder.i18n import _, _LI
from cinder import utils
LOG = logging.getLogger(__name__)
def make_cgsnapshot(elem):
    """Declare the cgsnapshot attributes on an XML template element."""
    for attr in ('id', 'consistencygroup_id', 'status', 'created_at',
                 'name', 'description'):
        elem.set(attr)
class CgsnapshotTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single cgsnapshot."""
    def construct(self):
        root = xmlutil.TemplateElement('cgsnapshot', selector='cgsnapshot')
        make_cgsnapshot(root)
        # Namespace info comes from the extension descriptor defined below.
        alias = Cgsnapshots.alias
        namespace = Cgsnapshots.namespace
        return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
class CgsnapshotsTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list of cgsnapshots."""
    def construct(self):
        root = xmlutil.TemplateElement('cgsnapshots')
        elem = xmlutil.SubTemplateElement(root, 'cgsnapshot',
                                          selector='cgsnapshots')
        make_cgsnapshot(elem)
        # Namespace info comes from the extension descriptor defined below.
        alias = Cgsnapshots.alias
        namespace = Cgsnapshots.namespace
        return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace})
class CreateDeserializer(wsgi.MetadataXMLDeserializer):
    """Deserializes an XML cgsnapshot create request body."""
    def default(self, string):
        """Parse *string* and return the request body as a nested dict."""
        dom = utils.safe_minidom_parse_string(string)
        return {'body': {'cgsnapshot': self._extract_cgsnapshot(dom)}}

    def _extract_cgsnapshot(self, node):
        """Collect the non-empty cgsnapshot attributes from the DOM node."""
        cgsnapshot_node = self.find_first_child_named(node, 'cgsnapshot')
        cgsnapshot = {}
        for attr in ('name', 'description'):
            value = cgsnapshot_node.getAttribute(attr)
            if value:
                cgsnapshot[attr] = value
        return cgsnapshot
class CgsnapshotsController(wsgi.Controller):
    """The cgsnapshots API controller for the OpenStack API."""

    _view_builder_class = cgsnapshot_views.ViewBuilder

    def __init__(self):
        self.cgsnapshot_api = consistencygroupAPI.API()
        super(CgsnapshotsController, self).__init__()

    @wsgi.serializers(xml=CgsnapshotTemplate)
    def show(self, req, id):
        """Return data about the given cgsnapshot."""
        LOG.debug('show called for member %s', id)
        context = req.environ['cinder.context']

        try:
            cgsnapshot = self.cgsnapshot_api.get_cgsnapshot(
                context,
                cgsnapshot_id=id)
        except exception.CgSnapshotNotFound as error:
            # Unknown id -> 404
            raise exc.HTTPNotFound(explanation=error.msg)

        return self._view_builder.detail(req, cgsnapshot)

    def delete(self, req, id):
        """Delete a cgsnapshot."""
        LOG.debug('delete called for member %s', id)
        context = req.environ['cinder.context']

        LOG.info(_LI('Delete cgsnapshot with id: %s'), id, context=context)

        try:
            cgsnapshot = self.cgsnapshot_api.get_cgsnapshot(
                context,
                cgsnapshot_id=id)
            self.cgsnapshot_api.delete_cgsnapshot(context, cgsnapshot)
        except exception.CgSnapshotNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        except exception.InvalidCgSnapshot:
            msg = _("Invalid cgsnapshot")
            raise exc.HTTPBadRequest(explanation=msg)
        except Exception:
            # NOTE(review): this broad catch maps any unexpected failure to a
            # 400 and discards the original error -- consider narrowing or at
            # least logging the exception.
            msg = _("Failed cgsnapshot")
            raise exc.HTTPBadRequest(explanation=msg)

        # Deletion happens asynchronously -> 202 Accepted.
        return webob.Response(status_int=202)

    @wsgi.serializers(xml=CgsnapshotsTemplate)
    def index(self, req):
        """Returns a summary list of cgsnapshots."""
        return self._get_cgsnapshots(req, is_detail=False)

    @wsgi.serializers(xml=CgsnapshotsTemplate)
    def detail(self, req):
        """Returns a detailed list of cgsnapshots."""
        return self._get_cgsnapshots(req, is_detail=True)

    def _get_cgsnapshots(self, req, is_detail):
        """Returns a list of cgsnapshots, transformed through view builder."""
        context = req.environ['cinder.context']
        cgsnapshots = self.cgsnapshot_api.get_all_cgsnapshots(context)
        # Apply the standard pagination/limit parameters from the request.
        limited_list = common.limited(cgsnapshots, req)

        if is_detail:
            cgsnapshots = self._view_builder.detail_list(req, limited_list)
        else:
            cgsnapshots = self._view_builder.summary_list(req, limited_list)
        return cgsnapshots

    @wsgi.response(202)
    @wsgi.serializers(xml=CgsnapshotTemplate)
    @wsgi.deserializers(xml=CreateDeserializer)
    def create(self, req, body):
        """Create a new cgsnapshot."""
        LOG.debug('Creating new cgsnapshot %s', body)
        self.assert_valid_body(body, 'cgsnapshot')

        context = req.environ['cinder.context']
        cgsnapshot = body['cgsnapshot']
        self.validate_name_and_description(cgsnapshot)

        try:
            group_id = cgsnapshot['consistencygroup_id']
        except KeyError:
            msg = _("'consistencygroup_id' must be specified")
            raise exc.HTTPBadRequest(explanation=msg)

        try:
            # Ensure the consistency group exists before snapshotting it.
            group = self.cgsnapshot_api.get(context, group_id)
        except exception.ConsistencyGroupNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)

        name = cgsnapshot.get('name', None)
        description = cgsnapshot.get('description', None)

        LOG.info(_LI("Creating cgsnapshot %(name)s."),
                 {'name': name},
                 context=context)

        try:
            new_cgsnapshot = self.cgsnapshot_api.create_cgsnapshot(
                context, group, name, description)
        except exception.InvalidCgSnapshot as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.CgSnapshotNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)

        retval = self._view_builder.summary(req, new_cgsnapshot)

        return retval
class Cgsnapshots(extensions.ExtensionDescriptor):
    """cgsnapshots support."""

    name = 'Cgsnapshots'
    alias = 'cgsnapshots'
    namespace = 'http://docs.openstack.org/volume/ext/cgsnapshots/api/v1'
    updated = '2014-08-18T00:00:00+00:00'

    def get_resources(self):
        """Return the resource extensions provided by this extension."""
        resource = extensions.ResourceExtension(
            Cgsnapshots.alias, CgsnapshotsController(),
            collection_actions={'detail': 'GET'})
        return [resource]
|
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from core.platform import models
from core.tests import test_utils
(feedback_models,) = models.Registry.import_models([models.NAMES.feedback])
CREATED_ON_FIELD = 'created_on'
LAST_UPDATED_FIELD = 'last_updated'
DELETED_FIELD = 'deleted'
FIELDS_NOT_REQUIRED = [CREATED_ON_FIELD, LAST_UPDATED_FIELD, DELETED_FIELD]
class FeedbackThreadModelTest(test_utils.GenericTestBase):
    """Tests for the FeedbackThreadModel class."""

    def test_get_exploration_and_thread_ids(self):
        """Full thread ids should split back into exploration and thread ids."""
        # Generate some full thread ids.
        full_thread_id_1 = (
            feedback_models.FeedbackThreadModel.generate_full_thread_id(
                'exp_id_1', 'thread_id_1'))
        full_thread_id_2 = (
            feedback_models.FeedbackThreadModel.generate_full_thread_id(
                'exp_id_2', 'thread_id_2'))

        exploration_ids, thread_ids = (
            feedback_models.FeedbackThreadModel.get_exploration_and_thread_ids(
                [full_thread_id_1, full_thread_id_2]))

        self.assertEqual(exploration_ids, ('exp_id_1', 'exp_id_2'))
        self.assertEqual(thread_ids, ('thread_id_1', 'thread_id_2'))
class FeedbackThreadUserModelTest(test_utils.GenericTestBase):
    """Tests for the FeedbackThreadUserModel class."""

    def test_create_new_object(self):
        """create() should store an instance keyed on user/exp/thread ids."""
        feedback_models.FeedbackThreadUserModel.create(
            'user_id', 'exp_id', 'thread_id')
        feedback_thread_user_model = (
            feedback_models.FeedbackThreadUserModel.get(
                'user_id', 'exp_id', 'thread_id'))

        # The model id is the dot-joined key; read messages start empty.
        self.assertEqual(
            feedback_thread_user_model.id, 'user_id.exp_id.thread_id')
        self.assertEqual(
            feedback_thread_user_model.message_ids_read_by_user, [])

    def test_get_object(self):
        """get() should return the model equal to a freshly built one."""
        feedback_models.FeedbackThreadUserModel.create(
            'user_id', 'exp_id', 'thread_id')
        expected_model = feedback_models.FeedbackThreadUserModel(
            id='user_id.exp_id.thread_id',
            message_ids_read_by_user=[])

        actual_model = (
            feedback_models.FeedbackThreadUserModel.get(
                'user_id', 'exp_id', 'thread_id'))

        self.assertEqual(actual_model.id, expected_model.id)
        self.assertEqual(
            actual_model.message_ids_read_by_user,
            expected_model.message_ids_read_by_user)

    def test_get_multi(self):
        """get_multi() should return models in the same order as the ids."""
        feedback_models.FeedbackThreadUserModel.create(
            'user_id', 'exp_id', 'thread_id_1')
        feedback_models.FeedbackThreadUserModel.create(
            'user_id', 'exp_id', 'thread_id_2')

        expected_model_1 = feedback_models.FeedbackThreadUserModel(
            id='user_id.exp_id.thread_id_1',
            message_ids_read_by_user=[])
        expected_model_2 = feedback_models.FeedbackThreadUserModel(
            id='user_id.exp_id.thread_id_2',
            message_ids_read_by_user=[])

        actual_models = feedback_models.FeedbackThreadUserModel.get_multi(
            'user_id', ['exp_id', 'exp_id'], ['thread_id_1', 'thread_id_2'])

        actual_model_1 = actual_models[0]
        actual_model_2 = actual_models[1]

        self.assertEqual(actual_model_1.id, expected_model_1.id)
        self.assertEqual(
            actual_model_1.message_ids_read_by_user,
            expected_model_1.message_ids_read_by_user)
        self.assertEqual(actual_model_2.id, expected_model_2.id)
        self.assertEqual(
            actual_model_2.message_ids_read_by_user,
            expected_model_2.message_ids_read_by_user)
class SuggestionModelTest(test_utils.GenericTestBase):
    """Tests the SuggestionModel class."""

    def setUp(self):
        # Seed three suggestions: two on exp_id1, one on exp_id2.
        super(SuggestionModelTest, self).setUp()
        feedback_models.SuggestionModel.create(
            'exp_id1', 'thread_id1', 'author_id', 1, 'state_name',
            'description', 'suggestion_text')
        feedback_models.SuggestionModel.create(
            'exp_id1', 'thread_id2', 'author_id', 1, 'state_name',
            'description', 'suggestion_text')
        feedback_models.SuggestionModel.create(
            'exp_id2', 'thread_id2', 'author_id', 1, 'state_name',
            'description', 'suggestion_text')

    def _get_suggestion_models_for_test(self, suggestions_list):
        """Removes fields that are set to default values in the base model and
        are thus not explicitly verified in tests."""
        updated_suggestions_list = []
        for suggestion in suggestions_list:
            suggestion_dict = suggestion.to_dict()
            for field in FIELDS_NOT_REQUIRED:
                if field in suggestion_dict:
                    suggestion_dict.pop(field)
            updated_suggestions_list.append(suggestion_dict)
        return updated_suggestions_list

    def test_create_new_object_runs_successfully(self):
        """create() should persist all fields, wrapping the suggestion text
        into a state_content dict."""
        feedback_models.SuggestionModel.create(
            'exp_id3', 'thread_id2', 'author_id', 1, 'state_name',
            'description', 'suggestion_text')

        suggestion = (
            feedback_models.SuggestionModel.get_by_exploration_and_thread_id(
                'exp_id3', 'thread_id2'))

        self.assertEqual(suggestion.exploration_id, 'exp_id3')
        self.assertEqual(suggestion.author_id, 'author_id')
        self.assertEqual(suggestion.exploration_version, 1)
        self.assertEqual(suggestion.state_name, 'state_name')
        self.assertEqual(suggestion.description, 'description')
        self.assertEqual(suggestion.state_content, {
            'type': 'text',
            'value': 'suggestion_text',
        })

    def test_create_suggestion_fails_if_thread_already_has_suggestion(self):
        """A thread id may carry at most one suggestion."""
        with self.assertRaisesRegexp(Exception, 'There is already a feedback '
                                     'thread with the given thread id: '
                                     'exp_id1.thread_id1'):
            feedback_models.SuggestionModel.create(
                'exp_id1', 'thread_id1', 'author_id', 1, 'state_name',
                'description', 'suggestion_text')

    def test_get_by_exploration_and_thread_id_suggestion_present(self):
        actual_suggestion = [(
            feedback_models.SuggestionModel.get_by_exploration_and_thread_id(
                'exp_id1', 'thread_id1'))]
        expected_suggestion = [feedback_models.SuggestionModel(
            id='exp_id1.thread_id1',
            author_id='author_id',
            exploration_id='exp_id1',
            exploration_version=1,
            state_name='state_name',
            description='description',
            state_content={
                'type': 'text',
                'value': 'suggestion_text'
            }
        )]

        # Compare after stripping base-model default fields.
        self.assertEqual(len(self._get_suggestion_models_for_test(
            actual_suggestion)), 1)
        self.assertEqual(
            self._get_suggestion_models_for_test(expected_suggestion),
            self._get_suggestion_models_for_test(actual_suggestion))

    def test_get_by_exploration_and_thread_id_no_suggestion(self):
        """An unknown exploration id yields None rather than raising."""
        actual_suggestion = (
            feedback_models.SuggestionModel.get_by_exploration_and_thread_id(
                'invalid_exp_id', 'thread_id1'))

        self.assertIsNone(actual_suggestion)

    def test_get_suggestion_html(self):
        suggestion = (
            feedback_models.SuggestionModel.get_by_exploration_and_thread_id(
                'exp_id2', 'thread_id2'))

        self.assertEqual(suggestion.get_suggestion_html(), 'suggestion_text')
class UnsentFeedbackEmailModelTest(test_utils.GenericTestBase):
    """Tests for FeedbackMessageEmailDataModel class"""

    def test_new_instances_stores_correct_data(self):
        """A stored UnsentFeedbackEmailModel round-trips its fields and
        defaults `retries` to zero.
        """
        user_id = 'A'
        reference = {
            'exploration_id': 'ABC123',
            'thread_id': 'thread_id1',
            'message_id': 'message_id1'
        }
        feedback_models.UnsentFeedbackEmailModel(
            id=user_id, feedback_message_references=[reference]).put()
        fetched = (
            feedback_models.UnsentFeedbackEmailModel.get_by_id(id=user_id))
        self.assertEqual(
            [reference], fetched.feedback_message_references)
        self.assertEqual(0, fetched.retries)
|
|
# Copyright 2011-2014 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from collections import OrderedDict # Python 2.7
except ImportError:
from ordereddict import OrderedDict # Python 2.6
import re
import urllib2 as urllib
class ConfigurationSettingsType(type):
    """ Metaclass for constructing ConfigurationSettings classes.

    Instances of the ConfigurationSettingsType construct ConfigurationSettings
    classes from a base ConfigurationSettings class and a dictionary of
    configuration settings. The settings in the dictionary are validated against
    the settings in the base class. You cannot add settings, you can only
    change their backing-field values and you cannot modify settings without
    backing-field values. These are considered fixed configuration setting
    values.

    This is an internal class used in two places:

    + decorators.Configuration.__call__

      Adds a ConfigurationSettings attribute to a SearchCommand class.

    + reporting_command.ReportingCommand.fix_up

      Adds a ConfigurationSettings attribute to a ReportingCommand.map method,
      if there is one.

    """
    # NOTE(review): unlike a normal metaclass, this one is invoked explicitly
    # (not via a `class` statement), which is why the signature carries an
    # extra leading `module` argument before the usual (name, bases, dict).
    def __new__(cls, module, name, bases, settings):
        # Create the class with an EMPTY namespace; the validated settings are
        # applied afterwards in __init__ below.
        cls = super(ConfigurationSettingsType, cls).__new__(
            cls, name, bases, {})
        return cls
    def __init__(cls, module, name, bases, settings):
        super(ConfigurationSettingsType, cls).__init__(name, bases, None)
        # Mapping of setting name -> (property, backing_field) defined by the
        # base ConfigurationSettings class.
        configuration_settings = cls.configuration_settings()
        for name, value in settings.iteritems():
            try:
                prop, backing_field = configuration_settings[name]
            except KeyError:
                # Settings may only be overridden, never added.
                raise AttributeError(
                    '%s has no %s configuration setting' % (cls, name))
            if backing_field is None:
                # A setting without a backing field is computed/managed and
                # therefore read-only here.
                raise AttributeError(
                    'The value of configuration setting %s is managed' % name)
            setattr(cls, backing_field, value)
        # Report the class as belonging to the caller's module, not this one.
        cls.__module__ = module
        return
class InputHeader(object):
    """ Represents a Splunk input header as a collection of name/value pairs.

    Pairs preserve the order in which they were read (backed by an
    OrderedDict).
    """

    def __init__(self):
        self._settings = OrderedDict()

    def __getitem__(self, name):
        return self._settings[name]

    def __iter__(self):
        # Iterating an InputHeader iterates its setting names.
        return self.iterkeys()

    def __len__(self):
        return len(self._settings)

    def __repr__(self):
        return ''.join(
            [InputHeader.__name__, '(', repr(self._settings.items()), ')'])

    def items(self):
        return self._settings.items()

    def iteritems(self):
        return self._settings.iteritems()

    def iterkeys(self):
        return self._settings.iterkeys()

    def itervalues(self):
        return self._settings.itervalues()

    def keys(self):
        return self._settings.keys()

    def values(self):
        return self._settings.values()

    def read(self, input_file):
        """ Reads an InputHeader from `input_file`.

        The input header is read as a sequence of *<key>***:***<value>* pairs
        separated by a newline. The end of the input header is signalled by an
        empty line or an end-of-file. Values are URL-decoded; a line with no
        ``:`` is treated as a continuation of the previous value.

        :param input_file: File-like object that supports iteration over lines

        """
        key, value = None, None
        # Fixed: removed an unused `import sys` that previously sat here.
        for line in input_file:
            if line == '\n':
                break
            if line[-1:] == '\n':
                line = line[:-1]
            item = line.split(':', 1)
            if len(item) == 2:
                # start of a new item; flush the previous one
                self._update(key, value)
                key, value = item[0], urllib.unquote(item[1])
            elif key is not None:
                # continuation of the current item
                value = '\n'.join([value, urllib.unquote(line)])
        # Flush the final pending item (no-op when nothing was read).
        self._update(key, value)
        return

    def _update(self, k, v):
        # Ignore the initial (None, None) sentinel pair.
        if k is not None:
            self._settings[k] = v
        return
class MessagesHeader(object):
    """ Represents an output messages header.

    Messages in the header are of the form

        *<message-level>***=***<message-text>***\r\n**

    Message levels include:

    + debug_message
    + info_message
    + warn_message
    + error_message

    The end of the messages header is signalled by the occurrence of a single
    blank line (`\r\n').

    References:
    + [command.conf.spec](http://docs.splunk.com/Documentation/Splunk/6.0/Admin/Commandsconf#commands.conf.spec)

    """
    def __init__(self):
        self._messages = []

    def __iadd__(self, level_text_pair):
        """ Appends a (message_level, message_text) pair, without validating
        the level (unlike :meth:`append`).

        Fixed: the original signature used Python 2-only tuple-parameter
        unpacking; unpacking inside the body is behaviour-identical and also
        valid on Python 3.
        """
        message_level, message_text = level_text_pair
        self._messages.append((message_level, message_text))
        return self

    def __iter__(self):
        return self._messages.__iter__()

    def __len__(self):
        return len(self._messages)

    def __repr__(self):
        return ''.join([MessagesHeader.__name__, '(', repr(self._messages), ')'])

    def append(self, message_level, message_text):
        """ Adds a message level/text pair to this MessagesHeader

        :raises ValueError: If `message_level` is not one of the recognized
            `_message_levels`.
        """
        if message_level not in MessagesHeader._message_levels:
            raise ValueError('message_level="%s"' % message_level)
        self._messages.append((message_level, message_text))

    def write(self, output_file):
        """ Writes this MessageHeader to an output stream.

        Messages are written as a sequence of *<message_text-message_level>***=**
        *<message_text-text>* pairs separated by '\r\n'. The sequence is
        terminated by a pair of '\r\n' sequences.

        """
        for message_level, message_text in self:
            output_file.write('%s=%s\r\n' % (message_level, message_text))
        output_file.write('\r\n')

    # Recognized message levels; `append` validates against this list.
    _message_levels = [
        'debug_message', 'warn_message', 'info_message', 'error_message']
class SearchCommandParser(object):
    """ Parses the arguments to a search command.

    A search command line is described by the following syntax.

    **Syntax**::

       command       = command-name *[wsp option] *[wsp [dquote] field-name [dquote]]
       command-name  = alpha *( alpha / digit )
       option        = option-name [wsp] "=" [wsp] option-value
       option-name   = alpha *( alpha / digit / "_" )
       option-value  = word / quoted-string
       word          = 1*( %01-%08 / %0B / %0C / %0E-1F / %21 / %23-%FF ) ; Any character but DQUOTE and WSP
       quoted-string = dquote *( word / wsp / "\" dquote / dquote dquote ) dquote
       field-name    = ( "_" / alpha ) *( alpha / digit / "_" / "." / "-" )

    **Note:**
    This syntax is constrained to an 8-bit character set.

    **Note:**
    This syntax does not show that `field-name` values may be comma-separated
    when in fact they can be. This is because Splunk strips commas from the
    command line. A custom search command will never see them.

    **Example:**
    countmatches fieldname = word_count pattern = \w+ some_text_field

    Option names are mapped to properties in the targeted ``SearchCommand``. It
    is the responsibility of the property setters to validate the values they
    receive. Property setters may also produce side effects. For example,
    setting the built-in `log_level` immediately changes the `log_level`.

    """
    def parse(self, argv, command):
        """ Splits an argument list into an options dictionary and a fieldname
        list.

        The argument list, `argv`, must be of the form::

            *[option]... *[<field-name>]

        Options are validated and assigned to items in `command.options`. Field
        names are validated and stored in the list of `command.fieldnames`.

        #Arguments:

        :param command: Search command instance.
        :type command: ``SearchCommand``
        :param argv: List of search command arguments.
        :type argv: ``list``
        :return: ``None``

        #Exceptions:

        ``SyntaxError``: Argument list is incorrectly formed.
        ``ValueError``: Unrecognized option/field name, or an illegal field value.

        """
        # Prepare
        command_args = ' '.join(argv)
        command.fieldnames = None
        command.options.reset()

        command_args = SearchCommandParser._arguments_re.match(command_args)

        if command_args is None:
            raise SyntaxError("Syntax error: %s" % ' '.join(argv))

        # Parse options

        for option in SearchCommandParser._options_re.finditer(
                command_args.group('options')):
            name, value = option.group(1), option.group(2)
            if name not in command.options:
                raise ValueError('Unrecognized option: %s = %s' % (name, value))
            command.options[name].value = SearchCommandParser.unquote(value)

        missing = command.options.get_missing()

        if missing is not None:
            if len(missing) == 1:
                raise ValueError('A value for "%s" is required' % missing[0])
            else:
                raise ValueError(
                    'Values for these options are required: %s' %
                    ', '.join(missing))

        # Parse field names

        command.fieldnames = command_args.group('fieldnames').split()
        command.logger.debug('%s: %s', type(command).__name__, command)
        return

    @classmethod
    def unquote(cls, string):
        """ Removes quotes from a quoted string.

        Splunk search command quote rules are applied. The enclosing
        double-quotes, if present, are removed. Escaped double-quotes ('\"' or
        '""') are replaced by a single double-quote ('"').

        **NOTE**

        We are not using a json.JSONDecoder because Splunk quote rules are
        different than JSON quote rules. A json.JSONDecoder does not recognize
        a pair of double-quotes ('""') as an escaped quote ('"') and will decode
        single-quoted strings ("'") in addition to double-quoted ('"') strings.

        :raises ValueError: If `string` starts with a double-quote but is not
            properly terminated by one.

        """
        if len(string) == 0:
            return ''
        if string[0] != '"':
            # Unquoted value: return verbatim.
            return string
        if len(string) == 1:
            return string
        if string[-1] != '"':
            raise ValueError("Poorly formed string literal: %s" % string)

        def replace(match):
            value = match.group(0)
            if value == '\\\\':
                return '\\'
            if value == '\\"':
                return '"'
            if value == '""':
                return '"'
            if len(value) != 2:
                raise ValueError("Poorly formed string literal: %s" % string)
            return value  # consistent with python handling

        result = re.sub(cls._escaped_quote_re, replace, string[1:-1])
        return result

    #region Class variables

    _arguments_re = re.compile(r"""
        ^\s*
        (?P<options>     # Match a leading set of name/value pairs
            (?:
                (?:[_a-zA-Z][_a-zA-Z0-9]+)           # name
                \s*=\s*                              # =
                (?:[^\s"]+|"(?:[^"]+|""|\\")*")\s*?  # value
            )*
        )
        \s*
        (?P<fieldnames>  # Match a trailing set of field names
            (?:(?:[_a-zA-Z][_.a-zA-Z0-9-]+|"[_a-zA-Z][_.a-zA-Z0-9-]+")\s*)*
        )
        \s*$
        """, re.VERBOSE)

    _escaped_quote_re = re.compile(r"""(\\\\|\\"|""|\\."|\\)""")

    # Fixed: the original pattern read `[_a-zA-Z][[_a-zA-Z0-9]+`; the doubled
    # `[` placed a literal '[' inside the character class, so names containing
    # '[' were accepted.  The corrected class matches the `option-name`
    # grammar above.
    _name_re = re.compile(r"""[_a-zA-Z][_a-zA-Z0-9]+""")

    _options_re = re.compile(r"""
        # Captures a set of name/value pairs when used with re.finditer
        ([_a-zA-Z][_a-zA-Z0-9]+)       # name
        \s*=\s*                        # =
        ([^\s"]+|"(?:[^"]+|""|\\")*")  # value
        """, re.VERBOSE)

    #endregion
|
|
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import sys
import time
import unittest
from fabric.api import local
import nose
from lib.noseplugin import OptionParser, parser_option
from lib import base
from lib.base import (
BGP_FSM_IDLE,
BGP_FSM_ACTIVE,
BGP_FSM_ESTABLISHED,
BGP_ATTR_TYPE_MULTI_EXIT_DISC,
BGP_ATTR_TYPE_LOCAL_PREF,
wait_for_completion,
assert_several_times,
)
from lib.gobgp import (
GoBGPContainer,
extract_path_attribute,
)
from lib.quagga import QuaggaBGPContainer
from lib.exabgp import ExaBGPContainer
class GoBGPTestBase(unittest.TestCase):
    """Scenario tests that drive a GoBGP speaker (g1) peered with Quagga,
    GoBGP and ExaBGP neighbours running in Docker containers.

    NOTE(review): the tests are order-dependent (test_NN prefixes) and
    mutate shared class state (cls.gobgp / cls.quaggas); they must be run
    as a suite, in order.
    """

    @classmethod
    def setUpClass(cls):
        # Start one GoBGP speaker and three Quagga peers, establish
        # password-protected sessions, and advertise one route per Quagga.
        gobgp_ctn_image_name = parser_option.gobgp_image
        base.TEST_PREFIX = parser_option.test_prefix
        g1 = GoBGPContainer(name='g1', asn=65000, router_id='192.168.0.1',
                            ctn_image_name=gobgp_ctn_image_name,
                            log_level=parser_option.gobgp_log_level)
        q1 = QuaggaBGPContainer(name='q1', asn=65001, router_id='192.168.0.2')
        q2 = QuaggaBGPContainer(name='q2', asn=65002, router_id='192.168.0.3')
        q3 = QuaggaBGPContainer(name='q3', asn=65003, router_id='192.168.0.4')
        qs = [q1, q2, q3]
        ctns = [g1, q1, q2, q3]
        # run() returns a per-container settle time; wait for the slowest.
        initial_wait_time = max(ctn.run() for ctn in ctns)
        time.sleep(initial_wait_time)
        for q in qs:
            g1.add_peer(q, passwd='passwd')
            q.add_peer(g1, passwd='passwd', passive=True)
        # advertise a route from q1, q2, q3
        for idx, q in enumerate(qs):
            route = '10.0.{0}.0/24'.format(idx + 1)
            q.add_route(route)
        cls.gobgp = g1
        cls.quaggas = {'q1': q1, 'q2': q2, 'q3': q3}

    # test each neighbor state is turned establish
    def test_01_neighbor_established(self):
        for q in self.quaggas.itervalues():
            self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q)

    def test_02_check_gobgp_global_rib(self):
        """Poll until every route advertised by each Quagga appears in
        gobgp's global RIB (120s timeout)."""
        for q in self.quaggas.itervalues():
            # paths expected to exist in gobgp's global rib
            routes = q.routes.keys()
            timeout = 120
            interval = 1
            count = 0
            while True:
                # gobgp's global rib
                state = self.gobgp.get_neighbor_state(q)
                self.assertEqual(state, BGP_FSM_ESTABLISHED)
                global_rib = [p['prefix'] for p in self.gobgp.get_global_rib()]
                for p in global_rib:
                    if p in routes:
                        routes.remove(p)
                if len(routes) == 0:
                    break
                time.sleep(interval)
                count += interval
                if count >= timeout:
                    raise Exception('timeout')

    # check gobgp properly add it's own asn to aspath
    def test_03_check_gobgp_adj_out_rib(self):
        for q in self.quaggas.itervalues():
            for path in self.gobgp.get_adj_rib_out(q):
                asns = path['aspath']
                self.assertTrue(self.gobgp.asn in asns)

    # check routes are properly advertised to all BGP speaker
    def test_04_check_quagga_global_rib(self):
        interval = 1
        timeout = int(120 / interval)
        for q in self.quaggas.itervalues():
            done = False
            for _ in range(timeout):
                if done:
                    break
                global_rib = q.get_global_rib()
                global_rib = [p['prefix'] for p in global_rib]
                if len(global_rib) < len(self.quaggas):
                    time.sleep(interval)
                    continue
                self.assertTrue(len(global_rib) == len(self.quaggas))
                for c in self.quaggas.itervalues():
                    for r in c.routes:
                        self.assertTrue(r in global_rib)
                done = True
            if done:
                continue
            # should not reach here
            raise AssertionError

    def test_05_add_quagga(self):
        """Add a fourth Quagga peer (q4) at runtime."""
        q4 = QuaggaBGPContainer(name='q4', asn=65004, router_id='192.168.0.5')
        self.quaggas['q4'] = q4
        initial_wait_time = q4.run()
        time.sleep(initial_wait_time)
        self.gobgp.add_peer(q4)
        q4.add_peer(self.gobgp)
        q4.add_route('10.0.4.0/24')
        self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q4)

    def test_06_check_global_rib(self):
        self.test_02_check_gobgp_global_rib()
        self.test_04_check_quagga_global_rib()

    def test_07_stop_one_quagga(self):
        g1 = self.gobgp
        q4 = self.quaggas['q4']
        q4.stop()
        self.gobgp.wait_for(expected_state=BGP_FSM_ACTIVE, peer=q4)
        g1.del_peer(q4)
        del self.quaggas['q4']

    # check gobgp properly send withdrawal message with q4's route
    def test_08_check_global_rib(self):
        self.test_02_check_gobgp_global_rib()
        self.test_04_check_quagga_global_rib()

    def test_09_add_distant_relative(self):
        """q5 peers only with q2/q3; MED policies steer which transit path
        g1 prefers for q5's route."""
        q1 = self.quaggas['q1']
        q2 = self.quaggas['q2']
        q3 = self.quaggas['q3']
        q5 = QuaggaBGPContainer(name='q5', asn=65005, router_id='192.168.0.6')
        initial_wait_time = q5.run()
        time.sleep(initial_wait_time)
        for q in [q2, q3]:
            q5.add_peer(q)
            q.add_peer(q5)
        med200 = {'name': 'med200',
                  'type': 'permit',
                  'match': '0.0.0.0/0',
                  'med': 200}
        q2.add_policy(med200, self.gobgp, 'out')
        med100 = {'name': 'med100',
                  'type': 'permit',
                  'match': '0.0.0.0/0',
                  'med': 100}
        q3.add_policy(med100, self.gobgp, 'out')
        q5.add_route('10.0.6.0/24')
        self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q2)
        self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q3)
        q2.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q5)
        q3.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q5)
        timeout = 120
        interval = 1
        count = 0
        while True:
            paths = self.gobgp.get_adj_rib_out(q1, '10.0.6.0/24')
            if len(paths) > 0:
                path = paths[0]
                print "{0}'s nexthop is {1}".format(path['nlri']['prefix'],
                                                    path['nexthop'])
                n_addrs = [i[1].split('/')[0] for i in self.gobgp.ip_addrs]
                if path['nexthop'] in n_addrs:
                    break
            time.sleep(interval)
            count += interval
            if count >= timeout:
                raise Exception('timeout')

    def test_10_originate_path(self):
        """A locally originated path has nexthop 0.0.0.0 and empty AS_PATH."""
        self.gobgp.add_route('10.10.0.0/24')
        dst = self.gobgp.get_global_rib('10.10.0.0/24')
        self.assertTrue(len(dst) == 1)
        self.assertTrue(len(dst[0]['paths']) == 1)
        path = dst[0]['paths'][0]
        self.assertTrue(path['nexthop'] == '0.0.0.0')
        self.assertTrue(len(path['aspath']) == 0)

    def test_11_check_adj_rib_out(self):
        """Advertised copies carry the session-local nexthop and g1's ASN."""
        for q in self.quaggas.itervalues():
            paths = self.gobgp.get_adj_rib_out(q, '10.10.0.0/24')
            self.assertTrue(len(paths) == 1)
            path = paths[0]
            peer_info = self.gobgp.peers[q]
            local_addr = peer_info['local_addr'].split('/')[0]
            self.assertTrue(path['nexthop'] == local_addr)
            self.assertTrue(path['aspath'] == [self.gobgp.asn])

    def test_12_disable_peer(self):
        """Disabling q1 withdraws its routes from the RIB and from all
        remaining peers' adj-rib-out."""
        q1 = self.quaggas['q1']
        self.gobgp.disable_peer(q1)
        self.gobgp.wait_for(expected_state=BGP_FSM_IDLE, peer=q1)
        time.sleep(3)
        for route in q1.routes.iterkeys():
            dst = self.gobgp.get_global_rib(route)
            self.assertTrue(len(dst) == 0)
            for q in self.quaggas.itervalues():
                if q is q1:
                    continue
                paths = self.gobgp.get_adj_rib_out(q, route)
                self.assertTrue(len(paths) == 0)

    def test_13_enable_peer(self):
        q1 = self.quaggas['q1']
        self.gobgp.enable_peer(q1)
        self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q1)

    def test_14_check_adj_rib_out(self):
        self.test_11_check_adj_rib_out()

    def test_15_check_active_connection(self):
        """g1 actively connects to a passive iBGP GoBGP peer (g2)."""
        g1 = self.gobgp
        g2 = GoBGPContainer(name='g2', asn=65000, router_id='192.168.0.7',
                            ctn_image_name=self.gobgp.image,
                            log_level=parser_option.gobgp_log_level)
        time.sleep(g2.run())
        self.quaggas['g2'] = g2
        g2.add_peer(g1, passive=True)
        g1.add_peer(g2)
        g1.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=g2)

    def test_16_check_local_pref_and_med_handling(self):
        """LOCAL_PREF is propagated to iBGP peers but reset to the default
        (100) toward eBGP peers; MED is propagated to both."""
        g1 = self.gobgp
        g1.add_route('10.20.0.0/24', local_pref=1000, med=2000)
        # iBGP peer
        g2 = self.quaggas['g2']
        paths = g2.get_global_rib('10.20.0.0/24')
        self.assertTrue(len(paths) == 1)
        self.assertTrue(len(paths[0]['paths']) == 1)
        path = paths[0]['paths'][0]
        local_pref = extract_path_attribute(path, BGP_ATTR_TYPE_LOCAL_PREF)
        self.assertTrue(local_pref['value'] == 1000)
        med = extract_path_attribute(path, BGP_ATTR_TYPE_MULTI_EXIT_DISC)
        self.assertTrue(med['metric'] == 2000)
        # eBGP peer
        q1 = self.quaggas['q1']
        paths = q1.get_global_rib('10.20.0.0/24')
        self.assertTrue(len(paths) == 1)
        path = paths[0]
        local_pref = extract_path_attribute(path, BGP_ATTR_TYPE_LOCAL_PREF)
        # local_pref's default value is 100
        self.assertTrue(local_pref['value'] == 100)
        med = extract_path_attribute(path, BGP_ATTR_TYPE_MULTI_EXIT_DISC)
        self.assertTrue(med['metric'] == 2000)

    def test_17_check_shutdown(self):
        """After q3 goes down, q1 must still reach 20.0.0.0/24 via the
        surviving advertiser (q2) through g1."""
        g1 = self.gobgp
        q1 = self.quaggas['q1']
        q2 = self.quaggas['q2']
        q3 = self.quaggas['q3']
        q2.add_route('20.0.0.0/24')
        q3.add_route('20.0.0.0/24')
        self.test_01_neighbor_established()
        self.test_02_check_gobgp_global_rib()
        paths = q1.get_global_rib('20.0.0.0/24')
        self.assertEqual(len(paths), 1)
        n_addrs = [i[1].split('/')[0] for i in self.gobgp.ip_addrs]
        self.assertIn(paths[0]['nexthop'], n_addrs)
        q3.stop()
        self.gobgp.wait_for(expected_state=BGP_FSM_ACTIVE, peer=q3)
        paths = q1.get_global_rib('20.0.0.0/24')
        self.assertEqual(len(paths), 1)
        self.assertIn(paths[0]['nexthop'], n_addrs)
        g1.del_peer(q3)
        del self.quaggas['q3']

    def test_18_check_withdrawal(self):
        """Deleting g1's own route must fall back to q1's path (implicit
        withdraw toward q1, re-advertise toward q2)."""
        g1 = self.gobgp
        q1 = self.quaggas['q1']
        q2 = self.quaggas['q2']
        g1.add_route('30.0.0.0/24')
        q1.add_route('30.0.0.0/24')
        self.test_01_neighbor_established()
        self.test_02_check_gobgp_global_rib()
        paths = g1.get_adj_rib_out(q1, '30.0.0.0/24')
        self.assertEqual(len(paths), 1)
        self.assertNotIn('source-id', paths[0])
        paths = g1.get_adj_rib_out(q2, '30.0.0.0/24')
        self.assertEqual(len(paths), 1)
        self.assertNotIn('source-id', paths[0])
        g1.local('gobgp global rib del 30.0.0.0/24')
        def f():
            paths = g1.get_adj_rib_out(q1, '30.0.0.0/24')
            self.assertEqual(len(paths), 0)
            paths = g1.get_adj_rib_out(q2, '30.0.0.0/24')
            self.assertEqual(len(paths), 1)
            self.assertEqual(paths[0]['source-id'], '192.168.0.2')
        assert_several_times(f)

    def test_19_check_grpc_add_neighbor(self):
        """Add a neighbour through the gobgp CLI (gRPC), not the config."""
        g1 = self.gobgp
        e1 = ExaBGPContainer(name='e1', asn=65000, router_id='192.168.0.7')
        time.sleep(e1.run())
        e1.add_peer(g1)
        self.quaggas['e1'] = e1
        n = e1.peers[g1]['local_addr'].split('/')[0]
        g1.local('gobgp n add {0} as 65000'.format(n))
        g1.add_peer(e1, reload_config=False)
        g1.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=e1)

    def test_20_check_grpc_del_neighbor(self):
        g1 = self.gobgp
        e1 = self.quaggas['e1']
        n = e1.peers[g1]['local_addr'].split('/')[0]
        g1.local('gobgp n del {0}'.format(n))
        g1.del_peer(e1, reload_config=False)

    def test_21_check_withdrawal_2(self):
        """`gobgp monitor` on g2 must observe the withdrawal event."""
        g1 = self.gobgp
        g2 = self.quaggas['g2']
        prefix = '40.10.0.0/24'
        g1.add_route(prefix)
        wait_for_completion(lambda: len(g1.get_global_rib(prefix)) == 1)
        wait_for_completion(lambda: len(g2.get_global_rib(prefix)) == 1)
        r = g2.local('gobgp monitor global rib -j', stream=True, tty=False)
        g1.local('gobgp global rib del 40.10.0.0/24')
        del g1.routes[prefix]
        wait_for_completion(lambda: len(g1.get_global_rib(prefix)) == 0)
        wait_for_completion(lambda: len(g2.get_global_rib(prefix)) == 0)
        ret = json.loads(r.next())
        self.assertTrue(ret[0]['nlri']['prefix'] == prefix)
        self.assertTrue('withdrawal' in ret[0])

    def test_22_check_cli_sorted(self):
        """CLI `gobgp global rib` output must list prefixes in the same
        sorted order they were generated in."""
        g1 = self.gobgp
        cnt = 0
        def next_prefix():
            for i in range(100, 105):
                for j in range(100, 105):
                    yield '{0}.{1}.0.0/24'.format(i, j)
        for p in next_prefix():
            g1.local('gobgp global rib add {0}'.format(p))
            cnt += 1
        cnt2 = 0
        g = next_prefix()
        n = g.next()
        for path in g1.local("gobgp global rib", capture=True).split('\n')[1:]:
            if [elem for elem in path.split(' ') if elem != ''][1] == n:
                try:
                    cnt2 += 1
                    n = g.next()
                except StopIteration:
                    break
        self.assertTrue(cnt == cnt2)

    def test_23_check_withdrawal3(self):
        """With two fresh peers (g3/g4), the best path must flip to g4's
        announcement once g3 withdraws, and each peer must never be sent
        its own path back."""
        gobgp_ctn_image_name = parser_option.gobgp_image
        g1 = self.gobgp
        g3 = GoBGPContainer(name='g3', asn=65006, router_id='192.168.0.8',
                            ctn_image_name=gobgp_ctn_image_name,
                            log_level=parser_option.gobgp_log_level)
        g4 = GoBGPContainer(name='g4', asn=65007, router_id='192.168.0.9',
                            ctn_image_name=gobgp_ctn_image_name,
                            log_level=parser_option.gobgp_log_level)
        initial_wait_time = max(ctn.run() for ctn in [g3, g4])
        time.sleep(initial_wait_time)
        # NOTE(review): this rebinds self.quaggas, discarding earlier peers.
        self.quaggas = {'g3': g3, 'g4': g4}
        g3.local('gobgp global rib add 50.0.0.0/24')
        g1.add_peer(g3, passive=True)
        g3.add_peer(g1)
        g1.add_peer(g4, passive=True)
        g4.add_peer(g1)
        self.test_01_neighbor_established()
        self.test_02_check_gobgp_global_rib()
        g4.local('gobgp global rib add 50.0.0.0/24 med 10')
        paths = g1.get_adj_rib_out(g3, '50.0.0.0/24')
        self.assertTrue(len(paths) == 0)
        paths = g1.get_adj_rib_out(g4, '50.0.0.0/24')
        self.assertTrue(len(paths) == 1)
        self.assertTrue(paths[0]['source-id'] == '192.168.0.8')
        g3.local('gobgp global rib del 50.0.0.0/24')
        paths = g1.get_adj_rib_out(g3, '50.0.0.0/24')
        self.assertTrue(len(paths) == 1)
        self.assertTrue(paths[0]['source-id'] == '192.168.0.9')
        paths = g1.get_adj_rib_out(g4, '50.0.0.0/24')
        self.assertTrue(len(paths) == 0)
if __name__ == '__main__':
output = local("which docker 2>&1 > /dev/null ; echo $?", capture=True)
if int(output) is not 0:
print "docker not found"
sys.exit(1)
nose.main(argv=sys.argv, addplugins=[OptionParser()],
defaultTest=sys.argv[0])
|
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 13 21:00:38 2017
@author: pchero
"""
import Tkinter as tk
import ttk
import tkFont
import tkSimpleDialog
class FrameMain(object):
    """Main Tk frame: a tree of dialing uuids on the left, a key/value
    detail tree on the right, and Show/Create/Update/Delete buttons.

    Wires itself into a data handler (model access) and an action handler
    (async command dispatch) supplied by the caller.
    """
    # NOTE(review): class-level attributes used as instance defaults; each
    # is rebound per instance in __init__ / frame_main.
    container = None
    action_handler = None
    data_handler = None
    # info
    list_headers = ["uuid"]
    detail_headers = ["key", "value"]
    list_tree = None
    list_items = None
    detail_tree = None
    detail_items = None
    def __init__(self, master, data_handler, control_handler):
        # set handlers
        self.data_handler = data_handler
        self.data_handler.set_view_handler(self)
        self.action_handler = control_handler
        # NOTE(review): 'set_veiw_handler' (sic) matches the method name the
        # control handler actually exposes — confirm before renaming.
        self.action_handler.set_veiw_handler(self)
        self.container = tk.Frame(master)
        self.container.grid()
        self.frame_setup()
    def destroy(self):
        """Tears down the whole frame."""
        print("destroy")
        self.container.destroy()
    def frame_setup(self):
        """Builds the widgets and loads the initial dialing list."""
        self.frame_main()
        self.update_list_items("dialing")
        return
    def frame_main(self):
        """Creates the two treeviews, their scrollbars, and the buttons."""
        frame = tk.Frame(self.container)
        frame.grid()
        frame.grid_rowconfigure(0, weight=1)
        frame.grid_columnconfigure(0, weight=1)
        # create list treeview
        self.list_tree = ttk.Treeview(frame, columns=self.list_headers, show="headings", height=30)
        self.list_tree.grid(column=0, row=0, sticky=tk.E+tk.W+tk.N+tk.S, rowspan=4)
        list_vsb = ttk.Scrollbar(frame, orient="vertical", command=self.list_tree.yview)
        list_vsb.grid(column=1, row=0, sticky=tk.E+tk.W+tk.N+tk.S, rowspan=4)
        self.list_tree.configure(yscrollcommand=list_vsb.set)
        self.list_tree.bind("<Double-Button-1>", self._action_list_double_click)
        # create detail treeview
        detail_tree = ttk.Treeview(frame, columns=self.detail_headers, show="headings", height=30)
        detail_tree.grid(column=2, row=0, sticky=tk.E+tk.W+tk.N+tk.S, rowspan=4)
        detail_vsb = ttk.Scrollbar(frame, orient="vertical", command=detail_tree.yview)
        detail_vsb.grid(column=3, row=0, sticky=tk.E+tk.W+tk.N+tk.S, rowspan=4)
        detail_tree.configure(yscrollcommand=detail_vsb.set)
        detail_tree.bind("<Double-Button-1>", self._action_detail_double_click)
        self.detail_tree = detail_tree
        # Buttons
        bt_create = tk.Button(frame, text="Show", width=8, command=self._action_button_show)
        bt_create.grid(column=4, row=0, sticky=tk.E+tk.W+tk.N+tk.S)
        bt_create = tk.Button(frame, text="Create", width=8, command=self._action_button_create)
        bt_create.grid(column=4, row=1, sticky=tk.E+tk.W+tk.N+tk.S)
        bt_update = tk.Button(frame, text="Update", width=8, command=self._action_button_update)
        bt_update.grid(column=4, row=2, sticky=tk.E+tk.W+tk.N+tk.S)
        bt_delete = tk.Button(frame, text="Delete", width=8, command=self._action_button_delete)
        bt_delete.grid(column=4, row=3, sticky=tk.E+tk.W+tk.N+tk.S)
    def _action_list_double_click(self, event):
        """Double-click on the list tree: show details for the row's uuid."""
        # get activated item
        # get selected key, value
        cur_item = self.list_tree.focus()
        uuid = self.list_tree.item(cur_item)["values"][0]
        self.update_detail_items(uuid)
        return
    def _action_detail_double_click(self, event):
        """Double-click on the detail tree: prompt for and apply a new
        value for the selected key (in-memory only; Update sends it)."""
        print("_action_detail_double_click")
        # get selected key, value
        cur_item = self.detail_tree.focus()
        key = self.detail_tree.item(cur_item)["values"][0]
        value = self.detail_tree.item(cur_item)["values"][1]
        print("key, value. key[%s], value[%s]" % (key, value))
        # get new value
        ret = tkSimpleDialog.askstring("New value", "Please enter a new value", initialvalue=value)
        if ret == None:
            # dialog cancelled
            return
        # update
        print ("result. ret[%s]" % (ret))
        self.detail_items[key] = ret
        self.update_detail()
        return
    def _action_button_show(self):
        """Show button: request one dialing by uuid, or all when the
        prompt is left empty."""
        print("_action_button_show")
        # get search uuid
        ret = tkSimpleDialog.askstring("Show dialing", "Please enter a dialing uuid")
        if ret == None:
            return
        if ret == "":
            self.action_handler.send_cmd_async("OutDialingShow")
        else:
            data = {"Uuid":ret}
            self.action_handler.send_cmd_async("OutDialingShow", data)
        return
    def _action_button_create(self):
        """Create button: fire an async create command."""
        print("_action_button_create")
        self.action_handler.send_cmd_async("OutDialingCreate")
        return
    def _action_button_update(self):
        """Update button: send the (possibly locally edited) detail items."""
        print("_action_button_update")
        items = self.detail_items
        self.action_handler.send_cmd_async("OutDialingUpdate", items)
        return
    def _action_button_delete(self):
        """Delete button: delete the currently displayed dialing by uuid.

        NOTE(review): pop() mutates self.detail_items, removing its Uuid.
        """
        print("_action_button_delete")
        items = self.detail_items
        uuid = items.pop("Uuid", None)
        if uuid == None:
            print("Could not get uuid info. item[%s]", items)
            return
        data = {"Uuid":uuid}
        self.action_handler.send_cmd_async("OutDialingDelete", data)
        return
    def _update_list(self):
        """Repopulates the list tree from self.list_items."""
        print("_update_list")
        # delete all items
        for i in self.list_tree.get_children():
            self.list_tree.delete(i)
        items = self.list_items
        # insert items
        for col in self.list_headers:
            # NOTE(review): `sortby` is defined elsewhere in the project —
            # confirm it is in scope at runtime.
            self.list_tree.heading(col, text=col.title(), command=lambda c=col: sortby(self.list_tree, c, 0))
            # adjust the column's width to the header string
            self.list_tree.column(col, width=tkFont.Font().measure(col.title()))
        # insert items
        # NOTE(review): if `items` is empty, `key` below is unbound and the
        # width adjustment would raise NameError — verify callers.
        for key in items:
            self.list_tree.insert('', 'end', values=(key))
            # size arrange
            col_w = tkFont.Font().measure(key)
            if self.list_tree.column(self.list_headers[0], width=None) < col_w:
                self.list_tree.column(self.list_headers[0], width=col_w)
    def update_list_items(self, items):
        '''
        Reloads the dialing list; only the "dialing" category is supported.
        '''
        print("update_list_items")
        if items == None or items != "dialing":
            return
        self.list_items = self.data_handler.dialing_get_list_all()
        #print self.list_items
        self._update_list()
    def update_detail_items(self, uuid):
        """Loads a copy of one dialing's data into the detail tree."""
        if uuid == None:
            return
        data = self.data_handler.dialing_get(uuid)
        if data == None:
            print("Could not find correct dialing info. uuid[%s]" % uuid)
            return
        # copy so local edits don't mutate the data handler's record
        self.detail_items = data.copy()
        self.update_detail()
        return
    def update_detail(self):
        '''
        update the detail tree
        '''
        items = self.detail_items
        # delete all items
        for i in self.detail_tree.get_children():
            self.detail_tree.delete(i)
        # sort
        for col in self.detail_headers:
            self.detail_tree.heading(col, text=col.title(), command=lambda c=col: sortby(self.detail_tree, c, 0))
            # adjust the column's width to the header string
            self.detail_tree.column(col, width=tkFont.Font().measure(col.title()))
        if items == None:
            return
        # insert items
        for key, val in items.iteritems():
            self.detail_tree.insert('', 'end', values=(key, val))
            # size arrange
            col_w = tkFont.Font().measure(key)
            if self.detail_tree.column(self.detail_headers[0], width=None) < col_w:
                self.detail_tree.column(self.detail_headers[0], width=col_w)
            col_w = tkFont.Font().measure(val)
            if self.detail_tree.column(self.detail_headers[1], width=None) < col_w:
                self.detail_tree.column(self.detail_headers[1], width=col_w)
        return
    def update_detail_item(self, event):
        """Prompt-and-edit for the selected detail row (no initial value).

        NOTE(review): near-duplicate of _action_detail_double_click and not
        bound to any widget in this class — possibly dead code.
        """
        print("OnClick detail")
        # get selected key, value
        cur_item = self.detail_tree.focus()
        key = self.detail_tree.item(cur_item)["values"][0]
        value = self.detail_tree.item(cur_item)["values"][1]
        print("key, value. key[%s], value[%s]" % (key, value))
        # get new value
        ret = tkSimpleDialog.askstring("New value", "Please enter a new value")
        if ret == None:
            return
        # update
        print ("result. ret[%s]" % (ret))
        self.detail_items[key] = ret
        self.update_detail()
        return
|
|
#! /usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import json
import os.path
import re
import sys
import schema_util
# This file is a peer to json_schema.py. Each of these files understands a
# certain format describing APIs (either JSON or IDL), reads files written
# in that format into memory, and emits them as a Python array of objects
# corresponding to those APIs, where the objects are formatted in a way that
# the JSON schema compiler understands. compiler.py drives both idl_schema.py
# and json_schema.py.
# idl_parser expects to be able to import certain files in its directory,
# so let's set things up the way it wants.
# Path to ppapi/generators, relative to this file's directory.
_idl_generators_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                    os.pardir, os.pardir, 'ppapi', 'generators')
# If the generators directory is already on sys.path just import; otherwise
# prepend it only for the duration of the import so global import state is
# left unchanged afterwards.
if _idl_generators_path in sys.path:
  import idl_parser
else:
  sys.path.insert(0, _idl_generators_path)
  try:
    import idl_parser
  finally:
    sys.path.pop(0)
def ProcessComment(comment):
  '''
  Convert a comment into a parent comment and a list of parameter comments.

  Function comments are of the form:
    Function documentation. May contain HTML and multiple lines.
    |arg1_name|: Description of arg1. Use <var>argument</var> to refer
    to other arguments.
    |arg2_name|: Description of arg2...
  Newlines are removed, and leading and trailing whitespace is stripped.

  Args:
    comment: The string from a Comment node.

  Returns: A tuple that looks like:
    (
      "The processed comment, minus all |parameter| mentions.",
      {
        'parameter_name_1': "The comment that followed |parameter_name_1|:",
        ...
      }
    )
  '''
  # itertools.izip_longest was renamed to zip_longest in Python 3; resolve
  # whichever exists so this module runs under either interpreter.
  zip_longest = getattr(itertools, 'izip_longest', None) or itertools.zip_longest

  # Find all the parameter comments of the form '|name|: comment'.
  parameter_starts = list(re.finditer(r' *\|([^|]*)\| *: *', comment))

  # Get the parent comment (everything before the first parameter comment).
  first_parameter_location = (parameter_starts[0].start()
                              if parameter_starts else len(comment))
  parent_comment = comment[:first_parameter_location]
  # We replace \n\n with <br/><br/> here and below, because the documentation
  # needs to know where the newlines should be, and this is easier than
  # escaping \n.
  parent_comment = (parent_comment.strip().replace('\n\n', '<br/><br/>')
                    .replace('\n', ''))

  params = {}
  for (cur_param, next_param) in zip_longest(parameter_starts,
                                             parameter_starts[1:]):
    param_name = cur_param.group(1)

    # A parameter's comment goes from the end of its introduction to the
    # beginning of the next parameter's introduction.
    param_comment_start = cur_param.end()
    param_comment_end = next_param.start() if next_param else len(comment)
    params[param_name] = (comment[param_comment_start:param_comment_end
                                  ].strip().replace('\n\n', '<br/><br/>')
                          .replace('\n', ''))
  return (parent_comment, params)
class Callspec(object):
  '''
  Given a Callspec node representing an IDL function declaration, converts into
  a name/value pair where the value is a list of function parameters.
  '''
  def __init__(self, callspec_node, comment):
    self.node = callspec_node
    self.comment = comment

  def process(self, callbacks):
    # Convert each child node into a schema parameter, attaching the
    # matching |param|: description when one was given in the comment.
    parameters = []
    for child in self.node.children:
      processed = Param(child).process(callbacks)
      description = self.comment.get(processed['name'])
      if description is not None:
        processed['description'] = description
      parameters.append(processed)
    return self.node.GetName(), parameters
class Param(object):
  '''
  Given a Param node representing a function parameter, converts into a Python
  dictionary that the JSON schema compiler expects to see.
  '''
  def __init__(self, param_node):
    self.node = param_node

  def process(self, callbacks):
    # Delegate the real work to Typeref, seeding the result dictionary
    # with this parameter's name.
    seed = {'name': self.node.GetName()}
    return Typeref(self.node.GetProperty('TYPEREF'),
                   self.node,
                   seed).process(callbacks)
class Dictionary(object):
  '''
  Given an IDL Dictionary node, converts into a Python dictionary that the JSON
  schema compiler expects to see.
  '''
  def __init__(self, dictionary_node):
    self.node = dictionary_node

  def process(self, callbacks):
    # Gather every Member child into a name -> schema mapping.
    properties = {}
    members = [child for child in self.node.children if child.cls == 'Member']
    for member in members:
      name, schema = Member(member).process(callbacks)
      properties[name] = schema
    result = {
        'id': self.node.GetName(),
        'properties': properties,
        'type': 'object',
    }
    if self.node.GetProperty('inline_doc'):
      result['inline_doc'] = True
    return result
class Member(object):
  '''
  Given an IDL dictionary or interface member, converts into a name/value pair
  where the value is a Python dictionary that the JSON schema compiler expects
  to see.
  '''
  def __init__(self, member_node):
    self.node = member_node

  def process(self, callbacks):
    properties = {}
    name = self.node.GetName()
    # Copy simple boolean markers straight onto the schema.
    for property_name in ('OPTIONAL', 'nodoc', 'nocompile'):
      if self.node.GetProperty(property_name):
        properties[property_name.lower()] = True
    is_function = False
    parameter_comments = {}
    for node in self.node.children:
      if node.cls == 'Comment':
        (parent_comment, parameter_comments) = ProcessComment(node.GetName())
        properties['description'] = parent_comment
      elif node.cls == 'Callspec':
        # A Callspec child means this member is a function; its parameters
        # pick up any per-parameter comments collected above.
        is_function = True
        name, parameters = Callspec(node, parameter_comments).process(callbacks)
        properties['parameters'] = parameters
    properties['name'] = name
    if is_function:
      properties['type'] = 'function'
    else:
      properties = Typeref(self.node.GetProperty('TYPEREF'),
                           self.node, properties).process(callbacks)
    enum_values = self.node.GetProperty('legalValues')
    if enum_values:
      # Wrap map() in list() so the values stay a concrete list on Python 3,
      # where map() returns a lazy iterator (identical result on Python 2).
      if properties['type'] == 'integer':
        enum_values = list(map(int, enum_values))
      elif properties['type'] == 'double':
        enum_values = list(map(float, enum_values))
      properties['enum'] = enum_values
    return name, properties
class Typeref(object):
  '''
  Given a TYPEREF property representing the type of dictionary member or
  function parameter, converts into a Python dictionary that the JSON schema
  compiler expects to see.
  '''
  def __init__(self, typeref, parent, additional_properties=None):
    self.typeref = typeref
    self.parent = parent
    # BUG FIX: this previously used a mutable default argument ({}), so
    # every Typeref constructed without explicit properties shared -- and
    # polluted -- a single dict across unrelated members. Create a fresh
    # dict per instance instead.
    self.additional_properties = (
        {} if additional_properties is None else additional_properties)

  def process(self, callbacks):
    properties = self.additional_properties
    result = properties

    if self.parent.GetProperty('OPTIONAL', False):
      properties['optional'] = True

    # The IDL parser denotes array types by adding a child 'Array' node onto
    # the Param node in the Callspec.
    for sibling in self.parent.GetChildren():
      if sibling.cls == 'Array' and sibling.GetName() == self.parent.GetName():
        properties['type'] = 'array'
        properties['items'] = {}
        # From here on we fill in the element schema; 'result' still points
        # at the outer array dict that is ultimately returned.
        properties = properties['items']
        break

    if self.typeref == 'DOMString':
      properties['type'] = 'string'
    elif self.typeref == 'boolean':
      properties['type'] = 'boolean'
    elif self.typeref == 'double':
      properties['type'] = 'number'
    elif self.typeref == 'long':
      properties['type'] = 'integer'
    elif self.typeref == 'any':
      properties['type'] = 'any'
    elif self.typeref == 'object':
      properties['type'] = 'object'
      if 'additionalProperties' not in properties:
        properties['additionalProperties'] = {}
      properties['additionalProperties']['type'] = 'any'
      instance_of = self.parent.GetProperty('instanceOf')
      if instance_of:
        properties['isInstanceOf'] = instance_of
    elif self.typeref == 'ArrayBuffer':
      properties['type'] = 'binary'
      properties['isInstanceOf'] = 'ArrayBuffer'
    elif self.typeref is None:
      properties['type'] = 'function'
    else:
      if self.typeref in callbacks:
        # Do not override name and description if they are already specified.
        name = properties.get('name', None)
        description = properties.get('description', None)
        properties.update(callbacks[self.typeref])
        if description is not None:
          properties['description'] = description
        if name is not None:
          properties['name'] = name
      else:
        properties['$ref'] = self.typeref
    return result
class Enum(object):
  '''
  Given an IDL Enum node, converts into a Python dictionary that the JSON
  schema compiler expects to see.
  '''
  def __init__(self, enum_node):
    self.node = enum_node
    self.description = ''

  def process(self, callbacks):
    # Collect the enum's legal values; a Comment child supplies the
    # description, and any other child class is a hard error.
    values = []
    for child in self.node.children:
      if child.cls == 'EnumItem':
        values.append(child.GetName())
      elif child.cls == 'Comment':
        self.description = ProcessComment(child.GetName())[0]
      else:
        sys.exit('Did not process %s %s' % (child.cls, child))
    result = {
        'id': self.node.GetName(),
        'description': self.description,
        'type': 'string',
        'enum': values,
    }
    if self.node.GetProperty('inline_doc'):
      result['inline_doc'] = True
    return result
class Namespace(object):
  '''
  Given an IDLNode representing an IDL namespace, converts into a Python
  dictionary that the JSON schema compiler expects to see.
  '''
  def __init__(self, namespace_node, nodoc=False, permissions=None,
               internal=False):
    self.namespace = namespace_node
    self.nodoc = nodoc
    self.internal = internal
    self.events = []
    self.functions = []
    self.types = []
    self.callbacks = {}
    self.permissions = permissions or []

  def process(self):
    # Walk the namespace's children, dispatching on the node class;
    # anything unrecognized is a hard error.
    for node in self.namespace.children:
      node_class = node.cls
      if node_class == 'Dictionary':
        self.types.append(Dictionary(node).process(self.callbacks))
      elif node_class == 'Callback':
        callback_name, callback = Member(node).process(self.callbacks)
        self.callbacks[callback_name] = callback
      elif node_class == 'Interface' and node.GetName() == 'Functions':
        self.functions = self.process_interface(node)
      elif node_class == 'Interface' and node.GetName() == 'Events':
        self.events = self.process_interface(node)
      elif node_class == 'Enum':
        self.types.append(Enum(node).process(self.callbacks))
      else:
        sys.exit('Did not process %s %s' % (node.cls, node))
    return {
        'namespace': self.namespace.GetName(),
        'nodoc': self.nodoc,
        'documentation_permissions_required': self.permissions,
        'types': self.types,
        'functions': self.functions,
        'internal': self.internal,
        'events': self.events,
    }

  def process_interface(self, node):
    # Only Member children contribute; each yields a properties dict.
    return [Member(member).process(self.callbacks)[1]
            for member in node.children
            if member.cls == 'Member']
class IDLSchema(object):
  '''
  Given a list of IDLNodes and IDLAttributes, converts into a Python list
  of api_defs that the JSON schema compiler expects to see.
  '''
  def __init__(self, idl):
    self.idl = idl

  def process(self):
    namespaces = []
    nodoc = False
    internal = False
    permissions = None
    for node in self.idl:
      if node.cls == 'Namespace':
        namespace = Namespace(node, nodoc, permissions, internal)
        namespaces.append(namespace.process())
        # Extended attributes apply only to the namespace that follows
        # them, so reset the accumulated state.
        nodoc = False
        internal = False
        permissions = None
      elif node.cls == 'Copyright':
        continue
      elif node.cls == 'Comment':
        continue
      elif node.cls == 'ExtAttribute':
        if node.name == 'nodoc':
          nodoc = bool(node.value)
        elif node.name == 'permissions':
          # BUG FIX: this previously assigned to a dead local named
          # 'permission', so [permissions=...] attributes were silently
          # dropped and never reached the Namespace.
          permissions = node.value.split(',')
        elif node.name == 'internal':
          internal = bool(node.value)
        else:
          continue
      else:
        sys.exit('Did not process %s %s' % (node.cls, node))
    schema_util.PrefixSchemasWithNamespace(namespaces)
    return namespaces
def Load(filename):
  '''
  Given the filename of an IDL file, parses it and returns an equivalent
  Python dictionary in a format that the JSON schema compiler expects to see.
  '''
  # Use a context manager so the file is closed even if read() raises;
  # the previous open/read/close sequence leaked the handle on error.
  with open(filename, 'r') as idl_file:
    contents = idl_file.read()
  idl = idl_parser.IDLParser().ParseData(contents, filename)
  idl_schema = IDLSchema(idl)
  return idl_schema.process()
def Main():
  '''
  Dump a json serialization of parse result for the IDL files whose names
  were passed in on the command line.
  '''
  for filename in sys.argv[1:]:
    schema = Load(filename)
    # Parenthesized print works on both Python 2 (statement with a single
    # parenthesized expression) and Python 3 (function call); the bare
    # 'print x' statement is a syntax error on Python 3.
    print(json.dumps(schema, indent=2))


if __name__ == '__main__':
  Main()
|
|
import io
from os.path import join, realpath, dirname
from pathlib import Path
import sys
import unittest
from unittest import mock
from unittest.mock import call
TEST_DIR = str(Path(dirname(realpath(__file__))).parent)
sys.path.insert(0, TEST_DIR)
from test.env import env
import dfm
from ioutils import ioutils
class TestDotfilesManager(unittest.TestCase):
    """Tests for the dfm dotfiles-manager entry points.

    dfm and its ioutils helper are exercised through mock patches; the
    shared test ``env`` object supplies the argument parser and ARGS
    state, and is reset around every test by setUp/tearDown.
    """
    @classmethod
    def setUpClass(cls):
        # Point dfm (and its ioutils) at the test environment object.
        dfm.env = env
        dfm.ioutils.env = env
    def setUp(self):
        # Fresh environment plus default argument parsing before each test.
        env.set_up()
        dfm._set_args()
    def tearDown(self):
        env.tear_down()
    @mock.patch('dfm._set_args')
    @mock.patch('dfm._get_dotfiles_dict', \
        return_value={'.fooconfig' : ['fooconfig', 'fooconfig_local'], '.barconfig' : ['barconfig']})
    @mock.patch('dfm.ioutils', autospec=True)
    @mock.patch('os.path.isdir', return_value=True)
    def test_correct_branching_when_arg_r(self, isdir, ioutils, _get_dotfiles_dict, _set_args):
        """-r reverts every known dotfile instead of compiling."""
        env.ARGS = env.parser.parse_args(['some_dir', '-r'])
        dfm.main()
        dfm._get_dotfiles_dict.assert_called_with(env.INPUT_DIR)
        expected_calls = [call('.fooconfig'), call('.barconfig')]
        ioutils.revert_dotfile.assert_has_calls(expected_calls)
    @mock.patch('os.path.isdir', return_value=True)
    def test_output_dir_stored_in_env_when_arg_o(self, isdir):
        """-o stores the chosen output directory on the env."""
        env.ARGS = env.parser.parse_args(['some_dir', '-o', 'some_other_dir'])
        dfm._set_env()
        self.assertTrue(env.OUTPUT_DIR == 'some_other_dir')
    @mock.patch('dfm._set_args')
    @mock.patch('dfm.ioutils.compile_dotfile')
    @mock.patch('os.path.isdir', return_value=True)
    @mock.patch('dfm._get_dotfiles_dict', return_value={'.gitconfig' : ['99-gitconfig', '99-gitconfig.local']})
    def test_only_specified_dotfile_compiled_when_arg_f(self, _get_dotfiles_dict, isdir, compile_dotfile, _set_args):
        """-f limits compilation to the single named dotfile."""
        dotfile = '.gitconfig'
        input_files = ['99-gitconfig', '99-gitconfig.local']
        env.ARGS = env.parser.parse_args(['some_dir', '-f', dotfile])
        dfm.main()
        # NOTE(review): asserts on the module-level ioutils while the patch
        # targets 'dfm.ioutils.compile_dotfile' -- assumes dfm imported the
        # same ioutils module object; verify against dfm's imports.
        ioutils.compile_dotfile.assert_called_once()
        ioutils.compile_dotfile.assert_called_with(dotfile, input_files)
    @mock.patch('dfm._set_args')
    @mock.patch('os.path.isdir', return_value=True)
    @mock.patch('dfm._get_dotfiles_dict', return_value={'.gitconfig' : ['gitconfig', 'gitconfig_local']})
    def test_print_error_exit_1_when_arg_f_and_invalid_dotfile_name(self, _get_dotfiles_dict, isdir, _set_args):
        """-f with an unknown dotfile prints an error and exits with 1."""
        stderr = sys.stderr
        err = io.StringIO()
        sys.stderr = err
        dfm.env.ARGS = env.parser.parse_args(['some_dir', '-f', 'foobar'])
        with self.assertRaises(SystemExit) as se:
            dfm.main()
        self.assertEqual(se.exception.code, 1)
        self.assertTrue("No input files found" in err.getvalue())
        sys.stderr = stderr
    @mock.patch('dfm._set_args')
    @mock.patch('dfm.ioutils')
    @mock.patch('dfm._get_dotfiles_dict', return_value={})
    @mock.patch('os.path.isdir', return_value=True)
    def test_only_specified_file_reverted_when_args_rf(self, isdir, _get_dotfiles_dict, ioutils, _set_args):
        """-r combined with -f reverts only the named dotfile."""
        env.ARGS = env.parser.parse_args(['some_dir', '-r', '-f', '.bashrc'])
        dfm.main()
        ioutils.revert_dotfile.assert_called_with('.bashrc')
        ioutils.compile_dotfile.assert_not_called()
    @mock.patch('dfm._set_args')
    @mock.patch('dfm.ioutils')
    @mock.patch('os.path.isdir', return_value=False)
    def test_print_error_exit_1_when_invalid_input_dir(self, isdir, ioutils, _set_args):
        """A nonexistent input directory prints an error and exits with 1."""
        stderr = sys.stderr
        err = io.StringIO()
        sys.stderr = err
        input_dir = 'some_nonexistent_dir'
        env.ARGS = env.parser.parse_args(['-r', input_dir])
        with self.assertRaises(SystemExit) as se:
            dfm._set_env()
        self.assertEqual(se.exception.code, 1)
        self.assertTrue("Specified input directory {0} does not exist.".format(input_dir) in err.getvalue())
        sys.stderr = stderr
    @mock.patch('dfm._set_args')
    @mock.patch('os.path.isdir', return_value=True)
    @mock.patch('os.path.isfile', return_value=True)
    @mock.patch('os.listdir', return_value=['foorc', 'foorc_local', '99-bar.config', 'bar.config'])
    @mock.patch('ioutils.ioutils.compile_dotfile')
    def test_dotfiles_compiled_by_input_file_name_convention(self, compile_dotfile, listdir, isfile, isdir, set_args):
        """Input files are grouped into dotfiles by naming convention."""
        env.ARGS = env.parser.parse_args(['some_dir'])
        expected_calls = [call('.foorc', ['foorc', 'foorc_local']), \
            call('.bar.config', ['99-bar.config', 'bar.config'])]
        dfm.main()
        ioutils.compile_dotfile.assert_has_calls(expected_calls)
    @mock.patch('ioutils.ioutils.os.listdir', return_value=['99-gitconfig', '98-gitconfig_local', 'vimrc', '99-bashrc', 'bashrc_local'])
    @mock.patch('ioutils.ioutils.os.path.isfile', return_value=True)
    def test_get_dotfiles_dict(self, listdir, isfile):
        """_get_dotfiles_dict maps dotfile names to their input files."""
        # NOTE(review): set() of a dict keeps only the keys, so this compares
        # key sets and the listed values are never checked -- confirm intent.
        expected = set({
            '.gitconfig' : ['.gitconfig'],
            '.vimrc': ['vimrc'],
            '.bashrc': ['99-bashrc', 'bashrc_local']
        })
        self.assertEqual(set(dfm._get_dotfiles_dict(env.INPUT_DIR)), expected)
    @mock.patch('dfm._set_args')
    @mock.patch('dfm.os.listdir', \
        return_value=['gitconfig', 'gitconfig_local', 'bashrc', 'bashrc_local'])
    @mock.patch('dfm.ioutils.os.path.isdir', return_value=True)
    @mock.patch('dfm.ioutils.os.path.isfile', return_value=True)
    @mock.patch('ioutils.ioutils._write_input_file_contents')
    def test_when_arg_e_then_specified_files_are_excluded(self, _write_input_file_contents, isfile, isdir, listdir, _set_args):
        """-e drops the named input files from the dotfiles dict."""
        env.ARGS = env.parser.parse_args([
            'some_dir',
            '-e', 'gitconfig_local',
            '-e', 'bashrc_local'
        ])
        actual_dict = dfm._get_dotfiles_dict(env.INPUT_DIR)
        self.assertTrue('gitconfig_local' not in actual_dict['.gitconfig'])
        self.assertTrue('bashrc_local' not in actual_dict['.bashrc'])
    @mock.patch('os.path.isfile', return_value=True)
    def test_get_dotfile_name_from_input_filename(self, isfile):
        """Numeric prefixes and _local/_mac-gnu suffixes are stripped."""
        input_files = ['gitconfig_local', '99-vimrc', '99-tmux.conf_local', '98-tmux.conf', 'bashrc_mac-gnu']
        dotfile_names = []
        for input_file in input_files:
            dotfile_names.append(dfm._get_dotfile_name(input_file))
        self.assertEqual(set(dotfile_names), set(['.gitconfig', '.tmux.conf', '.vimrc', '.bashrc']))
    @mock.patch('dfm.ioutils.os.path.isdir', return_value=True)
    def test_error_when_input_dir_same_as_output_dir(self, isdir):
        """Using the output dir as input dir prints an error and exits 1."""
        stderr = sys.stderr
        err = io.StringIO()
        sys.stderr = err
        user_home_dir = 'my_home_dir'
        env.OUTPUT_DIR = user_home_dir
        env.ARGS = env.parser.parse_args([user_home_dir])
        with self.assertRaises(SystemExit) as sys_exit:
            dfm._set_env()
        self.assertEqual(sys_exit.exception.code, 1)
        self.assertTrue("INPUT_DIR {0} cannot be the same as OUTPUT_DIR {1}" \
            .format(user_home_dir, user_home_dir) in err.getvalue())
        sys.stderr = stderr
    @mock.patch('dfm.ioutils.os.path.isdir', return_value=True)
    def test_arg_dry_run_implies_arg_verbose(self, isdir):
        """--dry-run switches verbose mode on as well."""
        stdout = sys.stdout
        out = io.StringIO()
        sys.stdout = out
        dfm._set_args()
        env.ARGS = env.parser.parse_args(['some_dir', '--dry-run'])
        dfm._set_env()
        self.assertTrue(env.ARGS.dry_run)
        self.assertTrue(env.ARGS.verbose)
        sys.stdout = stdout
    @mock.patch('os.path.isdir', return_value=True)
    @mock.patch('dfm._set_args')
    @mock.patch('dfm.ioutils.create_symlink')
    @mock.patch('dfm._get_dotfiles_dict', return_value={'.fooconfig' : ['fooconfig']})
    def test_symlink_created_when_single_input_file(self, get_dotfiles_dict, create_symlink, set_args, isdir):
        """A dotfile with one input file is symlinked, not compiled."""
        env.ARGS = env.parser.parse_args(['some_dir'])
        dfm.main()
        create_symlink.assert_called_once_with(join(env.INPUT_DIR, 'fooconfig'), \
            join(env.OUTPUT_DIR, '.fooconfig'))
    @mock.patch('builtins.open')
    @mock.patch('ioutils.ioutils._remove_symlink')
    @mock.patch('ioutils.ioutils.islink', return_value=True)
    @mock.patch('dfm.ioutils._back_up_file')
    @mock.patch('os.path.isdir', return_value=True)
    @mock.patch('dfm._set_args')
    @mock.patch('dfm.ioutils.create_symlink')
    @mock.patch('dfm._get_dotfiles_dict', return_value={'.fooconfig' : ['99-fooconfig', 'fooconfig']})
    def test_existing_symlink_removed_when_multiple_input_files(self, get_dotfiles_dict, create_symlink, set_args, isdir, back_up_file, islink, remove_symlink, m_open):
        """An existing symlink target is removed (not backed up) before compiling."""
        env.ARGS = env.parser.parse_args(['some_dir'])
        dfm.main()
        back_up_file.assert_not_called()
        remove_symlink.assert_called_once()
    @mock.patch('os.path.isdir', return_value=True)
    @mock.patch('ioutils.ioutils.lexists', return_value=True)
    @mock.patch('dfm._set_args')
    @mock.patch('ioutils.ioutils.os.readlink', return_value='vimrc')
    @mock.patch('dfm.ioutils._back_up_file')
    @mock.patch('dfm.ioutils.islink', return_value=False)
    @mock.patch('dfm.ioutils.isfile', return_value=True)
    @mock.patch('dfm.ioutils.os.symlink')
    @mock.patch('dfm._get_dotfiles_dict', return_value={'.fooconfig' : ['fooconfig']})
    def test_existing_dotfile_replaced_with_symlink_when_single_input_file(self, get_dotfiles_dict, symlink, isfile, islink, back_up_file, readlink, set_args, lexists, isdir):
        """A plain existing dotfile is backed up before being symlinked over."""
        env.ARGS = env.parser.parse_args(['some_dir'])
        dfm.main()
        input_file = join(env.INPUT_DIR, 'fooconfig')
        output_file = join(env.OUTPUT_DIR, '.fooconfig')
        back_up_file.assert_called_once_with(output_file)
        symlink.assert_called_once_with(input_file, output_file)
    @mock.patch('os.path.isdir', return_value=True)
    @mock.patch('dfm._set_args')
    @mock.patch('dfm.ioutils.compile_dotfile')
    @mock.patch('dfm.ioutils.create_symlink')
    @mock.patch('dfm._get_dotfiles_dict', return_value={'.fooconfig' : ['fooconfig']})
    def test_symlinks_not_created_when_arg_no_symlinks(self, get_dotfiles_dict, create_symlink, compile_dotfile, set_args, isdir):
        """--no-symlinks forces compilation even for single-input dotfiles."""
        env.ARGS = env.parser.parse_args(['some_dir', '--no-symlinks'])
        dfm.main()
        dotfile = '.fooconfig'
        input_files = ['fooconfig']
        create_symlink.assert_not_called()
        compile_dotfile.assert_called_once_with(dotfile, input_files)
# buffer=True captures stdout/stderr during the run; exit=False keeps the
# interpreter alive after the tests finish.
if __name__ == '__main__':
    unittest.main(module=__name__, buffer=True, exit=False)
|
|
# -*- coding: utf-8 -*-
"""Utility for assigning pathways."""
import itertools as itt
import random
from collections import defaultdict
from typing import List, Optional
import pyobo
import pyobo.sources.hgnc
from pybel import BELGraph
from pybel.dsl import BaseAbundance, ListAbundance
__all__ = [
'PathwayAssigner',
]
class PathwayAssigner:
    """A tool for assigning pathways to unannotated BEL edges."""

    def __init__(
        self,
        *,
        graph: BELGraph,
        managers: List,
    ):
        """Initialize the pathway assigner with several lookup dictionaries.

        :param graph: The BEL graph whose edges should be annotated.
        :param managers: A ComPath manager or iterable of ComPath managers
        """
        self.graph = graph

        self.pathway_to_symbols = defaultdict(set)
        self.symbol_to_pathways = defaultdict(set)

        # BUG FIX: a single (non-list) manager used to be silently replaced
        # with an empty list, contradicting the docstring; wrap it instead.
        if managers is None:
            managers = []
        elif not isinstance(managers, list):
            managers = [managers]
        for manager in managers:
            self._add_manager(manager)

        # These won't be loaded more so convert to normal dicts
        self.pathway_to_symbols = dict(self.pathway_to_symbols)
        self.symbol_to_pathways = dict(self.symbol_to_pathways)

        hgnc_obo = pyobo.sources.hgnc.get_obo()
        self.hgnc_id_to_symbol = pyobo.get_id_name_mapping('hgnc')

        # Prepare MGI (mouse) -> HGNC symbol mapping via the ro:HOM0000017
        # ("in orthology relationship with") relation.
        self.hgnc_mgi_mapping = hgnc_obo.get_relations_mapping('ro:HOM0000017', 'mgi')
        self.mgi_to_hgnc = {v: k for k, v in self.hgnc_mgi_mapping.items()}
        self.mgi_id_to_symbol = pyobo.get_id_name_mapping('mgi')
        self.mgi_symbol_to_hgnc_symbol = {
            self.mgi_id_to_symbol[mgi_id]: self.hgnc_id_to_symbol[hgnc_id]
            for mgi_id, hgnc_id in self.mgi_to_hgnc.items()
        }

        # Prepare RGD (rat) -> HGNC symbol mapping the same way.
        self.hgnc_rgd_mapping = hgnc_obo.get_relations_mapping('ro:HOM0000017', 'rgd')
        self.rgd_to_hgnc = {v: k for k, v in self.hgnc_rgd_mapping.items()}
        self.rgd_id_to_symbol = pyobo.get_id_name_mapping('rgd')
        self.rgd_symbol_to_hgnc_symbol = {
            self.rgd_id_to_symbol[rgd_id]: self.hgnc_id_to_symbol[hgnc_id]
            for rgd_id, hgnc_id in self.rgd_to_hgnc.items()
        }

        # Cross-references filled in by the annotate_* methods:
        # pathway tuple <-> BEL edge key, and PMID <-> pathway tuple.
        self.pathway_to_key = defaultdict(set)
        self.key_to_pathway = defaultdict(set)
        self.pmid_to_pathway = defaultdict(set)
        self.pathway_to_pmid = defaultdict(set)
        # pathway tuple -> node key -> list of (u, v, key, data) edges
        self.double_annotated = defaultdict(lambda: defaultdict(list))

    def _add_manager(self, manager) -> None:
        """Add a ComPath manager's pathway/gene links to the lookup dicts."""
        for pathway in manager._query_pathway().all():
            db_id = getattr(pathway, f'{manager.module_name}_id')
            pathway_tuple = manager.module_name, db_id, pathway.name
            for protein in pathway.proteins:
                self.pathway_to_symbols[pathway_tuple].add(protein.hgnc_symbol)
                self.symbol_to_pathways[protein.hgnc_symbol].add(pathway_tuple)

    def to_file(self, tsv_path, rst_path) -> None:
        """Save results to files."""
        with open(tsv_path, 'w') as file, open(rst_path, 'w') as log_file:
            self._to_file(file=file, log_file=log_file)

    def _to_file(self, file, log_file) -> None:
        """Write a TSV of annotations and an RST-style human-readable log."""
        print('database', 'pathway_id', 'pathway_name', 'key', 'bel', sep='\t', file=file)
        for (db, pathway_id, pathway), names_dict in self.double_annotated.items():
            title = f'{db}:{pathway_id} - {pathway}'
            print(title, file=log_file)
            print('=' * len(title), file=log_file)
            for node_key, keys_and_data in names_dict.items():
                print('', file=log_file)
                print(node_key, file=log_file)
                print('-' * len(str(node_key)), file=log_file)
                for u, v, key, data in keys_and_data:
                    print('-', key[:8], self.graph.edge_to_bel(u, v, data), file=log_file)
                    print(db, pathway_id, pathway, key, self.graph.edge_to_bel(u, v, data), sep='\t', file=file)
            print('', file=log_file)

    def summarize(self):
        """Print the summary of the annotations."""
        annotated_edge_keys = set(itt.chain.from_iterable(self.pathway_to_key.values()))
        n_edges_annotated = len(annotated_edge_keys)
        print(f'{n_edges_annotated} ({n_edges_annotated / self.graph.number_of_edges():.2%}) '
              f'of {self.graph.number_of_edges()} edges were annotated')

        unannotated_edges = [
            (u, v, k, d)
            for u, v, k, d in self.graph.edges(data=True, keys=True)
            if k not in annotated_edge_keys
        ]
        print(f'There are {len(unannotated_edges)} unannotated edges')
        print('\nExamples of unannotated nodes:\n')
        # Guard the sample size so small graphs don't raise ValueError.
        for u, v, k, d in random.sample(unannotated_edges, min(15, len(unannotated_edges))):
            print(k[:8], self.graph.edge_to_bel(u, v, d))
        print()

        annotated_nodes = {
            node
            for u, v, k in self.graph.edges(keys=True)
            if k in annotated_edge_keys
            for node in (u, v)
        }
        n_nodes_annotated = len(annotated_nodes)
        print(f'{n_nodes_annotated} ({n_nodes_annotated / self.graph.number_of_nodes():.2%}) '
              f'of {self.graph.number_of_nodes()} nodes were annotated')

        unannotated_nodes = set(self.graph) - annotated_nodes
        print(f'There are {len(unannotated_nodes)} unannotated nodes')
        print('\nExamples of unannotated nodes:\n')
        # random.sample() no longer accepts sets (error since Python 3.11),
        # so convert to a list first; also guard the sample size.
        unannotated_node_list = list(unannotated_nodes)
        for node in random.sample(unannotated_node_list, min(15, len(unannotated_node_list))):
            print(node)

    def get_gene(self, node: BaseAbundance) -> Optional[str]:
        """Get or map the name to HGNC gene symbol of the node, if possible."""
        try:
            namespace = node.namespace.lower()
        except AttributeError:
            return
        if namespace == 'hgnc':
            return node.name
        if namespace == 'rgd':
            return self.rgd_symbol_to_hgnc_symbol.get(node.name)
        if namespace == 'mgi':
            return self.mgi_symbol_to_hgnc_symbol.get(node.name)

    def annotate_gene_gene(self):
        """Annotate edges between gene or gene product nodes.

        1. Identify if subject and object are both gene nodes. If they are orthologs, try and map them to HGNC.
        2. `If` the subject and object in an edge are both in a canonical pathway, then the edge gets assigned to the
           pathway.
        3. `Else if` only one of the subject and the object in the edge have been assigned in the pathway:
           1. `If` the edge is an ontological edge, than add it to the pathway
           2. `If` there are other edges in the pathway mentioned in the same article, assign the edge to the pathway
           3. `Else` leave for manual curation
        4. `Else if` neither of the nodes are assigned to the pathway, but both nodes are connected to nodes in the
           pathway by directed edges, assign both edge to the pathway as well as incident edges
        5. `Else` the nodes don't get assigned to the pathway

        :return: The number of pathway assignments made.
        """
        c = 0
        for u, v, k, d in self.graph.edges(keys=True, data=True):
            if not isinstance(u, BaseAbundance) or not isinstance(v, BaseAbundance):
                continue
            u_name, v_name = self.get_gene(u), self.get_gene(v)
            if u_name is None or v_name is None:
                continue
            for pathway_tuple, symbols in self.pathway_to_symbols.items():
                # Both ends must be members of the pathway (rule 2 above).
                if u_name not in symbols or v_name not in symbols:
                    continue
                self.double_annotated[pathway_tuple][tuple(sorted([u_name, v_name]))].append((u, v, k, d))
                self.pathway_to_key[pathway_tuple].add(k)
                self.key_to_pathway[k].add(pathway_tuple)

                # Remember which articles mention this pathway, for
                # annotate_by_document().
                citation = d.get('citation')
                if citation is not None:
                    reference = citation['reference']
                    self.pmid_to_pathway[reference].add(pathway_tuple)
                    self.pathway_to_pmid[pathway_tuple].add(reference)
                c += 1
        return c

    def annotate_gene_other(self):
        """Annotate edges between gene or gene product nodes and chemicals / biological processes / diseases.

        1. Identify if subject or object are a gene nodes. If they are orthologs, try and map them to HGNC.
        2. If an entity is related to a gene in a pathway, then that edge gets annotated to the pathway

        :return: The number of pathway assignments made.
        """
        c = 0
        for u, v, k, d in self.graph.edges(keys=True, data=True):
            if not isinstance(u, BaseAbundance) or not isinstance(v, BaseAbundance):
                continue
            # Exactly one end must map to an HGNC symbol.
            u_name, v_name = self.get_gene(u), self.get_gene(v)
            if u_name and v_name is None:
                gene_name = u_name
                other_name = v.name
            elif u_name is None and v_name:
                gene_name = v_name
                other_name = u.name
            else:
                continue
            try:
                ordering = tuple(sorted([gene_name, other_name]))
            except TypeError:
                # other_name may be None (unnamed node); report and skip.
                print('Gene', gene_name)
                print('Other', other_name)
                continue
            for pathway_tuple, symbols in self.pathway_to_symbols.items():
                if gene_name not in symbols:
                    continue
                self.double_annotated[pathway_tuple][ordering].append((u, v, k, d))
                self.pathway_to_key[pathway_tuple].add(k)
                self.key_to_pathway[k].add(pathway_tuple)
                citation = d.get('citation')
                if citation is not None:
                    reference = citation['reference']
                    self.pmid_to_pathway[reference].add(pathway_tuple)
                    self.pathway_to_pmid[pathway_tuple].add(reference)
                c += 1
        return c

    def annotate_by_document(self):
        """Annotate edges with a gene or gene product nodes that has other annotated edges in its original document.

        If an edge has only one node that appears in a pathway, but that pathway has already been mentioned in the
        paper, then it gets annotated to that pathway too.

        :return: The number of pathway assignments made.
        """
        c = 0
        for u, v, k, d in self.graph.edges(keys=True, data=True):
            citation = d.get('citation')
            if citation is None:
                continue
            reference = citation['reference']
            # Pathways already seen in this article (filled by the other
            # annotate_* passes).
            pathway_tuples = self.pmid_to_pathway[reference]
            u_name, v_name = self.get_gene(u), self.get_gene(v)
            if u_name and v_name is None:
                gene_name = u_name
            elif u_name is None and v_name:
                gene_name = v_name
            else:
                continue
            if gene_name not in self.symbol_to_pathways:
                continue
            for pathway_tuple in pathway_tuples:
                if pathway_tuple not in self.symbol_to_pathways[gene_name]:
                    continue
                self.double_annotated[pathway_tuple][gene_name].append((u, v, k, d))
                self.pathway_to_key[pathway_tuple].add(k)
                self.key_to_pathway[k].add(pathway_tuple)
                c += 1
        return c

    def annotate_complexes(self):
        """Annotated complex nodes.

        If two or more members of a complex are in a pathway, then the whole complex and all of its partOf
        relationships will get assigned to that pathway.

        :return: The number of pathway assignments made.
        """
        c = 0
        for node in self.graph:
            if not isinstance(node, ListAbundance):
                continue
            # Map every member we can to an HGNC symbol.
            mapped_names = []
            for member in node.members:
                if not isinstance(member, BaseAbundance):
                    continue
                name = self.get_gene(member)
                if name is not None:
                    mapped_names.append(name)
            if not mapped_names:
                continue

            for pathway_tuple, symbols in self.pathway_to_symbols.items():
                in_count = sum(
                    name in symbols
                    for name in mapped_names
                )
                should_annotate_complex = (
                    (1 == len(mapped_names) and 1 == in_count)  # Other stuff going on, let's do it
                    or 2 <= in_count  # enough is going on, let's do it
                )
                if not should_annotate_complex:
                    continue
                # Annotate every edge incident to the complex node.
                for u, v, k, d in self.graph.edges(node, keys=True, data=True):
                    self.double_annotated[pathway_tuple][node].append((u, v, k, d))
                    self.pathway_to_key[pathway_tuple].add(k)
                    self.key_to_pathway[k].add(pathway_tuple)
                    c += 1
        return c

    # TODO add FamPlex hierarchy resolution
    # TODO add partOf relationship resolution
# TODO add FamPlex hierarchy resolution
# TODO add partOf relationship resolution
# Smoke-test construction only; a real run needs a BELGraph and ComPath
# managers.
if __name__ == '__main__':
    PathwayAssigner(graph=None, managers=None)
|
|
#!/usr/bin/env python
"""
This module contains tests for djenerator app.
"""
import datetime
import itertools
import os
import random
import re
import tempfile
import uuid
from decimal import Decimal
from django.db import models
from django.db.models import Model
from django.db.models.fields import BigIntegerField
from django.db.models.fields import BooleanField
from django.db.models.fields import CharField
from django.db.models.fields import CommaSeparatedIntegerField
from django.db.models.fields import DateField
from django.db.models.fields import DateTimeField
from django.db.models.fields import DecimalField
from django.db.models.fields import DurationField
from django.db.models.fields import EmailField
from django.db.models.fields import FilePathField
from django.db.models.fields import FloatField
from django.db.models.fields import GenericIPAddressField
from django.db.models.fields import IntegerField
from django.db.models.fields import NullBooleanField
from django.db.models.fields import PositiveIntegerField
from django.db.models.fields import PositiveSmallIntegerField
from django.db.models.fields import SmallIntegerField
from django.db.models.fields import SlugField
from django.db.models.fields import TextField
from django.db.models.fields import TimeField
from django.db.models.fields import URLField
from django.db.models.fields import UUIDField
from django.test import TestCase
from djenerator.fields_generator import generate_random_values
from djenerator.generate_test_data import create_model
from djenerator.generate_test_data import dependencies
from djenerator.generate_test_data import dfs
from djenerator.generate_test_data import djenerator
from djenerator.generate_test_data import field_sample_values
from djenerator.generate_test_data import generate_model
from djenerator.generate_test_data import generate_test_data
from djenerator.generate_test_data import recompute
from djenerator.generate_test_data import topological_sort
from djenerator.model_reader import field_type
from djenerator.model_reader import is_auto_field
from djenerator.model_reader import is_instance_of_django_model
from djenerator.model_reader import is_related
from djenerator.model_reader import is_required
from djenerator.model_reader import is_reverse_related
from djenerator.model_reader import list_of_fields
from djenerator.model_reader import list_of_models
from djenerator.model_reader import module_import
from djenerator.model_reader import names_of_fields
from djenerator.model_reader import relation_type
from djenerator.values_generator import generate_big_integer
from djenerator.values_generator import generate_boolean
from djenerator.values_generator import generate_comma_separated_int
from djenerator.values_generator import generate_date
from djenerator.values_generator import generate_date_time
from djenerator.values_generator import generate_decimal
from djenerator.values_generator import generate_email
from djenerator.values_generator import generate_int
from djenerator.values_generator import generate_integer
from djenerator.values_generator import generate_ip
from djenerator.values_generator import generate_positive_integer
from djenerator.values_generator import generate_positive_small_integer
from djenerator.values_generator import generate_small_integer
from djenerator.values_generator import generate_sentence
from djenerator.values_generator import generate_string
from djenerator.values_generator import generate_text
from djenerator.values_generator import generate_time
from djenerator.values_generator import generate_url
from djenerator.utility import sort_unique_tuple
from djenerator.utility import sort_unique_tuples
from djenerator.utility import unique_items
import models as mdls
from models import AllFieldsModel
from models import CycleA
from models import CycleB
from models import CycleC
from models import CycleD
from models import CycleE
from models import CycleF
from models import ExtendAbstract
from models import ExtendSuperClass
from models import ExtendingModel
from models import NotExtendingModel
from models import ProxyExtend
from models import SuperAbstract
from models import SuperClass
from models import TestModel0
from models import TestModel1
from models import TestModelA
from models import TestModelB
from models import TestModelC
from models import TestModelD
from models import TestModelE
from models import TestModelFields
from models import TestModelFieldsTwo
from models import TestModelX
from models import TestModelY
class TestFieldToRandomGeneratorMatcher(TestCase):
    """Checks generate_random_values: for every field of AllFieldsModel it
    must return between 1 and the requested number of samples, each of a
    type and range/format appropriate for that field class.
    """
    def test(self):
        fields = list_of_fields(AllFieldsModel)
        present_types = list(map(lambda field: field.__class__, fields))
        # Field classes AllFieldsModel is expected to declare; the test is
        # meaningless for a field type that is not present on the model.
        field_types = [BigIntegerField, BooleanField, CharField,
                       CommaSeparatedIntegerField, DateField, DateTimeField,
                       DecimalField, DurationField, EmailField, FloatField,
                       GenericIPAddressField, IntegerField, NullBooleanField,
                       PositiveIntegerField, PositiveSmallIntegerField,
                       SmallIntegerField, TextField, TimeField]
        self.assertFalse(set(field_types) - set(present_types),
                         "All types should be present." +
                         str(set(field_types) - set(present_types)))
        for field in fields:
            sample_siz = 10
            values = generate_random_values(field, sample_siz)
            # The generator may produce fewer samples than requested, but
            # never more and never an empty list.
            self.assertLessEqual(len(values), sample_siz)
            self.assertGreaterEqual(len(values), 1)
            for val in values:
                # NOTE: the isinstance branches below are not mutually
                # exclusive -- a value can be validated by several branches
                # where Django field classes are related by inheritance.
                if isinstance(field, IntegerField):
                    self.assertTrue(isinstance(val, int), val)
                if isinstance(field, EmailField):
                    self.assertTrue(isinstance(val, str), val)
                    email_reg = r'^\w+(?:\.\w+)*@(?:[A-Za-z0-9]+\.)+[A-Za-z]+$'
                    self.assertRegexpMatches(val, email_reg, val)
                if isinstance(field, BooleanField):
                    self.assertTrue(isinstance(val, bool), val)
                if isinstance(field, CharField):
                    self.assertTrue(isinstance(val, str), val)
                    self.assertLessEqual(len(val), field.max_length, val)
                if isinstance(field, CommaSeparatedIntegerField):
                    self.assertTrue(isinstance(val, str), val)
                    comma_sep_int_re = r'^\d{1,3}(?:,\d{3})*$'
                    self.assertRegexpMatches(val, comma_sep_int_re, val)
                if isinstance(field, DateField):
                    self.assertTrue(isinstance(val, datetime.date), val)
                if isinstance(field, DateTimeField):
                    self.assertTrue(isinstance(val, datetime.datetime), val)
                if isinstance(field, DecimalField):
                    self.assertTrue(isinstance(val, Decimal), val)
                if isinstance(field, FloatField):
                    self.assertTrue(isinstance(val, float), val)
                if isinstance(field, GenericIPAddressField):
                    self.assertTrue(isinstance(val, str), val)
                    ip_regex = r'^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$'
                    self.assertRegexpMatches(val, ip_regex, val)
                if isinstance(field, PositiveIntegerField):
                    # Range of a 32-bit non-negative integer.
                    self.assertTrue(isinstance(val, int), val)
                    self.assertLessEqual(val, 2147483647, val)
                    self.assertGreaterEqual(val, 0, val)
                if isinstance(field, PositiveSmallIntegerField):
                    # Range of a 16-bit non-negative integer.
                    self.assertTrue(isinstance(val, int), val)
                    self.assertLessEqual(val, 32767, val)
                    self.assertGreaterEqual(val, 0, val)
                if isinstance(field, SmallIntegerField):
                    # Range of a signed 16-bit integer.
                    self.assertTrue(isinstance(val, int), val)
                    self.assertLessEqual(val, 32767, val)
                    self.assertGreaterEqual(val, -32768, val)
                if isinstance(field, TimeField):
                    self.assertTrue(isinstance(val, datetime.time), val)
                if isinstance(field, TextField):
                    self.assertTrue(isinstance(val, str), val)
                    self.assertLessEqual(len(val), field.max_length)
                    text_re = r'^(?:(?:\w+\s?)+\.\s?)+$'
                    self.assertRegexpMatches(val, text_re, val)
                if isinstance(field, DurationField):
                    self.assertTrue(isinstance(val, datetime.timedelta), val)
                if isinstance(field, SlugField):
                    self.assertTrue(isinstance(val, str), val)
                    slug_re = r'^[a-zA-Z0-9_\-]+$'
                    self.assertRegexpMatches(val, slug_re, val)
                if isinstance(field, URLField):
                    url_re = r'^(?:http|ftp|https)://(?:[a-z0-9_\-]+\.?)+/?'
                    url_re += r'(?:/[a-z0-9_\-]+)*/?$'
                    self.assertTrue(isinstance(val, str), val)
                    self.assertRegexpMatches(val, url_re, val)
                if isinstance(field, UUIDField):
                    self.assertTrue(isinstance(val, uuid.UUID), val)
                if isinstance(field, FilePathField):
                    # Generated paths must point at existing files.
                    self.assertTrue(isinstance(val, str), val)
                    self.assertTrue(os.path.exists(val), val)
class TestInstanceOfDjangoModel(TestCase):
    """is_instance_of_django_model must accept Django model classes and
    reject everything else (plain classes and plain functions)."""
    def test(self):
        django_models = [TestModel0, TestModel1, TestModelA, TestModelB,
                         TestModelC, TestModelD, TestModelE, TestModelX,
                         TestModelY, ExtendingModel]
        for candidate in django_models:
            self.assertTrue(is_instance_of_django_model(candidate))
        self.assertFalse(is_instance_of_django_model(NotExtendingModel))

        def plain_function():
            pass
        self.assertFalse(is_instance_of_django_model(plain_function))
class TestListOfModels(TestCase):
    """list_of_models must enumerate every model in the module; abstract
    models appear only when keep_abstract is set."""
    def test(self):
        non_abstract = [ExtendingModel, TestModel0, TestModel1, TestModelA,
                        TestModelB, TestModelC, TestModelD, TestModelE,
                        TestModelX, TestModelY, TestModelFields, SuperClass,
                        ExtendAbstract, ExtendSuperClass, ProxyExtend,
                        TestModelFieldsTwo, CycleA, CycleB, CycleC, CycleD,
                        CycleE, CycleF, AllFieldsModel]
        abstract_only = [SuperAbstract]
        # With keep_abstract the abstract base is included as well.
        self.assertEqual(set(non_abstract + abstract_only),
                         set(list_of_models(mdls, keep_abstract=True)))
        self.assertEqual(set(non_abstract), set(list_of_models(mdls)))
class TestListOfFields(TestCase):
    """list_of_fields must return field instances whose classes match the
    models' declarations (implicit AutoField first)."""
    def test(self):
        expectations = [
            (TestModel1, [models.AutoField, models.CharField,
                          models.IntegerField, models.ForeignKey]),
            (TestModel0, [models.AutoField, models.BooleanField,
                          models.EmailField]),
            (TestModelE, [models.AutoField, models.OneToOneField,
                          models.ForeignKey, models.IntegerField,
                          models.ManyToManyField]),
        ]
        for model, field_classes in expectations:
            pairs = zip(list_of_fields(model), field_classes)
            self.assertTrue(all(isinstance(fld, cls) for fld, cls in pairs))
class TestNamesOfFields(TestCase):
    """names_of_fields must return the field names in declaration order,
    with the implicit primary key 'id' first."""
    def test(self):
        expected = [
            (TestModelE, ['id', 'field1E', 'field3E', 'field4E', 'field2E']),
            (TestModel1, ['id', 'field1', 'field2', 'field3']),
            (TestModel0, ['id', 'field1', 'field2']),
        ]
        for model, names in expected:
            self.assertEqual(names, names_of_fields(model))
class TestFieldType(TestCase):
    """field_type must report the internal Django type name of a field.
    Note that EmailField reports as 'CharField'."""
    def test(self):
        cases = [(models.CharField(), 'CharField'),
                 (models.IntegerField(), 'IntegerField'),
                 (models.EmailField(), 'CharField'),
                 (models.BooleanField(), 'BooleanField'),
                 (models.ForeignKey(ExtendingModel), 'ForeignKey'),
                 (models.OneToOneField(ExtendingModel), 'OneToOneField'),
                 (models.ManyToManyField(ExtendingModel), 'ManyToManyField')]
        for field, expected_name in cases:
            self.assertEqual(field_type(field), expected_name)
class TestIsAutoField(TestCase):
    """is_auto_field must be true only for AutoField instances."""
    def test(self):
        self.assertTrue(is_auto_field(models.AutoField(primary_key=True)))
        non_auto_fields = (models.CharField(), models.BooleanField(),
                           models.IntegerField(),
                           models.ForeignKey(ExtendingModel))
        for field in non_auto_fields:
            self.assertFalse(is_auto_field(field))
class TestIsRelated(TestCase):
    """is_related must be true only for relation field classes."""
    def test(self):
        relation_classes = (models.ForeignKey, models.OneToOneField,
                            models.ManyToManyField)
        plain_classes = (models.CharField, models.BooleanField,
                         models.EmailField, models.IntegerField)
        for field_class in relation_classes:
            self.assertTrue(is_related(field_class))
        for field_class in plain_classes:
            self.assertFalse(is_related(field_class))
class TestRelationType(TestCase):
    """relation_type must name the reverse relation class of a relation
    field."""
    def test(self):
        cases = ((models.OneToOneField, 'OneToOneRel'),
                 (models.ManyToManyField, 'ManyToManyRel'),
                 (models.ForeignKey, 'ManyToOneRel'))
        for field_class, expected_rel in cases:
            self.assertEqual(relation_type(field_class(ExtendingModel)),
                             expected_rel)
class TestIsRequired(TestCase):
    """is_required must be false exactly when the field allows null."""
    def test(self):
        nullable = (models.CharField(max_length=20, null=True),
                    models.IntegerField(null=True),
                    models.ForeignKey(ExtendingModel, null=True))
        required = (models.IntegerField(),
                    models.ForeignKey(ExtendingModel))
        for field in nullable:
            self.assertFalse(is_required(field))
        for field in required:
            self.assertTrue(is_required(field))
class TestModuleImport(TestCase):
    """module_import must resolve a dotted path to the module object."""
    def test(self):
        imported = module_import('tests.models')
        self.assertEqual(imported, mdls)
class TestListOfSampleFieldValues(TestCase):
    """Checks field_sample_values against the sample data declared on the
    test models, including related fields that draw their samples from
    already-saved rows.
    """
    def test(self):
        # Field lists per model; indexing below picks individual fields
        # (index 0 is the implicit AutoField).
        Y = list_of_fields(TestModelY)
        X = list_of_fields(TestModelX)
        A = list_of_fields(TestModelA)
        B = list_of_fields(TestModelB)
        C = list_of_fields(TestModelC)
        D = list_of_fields(TestModelD)
        E = list_of_fields(TestModelE)
        # An AutoField has no sample values.
        self.assertFalse(field_sample_values(X[0]))
        self.assertEqual(field_sample_values(Y[1]), [2, 3, 5, 7, 11, 13])
        self.assertEqual(field_sample_values(Y[2]), ['MMa', 'XXa', 'azz'])
        self.assertEqual(field_sample_values(X[1]),
                         [x * x * x for x in range(10)])
        self.assertEqual(field_sample_values(E[3]), [1000000009, 1000003, 101])
        self.assertEqual(field_sample_values(D[1]),
                         [x * x * x for x in range(10)])
        self.assertEqual(field_sample_values(C[1]),
                         ['Hello I am C', 'MUHAHAHAHAHA', 'CCCC', '^_^'])
        self.assertEqual(field_sample_values(B[1]),
                         ['Hello Universe', 'Hello Parallel Universe!'])
        self.assertEqual(field_sample_values(A[1]),
                         ['Hello World', 'Hello Africa', 'axxx!!'])
        self.assertEqual(field_sample_values(A[2]),
                         ['Hello Second Field', 'field 2'])
        # Related fields must sample from the rows saved just above.
        a = TestModelX(field1X=12)
        b = TestModelX(field1X=15)
        a.save()
        b.save()
        self.assertEqual((field_sample_values(models.ForeignKey(TestModelX))),
                         ([a, b]))
        fld = models.ManyToManyField(TestModelX)
        self.assertTrue(all([x in [a, b]
                             for x in field_sample_values(fld)[0]]))
        vals = [int(x) for x in field_sample_values(list_of_fields(CycleF)[2])]
        self.assertEqual(vals, range(4000, 5000))
class TestCreateModel(TestCase):
    """create_model must persist an instance retrievable by the exact
    attribute values it was given; later instances may reference the
    earlier ones through relation fields."""
    def test(self):
        attrs_a = {'field1A': 'Hrr', 'field2A': 'HxxA'}
        made_a = create_model(TestModelA, attrs_a.items())
        self.assertEqual(made_a, TestModelA.objects.get(**attrs_a))
        attrs_b = {'field1B': 'Hello Worrd', 'field2B': made_a}
        made_b = create_model(TestModelB, attrs_b.items())
        self.assertEqual(made_b, TestModelB.objects.get(**attrs_b))
        attrs_c = {'field1C': 'Hello Egypt!!', 'field2C': made_b}
        made_c = create_model(TestModelC, attrs_c.items())
        self.assertEqual(made_c, TestModelC.objects.get(**attrs_c))
        attrs_d = {'field1D': 77, 'field2D': TestModelA.objects.all()}
        made_d = create_model(TestModelD, attrs_d.items())
        self.assertEqual(made_d, TestModelD.objects.get(**attrs_d))
class TestDependencies(TestCase):
    """Checks the model dependencies computed by dependencies()."""
    def test(self):
        self.assertEqual(dependencies(TestModelD), [])
        self.assertEqual(set([TestModelB, TestModelC]),
                         set(dependencies(TestModelE)))
        self.assertEqual(dependencies(TestModelC), [TestModelB])
        self.assertEqual(dependencies(TestModelB), [TestModelA])
        self.assertEqual(dependencies(CycleD), [CycleC])
        # CycleC has no (hard) dependencies despite being in a cycle.
        self.assertFalse(dependencies(CycleC))
        self.assertEqual(set([TestModelY, TestModelX]),
                         set(dependencies(TestModelFields)))
class TestTopologicalSorting(TestCase):
    """topological_sort must order models so that each model appears after
    the models it depends on, whatever the input order."""
    def test(self):
        self.assertEqual(topological_sort([ExtendingModel, TestModel1,
                                           TestModel0]),
                         [ExtendingModel, TestModel0, TestModel1])
        for given in ([TestModel1, TestModel0], [TestModel0, TestModel1]):
            self.assertEqual(topological_sort(given),
                             [TestModel0, TestModel1])

        def check_order(ordering):
            # Every dependent model must come after its dependencies.
            index = ordering.index
            self.assertLess(index(TestModelA), index(TestModelB))
            self.assertLess(index(TestModelB), index(TestModelC))
            self.assertLess(index(TestModelB), index(TestModelE))
            self.assertLess(index(TestModelC), index(TestModelE))
            self.assertIn(ExtendingModel, ordering)
        # The ordering guarantees must hold for every input permutation.
        for perm in itertools.permutations([TestModelA, TestModelB, TestModelD,
                                            TestModelC, TestModelE,
                                            ExtendingModel]):
            check_order(topological_sort(list(perm)))
class TestUniqueConstraints(TestCase):
    """Checks unique_items: a constraint built for ('fieldA', 'fieldD')
    must reject value assignments that collide with an existing row and
    accept everything else.
    """
    def test(self):
        constraint = unique_items(('fieldA', 'fieldD',))
        # Existing row: ('A', 5) is now taken for (fieldA, fieldD).
        model = TestModelFieldsTwo(fieldA='A', fieldD=5, fieldB=10,
                                   fieldC='Winner', fieldE=True, fieldF=6,
                                   fieldG='Mathematics', fieldH=False)
        model.save()
        # NOTE(review): fields come from TestModelFields while the
        # constraint is evaluated against TestModelFieldsTwo -- looks
        # intentional only for picking an arbitrary field; confirm.
        fields = list_of_fields(TestModelFields)
        self.assertFalse(constraint([('fieldA', 'A'), ('fieldD', 5)],
                                    TestModelFieldsTwo, fields[5]))
        self.assertTrue(constraint([('fieldA', 'A')],
                                   TestModelFields, fields[5]))
        # NOTE(review): the next two pairs of assertions duplicate the
        # ones above/below them; kept for idempotence checking,
        # presumably.
        self.assertFalse(constraint([('fieldA', 'A'), ('fieldD', 5)],
                                    TestModelFieldsTwo, fields[5]))
        self.assertTrue(constraint([('fieldA', 'A'), ('fieldD', 3)],
                                   TestModelFieldsTwo, fields[5]))
        self.assertTrue(constraint([('fieldA', 'A')],
                                   TestModelFieldsTwo, fields[5]))
        self.assertTrue(constraint([('fieldA', 'A'), ('fieldD', 3)],
                                   TestModelFieldsTwo, fields[5]))
class TestSortTuple(TestCase):
    """Checks sort_unique_tuple's ordering of unique-together field
    names for a model."""
    def test(self):
        declared = tuple(names_of_fields(TestModelFields))
        self.assertEqual(sort_unique_tuple(('fieldA', 'fieldX', 'fieldG',
                                            'fieldD'), TestModelFields),
                         ('fieldA', 'fieldD', 'fieldG', 'fieldX'))
        # Sorting the reversed full field list must restore it.
        self.assertEqual(sort_unique_tuple(declared[::-1], TestModelFields),
                         declared)
        self.assertEqual(sort_unique_tuple(('fieldD', 'fieldH', 'fieldF'),
                                           TestModelFields),
                         ('fieldD', 'fieldF', 'fieldH'))
class TestSortTuples(TestCase):
    """Checks sort_unique_tuples: each inner tuple is sorted and the
    outer tuple is ordered, preserving duplicates."""
    def test(self):
        cases = [
            ((('fieldA',), ('fieldA', 'fieldD'),
              ('fieldC', 'fieldX', 'fieldB'),
              ('fieldC', 'fieldE', 'fieldH'),
              ('fieldA', 'fieldX', 'fieldC')),
             (('fieldA',), ('fieldA', 'fieldC', 'fieldX'),
              ('fieldA', 'fieldD'), ('fieldB', 'fieldC', 'fieldX'),
              ('fieldC', 'fieldE', 'fieldH'))),
            ((('fieldA', 'fieldD'), ('fieldA', 'fieldE', 'fieldX')),
             (('fieldA', 'fieldD'), ('fieldA', 'fieldE', 'fieldX'))),
            ((('fieldA', 'fieldE', 'fieldX'), ('fieldA', 'fieldD')),
             (('fieldA', 'fieldD'), ('fieldA', 'fieldE', 'fieldX'))),
            # A shorter tuple precedes its extension.
            ((('fieldA', 'fieldD', 'fieldX'), ('fieldA', 'fieldD')),
             (('fieldA', 'fieldD'), ('fieldA', 'fieldD', 'fieldX'))),
            ((('fieldA', 'fieldE'), ('fieldA', 'fieldE', 'fieldX')),
             (('fieldA', 'fieldE'), ('fieldA', 'fieldE', 'fieldX'))),
            # Duplicates are kept, not collapsed.
            ((('fieldA', 'fieldD'), ('fieldA', 'fieldD')),
             (('fieldA', 'fieldD'), ('fieldA', 'fieldD'))),
        ]
        for given, expected in cases:
            self.assertEqual(sort_unique_tuples(given, TestModelFields),
                             expected)
class TestDFS(TestCase):
    """Exercises dfs: it must create exactly dfs.size TestModelFieldsTwo
    rows, all satisfying a custom predicate plus the model's
    unique_together constraints.
    """
    def test(self):
        def constraint(cur_tuple, models, field):
            """Reject partial assignments where fieldD % 3 != 1, or where
            fieldE and fieldH (once both chosen) disagree.  The models and
            field arguments are part of the constraint signature but are
            not used here.
            """
            chosen = dict(cur_tuple)
            # Membership is tested on the dict directly; the original
            # built an intermediate key list via dict.keys() and used
            # 'not x in' instead of the idiomatic 'x not in'.
            if 'fieldD' not in chosen:
                return True
            if chosen['fieldD'] % 3 != 1:
                return False
            if not ('fieldE' in chosen and 'fieldH' in chosen):
                return True
            if chosen['fieldE'] ^ chosen['fieldH']:
                return False
            return True
        dfs.size = 30
        to_be_computed = []
        # Pre-fixed prefix of the assignment the search extends.
        cur_tup = [('fieldA', 'X'), ('fieldB', 199), ('fieldC', 'general')]
        unique_together = TestModelFieldsTwo._meta.unique_together
        unique = sort_unique_tuples(list(unique_together), TestModelFieldsTwo)
        unique_constraints = [unique_items(un_tuple) for un_tuple in unique]
        constraints = [constraint] + unique_constraints
        dfs(cur_tup, 4, to_be_computed, constraints, TestModelFieldsTwo, False)
        self.assertEqual(len(list(TestModelFieldsTwo.objects.all())), 30)
        for mdl in list(TestModelFieldsTwo.objects.all()):
            self.assertEqual(mdl.fieldA, 'X')
            self.assertEqual(mdl.fieldB, 199)
            self.assertEqual(mdl.fieldC, 'general')
            # Only these sample values satisfy fieldD % 3 == 1.
            self.assertTrue(mdl.fieldD in [13, 19, 31, 43])
            self.assertTrue(mdl.fieldF in [6, 28, 496, 8128, 33550336])
            self.assertTrue(mdl.fieldG in ['Mathematics', 'Physics',
                                           'Chemistry', 'Biology'])
            # The predicate forbids fieldE ^ fieldH, so they must agree.
            self.assertTrue(not (mdl.fieldE ^ mdl.fieldH))
class TestGenerateModel(TestCase):
    """Checks generate_model: row counts are capped by the combinatorics
    of the sample data, related fields reference generated rows, and
    many-to-many fields are deferred via the returned to-be-computed list.
    """
    def test(self):
        generate_model(TestModelX, 5)
        generate_model(TestModelY, 95)
        generated_models = list(TestModelY.objects.all())
        length = len(generated_models)
        # Y draws from 18 value combinations per X row (6 * 3 samples).
        self.assertTrue(len(TestModelX.objects.all()) * 18 == length)
        # Requested counts exceeding the distinct sample combinations are
        # silently capped.
        generate_model(TestModelA, 7)
        self.assertEqual(len(TestModelA.objects.all()), 6)
        generate_model(TestModelB, 17)
        self.assertEqual(len(TestModelB.objects.all()), 12)
        generate_model(TestModelC, 53)
        self.assertEqual(len(TestModelC.objects.all()), 12)
        for model in generated_models:
            self.assertTrue(isinstance(model, TestModelY))
            self.assertTrue(model.field1Y in [2, 3, 5, 7, 11, 13])
            self.assertTrue(model.field2Y in ['MMa', 'XXa', 'azz'])
            self.assertTrue(model.field3Y in TestModelX.objects.all())
        # Many-to-many fields cannot be filled immediately; they come back
        # as (model, fields) pairs to be computed later.
        to_be_computed_test = generate_model(TestModelFieldsTwo, 50)
        self.assertTrue(to_be_computed_test)
        self.assertEqual(TestModelFieldsTwo, to_be_computed_test[0])
        self.assertTrue(to_be_computed_test[1])
        for fld in to_be_computed_test[1]:
            self.assertTrue(is_related(fld)
                            and 'ManyToMany' in relation_type(fld))
            self.assertEqual(fld.rel.to, TestModelE)
        generate_model(TestModelE, 2, shuffle=False)[0]
        generated_models = list(TestModelE.objects.all())
        for model in generated_models:
            self.assertTrue(isinstance(model, TestModelE))
            self.assertTrue(model.field4E in [1000000009, 1000003, 101])
            self.assertTrue(model.field1E in TestModelB.objects.all())
            self.assertTrue(all([x in TestModelA.objects.all()
                                 for x in model.field2E.all()]))
            self.assertTrue(model.field3E in TestModelC.objects.all())
class TestRecompute(TestCase):
    """Checks recompute: after manually building a cycle of related rows,
    recomputing the deferred relation fields must fill them in.
    """
    def test(self):
        # Build one row of each cycle model; save order follows the
        # dependency chain (C before D/B, D before E/F, E before A).
        c = CycleC(c='3.14159')
        c.save()
        d = CycleD(d=53, dc=c)
        d.save()
        b = CycleB(b=1000000009, bc=c)
        b.save()
        e = CycleE(e=17, ec=c, ed=d)
        e.save()
        a = CycleA(a=999, ab=b, ae=e)
        a.save()
        f = CycleF(f=123, fd=d)
        f.save()
        # Recompute the relation fields that could not be assigned at
        # creation time (field indices into list_of_fields).
        recompute(CycleD, list_of_fields(CycleD)[2])
        recompute(CycleC, list_of_fields(CycleC)[1])
        recompute(CycleC, list_of_fields(CycleC)[3])
        self.assertTrue(CycleD.objects.all()[0].df)
        self.assertTrue(CycleC.objects.all()[0].ca)
        self.assertTrue(CycleC.objects.all()[0].cc.all())
class TestGenerateData(TestCase):
    """End-to-end check of generate_test_data: every model with sample
    data gets rows, every related field points at generated rows, and
    every plain field value comes from the model's sample values.
    """
    def test(self):
        generate_test_data('tests.models', 10)
        length = len(list_of_models(mdls))
        # Track which model classes actually produced instances.
        visited = dict(zip(list_of_models(mdls), length * [False]))
        pairs = []
        data_base = dict([(mdl, list(mdl.objects.all()))
                          for mdl in list_of_models(mdls)])
        generated_data = data_base.values()
        nodes = 0
        edges = 0
        for list_model in generated_data:
            for model in list_model:
                visited[model.__class__] = True
                fields = list_of_fields(model.__class__)
                nodes += 1
                for field in fields:
                    if (not is_auto_field(field) and
                        not is_reverse_related(field)):
                        val = getattr(model, field.name)
                        if is_related(field):
                            # Related values must reference generated rows
                            # of the target model.
                            if 'ManyToMany' in relation_type(field):
                                r = data_base[field.rel.to]
                                self.assertTrue(list(val.all()))
                                self.assertTrue(all([x in r for
                                                     x in list(val.all())]))
                            else:
                                r = data_base[field.rel.to]
                                self.assertTrue(val in r)
                                edges += 1
                        else:
                            # Walk up the inheritance chain looking for the
                            # model that declares sample data (TestData
                            # attribute or a TestTemplates sample file).
                            this_model = field.model
                            while (this_model != Model and not
                                   (hasattr(this_model, 'TestData') and
                                    hasattr(this_model.TestData, field.name))
                                   and not os.path.exists(
                                   '%s/TestTemplates/sample__%s__%s' %
                                   (this_model._meta.app_label,
                                    this_model.__name__, field.name))):
                                this_model = this_model.__base__
                            if this_model == Model:
                                # Only AllFieldsModel has fields without
                                # sample data; its values are random, so
                                # only the value type can be checked.
                                self.assertEqual(model.__class__,
                                                 AllFieldsModel)
                                sample_values = field_sample_values(field)
                                if val.__class__ == unicode:
                                    val = str(val)
                                self.assertTrue(val.__class__ in
                                                map(lambda val: val.__class__,
                                                    sample_values))
                            elif (field.__class__.__name__ == 'DecimalField' or
                                  field.__class__.__name__ == 'FloatField'):
                                # Floating types are compared with a
                                # tolerance instead of exact equality.
                                sample_values = map(float,
                                                    field_sample_values(field))
                                val = float(val)
                                self.assertTrue(any(abs(val - fld_value) < 1e-5
                                                    for fld_value in
                                                    sample_values))
                            else:
                                sample_values = map(str,
                                                    field_sample_values(field))
                                val = str(val)
                                self.assertTrue(val in sample_values)
                if model.__class__ == TestModelFields:
                    # (fieldC, fieldA) is unique_together: no duplicates.
                    pr = (model.fieldC, model.fieldA)
                    self.assertFalse(pr in pairs)
                    pairs.append(pr)
                    self.assertTrue((model.fieldB < 50)
                                    or (model.fieldD / 2 % 2 == 1))
        self.assertTrue(all(visited.values()),
                        "Not all the models with sample data are generated.")
class TestDjenerator(TestCase):
    """The djenerator entry point must write a non-trivial fixture to
    the supplied file object."""
    def test(self):
        output = tempfile.TemporaryFile()
        djenerator('tests', 1, output, printing=False)
        output.seek(0)
        written = output.read()
        self.assertGreater(len(written), 600)
class TestFieldsGeneratorNumbers(TestCase):
    """Checks the numeric value generators: integer generators must stay
    within their bit-width ranges, and the decimal generator must respect
    digit/decimal-place limits.
    """
    def test(self):
        counts = {}
        # Repeat many times since the generators are randomized.
        for times in xrange(100):
            for bits in xrange(2, 64):
                for negative_allowed in xrange(0, 2):
                    gen_val = generate_integer(bits, negative_allowed)
                    self.assertIn(gen_val.__class__, [int, long])
                    if not negative_allowed:
                        self.assertGreaterEqual(gen_val, 0)
                        self.assertLess(gen_val, 2 ** (bits - 1))
                    else:
                        self.assertGreaterEqual(gen_val, -2 ** (bits - 1))
                        self.assertLess(gen_val, 2 ** (bits - 1))
            # Each convenience generator maps to a fixed bit width.
            gen_val = generate_int()
            self.assertEqual(gen_val.__class__, int)
            self.assertLessEqual(abs(gen_val), 2 ** 31)
            self.assertLess(gen_val, 2 ** 31)
            gen_val = generate_big_integer()
            self.assertIn(gen_val.__class__, [int, long])
            self.assertLessEqual(abs(gen_val), 2 ** 63)
            self.assertLess(gen_val, 2 ** 63)
            gen_val = generate_small_integer()
            self.assertEqual(gen_val.__class__, int)
            self.assertLessEqual(abs(gen_val), 2 ** 15)
            self.assertLess(gen_val, 2 ** 15)
            gen_val = generate_positive_integer()
            self.assertIn(gen_val.__class__, [int, long])
            self.assertLess(gen_val, 2 ** 31)
            self.assertGreaterEqual(gen_val, 0)
            gen_val = generate_positive_small_integer()
            self.assertEqual(gen_val.__class__, int)
            self.assertLess(gen_val, 2 ** 15)
            self.assertGreaterEqual(gen_val, 0)
            gen_val = generate_boolean()
            self.assertEqual(gen_val.__class__, bool)
            # With null allowed the generator may also return None.
            gen_val = generate_boolean(True)
            self.assertTrue((gen_val is None) or (gen_val.__class__ == bool))
            gen_val = generate_ip()
            self.assertEqual(gen_val.__class__, str)
            ip_regex = r'^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$'
            match = re.search(ip_regex, gen_val)
            self.assertRegexpMatches(gen_val, ip_regex)
            self.assertIsNotNone(match)
            match = map(int, match.groups())
            # Each dotted quad component must be a valid octet.
            self.assertTrue(all([x in range(256) for x in match]))
            gen_val = generate_comma_separated_int(random.randint(1, 1000))
            self.assertEqual(gen_val.__class__, str)
            comma_sep_regex = r'^\d{1,3}(?:,\d{3})*$'
            self.assertRegexpMatches(gen_val, comma_sep_regex)
        for digits in xrange(50):
            for decimal in xrange(1, digits):
                gen_val = generate_decimal(digits, decimal)
                self.assertEqual(gen_val.__class__, Decimal)
                gen_val = str(gen_val)
                # NOTE(review): 'counts' is accumulated but never
                # asserted against -- looks like leftover debugging.
                if 'decimal_contains_dot' in counts.keys():
                    counts['decimal_contains_dot'] += 1
                else:
                    counts['decimal_contains_dot'] = 1
                # digits + 1 accounts for the decimal point character.
                self.assertLessEqual(len(gen_val), digits + 1, gen_val)
                self.assertLessEqual(len(gen_val.split('.')[1]),
                                     decimal + (decimal == 0), gen_val)
class TestFieldsGeneratorStringGenerators(TestCase):
    """Checks generate_sentence / generate_text length limits, word
    separators and end-character handling.
    """
    def test(self):
        # Very short limits produce exactly 'length' characters.
        for length in xrange(1, 3):
            gen_sentence = generate_sentence(length)
            self.assertEqual(len(gen_sentence), length)
        for length in xrange(3, 50):
            seperators = [['.'], ['-', '_'], ['@']]
            for sep in seperators:
                for _ in xrange(20):
                    gen_val = generate_sentence(length, seperators=sep)
                    self.assertEqual(gen_val.__class__, str)
                    self.assertLessEqual(len(gen_val), length * 2)
                    # Words joined only by the requested separators and
                    # ending with a full stop.
                    reg = r'^(?:\w+(?:%s))*\w+\.$' % str.join('|', sep)
                    self.assertRegexpMatches(gen_val, reg)
            gen_text = generate_text(length)
            txt_re = r'^(?:(?:\w+\s?)+\.)+(?:\s(?:\w+\s?)+\.)*$'
            self.assertLessEqual(len(gen_text), length)
            self.assertRegexpMatches(gen_text, txt_re, gen_text)
            # Default end character is a single '.'.
            gen_sentence = generate_sentence(length)
            self.assertLessEqual(len(gen_sentence), length)
            sent_re = r'^(?:\w+\s?)+\.$'
            self.assertRegexpMatches(gen_sentence, sent_re, gen_sentence)
            # An optional end character makes the trailing '.' optional.
            gen_sentence = generate_sentence(length, end_char=['', '.'])
            self.assertLessEqual(len(gen_sentence), length)
            sent_re = r'^(?:\w+\s?)+\.?$'
            self.assertRegexpMatches(gen_sentence, sent_re, gen_sentence)
            # end_char=None suppresses the end character entirely.
            gen_sentence = generate_sentence(length, end_char=None)
            self.assertLessEqual(len(gen_sentence), length)
            sent_re = r'^(?:\w+\s?)+$'
            self.assertRegexpMatches(gen_sentence, sent_re, gen_sentence)
            # Multiple end characters: any one of them may terminate.
            gen_sentence = generate_sentence(length, end_char=['.', ','])
            self.assertLessEqual(len(gen_sentence), length)
            sent_re = r'^(?:\w+\s?)+[\.,]$'
            self.assertRegexpMatches(gen_sentence, sent_re, gen_sentence)
class TestFieldsGeneratorChar(TestCase):
    """Checks generate_string over all combinations of the character-class
    flags, plus generate_email / generate_url length and format.
    """
    def test(self):
        # chr -> ASCII code lookup and helpers for character ranges.
        ascii_val = dict([(chr(n), n) for n in xrange(128)])
        ascii_rng = lambda beg, end: xrange(ascii_val[beg], ascii_val[end] + 1)
        chr_range = lambda beg, end: map(chr, ascii_rng(beg, end))
        for log in xrange(0, 6):
            # Sample a few lengths per order of magnitude.
            lengths = random.sample(range(10 ** log,
                                    10 ** (log + 1) + 1 - bool(log)), 10)
            for length in lengths:
                # Every True/False combination of the six flags.
                for tup in itertools.product(*zip(6 * [True], 6 * [False])):
                    lower, upper, digits, special, null_allowed, exact = tup
                    # Occasionally restrict specials to an explicit list.
                    if random.randint(1, 6) < 3:
                        special = ['@', '!', '~']
                    if not (lower or upper or digits or special):
                        continue
                    gen_val = generate_string(length, lower, upper, digits,
                                              special, null_allowed, exact)
                    existing_chars = set([])
                    for char in gen_val:
                        existing_chars.add(char)
                    # Build the set of characters that must NOT appear
                    # given the disabled flags.
                    excluded = []
                    if not upper:
                        excluded.extend(chr_range('A', 'Z'))
                    if not lower:
                        excluded.extend(chr_range('a', 'z'))
                    if not digits:
                        excluded.extend(chr_range('0', '9'))
                    if not special:
                        # The four ASCII punctuation blocks.
                        excluded.extend(chr_range('!', '/'))
                        excluded.extend(chr_range(':', '@'))
                        excluded.extend(chr_range('[', '`'))
                        excluded.extend(chr_range('{', '~'))
                    else:
                        if isinstance(special, list):
                            # Only the listed specials are allowed; all
                            # other punctuation is excluded.
                            special_excluded = []
                            special_excluded.extend(chr_range('!', '/'))
                            special_excluded.extend(chr_range(':', '@'))
                            special_excluded.extend(chr_range('[', '`'))
                            special_excluded.extend(chr_range('{', '~'))
                            special_excluded = set(special_excluded)
                            special_excluded = special_excluded - set(special)
                            excluded.extend(list(special_excluded))
                    self.assertFalse(existing_chars & set(excluded),
                                     str(existing_chars) +
                                     str(set(excluded) & existing_chars))
                    if exact:
                        self.assertEqual(len(gen_val), length)
                    elif not null_allowed:
                        self.assertGreater(len(gen_val), 0)
                    self.assertGreaterEqual(len(gen_val), 0)
                    self.assertLessEqual(len(gen_val), length)
                email = generate_email(length)
                self.assertTrue(isinstance(email, str), email)
                self.assertLessEqual(len(email), length)
                # Shorter limits cannot fit a well-formed address.
                if length >= 7:
                    email_reg = r'^\w+(?:\.\w+)*@(?:[A-Za-z0-9]+\.)+[A-Za-z]+$'
                    self.assertRegexpMatches(email, email_reg)
                url = generate_url(length)
                self.assertTrue(isinstance(url, str), url)
                self.assertLessEqual(len(url), length)
                if length >= 16:
                    url_re = r'^(?:http|ftp|https)://(?:[a-z0-9_\-]+\.?)+/?'
                    url_re += r'(?:/[a-z0-9_\-]+)*/?$'
                    self.assertRegexpMatches(url, url_re)
class TestFieldsGeneratorDateTime(TestCase):
    """Checks the date/time generators return the proper types, and that
    the auto_now variants (argument True) track the current moment.
    """
    def test(self):
        for _ in xrange(10000):
            gen_val = generate_date_time()
            self.assertTrue(gen_val)
            self.assertEqual(gen_val.__class__, datetime.datetime)
            gen_val = generate_time()
            self.assertTrue(gen_val)
            self.assertEqual(gen_val.__class__, datetime.time)
            gen_val = generate_date()
            self.assertTrue(gen_val)
            self.assertEqual(gen_val.__class__, datetime.date)
        for _ in xrange(100):
            # "Now" values must be within a millisecond of the clock.
            gen_val = generate_date_time(True)
            self.assertTrue(gen_val)
            self.assertEqual(gen_val.__class__, datetime.datetime)
            now = datetime.datetime.now()
            self.assertLess(abs((gen_val - now).total_seconds()), 10e-4)
            gen_val = generate_time(True)
            self.assertTrue(gen_val)
            self.assertEqual(gen_val.__class__, datetime.time)
            now = datetime.datetime.now().time()
            # Compare times as whole seconds since midnight; allow one
            # second of skew between the two now() calls.
            gen_val_hash = gen_val.second
            gen_val_hash += gen_val.hour * 3600 + gen_val.minute * 60
            now_hash = now.hour * 3600 + now.minute * 60 + now.second
            self.assertLessEqual(gen_val_hash, now_hash + 1)
            gen_val = generate_date(True)
            self.assertTrue(gen_val)
            self.assertEqual(gen_val.__class__, datetime.date)
            now = datetime.datetime.now().date()
            self.assertEqual(gen_val, now)
|
|
import copy
import csv
import pytz
from datetime import datetime
from dateutil.relativedelta import relativedelta, weekday
from django.core import mail
from django.core.exceptions import ValidationError
from django.db import connection, transaction, models
from django.http import HttpResponse
from django.template.response import TemplateResponse
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import get_current_timezone
from django.contrib import messages
from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.admin import helpers
from .forms import MassMailForm, CopyEventsForm, RecurringEventForm
from .models import (Resource, Event, RuleGroup, Rule, Price, PriceTable,
GroupSetting)
from .api import start_of_week
try:
from rcal_registration.admin import UserProfileInline
except ImportError:
UserProfileInline = None
class CSVExportMixin(object):
    """Admin mixin adding an 'export_as_csv' action that serializes the
    queryset to CSV, following the '__'-separated relation lookups listed
    in ``export_fields``.
    """
    actions = ['export_as_csv']
    # Lookups to export, e.g. 'user__username'; '__' descends relations.
    export_fields = []

    def export_as_csv(self, request, queryset):
        """Admin action: stream the queryset as a CSV attachment."""
        response = HttpResponse(content_type='text/plain')
        response['Filename'] = 'export.csv'
        response['Content-Disposition'] = 'attachment; filename=export.csv'
        writer = csv.writer(response)
        # Not too nice, but works well enough for us.
        writer.writerow([i.replace('__', '.') for i in self.export_fields])
        # Apply select/prefetch_related where it makes sense
        parts = [i.split('__') for i in self.export_fields]
        for p in parts:
            if len(p) > 1:
                join_cond = '__'.join(p[:-1])
                # BUG FIX: get_field_by_name returns a (field, model,
                # direct, m2m) tuple, not the field itself (see the [0]
                # indexing in the loop below).  Without [0] the isinstance
                # check was always False, so select_related was never
                # applied and every relation fell through to
                # prefetch_related.
                field = self.model._meta.get_field_by_name(p[0])[0]
                if isinstance(field, models.ForeignKey):
                    queryset = queryset.select_related(join_cond)
                else:
                    queryset = queryset.prefetch_related(join_cond)
        # Reset the captured query log so only the export's own queries
        # are recorded from here on.
        connection.queries[:] = []
        for obj in queryset:
            data = []
            for lookups in parts:
                res = obj
                for index, lookup in enumerate(lookups):
                    is_m2m = isinstance(
                        res._meta.get_field_by_name(lookup)[0],
                        models.ManyToManyField)
                    if is_m2m:
                        # Collect the next lookup from every related object
                        # and join them into a single ';'-separated cell.
                        res = [getattr(x, lookups[index + 1])
                               for x in getattr(res, lookup).all()]
                        res = ';'.join(res)
                        break
                    else:
                        res = getattr(res, lookup)
                # Booleans are exported as 1/0.
                data.append((1 if res else 0) if isinstance(res, bool) else res)
            data = map(lambda x: x.encode('utf-8')
                       if isinstance(x, unicode) else x, data)
            writer.writerow(data)
        return response
    export_as_csv.short_description = _('Export as CSV')
class EventAdmin(CSVExportMixin, admin.ModelAdmin):
    """Admin for Event with CSV export and an embedded recurring-event
    form: saving an event with repetition data clones it forward until
    the chosen end date.
    """
    list_display = ('title', 'resource', 'start', 'end', 'user',
                    'group', 'missed', 'payed')
    list_filter = ('missed', 'payed', 'resource', 'group')
    list_editable = ('missed', 'payed')
    date_hierarchy = 'start'
    search_fields = ('title', 'user__username', 'user__last_name',
                     'user__first_name')
    export_fields = ['title', 'resource__name', 'user__username', 'start',
                     'end', 'missed', 'payed', 'price', 'group__name']
    fieldsets = (
        (None, {
            'fields': (('title', 'price', 'payed', 'missed'), ('user', 'group'),
                       'description', ('start', 'end', 'resource'), 'added_by')}),
    )
    def get_recurring_event_form(self, request):
        """Build the repetition side-form; unbound unless this is a POST.
        empty_permitted lets the user leave repetition blank entirely."""
        return RecurringEventForm(request.POST if request.method=='POST' else None,
                                  empty_permitted=True)
    def render_change_form(self, request, context, *args, **kwargs):
        # Expose the repetition form to the change-form template.
        context['recurring_form'] = self.get_recurring_event_form(request)
        return super(EventAdmin, self).render_change_form(request, context, *args, **kwargs)
    def queryset(self, request):
        return super(EventAdmin, self).queryset(request)\
            .prefetch_related('user', 'group', 'resource')
    def get_form(self, request, obj=None, **kwargs):
        # TODO: HACK :(
        # Wrap the model form's clean so that an invalid repetition form
        # blocks saving the event itself.
        form = super(EventAdmin, self).get_form(request, obj, **kwargs)
        recurring_event_form = self.get_recurring_event_form(request)
        old_clean = form.clean
        def new_clean(self):
            cleaned_data = old_clean(self)
            if not recurring_event_form.is_valid():
                raise ValidationError(_('Please correct the errors in the repetition form.'))
            return cleaned_data
        form.clean = new_clean
        return form
    def save_model(self, request, obj, form, change):
        """Save the event; when the repetition form holds data, clone the
        event forward (daily/weekly/monthly) until end_date.
        """
        obj.save()
        recurring_form = self.get_recurring_event_form(request)
        if recurring_form.is_valid():
            # Empty cleaned_data means no repetition was requested.
            if not recurring_form.cleaned_data:
                return
            end_date = recurring_form.cleaned_data['end_date']
            type = recurring_form.cleaned_data['type']
            type_map = {
                'daily': relativedelta(days=+1),
                'weekly': relativedelta(weeks=+1),
                'monthly': relativedelta(months=+1)
            }
            increment = type_map[type]
            cp_obj = obj
            # Preserve the event's duration across copies.
            delta = obj.end - obj.start
            objects = []
            failure = False
            while cp_obj.start.date() < end_date:
                cp_obj = copy.copy(cp_obj)
                # Take DST into account.
                start = cp_obj.start.replace(tzinfo=None)
                start += increment
                cp_obj.start = get_current_timezone().localize(start)
                cp_obj.end = cp_obj.start + delta
                # Clearing the pk makes bulk_create insert a new row.
                cp_obj.pk = None
                if cp_obj.start.date() > end_date:
                    continue
                try:
                    cp_obj.full_clean()
                except ValidationError:
                    # Skip copies that fail validation (e.g. overlap);
                    # report a partial failure afterwards.
                    failure = True
                else:
                    objects.append(cp_obj)
            Event.objects.bulk_create(objects)
            if failure:
                messages.add_message(request, messages.ERROR,
                                     _('Not all events copied.'))
            else:
                messages.add_message(request, messages.SUCCESS,
                                     _('Events copied successfully.'))
class RuleInline(admin.TabularInline):
    """Tabular inline for editing Rule rows on the parent change page."""
    model = Rule
class RuleGroupAdmin(admin.ModelAdmin):
    """Admin for RuleGroup with inline rules and a two-pane group picker."""
    list_display = ('name', 'action')
    filter_horizontal = ('groups',)
    inlines = (RuleInline,)
    ordering = ('name', 'action')
class ResourceAdmin(admin.ModelAdmin):
    """Admin for Resource; only customizes the default ordering."""
    ordering = ('name',)
class UserAdmin(CSVExportMixin, auth_admin.UserAdmin):
    """Extends Django's stock UserAdmin with CSV export, a mass-mail action
    and a raw-SQL toggle of the is_active flag for non-superusers."""
    if UserProfileInline:
        inlines = [UserProfileInline]
    list_display = auth_admin.UserAdmin.list_display + ('is_active', 'assigned_groups',)
    list_filter = auth_admin.UserAdmin.list_filter + ('groups',)
    filter_horizontal = ('groups', 'user_permissions')
    actions = ['delete_selected', 'send_mass_mail', 'toggle_user_status']
    # Columns emitted by CSVExportMixin's export action.
    export_fields = ['first_name', 'last_name', 'email',
                     'userprofile__phone_number', 'userprofile__display_name',
                     'userprofile__address', 'userprofile__city', 'groups__name']
    def assigned_groups(self, obj):
        # Comma-joined group names for the change list column.
        return ','.join(map(lambda x: x.name, obj.groups.all()))
    assigned_groups.short_description = _('groups')
    def queryset(self, request):
        # Prefetch groups so assigned_groups() does not query per row.
        return super(UserAdmin, self).queryset(request)\
            .prefetch_related('groups')
    def send_mass_mail(self, request, queryset):
        """Admin action: show a subject/text form, then mail each selected
        user (one message per recipient) on confirmation."""
        opts = self.model._meta
        app_label = opts.app_label
        form = MassMailForm()
        if request.POST.get('post'):
            form = MassMailForm(request.POST)
            if form.is_valid():
                data = []
                # Skip users without an e-mail address.
                recipients = [user.email for user in queryset if user.email]
                for recipient in recipients:
                    data.append((form.cleaned_data['subject'], form.cleaned_data['text'],
                                 None, (recipient,)))
                mail.send_mass_mail(data)
                messages.success(request, _('Mails sent successfully.'))
                return  # Let django handle redirections.
        context = {
            'title': _('Send mass mail'),
            'queryset': queryset,
            'opts': opts,
            'app_label': app_label,
            'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
            'form': form,
        }
        return TemplateResponse(request, 'rcal/send_mass_mail.html', context,
                                current_app=self.admin_site.name)
    send_mass_mail.short_description = _('Send mass mail')
    def toggle_user_status(self, request, queryset):
        """Admin action: flip is_active for non-superusers via raw SQL.

        When 'select_across' is set the whole table is toggled; otherwise
        only the checked ids (passed as a parameterized tuple) are touched.
        """
        cursor = connection.cursor()
        sql = "UPDATE auth_user SET is_active = NOT is_active WHERE is_superuser = False"
        if int(request.POST.get('select_across')):
            cursor.execute(sql)
        else:
            # Ids are bound as a parameter, not string-formatted into SQL.
            sql += " AND id in %s"
            cursor.execute(sql, [tuple(request.POST.getlist('_selected_action'))])
        transaction.commit_unless_managed()
        messages.success(request, _('Toggled user status successfully.'))
    toggle_user_status.short_description = _('Toggle the active flag of selected non superusers')
class PriceInline(admin.TabularInline):
    """Tabular inline for editing Price rows on the price table page."""
    model = Price
class PricetableAdmin(admin.ModelAdmin):
    """Admin for PriceTable with inline prices and a grouped field layout."""
    fieldsets = ((None, {
        'fields': ('name', 'weekdays', ('groups', 'resources'),
                   ('valid_from', 'valid_to'))
    }),)
    inlines = [PriceInline]
class GroupSettingInline(admin.TabularInline):
    """Tabular inline for GroupSetting, attached to the stock Group admin below."""
    model = GroupSetting
# Attach group settings to Django's stock Group admin, then register all
# model admins with the default admin site.
auth_admin.GroupAdmin.inlines = [GroupSettingInline]
admin.site.register(Event, EventAdmin)
admin.site.register(Resource, ResourceAdmin)
admin.site.register(RuleGroup, RuleGroupAdmin)
# Swap the default User admin for the CSV-exporting variant above.
admin.site.unregister(auth_admin.User)
admin.site.register(auth_admin.User, UserAdmin)
admin.site.register(PriceTable, PricetableAdmin)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# DCASE 2016::Acoustic Scene Classification / Baseline System
from src.ui import *
from src.general import *
from src.files import *
from src.features import *
from src.dataset import *
from src.evaluation import *
import numpy
import csv
import argparse
import textwrap
import timeit
from sklearn.metrics import confusion_matrix
from sklearn.externals import joblib
from sklearn import mixture
from sklearn import preprocessing as pp
import xgboost as xgb
__version_info__ = ('1', '0', '0')
__version__ = '.'.join(__version_info__)
final_result = {}
def main(argv):
    """Run the DCASE 2016 Task 1 baseline pipeline end to end.

    Parses command-line flags (-development / -challenge), loads the YAML
    parameter file that sits next to this script, then drives the stages
    enabled under params['flow']: data fetch, feature extraction, feature
    normalization, training, testing and evaluation.  Stage timings are
    recorded in the module-level ``final_result`` dict and dumped via joblib.

    Parameters
    ----------
    argv : list
        command-line arguments (unused here; argparse reads sys.argv)

    Returns
    -------
    int
        0 on success
    """
    tot_start = timeit.default_timer()
    numpy.random.seed(123456)  # let's make randomization predictable
    parser = argparse.ArgumentParser(
        prefix_chars='-+',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent('''\
            DCASE 2016
            Task 1: Acoustic Scene Classification
            Baseline system
            ---------------------------------------------
            Tampere University of Technology / Audio Research Group
            Author: Toni Heittola ( toni.heittola@tut.fi )
            System description
            This is an baseline implementation for D-CASE 2016 challenge acoustic scene classification task.
            Features: MFCC (static+delta+acceleration)
            Classifier: GMM
        '''))
    # Setup argument handling
    parser.add_argument("-development", help="Use the system in the development mode", action='store_true',
                        default=False, dest='development')
    parser.add_argument("-challenge", help="Use the system in the challenge mode", action='store_true',
                        default=False, dest='challenge')
    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
    args = parser.parse_args()
    # Load parameters from config file (same basename as this script, .yaml)
    parameter_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                  os.path.splitext(os.path.basename(__file__))[0]+'.yaml')
    params = load_parameters(parameter_file)
    params = process_parameters(params)
    make_folders(params)
    title("DCASE 2016::Acoustic Scene Classification / Baseline System")
    # Check if mode is defined; default to development when neither flag given
    if not (args.development or args.challenge):
        args.development = True
        args.challenge = False
    dataset_evaluation_mode = 'folds'
    if args.development and not args.challenge:
        print "Running system in development mode"
        dataset_evaluation_mode = 'folds'
    elif not args.development and args.challenge:
        print "Running system in challenge mode"
        dataset_evaluation_mode = 'full'
    # Get dataset container class
    # NOTE(review): eval() instantiates a class named in the YAML config.
    # Fine for a trusted local config; unsafe if the config is untrusted.
    dataset = eval(params['general']['development_dataset'])(data_path=params['path']['data'])
    plot_name = params['classifier']['method']
    # Fetch data over internet and setup the data
    # ==================================================
    if params['flow']['initialize']:
        dataset.fetch()
    # Extract features for all audio files in the dataset
    # ==================================================
    if params['flow']['extract_features']:
        section_header('Feature extraction')
        # Collect files in train sets
        files = []
        for fold in dataset.folds(mode=dataset_evaluation_mode):
            for item_id, item in enumerate(dataset.train(fold)):
                if item['file'] not in files:
                    files.append(item['file'])
            for item_id, item in enumerate(dataset.test(fold)):
                if item['file'] not in files:
                    files.append(item['file'])
        files = sorted(files)
        # Go through files and make sure all features are extracted
        do_feature_extraction(files=files,
                              dataset=dataset,
                              feature_path=params['path']['features'],
                              params=params['features'],
                              overwrite=params['general']['overwrite'])
        foot()
    # Prepare feature normalizers
    # ==================================================
    if params['flow']['feature_normalizer']:
        section_header('Feature normalizer')
        do_feature_normalization(dataset=dataset,
                                 feature_normalizer_path=params['path']['feature_normalizers'],
                                 feature_path=params['path']['features'],
                                 dataset_evaluation_mode=dataset_evaluation_mode,
                                 overwrite=params['general']['overwrite'])
        foot()
    train_start = 0.0
    train_end = 0.0
    test_start = 0.0
    test_end = 0.0
    # System training
    # ==================================================
    if params['flow']['train_system']:
        section_header('System training')
        train_start = timeit.default_timer()
        do_system_training(dataset=dataset,
                           model_path=params['path']['models'],
                           feature_normalizer_path=params['path']['feature_normalizers'],
                           feature_path=params['path']['features'],
                           classifier_params=params['classifier']['parameters'],
                           classifier_method=params['classifier']['method'],
                           dataset_evaluation_mode=dataset_evaluation_mode,
                           overwrite=params['general']['overwrite']
                           )
        train_end = timeit.default_timer()
        foot()
    # System evaluation in development mode
    if args.development and not args.challenge:
        # System testing
        # ==================================================
        if params['flow']['test_system']:
            section_header('System testing')
            test_start = timeit.default_timer()
            do_system_testing(dataset=dataset,
                              feature_path=params['path']['features'],
                              result_path=params['path']['results'],
                              model_path=params['path']['models'],
                              feature_params=params['features'],
                              dataset_evaluation_mode=dataset_evaluation_mode,
                              classifier_method=params['classifier']['method'],
                              overwrite=params['general']['overwrite']
                              )
            test_end = timeit.default_timer()
            foot()
        # System evaluation
        # ==================================================
        if params['flow']['evaluate_system']:
            section_header('System evaluation')
            do_system_evaluation(dataset=dataset,
                                 dataset_evaluation_mode=dataset_evaluation_mode,
                                 result_path=params['path']['results'])
            foot()
    # System evaluation with challenge data
    elif not args.development and args.challenge:
        # Fetch data over internet and setup the data
        challenge_dataset = eval(params['general']['challenge_dataset'])()
        if params['flow']['initialize']:
            challenge_dataset.fetch()
        # System testing; challenge results are always regenerated
        if params['flow']['test_system']:
            section_header('System testing with challenge data')
            do_system_testing(dataset=challenge_dataset,
                              feature_path=params['path']['features'],
                              result_path=params['path']['challenge_results'],
                              model_path=params['path']['models'],
                              feature_params=params['features'],
                              dataset_evaluation_mode=dataset_evaluation_mode,
                              classifier_method=params['classifier']['method'],
                              overwrite=True
                              )
            foot()
            print " "
            print "Your results for the challenge data are stored at ["+params['path']['challenge_results']+"]"
            print " "
    tot_end = timeit.default_timer()
    print " "
    print "Train Time : " + str(train_end-train_start)
    print " "
    print " "
    print "Test Time : " + str(test_end-test_start)
    print " "
    print " "
    print "Total Time : " + str(tot_end-tot_start)
    print " "
    final_result['train_time'] = train_end-train_start
    final_result['test_time'] = test_end-test_start
    final_result['tot_time'] = tot_end-tot_start
    joblib.dump(final_result, 'result' + plot_name + '.pkl')
    return 0
def process_parameters(params):
    """Parameter post-processing.

    Converts the windowing parameters from seconds to samples, selects the
    parameter set of the active classifier, computes content hashes for the
    feature and classifier sections, and rewrites all output paths to be
    absolute (anchored next to this script) and hash-qualified.  The plain
    relative path of each output group is preserved under ``<name>_``.

    Parameters
    ----------
    params : dict
        parameters in dict

    Returns
    -------
    params : dict
        processed parameters (modified in place and returned)
    """
    feats = params['features']
    paths = params['path']
    # Convert feature extraction window and hop sizes from seconds to samples.
    feats['mfcc']['win_length'] = int(feats['win_length_seconds'] * feats['fs'])
    feats['mfcc']['hop_length'] = int(feats['hop_length_seconds'] * feats['fs'])
    # Copy the parameter set of the selected classifier method.
    params['classifier']['parameters'] = params['classifier_parameters'][params['classifier']['method']]
    # Content hashes used to build per-configuration folder names.
    feats['hash'] = get_parameter_hash(feats)
    params['classifier']['hash'] = get_parameter_hash(params['classifier'])
    # Anchor data/base paths next to this script.
    script_dir = os.path.dirname(os.path.realpath(__file__))
    paths['data'] = os.path.join(script_dir, paths['data'])
    paths['base'] = os.path.join(script_dir, paths['base'])
    # Hash-qualify every output path, remembering the raw value as '<name>_'.
    hash_suffixes = (
        ('features', (feats['hash'],)),
        ('feature_normalizers', (feats['hash'],)),
        ('models', (feats['hash'], params['classifier']['hash'])),
        ('results', (feats['hash'], params['classifier']['hash'])),
    )
    for name, suffix in hash_suffixes:
        paths[name + '_'] = paths[name]
        paths[name] = os.path.join(paths['base'], paths[name], *suffix)
    return params
def make_folders(params, parameter_filename='parameters.yaml'):
    """Create all needed output folders and drop parameter snapshots.

    A yaml copy of the relevant parameter section is written into each
    hash-named folder level so the folder contents can be identified by
    manual browsing.  Existing snapshot files are left untouched.

    Parameters
    ----------
    params : dict
        parameters in dict
    parameter_filename : str
        filename to save parameters used to generate the folder name

    Returns
    -------
    nothing
    """
    paths = params['path']
    # Ensure every output directory exists.
    for key in ('features', 'feature_normalizers', 'models', 'results'):
        check_path(paths[key])
    feature_hash = params['features']['hash']
    classifier_hash = params['classifier']['hash']
    # (snapshot file, parameter section to save) pairs, in the original order.
    snapshots = (
        (os.path.join(paths['features'], parameter_filename),
         params['features']),
        (os.path.join(paths['feature_normalizers'], parameter_filename),
         params['features']),
        (os.path.join(paths['base'], paths['models_'], feature_hash,
                      parameter_filename),
         params['features']),
        (os.path.join(paths['base'], paths['models_'], feature_hash,
                      classifier_hash, parameter_filename),
         params['classifier']),
        (os.path.join(paths['base'], paths['results_'], feature_hash,
                      parameter_filename),
         params['features']),
        (os.path.join(paths['base'], paths['results_'], feature_hash,
                      classifier_hash, parameter_filename),
         params['classifier']),
    )
    for snapshot_file, section in snapshots:
        if not os.path.isfile(snapshot_file):
            save_parameters(snapshot_file, section)
def get_feature_filename(audio_file, path, extension='cpickle'):
    """Get feature filename

    Parameters
    ----------
    audio_file : str
        audio file name from which the features are extracted
    path : str
        feature path
    extension : str
        file extension
        (Default value='cpickle')

    Returns
    -------
    feature_filename : str
        full feature filename
    """
    base_name = os.path.splitext(os.path.basename(audio_file))[0]
    return os.path.join(path, base_name + '.' + extension)
def get_feature_normalizer_filename(fold, path, extension='cpickle'):
    """Get normalizer filename

    Parameters
    ----------
    fold : int >= 0
        evaluation fold number
    path : str
        normalizer path
    extension : str
        file extension
        (Default value='cpickle')

    Returns
    -------
    normalizer_filename : str
        full normalizer filename
    """
    filename = 'scale_fold{0}.{1}'.format(fold, extension)
    return os.path.join(path, filename)
def get_model_filename(fold, path, extension='cpickle'):
    """Get model filename

    Parameters
    ----------
    fold : int >= 0
        evaluation fold number
    path : str
        model path
    extension : str
        file extension
        (Default value='cpickle')

    Returns
    -------
    model_filename : str
        full model filename
    """
    filename = 'model_fold{0}.{1}'.format(fold, extension)
    return os.path.join(path, filename)
def get_result_filename(fold, path, extension='txt'):
    """Get result filename

    Fold 0 maps to the fold-less 'results' file; any other fold gets a
    '_fold<N>' suffix.

    Parameters
    ----------
    fold : int >= 0
        evaluation fold number (0 = single/full evaluation)
    path : str
        result path
    extension : str
        file extension
        (Default value='txt')

    Returns
    -------
    result_filename : str
        full result filename
    """
    fold_suffix = '' if fold == 0 else '_fold' + str(fold)
    return os.path.join(path, 'results' + fold_suffix + '.' + extension)
def do_feature_extraction(files, dataset, feature_path, params, overwrite=False):
    """Feature extraction

    Extracts MFCC-based features for each listed audio file and caches them
    on disk; files whose feature cache already exists are skipped unless
    ``overwrite`` is set.

    Parameters
    ----------
    files : list
        file list
    dataset : class
        dataset class
    feature_path : str
        path where the features are saved
    params : dict
        parameter dict
    overwrite : bool
        overwrite existing feature files
        (Default value=False)
    Returns
    -------
    nothing
    Raises
    -------
    IOError
        Audio file not found.
    """
    # Check that target path exists, create if not
    check_path(feature_path)
    for file_id, audio_filename in enumerate(files):
        # Get feature filename (cache key = audio basename)
        current_feature_file = get_feature_filename(audio_file=os.path.split(audio_filename)[1], path=feature_path)
        progress(title_text='Extracting',
                 percentage=(float(file_id) / len(files)),
                 note=os.path.split(audio_filename)[1])
        if not os.path.isfile(current_feature_file) or overwrite:
            # Load audio data (mono, resampled to params['fs'])
            if os.path.isfile(dataset.relative_to_absolute_path(audio_filename)):
                y, fs = load_audio(filename=dataset.relative_to_absolute_path(audio_filename), mono=True, fs=params['fs'])
            else:
                raise IOError("Audio file not found [%s]" % audio_filename)
            # Extract features
            feature_data = feature_extraction(y=y,
                                              fs=fs,
                                              include_mfcc0=params['include_mfcc0'],
                                              include_delta=params['include_delta'],
                                              include_acceleration=params['include_acceleration'],
                                              mfcc_params=params['mfcc'],
                                              delta_params=params['mfcc_delta'],
                                              acceleration_params=params['mfcc_acceleration'])
            # Save to the per-file feature cache
            save_data(current_feature_file, feature_data)
def do_feature_normalization(dataset, feature_normalizer_path, feature_path, dataset_evaluation_mode='folds', overwrite=False):
    """Feature normalization
    Calculated normalization factors for each evaluation fold based on the training material available.

    Parameters
    ----------
    dataset : class
        dataset class
    feature_normalizer_path : str
        path where the feature normalizers are saved.
    feature_path : str
        path where the features are saved.
    dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode, 'full' all material available is considered to belong to one fold.
        (Default value='folds')
    overwrite : bool
        overwrite existing normalizers
        (Default value=False)
    Returns
    -------
    nothing
    Raises
    -------
    IOError
        Feature file not found.
    """
    # Check that target path exists, create if not
    check_path(feature_normalizer_path)
    for fold in dataset.folds(mode=dataset_evaluation_mode):
        current_normalizer_file = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
        if not os.path.isfile(current_normalizer_file) or overwrite:
            # Initialize statistics accumulator for this fold
            file_count = len(dataset.train(fold))
            normalizer = FeatureNormalizer()
            for item_id, item in enumerate(dataset.train(fold)):
                progress(title_text='Collecting data',
                         fold=fold,
                         percentage=(float(item_id) / file_count),
                         note=os.path.split(item['file'])[1])
                # Load pre-computed per-file statistics ('stat' entry)
                if os.path.isfile(get_feature_filename(audio_file=item['file'], path=feature_path)):
                    feature_data = load_data(get_feature_filename(audio_file=item['file'], path=feature_path))['stat']
                else:
                    raise IOError("Feature file not found [%s]" % (item['file']))
                # Accumulate statistics
                normalizer.accumulate(feature_data)
            # Calculate normalization factors over the whole training fold
            normalizer.finalize()
            # Save
            save_data(current_normalizer_file, normalizer)
def do_system_training(dataset, model_path, feature_normalizer_path, feature_path, classifier_params,
                       dataset_evaluation_mode='folds', classifier_method='xgboost', overwrite=False):
    """System training

    Trains one xgboost booster per fold over all scene classes and stores it
    together with the fold's feature normalizer.

    model container format:
    {
        'normalizer': normalizer class
        'models' : trained xgboost booster
    }

    Parameters
    ----------
    dataset : class
        dataset class
    model_path : str
        path where the models are saved.
    feature_normalizer_path : str
        path where the feature normalizers are saved.
    feature_path : str
        path where the features are saved.
    classifier_params : dict
        parameter dict
    dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode, 'full' all material available is considered to belong to one fold.
        (Default value='folds')
    classifier_method : str ['xgboost']
        classifier method, only 'xgboost' supported
        (Default value='xgboost')
    overwrite : bool
        overwrite existing models
        (Default value=False)
    Returns
    -------
    nothing
    Raises
    -------
    ValueError
        classifier_method is unknown.
    IOError
        Feature normalizer not found.
        Feature file not found.
    """
    if classifier_method != 'xgboost':
        raise ValueError("Unknown classifier method ["+classifier_method+"]")
    # Check that target path exists, create if not
    check_path(model_path)
    for fold in dataset.folds(mode=dataset_evaluation_mode):
        current_model_file = get_model_filename(fold=fold, path=model_path)
        if not os.path.isfile(current_model_file) or overwrite:
            # Load normalizer
            feature_normalizer_filename = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
            if os.path.isfile(feature_normalizer_filename):
                normalizer = load_data(feature_normalizer_filename)
            else:
                raise IOError("Feature normalizer not found [%s]" % feature_normalizer_filename)
            # Initialize model container
            model_container = {'normalizer': normalizer, 'models': {}}
            # Collect training examples, grouped by scene label
            file_count = len(dataset.train(fold))
            data = {}
            for item_id, item in enumerate(dataset.train(fold)):
                progress(title_text='Collecting data',
                         fold=fold,
                         percentage=(float(item_id) / file_count),
                         note=os.path.split(item['file'])[1])
                # Load features
                feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
                if os.path.isfile(feature_filename):
                    feature_data = load_data(feature_filename)['feat']
                else:
                    raise IOError("Features not found [%s]" % (item['file']))
                # Scale features with the fold's normalizer
                feature_data = model_container['normalizer'].normalize(feature_data)
                # Store features per class label
                if item['scene_label'] not in data:
                    data[item['scene_label']] = feature_data
                else:
                    data[item['scene_label']] = numpy.vstack((data[item['scene_label']], feature_data))
            le = pp.LabelEncoder()
            tot_data = {}
            # Flatten the per-class features into one design matrix 'x' with
            # matching string labels 'y' (one label per frame).
            for label in data:
                progress(title_text='Train models',
                         fold=fold,
                         note=label)
                if classifier_method == 'xgboost':
                    if 'x' not in tot_data:
                        tot_data['x'] = data[label]
                        tot_data['y'] = numpy.repeat(label,len(data[label]), axis=0)
                    else:
                        tot_data['x'] = numpy.vstack((tot_data['x'], data[label]))
                        tot_data['y'] = numpy.hstack((tot_data['y'], numpy.repeat(label, len(data[label]), axis=0)))
                else:
                    raise ValueError("Unknown classifier method ["+classifier_method+"]")
            # NOTE(review): debug prints left in from development.
            print tot_data['y']
            # Encode string labels to integer ids for xgboost.
            tot_data['y'] = le.fit_transform(tot_data['y'])
            DT = xgb.DMatrix(tot_data['x'], tot_data['y'])
            #params = {'max_depth': 30, 'num_class': 10, 'objective' : 'multi:softmax'}
            #params = **classifier_params
            print classifier_params
            # Train a single multi-class booster for 20 rounds.
            model_container['models'] = xgb.train(classifier_params, DT, 20)
            # Save models
            save_data(current_model_file, model_container)
def do_system_testing(dataset, result_path, feature_path, model_path, feature_params,
                      dataset_evaluation_mode='folds', classifier_method='xgboost', overwrite=False):
    """System testing.
    If extracted features are not found from disk, they are extracted but not saved.

    Classifies every test file of each fold with the fold's trained booster
    and writes a tab-separated (file, scene_label) result file.

    Parameters
    ----------
    dataset : class
        dataset class
    result_path : str
        path where the results are saved.
    feature_path : str
        path where the features are saved.
    model_path : str
        path where the models are saved.
    feature_params : dict
        parameter dict
    dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode, 'full' all material available is considered to belong to one fold.
        (Default value='folds')
    classifier_method : str ['xgboost']
        classifier method, only 'xgboost' supported
        (Default value='xgboost')
    overwrite : bool
        overwrite existing results
        (Default value=False)
    Returns
    -------
    nothing
    Raises
    -------
    ValueError
        classifier_method is unknown.
    IOError
        Model file not found.
        Audio file not found.
    """
    if classifier_method != 'xgboost':
        raise ValueError("Unknown classifier method ["+classifier_method+"]")
    # Check that target path exists, create if not
    check_path(result_path)
    for fold in dataset.folds(mode=dataset_evaluation_mode):
        current_result_file = get_result_filename(fold=fold, path=result_path)
        if not os.path.isfile(current_result_file) or overwrite:
            results = []
            # Load class model container
            model_filename = get_model_filename(fold=fold, path=model_path)
            if os.path.isfile(model_filename):
                model_container = load_data(model_filename)
            else:
                raise IOError("Model file not found [%s]" % model_filename)
            file_count = len(dataset.test(fold))
            for file_id, item in enumerate(dataset.test(fold)):
                progress(title_text='Testing',
                         fold=fold,
                         percentage=(float(file_id) / file_count),
                         note=os.path.split(item['file'])[1])
                # Load features from cache when available
                feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
                if os.path.isfile(feature_filename):
                    feature_data = load_data(feature_filename)['feat']
                else:
                    # Cache miss: load audio and extract on the fly (not saved)
                    if os.path.isfile(dataset.relative_to_absolute_path(item['file'])):
                        y, fs = load_audio(filename=dataset.relative_to_absolute_path(item['file']), mono=True, fs=feature_params['fs'])
                    else:
                        raise IOError("Audio file not found [%s]" % (item['file']))
                    feature_data = feature_extraction(y=y,
                                                      fs=fs,
                                                      include_mfcc0=feature_params['include_mfcc0'],
                                                      include_delta=feature_params['include_delta'],
                                                      include_acceleration=feature_params['include_acceleration'],
                                                      mfcc_params=feature_params['mfcc'],
                                                      delta_params=feature_params['mfcc_delta'],
                                                      acceleration_params=feature_params['mfcc_acceleration'],
                                                      statistics=False)['feat']
                # Normalize features with the fold's training-time normalizer
                feature_data = model_container['normalizer'].normalize(feature_data)
                # Do classification for the block
                if classifier_method == 'xgboost':
                    #feature_data = xgb.DMatrix(feature_data, numpy.repeat(item['scene_label'], len(feature_data), axis=0))
                    feature_data = xgb.DMatrix(feature_data)
                    # Map the winning class index back to a scene label.
                    current_result = dataset.scene_labels[do_classification_xgboost(feature_data, model_container)]
                else:
                    raise ValueError("Unknown classifier method ["+classifier_method+"]")
                # Store the result
                results.append((dataset.absolute_to_relative(item['file']), current_result))
            # Save testing results as tab-separated (file, label) rows
            with open(current_result_file, 'wt') as f:
                writer = csv.writer(f, delimiter='\t')
                for result_item in results:
                    writer.writerow(result_item)
def do_classification_gmm(feature_data, model_container):
    """GMM classification for given feature matrix.

    model container format:
    {
        'normalizer': normalizer class
        'models' :
        {
            'office' : mixture.GMM class
            'home' : mixture.GMM class
            ...
        }
    }

    Parameters
    ----------
    feature_data : numpy.ndarray [shape=(t, feature vector length)]
        feature matrix

    model_container : dict
        model container

    Returns
    -------
    result : str
        classification result as scene label
    """
    # Accumulate the frame-level log-likelihood of each class model;
    # start from -inf so an unwritten slot can never win the argmax.
    logls = numpy.empty(len(model_container['models']))
    logls.fill(-numpy.inf)
    for label_id, label in enumerate(model_container['models']):
        logls[label_id] = numpy.sum(model_container['models'][label].score(feature_data))
    classification_result_id = numpy.argmax(logls)
    # FIX: dict.keys() is a non-indexable view on Python 3; materialize it.
    # The enumeration above and keys() here traverse the same dict in the
    # same order, so the argmax index maps back to the matching label.
    return list(model_container['models'].keys())[classification_result_id]
def do_classification_xgboost(feature_data, model_container):
    """xgboost classification for a feature matrix.

    ``model_container['models']`` holds a trained xgboost booster whose
    ``predict`` returns per-frame class probabilities.

    Parameters
    ----------
    feature_data : xgb.DMatrix built from numpy.ndarray [shape=(t, feature vector length)]
        feature matrix

    model_container : dict
        model container

    Returns
    -------
    result : int
        index of the winning class in the probability matrix
    """
    # Legacy -inf initialisation kept from the GMM code path; the scores are
    # fully recomputed on the next statement.
    class_scores = numpy.full(10, -numpy.inf)
    # Sum per-frame log-probabilities over time, then pick the best class.
    class_scores = numpy.sum(numpy.log(model_container['models'].predict(feature_data)), 0)
    return numpy.argmax(class_scores)
def do_system_evaluation(dataset, result_path, dataset_evaluation_mode='folds'):
    """System evaluation. Testing outputs are collected and evaluated. Evaluation results are printed.

    Also accumulates a total confusion matrix and stores it (plus the final
    metric dict) into the module-level ``final_result``.

    Parameters
    ----------
    dataset : class
        dataset class
    result_path : str
        path where the results are saved.
    dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode, 'full' all material available is considered to belong to one fold.
        (Default value='folds')
    Returns
    -------
    nothing
    Raises
    -------
    IOError
        Result file not found
    """
    dcase2016_scene_metric = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
    tot_cm = numpy.zeros((dataset.scene_label_count, dataset.scene_label_count))
    results_fold = []
    for fold in dataset.folds(mode=dataset_evaluation_mode):
        dcase2016_scene_metric_fold = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
        results = []
        result_filename = get_result_filename(fold=fold, path=result_path)
        # Read the fold's tab-separated (file, predicted_label) rows
        if os.path.isfile(result_filename):
            with open(result_filename, 'rt') as f:
                for row in csv.reader(f, delimiter='\t'):
                    results.append(row)
        else:
            raise IOError("Result file not found [%s]" % result_filename)
        y_true = []
        y_pred = []
        for result in results:
            # Ground truth comes from the dataset metadata for each file
            y_true.append(dataset.file_meta(result[0])[0]['scene_label'])
            y_pred.append(result[1])
        dcase2016_scene_metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
        dcase2016_scene_metric_fold.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
        results_fold.append(dcase2016_scene_metric_fold.results())
        tot_cm += confusion_matrix(y_true, y_pred)
    print tot_cm
    #plot_cm(tot_cm, dataset.scene_labels,name=plot_name)
    #joblib.dump(tot_cm, plot_name + '.pkl')
    final_result['tot_cm'] = tot_cm
    # Overall accuracy = trace / total of the summed confusion matrix
    final_result['tot_cm_acc'] = numpy.sum(numpy.diag(tot_cm))/numpy.sum(tot_cm)
    results = dcase2016_scene_metric.results()
    # Pretty-print a per-class accuracy table, one extra column per fold.
    print "  File-wise evaluation, over %d folds" % dataset.fold_count
    fold_labels = ''
    separator = '     =====================+======+======+==========+  +'
    if dataset.fold_count > 1:
        for fold in dataset.folds(mode=dataset_evaluation_mode):
            fold_labels += " {:8s} |".format('Fold'+str(fold))
            separator += "==========+"
    print "     {:20s} | {:4s} : {:4s} | {:8s} |  |".format('Scene label', 'Nref', 'Nsys', 'Accuracy')+fold_labels
    print separator
    for label_id, label in enumerate(sorted(results['class_wise_accuracy'])):
        fold_values = ''
        if dataset.fold_count > 1:
            for fold in dataset.folds(mode=dataset_evaluation_mode):
                fold_values += " {:5.1f} % |".format(results_fold[fold-1]['class_wise_accuracy'][label] * 100)
        print "     {:20s} | {:4d} : {:4d} | {:5.1f} % |  |".format(label,
                                                                    results['class_wise_data'][label]['Nref'],
                                                                    results['class_wise_data'][label]['Nsys'],
                                                                    results['class_wise_accuracy'][label] * 100)+fold_values
    print separator
    fold_values = ''
    if dataset.fold_count > 1:
        for fold in dataset.folds(mode=dataset_evaluation_mode):
            fold_values += " {:5.1f} % |".format(results_fold[fold-1]['overall_accuracy'] * 100)
    print "     {:20s} | {:4d} : {:4d} | {:5.1f} % |  |".format('Overall accuracy',
                                                                results['Nref'],
                                                                results['Nsys'],
                                                                results['overall_accuracy'] * 100)+fold_values
    final_result['result'] = results
# Script entry point; expected errors are turned into a clean exit message.
if __name__ == "__main__":
    try:
        sys.exit(main(sys.argv))
    except (ValueError, IOError) as e:
        sys.exit(e)
|
|
#
# Copyright (c) 2012-2017 Kevin Steves <kevin.steves@pobox.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import sys
import logging
import xml.etree.ElementTree as etree
from . import __version__, DEBUG1, DEBUG2, DEBUG3
_encoding = 'utf-8'
_tags_forcelist = set(['entry', 'member'])
class PanConfigError(Exception):
    """Raised for errors parsing or serializing a PAN-OS configuration."""
class PanConfig:
    """Parse and query a PAN-OS/Panorama XML configuration.

    Wraps an ElementTree of the configuration and serializes it (or a
    subtree selected by a limited xpath) to XML text, nested Python
    data structures, flat xpath listings, or PAN-OS "set" CLI commands.
    """
    def __init__(self,
                 config=None,
                 tags_forcelist=_tags_forcelist):
        # config: an XML string/bytes, or an already-parsed ElementTree
        # element (anything exposing a .tag attribute).
        # NOTE(review): tags_forcelist is accepted but never stored;
        # __serialize_py consults the module-level _tags_forcelist
        # directly -- confirm the parameter is intentionally unused.
        self._log = logging.getLogger(__name__).log
        self._config_version = 0 # 0 indicates not yet set
        self._config_panorama = None
        self._config_multi_vsys = None
        self._log(DEBUG3, 'Python version: %s', sys.version)
        self._log(DEBUG3, 'xml.etree.ElementTree version: %s', etree.VERSION)
        self._log(DEBUG3, 'pan-python version: %s', __version__)
        if config is None:
            raise PanConfigError('no config')
        self._log(DEBUG2, '%s', type(config))
        if hasattr(config, 'tag'):
            self.config_root = config
        else:
            try:
                self.config_root = etree.fromstring(config)
            except etree.ParseError as msg:
                raise PanConfigError('ElementTree.fromstring ParseError: %s'
                                     % msg)
        self._log(DEBUG1, 'config_root: %s', self.config_root)
    def __find_xpath(self, xpath=None):
        """Return elements matching xpath, or [config_root] if xpath is None.

        Raises PanConfigError on an invalid xpath expression.
        """
        # Not a true Xpath
        # http://docs.python.org/dev/library/xml.etree.elementtree.html#xpath-support
        self._log(DEBUG1, 'xpath: %s', xpath)
        if xpath:
            try:
                nodes = self.config_root.findall(xpath)
            except SyntaxError as msg:
                raise PanConfigError('ElementTree.find SyntaxError: %s' % msg)
        else:
            nodes = [self.config_root]
        self._log(DEBUG1, 'xpath nodes: %s', nodes)
        return nodes
    def config_version(self):
        """Return the root 'version' attribute (None if absent); cached.

        Only consulted when the root tag is 'config'; the sentinel 0
        distinguishes "not yet computed" from a legitimate None result.
        """
        if self._config_version != 0:
            return self._config_version
        self._config_version = None
        if self.config_root.tag == 'config':
            self._config_version = \
                self.config_root.get('version', default=None)
        return self._config_version
    def config_panorama(self):
        """Return True if this looks like a Panorama config; cached.

        Detection: presence of a ./panorama or device-group element.
        Returns None if the root tag is not 'config'.
        """
        if self._config_panorama is not None:
            return self._config_panorama
        xpaths = [
            "./panorama",
            "./devices/entry[@name='localhost.localdomain']/device-group",
        ]
        if self.config_root.tag == 'config':
            for xpath in xpaths:
                elem = self.config_root.find(xpath)
                if elem is not None:
                    self._config_panorama = True
                    break
            else:
                # for/else: no xpath matched.
                self._config_panorama = False
        return self._config_panorama
    def config_multi_vsys(self):
        """Return True if more than one vsys entry exists; cached.

        Returns None if the root tag is not 'config'.
        """
        if self._config_multi_vsys is not None:
            return self._config_multi_vsys
        path = "./devices/entry[@name='localhost.localdomain']/vsys/entry"
        if self.config_root.tag == 'config':
            nodes = self.config_root.findall(path)
            if len(nodes) > 1:
                self._config_multi_vsys = True
            else:
                self._config_multi_vsys = False
        return self._config_multi_vsys
    def xml(self, xpath=None):
        """Serialize nodes matching xpath to an XML string.

        Returns None when nothing matches or the result is empty.
        """
        nodes = self.__find_xpath(xpath)
        if not nodes:
            return None
        s = ''.encode()
        for elem in nodes:
            # NOTE(review): tostring() with an explicit encoding prepends
            # an XML declaration per element, so multiple matches yield
            # multiple declarations in the concatenation -- confirm intended.
            s += etree.tostring(elem, encoding=_encoding)
        if not s:
            return None
        self._log(DEBUG3, 'xml: %s', type(s))
        self._log(DEBUG3, 'xml.decode(): %s', type(s.decode(_encoding)))
        return s.decode(_encoding)
    def python(self, xpath=None):
        """Serialize nodes matching xpath to nested Python structures.

        Returns a dict (possibly merged over multiple matches), or None
        when nothing matches.
        """
        nodes = self.__find_xpath(xpath)
        if not nodes:
            return None
        d = {}
        if len(nodes) > 1:
            for elem in nodes:
                self.__serialize_py(elem, d)
        else:
            self.__serialize_py(nodes[0], d)
        return d
    def __serialize_py(self, elem, obj, forcelist=False):
        """Recursively merge elem into dict obj.

        When forcelist is true, obj[tag] accumulates a list (used for
        repeated tags and for tags in _tags_forcelist).
        """
        tag = elem.tag
        text = elem.text
        tail = elem.tail # unused
        text_strip = None
        if text:
            text_strip = text.strip()
        attrs = elem.items()
        self._log(DEBUG3, 'TAG(forcelist=%s): "%s"', forcelist, tag)
        if forcelist:
            if tag not in obj:
                obj[tag] = []
            if not len(elem) and not text_strip and not attrs:
                # Empty element: represented as None.
                obj[tag].append(None)
                return
            if not len(elem) and text_strip and not attrs:
                # Text-only leaf: keep the raw (unstripped) text.
                obj[tag].append(text)
                return
            obj[tag].append({})
            o = obj[tag][-1]
        else:
            if not len(elem) and not text_strip and not attrs:
                obj[tag] = None
                return
            if not len(elem) and text_strip and not attrs:
                # Text-only leaf: map yes/no to booleans, else raw text.
                if text_strip == 'yes':
                    obj[tag] = True
                elif text_strip == 'no':
                    obj[tag] = False
                else:
                    obj[tag] = text
                return
            obj[tag] = {}
            o = obj[tag]
        for k, v in attrs:
            # o['@' + k] = v
            o[k] = v
        if text_strip:
            # NOTE(review): stores the element's own text under its tag
            # name *inside* its child dict -- confirm this key is intended.
            o[tag] = text
        if len(elem):
            # Count child tags so repeated tags are serialized as lists.
            tags = {}
            for e in elem:
                if e.tag in tags:
                    tags[e.tag] += 1
                else:
                    tags[e.tag] = 1
            for e in elem:
                forcelist = False
                if e.tag in _tags_forcelist or tags[e.tag] > 1:
                    forcelist = True
                self.__serialize_py(e, o, forcelist)
    def flat(self, path, xpath=None):
        """Serialize nodes matching xpath to a flat list of path strings.

        path is the prefix prepended to every generated entry.
        Returns None when nothing matches.
        """
        nodes = self.__find_xpath(xpath)
        if not nodes:
            return None
        obj = []
        for elem in nodes:
            self.__serialize_flat(elem, path + elem.tag, obj)
        return obj
    def __serialize_flat(self, elem, path, obj):
        """Recursively append 'path[=value]' strings for elem to obj."""
        tag = elem.tag
        text = elem.text
        tail = elem.tail # unused
        text_strip = None
        if text:
            text_strip = text.strip()
        attrs = elem.items()
        self._log(DEBUG3, 'TAG(elem=%d): "%s"', len(elem), tag)
        self._log(DEBUG3, 'text_strip: "%s"', text_strip)
        self._log(DEBUG3, 'attrs: %s', attrs)
        self._log(DEBUG3, 'path: "%s"', path)
        self._log(DEBUG3, 'obj: %s', obj)
        self._log(DEBUG3, '')
        if not text_strip:
            obj.append(path)
        elif text_strip:
            lines = text.splitlines()
            if len(lines) > 1:
                # Multi-line text: one indexed entry per line.
                n = 1
                for line in lines:
                    s = path + '[%d]="%s"' % (n, line)
                    obj.append(s)
                    n += 1
            else:
                s = path + '="%s"' % text
                obj.append(s)
        for k, v in attrs:
            # Attributes extend the path as xpath-style predicates.
            path += "[@%s='%s']" % (k, v)
            obj.append(path)
        for e in elem:
            self.__serialize_flat(e, path + '/' + e.tag, obj)
    def __quote_arg(self, s):
        """Quote a CLI argument that contains spaces or double quotes."""
        # XXX string with " etc.
        if '"' in s:
            return "'%s'" % s
        if ' ' in s:
            return '"%s"' % s
        return s
    def set_cli(self, path, xpath=None, member_list=False):
        """Serialize nodes matching xpath to PAN-OS 'set' CLI commands.

        path is the command prefix; member_list collapses <member>
        siblings into a single bracketed list argument.
        Returns None when nothing matches.
        """
        nodes = self.__find_xpath(xpath)
        if not nodes:
            return None
        obj = []
        for elem in nodes:
            self.__serialize_set_cli(elem, path + elem.tag, obj,
                                     member_list)
        return obj
    def __serialize_set_cli(self, elem, path, obj, member_list=False):
        """Recursively append 'set' CLI command strings for elem to obj."""
        tag = elem.tag
        text = elem.text
        tail = elem.tail # unused
        text_strip = None
        if text:
            text_strip = text.strip()
        attrs = elem.items()
        self._log(DEBUG3, 'TAG(elem=%d member_list=%s): "%s"',
                  len(elem), member_list, tag)
        self._log(DEBUG3, 'text_strip: "%s"', text_strip)
        self._log(DEBUG3, 'attrs: %s', attrs)
        self._log(DEBUG3, 'path: "%s"', path)
        self._log(DEBUG3, 'obj: %s', obj)
        self._log(DEBUG3, '')
        for k, v in attrs:
            # Only the 'name' attribute participates in the command path.
            if k == 'name':
                path += ' ' + self.__quote_arg(v)
        if member_list:
            nodes = elem.findall('./member')
            self._log(DEBUG3, 'TAG(members=%d): "%s"', len(nodes), tag)
            if len(nodes) > 1:
                # Collapse multiple members into one [ ... ] list argument.
                members = []
                for e in nodes:
                    members.append(self.__quote_arg(e.text))
                path += ' [ ' + ' '.join(members) + ' ]'
                obj.append(path)
                return
        if not len(elem):
            if text_strip:
                path += ' ' + self.__quote_arg(text)
            obj.append(path)
        for e in elem:
            tpath = path
            # 'entry'/'member' tags are structural; they contribute their
            # name/text, not their tag, to the command path.
            if e.tag not in ['entry', 'member']:
                tpath += ' ' + e.tag
            self.__serialize_set_cli(e, tpath, obj, member_list)
    def config_xpaths(self):
        """Return the list of top-level xpaths for this config's version.

        Selects a per-version xpath table (PAN-OS, multi-vsys PAN-OS or
        Panorama) and returns it as a list of xpath strings.  Newer
        versions are derived from older tables by positional insert(),
        so the line order inside each table is significant.
        """
        xpaths_panos_4_1 = '''
./devices/entry[@name='localhost.localdomain']/deviceconfig
./devices/entry[@name='localhost.localdomain']/network
./shared
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/global-protect
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/rulebase
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/zone
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/url-admin-override
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/captive-portal
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/group-mapping
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/user-id-agent
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/ts-agent
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/url-content-types
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/region
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/application-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/application-filter
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/application
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/threats
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/schedule
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/address-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/address
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/email-scheduler
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/pdf-summary-report
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/report-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/reports
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/service-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/service
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/profile-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/profiles
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/setting
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/display-name
./mgt-config
'''
        xpaths_panos_5_0 = '''
./devices/entry[@name='localhost.localdomain']/deviceconfig
./devices/entry[@name='localhost.localdomain']/network
./shared
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/rulebase
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/region
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/application-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/application-filter
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/application
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/threats
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/schedule
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/address-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/address
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/external-list
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/email-scheduler
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/pdf-summary-report
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/report-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/reports
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/service-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/service
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/profile-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/profiles
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/global-protect
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/zone
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/ocsp-responder
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/url-admin-override
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/user-id-collector
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/captive-portal
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/group-mapping
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/user-id-agent-sequence
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/user-id-agent
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/ts-agent
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/url-content-types
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/setting
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/display-name
./mgt-config
'''
        # add: tag, vm-info-source, import
        xpaths_panos_6_0 = '''
./devices/entry[@name='localhost.localdomain']/deviceconfig
./devices/entry[@name='localhost.localdomain']/network
./shared
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/rulebase
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/tag
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/region
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/application-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/application-filter
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/application
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/threats
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/schedule
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/address-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/address
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/external-list
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/email-scheduler
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/pdf-summary-report
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/report-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/reports
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/service-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/service
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/profile-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/profiles
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/global-protect
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/zone
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/ocsp-responder
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/url-admin-override
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/user-id-collector
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/captive-portal
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/group-mapping
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/user-id-agent-sequence
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/vm-info-source
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/user-id-agent
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/ts-agent
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/url-content-types
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/import
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/setting
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/display-name
./mgt-config
'''
        xpaths_panos_7_0 = '''
./devices/entry[@name='localhost.localdomain']/deviceconfig
./devices/entry[@name='localhost.localdomain']/network
./shared
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/zone
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/vm-info-source
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/user-id-collector
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/user-id-agent-sequence
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/user-id-agent
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/url-content-types
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/url-admin-override
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/ts-agent
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/threats
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/tag
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/setting
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/service-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/service
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/schedule
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/rulebase
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/route
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/reports
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/report-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/region
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/profiles
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/profile-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/pdf-summary-report
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/import
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/group-mapping
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/global-protect
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/external-list
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/email-scheduler
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/dns-proxy
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/display-name
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/captive-portal
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/application-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/application-filter
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/application
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/address-group
./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/address
./mgt-config
'''
        # 7.1 derives from 7.0 by positional insert: adds application-tag.
        s = '''./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/application-tag'''
        x = xpaths_panos_7_0.split('\n')
        x.insert(34, s)
        xpaths_panos_7_1 = '\n'.join(x)
        # 8.0 derives from 7.1: adds user-id-ssl-auth and
        # authentication-object.
        s = '''./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/user-id-ssl-auth'''
        x = xpaths_panos_7_1.split('\n')
        x.insert(6, s)
        s = '''./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/authentication-object'''
        x.insert(35, s)
        xpaths_panos_8_0 = '\n'.join(x)
        # 9.1 derives from 8.0: adds sdwan-interface-profile and
        # dynamic-user-group.
        s = '''./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/sdwan-interface-profile'''
        x = xpaths_panos_8_0.split('\n')
        x.insert(18, s)
        s = '''./devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/dynamic-user-group'''
        x.insert(33, s)
        xpaths_panos_9_1 = '\n'.join(x)
        xpaths_panos_multi_vsys_4_1 = '''
./devices/entry[@name='localhost.localdomain']/deviceconfig
./devices/entry[@name='localhost.localdomain']/network
./shared
./devices/entry[@name='localhost.localdomain']/vsys
./mgt-config
'''
        xpaths_panorama_4_1 = '''
./devices/entry[@name='localhost.localdomain']/deviceconfig
./devices/entry[@name='localhost.localdomain']/device-group
./panorama
./shared
./mgt-config
'''
        xpaths_panorama_5_0 = '''
./devices/entry[@name='localhost.localdomain']/deviceconfig
./devices/entry[@name='localhost.localdomain']/device-group
./devices/entry[@name='localhost.localdomain']/template
./devices/entry[@name='localhost.localdomain']/log-collector
./devices/entry[@name='localhost.localdomain']/log-collector-group
./panorama
./shared
./mgt-config
'''
        # add: vmware-service-manager, predefined
        xpaths_panorama_6_0 = '''
./devices/entry[@name='localhost.localdomain']/deviceconfig
./devices/entry[@name='localhost.localdomain']/device-group
./devices/entry[@name='localhost.localdomain']/template
./devices/entry[@name='localhost.localdomain']/log-collector
./devices/entry[@name='localhost.localdomain']/log-collector-group
./devices/entry[@name='localhost.localdomain']/vmware-service-manager
./predefined
./panorama
./shared
./mgt-config
'''
        # remove: predefined
        xpaths_panorama_6_1 = '''
./devices/entry[@name='localhost.localdomain']/deviceconfig
./devices/entry[@name='localhost.localdomain']/device-group
./devices/entry[@name='localhost.localdomain']/template
./devices/entry[@name='localhost.localdomain']/log-collector
./devices/entry[@name='localhost.localdomain']/log-collector-group
./devices/entry[@name='localhost.localdomain']/vmware-service-manager
./panorama
./shared
./mgt-config
'''
        # reorder
        # add readonly
        xpaths_panorama_7_0 = '''
./devices/entry[@name='localhost.localdomain']/device-group
./devices/entry[@name='localhost.localdomain']/deviceconfig
./devices/entry[@name='localhost.localdomain']/log-collector
./devices/entry[@name='localhost.localdomain']/log-collector-group
./devices/entry[@name='localhost.localdomain']/template
./devices/entry[@name='localhost.localdomain']/template-stack
./devices/entry[@name='localhost.localdomain']/vmware-service-manager
./mgt-config
./panorama
./readonly
./shared
'''
        # remove vmware-service-manager
        # add plugins, wildfire-appliance, wildfire-appliance-cluster
        xpaths_panorama_8_0 = '''
./devices/entry[@name='localhost.localdomain']/device-group
./devices/entry[@name='localhost.localdomain']/deviceconfig
./devices/entry[@name='localhost.localdomain']/log-collector
./devices/entry[@name='localhost.localdomain']/log-collector-group
./devices/entry[@name='localhost.localdomain']/plugins
./devices/entry[@name='localhost.localdomain']/template
./devices/entry[@name='localhost.localdomain']/template-stack
./devices/entry[@name='localhost.localdomain']/wildfire-appliance
./devices/entry[@name='localhost.localdomain']/wildfire-appliance-cluster
./mgt-config
./panorama
./readonly
./shared
'''
        # Default to the oldest tables, then upgrade by detected version.
        xpaths_panos = xpaths_panos_4_1
        xpaths_panos_multi_vsys = xpaths_panos_multi_vsys_4_1
        xpaths_panorama = xpaths_panorama_4_1
        if self.config_version() is not None:
            if self.config_version() in ['5.0.0', '5.1.0']:
                xpaths_panos = xpaths_panos_5_0
                xpaths_panorama = xpaths_panorama_5_0
            elif self.config_version() in ['6.0.0']:
                xpaths_panos = xpaths_panos_6_0
                xpaths_panorama = xpaths_panorama_6_0
            elif self.config_version() in ['6.1.0']:
                xpaths_panos = xpaths_panos_6_0
                xpaths_panorama = xpaths_panorama_6_1
            elif self.config_version() in ['7.0.0']:
                xpaths_panos = xpaths_panos_7_0
                xpaths_panorama = xpaths_panorama_7_0
            elif self.config_version() in ['7.1.0']:
                xpaths_panos = xpaths_panos_7_1
                xpaths_panorama = xpaths_panorama_7_0
            elif self.config_version() in ['8.0.0', '8.1.0', '9.0.0']:
                xpaths_panos = xpaths_panos_8_0
                xpaths_panorama = xpaths_panorama_8_0
            elif self.config_version() in ['9.1.0']:
                xpaths_panos = xpaths_panos_9_1
                xpaths_panorama = xpaths_panorama_8_0
        if self.config_multi_vsys():
            xpaths = xpaths_panos_multi_vsys
        elif self.config_panorama():
            xpaths = xpaths_panorama
        else:
            xpaths = xpaths_panos
        self._log(DEBUG2, '%s', xpaths)
        xpaths = xpaths.split('\n')
        # Drop the empty strings produced by the leading/trailing newlines.
        xpaths = [s for s in xpaths if s]
        self._log(DEBUG1, 'xpaths: %d', len(xpaths))
        return xpaths
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for depthwise convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
# Reference implementation of depthwise_conv2d
def ReferenceDepthwiseConv2D(input_tensor, filter_tensor, strides, padding,
                             data_format=None):
  """Reference depthwise conv2d built from per-channel regular conv2d.

  Slices the input and the filter along the input-channel axis, runs an
  ordinary conv2d on each channel slice, and concatenates the results
  back together along the channel axis.
  """
  # NCHW keeps channels on axis 1; NHWC (the default) on axis 3.
  channel_axis = 1 if data_format == "NCHW" else 3
  num_channels = filter_tensor.shape[2]
  per_channel_outputs = []
  for ch in xrange(num_channels):
    if data_format == "NCHW":
      one_channel_input = input_tensor[:, ch:ch + 1, :, :]
    else:
      one_channel_input = input_tensor[:, :, :, ch:ch + 1]
    # Filters are laid out as [H, W, in_channels, depth_multiplier].
    one_channel_filter = filter_tensor[:, :, ch:ch + 1, :]
    per_channel_outputs.append(
        nn_ops.conv2d(one_channel_input, one_channel_filter,
                      strides, padding,
                      data_format=data_format,
                      name="depthwise_slice_%d" % ch))
  return array_ops.concat(per_channel_outputs, channel_axis)
def ConfigsToTest():
  """Iterator for different convolution shapes, strides and paddings.

  Yields:
    Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
    convolution parameters.
  """
  input_sizes = [[4, 5, 5, 48], [4, 8, 8, 84], [4, 17, 17, 48], [4, 9, 27, 8],
                 [4, 31, 31, 7], [4, 35, 35, 2], [4, 147, 147, 2],
                 [3, 299, 299, 3], [5, 183, 183, 1]]
  filter_sizes = [[1, 1, 48, 2], [1, 3, 84, 1], [3, 1, 48, 4], [3, 3, 8, 1],
                  [3, 3, 7, 1], [5, 5, 2, 1], [3, 3, 2, 8], [2, 2, 3,
                                                             8], [5, 5, 1, 2]]
  out_sizes = [[4, 5, 5, 96], [4, 8, 8, 84], [4, 17, 17, 192], [4, 9, 27, 8],
               [4, 31, 31, 7], [4, 35, 35, 2], [4, 49, 49, 16],
               [3, 150, 150, 24], [5, 92, 92, 2]]
  strides = [1, 1, 1, 1, 1, 1, 3, 2, 2]
  # pylint: disable=invalid-name
  VALID = "VALID"
  SAME = "SAME"
  # pylint: enable=invalid-name
  # Exactly one padding per config (9 entries).  An earlier version carried
  # a stray 10th entry that zip() silently dropped.
  paddings = [SAME, SAME, SAME, SAME, SAME, SAME, VALID, SAME, SAME]
  for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
                           paddings):
    yield i, f, o, s, p
def CheckGradConfigsToTest():
  """Iterator for different convolution shapes, strides and paddings.

  compute_gradient_error() is very expensive, so these configs are kept
  deliberately small.

  Yields:
    Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
    convolution parameters.
  """
  # pylint: disable=invalid-name
  VALID = "VALID"
  SAME = "SAME"
  # pylint: enable=invalid-name
  # (input_size, filter_size, out_size, stride, padding) per row.
  grad_configs = [
      ([2, 5, 8, 1], [4, 4, 1, 2], [2, 5, 8, 2], 1, SAME),
      ([4, 5, 5, 1], [2, 2, 1, 2], [4, 2, 2, 2], 2, VALID),
      ([2, 4, 4, 2], [3, 1, 2, 2], [2, 4, 4, 4], 1, SAME),
      ([1, 15, 15, 2], [1, 3, 2, 1], [1, 15, 15, 2], 1, SAME),
      ([2, 15, 16, 1], [3, 3, 1, 2], [2, 5, 5, 2], 3, VALID),
  ]
  for config in grad_configs:
    yield config
class DepthwiseConv2DTest(xla_test.XLATestCase):
# This is testing that depthwise_conv2d and depthwise_conv2d_native
# produce the same results. It also tests that NCHW and NWHC
# formats agree, by comparing the depthwise_conv2d_native with
# 'NCHW' format (with transposition) matches the 'NHWC' format using
# the higher level interface.
  def _VerifyValues(self,
                    tensor_in_sizes,
                    filter_in_sizes,
                    stride,
                    padding,
                    data_type,
                    data_format="NHWC"):
    """Verifies the output values of the convolution function.

    Compares depthwise_conv2d_native (run in the XLA test scope) against
    the CPU reference implementation built from per-channel conv2d.

    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [filter_rows, filter_cols, input_depth, depth_multiplier].
      stride: Stride.
      padding: Padding type.
      data_type: The data type to use (np.float32 or np.float64).
      data_format: The data_format of the input. "NHWC" or "NCHW".
    """
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
      total_size_1 *= s
    for s in filter_in_sizes:
      total_size_2 *= s
    # Initializes the input and filter tensor with numbers incrementing from 1.
    x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)],
                  dtype=data_type).reshape(tensor_in_sizes)
    x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)],
                  dtype=data_type).reshape(filter_in_sizes)
    with self.session() as sess:
      # Looser tolerance for float32; only float32/float64 are accepted.
      if data_type == np.float32:
        tolerance = 1e-4
      else:
        self.assertEqual(data_type, np.float64)
        tolerance = 1e-8
      t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=data_type)
      t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=data_type)
      native_t1 = t1
      strides = [1, stride, stride, 1]
      if data_format == "NCHW":
        # Transpose from NWHC input to NCHW
        # Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
        native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
        strides = [1, 1, stride, stride]
      with self.test_scope():
        conv_native = nn_ops.depthwise_conv2d_native(
            native_t1,
            t2,
            strides=strides,
            data_format=data_format,
            padding=padding)
      if data_format == "NCHW":
        # Transpose back from NCHW to NHWC
        conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])
      # Reference always runs in NHWC on the CPU.
      with ops.device("CPU"):
        conv_interface = ReferenceDepthwiseConv2D(
            t1, t2, strides=[1, stride, stride, 1], padding=padding)
      native_result = sess.run(conv_native, {t1: x1, t2: x2})
      interface_result = sess.run(conv_interface, {t1: x1, t2: x2})
    print("data_type:", data_type, "max diff = ",
          np.amax(np.absolute(native_result - interface_result)))
    self.assertAllClose(
        np.ravel(native_result), np.ravel(interface_result), rtol=tolerance)
def testDepthwiseConv2D(self):
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2D,", index, "th config:", input_size, "*",
filter_size, "stride:", stride, "padding:", padding)
for data_type in self.float_types:
# TODO(phawkins): the reference implementation only supports float32.
if data_type == np.float32:
self._VerifyValues(
input_size, filter_size, stride, padding, data_type)
def testDepthwiseConv2DFormat(self):
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DFormat,", index, "th config:", input_size,
"*", filter_size, "stride:", stride, "padding:", padding)
for data_type in self.float_types:
# TODO(phawkins): the reference implementation only supports float32.
if data_type == np.float32:
self._VerifyValues(
input_size,
filter_size,
stride,
padding,
data_type,
data_format="NCHW")
# This is testing against hand calculated results.
  def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
                        expected):
    """Verifies the output values of the depthwise convolution function.

    Runs depthwise_conv2d_native in the XLA test scope and compares the
    flattened result against hand-computed expected values.

    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [filter_rows, filter_cols, input_depth, depth_multiplier].
      stride: Stride.
      padding: Padding type.
      expected: An array containing the expected operation outputs.
    """
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
      total_size_1 *= s
    for s in filter_in_sizes:
      total_size_2 *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)],
                  dtype=np.float32).reshape(tensor_in_sizes)
    x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)],
                  dtype=np.float32).reshape(filter_in_sizes)
    with self.session() as sess:
      t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=np.float32)
      t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=np.float32)
      with self.test_scope():
        conv = nn_ops.depthwise_conv2d_native(
            t1, t2, strides=[1, stride, stride, 1], padding=padding)
      value = sess.run(conv, {t1: x1, t2: x2})
    print("value = ", value)
    # Element-wise closeness plus a static shape check against the op.
    self.assertArrayNear(expected, np.ravel(value), 1e-4)
    self.assertShapeEqual(value, conv)
  def testConv2D2x2Filter(self):
    """Checks a 2x2 depthwise filter against hand-calculated outputs."""
    # The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
    #
    # [ (1.0, 2.0), (3.0, 4.0), ( 5.0,  6.0) ]
    # [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
    #
    # We can view this as two inputs
    #
    # input depth 0:
    #
    # [ 1.0,  3.0,  5.0 ]
    # [ 7.0,  9.0, 11.0 ]
    #
    # input depth 1:
    #
    # [ 2.0,  4.0,  6.0 ]
    # [ 8.0, 10.0, 12.0 ]
    #
    # The filter looks like this (it has two 2 x 2 patches, each generating 2
    # depths):
    #
    # filter #0:
    #
    # [ (1.0,  3.0), ( 5.0,  7.0)]
    # [ (9.0, 11.0), (13.0, 15.0)]
    #
    # filter #1:
    #
    # [ ( 2.0,  4.0), ( 6.0,  8.0)]
    # [ (10.0, 12.0), (14.0, 16.0)]
    #
    # So the outputs are:
    #
    # (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
    #  1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
    # (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
    #  1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
    # (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
    #  2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
    # (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
    #  2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
    #
    # (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
    #  3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
    # (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
    #  3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
    # (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
    #  4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
    # (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
    #  4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
    expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
    self._VerifyHandValues(
        tensor_in_sizes=[1, 2, 3, 2],
        filter_in_sizes=[2, 2, 2, 2],
        stride=1,
        padding="VALID",
        expected=expected_output)
  def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
                            stride, padding):
    """Compares XLA and reference results for the input-gradient op.

    Args:
      input_sizes: Shape of the forward op's input (NHWC).
      filter_sizes: Shape of the depthwise filter.
      output_sizes: Shape of the forward op's output (NHWC).
      stride: Spatial stride, applied in both dimensions.
      padding: Padding type, "SAME" or "VALID".
    """
    x1 = np.random.rand(*filter_sizes).astype(np.float32)
    x2 = np.random.rand(*output_sizes).astype(np.float32)

    def _GetVal(use_xla):
      with self.session():
        t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
        t1 = array_ops.placeholder(np.float32, shape=filter_sizes)
        t2 = array_ops.placeholder(np.float32, shape=output_sizes)
        if use_xla:
          # Same op as the else branch, but built inside the XLA test
          # scope so it is compiled rather than run by the reference.
          with self.test_scope():
            backprop = nn_ops.depthwise_conv2d_native_backprop_input(
                t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
        else:
          backprop = nn_ops.depthwise_conv2d_native_backprop_input(
              t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
        ret = backprop.eval({t1: x1, t2: x2})
        self.assertShapeEqual(ret, backprop)
        return ret

    gpu_value = _GetVal(use_xla=True)
    cpu_value = _GetVal(use_xla=False)
    self.assertAllClose(cpu_value, gpu_value, rtol=1e-3, atol=1e-3)
def testDepthwiseConv2DInputGradCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DInputGradCompare,", index, "th config:",
input_size, "*", filter_size, "stride:", stride, "padding:",
padding)
self._CompareBackpropInput(input_size, filter_size, output_size, stride,
padding)
  def _CompareBackpropFilter(self,
                             input_sizes,
                             filter_sizes,
                             output_sizes,
                             stride,
                             padding,
                             data_format="NHWC"):
    """Compares XLA and reference results for the filter-gradient op.

    Args:
      input_sizes: Shape of the forward op's input, in NHWC order.
      filter_sizes: Shape of the depthwise filter.
      output_sizes: Shape of the forward op's output, in NHWC order.
      stride: Spatial stride, applied in both dimensions.
      padding: Padding type, "SAME" or "VALID".
      data_format: Layout the XLA op is asked to use; the inputs are
        transposed to match when it is "NCHW".
    """
    x0 = np.random.rand(*input_sizes).astype(np.float32)
    x2 = np.random.rand(*output_sizes).astype(np.float32)

    def _GetVal(use_xla):
      with self.session():
        t0 = array_ops.placeholder(np.float32, shape=input_sizes)
        t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
        t2 = array_ops.placeholder(np.float32, shape=output_sizes)
        native_t0 = t0
        native_t2 = t2
        strides = [1, stride, stride, 1]
        if use_xla:
          if data_format == "NCHW":
            # Transpose from NHWC input to NCHW
            # Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
            native_t0 = array_ops.transpose(t0, [0, 3, 1, 2])
            native_t2 = array_ops.transpose(t2, [0, 3, 1, 2])
            strides = [1, 1, stride, stride]
          with self.test_scope():
            backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
                native_t0,
                t1,
                native_t2,
                strides=strides,
                padding=padding,
                data_format=data_format)
        else:
          # For CPU, the format NCHW is not supported. Therefore we always use
          # NHWC here.
          backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
              native_t0, t1, native_t2, strides=strides, padding=padding)
        ret = backprop.eval({t0: x0, t2: x2})
        self.assertShapeEqual(ret, backprop)
        return ret

    gpu_value = _GetVal(use_xla=True)
    cpu_value = _GetVal(use_xla=False)
    self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
def testDepthwiseConv2DFilterGradCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DFilterGradCompare,", index, "th config:",
input_size, "*", filter_size, "producing output", output_size,
"stride:", stride, "padding:", padding)
self._CompareBackpropFilter(input_size, filter_size, output_size,
stride, padding)
def testDepthwiseConv2DFilterGradFormatNCHWCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DFilterGradFormatNCHWCompare,", index,
"th config:", input_size, "*", filter_size, "producing output",
output_size, "stride:", stride, "padding:", padding)
self._CompareBackpropFilter(
input_size,
filter_size,
output_size,
stride,
padding,
data_format="NCHW")
# Run the test suite when executed as a script.
if __name__ == "__main__":
  test.main()
|
|
"""
Utility functions for
- building and importing modules on test time, using a temporary location
- detecting if compilers are present
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import subprocess
import tempfile
import shutil
import atexit
import textwrap
import re
import pytest
from numpy.compat import asbytes, asstr
from numpy.testing import temppath
from importlib import import_module
try:
from hashlib import md5
except ImportError:
from md5 import new as md5 # noqa: F401
#
# Maintaining a temporary module directory
#
_module_dir = None
_module_num = 5403
def _cleanup():
    """Best-effort removal of the temp module dir (registered via atexit)."""
    global _module_dir
    if _module_dir is None:
        return
    try:
        sys.path.remove(_module_dir)
    except ValueError:
        pass
    try:
        shutil.rmtree(_module_dir)
    except (IOError, OSError):
        pass
    _module_dir = None
def get_module_dir():
    """Return the shared temporary directory for built test modules.

    The directory is created lazily on first use, scheduled for removal
    at interpreter exit, and kept on ``sys.path`` so that freshly built
    modules are importable.
    """
    global _module_dir
    if _module_dir is None:
        _module_dir = tempfile.mkdtemp()
        atexit.register(_cleanup)
    if _module_dir not in sys.path:
        sys.path.insert(0, _module_dir)
    return _module_dir
def get_temp_module_name():
    """Return a fresh, unused module name inside the temp module directory.

    Assumes single-threaded use; the counter is a plain module global.
    """
    global _module_num
    get_module_dir()  # ensure the directory exists and is on sys.path
    candidate = "_test_ext_module_%d" % _module_num
    _module_num += 1
    if candidate in sys.modules:
        # Should be impossible for a fresh counter value, but fail loudly.
        raise RuntimeError("Temporary module name already in use.")
    return candidate
def _memoize(func):
memo = {}
def wrapper(*a, **kw):
key = repr((a, kw))
if key not in memo:
try:
memo[key] = func(*a, **kw)
except Exception as e:
memo[key] = e
raise
ret = memo[key]
if isinstance(ret, Exception):
raise ret
return ret
wrapper.__name__ = func.__name__
return wrapper
#
# Building modules
#
@_memoize
def build_module(source_files, options=None, skip=None, only=None,
                 module_name=None):
    """
    Compile and import an f2py module, built from the given files.

    Parameters
    ----------
    source_files : list of str
        Paths of Fortran/C sources handed to f2py.
    options : list of str, optional
        Extra command-line options for f2py.
    skip, only : list of str, optional
        Routine names for f2py's ``skip:`` / ``only:`` filters.
    module_name : str, optional
        Name of the built extension module; autogenerated when omitted.

    Returns
    -------
    module
        The freshly imported extension module.
    """
    # Avoid mutable default arguments; normalize to fresh lists.
    options = list(options) if options else []
    skip = list(skip) if skip else []
    only = list(only) if only else []

    code = ("import sys; sys.path = %s; import numpy.f2py as f2py2e; "
            "f2py2e.main()" % repr(sys.path))

    d = get_module_dir()

    # Copy files
    dst_sources = []
    for fn in source_files:
        if not os.path.isfile(fn):
            raise RuntimeError("%s is not a file" % fn)
        dst = os.path.join(d, os.path.basename(fn))
        shutil.copyfile(fn, dst)
        dst_sources.append(dst)

        # Bring along any .f2py_f2cmap sitting next to a source file.
        fn = os.path.join(os.path.dirname(fn), '.f2py_f2cmap')
        if os.path.isfile(fn):
            dst = os.path.join(d, os.path.basename(fn))
            if not os.path.isfile(dst):
                shutil.copyfile(fn, dst)

    # Prepare options
    if module_name is None:
        module_name = get_temp_module_name()
    f2py_opts = ['-c', '-m', module_name] + options + dst_sources
    if skip:
        f2py_opts += ['skip:'] + skip
    if only:
        f2py_opts += ['only:'] + only

    # Build
    cwd = os.getcwd()
    try:
        os.chdir(d)
        cmd = [sys.executable, '-c', code] + f2py_opts
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        out, err = p.communicate()
        if p.returncode != 0:
            raise RuntimeError("Running f2py failed: %s\n%s"
                               % (cmd[4:], asstr(out)))
    finally:
        os.chdir(cwd)

    # Partial cleanup
    for fn in dst_sources:
        os.unlink(fn)

    # Import
    return import_module(module_name)
@_memoize
def build_code(source_code, options=None, skip=None, only=None, suffix=None,
               module_name=None):
    """
    Compile and import Fortran code using f2py.

    ``source_code`` is written to a temporary file with the given
    ``suffix`` (default ``.f``) and passed on to `build_module`.
    """
    # Avoid mutable default arguments; normalize before delegating.
    if suffix is None:
        suffix = '.f'
    with temppath(suffix=suffix) as path:
        with open(path, 'w') as f:
            f.write(source_code)
        return build_module([path], options=options or [], skip=skip or [],
                            only=only or [], module_name=module_name)
#
# Check if compilers are available at all...
#
# Cached (has_c, has_f77, has_f90) availability triple; None until probed.
_compiler_status = None


def _get_compiler_status():
    """Probe once for C, Fortran 77 and Fortran 90 compilers.

    Returns a ``(has_c, has_f77, has_f90)`` tuple of bools; the result is
    cached in the module-global ``_compiler_status``.
    """
    global _compiler_status
    if _compiler_status is not None:
        return _compiler_status
    _compiler_status = (False, False, False)

    # XXX: this is really ugly. But I don't know how to invoke Distutils
    #      in a safer way...
    # The probe runs in a subprocess so a broken toolchain setup cannot
    # corrupt this interpreter; '%%' escapes survive the outer %-format.
    code = textwrap.dedent("""\
        import os
        import sys
        sys.path = %(syspath)s

        def configuration(parent_name='',top_path=None):
            global config
            from numpy.distutils.misc_util import Configuration
            config = Configuration('', parent_name, top_path)
            return config

        from numpy.distutils.core import setup
        setup(configuration=configuration)

        config_cmd = config.get_config_cmd()
        have_c = config_cmd.try_compile('void foo() {}')
        print('COMPILERS:%%d,%%d,%%d' %% (have_c,
                                          config.have_f77c(),
                                          config.have_f90c()))
        sys.exit(99)
        """)
    code = code % dict(syspath=repr(sys.path))

    with temppath(suffix='.py') as script:
        with open(script, 'w') as f:
            f.write(code)

        cmd = [sys.executable, script, 'config']
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        out, err = p.communicate()

    # The child prints its findings; anything else leaves the default
    # all-False status in place.
    m = re.search(br'COMPILERS:(\d+),(\d+),(\d+)', out)
    if m:
        _compiler_status = (bool(int(m.group(1))), bool(int(m.group(2))),
                            bool(int(m.group(3))))
    # Finished
    return _compiler_status
def has_c_compiler():
    """Return True when a working C compiler was detected."""
    c_status, _, _ = _get_compiler_status()
    return c_status
def has_f77_compiler():
    """Return True when a working Fortran 77 compiler was detected."""
    _, f77_status, _ = _get_compiler_status()
    return f77_status
def has_f90_compiler():
    """Return True when a working Fortran 90 compiler was detected."""
    _, _, f90_status = _get_compiler_status()
    return f90_status
#
# Building with distutils
#
@_memoize
def build_module_distutils(source_files, config_code, module_name, **kw):
    """
    Build a module via distutils and import it.

    Parameters
    ----------
    source_files : list of str
        Source files copied into the temporary module directory.
    config_code : str
        Python code inserted into the generated setup script's
        ``configuration()`` body (re-indented automatically).
    module_name : str
        Name under which the built module is imported.
    """
    from numpy.distutils.misc_util import Configuration
    from numpy.distutils.core import setup

    d = get_module_dir()

    # Copy files
    dst_sources = []
    for fn in source_files:
        if not os.path.isfile(fn):
            raise RuntimeError("%s is not a file" % fn)
        dst = os.path.join(d, os.path.basename(fn))
        shutil.copyfile(fn, dst)
        dst_sources.append(dst)

    # Build script; re-indent config_code to sit inside configuration().
    config_code = textwrap.dedent(config_code).replace("\n", "\n    ")

    code = textwrap.dedent("""\
        import os
        import sys
        sys.path = %(syspath)s

        def configuration(parent_name='',top_path=None):
            from numpy.distutils.misc_util import Configuration
            config = Configuration('', parent_name, top_path)
            %(config_code)s
            return config

        if __name__ == "__main__":
            from numpy.distutils.core import setup
            setup(configuration=configuration)
        """) % dict(config_code=config_code, syspath=repr(sys.path))

    script = os.path.join(d, get_temp_module_name() + '.py')
    dst_sources.append(script)
    # `with` guarantees the script is flushed and closed before running it.
    with open(script, 'wb') as f:
        f.write(asbytes(code))

    # Build
    cwd = os.getcwd()
    try:
        os.chdir(d)
        cmd = [sys.executable, script, 'build_ext', '-i']
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        out, err = p.communicate()
        if p.returncode != 0:
            # cmd[1:] shows the script and its arguments; the previous
            # cmd[4:] slice was always empty for this 4-element command.
            raise RuntimeError("Running distutils build failed: %s\n%s"
                               % (cmd[1:], asstr(out)))
    finally:
        os.chdir(cwd)

    # Partial cleanup
    for fn in dst_sources:
        os.unlink(fn)

    # Import
    __import__(module_name)
    return sys.modules[module_name]
#
# Unittest convenience
#
class F2PyTest(object):
    # Class-level configuration; subclasses override these.
    code = None          # inline Fortran source text (built via build_code)
    sources = None       # or: list of source file paths (built via build_module)
    options = []
    skip = []
    only = []
    suffix = '.f'        # file suffix used when building from `code`
    module = None        # the built extension module, set by setup()
    module_name = None

    def setup(self):
        """Build the subject module once, skipping when no toolchain exists."""
        if sys.platform == 'win32':
            pytest.skip('Fails with MinGW64 Gfortran (Issue #9673)')

        if self.module is not None:
            return

        # Check compiler availability first
        if not has_c_compiler():
            pytest.skip("No C compiler available")

        codes = []
        if self.sources:
            codes.extend(self.sources)
        if self.code is not None:
            # Only the suffix matters for deciding which Fortran dialect
            # the inline `code` requires.
            codes.append(self.suffix)

        needs_f77 = False
        needs_f90 = False
        for fn in codes:
            if fn.endswith('.f'):
                needs_f77 = True
            elif fn.endswith('.f90'):
                needs_f90 = True
        if needs_f77 and not has_f77_compiler():
            pytest.skip("No Fortran 77 compiler available")
        if needs_f90 and not has_f90_compiler():
            pytest.skip("No Fortran 90 compiler available")

        # Build the module
        if self.code is not None:
            self.module = build_code(self.code, options=self.options,
                                     skip=self.skip, only=self.only,
                                     suffix=self.suffix,
                                     module_name=self.module_name)

        if self.sources is not None:
            self.module = build_module(self.sources, options=self.options,
                                       skip=self.skip, only=self.only,
                                       module_name=self.module_name)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Licensed to the BBC under a Contributor Agreement: RJL
"""\
========================
Minimal
========================
A simple HTTP request handler for HTTPServer.
Minimal serves files within a given directory, guessing their
MIME-type from their file extension.
Example Usage
-------------
See HTTPResourceGlue.py for how to use request handlers.
System Requirements
-------------------
This component requires a UNIX system to be run currently.
"""
import string, time, dircache, os
#from cgi import escape
from Axon.Ipc import producerFinished, shutdown
from Axon.Component import component
from Kamaelia.File.BetterReading import IntelligentFileReader
import Kamaelia.Protocol.HTTP.MimeTypes as MimeTypes
import Kamaelia.Protocol.HTTP.ErrorPages as ErrorPages
def sanitizeFilename(filename):
    """Return filename stripped of every character except ASCII
    alphanumerics, '-', '_' and '.'."""
    safe = []
    for ch in filename:
        if ("0" <= ch <= "9" or "a" <= ch <= "z" or "A" <= ch <= "Z"
                or ch in "-_."):
            safe.append(ch)
    return "".join(safe)
def sanitizePath(uri):  # needs work
    """Collapse '.' and '..' segments of a request URI into a relative
    path that cannot escape above the served root.

    The leading '/' is stripped, '.' segments are dropped, and '..' pops
    the previous segment (never rising above the root).  Uses str methods
    instead of the deprecated ``string`` module functions.
    """
    segments = []
    for directory in uri.lstrip('/').split("/"):
        if directory == ".":
            pass
        elif directory == "..":
            if len(segments) > 0:
                segments.pop()
        else:
            segments.append(directory)
    return "/".join(segments)
# FIXME: I tend to dislike <X>Factory functions. It tends to miss what's
# FIXME: actually being done. What this is really doing is creating
# FIXME: PreconfiguredMinimalHandlers for a specific server configuration.
# FIXME: As a result this name should change. Not a block to merge, but will
# FIXME: need revisiting really.
# FIXME: Also, this change strikes me as change for change's sake :-/
def MinimalFactory(indexfilename='index.html', homedirectory='htdocs'):
    """Return a one-argument factory producing Minimal components that are
    preconfigured with the given index filename and home directory."""
    def createMinimalHandler(request):
        return Minimal(request, indexfilename, homedirectory)
    return createMinimalHandler
# old setup used functions - this needs to be converted to work with
# the new component-based handler system
#def websiteListFilesPage(directory):
# files = dircache.listdir(homedirectory + directory)
# data = u"<html>\n<title>" + directory + u"</title>\n<body style='background-color: black; color: white;'>\n<h2>" + #directory + u"</h2>\n<p>Files</p><ul>"
#
#
# for entry in files:
# data += u"<li><a href=\"" + directory + entry + u"\">" + entry + u"</a></li>\n"
# data += u"</ul></body>\n</html>\n\n"
#
# return {
# "statuscode" : "200",
# "data" : data,
# "content-type" : "text/html"
# }
# a one shot request handler
class Minimal(component):
    """\
    A simple HTTP request handler for HTTPServer which serves files within a
    given directory, guessing their MIME-type from their file extension.

    Arguments:
    -- request - the request dictionary object that spawned this component
    -- homedirectory - the path to prepend to paths requested
    -- indexfilename - if a directory is requested, this file is checked for inside it, and sent if found
    """

    Inboxes = {
        "inbox"        : "UNUSED",
        "control"      : "UNUSED",
        "_fileread"    : "File data",
        "_filecontrol" : "Signals from file reader"
    }
    Outboxes = {
        "outbox"      : "Response dictionaries",
        "signal"      : "UNUSED",
        "_fileprompt" : "Get the file reader to do some reading",
        "_filesignal" : "Shutdown the file reader"
    }

    # FIXME: If this used inheritable defaults instead, this would actually
    # FIXME: probably eliminate the need for the factory function above.
    def __init__(self, request, indexfilename='index.html', homedirectory='htdocs/', **argd):
        # NOTE(review): `request` is assumed to be the HTTPServer request
        # dictionary (must contain "uri-suffix") - confirm against caller.
        super(Minimal, self).__init__(**argd)
        self.request = request
        self.indexfilename = indexfilename
        self.homedirectory = homedirectory

    def main(self):
        """Produce the appropriate response then terminate."""
        filename = sanitizePath(self.request["uri-suffix"])
        # Join home directory and request path, inserting a '/' only when
        # neither side already supplies one.
        if not (self.homedirectory.endswith('/') or filename.startswith('/')):
            filepath = self.homedirectory + '/' + filename # FIXME: Should use os.path.join
        else:
            filepath = self.homedirectory + filename # FIXME: Should use os.path.join
        # print filepath

        # FIXME: Logic here looks a little bust actually, and can probably
        # FIXME: be reworked further and simplified.
        # NOTE(review): MIME type is guessed from the requested name, not
        # the resolved file, so directories served via the index file keep
        # the directory name's extension (usually none).
        filetype = MimeTypes.workoutMimeType(filename)

        error = None
        try:
            if os.path.exists(filepath):
                if os.path.isdir(filepath):
                    filepath += self.indexfilename # FIXME: Assumes X/index.html always exists
                # Header response first; file data is streamed below.
                resource = {
                    "content-type"   : filetype,
                    "statuscode"     : 200,
                }
                self.send(resource, "outbox")
            else:
                print "Error 404, " + filename + " is not a file"
                print "self.homedirectory(%s) , filename(%s)" % (self.homedirectory , filename)
                print "os.path.exists(self.homedirectory + filename)", os.path.exists(self.homedirectory + filename)
                print "not os.path.isdir(self.homedirectory + filename)", (not os.path.isdir(self.homedirectory + filename))
                error = 404
        except OSError, e:
            error = 404

        if error == 404:
            # Emit a canned 404 page and shut down immediately.
            resource = ErrorPages.getErrorPage(404)
            resource["incomplete"] = False
            self.send(resource, "outbox")
            self.send(producerFinished(self), "signal")
            return

        # Stream the file via a child IntelligentFileReader component,
        # wired up to our private in/outboxes.
        self.filereader = IntelligentFileReader(filepath, 50000, 10)
        self.link((self, "_fileprompt"), (self.filereader, "inbox"))
        self.link((self, "_filesignal"), (self.filereader, "control"))
        self.link((self.filereader, "outbox"), (self, "_fileread"))
        self.link((self.filereader, "signal"), (self, "_filecontrol"))
        self.addChildren(self.filereader)
        self.filereader.activate()
        yield 1

        done = False
        while not done:
            yield 1
            # Forward file chunks, but never queue more than 3 pending
            # response messages at once (crude flow control).
            while self.dataReady("_fileread") and len(self.outboxes["outbox"]) < 3:
                msg = self.recv("_fileread")
                resource = { "data" : msg }
                self.send(resource, "outbox")

            if len(self.outboxes["outbox"]) < 3:
                self.send("GARBAGE", "_fileprompt") # we use this to wakeup the filereader

            # Only treat reader shutdown as EOF once all data is drained.
            while self.dataReady("_filecontrol") and not self.dataReady("_fileread"):
                msg = self.recv("_filecontrol")
                if isinstance(msg, producerFinished):
                    done = True

            self.pause()

        self.send(producerFinished(self), "signal")
# Kamaelia introspection: components this module provides.
__kamaelia_components__ = ( Minimal, )
|
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Status page handler for mapreduce framework."""
import os
import time
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_errors
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
from google.appengine.ext import db
from mapreduce import base_handler
from mapreduce import model
from google.appengine.ext.webapp import template
# TODO(user): a list of features we'd like to have in status page:
# - show sparklet of entities/sec on index page
# - shard bar chart should color finished shards differently
# mapreduce.yaml file names
# Filenames accepted for the mapreduce configuration file.
MR_YAML_NAMES = ["mapreduce.yaml", "mapreduce.yml"]


class Error(Exception):
  """Base class for exceptions in this module."""


class BadStatusParameterError(Error):
  """A parameter passed to a status handler was invalid."""
  # Previously derived directly from Exception, so `except Error` missed
  # it; rooted in the module's Error base for a consistent hierarchy.


class BadYamlError(Error):
  """Raised when the mapreduce.yaml file is invalid."""


class MissingYamlError(BadYamlError):
  """Raised when the mapreduce.yaml file could not be found."""


class MultipleDocumentsInMrYaml(BadYamlError):
  """There's more than one document in mapreduce.yaml file."""
class UserParam(validation.Validated):
  """A user-supplied parameter to a mapreduce job."""

  # Regex / validator specs consumed by the validation framework.
  ATTRIBUTES = {
      "name": r"[a-zA-Z0-9_\.]+",
      "default": validation.Optional(r".*"),
      "value": validation.Optional(r".*"),
  }


class MapperInfo(validation.Validated):
  """Configuration parameters for the mapper part of the job."""

  ATTRIBUTES = {
      "handler": r".+",
      "input_reader": r".+",
      "params": validation.Optional(validation.Repeated(UserParam)),
      "params_validator": validation.Optional(r".+"),
  }


class MapreduceInfo(validation.Validated):
  """Mapreduce description in mapreduce.yaml."""

  ATTRIBUTES = {
      "name": r".+",
      "mapper": MapperInfo,
      "params": validation.Optional(validation.Repeated(UserParam)),
      "params_validator": validation.Optional(r".+"),
  }
class MapReduceYaml(validation.Validated):
  """Root class for mapreduce.yaml.

  File format:

  mapreduce:
  - name: <mapreduce_name>
    mapper:
      - input_reader: google.appengine.ext.mapreduce.DatastoreInputReader
      - handler: path_to_my.MapperFunction
      - params:
        - name: foo
          default: bar
        - name: blah
          default: stuff
      - params_validator: path_to_my.ValidatorFunction

  Where
    mapreduce_name: The name of the mapreduce. Used for UI purposes.
    mapper_handler_spec: Full <module_name>.<function_name/class_name> of
      mapper handler. See MapreduceSpec class documentation for full handler
      specification.
    input_reader: Full <module_name>.<function_name/class_name> of the
      InputReader sub-class to use for the mapper job.
    params: A list of optional parameter names and optional default values
      that may be supplied or overridden by the user running the job.
    params_validator is full <module_name>.<function_name/class_name> of
      a callable to validate the mapper_params after they are input by the
      user running the job.
  """

  ATTRIBUTES = {
      "mapreduce": validation.Optional(validation.Repeated(MapreduceInfo))
  }

  @staticmethod
  def to_dict(mapreduce_yaml):
    """Converts a MapReduceYaml file into a JSON-encodable dictionary.

    For use in user-visible UI and internal methods for interfacing with
    user code (like param validation). as a list

    Args:
      mapreduce_yaml: The Python representation of the mapreduce.yaml
        document.

    Returns:
      A list of configuration dictionaries.
    """
    all_configs = []
    for config in mapreduce_yaml.mapreduce:
      out = {
          "name": config.name,
          "mapper_input_reader": config.mapper.input_reader,
          "mapper_handler": config.mapper.handler,
      }
      if config.mapper.params_validator:
        out["mapper_params_validator"] = config.mapper.params_validator
      if config.mapper.params:
        # A truthy "default" wins; otherwise the explicit "value" is used.
        param_defaults = {}
        for param in config.mapper.params:
          param_defaults[param.name] = param.default or param.value
        out["mapper_params"] = param_defaults
      if config.params:
        param_defaults = {}
        for param in config.params:
          param_defaults[param.name] = param.default or param.value
        out["params"] = param_defaults
      all_configs.append(out)

    return all_configs
# N.B. Sadly, we currently don't have and ability to determine
# application root dir at run time. We need to walk up the directory structure
# to find it.
def find_mapreduce_yaml(status_file=__file__):
  """Traverse directory trees to find mapreduce.yaml file.

  Begins with the location of status.py and then moves on to check the working
  directory.

  Args:
    status_file: location of status.py, overridable for testing purposes.

  Returns:
    the path of mapreduce.yaml file or None if not found.
  """
  checked = set()
  return (_find_mapreduce_yaml(os.path.dirname(status_file), checked)
          or _find_mapreduce_yaml(os.getcwd(), checked))
def _find_mapreduce_yaml(start, checked):
  """Walk upward from `start` looking for a mapreduce.yaml file.

  Stops as soon as a directory already present in `checked` is reached,
  which both guarantees termination (the filesystem root is its own
  parent) and avoids re-scanning directories across calls.

  Args:
    start: the path to start in and work upward from
    checked: the set of already examined directories

  Returns:
    the path of mapreduce.yaml file or None if not found.
  """
  directory = start
  while directory not in checked:
    checked.add(directory)
    for mr_yaml_name in MR_YAML_NAMES:
      candidate = os.path.join(directory, mr_yaml_name)
      if os.path.exists(candidate):
        return candidate
    directory = os.path.dirname(directory)
  return None
def parse_mapreduce_yaml(contents):
  """Parses mapreduce.yaml file contents.

  Args:
    contents: mapreduce.yaml file contents.

  Returns:
    MapReduceYaml object with all the data from original file.

  Raises:
    BadYamlError: when contents is not a valid mapreduce.yaml file.
    MultipleDocumentsInMrYaml: when the file holds more than one YAML
      document.
  """
  try:
    builder = yaml_object.ObjectBuilder(MapReduceYaml)
    handler = yaml_builder.BuilderHandler(builder)
    listener = yaml_listener.EventListener(handler)
    listener.Parse(contents)

    # One MapReduceYaml object per YAML document in the input.
    mr_info = handler.GetResults()
  except (ValueError, yaml_errors.EventError), e:
    raise BadYamlError(e)

  if len(mr_info) < 1:
    raise BadYamlError("No configs found in mapreduce.yaml")
  if len(mr_info) > 1:
    raise MultipleDocumentsInMrYaml("Found %d YAML documents" % len(mr_info))

  jobs = mr_info[0]

  # Job names must be unique across the file.
  job_names = set(j.name for j in jobs.mapreduce)
  if len(jobs.mapreduce) != len(job_names):
    raise BadYamlError("Overlapping mapreduce names; names must be unique")

  return jobs
def get_mapreduce_yaml(parse=parse_mapreduce_yaml):
  """Locates mapreduce.yaml, loads and parses its info.

  Args:
    parse: parsing function, injectable for testing.

  Returns:
    MapReduceYaml object.

  Raises:
    MissingYamlError: when no mapreduce.yaml file could be located.
    BadYamlError: when contents is not a valid mapreduce.yaml file.
  """
  mr_yaml_path = find_mapreduce_yaml()
  if not mr_yaml_path:
    raise MissingYamlError()
  # `with` closes the handle even if parsing raises, replacing the
  # hand-rolled try/finally.
  with open(mr_yaml_path) as mr_yaml_file:
    return parse(mr_yaml_file.read())
class ResourceHandler(base_handler.BaseHandler):
  """Handler for static resources."""

  # Maps URL-visible resource names to (file under static/, content type).
  _RESOURCE_MAP = {
      "status": ("overview.html", "text/html"),
      "detail": ("detail.html", "text/html"),
      "base.css": ("base.css", "text/css"),
      "jquery.js": ("jquery-1.4.2.min.js", "text/javascript"),
      "status.js": ("status.js", "text/javascript"),
  }

  def get(self, relative):
    """Serves one allowlisted static file; 404 for anything else."""
    if relative not in self._RESOURCE_MAP:
      self.response.set_status(404)
      self.response.out.write("Resource not found.")
      return

    real_path, content_type = self._RESOURCE_MAP[relative]
    path = os.path.join(os.path.dirname(__file__), "static", real_path)
    # Five-minute public cache; these files only change on deployment.
    self.response.headers["Cache-Control"] = "public; max-age=300"
    self.response.headers["Content-Type"] = content_type
    # NOTE(review): the file handle is never closed explicitly.
    self.response.out.write(open(path).read())


class ListConfigsHandler(base_handler.GetJsonHandler):
  """Lists mapreduce configs as JSON for users to start jobs."""

  def handle(self):
    self.json_response["configs"] = MapReduceYaml.to_dict(get_mapreduce_yaml())
class ListJobsHandler(base_handler.GetJsonHandler):
  """Lists running and completed mapreduce jobs for an overview as JSON."""

  def handle(self):
    cursor = self.request.get("cursor")
    count = int(self.request.get("count", "50"))

    query = model.MapreduceState.all()
    if cursor:
      # Key-ordered pagination: resume from the cursor key, inclusive.
      query.filter("__key__ >=", db.Key(cursor))
    query.order("__key__")

    # Fetch one extra entity to detect whether another page exists.
    jobs_list = query.fetch(count + 1)
    if len(jobs_list) == (count + 1):
      self.json_response["cursor"] = str(jobs_list[-1].key())
      jobs_list = jobs_list[:-1]

    all_jobs = []
    for job in jobs_list:
      out = {
          # Data shared between overview and detail pages.
          "name": job.mapreduce_spec.name,
          "mapreduce_id": job.mapreduce_spec.mapreduce_id,
          "active": job.active,
          "start_timestamp_ms":
              int(time.mktime(job.start_time.utctimetuple()) * 1000),
          "updated_timestamp_ms":
              int(time.mktime(job.last_poll_time.utctimetuple()) * 1000),

          # Specific to overview page.
          "chart_url": job.sparkline_url,
          "active_shards": job.active_shards,
          "shards": job.mapreduce_spec.mapper.shard_count,
      }
      if job.result_status:
        out["result_status"] = job.result_status
      all_jobs.append(out)

    self.json_response["jobs"] = all_jobs


class GetJobDetailHandler(base_handler.GetJsonHandler):
  """Retrieves the details of a mapreduce job as JSON."""

  def handle(self):
    mapreduce_id = self.request.get("mapreduce_id")
    if not mapreduce_id:
      raise BadStatusParameterError("'mapreduce_id' was invalid")
    job = model.MapreduceState.get_by_key_name(mapreduce_id)
    if job is None:
      raise KeyError("Could not find job with ID %r" % mapreduce_id)

    # Flatten spec and counters into the top-level JSON response.
    self.json_response.update(job.mapreduce_spec.to_json())
    self.json_response.update(job.counters_map.to_json())
    self.json_response.update({
        # Shared with overview page.
        "active": job.active,
        "start_timestamp_ms":
            int(time.mktime(job.start_time.utctimetuple()) * 1000),
        "updated_timestamp_ms":
            int(time.mktime(job.last_poll_time.utctimetuple()) * 1000),

        # Specific to detail page.
        "chart_url": job.chart_url,
    })
    self.json_response["result_status"] = job.result_status

    shards_list = model.ShardState.find_by_mapreduce_id(mapreduce_id)
    all_shards = []
    shards_list.sort(key=lambda x: x.shard_number)
    for shard in shards_list:
      out = {
          "active": shard.active,
          "result_status": shard.result_status,
          "shard_number": shard.shard_number,
          "shard_id": shard.shard_id,
          "updated_timestamp_ms":
              int(time.mktime(shard.update_time.utctimetuple()) * 1000),
          "shard_description": shard.shard_description,
          "last_work_item": shard.last_work_item,
      }
      out.update(shard.counters_map.to_json())
      all_shards.append(out)
    self.json_response["shards"] = all_shards
|
|
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import copy
import json
import sys
# These fields must appear in the test result output
REQUIRED = {
    'interrupted',
    'num_failures_by_type',
    'seconds_since_epoch',
    'tests',
}

# These fields are optional, but must have the same value on all shards
OPTIONAL_MATCHING = (
    'builder_name',
    'build_number',
    'chromium_revision',
    'has_pretty_patch',
    'has_wdiff',
    'path_delimiter',
    'pixel_tests_enabled',
    'random_order_seed'
)

# The last shard's value for these fields will show up in the merged results
OPTIONAL_IGNORED = (
    'layout_tests_dir',
    'metadata'
)

# These fields are optional and will be summed together
OPTIONAL_COUNTS = (
    'fixable',
    'num_flaky',
    'num_passes',
    'num_regressions',
    'skipped',
    'skips',
)


class MergeException(Exception):
  """Raised when shard results are malformed or cannot be merged."""
  pass
def merge_test_results(shard_results_list):
  """ Merge list of results.

  Args:
    shard_results_list: list of results to merge. All the results must have the
      same format. Supported format are simplified JSON format & Chromium JSON
      test results format version 3 (see
      https://www.chromium.org/developers/the-json-test-results-format)

  Returns:
    a dictionary that represent the merged results. Its format follow the same
    format of all results in |shard_results_list|.
  """
  # Drop empty/None entries before dispatching on the format.
  valid_results = [result for result in shard_results_list if result]
  if not valid_results:
    return {}
  if 'seconds_since_epoch' in valid_results[0]:
    return _merge_json_test_result_format(valid_results)
  return _merge_simplified_json_format(valid_results)
def _merge_simplified_json_format(shard_results_list):
# This code is specialized to the "simplified" JSON format that used to be
# the standard for recipes.
# These are the only keys we pay attention to in the output JSON.
merged_results = {
'successes': [],
'failures': [],
'valid': True,
}
for result_json in shard_results_list:
successes = result_json.get('successes', [])
failures = result_json.get('failures', [])
valid = result_json.get('valid', True)
if (not isinstance(successes, list) or not isinstance(failures, list) or
not isinstance(valid, bool)):
raise MergeException(
'Unexpected value type in %s' % result_json) # pragma: no cover
merged_results['successes'].extend(successes)
merged_results['failures'].extend(failures)
merged_results['valid'] = merged_results['valid'] and valid
return merged_results
def _merge_json_test_result_format(shard_results_list):
  """Merge shard results in the Chromium JSON test results format version 3.

  See https://www.chromium.org/developers/the-json-test-results-format

  Args:
    shard_results_list: list of version-3 result dictionaries to merge.

  Returns:
    A single version-3 result dictionary.

  Raises:
    MergeException: on a version mismatch, missing required keys,
      mismatched OPTIONAL_MATCHING values, or leftover unrecognized keys.
  """
  # This code is specialized to the Chromium JSON test results format version 3:
  # https://www.chromium.org/developers/the-json-test-results-format

  # These are required fields for the JSON test result format version 3.
  merged_results = {
      'tests': {},
      'interrupted': False,
      'version': 3,
      'seconds_since_epoch': float('inf'),
      'num_failures_by_type': {
      }
  }
  # To make sure that we don't mutate existing shard_results_list.
  shard_results_list = copy.deepcopy(shard_results_list)
  for result_json in shard_results_list:
    # TODO(tansell): check whether this deepcopy is actually necessary.
    result_json = copy.deepcopy(result_json)

    # Check the version first
    version = result_json.pop('version', -1)
    if version != 3:
      raise MergeException(  # pragma: no cover (covered by
                             # results_merger_unittest).
          'Unsupported version %s. Only version 3 is supported' % version)

    # Check the results for each shard have the required keys
    missing = REQUIRED - set(result_json)
    if missing:
      raise MergeException(  # pragma: no cover (covered by
                             # results_merger_unittest).
          'Invalid json test results (missing %s)' % missing)

    # Curry merge_values for this result_json. Note: merge_value pops the
    # key from result_json, so every handled key is consumed from the shard.
    merge = lambda key, merge_func: merge_value(
        result_json, merged_results, key, merge_func)

    # Traverse the result_json's test trie & merged_results's test tries in
    # DFS order & add the n to merged['tests'].
    merge('tests', merge_tries)

    # If any were interrupted, we are interrupted.
    merge('interrupted', lambda x,y: x|y)

    # Use the earliest seconds_since_epoch value
    merge('seconds_since_epoch', min)

    # Sum the number of failure types
    merge('num_failures_by_type', sum_dicts)

    # Optional values must match
    for optional_key in OPTIONAL_MATCHING:
      if optional_key not in result_json:
        continue

      if optional_key not in merged_results:
        # Set this value to None, then blindly copy over it.
        merged_results[optional_key] = None
        merge(optional_key, lambda src, dst: src)
      else:
        merge(optional_key, ensure_match)

    # Optional values ignored; the last shard's value wins.
    for optional_key in OPTIONAL_IGNORED:
      if optional_key in result_json:
        merged_results[optional_key] = result_json.pop(
            # pragma: no cover (covered by
            # results_merger_unittest).
            optional_key)

    # Sum optional value counts
    for count_key in OPTIONAL_COUNTS:
      if count_key in result_json:  # pragma: no cover
        # TODO(mcgreevy): add coverage.
        merged_results.setdefault(count_key, 0)
        merge(count_key, lambda a, b: a+b)

    # Every recognized key was popped above; anything left over means the
    # shard used a field this merger does not understand.
    if result_json:
      raise MergeException(  # pragma: no cover (covered by
                             # results_merger_unittest).
          'Unmergable values %s' % list(result_json.keys()))

  return merged_results
def merge_tries(source, dest):
  """Merge the test trie |source| into |dest| and return |dest|.

  This is intended for use as a merge_func parameter to merge_value. It
  performs a lock-step depth-first traversal of both tries: leaves missing
  from |dest| are copied over, and sub-tries present in both are merged
  recursively (via an explicit stack).

  Args:
    source: A result json test trie.
    dest: A json test trie merge destination.

  Raises:
    MergeException: if a non-dict value collides with an existing entry.
  """
  # Stack of (trie prefix, dest sub-trie, source sub-trie) still to merge.
  stack = [('', dest, source)]
  while stack:
    prefix, dest_node, src_node = stack.pop()
    for key, value in src_node.items():
      if key not in dest_node:
        # Nothing to merge with; copy the subtree over wholesale.
        dest_node[key] = value
        continue
      if not isinstance(value, dict):
        raise MergeException(
            "%s:%s: %r not mergable, curr_node: %r\ndest_node: %r" % (
                prefix, key, value, src_node, dest_node))
      stack.append(("%s:%s" % (prefix, key), dest_node[key], value))
  return dest
def ensure_match(source, dest):
  """Return |source|, raising MergeException unless it equals |dest|.

  This is intended for use as a merge_func parameter to merge_value.

  Raises:
    MergeException if source != dest
  """
  if source == dest:
    return source
  raise MergeException(  # pragma: no cover (covered by
                         # results_merger_unittest).
      "Values don't match: %s, %s" % (source, dest))
def sum_dicts(source, dest):
  """Add each count in |source| into the matching entry of |dest|.

  This is intended for use as a merge_func parameter to merge_value.
  Missing keys in |dest| are treated as zero. Returns |dest|.
  """
  for key, count in source.items():
    dest[key] = dest.get(key, 0) + count
  return dest
def merge_value(source, dest, key, merge_func):
  """ Merges a value from source to dest.

  The value is deleted from source.

  Args:
    source: A dictionary from which to pull a value, identified by key.
    dest: The dictionary into to which the value is to be merged.
    key: The key which identifies the value to be merged.
    merge_func(src, dst): A function which merges its src into dst,
      and returns the result. May modify dst. May raise a MergeException.

  Raises:
    MergeException if the values can not be merged.
  """
  try:
    dest[key] = merge_func(source[key], dest[key])
  except MergeException as e:
    # BaseException.message was removed in Python 3, so reading e.message
    # here used to raise AttributeError and mask the real failure. Rebuild
    # the prefixed message from e.args instead (args[0] is the message for
    # normally-constructed exceptions) and keep e.message assigned for any
    # legacy readers.
    message = "MergeFailure for %s\n%s" % (key, e.args[0] if e.args else '')
    e.message = message
    e.args = tuple([message] + list(e.args[1:]))
    raise
  del source[key]
def main(files):
  """Merge the given JSON result files and print the merged result.

  Args:
    files: Paths of at least two JSON result files to merge, in order.

  Returns:
    0 on success, 1 if fewer than two files were supplied.
  """
  if len(files) < 2:
    sys.stderr.write("Not enough JSON files to merge.\n")
    return 1

  sys.stderr.write('Starting with %s\n' % files[0])
  # Use context managers so each file handle is closed promptly instead of
  # being leaked until interpreter shutdown.
  with open(files[0]) as f:
    result = json.load(f)
  for path in files[1:]:
    sys.stderr.write('Merging %s\n' % path)
    with open(path) as f:
      result = merge_test_results([result, json.load(f)])
  print(json.dumps(result))
  return 0


if __name__ == "__main__":
  sys.exit(main(sys.argv[1:]))
|
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.shortcuts import render, render_to_response
from django.utils.translation import ugettext_lazy as _
from djblets.siteconfig.models import SiteConfiguration
from haystack.inputs import Raw
from haystack.query import SearchQuerySet, SQ
from haystack.views import SearchView
from reviewboard.accounts.decorators import check_login_required
from reviewboard.reviews.models import Group, ReviewRequest
from reviewboard.search.indexes import BaseSearchIndex
from reviewboard.scmtools.models import Repository
from reviewboard.site.decorators import check_local_site_access
from reviewboard.site.urlresolvers import local_site_reverse
class RBSearchView(SearchView):
    """Provides search functionality for information on Review Board."""

    # Template used to render the search results page.
    template = 'search/results.html'

    # Number of page links shown on either side of the current page.
    ADJACENT_PAGES = 5

    # Filters users may select to narrow results to specific model types.
    # An empty 'id' (and no 'model') means no filtering at all.
    FILTER_TYPES = [
        {
            'id': '',
            'name': _('All Results'),
        },
        {
            'id': 'users',
            'model': User,
            'name': _('Users'),
        },
        {
            'id': 'reviewrequests',
            'model': ReviewRequest,
            'name': _('Review Requests'),
        },
    ]

    def __init__(self, *args, **kwargs):
        """Initialize the view from the stored site configuration."""
        siteconfig = SiteConfiguration.objects.get_current()

        # Whether search has been enabled in the administration settings.
        self.enabled = siteconfig.get('search_enable')

        super(RBSearchView, self).__init__(
            load_all=False,
            searchqueryset=SearchQuerySet,
            results_per_page=siteconfig.get('search_results_per_page'),
            *args, **kwargs)

    def __call__(self, request, local_site=None, local_site_name=None,
                 *args, **kwargs):
        """Handles requests to this view.

        This will first check if the search result is just a digit, which is
        assumed to be a review request ID. If it is, the user will be
        redirected to the review request.

        Otherwise, the search will be carried out based on the query.
        """
        self.request = request
        self.local_site = local_site

        query = self.get_query()

        # If the query is an integer, then assume that it's a review request
        # ID that we'll want to redirect to. This mirrors behavior we've had
        # since Review Board 1.7.
        if query.isdigit():
            try:
                review_request = ReviewRequest.objects.for_id(query,
                                                              local_site)

                # Only redirect if the user may actually view the review
                # request; otherwise fall through to a normal search.
                if review_request.is_accessible_by(self.request.user,
                                                   local_site=self.local_site,
                                                   request=self.request):
                    return HttpResponseRedirect(
                        review_request.get_absolute_url())
            except ReviewRequest.DoesNotExist:
                pass

        if not self.enabled:
            # Search is turned off; show an explanatory page instead.
            return render(request, 'search/search_disabled.html')

        return super(RBSearchView, self).__call__(request)

    def get_query(self):
        """Return the normalized query string from the request."""
        return self.request.GET.get('q', '').strip()

    def get_results(self):
        """Return a set of results matching the query."""
        sqs = self.searchqueryset()
        sqs = sqs.filter(content=Raw(self.query))

        # Filter the results by the user-requested set of models, if any.
        self.active_filters = \
            self.request.GET.get('filter', '').strip().split(',')

        filter_models = [
            filter_type['model']
            for filter_type in self.FILTER_TYPES
            if ('model' in filter_type and
                filter_type['id'] in self.active_filters)
        ]

        if filter_models:
            sqs = sqs.models(*filter_models)

        # Restrict the results to the current Local Site (or to entries not
        # tied to any Local Site).
        if self.local_site:
            local_site_id = self.local_site.pk
        else:
            local_site_id = BaseSearchIndex.NO_LOCAL_SITE_ID

        sqs = sqs.filter_and(local_sites__contains=local_site_id)

        # Filter out any private review requests the user doesn't have
        # access to.
        user = self.request.user

        if not user.is_superuser:
            private_sq = (SQ(django_ct='reviews.reviewrequest') &
                          SQ(private=True))

            if user.is_authenticated():
                # We're going to build a series of queries that mimic the
                # accessibility checks we have internally, based on the access
                # permissions the user currently has, and the IDs listed in
                # the indexed review request.
                #
                # This must always be kept in sync with
                # ReviewRequestManager._query.
                #
                # Note that we are not performing Local Site checks here,
                # because we're already filtering by Local Sites.

                # Make sure they have access to the repository, if any.
                accessible_repo_ids = \
                    list(Repository.objects.accessible_ids(
                        user, visible_only=False,
                        local_site=self.local_site))
                accessible_group_ids = \
                    Group.objects.accessible_ids(user, visible_only=False)

                repository_sq = \
                    SQ(private_repository_id__in=[0] + accessible_repo_ids)

                # Next, build a query to see if the review request targets any
                # invite-only groups the user is a member of.
                target_groups_sq = SQ(private_target_groups__contains=0)

                for pk in accessible_group_ids:
                    target_groups_sq |= \
                        SQ(private_target_groups__contains=pk)

                # Build a query to see if the user is explicitly listed
                # in the list of reviewers.
                target_users_sq = SQ(target_users__contains=user.pk)

                # And, of course, the owner of the review request can see it.
                #
                # With that, we'll put the whole query together, in the order
                # matching ReviewRequest.is_accessible_by.
                private_sq &= ~(SQ(username=user.username) |
                                (repository_sq &
                                 (target_users_sq | target_groups_sq)))

            sqs = sqs.exclude(private_sq)

        return sqs.order_by('-last_updated')

    def extra_context(self):
        """Return extra context for rendering the results list."""
        return {
            'hits_returned': len(self.results),
            'filter_types': [
                dict(active=(self.active_filters == [filter_type['id']]),
                     **filter_type)
                for filter_type in self.FILTER_TYPES
            ],
        }

    def create_response(self):
        """Create a response based on the search results."""
        if not self.query:
            # An empty query redirects to the review request list rather
            # than rendering an empty results page.
            return HttpResponseRedirect(
                local_site_reverse('all-review-requests',
                                   request=self.request))

        paginator, page = self.build_page()

        # Compute the window of page numbers to link around the current page.
        page_nums = range(max(1, page.number - self.ADJACENT_PAGES),
                          min(paginator.num_pages,
                              page.number + self.ADJACENT_PAGES) + 1)

        context = {
            'query': self.query,
            'page': page,
            'paginator': paginator,
            'is_paginated': page.has_other_pages(),
            'show_first_page': 1 not in page_nums,
            'show_last_page': paginator.num_pages not in page_nums,
            'page_numbers': page_nums,
        }
        context.update(self.extra_context())

        return render_to_response(
            self.template, context,
            context_instance=self.context_class(self.request))
@check_login_required
@check_local_site_access
def search(*args, **kwargs):
    """Provide results for a given search."""
    return RBSearchView()(*args, **kwargs)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input pipeline.
Please see the [reading data how-to](../../how_tos/reading_data/index.md)
for context.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import queue_runner
def match_filenames_once(pattern, name=None):
  """Create a variable holding the files matching `pattern`, computed once.

  Args:
    pattern: A file pattern (glob).
    name: A name for the operations (optional).

  Returns:
    A variable that is initialized to the list of files matching pattern.
  """
  with ops.op_scope([pattern], name, "matching_filenames") as name:
    matching = io_ops.matching_files(pattern)
    # The variable caches the glob result; validate_shape is disabled
    # because the number of matches is not known statically.
    return variables.Variable(
        matching, trainable=False, name=name, validate_shape=False)
def limit_epochs(tensor, num_epochs=None, name=None):
  """Return `tensor` up to `num_epochs` times, then raise `OutOfRange`.

  Args:
    tensor: Any `Tensor`.
    num_epochs: A positive integer (optional). If specified, limits the number
      of steps the output tensor may be evaluated.
    name: A name for the operations (optional).

  Returns:
    tensor or `OutOfRange`.

  Raises:
    ValueError: if `num_epochs` is invalid.
  """
  if num_epochs is None:
    # No limit requested; pass the tensor through untouched.
    return tensor
  if num_epochs <= 0:
    raise ValueError("num_epochs must be > 0 not %d." % num_epochs)
  with ops.op_scope([tensor], name, "limit_epochs") as name:
    # A counter variable tracks the number of evaluations; count_up_to
    # raises OutOfRange once num_epochs is reached.
    initial = constant_op.constant(0, dtype=dtypes.int64)
    epoch_counter = variables.Variable(initial, name="epochs",
                                       trainable=False)
    increment = epoch_counter.count_up_to(num_epochs)
    with ops.control_dependencies([increment]):
      return array_ops.identity(tensor, name=name)
def input_producer(input_tensor, element_shape=None, num_epochs=None,
                   shuffle=True, seed=None, capacity=32, shared_name=None,
                   summary_name=None, name=None):
  """Output the rows of `input_tensor` to a queue for an input pipeline.

  Args:
    input_tensor: A tensor with the rows to produce. Must be at least
      one-dimensional. Must either have a fully-defined shape, or
      `element_shape` must be defined.
    element_shape: (Optional.) A `TensorShape` representing the shape of a
      row of `input_tensor`, if it cannot be inferred.
    num_epochs: (Optional.) An integer. If specified `input_producer` produces
      each row of `input_tensor` `num_epochs` times before generating an
      `OutOfRange` error. If not specified, `input_producer` can cycle through
      the rows of `input_tensor` an unlimited number of times.
    shuffle: (Optional.) A boolean. If true, the rows are randomly shuffled
      within each epoch.
    seed: (Optional.) An integer. The seed to use if `shuffle` is true.
    capacity: (Optional.) The capacity of the queue to be used for buffering
      the input.
    shared_name: (Optional.) If set, this queue will be shared under the given
      name across multiple sessions.
    summary_name: (Optional.) If set, a scalar summary for the current queue
      size will be generated, using this name as part of the tag.
    name: (Optional.) A name for queue.

  Returns:
    A queue with the output rows. A `QueueRunner` for the queue is
    added to the current `QUEUE_RUNNER` collection of the current
    graph.

  Raises:
    ValueError: If the shape of the input cannot be inferred from the
      arguments.
  """
  with ops.op_scope([input_tensor], name, "input_producer"):
    input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
    # Infer the per-row shape from the tensor itself, merged with any
    # explicitly provided element_shape.
    element_shape = input_tensor.get_shape()[1:].merge_with(element_shape)
    if not element_shape.is_fully_defined():
      raise ValueError("Either `input_tensor` must have a fully defined shape "
                       "or `element_shape` must be specified")

    if shuffle:
      input_tensor = random_ops.random_shuffle(input_tensor, seed=seed)

    # Apply the epoch limit (no-op when num_epochs is None).
    input_tensor = limit_epochs(input_tensor, num_epochs)

    q = data_flow_ops.FIFOQueue(capacity=capacity,
                                dtypes=[input_tensor.dtype.base_dtype],
                                shapes=[element_shape],
                                shared_name=shared_name, name=name)
    enq = q.enqueue_many([input_tensor])
    queue_runner.add_queue_runner(queue_runner.QueueRunner(q, [enq]))
    if summary_name is not None:
      # Report queue fullness as a fraction in [0, 1].
      logging_ops.scalar_summary("queue/%s/%s" % (q.name, summary_name),
                                 math_ops.cast(q.size(), dtypes.float32) *
                                 (1. / capacity))
    return q
def string_input_producer(string_tensor, num_epochs=None, shuffle=True,
                          seed=None, capacity=32, shared_name=None, name=None):
  """Output strings (e.g. filenames) to a queue for an input pipeline.

  Args:
    string_tensor: A 1-D string tensor with the strings to produce.
    num_epochs: An integer (optional). If specified, `string_input_producer`
      produces each string from `string_tensor` `num_epochs` times before
      generating an `OutOfRange` error. If not specified,
      `string_input_producer` can cycle through the strings in `string_tensor`
      an unlimited number of times.
    shuffle: Boolean. If true, the strings are randomly shuffled within each
      epoch.
    seed: An integer (optional). Seed used if shuffle == True.
    capacity: An integer. Sets the queue capacity.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: A name for the operations (optional).

  Returns:
    A queue with the output strings. A `QueueRunner` for the Queue
    is added to the current `Graph`'s `QUEUE_RUNNER` collection.

  Raises:
    ValueError: If the string_tensor is a null Python list. At runtime,
      will fail with an assertion if string_tensor becomes a null tensor.
  """
  not_null_err = "string_input_producer requires a non-null input tensor"
  # Catch an empty Python list eagerly, at graph-construction time.
  if not isinstance(string_tensor, ops.Tensor) and not string_tensor:
    raise ValueError(not_null_err)

  with ops.op_scope([string_tensor], name, "input_producer") as name:
    string_tensor = ops.convert_to_tensor(string_tensor, dtype=dtypes.string)
    # For tensors, emptiness can only be checked at run time: the Assert op
    # fails the step if the tensor turns out to be empty, and the identity
    # below ties that check into the data path.
    with ops.control_dependencies([
        logging_ops.Assert(math_ops.greater(array_ops.size(string_tensor), 0),
                           [not_null_err])]):
      string_tensor = array_ops.identity(string_tensor)
    return input_producer(
        input_tensor=string_tensor,
        element_shape=[],
        num_epochs=num_epochs,
        shuffle=shuffle,
        seed=seed,
        capacity=capacity,
        shared_name=shared_name,
        name=name,
        summary_name="fraction_of_%d_full" % capacity)
def range_input_producer(limit, num_epochs=None, shuffle=True, seed=None,
                         capacity=32, shared_name=None, name=None):
  """Produces the integers from 0 to limit-1 in a queue.

  Args:
    limit: An int32 scalar tensor.
    num_epochs: An integer (optional). If specified, each integer is produced
      `num_epochs` times before an OutOfRange error is generated; otherwise
      the integers are cycled an unlimited number of times.
    shuffle: Boolean. If true, the integers are randomly shuffled within each
      epoch.
    seed: An integer (optional). Seed used if shuffle == True.
    capacity: An integer. Sets the queue capacity.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: A name for the operations (optional).

  Returns:
    A Queue with the output integers. A `QueueRunner` for the Queue
    is added to the current `Graph`'s `QUEUE_RUNNER` collection.
  """
  with ops.op_scope([limit], name, "input_producer") as name:
    # Delegate to the generic producer over the integer range [0, limit).
    return input_producer(
        input_tensor=math_ops.range(limit),
        element_shape=[],
        num_epochs=num_epochs,
        shuffle=shuffle,
        seed=seed,
        capacity=capacity,
        shared_name=shared_name,
        name=name,
        summary_name="fraction_of_%d_full" % capacity)
def slice_input_producer(tensor_list, num_epochs=None, shuffle=True, seed=None,
                         capacity=32, shared_name=None, name=None):
  """Produces a slice of each `Tensor` in `tensor_list`.

  Implemented using a Queue -- a `QueueRunner` for the Queue
  is added to the current `Graph`'s `QUEUE_RUNNER` collection.

  Args:
    tensor_list: A list of `Tensor` objects. Every `Tensor` in
      `tensor_list` must have the same size in the first dimension.
    num_epochs: An integer (optional). If specified, each slice is produced
      `num_epochs` times before an `OutOfRange` error is generated; otherwise
      the slices are cycled an unlimited number of times.
    shuffle: Boolean. If true, the integers are randomly shuffled within each
      epoch.
    seed: An integer (optional). Seed used if shuffle == True.
    capacity: An integer. Sets the queue capacity.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: A name for the operations (optional).

  Returns:
    A list of tensors, one for each element of `tensor_list`. If the tensor
    in `tensor_list` has shape `[N, a, b, .., z]`, then the corresponding
    output tensor will have shape `[a, b, ..., z]`.

  Raises:
    ValueError: if `slice_input_producer` produces nothing from `tensor_list`.
  """
  with ops.op_scope(tensor_list, name, "input_producer"):
    tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
    if not tensor_list:
      raise ValueError(
          "Expected at least one tensor in slice_input_producer().")
    # Drive the slicing with a queue of row indices; each dequeue selects
    # the same row from every tensor in the list.
    range_size = array_ops.shape(tensor_list[0])[0]
    # TODO(josh11b): Add an assertion that the first dimension of
    # everything in TensorList matches. Maybe just check the inferred shapes?
    index_queue = range_input_producer(range_size, num_epochs=num_epochs,
                                       shuffle=shuffle, seed=seed,
                                       capacity=capacity,
                                       shared_name=shared_name)
    index = index_queue.dequeue()
    return [array_ops.gather(t, index) for t in tensor_list]
# Helpers for the batching functions ------------------------------------------
def _flatten(tensor_list_list):
return [tensor for tensor_list in tensor_list_list for tensor in tensor_list]
class _SparseMetaData(object):
"""Store information about the Tensor: Is it sparse?, dtype, and rank."""
def __init__(self, sparse, dtype, rank):
self._sparse = sparse
self._dtype = dtype
self._rank = rank
def __eq__(self, other):
if self.sparse != other.sparse:
return False
if not self.sparse:
return True
if self.dtype != other.dtype:
return False
if not self.rank.is_compatible_with(other.rank):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "[SparseMetaData(%s, %s, %s)]" % (self.sparse, self.dtype, self.rank)
def merge_with(self, other):
if self != other:
raise ValueError("SparseMetaData objects are incompatible: %s vs. %s"
% (self, other))
if self.sparse:
self.rank.merge_with(other.rank)
return self
@property
def dtype(self):
return self._dtype
@property
def sparse(self):
return self._sparse
@property
def rank(self):
return self._rank
def _serialize_sparse_tensors(tensor_list, enqueue_many):
  """Serialize SparseTensors for feeding into batch, etc."""
  def _meta(t):
    # Record sparseness, dtype, and rank-1 shape info for each tensor.
    if isinstance(t, ops.SparseTensor):
      return _SparseMetaData(sparse=True,
                             dtype=t.dtype,
                             rank=t.shape.get_shape().with_rank(1)[0])
    return _SparseMetaData(False, None, None)

  def _maybe_serialize(t, sparse):
    # Dense tensors pass through; sparse tensors are serialized so they
    # can travel through a queue.
    if not sparse:
      return t
    if enqueue_many:
      return sparse_ops.serialize_many_sparse(t)
    return sparse_ops.serialize_sparse(t)

  sparse_info_list = [_meta(t) for t in tensor_list]
  serialized_list = [_maybe_serialize(t, info.sparse)
                     for (t, info) in zip(tensor_list, sparse_info_list)]
  return serialized_list, sparse_info_list
def _serialize_sparse_tensors_join(tensor_list_list, enqueue_many):
  """Serialize SparseTensors for feeding into batch_join, etc."""
  serialized_first, sparse_info_list = _serialize_sparse_tensors(
      tensor_list_list[0], enqueue_many)
  serialized_list_list = [serialized_first]
  for tensor_list in tensor_list_list[1:]:
    serialized, candidate_info = _serialize_sparse_tensors(
        tensor_list, enqueue_many)
    # Every list must have the same sparse/dense layout as the first one.
    if sparse_info_list != candidate_info:
      raise ValueError("Inconsistent SparseTensors list: %s vs. %s"
                       % (tensor_list_list[0], tensor_list))
    sparse_info_list = [info.merge_with(candidate)
                        for (info, candidate)
                        in zip(sparse_info_list, candidate_info)]
    serialized_list_list.append(serialized)
  return (serialized_list_list, sparse_info_list)
def _deserialize_sparse_tensors(serialized_list, sparse_info_list):
  """Deserialize SparseTensors after dequeue in batch, batch_join, etc."""
  # A lone (non-sequence) input means a single tensor was dequeued;
  # normalize to a tuple for the zip below and unwrap on return.
  # NOTE(review): collections.Sequence moved to collections.abc in Python 3
  # and the old alias was removed in 3.10 -- revisit if this must run on
  # modern Python.
  received_sequence = isinstance(serialized_list, collections.Sequence)
  if not received_sequence:
    serialized_list = (serialized_list,)
  # Deserialize only the entries recorded as sparse; dense tensors pass
  # through unchanged.
  tensors = [
      sparse_ops.deserialize_many_sparse(s, info.dtype, info.rank.value)
      if info.sparse else s
      for (s, info)
      in zip(serialized_list, sparse_info_list)]
  return tensors if received_sequence else tensors[0]
def _validate(tensor_list):
  """Convert `tensor_list` to tensors, requiring at least one entry."""
  converted = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
  if not converted:
    raise ValueError("Expected at least one tensor in batch().")
  return converted
def _validate_join(tensor_list_list):
  """Convert each inner list to tensors, requiring at least one list."""
  converted = [ops.convert_n_to_tensor_or_indexed_slices(tl)
               for tl in tensor_list_list]
  if not converted:
    raise ValueError("Expected at least one input in batch_join().")
  return converted
def _dtypes(tensor_list_list):
all_types = [[t.dtype for t in tl] for tl in tensor_list_list]
types = all_types[0]
for other_types in all_types[1:]:
if other_types != types:
raise TypeError("Expected types to be consistent: %s vs. %s." %
(", ".join(x.name for x in types),
", ".join(x.name for x in other_types)))
return types
def _merge_shapes(shape_list, enqueue_many):
  """Merge shapes from all inputs into a single list-form shape.

  Args:
    shape_list: A list of shapes (anything accepted by
      `tensor_shape.as_shape`), one per input tensor at a given position.
    enqueue_many: If True, each shape describes a batch, so the leading
      batch dimension is stripped before merging.

  Returns:
    The resulting shape, as a list.
  """
  shape_list = [tensor_shape.as_shape(s) for s in shape_list]
  if enqueue_many:
    # We want the shapes without the leading batch dimension.
    shape_list = [s.with_rank_at_least(1)[1:] for s in shape_list]
  merged_shape = shape_list[0]
  for s in shape_list[1:]:
    # NOTE(review): merge_with returns the merged shape rather than
    # mutating in place; the return value is discarded here, so this loop
    # only checks compatibility and the first shape is what gets returned.
    # Confirm this is the intended behavior.
    merged_shape.merge_with(s)
  return merged_shape.as_list()
def _shapes(tensor_list_list, shapes, enqueue_many):
  """Return `shapes` if given, else infer per-position shapes."""
  if shapes is not None:
    return shapes
  # Merge, position by position, the static shapes across all lists.
  num_tensors = len(tensor_list_list[0])
  return [
      _merge_shapes(
          [tl[i].get_shape().as_list() for tl in tensor_list_list],
          enqueue_many)
      for i in xrange(num_tensors)]
def _enqueue_join(queue, tensor_list_list, enqueue_many):
  """Register a QueueRunner with one enqueue op per tensor list."""
  enqueue_fn = queue.enqueue_many if enqueue_many else queue.enqueue
  enqueue_ops = [enqueue_fn(tl) for tl in tensor_list_list]
  queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
def _enqueue(queue, tensor_list, threads, enqueue_many):
  """Register a QueueRunner running `threads` copies of one enqueue op."""
  enqueue_fn = queue.enqueue_many if enqueue_many else queue.enqueue
  # The same op is replicated so the runner drives it from `threads` threads.
  enqueue_ops = [enqueue_fn(tensor_list)] * threads
  queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
# Batching functions ----------------------------------------------------------

def batch(tensor_list, batch_size, num_threads=1, capacity=32,
          enqueue_many=False, shapes=None,
          shared_name=None, name=None):
  """Creates batches of tensors in `tensor_list`.

  This function is implemented using a queue. A `QueueRunner` for the
  queue is added to the current `Graph`'s `QUEUE_RUNNER` collection.

  If `enqueue_many` is `False`, `tensor_list` is assumed to represent a
  single example. An input tensor with shape `[x, y, z]` will be output
  as a tensor with shape `[batch_size, x, y, z]`.

  If `enqueue_many` is `True`, `tensor_list` is assumed to represent a
  batch of examples, where the first dimension is indexed by example,
  and all members of `tensor_list` should have the same size in the
  first dimension. If an input tensor has shape `[*, x, y, z]`, the
  output will have shape `[batch_size, x, y, z]`. The `capacity` argument
  controls how long the prefetching is allowed to grow the queues.

  The returned operation is a dequeue operation and will throw
  `tf.errors.OutOfRangeError` if the input queue is exhausted. If this
  operation is feeding another input queue, its queue runner will catch
  this exception, however, if this operation is used in your main thread
  you are responsible for catching this yourself.

  *N.B.:* You must ensure that either (i) the `shapes` argument is
  passed, or (ii) all of the tensors in `tensor_list` must have
  fully-defined shapes. `ValueError` will be raised if neither of
  these conditions holds.

  Args:
    tensor_list: The list of tensors to enqueue.
    batch_size: The new batch size pulled from the queue.
    num_threads: The number of threads enqueuing `tensor_list`.
    capacity: An integer. The maximum number of elements in the queue.
    enqueue_many: Whether each tensor in `tensor_list` is a single example.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensor_list`.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list of tensors with the same number and types as `tensor_list`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensor_list`.
  """
  with ops.op_scope(tensor_list, name, "batch") as name:
    tensor_list = _validate(tensor_list)
    # SparseTensors cannot travel through a queue directly; serialize them
    # and remember which entries were sparse so they can be reconstructed
    # after the dequeue below.
    (tensor_list, sparse_info) = _serialize_sparse_tensors(
        tensor_list, enqueue_many)
    types = _dtypes([tensor_list])
    shapes = _shapes([tensor_list], shapes, enqueue_many)
    # TODO(josh11b,mrry): Switch to BatchQueue once it is written.
    queue = data_flow_ops.FIFOQueue(
        capacity=capacity, dtypes=types, shapes=shapes,
        shared_name=shared_name)
    _enqueue(queue, tensor_list, num_threads, enqueue_many)
    # Report queue fullness as a fraction in [0, 1].
    logging_ops.scalar_summary(
        "queue/%s/fraction_of_%d_full" % (queue.name, capacity),
        math_ops.cast(queue.size(), dtypes.float32) * (1. / capacity))

    dequeued = queue.dequeue_many(batch_size, name=name)
    dequeued = _deserialize_sparse_tensors(dequeued, sparse_info)
    return dequeued
# TODO(josh11b): Add a thread_multiplier or num_threads (that has to be
# a multiple of len(tensor_list_list)?) parameter, to address the use
# case where you want more parallelism than you can support different
# readers (either because you don't have that many files or can't
# read that many files in parallel due to the number of seeks required).
# Once this is done, batch() can be written as a call to batch_join().
def batch_join(tensor_list_list, batch_size, capacity=32, enqueue_many=False,
               shapes=None, shared_name=None, name=None):
  """Create batches by dequeuing from a queue fed by several threads.

  One enqueuing thread is started per entry of `tensor_list_list`; thread
  `i` feeds the tensors in `tensor_list_list[i]` into a shared FIFO queue,
  and a `QueueRunner` for that queue is registered in the current graph's
  `QUEUE_RUNNER` collection.  `tensor_list_list[i1][j]` must agree with
  `tensor_list_list[i2][j]` in type and shape, except in the first
  dimension when `enqueue_many` is true.

  With `enqueue_many=False` every inner list is a single example, so an
  input tensor `x` produces an output of shape `[batch_size] + x.shape`.
  With `enqueue_many=True` every inner list is a batch whose first
  dimension indexes examples; slices along that dimension are enqueued
  individually, giving outputs of shape `[batch_size] + x.shape[1:]`.

  The `capacity` argument bounds how far ahead the prefetching may run.

  The returned operation is a dequeue and will throw
  `tf.errors.OutOfRangeError` once the input queue is exhausted.  Unless
  this operation feeds another input queue (whose queue runner catches
  the exception), you are responsible for catching it in your main
  thread.

  *N.B.:* You must either (i) pass the `shapes` argument or (ii) ensure
  every tensor in `tensor_list_list` has a fully-defined shape;
  `ValueError` is raised otherwise.

  Args:
    tensor_list_list: A list of tuples of tensors to enqueue.
    batch_size: An integer. The new batch size pulled from the queue.
    capacity: An integer. The maximum number of elements in the queue.
    enqueue_many: Whether each tensor in `tensor_list_list` is a single
      example.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensor_list_list[i]`.
    shared_name: (Optional) If set, this queue will be shared under the
      given name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list of tensors with the same number and types as
    `tensor_list_list[i]`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensor_list_list`.
  """
  with ops.op_scope(_flatten(tensor_list_list), name, "batch_join") as name:
    validated = _validate_join(tensor_list_list)
    validated, sparse_meta = _serialize_sparse_tensors_join(
        validated, enqueue_many)
    dtype_list = _dtypes(validated)
    shapes = _shapes(validated, shapes, enqueue_many)
    # TODO(josh11b,mrry): Switch to BatchQueue once it is written.
    fifo = data_flow_ops.FIFOQueue(
        capacity=capacity, dtypes=dtype_list, shapes=shapes,
        shared_name=shared_name)
    _enqueue_join(fifo, validated, enqueue_many)
    # Export how full the queue is (1.0 == at capacity).
    logging_ops.scalar_summary(
        "queue/%s/fraction_of_%d_full" % (fifo.name, capacity),
        math_ops.cast(fifo.size(), dtypes.float32) * (1. / capacity))
    batched = fifo.dequeue_many(batch_size, name=name)
    return _deserialize_sparse_tensors(batched, sparse_meta)
def shuffle_batch(tensor_list, batch_size, capacity, min_after_dequeue,
                  num_threads=1, seed=None, enqueue_many=False, shapes=None,
                  shared_name=None, name=None):
  """Create shuffled batches by dequeuing from a randomizing queue.

  Adds to the current `Graph`:

  * A `RandomShuffleQueue` into which tensors from `tensor_list` are
    enqueued.
  * A `dequeue_many` operation that produces the batches.
  * A `QueueRunner` in the `QUEUE_RUNNER` collection, running
    `num_threads` enqueuing threads.

  With `enqueue_many=False`, `tensor_list` represents a single example:
  an input of shape `[x, y, z]` yields an output of shape
  `[batch_size, x, y, z]`.  With `enqueue_many=True`, the first
  dimension of each tensor indexes examples (all tensors must agree on
  its size), so an input of shape `[*, x, y, z]` yields
  `[batch_size, x, y, z]`.

  `capacity` bounds how far ahead the prefetching may run;
  `min_after_dequeue` sets how many elements must remain after each
  dequeue, which controls the quality of the shuffling.

  The returned operation is a dequeue and will throw
  `tf.errors.OutOfRangeError` once the input queue is exhausted.  Unless
  this operation feeds another input queue (whose queue runner catches
  the exception), you are responsible for catching it in your main
  thread.

  For example:

  ```python
  # Creates batches of 32 images and 32 labels.
  image_batch, label_batch = tf.train.shuffle_batch(
        [single_image, single_label],
        batch_size=32,
        num_threads=4,
        capacity=50000,
        min_after_dequeue=10000)
  ```

  *N.B.:* You must either (i) pass the `shapes` argument or (ii) ensure
  every tensor in `tensor_list` has a fully-defined shape; `ValueError`
  is raised otherwise.

  Args:
    tensor_list: The list of tensors to enqueue.
    batch_size: The new batch size pulled from the queue.
    capacity: An integer. The maximum number of elements in the queue.
    min_after_dequeue: Minimum number elements in the queue after a
      dequeue, used to ensure a level of mixing of elements.
    num_threads: The number of threads enqueuing `tensor_list`.
    seed: Seed for the random shuffling within the queue.
    enqueue_many: Whether each tensor in `tensor_list` is a single example.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensor_list`.
    shared_name: (Optional) If set, this queue will be shared under the
      given name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list of tensors with the same number and types as `tensor_list`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensor_list`.
  """
  with ops.op_scope(tensor_list, name, "shuffle_batch") as name:
    validated = _validate(tensor_list)
    validated, sparse_meta = _serialize_sparse_tensors(
        validated, enqueue_many)
    dtype_list = _dtypes([validated])
    shapes = _shapes([validated], shapes, enqueue_many)
    shuffle_queue = data_flow_ops.RandomShuffleQueue(
        capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
        dtypes=dtype_list, shapes=shapes, shared_name=shared_name)
    _enqueue(shuffle_queue, validated, num_threads, enqueue_many)
    # Fraction of the usable (above-minimum) part of the queue in use.
    usable = capacity - min_after_dequeue
    full_frac = (math_ops.cast(
        math_ops.maximum(0, shuffle_queue.size() - min_after_dequeue),
        dtypes.float32) * (1. / usable))
    # Note that name contains a '/' at the end so we intentionally do not
    # place a '/' after %s below.
    logging_ops.scalar_summary(
        "queue/%sfraction_over_%d_of_%d_full" %
        (name, min_after_dequeue, usable),
        full_frac)
    batched = shuffle_queue.dequeue_many(batch_size, name=name)
    return _deserialize_sparse_tensors(batched, sparse_meta)
def shuffle_batch_join(tensor_list_list, batch_size, capacity,
                       min_after_dequeue, seed=None, enqueue_many=False,
                       shapes=None, shared_name=None, name=None):
  """Create shuffled batches from several per-thread tensor lists.

  One enqueuing thread is started per entry of `tensor_list_list`; thread
  `i` feeds the tensors in `tensor_list_list[i]` into a shared
  `RandomShuffleQueue`.  Adds to the current `Graph`:

  * The shuffling queue.
  * A `dequeue_many` operation that produces the batches.
  * A `QueueRunner` in the `QUEUE_RUNNER` collection to run the
    enqueuing threads.

  `tensor_list_list[i1][j]` must agree with `tensor_list_list[i2][j]` in
  type and shape, except in the first dimension when `enqueue_many` is
  true.

  With `enqueue_many=False` every inner list is a single example: an
  input of shape `[x, y, z]` yields an output of shape
  `[batch_size, x, y, z]`.  With `enqueue_many=True` the first dimension
  of each tensor indexes examples, so an input of shape `[*, x, y, z]`
  yields `[batch_size, x, y, z]`.

  `capacity` bounds how far ahead the prefetching may run;
  `min_after_dequeue` sets how many elements must remain after each
  dequeue, which controls the quality of the shuffling.

  The returned operation is a dequeue and will throw
  `tf.errors.OutOfRangeError` once the input queue is exhausted.  Unless
  this operation feeds another input queue (whose queue runner catches
  the exception), you are responsible for catching it in your main
  thread.

  Args:
    tensor_list_list: A list of tuples of tensors to enqueue.
    batch_size: An integer. The new batch size pulled from the queue.
    capacity: An integer. The maximum number of elements in the queue.
    min_after_dequeue: Minimum number elements in the queue after a
      dequeue, used to ensure a level of mixing of elements.
    seed: Seed for the random shuffling within the queue.
    enqueue_many: Whether each tensor in `tensor_list_list` is a single
      example.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensor_list_list[i]`.
    shared_name: (Optional) If set, this queue will be shared under the
      given name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list of tensors with the same number and types as
    `tensor_list_list[i]`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensor_list_list`.
  """
  with ops.op_scope(
      _flatten(tensor_list_list), name, "shuffle_batch_join") as name:
    validated = _validate_join(tensor_list_list)
    validated, sparse_meta = _serialize_sparse_tensors_join(
        validated, enqueue_many)
    dtype_list = _dtypes(validated)
    shapes = _shapes(validated, shapes, enqueue_many)
    shuffle_queue = data_flow_ops.RandomShuffleQueue(
        capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
        dtypes=dtype_list, shapes=shapes, shared_name=shared_name)
    _enqueue_join(shuffle_queue, validated, enqueue_many)
    # Fraction of the usable (above-minimum) part of the queue in use.
    usable = capacity - min_after_dequeue
    full_frac = (math_ops.cast(
        math_ops.maximum(0, shuffle_queue.size() - min_after_dequeue),
        dtypes.float32) * (1. / usable))
    # Note that name contains a '/' at the end so we intentionally do not
    # place a '/' after %s below.
    logging_ops.scalar_summary(
        "queue/%sfraction_over_%d_of_%d_full" %
        (name, min_after_dequeue, usable),
        full_frac)
    batched = shuffle_queue.dequeue_many(batch_size, name=name)
    return _deserialize_sparse_tensors(batched, sparse_meta)
|
|
import sys
import time
import xmpp
from PyGtalkRobot import GtalkRobot
roster = []   # every JID we currently see presence for (all people online, not just joined)
users = {}    # stripped JID -> [JID object, nickname] for users who have /join'ed the channel
nicks = []    # nicknames currently in use (kept in sync with the values of `users`)
killList = {} # stripped JID -> unix time their last connection dropped; reaped after a 30s grace period
class polbot(GtalkRobot):
    # IRC-style "#khmer" channel bridged over Google Talk.  The module-level
    # containers (roster, users, nicks, killList) hold the channel state.
    # NOTE: GtalkRobot dispatches each incoming message by matching it
    # against the regex stored in each command_* method's DOCSTRING, trying
    # methods in ascending priority-number order.  Those docstrings are
    # runtime behaviour, not documentation -- do not reword them.
    status = "Pol Bot"  # presence/status line; doubles as the channel topic
    #def command_priority###_name
    def echoToAll(self, message):
        # Broadcast `message` to every joined user, including the speaker.
        for user in users:
            self.replyMessage(users[user][0], message)
    def echoToEveryoneElse(self, message, userME):
        # Broadcast `message` to every joined user except `userME` (a JID).
        for user in users:
            if user != userME.getStripped():
                self.replyMessage(users[user][0], message)
    def sendCommands(self, user):
        # Send the help blurb listing the recognised commands.
        self.replyMessage(user, "HELP: Valid commands are /me, /who, /whois, /join, /quit, /help, /topic and /nick")
        self.replyMessage(user, "HELP: If your client filters out /, use # (ie #me, #who...)")
    def command_001_join(self, user, message, args):
        '''^[/#]join( .*)?$(?i)'''
        # Join the channel; the default nick is the local part of the JID.
        if user.getStripped() not in users:
            nick = user.getStripped()
            # NOTE(review): identity test `is not -1` happens to work for
            # CPython's cached small ints but should be `!= -1`.
            if nick.find('@') is not -1:
                nick = nick[0:nick.find('@')]
            self.replyMessage(user, "You have joined #khmer")
            self.sendCommands(user)
            self.echoToAll(nick + " has joined #khmer")
            users[user.getStripped()] = [user, nick]
            nicks.append(nick)
    def command_002_quit(self, user, message, args):
        '''^[/#]quit( .*)?$(?i)'''
        # Leave the channel and release the nickname.
        if user.getStripped() in users:
            self.echoToAll(users[user.getStripped()][1] + " has left #khmer")
            nicks.remove(users[user.getStripped()][1])
            del users[user.getStripped()]
    def command_007_nick(self, user, message, args):
        '''^[/#]nick ([a-zA-Z0-9]+)$(?i)'''
        # Change nickname: truncated to 15 chars, must be unique
        # case-insensitively (via the module-level lower() helper).
        if user.getStripped() in users:
            #import pdb; pdb.set_trace()
            newnick = args[0]
            if len(args[0]) > 15:
                newnick = args[0][0:15]
            if newnick.lower() in map(lower, nicks):
                self.replyMessage(user, "NICK: Someone already exists with that nickname. Cleanse the population of them first")
            else:
                oldnick = users[user.getStripped()][1]
                users[user.getStripped()][1] = newnick
                self.echoToAll("NICK: " + oldnick + " changed nickname to " + newnick)
                nicks.remove(oldnick)
                nicks.append(newnick)
    def command_009_nickfail(self, user, message, args):
        '''^[/#]nick.*(?i)'''
        # Fallback when /nick did not match the strict pattern above.
        if user.getStripped() in users:
            self.replyMessage(user, "Invalid nick. Only A-Z and numbers are allowed")
    def command_010_who(self, user, message, args):
        '''^[/#]who( .*)?$(?i)'''
        # List the nicknames of everyone currently joined.
        everybody = "CURRENT KHMER ROUGE MEMBERS ARE: "
        for person in users:
            everybody = everybody + users[person][1] + ", "
        # NOTE(review): [0:-1] drops only the trailing space and leaves a
        # trailing comma; [0:-2] was probably intended.
        everybody = everybody[0:-1]
        self.replyMessage(user, everybody)
    def command_011_whois(self, user, message, args):
        '''^[/#]whois( [a-zA-Z0-9]+)?$(?i)'''
        # Map a nickname back to its gmail handle.
        if args[0]:
            nickToLookup = args[0].lstrip()
            if nickToLookup in nicks:
                for key in users:
                    if users[key][1] == nickToLookup:
                        self.replyMessage(user, "WHOIS: " + nickToLookup + "'s gmail handle is " + users[key][0].getStripped())
            else:
                self.replyMessage(user, "WHOIS: No user nicknamed " + nickToLookup)
        else:
            self.replyMessage(user, "WHOIS: Command format is /whois NICK")
    def command_015_topic(self, user, message, args):
        '''^[/#]topic (.*)$(?i)'''
        # Set the channel topic, which is also the bot's presence status.
        if user.getStripped() in users:
            self.status = args[0]
            self.setState("default", args[0])
            self.echoToAll(users[user.getStripped()][1] + " changed topic to '" + args[0] +"'")
    def command_020_help(self, user, message, args):
        '''[/#]help( .*)?$(?i)'''
        self.sendCommands(user)
    def command_020_me(self, user, message, args):
        '''[/#]me( .*)?$(?i)'''
        # IRC-style action message: "*nick does something".
        if user.getStripped() in users:
            if args[0]:
                self.echoToAll("*"+ users[user.getStripped()][1] + args[0])
    def command_021_ohno(self, user, message, args):
        '''[#/]o\\\\( .*)?'''
        # Easter egg: the bot panics along with anyone typing /o\.
        if user.getStripped() in users:
            if args[0]:
                restOfLine = args[0]
            else:
                restOfLine = ""
            #self.echoToEveryoneElse("<"+ users[user.getStripped()][1] + "> /o\\" + restOfLine, user)
            if restOfLine:
                restOfLine = restOfLine + "!?!?!?!"
            self.echoToAll("<polbot> /o\\ /o\\" + restOfLine)
    def command_030_poke(self, user, message, args):
        '''[/#]poke (.*)$(?i)'''
        # Admin-only (hard-coded "desultir"): ping matching roster entries.
        if "desultir" in user.getStripped():
            for connection in roster:
                if args[0] in str(connection):
                    self.replyMessage(connection, "oi")
    def command_030_kick(self, user, message, args):
        '''[/#]kick (.*)$(?i)'''
        # Admin-only: remove every user whose nick contains args[0].
        if "desultir" in user.getStripped():
            # NOTE(review): removes from `nicks` while iterating it, which
            # can skip entries when several nicks match.
            for nick in nicks:
                if args[0] in nick:
                    self.echoToAll(" * "+ nick + " was kicked out of the Khmer Rouge by "+ users[user.getStripped()][1])
                    nicks.remove(nick)
            for user, rec in users.items():
                if args[0] in rec[1]:
                    del(users[user])
    def command_030_roster(self, user, message, args):
        '''[/#]roster'''
        # Admin-only: dump the raw presence roster.
        if "desultir" in user.getStripped():
            rosterString = "ROSTER: "
            for connection in roster:
                rosterString = rosterString + " " + str(connection)
            self.replyMessage(user, rosterString)
    def command_080_echofail(self, user, message, args):
        '''^<.*'''
        # Deliberately swallow lines that look like already-echoed chat
        # ("<nick> ...") to avoid echo loops.
        None
    def command_099_commandfail(self, user, message, args):
        '''^(/.*)'''
        # Any unrecognised /command: report it and resend the help text.
        self.replyMessage(user, "UNKNOWN COMMAND: " + args[0])
        self.sendCommands(user)
    #someone typed something non-command, echo to all
    def command_100_default(self, user, message, args):
        '''(.*)'''
        # Catch-all: first reap users whose last connection dropped more
        # than 30 seconds ago (see presenceHandler), then relay the
        # message to the rest of the channel.
        if killList:
            delete = []
            for kill in killList:
                if int(time.time()) - killList[kill] > 30:
                    #user timed out
                    if kill in users:
                        self.echoToEveryoneElse(users[kill][1] + " has left #Khmer - timed out", users[kill][0])
                        nicks.remove(users[kill][1])
                        del users[kill]
                    delete.append(kill)
            for item in delete:
                del killList[item]
        if user.getStripped() in users:
            self.echoToEveryoneElse("<"+ users[user.getStripped()][1] + "> " + message, user)
        else:
            self.replyMessage(user, "You are not currently in channel. Type /join or #join to join")
    def presenceHandler(self, conn, presence):
        # Track connections per bare JID.  When the *last* connection of a
        # user goes 'unavailable', start a 30s grace timer via killList
        # (reaped in command_100_default).  Coming back online cancels it.
        UID = self.getUIDfromPresence(presence)
        if presence.getType()=='unavailable':
            print presence.getFrom(), ",", presence.getType()
            connectioncount = 0
            if presence.getFrom() in roster:
                roster.remove(presence.getFrom())
            for connection in roster:
                connectionString = str(connection)
                if connectionString[0:connectionString.find('/')] == UID:
                    if connection != presence.getFrom():
                        #user still connected from somewhere else
                        connectioncount = connectioncount + 1
            if connectioncount == 0:
                #wait 30 seconds
                #then kill
                #import pdb; pdb.set_trace()
                killList[UID] = int(time.time())
        elif presence.getType()=='subscribe':
            # Auto-accept subscription requests.
            jid = presence.getFrom().getStripped()
            self.authorize(jid)
        else:
            print presence.getFrom(), ",", presence.getType()
            if UID in killList:
                del killList[UID]
            if presence.getFrom() not in roster:
                roster.append(presence.getFrom())
            #if "desultir" in presence.getFrom().getStripped():
                #self.replyMessage(presence.getFrom(), "hai")
        GtalkRobot.presenceHandler(self, conn, presence)
    def getUIDfromPresence(self, presence):
        # Bare JID: everything before the '/resource' suffix.
        UID = str(presence.getFrom())
        return UID[0:UID.find('/')]
def lower(text):
    """Return `text` lower-cased.

    Positional helper for `map(lower, nicks)` in the nick-uniqueness
    check.  The parameter was renamed from `input` to avoid shadowing
    the builtin; every call site in this file passes it positionally.
    """
    return text.lower()
if __name__ == "__main__":
bot = polbot()
conf = open('config.txt', 'r')
username = conf.readline().strip()
password = conf.readline().strip()
import pdb; pdb.set_trace()
conf.close()
bot.setState('default', bot.status)
while True:
try:
bot.start(username, password)
except xmpp.protocol.SeeOtherHost:
print sys.exc_info()[0]
pass
import xmpp
log = open("log.txt", 'w+')
log.write("crash")
log.close()
|
|
"""
mingprovider Module
This contains the class which allows sprox to interface with any database.
Copyright © 2009 Jorge Vargas
Original Version by Jorge Vargas 2009
Released under MIT license.
"""
from bson.errors import InvalidId
from sprox.iprovider import IProvider
from sprox.util import timestamp
import datetime, inspect
try:
from ming.odm import mapper, ForeignIdProperty, FieldProperty, RelationProperty
from ming.odm.declarative import MappedClass
from ming.odm.property import OneToManyJoin, ManyToOneJoin, ORMProperty
except ImportError: #pragma nocover
from ming.orm import mapper, ForeignIdProperty, FieldProperty, RelationProperty
from ming.orm.declarative import MappedClass
from ming.orm.property import OneToManyJoin, ManyToOneJoin, ORMProperty
from ming import schema as S
import bson
from bson import ObjectId
import re
from .widgetselector import MingWidgetSelector
from .validatorselector import MingValidatorSelector
from pymongo import ASCENDING, DESCENDING
class MingProvider(IProvider):
    # Sprox data provider backed by the Ming ODM (MongoDB).
    default_widget_selector_type = MingWidgetSelector
    default_validator_selector_type = MingValidatorSelector
    def __init__(self, hint, **hints):
        # `hint` is the Ming session used by flush(); extra hints are ignored.
        self.session = hint
    def get_field(self, entity, name):
        """Get a field with the given field name."""
        return mapper(entity).property_index[name]
    def get_fields(self, entity):
        """Get all of the fields for a given entity."""
        # Callers may hand us a zero-arg factory instead of the class.
        if inspect.isfunction(entity):
            entity = entity()
        return [prop.name for prop in mapper(entity).properties if isinstance(prop, ORMProperty)]
    @property
    def _entities(self):
        """Mapping of mapped-class name -> Ming mapper, built from the registry."""
        # NOTE(review): the string '__entities' is not name-mangled, while
        # `self.__entities = ...` stores `_MingProvider__entities`, so this
        # getattr always returns None and the dict is rebuilt on every
        # access -- the cache never hits.
        entities = getattr(self, '__entities', None)
        if entities is None:
            entities = dict(((m.mapped_class.__name__, m) for m in MappedClass._registry.values()))
            self.__entities = entities
        return entities
    def get_entity(self, name):
        """Get an entity with the given name."""
        return self._entities[name].mapped_class
    def get_entities(self):
        """Get all entities available for this provider."""
        return iter(self._entities.keys())
    def get_primary_fields(self, entity):
        """Get the fields in the entity which uniquely identifies a record."""
        return [self.get_primary_field(entity)]
    def get_primary_field(self, entity):
        """Get the single primary field for an entity"""
        # MongoDB documents are always keyed by _id.
        return '_id'
    def _get_meta(self, entity, field_name, metaprop):
        """Returns the value of the given sprox meta property for the field."""
        field = self.get_field(entity, field_name)
        return getattr(field, "sprox_meta", {}).get(metaprop, None)
    def get_view_field_name(self, entity, possible_names=None):
        """Get the name of the field which first matches the possible columns

        :Arguments:
          entity
            the entity where the field is located
          possible_names
            a list of names which define what the view field may contain.  This allows the first
            field that has a name in the list of names will be returned as the view field.
        """
        if possible_names is None:
            possible_names = ('_name', 'name', 'description', 'title')
        fields = self.get_fields(entity)
        # A field explicitly flagged with sprox_meta['title'] wins outright.
        for field in fields:
            if self._get_meta(entity, field, 'title'):
                return field
        view_field = None
        # Per candidate name: first try an exact match, then a substring
        # match, before moving on to the next candidate.
        for column_name in possible_names:
            for actual_name in fields:
                if column_name == actual_name:
                    view_field = actual_name
                    break
            if view_field:
                break;
            for actual_name in fields:
                if column_name in actual_name:
                    view_field = actual_name
                    break
            if view_field:
                break;
        # Fall back to the first field when nothing matched.
        if view_field is None:
            view_field = fields[0]
        return view_field
    def get_dropdown_options(self, entity_or_field, field_name, view_names=None):
        """Get all dropdown options for a given entity field.

        :Arguments:
          entity_or_field
            either the entity where the field is located, or the field itself
          field_name
            if the entity is specified, name of the field in the entity. Otherwise, None
          view_names
            a list of names which define what the view field may contain.  This allows the first
            field that has a name in the list of names will be returned as the view field.

        :Returns:
        A list of tuples with (id, view_value) as items.
        """
        if field_name is not None:
            field = self.get_field(entity_or_field, field_name)
        else:
            field = entity_or_field
        if isinstance(field, FieldProperty):
            # Plain fields only support dropdowns for S.OneOf enumerations.
            field_type = getattr(field, 'field_type', None)
            if field_type is None:
                f = getattr(field, 'field', None)
                if f is not None:
                    field = field.field
                    field_type = field.type
            schemaitem = field_type
            if isinstance(schemaitem, S.OneOf):
                return [ (opt,opt) for opt in schemaitem.options ]
            raise NotImplementedError("get_dropdown_options doesn't know how to get the options for field %r of type %r" % (field, schemaitem))
        if not isinstance(field, RelationProperty):
            raise NotImplementedError("get_dropdown_options expected a FieldProperty or RelationProperty field, but got %r" % field)
        try:
            join = field.join
            iter = join.rel_cls.query.find()
            rel_cls = join.rel_cls
        #this seems like a work around for a bug in ming.
        except KeyError: # pragma: no cover
            join = field.related
            iter = join.query.find()
            rel_cls = join
        view_field = self.get_view_field_name(rel_cls, view_names)
        return [ (str(obj._id), getattr(obj, view_field)) for obj in iter ]
    def get_relations(self, entity):
        """Get all of the field names in an entity which are related to other entities."""
        return [prop.name for prop in mapper(entity).properties if isinstance(prop, RelationProperty)]
    def is_relation(self, entity, field_name):
        """Determine if a field is related to a field in another entity."""
        return isinstance(self.get_field(entity, field_name), RelationProperty)
    def is_query(self, entity, value):
        """determines if a field is a query instead of actual list of data"""
        #Currently not supported in MING
        return False
    def is_nullable(self, entity, field_name):
        """Determine if a field is nullable."""
        fld = self.get_field(entity, field_name)
        if isinstance(fld, RelationProperty):
            # check the required attribute on the corresponding foreign key field
            fld = fld.join.prop
        return not getattr(fld, 'kwargs', {}).get("required", False)
    def get_field_default(self, field):
        """Return (True, default) when the schema declares an if_missing default, else (False, None)."""
        field = getattr(field, 'field', None)
        if field is not None:
            if_missing = field.schema.if_missing
            if if_missing is not None:
                return (True, if_missing)
        return (False, None)
    def get_field_provider_specific_widget_args(self, entity, field, field_name):
        # No Ming-specific widget arguments.
        return {}
    def get_default_values(self, entity, params):
        # Params pass through unchanged.
        return params
    def _related_object_id(self, value):
        """Return the _id for a mapped object, or parse `value` as an ObjectId."""
        if isinstance(value, MappedClass):
            return value._id
        return ObjectId(value)
    def _cast_value(self, entity, key, value):
        """Coerce a form/string value into the type the Ming field expects."""
        #handles the case where an record with no id is being created
        if key == '_id' and value == '':
            value = ObjectId()
        field = getattr(entity, key)
        relations = self.get_relations(entity)
        if key in relations:
            # Relation fields: resolve the id(s) into the related document(s).
            related = field.related
            if isinstance(value, list):
                return related.query.find({'_id':{'$in':[self._related_object_id(i) for i in value]}}).all()
            else:
                return self.get_obj(related, {'_id':self._related_object_id(value)})
        field = getattr(field, 'field', None)
        if field is not None:
            if field.type is S.DateTime or field.type is datetime.datetime:
                # Only string inputs need parsing; the expected format is
                # "YYYY-MM-DD HH:MM:SS".
                if isinstance(value, str):
                    return datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
                else:
                    return value
            elif field.type is S.Binary:
                return bson.Binary(value)
            elif field.type in (S.Int, int):
                return int(value)
            elif field.type is S.Bool:
                # Form checkboxes arrive as 'true'/'false' strings.
                if value in ('true', 'false'):
                    return value == 'true' and True or False
                else:
                    return bool(value)
        return value
    def create(self, entity, params):
        """Create an entry of type entity with the given params."""
        obj = entity()
        fields = self.get_fields(entity)
        for key, value in params.items():
            # Ignore params that are not mapped fields (e.g. form extras).
            if key not in fields:
                continue
            value = self._cast_value(entity, key, value)
            if value is not None:
                try:
                    setattr(obj,key,value)
                except TypeError:
                    # Skip values the mapped field refuses.
                    pass
        self.flush()
        return obj
    def flush(self):
        # Persist and release all sessions managed by this provider.
        self.session.flush_all()
        self.session.close_all()
    def get_obj(self, entity, params, fields=None, omit_fields=None):
        """Fetch a single document matching params; '_id' lookups are coerced to ObjectId."""
        if '_id' in params:
            return entity.query.find_by(_id=ObjectId(params['_id'])).first()
        return entity.query.find_by(**params).first()
    def get(self, entity, params, fields=None, omit_fields=None):
        """Fetch a single document as a plain dict (see dictify)."""
        return self.dictify(self.get_obj(entity, params), fields, omit_fields)
    def update(self, entity, params, omit_fields=None):
        """Update an entry of type entity which matches the params."""
        obj = self.get_obj(entity, params)
        # NOTE(review): assumes params always carries '_id'; a missing key
        # raises KeyError here.  Sprox bookkeeping keys are stripped below.
        params.pop('_id')
        try:
            params.pop('sprox_id')
        except KeyError:
            pass
        try:
            params.pop('_method')
        except KeyError:
            pass
        fields = self.get_fields(entity)
        for key, value in params.items():
            if key not in fields:
                continue
            if omit_fields and key in omit_fields:
                continue
            value = self._cast_value(entity, key, value)
            if value is not None:
                try:
                    setattr(obj,key,value)
                except TypeError:
                    # Skip values the mapped field refuses.
                    pass
        return obj
    def delete(self, entity, params):
        """Delete an entry of type entity which matches the params."""
        obj = self.get_obj(entity, params)
        obj.delete()
        return obj
    def query(self, entity, limit=None, offset=0, limit_fields=None,
              order_by=None, desc=False, filters={},
              substring_filters=[], **kw):
        """Paged/sorted find; returns (total_count, results).

        NOTE(review): mutable default arguments -- the loop below mutates
        `filters`, so compiled-regex filters can leak between calls that
        rely on the defaults.  `filters=None` plus a local dict would be
        safer; confirm callers before changing.
        """
        # Rewrite substring filters on string fields into case-insensitive
        # anchored-nowhere $regex queries.
        for field in substring_filters:
            if self.is_string(entity, field):
                filters[field] = {'$regex':re.compile(re.escape(filters[field]), re.IGNORECASE)}
        if '_id' in filters:
            try:
                filters['_id'] = ObjectId(filters['_id'])
            except InvalidId:
                # Leave unparsable ids as-is; the query simply won't match.
                pass
        iter = entity.query.find(filters)
        if offset:
            iter = iter.skip(int(offset))
        if limit is not None:
            iter = iter.limit(int(limit))
        if order_by is not None:
            if desc:
                dir = DESCENDING
            else:
                dir = ASCENDING
            iter.sort(order_by, dir)
        count = iter.count()
        return count, iter.all()
    def is_string(self, entity, field_name):
        """True when the (possibly relation-resolved) field has a String schema."""
        fld = self.get_field(entity, field_name)
        if isinstance(fld, RelationProperty):
            # check the required attribute on the corresponding foreign key field
            fld = fld.join.prop
        fld = getattr(fld, 'field', None)
        return isinstance(fld.schema, S.String)
    def is_binary(self, entity, field_name):
        """True when the (possibly relation-resolved) field has a Binary schema."""
        fld = self.get_field(entity, field_name)
        if isinstance(fld, RelationProperty):
            # check the required attribute on the corresponding foreign key field
            fld = fld.join.prop
        fld = getattr(fld, 'field', None)
        return isinstance(fld.schema,S.Binary)
    def relation_fields(self, entity, field_name):
        """Return the foreign-key field name(s) backing a relation field."""
        field = self.get_field(entity, field_name)
        if not isinstance(field, RelationProperty):
            raise TypeError("The field %r is not a relation field" % field)
        #This is here for many-to-many turbogears-ming relations
        if not field.join.prop:
            return []
        return [field.join.prop.name]
    def relation_entity(self, entity, field_name):
        """If the field in the entity is a relation field, then returns the
        entity which it relates to.

        :Returns:
        Related entity for the field
        """
        field = self.get_field(entity, field_name)
        return field.related
    def get_field_widget_args(self, entity, field_name, field):
        # Widgets get the provider itself plus the field's nullability.
        args = {}
        args['provider'] = self
        args['nullable'] = self.is_nullable(entity, field_name)
        return args
    def is_unique(self, entity, field_name, value):
        """True when no existing document holds `value` in `field_name`."""
        iter = entity.query.find({ field_name: value })
        return iter.count() == 0
    def is_unique_field(self, entity, field_name):
        """True when the field has a dedicated single-field unique index."""
        for idx in getattr(entity.__mongometa__, "unique_indexes", ()):
            if idx == (field_name,):
                return True
        return False
    def dictify(self, obj, fields=None, omit_fields=None):
        """Convert a mapped object to a plain dict, flattening relations to their primary keys."""
        if obj is None:
            return {}
        r = {}
        for prop in self.get_fields(obj.__class__):
            if fields and prop not in fields:
                continue
            if omit_fields and prop in omit_fields:
                continue
            value = getattr(obj, prop)
            if value is not None:
                if self.is_relation(obj.__class__, prop):
                    klass = self.relation_entity(obj.__class__, prop)
                    pk_name = self.get_primary_field(klass)
                    if isinstance(value, list):
                        #joins
                        # NOTE(review): the comprehension reuses the name
                        # `value`, which also leaks out of the comprehension
                        # under Python 2 -- it works, but a distinct loop
                        # variable would be clearer.
                        value = [getattr(value, pk_name) for value in value]
                    else:
                        #fks
                        value = getattr(value, pk_name)
                r[prop] = value
        return r
|
|
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError, FieldError
from django.db import models
from django.db.models import Q
from django.db.models.loading import get_model
from django.db.models.signals import m2m_changed
from django.forms import ModelForm
from cyder.base.constants import LEVELS, get_klasses
from cyder.base.mixins import ObjectUrlMixin
from cyder.base.models import BaseModel
from cyder.base.validators import validate_integer_field
from cyder.base.utils import transaction_atomic
from cyder.cydns.domain.models import Domain
from cyder.cydhcp.constants import DYNAMIC
from cyder.cydhcp.range.models import Range
from cyder.cydhcp.workgroup.models import Workgroup
from cyder.cydns.ptr.models import BasePTR
from cyder.core.validation import validate_ctnr_name
class Ctnr(BaseModel, ObjectUrlMixin):
    # A container: the unit of permission delegation.  Users are attached
    # with a per-container level through the CtnrUser through-model; the
    # domains, ranges and workgroups attached to a ctnr are the objects
    # its users may act on (see check_contains_obj).
    pretty_type = 'container'
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=100, unique=True,
                            validators=[validate_ctnr_name])
    users = models.ManyToManyField(User, null=False, related_name='ctnrs',
                                   through='CtnrUser', blank=True)
    domains = models.ManyToManyField(Domain, null=False, blank=True)
    ranges = models.ManyToManyField(Range, null=False, blank=True)
    workgroups = models.ManyToManyField(Workgroup, null=False, blank=True)
    description = models.CharField(max_length=200, blank=True)
    email_contact = models.CharField(max_length=75, blank=True)
    # Fields used by the generic search and list views.
    search_fields = ('name', 'description', 'email_contact')
    sort_fields = ('name',)
    class Meta:
        app_label = 'cyder'
        db_table = 'ctnr'
    @transaction_atomic
    def save(self, *args, **kwargs):
        # Run model validation on every save, not only in forms.
        self.full_clean()
        super(Ctnr, self).save(*args, **kwargs)
    @transaction_atomic
    def delete(self, *args, **kwargs):
        # Re-point any user profile defaulting to this container to ctnr 1
        # before deleting.  NOTE(review): assumes a container with pk 1
        # always exists (presumably the "global" ctnr) -- confirm.
        UserProfile = get_model('cyder','userprofile')
        for user_profile in UserProfile.objects.filter(default_ctnr_id=self.id):
            user_profile.default_ctnr_id = 1
            user_profile.save()
        super(Ctnr, self).delete(*args, **kwargs)
    def __unicode__(self):
        return self.name
    @staticmethod
    def filter_by_ctnr(ctnr, objects=None):
        # A ctnr is only "in" itself.  `objects` is accepted for interface
        # parity with other models' filter_by_ctnr but is ignored here.
        return Ctnr.objects.filter(pk=ctnr.pk)
    def check_contains_obj(self, obj):
        # Permission check: is `obj` governed by this container?
        if self.name == "global":
            # The global container contains everything.
            return True
        if hasattr(obj, 'check_in_ctnr'):
            # The object knows how to answer for itself.
            return obj.check_in_ctnr(self)
        if isinstance(obj, Ctnr):
            return obj == self
        if hasattr(obj, 'ctnr'):
            return obj.ctnr == self
        # Fall back to membership in the direct M2M relations.
        for f in [self.users, self.domains, self.ranges, self.workgroups]:
            m = f.model
            if isinstance(obj, m):
                return f.filter(pk=obj.pk).exists()
        raise Exception("Permissions check on unknown object type: %s" % type(obj))
    def details(self):
        data = super(Ctnr, self).details()
        data['data'] = (
            ('Name', 'name', self),
            ('Description', 'description', self.description),
        )
        return data
    @staticmethod
    def eg_metadata():
        """EditableGrid metadata."""
        return {'metadata': [
            {'name': 'name', 'datatype': 'string', 'editable': True},
            {'name': 'description', 'datatype': 'string', 'editable': True},
        ]}
    def build_legacy_classes(self, ip_type):
        # Emit dhcpd-style class/subclass config for this ctnr's dynamic,
        # DHCP-enabled ranges.  NOTE(review): an ip_type other than
        # '4'/'6' leaves `ranges` unbound and raises NameError below.
        if ip_type == '4':
            # NOTE(review): '10.255.255.255' appears to be a special-cased
            # sentinel range included regardless of type -- confirm intent.
            ranges = self.ranges.filter(
                Q(range_type=DYNAMIC, dhcp_enabled=True) |
                Q(start_str='10.255.255.255'), ip_type='4')
        elif ip_type == '6':
            ranges = self.ranges.filter(
                ip_type='6', range_type=DYNAMIC, dhcp_enabled=True)
        build_str = ""
        for range_ in ranges:
            # One class per (ctnr, range); clients are matched by MAC.
            classname = '{0}:{1}:{2}'.format(
                self.name, range_.start_str, range_.end_str)
            build_str += (
                'class "{0}" {{\n'
                '\tmatch hardware;\n'
                '}}\n'.format(classname))
            clients = range_.dynamicinterface_set.filter(
                system__ctnr=self, dhcp_enabled=True).exclude(mac=None)
            for client in clients:
                build_str += client.build_subclass(classname)
        return build_str
class CtnrUser(BaseModel, ObjectUrlMixin):
    """Through-model linking a User to a Ctnr with a privilege level."""
    user = models.ForeignKey(User)
    ctnr = models.ForeignKey(Ctnr)
    # Privilege level within the container; indexes into LEVELS for display.
    level = models.IntegerField(
        validators=[validate_integer_field])

    class Meta:
        app_label = 'cyder'
        db_table = 'ctnr_users'
        unique_together = ('ctnr', 'user')

    def __str__(self):
        return self.ctnr.name

    def get_detail_url(self):
        # The detail view is the container's page, not a CtnrUser page.
        return self.ctnr.get_detail_url()

    def details(self):
        """Detail-view rows as (label, field name, value) tuples."""
        data = super(CtnrUser, self).details()
        data['data'] = (
            ('Container', 'ctnr', self),
            ('User', 'user', self.user),
            ('Level', 'level', LEVELS[self.level]),
        )
        return data
def objects_removed(ctnr, objects, objtype="domain"):
    """Raise ValidationError if removing any of ``objects`` from ``ctnr``
    would orphan dependent records.

    Scans every registered model (except Domain and Range themselves) that
    has both a ``<objtype>`` relation and a ``ctnr`` relation, and checks
    whether any row in this container still references the object being
    detached.

    :param ctnr: the container the objects are being removed from
    :param objects: iterable of model instances being detached
    :param objtype: relation field name, e.g. "domain", "range", "workgroup"
    :raises ValidationError: when a dependent object is found
    """
    for obj in objects:
        for klass, _ in get_klasses():
            if klass is Domain or klass is Range:
                continue
            if ((hasattr(klass, objtype) or hasattr(klass, "%s_set" % objtype))
                    and (hasattr(klass, "ctnr")
                         or hasattr(klass, "ctnr_set"))):
                results = klass.filter_by_ctnr(ctnr, objects=None)
                if issubclass(klass, BasePTR) and objtype == "range":
                    # PTRs have no direct filterable `range` field; compare
                    # the computed range attribute manually.
                    results = [p for p in results.all() if p.range == obj]
                else:
                    # BUG FIX: FieldError is raised by .filter(), not by
                    # building the kwargs dict, so the filter call itself
                    # must be inside the try for the skip to work.
                    try:
                        results = results.filter(**{objtype: obj})
                    except FieldError:
                        continue
                if results:
                    raise ValidationError(
                        "Cannot remove {0} because some {1} depends on"
                        " this {0} and container.".format(objtype,
                                                          klass.pretty_type))
def domains_changed(sender, **kwargs):
    """m2m_changed handler: veto detaching a domain from a Ctnr while
    dependent objects still reference the (domain, container) pair."""
    if kwargs['action'] != "pre_remove":
        return
    removed = Domain.objects.filter(pk__in=kwargs['pk_set'])
    objects_removed(kwargs['instance'], removed, objtype="domain")
def ranges_changed(sender, **kwargs):
    """m2m_changed handler: veto detaching a range from a Ctnr while
    dependent objects still reference the (range, container) pair."""
    if kwargs['action'] != "pre_remove":
        return
    removed = Range.objects.filter(pk__in=kwargs['pk_set'])
    objects_removed(kwargs['instance'], removed, objtype="range")
def workgroups_changed(sender, **kwargs):
    """m2m_changed handler: veto detaching a workgroup from a Ctnr while
    dependent objects still reference the (workgroup, container) pair."""
    if kwargs['action'] != "pre_remove":
        return
    removed = Workgroup.objects.filter(pk__in=kwargs['pk_set'])
    objects_removed(kwargs['instance'], removed, objtype="workgroup")
# Enforce referential integrity when objects are detached from a container:
# each handler fires on the m2m "pre_remove" action and blocks the removal
# if dependent objects still rely on the (object, container) pair.
m2m_changed.connect(domains_changed, sender=Ctnr.domains.through)
m2m_changed.connect(ranges_changed, sender=Ctnr.ranges.through)
m2m_changed.connect(workgroups_changed, sender=Ctnr.workgroups.through)
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module for spell factory
"""
from functools import partial
from pyherc.aspects import log_debug, log_info
from pyherc.data.effects import EffectHandle
from pyherc.data.geometry import get_target_in_direction, TargetData
from pyherc.data import blocks_los, get_character
from pyherc.data.magic import Spell
from pyherc.rules.los import get_fov_matrix
class SpellGenerator():
    """
    Factory that builds ready-to-cast spells from named specifications

    .. versionadded:: 0.9
    """
    @log_debug
    def __init__(self):
        """
        Default constructor
        """
        super().__init__()
        self.spell_list = {}
        self.__init_spells()

    @log_debug
    def __init_spells(self):
        """
        Temporary implementation for spell loading
        """
        def hit_effect(effect):
            """Build a fresh one-charge 'on spell hit' handle."""
            return EffectHandle(trigger='on spell hit',
                                effect=effect,
                                parameters=None,
                                charges=1)

        self.spell_list = {
            'healing wind': SpellSpecification(
                [hit_effect('heal medium wounds')],
                targeting_caster,
                spirit=5),
            'magic missile': SpellSpecification(
                [hit_effect('cause wound')],
                targeting_single_target,
                spirit=7),
            'fireball': SpellSpecification(
                [hit_effect('fire'),
                 hit_effect('cause wound')],
                partial(targeting_spherical_area, radius=3),
                spirit=10)}

    @log_info
    def create_spell(self, spell_name, targets):
        """
        Create a spell

        :param spell_name: name of the spell
        :type spell_name: string
        :param targets: targets of the spell
        :type targets: [TargetData]
        :returns: ready to use spell
        :rtype: Spell
        """
        spec = self.spell_list[spell_name]
        spell = Spell()
        spell.targets.extend(targets)
        spell.spirit = spec.spirit
        for handle in spec.effect_handles:
            spell.add_effect_handle(handle)
        return spell
class SpellSpecification():
    """
    Configuration of a castable spell: effect handles, targeting routine
    and spirit cost

    .. versionadded:: 0.9
    """
    @log_debug
    def __init__(self, effect_handles, targeter, spirit):
        """Store the spell configuration as-is."""
        super().__init__()
        self.spirit = spirit
        self.targeter = targeter
        self.effect_handles = effect_handles
def targeting_caster(parameters):
    """
    Target the caster themselves

    .. versionadded:: 0.9
    """
    caster = parameters.caster
    return [TargetData('character', caster.location, caster, None)]
@log_info
def targeting_single_target(parameters):
    """
    Target a single character in the chosen direction

    .. versionadded:: 0.9
    """
    caster = parameters.caster
    found = get_target_in_direction(level=caster.level,
                                    location=caster.location,
                                    direction=parameters.direction)
    return [found] if found else []
@log_info
def targeting_spherical_area(parameters, radius):
    """
    Target a spherical area around the struck target.

    Finds the first target in the cast direction, then collects every
    visible character, blocking wall, or empty tile within ``radius`` of
    the tile in front of it.  Returns an empty list when nothing is hit.

    .. versionadded:: 0.10
    """
    targets = []
    initial = get_target_in_direction(level=parameters.caster.level,
                                      location=parameters.caster.location,
                                      direction=parameters.direction)
    if initial and initial.previous_target:
        # Centre the splash on the tile just before the thing that was hit.
        splash_center = initial.previous_target.location
        level = parameters.caster.level
        matrix = get_fov_matrix(splash_center,
                                level,
                                radius)
        # NOTE: the original also computed x/y coordinate ranges here, but
        # they were never used — the FOV matrix already bounds the area.
        for location, is_visible in matrix.items():
            if is_visible:
                creature = get_character(level, location)
                if creature:
                    targets.append(TargetData('character',
                                              location,
                                              creature,
                                              None))
                elif blocks_los(level, location):
                    targets.append(TargetData('wall',
                                              location,
                                              None,
                                              None))
                else:
                    targets.append(TargetData('void',
                                              location,
                                              None,
                                              None))
    return targets
|
|
#!/usr/bin/env python
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
GcloudCLI class that wraps the oc commands in a subprocess
'''
import atexit
import json
import os
import random
# Not all genearated modules use this.
# pylint: disable=unused-import
import re
import shutil
import string
import subprocess
import tempfile
import yaml
# Not all genearated modules use this.
# pylint: disable=unused-import
import copy
# pylint: disable=import-error
from apiclient.discovery import build
# pylint: disable=import-error
from oauth2client.client import GoogleCredentials
from ansible.module_utils.basic import AnsibleModule
class GcloudCLIError(Exception):
    '''Raised when a gcloud CLI invocation fails.'''
    pass
# pylint: disable=too-few-public-methods
class GcloudCLI(object):
    ''' Class to wrap the gcloud command line tools.

    Every operation shells out to /usr/bin/gcloud through gcloud_cmd();
    the compute API client (self.scope) is used only where the CLI has no
    equivalent (e.g. disk labels).
    '''
    def __init__(self, credentials=None, project=None, verbose=False):
        ''' Constructor for GcloudCLI

        :param credentials: dict of service-account credentials; when falsy,
                            application-default credentials are used
        :param project: project id passed as --project to every command
        :param verbose: echo each command and its output to stdout
        '''
        self.scope = None
        self._project = project

        if not credentials:
            self.credentials = GoogleCredentials.get_application_default()
        else:
            # GoogleCredentials only loads from a file, so round-trip the
            # dict through a temporary file.
            tmp = tempfile.NamedTemporaryFile()
            tmp.write(json.dumps(credentials))
            tmp.seek(0)
            self.credentials = GoogleCredentials.from_stream(tmp.name)
            tmp.close()

        self.scope = build('compute', 'beta', credentials=self.credentials)
        self.verbose = verbose

    @property
    def project(self):
        '''property for project'''
        return self._project

    def _create_image(self, image_name, image_info):
        '''create an image; image_info keys become --key val options'''
        cmd = ['compute', 'images', 'create', image_name]
        for key, val in image_info.items():
            if val:
                cmd.extend(['--%s' % key, val])
        return self.gcloud_cmd(cmd, output=True, output_type='raw')

    def _delete_image(self, image_name):
        '''delete image by name '''
        # BUG FIX: this method previously extended the delete command with
        # describe/list arguments copy-pasted from _list_images, producing
        # an invalid command.  -q suppresses the confirmation prompt.
        cmd = ['compute', 'images', 'delete', image_name, '-q']
        return self.gcloud_cmd(cmd, output=True, output_type='raw')

    def _list_images(self, image_name=None):
        '''list images.
           if name is supplied perform a describe and return
        '''
        cmd = ['compute', 'images']
        if image_name:
            cmd.extend(['describe', image_name])
        else:
            cmd.append('list')
        return self.gcloud_cmd(cmd, output=True, output_type='raw')

    def _list_deployments(self, simple=True):
        '''list deployments; --simple-list yields one name per line'''
        cmd = ['deployment-manager', 'deployments', 'list']
        if simple:
            cmd.append('--simple-list')
        return self.gcloud_cmd(cmd, output=True, output_type='raw')

    def _delete_deployment(self, dname):
        '''delete a deployment by name (non-interactive)'''
        cmd = ['deployment-manager', 'deployments', 'delete', dname, '-q']
        return self.gcloud_cmd(cmd, output=True, output_type='raw')

    def _create_deployment(self, dname, config=None, opts=None):
        ''' create a deployment

        :param config: dict (written to a temp file) or path to a config file
        :param opts: extra --key=value options
        '''
        cmd = ['deployment-manager', 'deployments', 'create', dname]
        if config:
            if isinstance(config, dict):
                config = Utils.create_file(dname, config)
            if isinstance(config, str) and os.path.exists(config):
                cmd.extend(['--config=%s' % config])
        if opts:
            for key, val in opts.items():
                cmd.append('--%s=%s' % (key, val))
        return self.gcloud_cmd(cmd, output=True, output_type='raw')

    def _update_deployment(self, dname, config=None, opts=None):
        ''' update a deployment; see _create_deployment for parameters'''
        cmd = ['deployment-manager', 'deployments', 'update', dname]
        if config:
            if isinstance(config, dict):
                config = Utils.create_file(dname, config)
            if isinstance(config, str) and os.path.exists(config):
                cmd.extend(['--config=%s' % config])
        if opts:
            for key, val in opts.items():
                cmd.append('--%s=%s' % (key, val))
        return self.gcloud_cmd(cmd, output=True, output_type='raw')

    def _list_manifests(self, deployment, mname=None):
        ''' list manifests
            if a name is specified then perform a describe
        '''
        cmd = ['deployment-manager', 'manifests', '--deployment', deployment]
        if mname:
            cmd.extend(['describe', mname])
        else:
            cmd.append('list')
        cmd.extend(['--format', 'json'])
        return self.gcloud_cmd(cmd, output=True, output_type='json')

    def _delete_address(self, aname):
        ''' delete an address by name (non-interactive)'''
        cmd = ['compute', 'addresses', 'delete', aname, '-q']
        return self.gcloud_cmd(cmd, output=True, output_type='raw')

    def _list_addresses(self, aname=None):
        ''' list addresses
            if a name is specified then perform a describe
        '''
        cmd = ['compute', 'addresses']
        if aname:
            cmd.extend(['describe', aname])
        else:
            cmd.append('list')
        return self.gcloud_cmd(cmd, output=True, output_type='raw')

    def _create_address(self, address_name, address_info, address=None, isglobal=False):
        ''' create an address; address_info keys become --key val options'''
        cmd = ['compute', 'addresses', 'create', address_name]
        if address:
            cmd.append(address)
        if isglobal:
            cmd.append('--global')
        for key, val in address_info.items():
            if val:
                cmd.extend(['--%s' % key, val])
        return self.gcloud_cmd(cmd, output=True, output_type='raw')

    def _list_metadata(self, resource_type, name=None, zone=None):
        ''' list metadata via a describe of the resource'''
        cmd = ['compute', resource_type, 'describe']
        if name:
            cmd.extend([name])
        if zone:
            cmd.extend(['--zone', zone])
        return self.gcloud_cmd(cmd, output=True, output_type='raw')

    # pylint: disable=too-many-arguments
    def _delete_metadata(self, resource_type, keys, remove_all=False, name=None, zone=None):
        '''remove metadata keys (or all metadata) from a resource'''
        cmd = ['compute', resource_type, 'remove-metadata']
        if name:
            cmd.extend([name])
        if zone:
            cmd.extend(['--zone', zone])
        if remove_all:
            cmd.append('--all')
        else:
            cmd.append('--keys')
            cmd.append(','.join(keys))
        cmd.append('-q')
        return self.gcloud_cmd(cmd, output=True, output_type='raw')

    # pylint: disable=too-many-arguments
    def _create_metadata(self, resource_type, metadata=None, metadata_from_file=None, name=None, zone=None):
        '''add metadata to a resource, either inline or from files'''
        cmd = ['compute', resource_type, 'add-metadata']
        if name:
            cmd.extend([name])
        if zone:
            cmd.extend(['--zone', zone])
        data = None
        if metadata_from_file:
            cmd.append('--metadata-from-file')
            data = metadata_from_file
        else:
            cmd.append('--metadata')
            data = metadata
        cmd.append(','.join(['%s=%s' % (key, val) for key, val in data.items()]))
        return self.gcloud_cmd(cmd, output=True, output_type='raw')

    def _list_service_accounts(self, sa_name=None):
        '''return service accounts; describe when a name is given'''
        cmd = ['iam', 'service-accounts']
        if sa_name:
            cmd.extend(['describe', sa_name])
        else:
            cmd.append('list')
        cmd.extend(['--format', 'json'])
        return self.gcloud_cmd(cmd, output=True, output_type='json')

    def _delete_service_account(self, sa_name):
        '''delete service account (non-interactive)'''
        cmd = ['iam', 'service-accounts', 'delete', sa_name, '-q']
        cmd.extend(['--format', 'json'])
        return self.gcloud_cmd(cmd, output=True, output_type='json')

    def _create_service_account(self, sa_name, display_name=None):
        '''create service account '''
        cmd = ['iam', 'service-accounts', 'create', sa_name]
        if display_name:
            cmd.extend(['--display-name', display_name])
        cmd.extend(['--format', 'json'])
        return self.gcloud_cmd(cmd, output=True, output_type='json')

    def _update_service_account(self, sa_name, display_name=None):
        '''update service account '''
        cmd = ['iam', 'service-accounts', 'update', sa_name]
        if display_name:
            cmd.extend(['--display-name', display_name])
        cmd.extend(['--format', 'json'])
        return self.gcloud_cmd(cmd, output=True, output_type='json')

    def _delete_service_account_key(self, sa_name, key_id):
        '''delete service account key'''
        cmd = ['iam', 'service-accounts', 'keys', 'delete', key_id, '--iam-account', sa_name, '-q']
        return self.gcloud_cmd(cmd, output=True, output_type='raw')

    def _list_service_account_keys(self, sa_name):
        '''return service account keys '''
        cmd = ['iam', 'service-accounts', 'keys', 'list', '--iam-account', sa_name]
        cmd.extend(['--format', 'json'])
        return self.gcloud_cmd(cmd, output=True, output_type='json')

    def _create_service_account_key(self, sa_name, outputfile, key_format='p12'):
        '''create service account key '''
        # Ensure the key material does not outlive the module run.
        atexit.register(Utils.cleanup, [outputfile])
        cmd = ['iam', 'service-accounts', 'keys', 'create', outputfile,
               '--iam-account', sa_name, '--key-file-type', key_format]
        return self.gcloud_cmd(cmd, output=True, output_type='raw')

    def _list_project_policy(self, project):
        '''return the project IAM policy'''
        cmd = ['projects', 'get-iam-policy', project]
        cmd.extend(['--format', 'json'])
        return self.gcloud_cmd(cmd, output=True, output_type='json')

    def _add_project_policy(self, project, member, role):
        '''add a member/role binding to the project IAM policy'''
        cmd = ['projects', 'add-iam-policy-binding', project, '--member', member, '--role', role]
        cmd.extend(['--format', 'json'])
        return self.gcloud_cmd(cmd, output=True, output_type='json')

    def _remove_project_policy(self, project, member, role):
        '''remove a member/role binding from the project IAM policy'''
        cmd = ['projects', 'remove-iam-policy-binding', project, '--member', member, '--role', role]
        cmd.extend(['--format', 'json'])
        return self.gcloud_cmd(cmd, output=True, output_type='json')

    def _set_project_policy(self, project, policy_path):
        '''replace the project IAM policy from a policy file'''
        cmd = ['projects', 'set-iam-policy', project, policy_path]
        cmd.extend(['--format', 'json'])
        return self.gcloud_cmd(cmd, output=True, output_type='json')

    def _list_zones(self):
        ''' list zones '''
        cmd = ['compute', 'zones', 'list']
        cmd.extend(['--format', 'json'])
        return self.gcloud_cmd(cmd, output=True, output_type='json')

    def _config_set(self, config_param, config_value, config_section):
        ''' set config params with gcloud config set '''
        param = config_section + '/' + config_param
        cmd = ['config', 'set', param, config_value]
        cmd.extend(['--format', 'json'])
        return self.gcloud_cmd(cmd, output=True, output_type='json')

    def _list_config(self):
        '''return config '''
        cmd = ['config', 'list']
        cmd.extend(['--format', 'json'])
        return self.gcloud_cmd(cmd, output=True, output_type='json')

    def list_disks(self, zone=None, disk_name=None):
        '''return a list of disk objects in this project and zone'''
        cmd = ['beta', 'compute', 'disks']
        if disk_name and zone:
            cmd.extend(['describe', disk_name, '--zone', zone])
        else:
            cmd.append('list')
        cmd.extend(['--format', 'json'])
        return self.gcloud_cmd(cmd, output=True, output_type='json')

    # disabling too-many-arguments as these are all required for the disk labels
    # pylint: disable=too-many-arguments
    def _set_disk_labels(self, project, zone, dname, labels, finger_print):
        '''set labels on a disk through the compute API (no CLI equivalent).

        finger_print must be the current labelFingerprint of the disk.
        '''
        if labels is None:
            labels = {}
        self.scope = build('compute', 'beta', credentials=self.credentials)
        body = {'labels': labels, 'labelFingerprint': finger_print}
        result = self.scope.disks().setLabels(project=project,
                                              zone=zone,
                                              resource=dname,
                                              body=body,
                                             ).execute()
        return result

    def gcloud_cmd(self, cmd, output=False, output_type='json'):
        '''Run a gcloud command and return a result dict with
        returncode/results/cmd (plus stdout/stderr/err on problems).'''
        cmds = ['/usr/bin/gcloud']
        if self.project:
            cmds.extend(['--project', self.project])
        cmds.extend(cmd)

        rval = {}
        results = ''
        err = None

        if self.verbose:
            print(' '.join(cmds))

        proc = subprocess.Popen(cmds,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env={})
        stdout, stderr = proc.communicate()
        rval = {"returncode": proc.returncode,
                "results": results,
                "cmd": ' '.join(cmds),
               }

        if proc.returncode == 0:
            if output:
                if output_type == 'json':
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as err:
                        # NOTE(review): err.message is Python-2 only; this
                        # branch would AttributeError under Python 3.
                        if "No JSON object could be decoded" in err.message:
                            err = err.message
                elif output_type == 'raw':
                    rval['results'] = stdout

            if self.verbose:
                print(stdout)
                print(stderr)

            if err:
                rval.update({"err": err,
                             "stderr": stderr,
                             "stdout": stdout,
                             "cmd": cmds
                            })
        else:
            rval.update({"stderr": stderr,
                         "stdout": stdout,
                         "results": {},
                        })
        return rval
################################################################################
# utilities and helpers for generation
################################################################################
class Utils(object):
    ''' Helper routines shared by the generated gcloud modules '''

    COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/'

    @staticmethod
    def create_file(rname, data, ftype='yaml'):
        ''' Write data to /tmp/<rname> as yaml, json or raw text; return the path.'''
        path = os.path.join('/tmp', rname)
        if ftype == 'yaml':
            contents = yaml.safe_dump(data, default_flow_style=False)
        elif ftype == 'json':
            contents = json.dumps(data)
        else:
            contents = data
        with open(path, 'w') as fds:
            fds.write(contents)
        # Remove the file once the module process exits.
        atexit.register(Utils.cleanup, [path])
        return path

    @staticmethod
    def global_compute_url(project, collection, rname):
        '''build the global compute url for a resource'''
        return '%sprojects/%s/global/%s/%s' % (
            Utils.COMPUTE_URL_BASE, project, collection, rname)

    @staticmethod
    def zonal_compute_url(project, zone, collection, rname):
        '''build the zone compute url for a resource'''
        return '%sprojects/%s/zones/%s/%s/%s' % (
            Utils.COMPUTE_URL_BASE, project, zone, collection, rname)

    @staticmethod
    def generate_random_name(size):
        '''generate a random string of lowercase and digits the length of size'''
        alphabet = string.ascii_lowercase + string.digits
        return ''.join(random.choice(alphabet) for _ in range(size))

    @staticmethod
    def cleanup(files):
        '''Delete any of the given paths that still exist.'''
        for path in files:
            if not os.path.exists(path):
                continue
            if os.path.isdir(path):
                shutil.rmtree(path)
            elif os.path.isfile(path):
                os.remove(path)
# pylint: disable=too-many-instance-attributes
class GcloudDeploymentManager(GcloudCLI):
    ''' Class to wrap the gcloud deployment manager '''

    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 dname,
                 config=None,
                 opts=None,
                 credentials=None,
                 verbose=False):
        ''' Constructor for gcloud resource

        :param dname: deployment name
        :param config: dict or path to a deployment config file
        :param opts: extra --key=value options for create/update
        :param credentials: optional credentials dict, forwarded to GcloudCLI
        :param verbose: forwarded to GcloudCLI
        '''
        # BUG FIX: credentials/verbose were previously accepted but never
        # forwarded to the base class, and the base-resolved credentials
        # object was then clobbered with the raw input dict.
        super(GcloudDeploymentManager, self).__init__(credentials=credentials,
                                                      verbose=verbose)
        self.dname = dname
        self.opts = opts
        self.config = config

    def list_deployments(self):
        '''return the list of deployment names'''
        results = self._list_deployments()
        if results['returncode'] == 0:
            # --simple-list output is one deployment name per line.
            results['results'] = results['results'].strip().split('\n')
        return results

    def exists(self):
        ''' return whether a deployment exists '''
        deployments = self.list_deployments()
        if deployments['returncode'] != 0:
            raise GcloudCLIError('Something went wrong. Results: %s' % deployments['stderr'])
        return self.dname in deployments['results']

    def delete(self):
        '''delete a deployment'''
        return self._delete_deployment(self.dname)

    def create_deployment(self):
        '''create a deployment'''
        return self._create_deployment(self.dname, self.config, self.opts)

    def update_deployment(self):
        '''update a deployment'''
        return self._update_deployment(self.dname, self.config, self.opts)
# vim: expandtab:tabstop=4:shiftwidth=4
#pylint: disable=too-many-branches
def main():
    ''' ansible module for gcloud deployment-manager deployments

    states: list (report), absent (delete if present),
    present (create or update).
    '''
    module = AnsibleModule(
        argument_spec=dict(
            # credentials
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            name=dict(default=None, type='str'),
            config=dict(default=None, type='dict'),
            config_path=dict(default=None, type='str'),
            opts=dict(default=None, type='dict'),
        ),
        supports_check_mode=True,
        required_one_of=[['config', 'config_path']],
    )

    # An inline config dict wins over a path to a config file.
    if module.params['config'] is not None:
        config = module.params['config']
    else:
        config = module.params['config_path']

    gconfig = GcloudDeploymentManager(module.params['name'],
                                      config,
                                      module.params['opts'])

    state = module.params['state']
    api_rval = gconfig.list_deployments()

    #####
    # Get
    #####
    if state == 'list':
        module.exit_json(changed=False, results=api_rval['results'], state="list")

    ########
    # Delete
    ########
    if state == 'absent':
        if gconfig.exists():
            if module.check_mode:
                module.exit_json(changed=False, msg='Would have performed a delete.')
            api_rval = gconfig.delete()
            module.exit_json(changed=True, results=api_rval, state="absent")
        module.exit_json(changed=False, state="absent")

    if state == 'present':
        ########
        # Create
        ########
        if not gconfig.exists():
            if module.check_mode:
                module.exit_json(changed=False, msg='Would have performed a create.')
            # Create it here
            api_rval = gconfig.create_deployment()
            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)
            module.exit_json(changed=True, results=api_rval, state="present")

        ########
        # Update
        ########
        api_rval = gconfig.update_deployment()
        if api_rval['returncode'] != 0:
            module.fail_json(msg=api_rval)
        module.exit_json(changed=True, results=api_rval, state="present")

    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")
#if __name__ == '__main__':
#    gcloud = GcloudDeploymentManager('optestgcp')
#    print gcloud.list_deployments()

# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
from ansible.module_utils.basic import *

# Ansible executes generated modules directly, so main() runs unguarded.
main()
|
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=unused-argument, line-too-long, import-outside-toplevel, raise-missing-from
import datetime as dt
from datetime import datetime
import os
import random
import subprocess
import secrets
import string
import yaml
from knack.log import get_logger
from knack.prompting import prompt_y_n, NoTTYException
from msrestazure.tools import parse_resource_id
from msrestazure.azure_exceptions import CloudError
from azure.cli.core.util import CLIError
from azure.cli.core.azclierror import AuthenticationError
from azure.core.paging import ItemPaged
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.commands import LongRunningOperation, _is_poller
from azure.cli.core.azclierror import RequiredArgumentMissingError, InvalidArgumentValueError
from azure.cli.command_modules.role.custom import create_service_principal_for_rbac
from azure.mgmt.resource.resources.models import ResourceGroup
from ._client_factory import resource_client_factory, cf_mysql_flexible_location_capabilities, cf_postgres_flexible_location_capabilities
logger = get_logger(__name__)

# Default regions used when the caller does not supply a location.
DEFAULT_LOCATION_PG = 'eastus'  # For testing: 'eastus2euap'
DEFAULT_LOCATION_MySQL = 'westus2'
# Names of the GitHub secrets / env vars consumed by the generated workflow.
AZURE_CREDENTIALS = 'AZURE_CREDENTIALS'
AZURE_POSTGRESQL_CONNECTION_STRING = 'AZURE_POSTGRESQL_CONNECTION_STRING'
AZURE_MYSQL_CONNECTION_STRING = 'AZURE_MYSQL_CONNECTION_STRING'
GITHUB_ACTION_PATH = '/.github/workflows/'
def resolve_poller(result, cli_ctx, name):
    """Block on *result* if it is an LRO poller; otherwise return it as-is."""
    if not _is_poller(result):
        return result
    return LongRunningOperation(cli_ctx, 'Starting {}'.format(name))(result)
def create_random_resource_name(prefix='azure', length=15):
    """Return *prefix* padded with random digits up to *length* characters.

    If the prefix is already *length* or longer, it is returned unchanged.
    """
    append_length = length - len(prefix)
    # `_` instead of an unused loop variable.
    digits = [str(random.randrange(10)) for _ in range(append_length)]
    return prefix + ''.join(digits)
def generate_missing_parameters(cmd, location, resource_group_name, server_name, db_engine):
    """Fill in defaults for location, resource group and server name.

    Creates the resource group when it does not exist; otherwise inherits
    its location if none was given.  Returns (location, rg_name, server_name)
    with the server name lower-cased.
    """
    rg_exists = False
    if resource_group_name is not None:
        logger.warning('Checking the existence of the resource group \'%s\'...', resource_group_name)
        rg_exists = _check_resource_group_existence(cmd, resource_group_name)
        logger.warning('Resource group \'%s\' exists ? : %s ', resource_group_name, rg_exists)

    if rg_exists:
        # Inherit the existing group's location when none was specified.
        rg_client = resource_client_factory(cmd.cli_ctx).resource_groups
        group = rg_client.get(resource_group_name=resource_group_name)
        if not location:
            location = group.location
    else:
        if not location:
            location = DEFAULT_LOCATION_PG if db_engine == 'postgres' else DEFAULT_LOCATION_MySQL
        resource_group_name = _create_resource_group(cmd, location, resource_group_name)

    # If servername is not passed, always create a new server.
    if server_name is None:
        server_name = create_random_resource_name('server')

    return location, resource_group_name, server_name.lower()
def generate_password(administrator_login_password):
    """Return the given password, or generate a random one when None.

    Generated passwords are URL-safe tokens with any '-' replaced by a
    random ASCII letter (some server password rules reject '-').
    User-supplied passwords are returned untouched.
    """
    if administrator_login_password is None:
        passwordlength = 16
        administrator_login_password = secrets.token_urlsafe(passwordlength)
        # str.replace is a no-op when '-' is absent, so the previous
        # find() pre-check was redundant.
        administrator_login_password = administrator_login_password.replace(
            "-", random.choice(string.ascii_letters))
    return administrator_login_password
def parse_public_access_input(public_access):
    """Parse a --public-access value into a (start_ip, end_ip) pair.

    A single value maps to (value, value); 'a-b' maps to (a, b).
    Returns None when no value was supplied.
    """
    if public_access is None:
        return None
    parts = public_access.split('-')
    if len(parts) == 1:
        return parts[0], parts[0]
    if len(parts) == 2:
        return parts[0], parts[1]
    raise InvalidArgumentValueError('incorrect usage: --public-access. Acceptable values are \'all\', \'none\',\'<startIP>\' and \'<startIP>-<destinationIP>\' '
                                    'where startIP and destinationIP ranges from 0.0.0.0 to 255.255.255.255')
def server_list_custom_func(client, resource_group_name=None):
    """List servers in the given resource group, or subscription-wide."""
    if not resource_group_name:
        return client.list()
    return client.list_by_resource_group(resource_group_name)
def flexible_firewall_rule_update_custom_func(instance, start_ip_address=None, end_ip_address=None):
    """Apply any provided IP bounds onto the firewall rule and return it."""
    updates = {'start_ip_address': start_ip_address,
               'end_ip_address': end_ip_address}
    for attr, value in updates.items():
        if value is not None:
            setattr(instance, attr, value)
    return instance
def update_kwargs(kwargs, key, value):
    """Set kwargs[key] = value, skipping None values."""
    if value is None:
        return
    kwargs[key] = value
def parse_maintenance_window(maintenance_window_string):
    """Split 'day[:hour[:minute]]' into (mapped_day, hour, minute).

    Missing components are returned as None; more than three components
    yields (None, None, None).
    """
    fields = maintenance_window_string.split(':')
    if len(fields) > 3:
        return None, None, None
    # Pad with None so day/hour/minute always unpack.
    padded = fields + [None] * (3 - len(fields))
    return _map_maintenance_window(padded[0]), padded[1], padded[2]
def get_mysql_versions(sku_info, tier):
    """Available MySQL server versions for *tier*."""
    return _get_available_values(sku_info, 'versions', tier)


def get_mysql_skus(sku_info, tier):
    """Available MySQL SKU names for *tier*."""
    return _get_available_values(sku_info, 'skus', tier)


def get_mysql_storage_size(sku_info, tier):
    """Supported MySQL storage sizes (GB) for *tier*."""
    return _get_available_values(sku_info, 'storage_sizes', tier)


def get_mysql_backup_retention(sku_info, tier):
    """Supported MySQL backup-retention bounds (days) for *tier*."""
    return _get_available_values(sku_info, 'backup_retention', tier)


def get_mysql_tiers(sku_info):
    """All MySQL tier names."""
    return list(sku_info)


def get_postgres_versions(sku_info, tier):
    """Available PostgreSQL server versions for *tier*."""
    return _get_available_values(sku_info, 'versions', tier)


def get_postgres_skus(sku_info, tier):
    """Available PostgreSQL SKU names for *tier*."""
    return _get_available_values(sku_info, 'skus', tier)


def get_postgres_storage_sizes(sku_info, tier):
    """Supported PostgreSQL storage sizes (GB) for *tier*."""
    return _get_available_values(sku_info, 'storage_sizes', tier)


def get_postgres_tiers(sku_info):
    """All PostgreSQL tier names."""
    return list(sku_info)
def get_postgres_list_skus_info(cmd, location):
    """Fetch and parse PostgreSQL flexible-server capabilities for *location*."""
    capabilities_client = cf_postgres_flexible_location_capabilities(cmd.cli_ctx, '_')
    raw = capabilities_client.execute(location)
    return _postgres_parse_list_skus(raw, 'postgres')
def get_mysql_list_skus_info(cmd, location):
    """Fetch and parse MySQL flexible-server capabilities for *location*."""
    capabilities_client = cf_mysql_flexible_location_capabilities(cmd.cli_ctx, '_')
    raw = capabilities_client.list(location)
    return _mysql_parse_list_skus(raw, 'mysql')
def _postgres_parse_list_skus(result, database_engine):
    """Normalize the raw capabilities response into
    {'sku_info': {tier: {'skus', 'versions', 'storage_sizes'}}, 'single_az': bool}.

    :raises InvalidArgumentValueError: when the location reports no SKUs
    """
    result = _get_list_from_paged_response(result)
    if not result:
        raise InvalidArgumentValueError("No available SKUs in this location")
    # The first capability entry describes the region as a whole.
    single_az = not result[0].zone_redundant_ha_supported
    tiers = result[0].supported_flexible_server_editions
    tiers_dict = {}
    for tier_info in tiers:
        tier_name = tier_info.name
        tier_dict = {}
        skus = set()
        versions = set()
        for version in tier_info.supported_server_versions:
            versions.add(version.name)
            for vcores in version.supported_vcores:
                skus.add(vcores.name)
        tier_dict["skus"] = skus
        tier_dict["versions"] = versions
        storage_info = tier_info.supported_storage_editions[0]
        storage_sizes = set()
        # Reported in MB; expose as whole GB.
        for size in storage_info.supported_storage_mb:
            storage_sizes.add(int(size.storage_size_mb // 1024))
        tier_dict["storage_sizes"] = storage_sizes
        tiers_dict[tier_name] = tier_dict
    return {'sku_info': tiers_dict,
            'single_az': single_az}
def _mysql_parse_list_skus(result, database_engine):
    """Flatten a MySQL capabilities response into a dict.

    Returns {'sku_info': {tier: {'skus', 'versions', 'backup_retention',
    'storage_sizes'}}, 'single_az': bool, 'iops_info': {tier: {sku: iops}},
    'geo_paired_regions': [...]}.  backup_retention and storage_sizes are
    (min, max) tuples; storage sizes are converted to GiB.
    Raises InvalidArgumentValueError when the response is empty.
    """
    result = _get_list_from_paged_response(result)
    if not result:
        raise InvalidArgumentValueError("No available SKUs in this location")
    capability = result[0]
    single_az = 'ZoneRedundant' not in capability.supported_ha_mode
    tiers_dict = {}
    iops_dict = {}
    for tier_info in capability.supported_flexible_server_editions:
        skus = set()
        versions = set()
        sku_iops = {}
        for version in tier_info.supported_server_versions:
            versions.add(version.name)
            for supported_sku in version.supported_skus:
                skus.add(supported_sku.name)
                sku_iops[supported_sku.name] = supported_sku.supported_iops
        storage_info = tier_info.supported_storage_editions[0]
        tiers_dict[tier_info.name] = {
            "skus": skus,
            "versions": versions,
            "backup_retention": (storage_info.min_backup_retention_days,
                                 storage_info.max_backup_retention_days),
            "storage_sizes": (int(storage_info.min_storage_size) // 1024,
                              int(storage_info.max_storage_size) // 1024),
        }
        iops_dict[tier_info.name] = sku_iops
    return {'sku_info': tiers_dict,
            'single_az': single_az,
            'iops_info': iops_dict,
            'geo_paired_regions': capability.supported_geo_backup_regions}
def _get_available_values(sku_info, argument, tier=None):
result = {key: val[argument] for key, val in sku_info.items()}
return result[tier]
def _get_list_from_paged_response(obj_list):
    """Materialise an azure ItemPaged iterator; plain lists pass through."""
    if isinstance(obj_list, ItemPaged):
        return list(obj_list)
    return obj_list
def _create_resource_group(cmd, location, resource_group_name):
    """Create (or update) a resource group in *location* and return its name.

    When no name is given a random 'group...' name is generated.
    """
    if resource_group_name is None:
        resource_group_name = create_random_resource_name('group')
    logger.warning('Creating Resource Group \'%s\'...', resource_group_name)
    resource_client = resource_client_factory(cmd.cli_ctx)
    resource_client.resource_groups.create_or_update(
        resource_group_name, ResourceGroup(location=location))
    return resource_group_name
def _check_resource_group_existence(cmd, resource_group_name):
    """Return whether *resource_group_name* exists in the current subscription."""
    client = resource_client_factory(cmd.cli_ctx)
    return client.resource_groups.check_existence(resource_group_name)
# Map day_of_week string to integer to day of week
# Possible values can be 0 - 6
def _map_maintenance_window(day_of_week):
options = {"Mon": 1,
"Tue": 2,
"Wed": 3,
"Thu": 4,
"Fri": 5,
"Sat": 6,
"Sun": 0,
}
return options[day_of_week]
def get_current_time():
    """Return the current UTC time as a tz-aware ISO-8601 string (whole seconds)."""
    now = datetime.utcnow().replace(tzinfo=dt.timezone.utc, microsecond=0)
    return now.isoformat()
def get_id_components(rid):
    """Split an ARM resource id into (subscription, resource_group, name,
    child_name); child_name is None when the id has no child resource."""
    parsed = parse_resource_id(rid)
    return (parsed['subscription'],
            parsed['resource_group'],
            parsed['name'],
            parsed.get('child_name_1'))
def check_existence(resource_client, value, resource_group, provider_namespace, resource_type,
                    parent_name=None, parent_type=None):
    """Return True when the named resource exists, False when the GET fails.

    When a parent is given, the lookup is performed relative to
    '<parent_type>/<parent_name>' and its API version is used.
    """
    from azure.core.exceptions import HttpResponseError
    parent_path = '{}/{}'.format(parent_type, parent_name) if parent_name and parent_type else ''
    api_version = _resolve_api_version(resource_client, provider_namespace, resource_type, parent_path)
    try:
        resource_client.resources.get(resource_group, provider_namespace, parent_path,
                                      resource_type, value, api_version)
    except HttpResponseError:
        return False
    return True
def _resolve_api_version(client, provider_namespace, resource_type, parent_path):
    """Pick an API version for *resource_type*, preferring non-preview ones.

    When *parent_path* is given the parent resource's type is resolved
    instead, so the parent's api-version is used for child lookups.
    """
    provider = client.providers.get(provider_namespace)
    target_type = parent_path.split('/')[0] if parent_path else resource_type
    matches = [t for t in provider.resource_types  # pylint: disable=no-member
               if t.resource_type.lower() == target_type.lower()]
    if not matches:
        raise InvalidArgumentValueError('Resource type {} not found.'.format(target_type))
    if len(matches) == 1 and matches[0].api_versions:
        stable = [v for v in matches[0].api_versions if 'preview' not in v.lower()]
        return stable[0] if stable else matches[0].api_versions[0]
    raise RequiredArgumentMissingError(
        'API version is required and could not be resolved for resource {}'
        .format(resource_type))
def run_subprocess(command, stdout_show=None):
    """Run *command* through the shell and wait for it to finish.

    * command: shell command line (executed with shell=True).
    * stdout_show: when truthy, the child inherits stdout/stderr so its
      output is shown live; otherwise both streams are captured.

    On a non-zero exit code the captured stderr is logged as a warning.
    Bug fix: when stdout_show was set, stderr was not piped, so
    ``process.stderr`` was None and ``.read()`` raised AttributeError on
    any failing command.
    """
    if stdout_show:
        process = subprocess.Popen(command, shell=True)
    else:
        process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    process.wait()
    if process.returncode:
        # stderr is only captured when it was piped; with stdout_show the
        # child already wrote its errors to the console.
        if process.stderr is not None:
            logger.warning(process.stderr.read().strip().decode('UTF-8'))
def run_subprocess_get_output(command):
    """Run *command* (split on whitespace) with stdout/stderr piped and
    return the finished Popen object so callers can read the streams.

    Bug fix: the old code passed the argument *list* together with
    shell=True; on POSIX that hands everything after the first token to the
    shell itself, so only the bare program name was ever executed. Running
    the argv directly (no shell) executes the full command as intended.
    """
    commands = command.split()
    process = subprocess.Popen(commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    process.wait()
    return process
def register_credential_secrets(cmd, database_engine, server, repository):
    """Create a service principal scoped to *server* and store its credentials
    as the "AZURE_CREDENTIALS" secret of the GitHub *repository*.

    * cmd: CLI command context.
    * database_engine: 'mysql' or 'postgresql'; selects the resource provider.
    * server: flexible-server model object (its id and name are read).
    * repository: GitHub repository in 'owner/name' form (used by the gh CLI).
    """
    logger.warning('Adding secret "AZURE_CREDENTIALS" to github repository')
    resource_group = parse_resource_id(server.id)["resource_group"]
    provider = "DBforMySQL"
    if database_engine == "postgresql":
        provider = "DBforPostgreSQL"
    scope = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.{}/flexibleServers/{}".format(get_subscription_id(cmd.cli_ctx), resource_group, provider, server.name)
    app = create_service_principal_for_rbac(cmd, name=server.name, role='contributor', scopes=[scope])
    # Rename the SDK's keys to the names the azure/login action expects.
    app['clientId'], app['clientSecret'], app['tenantId'] = app.pop('appId'), app.pop('password'), app.pop('tenant')
    app['subscriptionId'] = get_subscription_id(cmd.cli_ctx)
    app.pop('displayName')
    app.pop('name')
    # Render the remaining keys as a JSON-like document by hand.
    app_key_val = []
    for key, val in app.items():
        app_key_val.append('"{}": "{}"'.format(key, val))
    app_json = ',\n '.join(app_key_val)
    app_json = '{\n ' + app_json + '\n}'
    # 'gh secret set' reads the secret value from stdin, so the credentials
    # are written to a temp file, piped in, and the file removed afterwards.
    credential_file = "./temp_app_credential.txt"
    with open(credential_file, "w") as f:
        f.write(app_json)
    run_subprocess('gh secret set {} --repo {} < {}'.format(AZURE_CREDENTIALS, repository, credential_file))
    os.remove(credential_file)
def register_connection_secrets(cmd, database_engine, server, database_name, administrator_login, administrator_login_password, repository, connection_string_name):
    """Build the engine-specific connection string and store it as a GitHub
    secret named *connection_string_name* via the gh CLI.

    Engines other than 'postgresql'/'mysql' only produce the log line.
    """
    logger.warning("Added secret %s to github repository", connection_string_name)
    templates = {
        'postgresql': "host={} port=5432 dbname={} user={} password={} sslmode=require",
        'mysql': "Server={}; Port=3306; Database={}; Uid={}; Pwd={}; SslMode=Preferred;",
    }
    if database_engine in templates:
        connection_string = templates[database_engine].format(
            server.fully_qualified_domain_name, database_name,
            administrator_login, administrator_login_password)
        run_subprocess('gh secret set {} --repo {} -b"{}"'.format(connection_string_name, repository, connection_string))
def fill_action_template(cmd, database_engine, server, database_name, administrator_login, administrator_login_password, file_name, action_name, repository):
    """Generate a GitHub Actions workflow that runs *file_name* against the
    server, writing it to <git root><GITHUB_ACTION_PATH><action_name>.yml.

    Missing repository secrets (service-principal credentials and the
    connection string) are registered first; the workflow itself is built
    from the engine-specific YAML template shipped next to this module.
    """
    action_dir = get_git_root_dir() + GITHUB_ACTION_PATH
    if not os.path.exists(action_dir):
        os.makedirs(action_dir)
    # List the repo's existing secrets once so we only add what is missing.
    process = run_subprocess_get_output("gh secret list --repo {}".format(repository))
    github_secrets = process.stdout.read().strip().decode('UTF-8')
    # connection_string = AZURE_POSTGRESQL_CONNECTION_STRING if database_engine == 'postgresql' else AZURE_MYSQL_CONNECTION_STRING
    if AZURE_CREDENTIALS not in github_secrets:
        try:
            register_credential_secrets(cmd,
                                        database_engine=database_engine,
                                        server=server,
                                        repository=repository)
        except CloudError:
            raise AuthenticationError('You do not have authorization to create a service principal to run azure service in github actions. \n'
                                      'Please create a service principal that has access to the database server and add "AZURE_CREDENTIALS" secret to your github repository. \n'
                                      'Follow the instruction here "aka.ms/github-actions-azure-credentials".')
    # Secret name is derived from server + database + engine, e.g.
    # MYSERVER_MYDB_MYSQL_CONNECTION_STRING.
    connection_string_name = server.name.upper().replace("-", "_") + "_" + database_name.upper().replace("-", "_") + "_" + database_engine.upper() + "_CONNECTION_STRING"
    if connection_string_name not in github_secrets:
        register_connection_secrets(cmd,
                                    database_engine=database_engine,
                                    server=server,
                                    database_name=database_name,
                                    administrator_login=administrator_login,
                                    administrator_login_password=administrator_login_password,
                                    repository=repository,
                                    connection_string_name=connection_string_name)
    # Fill the template: step index 2 is assumed to be the DB deploy step.
    current_location = os.path.dirname(__file__)
    with open(current_location + "/templates/" + database_engine + "_githubaction_template.yaml", "r") as template_file:
        template = yaml.safe_load(template_file)
    template['jobs']['build']['steps'][2]['with']['server-name'] = server.fully_qualified_domain_name
    if database_engine == 'postgresql':
        template['jobs']['build']['steps'][2]['with']['plsql-file'] = file_name
    else:
        template['jobs']['build']['steps'][2]['with']['sql-file'] = file_name
    template['jobs']['build']['steps'][2]['with']['connection-string'] = "${{ secrets." + connection_string_name + " }}"
    with open(action_dir + action_name + '.yml', 'w', encoding='utf8') as yml_file:
        yml_file.write("on: [workflow_dispatch]\n")
        yml_file.write(yaml.dump(template))
def get_git_root_dir():
    """Return the top-level directory of the current git checkout."""
    process = run_subprocess_get_output("git rev-parse --show-toplevel")
    return process.stdout.read().strip().decode('UTF-8')
def get_user_confirmation(message, yes=False):
    """Ask the user a yes/no question and return the answer as a bool.

    * yes: when True the prompt is skipped and True is returned (--yes flag).
    Raises CLIError when no tty is available to prompt on.
    """
    if yes:
        return True
    try:
        return bool(prompt_y_n(message))
    except NoTTYException:
        raise CLIError(
            'Unable to prompt for confirmation as no tty available. Use --yes.')
def _is_resource_name(resource):
if len(resource.split('/')) == 1:
return True
return False
|
|
import sys
from Bio import SeqIO
from Modules.p05_ParseGff import *
from natsort import natsorted
#===============================================================================
# snpEff and snpSift
#===============================================================================
def snpEff_annotateVCF(vcf,snpEff,genome):
    """Annotate *vcf* with snpEff and return the '.eff.vcf' output filename.

    * vcf: vcf file that need to be annotated
    * snpEff: path to the snpEff jar; its config file is assumed to sit next
      to it ('.jar' replaced by '.config')
    * genome: genome database built using snpEff
    """
    config_file = snpEff[:-3] + 'config'
    annotated = vcf[:-3] + 'eff.vcf'
    command = 'java -jar {} -c {} -v {} {} > {}'.format(
        snpEff, config_file, genome, vcf, annotated)
    subprocess.call(command, shell=True)
    return annotated
def snpSift_filterVCF(annotatedVCF,snpSift,filters=''):
    """
    Filter a snpEff-annotated vcf with SnpSift and extract a flat table.

    * annotatedVCF: annotated vcf file (output of snpEff_annotateVCF)
    * snpSift: pathway to the SnpSift jar
    * filters: a list of argument strings joined into one double-quoted
      SnpSift filter expression
    Returns the '.target.vcf' output filename; its tab separated columns are
    CHROM, ANN effect, gene, impact, feature id and HGVS.p, with ',' joining
    multiple annotations and '.' marking empty fields.
    """
    outputFile = annotatedVCF[:-3] + 'target.vcf' # variant only for interested genes
    filterCmd = ' '.join(filters)
    filterCmd = '\"' + filterCmd + '\"'
    # extract variants of target genes.
    cmd = ('java -jar {snpSift} filter {filterCmd} {inputVcf} | java -jar {snpSift} extractFields -s {sep} -e {empty} - '
           'CHROM \"ANN[*].EFFECT\" \"ANN[*].GENE\" \"ANN[*].IMPACT\" '
           '\"ANN[*].FEATUREID\" \"ANN[*].HGVS_P\" > {output}').format(snpSift=snpSift,
           filterCmd=filterCmd,inputVcf=annotatedVCF,sep='\',\'',empty='\'.\'',output = outputFile)
    subprocess.call(cmd,shell=True)
    return outputFile
#===============================================================================
# Provean analysis related functions
#===============================================================================
def vcf2input4provean(filteredVCF,record_dict,gffFile,CodonFile):
    """
    Prepare provean input files (one protein fasta and one variant list per
    gene/transcript) from a SnpSift-filtered vcf table.

    * filteredVCF: tab separated table produced by snpSift_filterVCF
    * record_dict: genome sequence lookup passed through to get_AA_from_gff
      (presumably id -> SeqRecord; confirm against p05_ParseGff)
    * gffFile: str. annotation filename.
    * CodonFile: str. codon table used by get_AA_from_gff.
    Returns [protein_files, variant_files]: parallel lists of the generated
    '<gene>_<trid>.protein.fa' and '<gene>_<trid>.variant.txt' filenames,
    written into the current working directory.  Only variants with snpEff
    impact HIGH or MODERATE are kept.
    """
    vcf_df = pd.read_csv(filteredVCF,sep='\t')
    if vcf_df.empty:
        return [[],[]]
    ##------- 1. define the dictionaries and file lists ------------
    # pro_vari_dic = {} # dictionary. Format: transcriptID:[protein variants]
    # trid_gene_dic = {} # dictionary. Format: transcriptID:[gene symbol]
    # trid_chrom = {} # dictionary. Format: transcriptID:[chromosome]
    gene_trid2chrom = {} # dictionary. Format: gene_transcriptID:chrome
    gene_trid2vari = {} # dictionary. Format: gene_transcriptID:[protein variants]
    protein_files = [] # stores input files of protein sequences
    variant_files = [] # stores input files of variants
    ##------ 2. build the dictionaries. The reason for building the libraryies is that one ----------
    # transcript may have many variants in different lines.
    for i in range(len(vcf_df['#CHROM'])):
        chrom = vcf_df['#CHROM'][i]
        # each ANN column holds one comma separated entry per annotation
        impacts = vcf_df['ANN[*].IMPACT'][i].split(',')
        trids = vcf_df['ANN[*].FEATUREID'][i].split(',')
        HGVS_P = vcf_df['ANN[*].HGVS_P'][i].split(',')
        genes = vcf_df['ANN[*].GENE'][i].split(',')
        for impact,trid,vari,gene in zip(impacts,trids,HGVS_P,genes):
            if vari == '.':
                continue
            else:
                # if trid is in the format of Transcript_genenumber, remove the transcript_
                if 'Transcript_' in trid:
                    trid = trid[11:]
                gene_trid = gene + '_' + trid
                # vari[2:] drops the leading 'p.' of the HGVS.p string
                one_letter_vari = vari3letter2vari1letter(vari[2:])
                if impact == 'HIGH' or impact == 'MODERATE':
                    try:
                        gene_trid2vari[gene_trid].append(one_letter_vari)
                    except:
                        gene_trid2vari[gene_trid] = [one_letter_vari]
                    # add gene_trid:chrom to the dictionary
                    if gene_trid not in gene_trid2chrom:
                        gene_trid2chrom[gene_trid] = chrom
                else:
                    continue
    gene_tridList = sorted(gene_trid2vari.keys())
    #------- 3. generate a list of protein fa files and variant txt files ----
    for gene_trid in gene_tridList:
        item = gene_trid.split('_')
        gene = item[0]; trid = item[1]
        chrom = gene_trid2chrom[gene_trid]
        protein_file = gene_trid + '.protein.fa'
        vari_file = gene_trid + '.variant.txt'
        protein_files.append(protein_file)
        variant_files.append(vari_file)
        ### generate protein fa file
        protein_output = open(protein_file,'w')
        try:
            AA = get_AA_from_gff(record_dict,gffFile,chrom,gene,trid,CodonFile)
            protein_output.write(('>{gene_trid} \n{AA}').format(gene_trid=gene+'_'+trid,AA=AA))
        except:
            print 'fail to get',gene,'amino acid sequence.'
            raise
        protein_output.close()
        ### generate variant txt file
        variant_output = open(vari_file,'w')
        for vari in gene_trid2vari[gene_trid]:
            variant_output.write(vari + '\n')
        variant_output.close()
    return [protein_files,variant_files]
def run_provean(provean_soft,protein,variant,support_set_path,support_set,thread = 1):
    """Run provean for one protein/variant file pair and return its stdout.

    * provean_soft: pathway to provean.sh
    * protein: fasta file holding the query protein sequence
    * variant: txt file holding one-letter amino-acid variants
    * support_set_path: folder containing precomputed '*.sss' files
    * support_set: list of '*.sss' filenames; when the matching one exists,
      provean reuses it and skips the expensive blast step
    * thread: number of blast threads
    """
    sss_name = protein[:-2] + 'sss'
    if sss_name in support_set:
        # reuse the precomputed supporting set
        cmd = ('{provean} -q {fasta} -v {vari} --num_threads {thread} --supporting_set {set}').format(
            provean=provean_soft, fasta=protein, vari=variant,
            thread=str(thread), set=support_set_path + '/' + sss_name)
    else:
        # no supporting set yet: run blast and save one for next time
        cmd = ('{provean} -q {fasta} -v {vari} --num_threads {thread} --save_supporting_set {output}').format(
            provean=provean_soft, fasta=protein, vari=variant,
            thread=str(thread), output=sss_name)
    return subprocess.check_output(cmd.split(' '))
def capture_provean_scores(outputFile,provean,proteinFiles,variantFiles,support_set_path,support_set,thread = 1):
    """
    Run provean for every protein/variant file pair and capture the scores
    (normally printed to the terminal) into *outputFile*.

    * outputFile: the file that stores all scores
    * provean: pathway to provean
    * proteinFiles: a list of protein fasta files
    * variantFiles: a list of variant txt files (parallel to proteinFiles)
    * support_set_path: a folder that has all provided .sss files for provean
    * support_set: the .sss file names
    * thread: number of threads to run blast
    Variants provean could not score (e.g. frameshifts) are still written,
    with score 'NA'.
    """
    output = open(outputFile,'w')
    output.write('# Gene_trID\tVARIATION\tSCORE\n')
    for proteinFile,variFile in zip(proteinFiles,variantFiles):
        result = run_provean(provean,proteinFile,variFile,support_set_path,support_set,thread)
        #==== 1. put all variants in vari into a list
        allVari = pd.read_csv(variFile,header=None,names=['Variant'])
        proveanVari = [] # proveanVari cannot process frameshift in allVari
        lines = result[:-1].split('\n')
        #==== 2. capture the output of provean
        for line in lines:
            # skip comment ('#'), progress ('[') and 'No ...' status lines
            if line.startswith('#') or line.startswith('[') or line.startswith('No'):
                continue
            else:
                item = line.split('\t')
                # variFile[:-12] strips '.variant.txt', leaving the Gene_trID
                output.write(variFile[:-12] + '\t' + '\t'.join(item) + '\n')
                proveanVari.append(item[0])
        #==== 3. output the varis that provean cannot process
        for variant in allVari['Variant']:
            if variant not in proveanVari:
                output.write(variFile[:-12] + '\t' + variant + '\tNA' + '\n')
        print proteinFile,'provean analysis finishes\n'
    output.close()
# os.chdir('/data/shangzhong/VariantCall/CHOS_ToT_VS_chok1/filteredVcfResults/CHOS_TOT_A4')
# provean = '/home/shangzhong/Installation/provean-1.1.5/bin/provean.sh'
# proteinFiles = ['Nbn_rna17749.protein.fa']
# variantFiles = ['Nbn_rna17749.variant.txt']
# support_set_path = '/data/shangzhong/VariantCall/chok1_Provean_Supporting_Set'
# support_set = ['Nbn_rna17749.sss']
# outputFile = 'pro.txt'
# capture_provean_scores(outputFile,provean,proteinFiles,variantFiles,support_set_path,support_set,thread = 19)
def merge_all_provean_scores(previousFile,ref_name,*pathways):
    """Combine per-sample provean score files into one long table and a pivot.

    * previousFile: existing result file to append to; '' starts fresh.
    * ref_name: reference genome name, recorded in the 'Reference' column.
    * pathways: directories whose sub-folders (one per sample) each hold a
      'proveanScore.txt' file.

    Writes '<previousFile>merge.csv' (long format) and '<previousFile>pivot.csv'
    (rows: 'Gene_TRID-Variant' key, columns: sample, values: score) and returns
    the pivot DataFrame (previously documented but never returned).
    """
    columns = ['Gene_TRID','Variant','Score','Sample','Reference']
    if previousFile == '':
        frames = []
    else:
        frames = [pd.read_csv(previousFile, sep='\t', header=None, names=columns)]
    for pathway in pathways:
        os.chdir(pathway)
        folders = [f for f in os.listdir(pathway) if os.path.isdir(os.path.join(pathway, f))]
        for folder in folders:
            score_file = folder + '/' + 'proveanScore.txt'
            data = pd.read_csv(score_file, sep='\t', header=0,
                               names=['Gene_TRID', 'Variant', 'Score'])
            data['Sample'] = pd.Series([folder] * len(data['Score']), index=data.index)
            data['Reference'] = pd.Series([ref_name] * len(data['Score']), index=data.index)
            frames.append(data.fillna(0))
    # concat once (DataFrame.append is deprecated); drop empty frames so the
    # Score column keeps its numeric dtype
    frames = [d for d in frames if not d.empty]
    df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame(columns=columns)
    # pivot: rows Gene_TRID-variant key, columns samples, values provean score
    pivot = df[['Sample', 'Score']].copy()
    pivot.insert(0, 'Gene_TRID_Vari', df['Gene_TRID'].map(str) + '-' + df['Variant'])
    # BUG FIX: the pivot index must match the column inserted above
    # ('Gene_TRID_Vari'); the old index='Gene_TRID-Vari' always raised KeyError.
    df_pivot = pd.pivot_table(pivot, values='Score', index='Gene_TRID_Vari', columns='Sample')
    # output whole results to file
    df.to_csv(previousFile + 'merge.csv', sep='\t')
    df_pivot.to_csv(previousFile + 'pivot.csv', sep='\t')
    return df_pivot
# os.chdir('/data/shangzhong')
# merge_all_provean_scores('','chok1','/data/shangzhong/VariantCall/pgsaDNA_VS_chok1/filteredVcfResults',
# '/data/shangzhong/VariantCall/pgsa_VS_chok1/filteredVcfResults'
# )
# print 'done'
def merge_provean_results(folder,outFile):
    """Outer-join every '*proveanScore.txt' table in *folder* into *outFile*.

    Each input contributes one score column named after its file (the
    'proveanScore.txt' suffix stripped); rows are keyed on transcript id
    ('g_t_id') and variant ('vari').  Files are processed in natural order.
    """
    os.chdir(folder)
    score_files = natsorted([f for f in os.listdir(folder)
                             if f.endswith('proveanScore.txt')])
    merged = pd.read_csv(score_files[0], sep='\t', header=0,
                         names=['g_t_id', 'vari', score_files[0][:-16]])
    merged = merged.drop_duplicates()
    for fname in score_files[1:]:
        sample = pd.read_csv(fname, sep='\t', header=0,
                             names=['g_t_id', 'vari', fname[:-16]])
        merged = merged.merge(sample.drop_duplicates(),
                              on=['g_t_id', 'vari'], how='outer')
    merged.to_csv(outFile, sep='\t', index=False)
# folder = '/data/shangzhong/DNArepair/Annotation/Provean_results'
# outFile = '/data/shangzhong/DNArepair/Annotation/FinalResults.txt'
# merge_provean_results(folder,outFile)
def merge_all_genes_with_diff_pr_from_refseq(previousFile,outputFile,commonFileName,*pathways):
    """
    Merge all transcripts in the gff annotation whose local protein sequence
    differs from the online refseq protein sequence into one Gene/TrID table.

    * previousFile: existing table to append to ('' starts a fresh one).
    * outputFile: str. tab separated file that stores the final results.
    * commonFileName: str. In each sample folder, the fasta holding the
      inconsistent proteins has this same name, e.g. 'Localdifferfromrefseq.fa'
    * pathways: directories whose sub-folders (one per sample) each contain
      *commonFileName*.
    Gene and transcript id are parsed from the 'gene=...;' and 'Parent=...;'
    tokens of each fasta description line.
    """
    columns = ['Gene','TrID']
    if previousFile=='':
        df = pd.DataFrame(columns=columns) # df stores 2 columns
    else:
        df = pd.read_csv(previousFile,sep='\t',header=None,
                     names=columns)
    # 1. store all the description into one list
    for pathway in pathways:
        os.chdir(pathway)
        folders = [f for f in os.listdir(pathway) if os.path.isdir(os.path.join(pathway,f))]
        description = []
        for folder in folders:
            f = folder + '/' + commonFileName
            # NOTE(review): the 'rU' open mode was removed in Python 3.11;
            # this module targets Python 2 where it is still valid.
            sequence = SeqIO.parse(open(f,'rU'),'fasta')
            for seq in sequence:
                des = seq.description
                if des not in description:
                    description.append(seq.description)
        # 2. extract gene name and transcript id
        for des in description:
            # get transcript id
            index = des.index("Parent=")
            inter = des[index:]
            index = inter.index(';')
            trid = inter[7:index] # transcript id
            # get gene name
            index = des.index("gene=")
            inter = des[index:]
            index = inter.index(';')
            gene = inter[5:index]
            # build a df
            row = pd.DataFrame({'Gene':[gene],'TrID':[trid]})
            df = df.append(row)
    # 3. output to file
    df.to_csv(outputFile,sep='\t',index=False)
# merge_all_genes_with_diff_pr_from_refseq('','/data/shangzhong/VariantCall/Results/hamster_DNA_repair_pr_different_from_refseq.txt',
# 'hamster_DNA_repair_pr_different_from_refseq.fa',
# '/data/shangzhong/VariantCall/Results')
def addGT2ProveanPivotRes(provean_pivot,head,*filenames):
    """
    Append a gene-type (Het/Hom) column per sample to the provean pivot table.

    * provean_pivot: str. Filename of provean pivot results, csv tab delimited.
      First 3 columns should be: 'Gene', 'transcript ID' and 'peptide variant'.
      The rest columns should be provean scores for samples.
    * head: row number to use as the header when reading each vcf
      (passed straight to pandas' header= argument).
    * filenames: str. names of .eff.vcf files of samples annotated by snpEff.
    Writes '<provean_pivot minus .csv>_GT.csv' and returns that filename.
    Genotype '1/1' is recorded as 'Hom', anything else as 'Het'; repeated
    hits on the same gene/transcript/variant key are joined with '&'.
    """
    # read the provean pivot table
    df2 = pd.read_csv(provean_pivot,sep='\t',header=0)
    # build a dictionary for each vcf file and add GT
    for filename1 in filenames:
        df1 = pd.read_csv(filename1,header=head,sep='\t')
        colnames = df1.columns
        dic = {}  # gene+trid+variant -> 'Het'/'Hom' (joined with '&' on repeats)
        # the sample's FORMAT values are assumed to be in the last column
        for info,form in zip(df1["INFO"],df1[colnames[-1]]):
            # isolate the ANN=... annotation field inside the INFO column
            index = info.index('ANN')
            inter = info[index:] # get all strings after ann
            if ';' in inter:
                index = inter.index(';')
                ann = inter[:index]
            else:
                ann = inter
            if ',' in ann:
                ann_list = ann.split(',')
            else:
                ann_list = [ann]
            for record in ann_list:
                annotation = record.split('|')
                # get vari
                gene = annotation[3]
                trid = annotation[6]
                variant = annotation[10]
                if variant !='':
                    # variant[2:] drops the leading 'p.' of the HGVS.p string
                    vari_form = variant[2:]
                    vari = vari3letter2vari1letter(vari_form)
                else:
                    continue
                # first FORMAT subfield is the genotype, e.g. '1/1' or '0/1'
                gt = form.split(':')[0]
                key = gene+trid+vari
                # define the variant type
                if gt=='1/1':
                    value='Hom'
                else:
                    value='Het'
                if key in dic:
                    dic[key]=dic[key]+'&'+value
                else:
                    dic[key]=value
        # add GT information: re-read the pivot file line by line and match
        # each row's first three columns against the dictionary key
        handle2 = open(provean_pivot,'r')
        genetype = []
        next(handle2)
        for line in handle2:
            item = line.split('\t')
            key = ''.join(item[:3])
            try:
                genetype.append(dic[key])
            except:
                genetype.append('')
        data = pd.DataFrame({colnames[-1]+'_GeneType':genetype})
        df2 = pd.concat([df2,data],axis=1)
    output = provean_pivot[:-4] + '_GT.csv'
    df2.to_csv(output,sep='\t',index=False)
    return output
|
|
# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.sdb.db.key import Key
from boto.sdb.db.model import Model
import psycopg2
import psycopg2.extensions
import uuid
import os
import string
from boto.exception import SDBPersistenceError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
class PGConverter:
    """Translate property values between Python objects and their PostgreSQL
    column representation for a PGManager (Python 2 only).

    Key and Model references are stored as the referenced object's id string;
    list properties are rendered using PostgreSQL array syntax ('{a,b,c}').
    """
    def __init__(self, manager):
        self.manager = manager
        # data_type -> (encoder, decoder); anything unlisted passes through.
        self.type_map = {Key : (self.encode_reference, self.decode_reference),
                         Model : (self.encode_reference, self.decode_reference)}
    def encode(self, type, value):
        """Encode *value* with the encoder registered for *type*, if any."""
        if type in self.type_map:
            encode = self.type_map[type][0]
            return encode(value)
        return value
    def decode(self, type, value):
        """Decode *value* with the decoder registered for *type*, if any."""
        if type in self.type_map:
            decode = self.type_map[type][1]
            return decode(value)
        return value
    def encode_prop(self, prop, value):
        """Encode a property value for storage; a typed list becomes a
        PostgreSQL array literal of its encoded items."""
        if isinstance(value, list):
            if hasattr(prop, 'item_type'):
                s = "{"
                new_value = []
                for v in value:
                    item_type = getattr(prop, 'item_type')
                    # any Model subclass is encoded like a plain Model reference
                    if Model in item_type.mro():
                        item_type = Model
                    new_value.append('%s' % self.encode(item_type, v))
                s += ','.join(new_value)
                s += "}"
                return s
            else:
                return value
        return self.encode(prop.data_type, value)
    def decode_prop(self, prop, value):
        """Decode a stored column value back into the property's Python type."""
        if prop.data_type == list:
            if value != None:
                # a single stored item still decodes to a one-element list
                if not isinstance(value, list):
                    value = [value]
                if hasattr(prop, 'item_type'):
                    item_type = getattr(prop, "item_type")
                    if Model in item_type.mro():
                        # delegate to the item class's own manager unless it is
                        # the very class this manager already handles
                        if item_type != self.manager.cls:
                            return item_type._manager.decode_value(prop, value)
                        else:
                            item_type = Model
                    return [self.decode(item_type, v) for v in value]
            return value
        elif hasattr(prop, 'reference_class'):
            ref_class = getattr(prop, 'reference_class')
            if ref_class != self.manager.cls:
                return ref_class._manager.decode_value(prop, value)
            else:
                return self.decode(prop.data_type, value)
        elif hasattr(prop, 'calculated_type'):
            calc_type = getattr(prop, 'calculated_type')
            return self.decode(calc_type, value)
        else:
            return self.decode(prop.data_type, value)
    def encode_reference(self, value):
        """Store a reference as its id; strings pass through, None -> ''."""
        if isinstance(value, str) or isinstance(value, unicode):
            return value
        if value == None:
            return ''
        else:
            return value.id
    def decode_reference(self, value):
        """Resolve a stored id back to its object (None for empty values)."""
        if not value:
            return None
        try:
            return self.manager.get_object_from_id(value)
        except:
            raise ValueError, 'Unable to convert %s to Object' % value
class PGManager(object):
def __init__(self, cls, db_name, db_user, db_passwd,
db_host, db_port, db_table, sql_dir, enable_ssl):
self.cls = cls
self.db_name = db_name
self.db_user = db_user
self.db_passwd = db_passwd
self.db_host = db_host
self.db_port = db_port
self.db_table = db_table
self.sql_dir = sql_dir
self.in_transaction = False
self.converter = PGConverter(self)
self._connect()
def _build_connect_string(self):
cs = 'dbname=%s user=%s password=%s host=%s port=%d'
return cs % (self.db_name, self.db_user, self.db_passwd,
self.db_host, self.db_port)
def _connect(self):
self.connection = psycopg2.connect(self._build_connect_string())
self.connection.set_client_encoding('UTF8')
self.cursor = self.connection.cursor()
def _object_lister(self, cursor):
try:
for row in cursor:
yield self._object_from_row(row, cursor.description)
except StopIteration:
cursor.close()
raise StopIteration
def _dict_from_row(self, row, description):
d = {}
for i in range(0, len(row)):
d[description[i][0]] = row[i]
return d
def _object_from_row(self, row, description=None):
if not description:
description = self.cursor.description
d = self._dict_from_row(row, description)
obj = self.cls(d['id'])
obj._manager = self
obj._auto_update = False
for prop in obj.properties(hidden=False):
if prop.data_type != Key:
v = self.decode_value(prop, d[prop.name])
v = prop.make_value_from_datastore(v)
if hasattr(prop, 'calculated_type'):
prop._set_direct(obj, v)
elif not prop.empty(v):
setattr(obj, prop.name, v)
else:
setattr(obj, prop.name, prop.default_value())
return obj
def _build_insert_qs(self, obj, calculated):
fields = []
values = []
templs = []
id_calculated = [p for p in calculated if p.name == 'id']
for prop in obj.properties(hidden=False):
if prop not in calculated:
value = prop.get_value_for_datastore(obj)
if value != prop.default_value() or prop.required:
value = self.encode_value(prop, value)
values.append(value)
fields.append('"%s"' % prop.name)
templs.append('%s')
qs = 'INSERT INTO "%s" (' % self.db_table
if len(id_calculated) == 0:
qs += '"id",'
qs += ','.join(fields)
qs += ") VALUES ("
if len(id_calculated) == 0:
qs += "'%s'," % obj.id
qs += ','.join(templs)
qs += ')'
if calculated:
qs += ' RETURNING '
calc_values = ['"%s"' % p.name for p in calculated]
qs += ','.join(calc_values)
qs += ';'
return qs, values
def _build_update_qs(self, obj, calculated):
fields = []
values = []
for prop in obj.properties(hidden=False):
if prop not in calculated:
value = prop.get_value_for_datastore(obj)
if value != prop.default_value() or prop.required:
value = self.encode_value(prop, value)
values.append(value)
field = '"%s"=' % prop.name
field += '%s'
fields.append(field)
qs = 'UPDATE "%s" SET ' % self.db_table
qs += ','.join(fields)
qs += """ WHERE "id" = '%s'""" % obj.id
if calculated:
qs += ' RETURNING '
calc_values = ['"%s"' % p.name for p in calculated]
qs += ','.join(calc_values)
qs += ';'
return qs, values
def _get_sql(self, mapping=None):
print '_get_sql'
sql = None
if self.sql_dir:
path = os.path.join(self.sql_dir, self.cls.__name__ + '.sql')
print path
if os.path.isfile(path):
fp = open(path)
sql = fp.read()
fp.close()
t = string.Template(sql)
sql = t.safe_substitute(mapping)
return sql
def start_transaction(self):
print 'start_transaction'
self.in_transaction = True
def end_transaction(self):
print 'end_transaction'
self.in_transaction = False
self.commit()
def commit(self):
if not self.in_transaction:
print '!!commit on %s' % self.db_table
try:
self.connection.commit()
except psycopg2.ProgrammingError, err:
self.connection.rollback()
raise err
def rollback(self):
print '!!rollback on %s' % self.db_table
self.connection.rollback()
def delete_table(self):
self.cursor.execute('DROP TABLE "%s";' % self.db_table)
self.commit()
def create_table(self, mapping=None):
self.cursor.execute(self._get_sql(mapping))
self.commit()
def encode_value(self, prop, value):
return self.converter.encode_prop(prop, value)
def decode_value(self, prop, value):
return self.converter.decode_prop(prop, value)
def execute_sql(self, query):
self.cursor.execute(query, None)
self.commit()
def query_sql(self, query, vars=None):
self.cursor.execute(query, vars)
return self.cursor.fetchall()
def lookup(self, cls, name, value):
values = []
qs = 'SELECT * FROM "%s" WHERE ' % self.db_table
found = False
for property in cls.properties(hidden=False):
if property.name == name:
found = True
value = self.encode_value(property, value)
values.append(value)
qs += "%s=" % name
qs += "%s"
if not found:
raise SDBPersistenceError('%s is not a valid field' % name)
qs += ';'
print qs
self.cursor.execute(qs, values)
if self.cursor.rowcount == 1:
row = self.cursor.fetchone()
return self._object_from_row(row, self.cursor.description)
elif self.cursor.rowcount == 0:
raise KeyError, 'Object not found'
else:
raise LookupError, 'Multiple Objects Found'
def query(self, cls, filters, limit=None, order_by=None):
parts = []
qs = 'SELECT * FROM "%s"' % self.db_table
if filters:
qs += ' WHERE '
properties = cls.properties(hidden=False)
for filter, value in filters:
name, op = filter.strip().split()
found = False
for property in properties:
if property.name == name:
found = True
value = self.encode_value(property, value)
parts.append(""""%s"%s'%s'""" % (name, op, value))
if not found:
raise SDBPersistenceError('%s is not a valid field' % name)
qs += ','.join(parts)
qs += ';'
print qs
cursor = self.connection.cursor()
cursor.execute(qs)
return self._object_lister(cursor)
    def get_property(self, prop, obj, name):
        """Read a single column ``name`` for ``obj`` and decode it.

        Raises AttributeError if the row is missing or ``name`` is not one
        of the object's (non-hidden) properties.
        """
        qs = """SELECT "%s" FROM "%s" WHERE id='%s';""" % (name, self.db_table, obj.id)
        print qs
        self.cursor.execute(qs, None)
        if self.cursor.rowcount == 1:
            rs = self.cursor.fetchone()
            # Locate the matching property so the raw DB value can be
            # decoded back into its Python representation.
            for prop in obj.properties(hidden=False):
                if prop.name == name:
                    v = self.decode_value(prop, rs[0])
                    return v
        raise AttributeError, '%s not found' % name
def set_property(self, prop, obj, name, value):
pass
value = self.encode_value(prop, value)
qs = 'UPDATE "%s" SET ' % self.db_table
qs += "%s='%s'" % (name, self.encode_value(prop, value))
qs += " WHERE id='%s'" % obj.id
qs += ';'
print qs
self.cursor.execute(qs)
self.commit()
    def get_object(self, cls, id):
        """Fetch the object of ``cls`` with the given id.

        Raises SDBPersistenceError when exactly one matching row is not
        found.
        """
        qs = """SELECT * FROM "%s" WHERE id='%s';""" % (self.db_table, id)
        self.cursor.execute(qs, None)
        if self.cursor.rowcount == 1:
            row = self.cursor.fetchone()
            return self._object_from_row(row, self.cursor.description)
        else:
            raise SDBPersistenceError('%s object with id=%s does not exist' % (cls.__name__, id))
    def get_object_from_id(self, id):
        # Convenience wrapper: look up by id using this manager's bound class.
        return self.get_object(self.cls, id)
def _find_calculated_props(self, obj):
return [p for p in obj.properties() if hasattr(p, 'calculated_type')]
    def save_object(self, obj):
        """INSERT or UPDATE ``obj`` depending on whether it already has an id.

        Calculated properties are read back from the database after the
        statement runs and pushed directly onto the object.
        """
        obj._auto_update = False
        calculated = self._find_calculated_props(obj)
        if not obj.id:
            # New object: mint a uuid4 id and build an INSERT.
            obj.id = str(uuid.uuid4())
            qs, values = self._build_insert_qs(obj, calculated)
        else:
            qs, values = self._build_update_qs(obj, calculated)
        print qs
        self.cursor.execute(qs, values)
        if calculated:
            # The generated statement is expected to return the calculated
            # columns in the same order as ``calculated`` — TODO confirm
            # against _build_insert_qs/_build_update_qs.
            calc_values = self.cursor.fetchone()
            print calculated
            print calc_values
            for i in range(0, len(calculated)):
                prop = calculated[i]
                # _set_direct avoids re-triggering property machinery.
                prop._set_direct(obj, calc_values[i])
        self.commit()
    def delete_object(self, obj):
        # Hard-delete the row for ``obj`` and commit immediately.
        qs = """DELETE FROM "%s" WHERE id='%s';""" % (self.db_table, obj.id)
        print qs
        self.cursor.execute(qs)
        self.commit()
|
|
"""
Created on Jan 25, 2017
@author: Jafar Taghiyar (jtaghiyar@bccrc.ca)
"""
from datetime import datetime
#============================
# Django imports
#----------------------------
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib import messages
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.shortcuts import get_object_or_404
from django.views.generic.base import TemplateView
#============================
# App imports
#----------------------------
from core.decorators import Render
from .forms import (
SampleForm,
SampleClinicalInfoInlineFormset,
LibraryForm
)
from .models import Sample, Library
#============================
# Home page of the app
#----------------------------
@Render("bulk/home.html")
def home_view(request):
    """Home page of the app; renders with an empty context."""
    return {}
#============================
# Sample views
#----------------------------
@Render("bulk/sample_list.html")
def sample_list(request):
    """List all samples, ordered by sample_id."""
    return {'samples': Sample.objects.all().order_by('sample_id')}
@Render("bulk/sample_detail.html")
def sample_detail(request, pk):
    """Detail page for a single sample; 404s on an unknown pk."""
    return {'sample': get_object_or_404(Sample, pk=pk)}
@Render("bulk/sample_create.html")
@login_required()
def sample_create(request):
    """Sample create page.

    On POST, validates the sample form and its clinical-info inline
    formset; redirects to the new sample on success, otherwise re-renders
    the page with the bound (error-carrying) form/formset.
    """
    if request.method == 'POST':
        form = SampleForm(request.POST)
        if form.is_valid():
            instance = form.save(commit=False)
            instance.save()
            additional_info_formset = SampleClinicalInfoInlineFormset(
                request.POST,
                instance=instance
            )
            if additional_info_formset.is_valid():
                additional_info_formset.save()
                msg = "Successfully created the Sample."
                messages.success(request, msg)
                return HttpResponseRedirect(instance.get_absolute_url())
            # Formset invalid: re-display it bound, with its errors.
            # Previously this path left ``formset`` unbound and raised
            # UnboundLocalError when building the context below.
            msg = "Failed to create the sample. Please fix the errors below."
            messages.error(request, msg)
            formset = additional_info_formset
        else:
            msg = "Failed to create the sample. Please fix the errors below."
            messages.error(request, msg)
            formset = SampleClinicalInfoInlineFormset()
    else:
        form = SampleForm()
        formset = SampleClinicalInfoInlineFormset()
    context = {
        'form': form,
        'formset': formset,
        'no_show_fields': ['id', 'sample', 'DELETE'],
    }
    return context
@Render("bulk/sample_update.html")
@login_required()
def sample_update(request, pk):
    """Sample update page.

    Mirrors sample_create but edits an existing sample; re-renders with
    bound forms whenever validation fails.
    """
    sample = get_object_or_404(Sample, pk=pk)
    if request.method == 'POST':
        form = SampleForm(request.POST, instance=sample)
        if form.is_valid():
            instance = form.save(commit=False)
            instance.save()
            additional_info_formset = SampleClinicalInfoInlineFormset(
                request.POST,
                instance=instance
            )
            if additional_info_formset.is_valid():
                additional_info_formset.save()
                msg = "Successfully updated the Sample."
                messages.success(request, msg)
                return HttpResponseRedirect(instance.get_absolute_url())
            # Formset invalid: re-display it bound, with its errors.
            # Previously ``formset`` was unbound on this path and raised
            # UnboundLocalError when building the context below.
            msg = "Failed to update the sample. Please fix the errors below."
            messages.error(request, msg)
            formset = additional_info_formset
        else:
            msg = "Failed to update the sample. Please fix the errors below."
            messages.error(request, msg)
            formset = SampleClinicalInfoInlineFormset(instance=sample)
    else:
        form = SampleForm(instance=sample)
        formset = SampleClinicalInfoInlineFormset(instance=sample)
    context = {
        'form': form,
        'formset': formset,
        'pk': pk,
        'no_show_fields': ['id', 'sample', 'DELETE'],
    }
    return context
@Render("bulk/sample_delete.html")
@login_required()
def sample_delete(request, pk):
    """Confirm-and-delete page for a sample."""
    sample = get_object_or_404(Sample, pk=pk)
    if request.method != 'POST':
        # GET: render the confirmation page.
        return {'sample': sample, 'pk': pk}
    sample.delete()
    messages.success(request, "Successfully deleted the Sample.")
    return HttpResponseRedirect(reverse('bulk:sample_list'))
#============================
# Library views
#----------------------------
@Render("bulk/library_list.html")
def library_list(request):
    """List all libraries, ordered by library_id."""
    return {'libraries': Library.objects.all().order_by('library_id')}
@Render("bulk/library_detail.html")
def library_detail(request, pk):
    """Detail page for a single library; 404s on an unknown pk."""
    return {'library': get_object_or_404(Library, pk=pk)}
@Render("bulk/library_delete.html")
@login_required()
def library_delete(request, pk):
    """Confirm-and-delete page for a library."""
    library = get_object_or_404(Library, pk=pk)
    if request.method != 'POST':
        # GET: render the confirmation page.
        return {'library': library, 'pk': pk}
    library.delete()
    messages.success(request, "Successfully deleted the Library.")
    return HttpResponseRedirect(reverse('bulk:library_list'))
@method_decorator(login_required, name='dispatch')
class LibraryCreate(TemplateView):
    """
    Library create page.

    GET renders an empty LibraryForm (optionally pre-associated with a
    sample via ``from_sample``); POST validates and saves the form plus
    any inline formsets from _validate_formsets.
    """
    template_name = "bulk/library_create.html"
    def get_context_data(self, from_sample=None):
        # ``from_sample`` is a Sample pk used to pre-select the sample;
        # 404s if it does not exist.
        if from_sample:
            sample = get_object_or_404(Sample, pk=from_sample)
        else:
            sample = None
        context = {
            'lib_form': LibraryForm(),
            'sample': str(sample),
            'sample_id': from_sample,
            'related_libs': Library.objects.all()
        }
        return context
    def get(self, request, from_sample=None, *args, **kwargs):
        context = self.get_context_data(from_sample)
        return render(request, self.template_name, context)
    def post(self, request, from_sample=None, *args, **kwargs):
        context = self.get_context_data(from_sample)
        ## this is because of this django feature:
        ## https://code.djangoproject.com/ticket/1130
        # request.POST['projects'] = ','.join(request.POST.getlist('projects'))
        lib_form = LibraryForm(request.POST)
        context['lib_form'] = lib_form
        if lib_form.is_valid():
            ## if 'commit=True' when saving lib_form, then it strangely
            ## raises the following error when trying to save the
            ## ManyToMany 'Projects' field:
            ## 'LibraryForm' object has no attribute 'save_m2m'.
            instance = lib_form.save(commit=False)
            all_valid, formsets = self._validate_formsets(request, instance)
            context.update(formsets)
            if all_valid:
                instance.save()
                # save the ManyToMany field.
                lib_form.save_m2m()
                # save the formsets.
                [formset.save() for formset in formsets.values()]
                msg = "Successfully created the Library."
                messages.success(request, msg)
                return HttpResponseRedirect(instance.get_absolute_url())
        msg = "Failed to create the library. Please fix the errors below."
        messages.error(request, msg)
        return render(request, self.template_name, context)
    def _validate_formsets(self, request, instance):
        """Bind and validate the inline formsets for ``instance``.

        Returns (all_valid, {name: bound_formset}).  NOTE: all formsets
        are currently commented out, so this always returns (True, {}).
        """
        all_valid = True
        formsets = {
            # 'libdetail_formset': LibrarySampleDetailInlineFormset(
            #     request.POST,
            #     instance=instance
            # ),
            # 'libcons_formset': LibraryConstructionInfoInlineFormset(
            #     request.POST,
            #     instance=instance
            # ),
            # 'libqs_formset': LibraryQuantificationAndStorageInlineFormset(
            #     request.POST,
            #     request.FILES or None,
            #     instance=instance
            # )
        }
        for k, formset in formsets.items():
            if not formset.is_valid():
                all_valid = False
            formsets[k] = formset
        return all_valid, formsets
class LibraryUpdate(LibraryCreate):
    """
    Library update page.

    Reuses LibraryCreate's POST machinery but binds the form to an
    existing Library.  The user-facing messages previously said
    "created"/"create" — a copy-paste from the create view — and now
    correctly say "updated"/"update".
    """
    template_name = "bulk/library_update.html"
    def get_context_data(self, pk):
        library = get_object_or_404(Library, pk=pk)
        # selected_projects = library.projects.names()
        # NOTE(review): .only() with no fields looks odd here — presumably
        # intended to fetch the related libraries; confirm against usage.
        selected_related_libs = library.relates_to.only()
        context = {
            'pk': pk,
            'lib_form': LibraryForm(instance=library),
            # 'projects': [t.name for t in Tag.objects.all()],
            # 'selected_projects': selected_projects,
            'related_libs': Library.objects.all(),
            'selected_related_libs': selected_related_libs
        }
        return context
    def get(self, request, pk, *args, **kwargs):
        context = self.get_context_data(pk)
        return render(request, self.template_name, context)
    def post(self, request, pk, *args, **kwargs):
        context = self.get_context_data(pk)
        ## this is because of this django feature:
        ## https://code.djangoproject.com/ticket/1130
        # request.POST['projects'] = ','.join(request.POST.getlist('projects'))
        library = get_object_or_404(Library, pk=pk)
        lib_form = LibraryForm(request.POST, instance=library)
        context['lib_form'] = lib_form
        if lib_form.is_valid():
            # if 'commit=True' when saving lib_form, then it strangely
            # raises the following error when trying to save the
            # ManyToMany 'Projects' field:
            # 'LibraryForm' object has no attribute 'save_m2m'.
            instance = lib_form.save(commit=False)
            all_valid, formsets = self._validate_formsets(request, instance)
            context.update(formsets)
            if all_valid:
                instance.save()
                # save the ManyToMany field.
                lib_form.save_m2m()
                # save the formsets.
                [formset.save() for formset in formsets.values()]
                msg = "Successfully updated the Library."
                messages.success(request, msg)
                return HttpResponseRedirect(instance.get_absolute_url())
        msg = "Failed to update the library. Please fix the errors below."
        messages.error(request, msg)
        return render(request, self.template_name, context)
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import re
import sys
import tempfile
import time
import traceback
import zipfile
from django.conf import settings
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.core.servers.basehttp import FileWrapper
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
import django.views.debug
import desktop.conf
import desktop.log.log_buffer
from desktop.api import massaged_tags_for_json, massaged_documents_for_json,\
_get_docs
from desktop.lib import django_mako
from desktop.lib.conf import GLOBAL_CONFIG
from desktop.lib.django_util import login_notrequired, render_json, render
from desktop.lib.i18n import smart_str
from desktop.lib.paths import get_desktop_root
from desktop.log.access import access_log_level, access_warn
from desktop.models import UserPreferences, Settings
from desktop import appmanager
LOG = logging.getLogger(__name__)
def home(request):
  """Hue home page: the user's documents/tags plus installed apps,
  serialized to JSON for the client-side document browser."""
  docs = _get_docs(request.user)
  apps = appmanager.get_apps_dict(request.user)
  return render('home.mako', request, {
    'apps': apps,
    'json_documents': json.dumps(massaged_documents_for_json(docs, request.user)),
    'json_tags': json.dumps(massaged_tags_for_json(docs, request.user)),
    'tours_and_tutorials': Settings.get_settings().tours_and_tutorials
  })
@access_log_level(logging.WARN)
def log_view(request):
  """
  We have a log handler that retains the last X characters of log messages.
  If it is attached to the root logger, this view will display that history,
  otherwise it will report that it can't be found.
  """
  if not request.user.is_superuser:
    return HttpResponse(_("You must be a superuser."))
  root_logger = logging.getLogger()
  for handler in root_logger.handlers:
    if isinstance(handler, desktop.log.log_buffer.FixedBufferHandler):
      # Snapshot the ring buffer and pass through the search query, if any.
      return render('logs.mako', request,
                    dict(log=list(handler.buf), query=request.GET.get("q", "")))
  return render('logs.mako', request, dict(log=[_("No logs found!")]))
@access_log_level(logging.WARN)
def download_log_view(request):
"""
Zip up the log buffer and then return as a file attachment.
"""
if not request.user.is_superuser:
return HttpResponse(_("You must be a superuser."))
l = logging.getLogger()
for h in l.handlers:
if isinstance(h, desktop.log.log_buffer.FixedBufferHandler):
try:
# We want to avoid doing a '\n'.join of the entire log in memory
# in case it is rather big. So we write it to a file line by line
# and pass that file to zipfile, which might follow a more efficient path.
tmp = tempfile.NamedTemporaryFile()
log_tmp = tempfile.NamedTemporaryFile("w+t")
for l in h.buf:
log_tmp.write(smart_str(l) + '\n')
# This is not just for show - w/out flush, we often get truncated logs
log_tmp.flush()
t = time.time()
zip = zipfile.ZipFile(tmp, "w", zipfile.ZIP_DEFLATED)
zip.write(log_tmp.name, "hue-logs/hue-%s.log" % t)
zip.close()
length = tmp.tell()
# if we don't seek to start of file, no bytes will be written
tmp.seek(0)
wrapper = FileWrapper(tmp)
response = HttpResponse(wrapper, content_type="application/zip")
response['Content-Disposition'] = 'attachment; filename=hue-logs-%s.zip' % t
response['Content-Length'] = length
return response
except Exception, e:
logging.exception("Couldn't construct zip file to write logs to: %s") % e
return log_view(request)
return render_to_response("logs.mako", dict(log=[_("No logs found.")]))
@access_log_level(logging.DEBUG)
def prefs(request, key=None):
  """Get or set preferences.

  With no ``key``: returns all of this user's preferences as a JSON object.
  With a ``key``: ?set=<value> creates/updates it, ?delete removes it
  (returning whether it existed), otherwise the stored value (or null)
  is returned.
  """
  if key is None:
    # Dump every preference for the current user.
    d = dict( (x.key, x.value) for x in UserPreferences.objects.filter(user=request.user))
    return render_json(d)
  else:
    if "set" in request.REQUEST:
      # Create-or-update semantics for the given key.
      try:
        x = UserPreferences.objects.get(user=request.user, key=key)
      except UserPreferences.DoesNotExist:
        x = UserPreferences(user=request.user, key=key)
      x.value = request.REQUEST["set"]
      x.save()
      return render_json(True)
    if "delete" in request.REQUEST:
      # True if the preference existed and was removed, False otherwise.
      try:
        x = UserPreferences.objects.get(user=request.user, key=key)
        x.delete()
        return render_json(True)
      except UserPreferences.DoesNotExist:
        return render_json(False)
    else:
      # Plain read: the stored value, or null when unset.
      try:
        x = UserPreferences.objects.get(user=request.user, key=key)
        return render_json(x.value)
      except UserPreferences.DoesNotExist:
        return render_json(None)
def bootstrap(request):
  """Concatenates bootstrap.js files from all installed Hue apps the user
  may access."""
  # Has some None's for apps that don't have bootstraps.
  all_bootstraps = [ (app, app.get_bootstrap_file()) for app in appmanager.DESKTOP_APPS if request.user.has_hue_permission(action="access", app=app.name) ]
  # Iterator over the streams.
  # NOTE(review): the file objects returned by get_bootstrap_file() are
  # read but never explicitly closed here — confirm they are managed.
  concatenated = [ "\n/* %s */\n%s" % (app.name, b.read()) for app, b in all_bootstraps if b is not None ]
  # HttpResponse can take an iterable as the first argument, which
  # is what happens here.
  return HttpResponse(concatenated, mimetype='text/javascript')
_status_bar_views = []  # views registered to contribute to the status bar
def register_status_bar_view(view):
  """Register a view whose output is concatenated into the status bar."""
  global _status_bar_views
  _status_bar_views.append(view)
@access_log_level(logging.DEBUG)
def status_bar(request):
  """
  Concatenates multiple views together to build up a "status bar"/"status_bar".
  These views are registered using register_status_bar_view above.
  A failing view is logged and skipped; it never breaks the whole bar.
  """
  resp = ""
  for view in _status_bar_views:
    try:
      r = view(request)
      if r.status_code == 200:
        resp += r.content
      else:
        LOG.warning("Failed to execute status_bar view %s" % (view,))
    except Exception:
      # Previously a bare ``except:``, which also swallowed SystemExit and
      # KeyboardInterrupt; catch only real errors so interpreter-level
      # exits still propagate.
      LOG.exception("Failed to execute status_bar view %s" % (view,))
  return HttpResponse(resp)
def dump_config(request):
  """Render the full Hue configuration page (superuser-only).

  ``?private=1`` sets the show_private flag passed to the template.
  """
  # Note that this requires login (as do most apps).
  show_private = False
  conf_dir = os.path.realpath(os.getenv("HUE_CONF_DIR", get_desktop_root("conf")))
  if not request.user.is_superuser:
    return HttpResponse(_("You must be a superuser."))
  if request.GET.get("private"):
    show_private = True
  # Order the top-level config sections by their owning app's name.
  apps = sorted(appmanager.DESKTOP_MODULES, key=lambda app: app.name)
  apps_names = [app.name for app in apps]
  top_level = sorted(GLOBAL_CONFIG.get().values(), key=lambda obj: apps_names.index(obj.config.key))
  return render("dump_config.mako", request, dict(
      show_private=show_private,
      top_level=top_level,
      conf_dir=conf_dir,
      apps=apps))
if sys.version_info[0:2] <= (2,4):
  def _threads():
    # Python <= 2.4 lacks sys._current_frames(); fall back to the
    # third-party ``threadframe`` extension module.
    import threadframe
    return threadframe.dict().iteritems()
else:
  def _threads():
    # Iterable of (thread_id, current stack frame) for all live threads.
    return sys._current_frames().iteritems()
@access_log_level(logging.WARN)
def threads(request):
  """Dumps out server threads. Useful for debugging."""
  if not request.user.is_superuser:
    return HttpResponse(_("You must be a superuser."))
  out = []
  # One section per thread: the id followed by its current stack.
  for thread_id, stack in _threads():
    out.append("Thread id: %s" % thread_id)
    for filename, lineno, name, line in traceback.extract_stack(stack):
      out.append("  %-20s %s(%d)" % (name, filename, lineno))
      out.append("    %-80s" % (line))
    out.append("")
  return HttpResponse("\n".join(out), content_type="text/plain")
@access_log_level(logging.WARN)
def memory(request):
  """Dumps a heap profile from the configured memory profiler.

  GET parameters of the form ``<command>.<N>`` (e.g. ``type.0``,
  ``from.1``) build an ordered list of drill-down commands applied to the
  heap object in sequence.  (The original docstring said "server threads",
  copy-pasted from threads() above.)
  """
  if not request.user.is_superuser:
    return HttpResponse(_("You must be a superuser."))
  if not hasattr(settings, 'MEMORY_PROFILER'):
    return HttpResponse(_("You must enable the memory profiler via the memory_profiler config in the hue.ini."))
  # type, from, to, index
  command_order = {
    'type': 0,
    'from': 1,
    'to': 2,
    'index': 3
  }
  default_command = [None, None, None, None]
  commands = []
  for item in request.GET:
    res = re.match(r'(?P<command>\w+)\.(?P<count>\d+)', item)
    if res:
      d = res.groupdict()
      count = int(d['count'])
      command = str(d['command'])
      # Grow the command list so index ``count`` exists; copy the default
      # so each slot is an independent list.
      while len(commands) <= count:
        commands.append(default_command[:])
      commands[count][command_order.get(command)] = request.GET.get(item)
  heap = settings.MEMORY_PROFILER.heap()
  # Apply each command: attribute access, slicing, then indexing.
  for command in commands:
    if command[0] is not None:
      heap = getattr(heap, command[0])
    if command[1] is not None and command[2] is not None:
      heap = heap[int(command[1]):int(command[2])]
    if command[3] is not None:
      heap = heap[int(command[3])]
  return HttpResponse(str(heap), content_type="text/plain")
def jasmine(request):
  # Render the Jasmine JS test-runner page.
  return render('jasmine.mako', request, None)
@login_notrequired
def unsupported(request):
  # "Unsupported browser/platform" page; reachable without login.
  return render('unsupported.mako', request, None)
def index(request):
  """Landing page: superusers go to 'about' unless they opted for home
  via the hueLandingPage cookie."""
  prefers_home = request.COOKIES.get('hueLandingPage') == 'home'
  if request.user.is_superuser and not prefers_home:
    return redirect(reverse('about:index'))
  return home(request)
def serve_404_error(request, *args, **kwargs):
  """Registered handler for 404. We just return a simple error"""
  access_warn(request, "404 not found")
  return render("404.mako", request, dict(uri=request.build_absolute_uri()), status=404)
def serve_500_error(request, *args, **kwargs):
  """Registered handler for 500. We use the debug view to make debugging easier."""
  try:
    exc_info = sys.exc_info()
    if exc_info:
      if desktop.conf.HTTP_500_DEBUG_MODE.get() and exc_info[0] and exc_info[1]:
        # If (None, None, None), default server error describing why this failed.
        return django.views.debug.technical_500_response(request, *exc_info)
      else:
        # Could have an empty traceback
        return render("500.mako", request, {'traceback': traceback.extract_tb(exc_info[2])})
    else:
      # exc_info could be empty
      return render("500.mako", request, {})
  finally:
    # Fallback to default 500 response if ours fails
    # Will end up here:
    # - Middleware or authentication backends problems
    # - Certain missing imports
    # - Packaging and install issues
    # NOTE(review): this ``finally: pass`` is a no-op — it does not catch
    # anything, so an exception raised above still propagates to Django's
    # default 500 handling.
    pass
# Map of lowercase level names (as sent by the frontend) to logging levels.
_LOG_LEVELS = {
  "critical": logging.CRITICAL,
  "error": logging.ERROR,
  "warning": logging.WARNING,
  "info": logging.INFO,
  "debug": logging.DEBUG
}
# Untrusted frontend messages are truncated to this many characters.
_MAX_LOG_FRONTEND_EVENT_LENGTH = 1024
_LOG_FRONTEND_LOGGER = logging.getLogger("desktop.views.log_frontend_event")
@login_notrequired
def log_frontend_event(request):
  """
  Logs arguments to server's log. Returns an
  empty string.

  Parameters (specified via either GET or POST) are
  "logname", "level" (one of "debug", "info", "warning",
  "error", or "critical"), and "message".
  """
  def get(param, default=None):
    # Look in both GET and POST.
    return request.REQUEST.get(param, default)
  # Unknown/missing levels default to INFO.
  level = _LOG_LEVELS.get(get("level"), logging.INFO)
  # Truncate the untrusted message before it reaches the log.
  msg = "Untrusted log event from user %s: %s" % (
    request.user,
    get("message", "")[:_MAX_LOG_FRONTEND_EVENT_LENGTH])
  _LOG_FRONTEND_LOGGER.log(level, msg)
  return HttpResponse("")
def who_am_i(request):
  """
  Returns username and FS username, and optionally sleeps.
  """
  # Optional artificial delay (e.g. for testing slow responses); any
  # unparsable value is treated as no sleep.
  try:
    sleep = float(request.REQUEST.get("sleep") or 0.0)
  except ValueError:
    sleep = 0.0
  time.sleep(sleep)
  return HttpResponse(request.user.username + "\t" + request.fs.user + "\n")
def commonheader(title, section, user, padding="90px"):
  """
  Returns the rendered common header

  ``section`` selects the current app; apps in the hard-coded exclusion
  list below are not offered in the "other apps" menu.
  """
  current_app = None
  other_apps = []
  if user.is_authenticated():
    apps = appmanager.get_apps(user)
    apps_list = appmanager.get_apps_dict(user)
    for app in apps:
      # Apps with their own top-level navigation are excluded from the
      # generic "other apps" list.
      if app.display_name not in [
        'beeswax', 'impala', 'pig', 'jobsub', 'jobbrowser', 'metastore', 'hbase', 'sqoop', 'oozie', 'filebrowser',
        'useradmin', 'search', 'help', 'about', 'zookeeper', 'proxy', 'rdbms', 'spark']:
        other_apps.append(app)
      if section == app.display_name:
        current_app = app
  else:
    apps_list = []
  return django_mako.render_to_string("common_header.mako", {
    'current_app': current_app,
    'apps': apps_list,
    'other_apps': other_apps,
    'title': title,
    'section': section,
    'padding': padding,
    'user': user,
    'is_demo': desktop.conf.DEMO_ENABLED.get()
  })
def commonfooter(messages=None):
  """
  Returns the rendered common footer
  """
  # Avoid a mutable default argument.
  if messages is None:
    messages = {}
  hue_settings = Settings.get_settings()
  return django_mako.render_to_string("common_footer.mako", {
    'messages': messages,
    'version': settings.HUE_DESKTOP_VERSION,
    'collect_usage': collect_usage(),
    'tours_and_tutorials': hue_settings.tours_and_tutorials
  })
def collect_usage():
  # Usage collection requires both the config flag and the DB-side setting.
  return desktop.conf.COLLECT_USAGE.get() and Settings.get_settings().collect_usage
# If the app's conf.py has a config_validator() method, call it.
CONFIG_VALIDATOR = 'config_validator'
#
# Cache config errors because (1) they mostly don't go away until restart,
# and (2) they can be costly to compute. So don't stress the system just because
# the dock bar wants to refresh every n seconds.
#
# The actual viewing of all errors may choose to disregard the cache.
#
# Cached list of (confvar, err_msg) tuples; populated lazily by
# _get_config_errors().
_CONFIG_ERROR_LIST = None
def _get_config_errors(request, cache=True):
  """Returns a list of (confvar, err_msg) tuples.

  Results are memoized in _CONFIG_ERROR_LIST; pass cache=False to force
  re-validation of every app module.
  """
  global _CONFIG_ERROR_LIST
  if not cache or _CONFIG_ERROR_LIST is None:
    error_list = [ ]
    for module in appmanager.DESKTOP_MODULES:
      # Get the config_validator() function
      try:
        validator = getattr(module.conf, CONFIG_VALIDATOR)
      except AttributeError:
        # No validator defined for this app; nothing to check.
        continue
      if not callable(validator):
        LOG.warn("Auto config validation: %s.%s is not a function" %
                 (module.conf.__name__, CONFIG_VALIDATOR))
        continue
      try:
        error_list.extend(validator(request.user))
      except Exception, ex:
        # A broken validator must not break the whole config check.
        LOG.exception("Error in config validation by %s: %s" % (module.nice_name, ex))
    _CONFIG_ERROR_LIST = error_list
  return _CONFIG_ERROR_LIST
def check_config(request):
  """Check config and view for the list of errors"""
  if not request.user.is_superuser:
    return HttpResponse(_("You must be a superuser."))
  conf_dir = os.path.realpath(os.getenv("HUE_CONF_DIR", get_desktop_root("conf")))
  # cache=False: the full error page always shows fresh results.
  return render('check_config.mako', request, {
      'error_list': _get_config_errors(request, cache=False),
      'conf_dir': conf_dir
    },
    force_template=True)
def check_config_ajax(request):
  """Alert administrators about configuration problems."""
  if not request.user.is_superuser:
    return HttpResponse('')
  errors = _get_config_errors(request)
  if not errors:
    # Empty response instead of rendering the template, for performance.
    return HttpResponse('')
  return render('config_alert_dock.mako',
                request,
                dict(error_list=errors),
                force_template=True)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler
"""
import datetime
import json
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova.notifier import api as notifier
from nova import rpc
from nova.rpc import common as rpc_common
from nova.scheduler import driver
from nova.scheduler import manager
from nova import test
from nova.tests.scheduler import fakes
from nova import utils
FLAGS = flags.FLAGS
class SchedulerManagerTestCase(test.TestCase):
"""Test case for scheduler manager"""
manager_cls = manager.SchedulerManager
driver_cls = driver.Scheduler
driver_cls_name = 'nova.scheduler.driver.Scheduler'
class AnException(Exception):
pass
def setUp(self):
super(SchedulerManagerTestCase, self).setUp()
self.flags(scheduler_driver=self.driver_cls_name)
self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
self.manager = self.manager_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
self.topic = 'fake_topic'
self.fake_args = (1, 2, 3)
self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
def test_1_correct_init(self):
# Correct scheduler driver
manager = self.manager
self.assertTrue(isinstance(manager.driver, self.driver_cls))
def test_get_host_list(self):
expected = 'fake_hosts'
self.mox.StubOutWithMock(self.manager.driver, 'get_host_list')
self.manager.driver.get_host_list().AndReturn(expected)
self.mox.ReplayAll()
result = self.manager.get_host_list(self.context)
self.assertEqual(result, expected)
def test_get_service_capabilities(self):
expected = 'fake_service_capabs'
self.mox.StubOutWithMock(self.manager.driver,
'get_service_capabilities')
self.manager.driver.get_service_capabilities().AndReturn(
expected)
self.mox.ReplayAll()
result = self.manager.get_service_capabilities(self.context)
self.assertEqual(result, expected)
def test_update_service_capabilities(self):
service_name = 'fake_service'
host = 'fake_host'
self.mox.StubOutWithMock(self.manager.driver,
'update_service_capabilities')
# Test no capabilities passes empty dictionary
self.manager.driver.update_service_capabilities(service_name,
host, {})
self.mox.ReplayAll()
result = self.manager.update_service_capabilities(self.context,
service_name=service_name, host=host)
self.mox.VerifyAll()
self.mox.ResetAll()
# Test capabilities passes correctly
capabilities = {'fake_capability': 'fake_value'}
self.manager.driver.update_service_capabilities(
service_name, host, capabilities)
self.mox.ReplayAll()
result = self.manager.update_service_capabilities(self.context,
service_name=service_name, host=host,
capabilities=capabilities)
def test_existing_method(self):
def stub_method(self, *args, **kwargs):
pass
setattr(self.manager.driver, 'schedule_stub_method', stub_method)
self.mox.StubOutWithMock(self.manager.driver,
'schedule_stub_method')
self.manager.driver.schedule_stub_method(self.context,
*self.fake_args, **self.fake_kwargs)
self.mox.ReplayAll()
self.manager.stub_method(self.context, self.topic,
*self.fake_args, **self.fake_kwargs)
def test_missing_method_fallback(self):
self.mox.StubOutWithMock(self.manager.driver, 'schedule')
self.manager.driver.schedule(self.context, self.topic,
'noexist', *self.fake_args, **self.fake_kwargs)
self.mox.ReplayAll()
self.manager.noexist(self.context, self.topic,
*self.fake_args, **self.fake_kwargs)
def test_show_host_resources(self):
host = 'fake_host'
computes = [{'host': host,
'compute_node': [{'vcpus': 4,
'vcpus_used': 2,
'memory_mb': 1024,
'memory_mb_used': 512,
'local_gb': 1024,
'local_gb_used': 512}]}]
instances = [{'project_id': 'project1',
'vcpus': 1,
'memory_mb': 128,
'root_gb': 128,
'ephemeral_gb': 0},
{'project_id': 'project1',
'vcpus': 2,
'memory_mb': 256,
'root_gb': 384,
'ephemeral_gb': 0},
{'project_id': 'project2',
'vcpus': 2,
'memory_mb': 256,
'root_gb': 256,
'ephemeral_gb': 0}]
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
db.service_get_all_compute_by_host(self.context, host).AndReturn(
computes)
db.instance_get_all_by_host(self.context, host).AndReturn(instances)
self.mox.ReplayAll()
result = self.manager.show_host_resources(self.context, host)
expected = {'usage': {'project1': {'memory_mb': 384,
'vcpus': 3,
'root_gb': 512,
'ephemeral_gb': 0},
'project2': {'memory_mb': 256,
'vcpus': 2,
'root_gb': 256,
'ephemeral_gb': 0}},
'resource': {'vcpus': 4,
'vcpus_used': 2,
'local_gb': 1024,
'local_gb_used': 512,
'memory_mb': 1024,
'memory_mb_used': 512}}
self.assertDictMatch(result, expected)
def _mox_schedule_method_helper(self, method_name):
# Make sure the method exists that we're going to test call
def stub_method(*args, **kwargs):
pass
setattr(self.manager.driver, method_name, stub_method)
self.mox.StubOutWithMock(self.manager.driver,
method_name)
def test_schedule_exeception_changes_state_notifies_and_raises(self):
"""Test that an exception scheduling calls
_set_vm_state_and_notify and reraises
"""
fake_instance_uuid = 'fake-instance-id'
self._mox_schedule_method_helper('schedule_something')
self.mox.StubOutWithMock(self.manager, '_set_vm_state_and_notify')
request_spec = {'instance_properties':
{'uuid': fake_instance_uuid}}
self.fake_kwargs['request_spec'] = request_spec
ex = self.AnException('something happened')
self.manager.driver.schedule_something(self.context,
*self.fake_args, **self.fake_kwargs).AndRaise(ex)
# Adding the context to the args is kind of gnarly, but thats what
# happens. Could be refactored to keep all the context, spec, topic
# stuff a bit cleaner.
self.manager._set_vm_state_and_notify('something',
{'vm_state': vm_states.ERROR}, self.context,
ex, *((self.context,) + self.fake_args), **self.fake_kwargs)
self.mox.ReplayAll()
self.assertRaises(self.AnException, self.manager.something,
self.context, self.topic,
*self.fake_args, **self.fake_kwargs)
def test_run_instance_exception_puts_instance_in_error_state(self):
"""Test that an NoValidHost exception for run_instance puts
the instance in ERROR state and eats the exception.
"""
fake_instance_uuid = 'fake-instance-id'
self._mox_schedule_method_helper('schedule_run_instance')
self.mox.StubOutWithMock(db, 'instance_update')
request_spec = {'instance_properties':
{'uuid': fake_instance_uuid}}
self.fake_kwargs['request_spec'] = request_spec
self.manager.driver.schedule_run_instance(self.context,
*self.fake_args, **self.fake_kwargs).AndRaise(
exception.NoValidHost(reason=""))
db.instance_update(self.context, fake_instance_uuid,
{'vm_state': vm_states.ERROR})
self.mox.ReplayAll()
self.manager.run_instance(self.context, self.topic,
*self.fake_args, **self.fake_kwargs)
    def test_prep_resize_no_valid_host_back_in_active_state(self):
        """Test that a NoValidHost exception for prep_resize puts
        the instance back in ACTIVE state (and clears task_state),
        swallowing the exception.
        """
        fake_instance_uuid = 'fake-instance-id'
        self._mox_schedule_method_helper('schedule_prep_resize')
        self.mox.StubOutWithMock(db, 'instance_update')
        request_spec = {'instance_properties':
                {'uuid': fake_instance_uuid}}
        self.fake_kwargs['request_spec'] = request_spec
        # Driver fails to find a host for the resize...
        self.manager.driver.schedule_prep_resize(self.context,
                *self.fake_args, **self.fake_kwargs).AndRaise(
                        exception.NoValidHost(reason=""))
        # ...so the instance is restored to ACTIVE with no task pending.
        db.instance_update(self.context, fake_instance_uuid,
                {'vm_state': vm_states.ACTIVE,
                 'task_state': None})
        self.mox.ReplayAll()
        # No assertRaises: NoValidHost is handled, not propagated.
        self.manager.prep_resize(self.context, self.topic,
                *self.fake_args, **self.fake_kwargs)
    def test_prep_resize_exception_host_in_error_state_and_raise(self):
        """Test that a non-NoValidHost exception for prep_resize puts
        the instance in ERROR state and re-raises the exception.

        (The original docstring incorrectly said ACTIVE; the test
        asserts ERROR state and that the exception propagates.)
        """
        fake_instance_uuid = 'fake-instance-id'
        self._mox_schedule_method_helper('schedule_prep_resize')
        self.mox.StubOutWithMock(db, 'instance_update')
        request_spec = {'instance_properties':
                {'uuid': fake_instance_uuid}}
        self.fake_kwargs['request_spec'] = request_spec
        # An arbitrary (non-NoValidHost) failure from the driver...
        self.manager.driver.schedule_prep_resize(self.context,
                *self.fake_args, **self.fake_kwargs).AndRaise(
                        self.AnException('something happened'))
        # ...marks the instance as ERROR...
        db.instance_update(self.context, fake_instance_uuid,
                {'vm_state': vm_states.ERROR})
        self.mox.ReplayAll()
        # ...and, unlike NoValidHost, is re-raised to the caller.
        self.assertRaises(self.AnException, self.manager.prep_resize,
                self.context, self.topic,
                *self.fake_args, **self.fake_kwargs)
class SchedulerTestCase(test.TestCase):
    """Test case for the base scheduler driver class.

    All tests use mox record/replay: the exact sequence of expected calls
    is recorded before ReplayAll(), so expectation order is part of what
    each test verifies.
    """
    # So we can subclass this test and re-use tests if we need.
    driver_cls = driver.Scheduler

    def setUp(self):
        # Replace the real compute API with a fake *before* instantiating
        # the driver so the driver picks up the fake.
        super(SchedulerTestCase, self).setUp()
        self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
        self.driver = self.driver_cls()
        self.context = context.RequestContext('fake_user', 'fake_project')
        self.topic = 'fake_topic'

    def test_get_host_list(self):
        """get_host_list() delegates straight to the host manager."""
        expected = 'fake_hosts'
        self.mox.StubOutWithMock(self.driver.host_manager, 'get_host_list')
        self.driver.host_manager.get_host_list().AndReturn(expected)
        self.mox.ReplayAll()
        result = self.driver.get_host_list()
        self.assertEqual(result, expected)

    def test_get_service_capabilities(self):
        """get_service_capabilities() delegates to the host manager."""
        expected = 'fake_service_capabs'
        self.mox.StubOutWithMock(self.driver.host_manager,
                'get_service_capabilities')
        self.driver.host_manager.get_service_capabilities().AndReturn(
                expected)
        self.mox.ReplayAll()
        result = self.driver.get_service_capabilities()
        self.assertEqual(result, expected)

    def test_update_service_capabilities(self):
        """update_service_capabilities() forwards its arguments verbatim
        to the host manager.
        """
        service_name = 'fake_service'
        host = 'fake_host'
        self.mox.StubOutWithMock(self.driver.host_manager,
                'update_service_capabilities')
        capabilities = {'fake_capability': 'fake_value'}
        self.driver.host_manager.update_service_capabilities(
                service_name, host, capabilities)
        self.mox.ReplayAll()
        # NOTE(review): 'result' is unused; mox's VerifyAll() at teardown
        # is what actually checks the call was forwarded.
        result = self.driver.update_service_capabilities(service_name,
                host, capabilities)

    def test_hosts_up(self):
        """hosts_up() returns only the hosts whose service is alive."""
        service1 = {'host': 'host1'}
        service2 = {'host': 'host2'}
        services = [service1, service2]
        self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        db.service_get_all_by_topic(self.context,
                self.topic).AndReturn(services)
        # host1 is down, host2 is up -> only host2 should come back.
        utils.service_is_up(service1).AndReturn(False)
        utils.service_is_up(service2).AndReturn(True)
        self.mox.ReplayAll()
        result = self.driver.hosts_up(self.context, self.topic)
        self.assertEqual(result, ['host2'])

    def test_create_instance_db_entry(self):
        """create_instance_db_entry() either creates a new DB row, or
        fetches the existing row when compute already created it (i.e.
        the request spec's instance properties carry a 'uuid').
        """
        base_options = {'fake_option': 'meow'}
        image = 'fake_image'
        instance_type = 'fake_instance_type'
        security_group = 'fake_security_group'
        block_device_mapping = 'fake_block_device_mapping'
        request_spec = {'instance_properties': base_options,
                'image': image,
                'instance_type': instance_type,
                'security_group': security_group,
                'block_device_mapping': block_device_mapping}
        self.mox.StubOutWithMock(self.driver.compute_api,
                'create_db_entry_for_new_instance')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        # New entry
        fake_instance = {'uuid': 'fake-uuid'}
        self.driver.compute_api.create_db_entry_for_new_instance(
                self.context, instance_type, image, base_options,
                security_group,
                block_device_mapping).AndReturn(fake_instance)
        self.mox.ReplayAll()
        instance = self.driver.create_instance_db_entry(self.context,
                request_spec)
        self.mox.VerifyAll()
        self.assertEqual(instance, fake_instance)
        # Entry created by compute already
        self.mox.ResetAll()
        fake_uuid = 'fake-uuid'
        base_options['uuid'] = fake_uuid
        fake_instance = {'uuid': fake_uuid}
        db.instance_get_by_uuid(self.context, fake_uuid).AndReturn(
                fake_instance)
        self.mox.ReplayAll()
        instance = self.driver.create_instance_db_entry(self.context,
                request_spec)
        self.assertEqual(instance, fake_instance)

    def _live_migration_instance(self):
        # Canonical fake instance shared by all live-migration tests:
        # RUNNING, two attached volumes, 1024 MB RAM, 1024 GB root disk.
        volume1 = {'id': 31338}
        volume2 = {'id': 31339}
        return {'id': 31337,
                'uuid': 'fake_uuid',
                'name': 'fake-instance',
                'host': 'fake_host1',
                'volumes': [volume1, volume2],
                'power_state': power_state.RUNNING,
                'memory_mb': 1024,
                'root_gb': 1024,
                'ephemeral_gb': 0}

    def test_live_migration_basic(self):
        """Test basic schedule_live_migration functionality"""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_common_check')
        self.mox.StubOutWithMock(db, 'instance_update')
        self.mox.StubOutWithMock(db, 'volume_update')
        self.mox.StubOutWithMock(driver, 'cast_to_compute_host')
        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        # All three checks are stubbed out to pass silently.
        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)
        self.driver._live_migration_common_check(self.context, instance,
                dest, block_migration, disk_over_commit)
        # The instance and both volumes are flipped to 'migrating'...
        db.instance_update(self.context, instance['id'],
                {'vm_state': vm_states.MIGRATING})
        db.volume_update(self.context, instance['volumes'][0]['id'],
                {'status': 'migrating'})
        db.volume_update(self.context, instance['volumes'][1]['id'],
                {'status': 'migrating'})
        # ...and the live_migration cast is sent to the *source* host.
        driver.cast_to_compute_host(self.context, instance['host'],
                'live_migration', update_db=False,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)
        self.mox.ReplayAll()
        self.driver.schedule_live_migration(self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)

    def test_live_migration_all_checks_pass(self):
        """Test live migration when all checks pass."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(self.driver, '_get_compute_info')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        self.mox.StubOutWithMock(db, 'instance_update')
        self.mox.StubOutWithMock(db, 'volume_update')
        self.mox.StubOutWithMock(driver, 'cast_to_compute_host')
        dest = 'fake_host2'
        block_migration = True
        disk_over_commit = True
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        # Source checks (volume and source compute are up)
        db.service_get_all_by_topic(self.context, 'volume').AndReturn(
                ['fake_service'])
        utils.service_is_up('fake_service').AndReturn(True)
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(['fake_service2'])
        utils.service_is_up('fake_service2').AndReturn(True)
        # Destination checks (compute is up, enough memory, disk)
        db.service_get_all_compute_by_host(self.context,
                dest).AndReturn(['fake_service3'])
        utils.service_is_up('fake_service3').AndReturn(True)
        # assert_compute_node_has_enough_memory()
        # 2048 MB total, 256+512 used, instance needs 1024 -> fits.
        self.driver._get_compute_info(self.context, dest,
                'memory_mb').AndReturn(2048)
        db.instance_get_all_by_host(self.context, dest).AndReturn(
                [dict(memory_mb=256), dict(memory_mb=512)])
        # assert_compute_node_has_enough_disk()
        # 1025 GB available vs 1024 GB needed -> fits.
        self.driver._get_compute_info(self.context, dest,
                'disk_available_least').AndReturn(1025)
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue1')
        rpc.call(self.context, 'src_queue1',
                {'method': 'get_instance_disk_info',
                 'args': {'instance_name': instance['name']}}).AndReturn(
                        json.dumps([{'disk_size': 1024 * (1024 ** 3)}]))
        # Common checks (shared storage ok, same hypervisor, etc.)
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        tmp_filename = 'test-filename'
        rpc.call(self.context, 'dest_queue',
                {'method': 'create_shared_storage_test_file'}
                ).AndReturn(tmp_filename)
        # Block migration expects storage NOT shared, so False passes here.
        rpc.call(self.context, 'src_queue',
                {'method': 'check_shared_storage_test_file',
                 'args': {'filename': tmp_filename}}).AndReturn(False)
        rpc.call(self.context, 'dest_queue',
                {'method': 'cleanup_shared_storage_test_file',
                 'args': {'filename': tmp_filename}})
        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'hypervisor_type': 'xen',
                                    'hypervisor_version': 1}]}])
        # Same hypervisor type/version for src (cpu_info feeds the CPU
        # compatibility check below).
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(
                        [{'compute_node': [{'hypervisor_type': 'xen',
                                            'hypervisor_version': 1,
                                            'cpu_info': 'fake_cpu_info'}]}])
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        rpc.call(self.context, 'dest_queue',
                {'method': 'compare_cpu',
                 'args': {'cpu_info': 'fake_cpu_info'}}).AndReturn(True)
        # State updates and the final cast, as in the basic test.
        db.instance_update(self.context, instance['id'],
                {'vm_state': vm_states.MIGRATING})
        db.volume_update(self.context, instance['volumes'][0]['id'],
                {'status': 'migrating'})
        db.volume_update(self.context, instance['volumes'][1]['id'],
                {'status': 'migrating'})
        driver.cast_to_compute_host(self.context, instance['host'],
                'live_migration', update_db=False,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)
        self.mox.ReplayAll()
        result = self.driver.schedule_live_migration(self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
        self.assertEqual(result, None)

    def test_live_migration_instance_not_running(self):
        """The instance given by instance_id is not running."""
        self.mox.StubOutWithMock(db, 'instance_get')
        dest = 'fake_host2'
        block_migration = False
        instance = self._live_migration_instance()
        # Anything other than RUNNING should be rejected up front.
        instance['power_state'] = power_state.NOSTATE
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        self.mox.ReplayAll()
        self.assertRaises(exception.InstanceNotRunning,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)

    def test_live_migration_volume_node_not_alive(self):
        """Raise exception when volume node is not alive."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        dest = 'fake_host2'
        block_migration = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        # Volume down
        db.service_get_all_by_topic(self.context, 'volume').AndReturn(
                ['fake_service'])
        utils.service_is_up('fake_service').AndReturn(False)
        self.mox.ReplayAll()
        self.assertRaises(exception.VolumeServiceUnavailable,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)

    def test_live_migration_compute_src_not_alive(self):
        """Raise exception when src compute node is not alive."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        dest = 'fake_host2'
        block_migration = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        # Volume up
        db.service_get_all_by_topic(self.context, 'volume').AndReturn(
                ['fake_service'])
        utils.service_is_up('fake_service').AndReturn(True)
        # Compute down
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(['fake_service2'])
        utils.service_is_up('fake_service2').AndReturn(False)
        self.mox.ReplayAll()
        self.assertRaises(exception.ComputeServiceUnavailable,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)

    def test_live_migration_compute_dest_not_alive(self):
        """Raise exception when dest compute node is not alive."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        dest = 'fake_host2'
        block_migration = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        # Source check is stubbed to pass; dest check does the real work.
        self.driver._live_migration_src_check(self.context, instance)
        db.service_get_all_compute_by_host(self.context,
                dest).AndReturn(['fake_service3'])
        # Compute is down
        utils.service_is_up('fake_service3').AndReturn(False)
        self.mox.ReplayAll()
        self.assertRaises(exception.ComputeServiceUnavailable,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)

    def test_live_migration_dest_check_service_same_host(self):
        """Confirms exception raises in case dest and src is same host."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        # make dest same as src
        dest = instance['host']
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        self.driver._live_migration_src_check(self.context, instance)
        db.service_get_all_compute_by_host(self.context,
                dest).AndReturn(['fake_service3'])
        utils.service_is_up('fake_service3').AndReturn(True)
        self.mox.ReplayAll()
        self.assertRaises(exception.UnableToMigrateToSelf,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=False)

    def test_live_migration_dest_check_service_lack_memory(self):
        """Confirms exception raises when dest doesn't have enough memory."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        self.mox.StubOutWithMock(self.driver, '_get_compute_info')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        self.driver._live_migration_src_check(self.context, instance)
        db.service_get_all_compute_by_host(self.context,
                dest).AndReturn(['fake_service3'])
        utils.service_is_up('fake_service3').AndReturn(True)
        # 2048 MB total, 1024+512 already used, instance needs 1024:
        # 512 MB free is not enough -> MigrationError.
        self.driver._get_compute_info(self.context, dest,
                'memory_mb').AndReturn(2048)
        db.instance_get_all_by_host(self.context, dest).AndReturn(
                [dict(memory_mb=1024), dict(memory_mb=512)])
        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)

    def test_block_migration_dest_check_service_lack_disk(self):
        """Confirms exception raises when dest doesn't have enough disk."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        self.mox.StubOutWithMock(self.driver,
                'assert_compute_node_has_enough_memory')
        self.mox.StubOutWithMock(self.driver, '_get_compute_info')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        dest = 'fake_host2'
        block_migration = True
        disk_over_commit = True
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        self.driver._live_migration_src_check(self.context, instance)
        db.service_get_all_compute_by_host(self.context,
                dest).AndReturn(['fake_service3'])
        utils.service_is_up('fake_service3').AndReturn(True)
        # Enough memory
        self.driver.assert_compute_node_has_enough_memory(self.context,
                instance, dest)
        # Not enough disk
        # (1023 GB available vs the 1024 GB the instance reports needing.)
        self.driver._get_compute_info(self.context, dest,
                'disk_available_least').AndReturn(1023)
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        rpc.call(self.context, 'src_queue',
                {'method': 'get_instance_disk_info',
                 'args': {'instance_name': instance['name']}}).AndReturn(
                        json.dumps([{'disk_size': 1024 * (1024 ** 3)}]))
        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)

    def test_live_migration_different_shared_storage_raises(self):
        """Src and dest must have same shared storage for live migration"""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        tmp_filename = 'test-filename'
        rpc.call(self.context, 'dest_queue',
                {'method': 'create_shared_storage_test_file'}
                ).AndReturn(tmp_filename)
        # Source cannot see the test file -> storage is not shared.
        rpc.call(self.context, 'src_queue',
                {'method': 'check_shared_storage_test_file',
                 'args': {'filename': tmp_filename}}).AndReturn(False)
        rpc.call(self.context, 'dest_queue',
                {'method': 'cleanup_shared_storage_test_file',
                 'args': {'filename': tmp_filename}})
        self.mox.ReplayAll()
        # FIXME(comstud): See LP891756.
        self.assertRaises(exception.FileNotFound,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)

    def test_live_migration_same_shared_storage_okay(self):
        """live migration works with same src and dest shared storage"""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        tmp_filename = 'test-filename'
        rpc.call(self.context, 'dest_queue',
                {'method': 'create_shared_storage_test_file'}
                ).AndReturn(tmp_filename)
        # NOTE(review): despite the test name, the stub returns False
        # (storage NOT shared), so FileNotFound is expected below; see
        # the FIXME referencing LP891756.
        rpc.call(self.context, 'src_queue',
                {'method': 'check_shared_storage_test_file',
                 'args': {'filename': tmp_filename}}).AndReturn(False)
        rpc.call(self.context, 'dest_queue',
                {'method': 'cleanup_shared_storage_test_file',
                 'args': {'filename': tmp_filename}})
        self.mox.ReplayAll()
        # FIXME(comstud): See LP891756.
        self.assertRaises(exception.FileNotFound,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)

    def test_live_migration_different_hypervisor_type_raises(self):
        """Dest hypervisor type must match the source's."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        tmp_filename = 'test-filename'
        rpc.call(self.context, 'dest_queue',
                {'method': 'create_shared_storage_test_file'}
                ).AndReturn(tmp_filename)
        rpc.call(self.context, 'src_queue',
                {'method': 'check_shared_storage_test_file',
                 'args': {'filename': tmp_filename}}).AndReturn(True)
        rpc.call(self.context, 'dest_queue',
                {'method': 'cleanup_shared_storage_test_file',
                 'args': {'filename': tmp_filename}})
        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'hypervisor_type': 'xen',
                                    'hypervisor_version': 1}]}])
        # different hypervisor type
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(
                        [{'compute_node': [{'hypervisor_type': 'not-xen',
                                            'hypervisor_version': 1}]}])
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidHypervisorType,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)

    def test_live_migration_dest_hypervisor_version_older_raises(self):
        """Dest hypervisor must not be older than the source's."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        tmp_filename = 'test-filename'
        rpc.call(self.context, 'dest_queue',
                {'method': 'create_shared_storage_test_file'}
                ).AndReturn(tmp_filename)
        rpc.call(self.context, 'src_queue',
                {'method': 'check_shared_storage_test_file',
                 'args': {'filename': tmp_filename}}).AndReturn(True)
        rpc.call(self.context, 'dest_queue',
                {'method': 'cleanup_shared_storage_test_file',
                 'args': {'filename': tmp_filename}})
        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'hypervisor_type': 'xen',
                                    'hypervisor_version': 1}]}])
        # newer hypervisor version for src
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(
                        [{'compute_node': [{'hypervisor_type': 'xen',
                                            'hypervisor_version': 2}]}])
        self.mox.ReplayAll()
        self.assertRaises(exception.DestinationHypervisorTooOld,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)

    def test_live_migration_dest_host_incompatable_cpu_raises(self):
        """A compare_cpu RemoteError from dest propagates to the caller."""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        db.queue_get_for(self.context, FLAGS.compute_topic,
                instance['host']).AndReturn('src_queue')
        tmp_filename = 'test-filename'
        rpc.call(self.context, 'dest_queue',
                {'method': 'create_shared_storage_test_file'}
                ).AndReturn(tmp_filename)
        rpc.call(self.context, 'src_queue',
                {'method': 'check_shared_storage_test_file',
                 'args': {'filename': tmp_filename}}).AndReturn(True)
        rpc.call(self.context, 'dest_queue',
                {'method': 'cleanup_shared_storage_test_file',
                 'args': {'filename': tmp_filename}})
        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'hypervisor_type': 'xen',
                                    'hypervisor_version': 1}]}])
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(
                        [{'compute_node': [{'hypervisor_type': 'xen',
                                            'hypervisor_version': 1,
                                            'cpu_info': 'fake_cpu_info'}]}])
        db.queue_get_for(self.context, FLAGS.compute_topic,
                dest).AndReturn('dest_queue')
        # The CPU compatibility RPC fails remotely...
        rpc.call(self.context, 'dest_queue',
                {'method': 'compare_cpu',
                 'args': {'cpu_info': 'fake_cpu_info'}}).AndRaise(
                        rpc_common.RemoteError())
        self.mox.ReplayAll()
        # ...and the RemoteError is surfaced unchanged.
        self.assertRaises(rpc_common.RemoteError,
                self.driver.schedule_live_migration, self.context,
                instance_id=instance['id'], dest=dest,
                block_migration=block_migration)
class SchedulerDriverBaseTestCase(SchedulerTestCase):
    """Test cases for base scheduler driver class methods
    that will fail if the driver is changed.

    (Original docstring read "that can't will fail ..."; the intent,
    from the tests below, is that the base driver's abstract scheduling
    entry points must keep raising NotImplementedError.)
    """

    def test_unimplemented_schedule(self):
        """The generic schedule() entry point is abstract on the base."""
        fake_args = (1, 2, 3)
        fake_kwargs = {'cat': 'meow'}
        self.assertRaises(NotImplementedError, self.driver.schedule,
                self.context, self.topic, 'schedule_something',
                *fake_args, **fake_kwargs)

    def test_unimplemented_schedule_run_instance(self):
        """schedule_run_instance() is abstract on the base driver."""
        fake_args = (1, 2, 3)
        fake_kwargs = {'cat': 'meow'}
        fake_request_spec = {'instance_properties':
                {'uuid': 'uuid'}}
        self.assertRaises(NotImplementedError,
                self.driver.schedule_run_instance,
                self.context, fake_request_spec,
                *fake_args, **fake_kwargs)

    def test_unimplemented_schedule_prep_resize(self):
        """schedule_prep_resize() is abstract on the base driver."""
        fake_args = (1, 2, 3)
        fake_kwargs = {'cat': 'meow'}
        fake_request_spec = {'instance_properties':
                {'uuid': 'uuid'}}
        self.assertRaises(NotImplementedError,
                self.driver.schedule_prep_resize,
                self.context, fake_request_spec,
                *fake_args, **fake_kwargs)
class SchedulerDriverModuleTestCase(test.TestCase):
"""Test case for scheduler driver module methods"""
def setUp(self):
super(SchedulerDriverModuleTestCase, self).setUp()
self.context = context.RequestContext('fake_user', 'fake_project')
def test_cast_to_volume_host_update_db_with_volume_id(self):
host = 'fake_host1'
method = 'fake_method'
fake_kwargs = {'volume_id': 31337,
'extra_arg': 'meow'}
queue = 'fake_queue'
self.mox.StubOutWithMock(utils, 'utcnow')
self.mox.StubOutWithMock(db, 'volume_update')
self.mox.StubOutWithMock(db, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'cast')
utils.utcnow().AndReturn('fake-now')
db.volume_update(self.context, 31337,
{'host': host, 'scheduled_at': 'fake-now'})
db.queue_get_for(self.context, 'volume', host).AndReturn(queue)
rpc.cast(self.context, queue,
{'method': method,
'args': fake_kwargs})
self.mox.ReplayAll()
driver.cast_to_volume_host(self.context, host, method,
update_db=True, **fake_kwargs)
def test_cast_to_volume_host_update_db_without_volume_id(self):
host = 'fake_host1'
method = 'fake_method'
fake_kwargs = {'extra_arg': 'meow'}
queue = 'fake_queue'
self.mox.StubOutWithMock(db, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'cast')
db.queue_get_for(self.context, 'volume', host).AndReturn(queue)
rpc.cast(self.context, queue,
{'method': method,
'args': fake_kwargs})
self.mox.ReplayAll()
driver.cast_to_volume_host(self.context, host, method,
update_db=True, **fake_kwargs)
def test_cast_to_volume_host_no_update_db(self):
host = 'fake_host1'
method = 'fake_method'
fake_kwargs = {'extra_arg': 'meow'}
queue = 'fake_queue'
self.mox.StubOutWithMock(db, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'cast')
db.queue_get_for(self.context, 'volume', host).AndReturn(queue)
rpc.cast(self.context, queue,
{'method': method,
'args': fake_kwargs})
self.mox.ReplayAll()
driver.cast_to_volume_host(self.context, host, method,
update_db=False, **fake_kwargs)
def test_cast_to_compute_host_update_db_with_instance_id(self):
host = 'fake_host1'
method = 'fake_method'
fake_kwargs = {'instance_id': 31337,
'extra_arg': 'meow'}
queue = 'fake_queue'
self.mox.StubOutWithMock(utils, 'utcnow')
self.mox.StubOutWithMock(db, 'instance_update')
self.mox.StubOutWithMock(db, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'cast')
utils.utcnow().AndReturn('fake-now')
db.instance_update(self.context, 31337,
{'host': host, 'scheduled_at': 'fake-now'})
db.queue_get_for(self.context, 'compute', host).AndReturn(queue)
rpc.cast(self.context, queue,
{'method': method,
'args': fake_kwargs})
self.mox.ReplayAll()
driver.cast_to_compute_host(self.context, host, method,
update_db=True, **fake_kwargs)
def test_cast_to_compute_host_update_db_without_instance_id(self):
host = 'fake_host1'
method = 'fake_method'
fake_kwargs = {'extra_arg': 'meow'}
queue = 'fake_queue'
self.mox.StubOutWithMock(db, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'cast')
db.queue_get_for(self.context, 'compute', host).AndReturn(queue)
rpc.cast(self.context, queue,
{'method': method,
'args': fake_kwargs})
self.mox.ReplayAll()
driver.cast_to_compute_host(self.context, host, method,
update_db=True, **fake_kwargs)
def test_cast_to_compute_host_no_update_db(self):
host = 'fake_host1'
method = 'fake_method'
fake_kwargs = {'extra_arg': 'meow'}
queue = 'fake_queue'
self.mox.StubOutWithMock(db, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'cast')
db.queue_get_for(self.context, 'compute', host).AndReturn(queue)
rpc.cast(self.context, queue,
{'method': method,
'args': fake_kwargs})
self.mox.ReplayAll()
driver.cast_to_compute_host(self.context, host, method,
update_db=False, **fake_kwargs)
def test_cast_to_network_host(self):
host = 'fake_host1'
method = 'fake_method'
fake_kwargs = {'extra_arg': 'meow'}
queue = 'fake_queue'
self.mox.StubOutWithMock(db, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'cast')
db.queue_get_for(self.context, 'network', host).AndReturn(queue)
rpc.cast(self.context, queue,
{'method': method,
'args': fake_kwargs})
self.mox.ReplayAll()
driver.cast_to_network_host(self.context, host, method,
update_db=True, **fake_kwargs)
def test_cast_to_host_compute_topic(self):
host = 'fake_host1'
method = 'fake_method'
fake_kwargs = {'extra_arg': 'meow'}
self.mox.StubOutWithMock(driver, 'cast_to_compute_host')
driver.cast_to_compute_host(self.context, host, method,
update_db=False, **fake_kwargs)
self.mox.ReplayAll()
driver.cast_to_host(self.context, 'compute', host, method,
update_db=False, **fake_kwargs)
    def test_cast_to_host_volume_topic(self):
        """cast_to_host with the 'volume' topic delegates to
        cast_to_volume_host with identical arguments."""
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'extra_arg': 'meow'}
        self.mox.StubOutWithMock(driver, 'cast_to_volume_host')
        # Record the expected delegated call.
        driver.cast_to_volume_host(self.context, host, method,
                update_db=False, **fake_kwargs)
        self.mox.ReplayAll()
        driver.cast_to_host(self.context, 'volume', host, method,
                update_db=False, **fake_kwargs)
    def test_cast_to_host_network_topic(self):
        """cast_to_host with the 'network' topic delegates to
        cast_to_network_host with identical arguments."""
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'extra_arg': 'meow'}
        self.mox.StubOutWithMock(driver, 'cast_to_network_host')
        # Record the expected delegated call.
        driver.cast_to_network_host(self.context, host, method,
                update_db=False, **fake_kwargs)
        self.mox.ReplayAll()
        driver.cast_to_host(self.context, 'network', host, method,
                update_db=False, **fake_kwargs)
    def test_cast_to_host_unknown_topic(self):
        """A topic with no dedicated helper falls through to a generic
        queue lookup for that topic followed by a plain RPC cast."""
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'extra_arg': 'meow'}
        topic = 'unknown'
        queue = 'fake_queue'
        # Record phase: mox verifies exactly these calls happen, in order.
        self.mox.StubOutWithMock(db, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')
        db.queue_get_for(self.context, topic, host).AndReturn(queue)
        rpc.cast(self.context, queue,
                {'method': method,
                 'args': fake_kwargs})
        self.mox.ReplayAll()
        # Replay phase: exercise the code under test.
        driver.cast_to_host(self.context, topic, host, method,
                update_db=False, **fake_kwargs)
def test_encode_instance(self):
instance = {'id': 31337,
'test_arg': 'meow'}
result = driver.encode_instance(instance, True)
expected = {'id': instance['id'], '_is_precooked': False}
self.assertDictMatch(result, expected)
# Orig dict not changed
self.assertNotEqual(result, instance)
result = driver.encode_instance(instance, False)
expected = {}
expected.update(instance)
expected['_is_precooked'] = True
self.assertDictMatch(result, expected)
# Orig dict not changed
self.assertNotEqual(result, instance)
|
|
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import argparse
import functools
import os
import chainer
from chainer import functions as F
from chainer import links as L
from chainer import optimizers
import gym
import gym.spaces
import numpy as np
import chainerrl
from chainerrl.action_value import DiscreteActionValue
from chainerrl import experiments
from chainerrl import explorers
from chainerrl import misc
from chainerrl import replay_buffer
class CastAction(gym.ActionWrapper):
    """Action wrapper that runs every action through a fixed converter.

    ``type_`` is a callable (typically a type such as ``int``) applied to
    each action before the wrapped environment receives it.
    """

    def __init__(self, env, type_):
        super().__init__(env)
        self.type_ = type_

    def action(self, action):
        converted = self.type_(action)
        return converted
class TransposeObservation(gym.ObservationWrapper):
    """Observation wrapper that permutes observation axes.

    ``axes`` is forwarded to ``transpose``; the Box observation space
    bounds are permuted the same way so the space stays consistent.
    """

    def __init__(self, env, axes):
        super().__init__(env)
        self._axes = axes
        space = env.observation_space
        # Only Box spaces carry low/high arrays we can transpose.
        assert isinstance(space, gym.spaces.Box)
        self.observation_space = gym.spaces.Box(
            low=space.low.transpose(*self._axes),
            high=space.high.transpose(*self._axes),
            dtype=space.dtype,
        )

    def observation(self, observation):
        return observation.transpose(*self._axes)
class ObserveElapsedSteps(gym.Wrapper):
    """Augment observations with the episode's elapsed step count.

    Each observation becomes a ``(original_observation, elapsed_steps)``
    tuple, where ``elapsed_steps`` is an integer in ``[0, max_steps]``.
    """

    def __init__(self, env, max_steps):
        super().__init__(env)
        self._max_steps = max_steps
        self._elapsed_steps = 0
        self.observation_space = gym.spaces.Tuple((
            env.observation_space,
            gym.spaces.Discrete(self._max_steps + 1),
        ))

    def reset(self):
        self._elapsed_steps = 0
        return self.env.reset(), self._elapsed_steps

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self._elapsed_steps += 1
        # The wrapped env is expected to end episodes itself before the
        # step count can exceed max_steps.
        assert self._elapsed_steps <= self._max_steps
        return (obs, self._elapsed_steps), reward, done, info
class RecordMovie(gym.Wrapper):
    """Save an MP4 movie of each episode via pybullet's state logging.

    One file per episode is written into ``dirname``, named
    ``<episode index>.mp4`` starting from 0.
    """

    def __init__(self, env, dirname):
        super().__init__(env)
        self._episode_idx = -1
        self._dirname = dirname

    def reset(self):
        obs = self.env.reset()
        self._episode_idx += 1
        # Imported lazily so the wrapper module loads without pybullet.
        import pybullet
        movie_path = os.path.join(
            self._dirname, '{}.mp4'.format(self._episode_idx))
        pybullet.startStateLogging(
            pybullet.STATE_LOGGING_VIDEO_MP4, movie_path)
        return obs
class GraspingQFunction(chainer.Chain):
    """Q-function model for the grasping env.

    This model takes an 84x84 2D image and an integer that indicates the
    number of elapsed steps in an episode as input and outputs action values.
    """
    def __init__(self, n_actions, max_episode_steps):
        # n_actions: size of the discrete action space (output width).
        # max_episode_steps: largest elapsed-step index that can be embedded.
        super().__init__()
        with self.init_scope():
            # Embedding of the elapsed-step count into a 3136-dim vector
            # used to gate the conv features below (3136 presumably equals
            # 64 channels * 7 * 7 for an 84x84 input — TODO confirm).
            self.embed = L.EmbedID(max_episode_steps + 1, 3136)
            # Conv stack: image -> flat 3136-dim feature vector.
            self.image2hidden = chainerrl.links.Sequence(
                L.Convolution2D(None, 32, 8, stride=4),
                F.relu,
                L.Convolution2D(None, 64, 4, stride=2),
                F.relu,
                L.Convolution2D(None, 64, 3, stride=1),
                functools.partial(F.reshape, shape=(-1, 3136)),
            )
            # Head: gated features -> per-action Q-values.
            self.hidden2out = chainerrl.links.Sequence(
                L.Linear(None, 512),
                F.relu,
                L.Linear(None, n_actions),
                DiscreteActionValue,
            )
    def __call__(self, x):
        # x is a tuple of (image batch, elapsed-steps batch), matching the
        # observations produced by ObserveElapsedSteps.
        image, steps = x
        # Sigmoid-gate the image features with the step embedding.
        h = self.image2hidden(image) * F.sigmoid(self.embed(steps))
        return self.hidden2out(h)
def main():
    """Train (or evaluate with --demo) a DoubleDQN agent on the pybullet
    Kuka diverse-object grasping env, with prioritized replay and
    epsilon-greedy exploration."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--outdir', type=str, default='results',
                        help='Directory path to save output files.'
                        ' If it does not exist, it will be created.')
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed [0, 2 ** 31)')
    parser.add_argument('--gpu', type=int, default=0,
                        help='GPU to use, set to -1 if no GPU.')
    parser.add_argument('--demo', action='store_true', default=False,
                        help='Evaluate the agent without training.')
    parser.add_argument('--load', type=str, default=None,
                        help='Load a saved agent from a given directory.')
    parser.add_argument('--final-exploration-steps',
                        type=int, default=5 * 10 ** 5,
                        help='Timesteps after which we stop'
                        ' annealing exploration rate')
    parser.add_argument('--final-epsilon', type=float, default=0.2,
                        help='Final value of epsilon during training.')
    parser.add_argument('--steps', type=int, default=2 * 10 ** 6,
                        help='Total number of timesteps to train the agent.')
    parser.add_argument('--replay-start-size', type=int, default=5 * 10 ** 4,
                        help='Minimum replay buffer size before'
                        ' performing gradient updates.')
    parser.add_argument('--target-update-interval',
                        type=int, default=1 * 10 ** 4,
                        help='Frequency (in timesteps) at which'
                        ' the target network is updated.')
    parser.add_argument('--eval-interval', type=int, default=10 ** 5,
                        help='Frequency (in timesteps) of evaluation phase.')
    parser.add_argument('--update-interval', type=int, default=1,
                        help='Frequency (in timesteps) of network updates.')
    parser.add_argument('--eval-n-runs', type=int, default=100,
                        help='Number of episodes used for evaluation.')
    parser.add_argument('--logging-level', type=int, default=20,
                        help='Logging level. 10:DEBUG, 20:INFO etc.')
    parser.add_argument('--render', action='store_true', default=False,
                        help='Render env states in a GUI window.')
    parser.add_argument('--lr', type=float, default=6.25e-5,
                        help='Learning rate')
    parser.add_argument('--num-envs', type=int, default=1,
                        help='Number of envs run in parallel.')
    parser.add_argument('--batch-size', type=int, default=32,
                        help='Batch size used for training.')
    parser.add_argument('--record', action='store_true', default=False,
                        help='Record videos of evaluation envs.'
                        ' --render should also be specified.')
    parser.add_argument('--gamma', type=float, default=0.99,
                        help='Discount factor.')
    args = parser.parse_args()
    import logging
    logging.basicConfig(level=args.logging_level)
    # Set a random seed used in ChainerRL.
    misc.set_random_seed(args.seed, gpus=(args.gpu,))
    # Set different random seeds for different subprocesses.
    # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
    assert process_seeds.max() < 2 ** 32
    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    print('Output files are saved in {}'.format(args.outdir))
    # Episode length is fixed by the env configuration below (maxSteps).
    max_episode_steps = 8
    def make_env(idx, test):
        # Build one (possibly wrapped) grasping env for subprocess `idx`.
        from pybullet_envs.bullet.kuka_diverse_object_gym_env import KukaDiverseObjectEnv # NOQA
        # Use different random seeds for train and test envs
        process_seed = int(process_seeds[idx])
        env_seed = 2 ** 32 - 1 - process_seed if test else process_seed
        # Set a random seed for this subprocess
        misc.set_random_seed(env_seed)
        env = KukaDiverseObjectEnv(
            isDiscrete=True,
            renders=args.render and (args.demo or not test),
            height=84,
            width=84,
            maxSteps=max_episode_steps,
            isTest=test,
        )
        # The env does not declare an observation space; supply one here.
        assert env.observation_space is None
        env.observation_space = gym.spaces.Box(
            low=0, high=255, shape=(84, 84, 3), dtype=np.uint8)
        # (84, 84, 3) -> (3, 84, 84)
        env = TransposeObservation(env, (2, 0, 1))
        env = ObserveElapsedSteps(env, max_episode_steps)
        # KukaDiverseObjectEnv internally asserts int actions and does not
        # accept python-future's newint.
        # NOTE(review): __builtins__ is the builtins module when this file
        # runs as a script, but a plain dict when it is imported — this
        # attribute access only works in the script case.
        env = CastAction(env, __builtins__.int)
        env.seed(int(env_seed))
        if test and args.record:
            assert args.render,\
                'To use --record, --render needs be specified.'
            video_dir = os.path.join(args.outdir, 'video_{}'.format(idx))
            os.mkdir(video_dir)
            env = RecordMovie(env, video_dir)
        return env
    def make_batch_env(test):
        # Vectorized env running args.num_envs subprocesses in parallel.
        return chainerrl.envs.MultiprocessVectorEnv(
            [functools.partial(make_env, idx, test)
             for idx in range(args.num_envs)])
    eval_env = make_batch_env(test=True)
    n_actions = eval_env.action_space.n
    q_func = GraspingQFunction(n_actions, max_episode_steps)
    # Draw the computational graph and save it in the output directory.
    fake_obs = (
        np.zeros((3, 84, 84), dtype=np.float32)[None],
        np.zeros((), dtype=np.int32)[None],
    )
    chainerrl.misc.draw_computational_graph(
        [q_func(fake_obs)],
        os.path.join(args.outdir, 'model'))
    # Use the hyper parameters of the Nature paper
    opt = optimizers.RMSpropGraves(
        lr=args.lr, alpha=0.95, momentum=0.0, eps=1e-2)
    opt.setup(q_func)
    # Anneal beta from beta0 to 1 throughout training
    betasteps = args.steps / args.update_interval
    rbuf = replay_buffer.PrioritizedReplayBuffer(
        10 ** 6, alpha=0.6, beta0=0.4, betasteps=betasteps)
    # Epsilon decays linearly from 1.0 to final_epsilon over
    # final_exploration_steps timesteps.
    explorer = explorers.LinearDecayEpsilonGreedy(
        1.0, args.final_epsilon,
        args.final_exploration_steps,
        lambda: np.random.randint(n_actions))
    def phi(x):
        # Feature extractor
        image, elapsed_steps = x
        # Normalize RGB values: [0, 255] -> [0, 1]
        norm_image = np.asarray(image, dtype=np.float32) / 255
        return norm_image, elapsed_steps
    agent = chainerrl.agents.DoubleDQN(
        q_func,
        opt,
        rbuf,
        gpu=args.gpu,
        gamma=args.gamma,
        explorer=explorer,
        minibatch_size=args.batch_size,
        replay_start_size=args.replay_start_size,
        target_update_interval=args.target_update_interval,
        update_interval=args.update_interval,
        batch_accumulator='sum',
        phi=phi,
    )
    if args.load:
        agent.load(args.load)
    if args.demo:
        # Evaluation-only mode: report summary statistics and exit.
        eval_stats = experiments.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs)
        print('n_runs: {} mean: {} median: {} stdev {}'.format(
            args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
            eval_stats['stdev']))
    else:
        # Full training loop with periodic evaluation.
        experiments.train_agent_batch_with_evaluation(
            agent=agent,
            env=make_batch_env(test=False),
            eval_env=eval_env,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            save_best_so_far_agent=False,
            log_interval=1000,
        )
if __name__ == '__main__':
    main()
|
|
from collections import defaultdict
import itertools
import sys
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
whitespace_re
)
# Public API of this module. Builders copied in later by
# register_treebuilders_from() are appended to this list at import time.
__all__ = [
    'HTMLTreeBuilder',
    'SAXTreeBuilder',
    'TreeBuilder',
    'TreeBuilderRegistry',
    ]
# Some useful features for a TreeBuilder to have. These strings go into a
# builder's `features` list and can be passed to
# TreeBuilderRegistry.lookup() to select a builder.
FAST = 'fast'
PERMISSIVE = 'permissive'
STRICT = 'strict'
XML = 'xml'
HTML = 'html'
HTML_5 = 'html5'
class TreeBuilderRegistry(object):
    """Tracks registered TreeBuilder classes and finds one by feature set."""

    def __init__(self):
        # Feature string -> builders advertising it, most recent first.
        self.builders_for_feature = defaultdict(list)
        # Every registered builder, most recent first.
        self.builders = []

    def register(self, treebuilder_class):
        """Register a treebuilder based on its advertised features."""
        for feature in treebuilder_class.features:
            self.builders_for_feature[feature].insert(0, treebuilder_class)
        self.builders.insert(0, treebuilder_class)

    def lookup(self, *features):
        """Return the most recently registered builder having all *features*.

        A requested feature that no builder advertises is skipped rather
        than treated as disqualifying. Returns None when nothing is
        registered or no builder satisfies the advertised features.
        """
        if not self.builders:
            # There are no builders at all.
            return None
        if not features:
            # No constraints: the newest registration wins.
            return self.builders[0]
        # Narrow the candidate set one feature at a time.
        candidates = None
        candidate_set = None
        for feature in features:
            have = self.builders_for_feature.get(feature, [])
            if not have:
                continue
            if candidates is None:
                candidates = have
                candidate_set = set(have)
            else:
                candidate_set &= set(have)
        if candidate_set is None:
            return None
        # Scan the first feature's list so registration order decides ties.
        for candidate in candidates:
            if candidate in candidate_set:
                return candidate
        return None
# The BeautifulSoup class will take feature lists from developers and use them
# to look up builders in this registry.
# Module-level singleton; builders defined below register themselves here
# via register_treebuilders_from().
builder_registry = TreeBuilderRegistry()
class TreeBuilder(object):
    """Turn a document into a Beautiful Soup object tree."""

    # Feature strings advertised to the TreeBuilderRegistry.
    features = []

    # True for builders that parse XML rather than HTML.
    is_xml = False

    # Tags whose whitespace must be preserved verbatim.
    preserve_whitespace_tags = set()

    # When None, a tag counts as an empty-element tag exactly when it has
    # no contents; subclasses may instead supply an explicit set of names.
    empty_element_tags = None

    # Maps a tag name (or '*' for any tag) to attributes whose value is a
    # space- or comma-separated list of CDATA, rather than a single CDATA.
    cdata_list_attributes = {}

    def __init__(self):
        self.soup = None

    def reset(self):
        pass

    def can_be_empty_element(self, tag_name):
        """Might a tag with this name be an empty-element tag?

        The final markup may or may not actually present this tag as
        self-closing. For instance: an HTMLBuilder does not consider a
        <p> tag to be an empty-element tag (it's not in
        HTMLBuilder.empty_element_tags), so an empty <p> tag will be
        presented as "<p></p>", not "<p />".

        With no explicit tag set, a tag is presented as an empty-element
        tag if and only if it has no contents: "<foo></foo>" becomes
        "<foo />", and "<foo>bar</foo>" is left alone.
        """
        if self.empty_element_tags is None:
            return True
        return tag_name in self.empty_element_tags

    def feed(self, markup):
        """Parse *markup*; concrete builders must implement this."""
        raise NotImplementedError()

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None):
        """Run any preliminary steps on incoming markup.

        The default implementation passes the markup through unchanged.
        """
        return markup, None, None, False

    def test_fragment_to_document(self, fragment):
        """Wrap an HTML fragment to make it look like a document.

        Different parsers do this differently. For instance, lxml
        introduces an empty <head> tag, and html5lib doesn't.
        Abstracting this away lets us write simple tests which run HTML
        fragments through the parser and compare the results against
        other HTML fragments.

        This method should not be used outside of tests.
        """
        return fragment

    def set_up_substitutions(self, tag):
        # Default: no encoding substitutions are necessary.
        return False

    def _replace_cdata_list_attribute_values(self, tag_name, attrs):
        """Replace e.g. class="foo bar" with class=["foo", "bar"].

        Modifies *attrs* in place and also returns it.
        """
        if not self.cdata_list_attributes:
            return attrs
        interesting = itertools.chain(
            self.cdata_list_attributes.get('*', []),
            self.cdata_list_attributes.get(tag_name.lower(), []))
        for attr in interesting:
            if attr not in attrs:
                continue
            value = attrs[attr]
            if isinstance(value, basestring):
                # A whitespace-separated CDATA list: split it.
                attrs[attr] = whitespace_re.split(value)
            else:
                # html5lib sometimes calls setAttributes twice for the
                # same tag when rearranging the parse tree; the value is
                # then already a list, so leave it alone.
                attrs[attr] = value
        return attrs
class SAXTreeBuilder(TreeBuilder):
    """A Beautiful Soup treebuilder that listens for SAX events."""

    def feed(self, markup):
        raise NotImplementedError()

    def close(self):
        pass

    def startElement(self, name, attrs):
        # Attribute keys arrive as 2-item sequences; keep only the second
        # element (presumably the local name — the namespace part is
        # discarded, matching the *NS handlers below).
        attrs = {key[1]: value for key, value in attrs.items()}
        self.soup.handle_starttag(name, attrs)

    def endElement(self, name):
        self.soup.handle_endtag(name)

    def startElementNS(self, nsTuple, nodeName, attrs):
        # Throw away (ns, nodeName) for now.
        self.startElement(nodeName, attrs)

    def endElementNS(self, nsTuple, nodeName):
        # Throw away (ns, nodeName) for now.
        self.endElement(nodeName)

    def startPrefixMapping(self, prefix, nodeValue):
        # Ignore the prefix for now.
        pass

    def endPrefixMapping(self, prefix):
        # Ignore the prefix for now.
        pass

    def characters(self, content):
        self.soup.handle_data(content)

    def startDocument(self):
        pass

    def endDocument(self):
        pass
class HTMLTreeBuilder(TreeBuilder):
    """This TreeBuilder knows facts about HTML.

    Such as which tags are empty-element tags.
    """

    preserve_whitespace_tags = set(['pre', 'textarea'])
    empty_element_tags = set(['br', 'hr', 'input', 'img', 'meta',
                              'spacer', 'link', 'frame', 'base'])

    # The HTML standard defines these attributes as containing a
    # space-separated list of values, not a single value. That is,
    # class="foo bar" means that the 'class' attribute has two values,
    # 'foo' and 'bar', not the single value 'foo bar'. When we
    # encounter one of these attributes, we will parse its value into
    # a list of values if possible. Upon output, the list will be
    # converted back into a string.
    cdata_list_attributes = {
        "*" : ['class', 'accesskey', 'dropzone'],
        "a" : ['rel', 'rev'],
        "link" : ['rel', 'rev'],
        # BUG FIX: "td" previously appeared twice in this literal; the
        # duplicate key silently overwrote the first (identical) entry
        # and has been removed.
        "td" : ["headers"],
        "th" : ["headers"],
        "form" : ["accept-charset"],
        "object" : ["archive"],
        # These are HTML5 specific, as are *.accesskey and *.dropzone above.
        "area" : ["rel"],
        "icon" : ["sizes"],
        "iframe" : ["sandbox"],
        "output" : ["for"],
    }

    def set_up_substitutions(self, tag):
        """Replace an encoding declaration inside a <meta> tag with a
        standin object that can take on any encoding on output.

        Returns True only when the tag directly declares an encoding via
        the HTML 5 "charset" attribute. The HTML 4 content-type case
        still performs the substitution but returns False, because the
        charset embedded in "content" is not parsed here.
        """
        # We are only interested in <meta> tags
        if tag.name != 'meta':
            return False
        http_equiv = tag.get('http-equiv')
        content = tag.get('content')
        charset = tag.get('charset')
        # We are interested in <meta> tags that say what encoding the
        # document was originally in. This means HTML 5-style <meta>
        # tags that provide the "charset" attribute. It also means
        # HTML 4-style <meta> tags that provide the "content"
        # attribute and have "http-equiv" set to "content-type".
        #
        # In both cases we will replace the value of the appropriate
        # attribute with a standin object that can take on any
        # encoding.
        meta_encoding = None
        if charset is not None:
            # HTML 5 style:
            # <meta charset="utf8">
            meta_encoding = charset
            tag['charset'] = CharsetMetaAttributeValue(charset)
        elif (content is not None and http_equiv is not None
              and http_equiv.lower() == 'content-type'):
            # HTML 4 style:
            # <meta http-equiv="content-type" content="text/html; charset=utf8">
            tag['content'] = ContentMetaAttributeValue(content)
        return (meta_encoding is not None)
def register_treebuilders_from(module):
    """Copy TreeBuilders from the given module into this module.

    Every TreeBuilder subclass listed in ``module.__all__`` is re-exported
    from ``bs4.builder`` and registered with the shared builder registry.
    """
    # I'm fairly sure this is not the best way to do this.
    this_module = sys.modules['bs4.builder']
    for name in module.__all__:
        obj = getattr(module, name)
        if not issubclass(obj, TreeBuilder):
            continue
        setattr(this_module, name, obj)
        this_module.__all__.append(name)
        # Register the builder while we're at it.
        this_module.builder_registry.register(obj)
# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last result.
# The stdlib-based _htmlparser builder is always importable.
from . import _htmlparser
register_treebuilders_from(_htmlparser)
try:
    from . import _html5lib
    register_treebuilders_from(_html5lib)
except ImportError:
    # They don't have html5lib installed.
    pass
try:
    from . import _lxml
    register_treebuilders_from(_lxml)
except ImportError:
    # They don't have lxml installed.
    pass
|
|
# -*- coding: utf-8 -*-
from simulation import Controller
def _createSystem():
    """Build and return the pyfuzzy System for the inverted pendulum.

    Inputs: Phi (angle, degrees), dPhi_dT (angular velocity), X (cart
    position), dX_dT (cart velocity). Single output: a (acceleration).
    """
    import fuzzy.System
    system = fuzzy.System.System(description=
    """This fuzzy system is to control the inverted pendulum into an upright position as well as
    at the position X=0.
    It also is used to demonstrate some features of pyfuzzy.
    This is the reason, it uses different fuzzy norm in normally
    symmetrical rules.""")
    from fuzzy.norm.AlgebraicProduct import AlgebraicProduct
    from fuzzy.norm.AlgebraicSum import AlgebraicSum
    from fuzzy.fuzzify.Plain import Plain
    from fuzzy.defuzzify.COG import COG
    # set defuzzification method and default norms
    INF = AlgebraicProduct()
    ACC = AlgebraicSum()
    COM = AlgebraicSum()
    CER = AlgebraicProduct()
    # NOTE: this rebinds the class name COG to the configured instance.
    COG = COG(INF=INF,ACC=ACC,failsafe = 0., segment_size=0.5)
    from fuzzy.InputVariable import InputVariable
    from fuzzy.OutputVariable import OutputVariable
    from fuzzy.Adjective import Adjective
    from fuzzy.set.Polygon import Polygon
    # --- input: pendulum angle, triangular memberships every 30 degrees.
    angle = InputVariable(fuzzify=Plain(),description='angle',min=0.,max=360.,unit='degrees')
    system.variables['Phi'] = angle
    angle.adjectives['up_more_right'] = Adjective(Polygon([(0.,0.),(30.,1.),(60.,0.)]))
    angle.adjectives['up_right'] = Adjective(Polygon([(30.,0.),(60.,1.),(90.,0.)]))
    angle.adjectives['up'] = Adjective(Polygon([(60.,0.),(90.,1.),(120.,0.)]))
    angle.adjectives['up_left'] = Adjective(Polygon([(90.,0.),(120.,1.),(150.,0.)]))
    angle.adjectives['up_more_left'] = Adjective(Polygon([(120.,0.),(150.,1.),(180.,0.)]))
    angle.adjectives['down_more_left'] = Adjective(Polygon([(180.,0.),(210.,1.),(240.,0.)]))
    angle.adjectives['down_left'] = Adjective(Polygon([(210.,0.),(240.,1.),(270.,0.)]))
    angle.adjectives['down'] = Adjective(Polygon([(240.,0.),(270.,1.),(300.,0.)]))
    angle.adjectives['down_right'] = Adjective(Polygon([(270.,0.),(300.,1.),(330.,0.)]))
    angle.adjectives['down_more_right'] = Adjective(Polygon([(300.,0.),(330.,1.),(360.,0.)]))
    # --- input: angular velocity.
    angle_velocity = InputVariable(fuzzify=Plain(),description='angle velocity',min=-600.,max=600.,unit='degrees per second')
    system.variables['dPhi_dT'] = angle_velocity
    angle_velocity.adjectives['cw_fast'] = Adjective(Polygon([(-600.,1.),(-300.,0.)]))
    angle_velocity.adjectives['cw_slow'] = Adjective(Polygon([(-600.,0.),(-300.,1.),(0.,0.)]))
    angle_velocity.adjectives['stop'] = Adjective(Polygon([(-300.,0.),(0.,1.),(300.,0.)]))
    angle_velocity.adjectives['ccw_slow'] = Adjective(Polygon([(0.,0.),(300.,1.),(600.,0.)]))
    angle_velocity.adjectives['ccw_fast'] = Adjective(Polygon([(300.,0.),(600.,1.)]))
    # --- input: cart position.
    position = InputVariable(fuzzify=Plain(),description='position',min=-20.,max=20.,unit='meter')
    system.variables['X'] = position
    position.adjectives['left_far'] = Adjective(Polygon([(-20.,1.),(-10.,0.)]))
    position.adjectives['left_near'] = Adjective(Polygon([(-20.,0.),(-5.,1.),(0.,0.)]))
    position.adjectives['stop'] = Adjective(Polygon([(-5.,0.),(0.,1.),(5.,0.)]))
    position.adjectives['right_near'] = Adjective(Polygon([(0.,0.),(5.,1.),(20.,0.)]))
    position.adjectives['right_far'] = Adjective(Polygon([(10.,0.),(20.,1.)]))
    # --- input: cart velocity.
    velocity = InputVariable(fuzzify=Plain(),description='velocity',min=-10.,max=10.,unit='meter per second')
    system.variables['dX_dT'] = velocity
    velocity.adjectives['left_fast'] = Adjective(Polygon([(-10.,1.),(-5.,0.)]))
    velocity.adjectives['left_slow'] = Adjective(Polygon([(-10.,0.),(-2.,1.),(0.,0.)]))
    velocity.adjectives['stop'] = Adjective(Polygon([(-2.,0.),(0.,1.),(2.,0.)]))
    velocity.adjectives['right_slow'] = Adjective(Polygon([(0.,0.),(2.,1.),(10.,0.)]))
    velocity.adjectives['right_fast'] = Adjective(Polygon([(5.,0.),(10.,1.)]))
    # --- output: commanded acceleration, defuzzified with the COG instance.
    acceleration = OutputVariable(defuzzify=COG,description='acceleration',min=-50.,max=50.,unit='meter per second^2')
    system.variables['a'] = acceleration
    acceleration.adjectives['left_fast'] = a_left_fast = Adjective(Polygon([(-50.,0.),(-20.,1.),(-10.,0.)]),COM=COM)
    acceleration.adjectives['left_slow'] = a_left_slow = Adjective(Polygon([(-20.,0.),(-10.,1.),(0.,0.)]),COM=COM)
    acceleration.adjectives['stop'] = a_stop = Adjective(Polygon([(-10.,0.),(0.,1.),(10.,0.)]),COM=COM)
    acceleration.adjectives['right_slow'] = a_right_slow = Adjective(Polygon([(0.,0.),(10.,1.),(20.,0.)]),COM=COM)
    acceleration.adjectives['right_fast'] = a_right_fast = Adjective(Polygon([(10.,0.),(20.,1.),(50.,0.)]),COM=COM)
    from fuzzy.Rule import Rule
    from fuzzy.norm.Max import Max
    #from fuzzy.norm.Min import Min
    #from fuzzy.norm.BoundedDifference import BoundedDifference
    #from fuzzy.norm.DrasticSum import DrasticSum
    from fuzzy.norm.EinsteinSum import EinsteinSum
    from fuzzy.norm.DombiUnion import DombiUnion
    from fuzzy.operator.Compound import Compound
    from fuzzy.operator.Input import Input
    from fuzzy.operator.Not import Not
    # --- rules. Per the system description, some mirrored rule pairs use
    # deliberately different norms (EinsteinSum vs DombiUnion) for demo.
    system.rules['stop'] = Rule(
            adjective=a_stop,
            # it gets its value from here
            operator=Compound(
                Max(),
                Compound(
                    AlgebraicProduct(),
                    Input(system.variables["Phi"].adjectives["up"]),
                    Input(system.variables["dPhi_dT"].adjectives["stop"])
                ),
                Compound(
                    AlgebraicProduct(),
                    Input(system.variables["Phi"].adjectives["up_right"]),
                    Input(system.variables["dPhi_dT"].adjectives["ccw_slow"])
                ),
                Compound(
                    AlgebraicProduct(),
                    Input(system.variables["Phi"].adjectives["up_left"]),
                    Input(system.variables["dPhi_dT"].adjectives["cw_slow"])
                )
            ),
            CER=CER
        )
    system.rules['tilts right'] = Rule(
            adjective=a_right_slow,
            # it gets its value from here
            operator=Compound(
                AlgebraicProduct(),
                Not(
                    Compound(
                        AlgebraicProduct(),
                        Compound(
                            AlgebraicSum(),
                            Input(system.variables["X"].adjectives["left_near"]),
                            Input(system.variables["X"].adjectives["left_far"])
                        ),
                        Compound(
                            EinsteinSum(),
                            Input(system.variables["dX_dT"].adjectives["left_slow"]),
                            Input(system.variables["dX_dT"].adjectives["left_fast"])
                        )
                    ),
                ),
                Input(system.variables["Phi"].adjectives["up_right"])
            ),
            CER=CER
        )
    system.rules['tilts left'] = Rule(
            adjective=a_left_slow,
            # it gets its value from here
            operator=Compound(
                AlgebraicProduct(),
                Not(
                    Compound(
                        AlgebraicProduct(),
                        Compound(
                            AlgebraicSum(),
                            Input(system.variables["X"].adjectives["right_near"]),
                            Input(system.variables["X"].adjectives["right_far"])
                        ),
                        Compound(
                            DombiUnion(0.25),
                            Input(system.variables["dX_dT"].adjectives["right_slow"]),
                            Input(system.variables["dX_dT"].adjectives["right_fast"])
                        )
                    ),
                ),
                Input(system.variables["Phi"].adjectives["up_left"])
            ),
            CER=CER
        )
    system.rules['far right'] = Rule(
            adjective=a_right_fast,
            # it gets its value from here
            operator=Input(system.variables["Phi"].adjectives["up_more_right"]),
            CER=CER
        )
    system.rules['far left'] = Rule(
            adjective=a_left_fast,
            # it gets its value from here
            operator=Input(system.variables["Phi"].adjectives["up_more_left"]),
            CER=CER
        )
    # NOTE(review): the same cw_slow input appears twice inside this
    # AlgebraicProduct, which squares its membership value. The mirrored
    # ccw rule below does the same, so it looks deliberate — confirm.
    system.rules['accelerate cw if down'] = Rule(
            adjective=a_right_slow,
            # it gets its value from here
            operator=Compound(
                AlgebraicProduct(),
                Input(system.variables["Phi"].adjectives["down"]),
                Compound(
                    AlgebraicProduct(),
                    Input(system.variables["dPhi_dT"].adjectives["cw_slow"]),
                    Input(system.variables["dPhi_dT"].adjectives["cw_slow"]),
                )
            ),
            CER=CER
        )
    system.rules['accelerate ccw if down'] = Rule(
            adjective=a_left_slow,
            # it gets its value from here
            operator=Compound(
                AlgebraicProduct(),
                Input(system.variables["Phi"].adjectives["down"]),
                Compound(
                    AlgebraicProduct(),
                    Input(system.variables["dPhi_dT"].adjectives["ccw_slow"]),
                    Input(system.variables["dPhi_dT"].adjectives["ccw_slow"]),
                )
            ),
            CER=CER
        )
    return system
class FuzzyController(Controller.Controller):
    """Fuzzy controller for the inverted pendulum simulation.

    Wraps the fuzzy system built by _createSystem() and exposes the
    Controller interface expected by the simulation package.
    """

    def __init__(self):
        self.system = _createSystem()

    def calculate(self, input=None, output=None):
        """Run one fuzzy inference step.

        input: dict of crisp input values keyed by variable name
            ('Phi', 'dPhi_dT', 'X', 'dX_dT'); defaults to an empty dict.
        output: dict the crisp result is written into; defaults to
            {'a': 0.0}. The (possibly newly created) dict is returned.
        """
        # BUG FIX: the defaults used to be mutable dicts created once at
        # definition time, so values leaked between calls (and between
        # controller instances). Create fresh dicts per call instead.
        if input is None:
            input = {}
        if output is None:
            output = {'a': 0.0}
        self.system.calculate(input, output)
        # hold old value if no results in next calculations
        self.system.variables['a'].defuzzify.failsafe = 0  # output['a']
        return output

    def createDoc(self, directory):
        """Write gnuplot documentation plots of the system into *directory*."""
        from fuzzy.doc.plot.gnuplot import doc
        d = doc.Doc(directory)
        d.createDoc(self.system)
        d.overscan = 0
        d.create3DPlot(self.system, "Phi", "dPhi_dT", "a", {"X": 0., "dX_dT": 0.})

    def createDot(self, directory):
        """Render rule and system structure graphs to PNG via graphviz dot.

        NOTE(review): the shell command embeds *directory* and each rule
        name inside single quotes; a quote character in either would
        break the command (shell=True). Rule names here are internal
        constants, but confirm before accepting external paths.
        """
        import fuzzy.doc.structure.dot.dot
        import subprocess
        for name, rule in self.system.rules.items():
            cmd = "dot -T png -o '%s/Rule %s.png'" % (directory, name)
            proc = subprocess.Popen(cmd, shell=True, bufsize=32768,
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            fuzzy.doc.structure.dot.dot.print_header(proc.stdin, "XXX")
            fuzzy.doc.structure.dot.dot.print_dot(rule, proc.stdin, self.system, "")
            fuzzy.doc.structure.dot.dot.print_footer(proc.stdin)
            proc.communicate()
        cmd = "dot -T png -o '%s/System.png'" % directory
        proc = subprocess.Popen(cmd, shell=True, bufsize=32768,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        fuzzy.doc.structure.dot.dot.printDot(self.system, proc.stdin)
        proc.communicate()
|
|
"""Bayesian variant calling with FreeBayes.
https://github.com/ekg/freebayes
"""
import os
import sys
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.pipeline.shared import subset_variant_regions
from bcbio.provenance import do
from bcbio.variation import annotation, bedutils, ploidy, vcfutils
from bcbio.variation.vcfutils import (get_paired_bams, is_paired_analysis,
move_vcf)
def region_to_freebayes(region):
    """Convert a (chrom, start, end) region into FreeBayes' own
    chrom:start..end string form; any other value passes through as-is.
    """
    if not isinstance(region, (list, tuple)):
        return region
    chrom, start, end = region
    return "{0}:{1}..{2}".format(chrom, start, end)
def _freebayes_options_from_config(items, config, out_file, region=None):
    """Prepare standard options from configuration input.

    Input BED target files are merged to avoid overlapping regions which
    cause FreeBayes to call multiple times.
    """
    opts = ["--ploidy", str(ploidy.get_ploidy(items, region))]
    variant_regions = bedutils.merge_overlaps(
        utils.get_in(config, ("algorithm", "variant_regions")), items[0])
    target = subset_variant_regions(variant_regions, region, out_file, items)
    if target:
        # A BED file of targets versus a single region specification.
        if isinstance(target, basestring) and os.path.isfile(target):
            opts.extend(["--targets", target])
        else:
            opts.extend(["--region", region_to_freebayes(target)])
    # Pass through any user-supplied resource options.
    extra = config_utils.get_resources("freebayes", config).get("options")
    if extra:
        opts.extend(extra)
    return opts
def _add_somatic_opts(opts, paired):
"""Add somatic options to current set. See _run_freebayes_paired for references.
"""
if "--min-alternate-fraction" not in opts and "-F" not in opts:
# add minimum reportable allele frequency
# FreeBayes defaults to 20%, but use 10% by default for the
# tumor case
min_af = float(utils.get_in(paired.tumor_config, ("algorithm",
"min_allele_fraction"), 10)) / 100.0
opts += " --min-alternate-fraction %s" % min_af
# Recommended settings for cancer calling
opts += (" --pooled-discrete --pooled-continuous --genotype-qualities "
"--report-genotype-likelihood-max --allele-balance-priors-off")
return opts
def run_freebayes(align_bams, items, ref_file, assoc_files, region=None,
                  out_file=None):
    """Run FreeBayes variant calling, either paired tumor/normal or germline calling.
    """
    if not is_paired_analysis(align_bams, items):
        # Germline calling; surface any pairing inconsistencies first.
        vcfutils.check_paired_problems(items)
        return _run_freebayes_caller(align_bams, items, ref_file,
                                     assoc_files, region, out_file)
    paired = get_paired_bams(align_bams, items)
    if paired.normal_bam:
        # Full tumor/normal paired calling.
        return _run_freebayes_paired(align_bams, items, ref_file,
                                     assoc_files, region, out_file)
    # Tumor-only: single-sample calling with somatic options enabled.
    return _run_freebayes_caller(align_bams, items, ref_file,
                                 assoc_files, region, out_file,
                                 somatic=paired)
def _run_freebayes_caller(align_bams, items, ref_file, assoc_files,
                          region=None, out_file=None, somatic=None):
    """Detect SNPs and indels with FreeBayes.

    Performs post-filtering to remove very low quality variants which
    can cause issues feeding into GATK. Breaks variants into individual
    allelic primitives for analysis and evaluation.

    :param align_bams: aligned BAM files called jointly
    :param items: sample dictionaries; items[0]["config"] drives options
    :param ref_file: reference FASTA
    :param assoc_files: associated resources; the "dbsnp" entry feeds annotation
    :param region: optional region restriction
    :param out_file: output VCF; derived from the first BAM when None
    :param somatic: optional paired info; triggers somatic options when truthy
    :return: annotated VCF file
    """
    config = items[0]["config"]
    if out_file is None:
        out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
    if not utils.file_exists(out_file):
        with file_transaction(items[0], out_file) as tx_out_file:
            for align_bam in align_bams:
                bam.index(align_bam, config)
            freebayes = config_utils.get_program("freebayes", config)
            vcffilter = config_utils.get_program("vcffilter", config)
            input_bams = " ".join("-b %s" % x for x in align_bams)
            opts = " ".join(_freebayes_options_from_config(items, config, out_file, region))
            # Recommended options from 1000 genomes low-complexity evaluation
            # https://groups.google.com/d/msg/freebayes/GvxIzjcpbas/1G6e3ArxQ4cJ
            opts += " --min-repeat-entropy 1 --experimental-gls"
            if somatic:
                opts = _add_somatic_opts(opts, somatic)
            compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
            fix_ambig = vcfutils.fix_ambiguous_cl()
            # NOTE: cmd.format(**locals()) makes the local variable names above
            # part of the command template -- do not rename them.
            cmd = ("{freebayes} -f {ref_file} {input_bams} {opts} | "
                   "{vcffilter} -f 'QUAL > 5' -s | {fix_ambig} | "
                   "vcfallelicprimitives --keep-info --keep-geno | vcffixup | vcfstreamsort | "
                   "vt normalize -r {ref_file} -q - 2> /dev/null | vcfuniqalleles "
                   "{compress_cmd} > {tx_out_file}")
            do.run(cmd.format(**locals()), "Genotyping with FreeBayes", {})
    # Annotation runs even when the call file already existed.
    ann_file = annotation.annotate_nongatk_vcf(out_file, align_bams,
                                               assoc_files.get("dbsnp"),
                                               ref_file, config)
    return ann_file
def _run_freebayes_paired(align_bams, items, ref_file, assoc_files,
                          region=None, out_file=None):
    """Detect SNPs and indels with FreeBayes for paired tumor/normal samples.

    Sources of options for FreeBayes:
    mailing list: https://groups.google.com/d/msg/freebayes/dTWBtLyM4Vs/HAK_ZhJHguMJ
    mailing list: https://groups.google.com/forum/#!msg/freebayes/LLH7ZfZlVNs/63FdD31rrfEJ
    speedseq: https://github.com/cc2qe/speedseq/blob/e6729aa2589eca4e3a946f398c1a2bdc15a7300d/bin/speedseq#L916
    sga/freebayes: https://github.com/jts/sga-extra/blob/7e28caf71e8107b697f9be7162050e4fa259694b/
    sga_generate_varcall_makefile.pl#L299

    :return: annotated VCF file
    :raises AssertionError: when no normal BAM is available for the pair
    """
    config = items[0]["config"]
    if out_file is None:
        out_file = "%s-paired-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
    if not utils.file_exists(out_file):
        with file_transaction(items[0], out_file) as tx_out_file:
            paired = get_paired_bams(align_bams, items)
            assert paired.normal_bam, "Require normal BAM for FreeBayes paired calling and filtering"
            freebayes = config_utils.get_program("freebayes", config)
            opts = " ".join(_freebayes_options_from_config(items, config, out_file, region))
            opts += " --min-repeat-entropy 1 --experimental-gls"
            opts = _add_somatic_opts(opts, paired)
            compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
            fix_ambig = vcfutils.fix_ambiguous_cl()
            # "py" helper executable next to the python interpreter; streams
            # each line through call_somatic below for SOMATIC/REJECT marking.
            py_cl = os.path.join(os.path.dirname(sys.executable), "py")
            # NOTE: cl.format(**locals()) makes the local variable names above
            # part of the command template -- do not rename them.
            cl = ("{freebayes} -f {ref_file} {opts} "
                  "{paired.tumor_bam} {paired.normal_bam} "
                  "| vcffilter -f 'QUAL > 5' -s "
                  "| {py_cl} -x 'bcbio.variation.freebayes.call_somatic(x)' "
                  "| {fix_ambig} | "
                  "vcfallelicprimitives --keep-info --keep-geno | vcffixup | vcfstreamsort | "
                  "vt normalize -r {ref_file} -q - 2> /dev/null | vcfuniqalleles "
                  "{compress_cmd} > {tx_out_file}")
            bam.index(paired.tumor_bam, config)
            bam.index(paired.normal_bam, config)
            do.run(cl.format(**locals()), "Genotyping paired variants with FreeBayes", {})
    # Annotation runs even when the call file already existed.
    ann_file = annotation.annotate_nongatk_vcf(out_file, align_bams,
                                               assoc_files.get("dbsnp"), ref_file,
                                               config)
    return ann_file
# ## Filtering
def _check_lods(parts, tumor_thresh, normal_thresh):
"""Ensure likelihoods for tumor and normal pass thresholds.
Skipped if no FreeBayes GL annotations available.
"""
try:
gl_index = parts[8].split(":").index("GL")
except ValueError:
return True
try:
tumor_gls = [float(x) for x in parts[9].split(":")[gl_index].split(",")]
tumor_lod = max(tumor_gls[i] - tumor_gls[0] for i in range(1, len(tumor_gls)))
# No GL information, no tumor call (so fail it)
except IndexError:
tumor_lod = -1.0
try:
normal_gls = [float(x) for x in parts[10].split(":")[gl_index].split(",")]
normal_lod = min(normal_gls[0] - normal_gls[i] for i in range(1, len(normal_gls)))
# No GL inofmration, no normal call (so pass it)
except IndexError:
normal_lod = normal_thresh
return normal_lod >= normal_thresh and tumor_lod >= tumor_thresh
def _check_freqs(parts):
"""Ensure frequency of tumor to normal passes a reasonable threshold.
Avoids calling low frequency tumors also present at low frequency in normals,
which indicates a contamination or persistent error.
"""
thresh_ratio = 2.7
try: # FreeBayes
ao_index = parts[8].split(":").index("AO")
ro_index = parts[8].split(":").index("RO")
except ValueError:
ao_index, ro_index = None, None
try: # VarDict
af_index = parts[8].split(":").index("AF")
except ValueError:
af_index = None
if af_index is None and ao_index is None:
raise NotImplementedError("Unexpected format annotations: %s" % parts[0])
def _calc_freq(item):
try:
if ao_index is not None and ro_index is not None:
ao = sum([int(x) for x in item.split(":")[ao_index].split(",")])
ro = int(item.split(":")[ro_index])
freq = ao / float(ao + ro)
elif af_index is not None:
freq = float(item.split(":")[af_index])
except (IndexError, ValueError, ZeroDivisionError):
freq = 0.0
return freq
tumor_freq, normal_freq = _calc_freq(parts[9]), _calc_freq(parts[10])
return normal_freq <= 0.001 or normal_freq <= tumor_freq / thresh_ratio
def call_somatic(line):
    """Call SOMATIC variants from tumor/normal calls, adding REJECT filters and SOMATIC flag.

    Assumes tumor/normal called with tumor first and normal second, as done in bcbio
    implementation.

    Uses MuTect like somatic filter based on implementation in speedseq:
    https://github.com/cc2qe/speedseq/blob/e6729aa2589eca4e3a946f398c1a2bdc15a7300d/bin/speedseq#L62

    Extracts the genotype likelihoods (GLs) from FreeBayes, which are like phred scores
    except not multiplied by 10.0 (https://en.wikipedia.org/wiki/Phred_quality_score).
    For tumors, we retrieve the best likelihood to not be reference (the first GL) and
    for normal, the best likelihood to be reference.

    After calculating the likelihoods, we compare these to thresholds to pass variants
    at tuned sensitivity/precision. Tuning done on DREAM synthetic 3 dataset evaluations.

    We also check that the frequency of the tumor exceeds the frequency of the normal by
    a threshold to avoid calls that are low frequency in both tumor and normal. This supports
    both FreeBayes and VarDict output frequencies.
    """
    # Thresholds are like phred scores, so 3.5 = phred35
    tumor_thresh, normal_thresh = 3.5, 3.5
    if line.startswith("#CHROM"):
        # Inject our INFO/FILTER definitions just before the column header line.
        headers = ['##INFO=<ID=SOMATIC,Number=0,Type=Flag,Description="Somatic event">',
                   ('##FILTER=<ID=REJECT,Description="Not somatic due to normal call frequency '
                    'or phred likelihoods: tumor: %s, normal %s.">')
                   % (int(tumor_thresh * 10), int(normal_thresh * 10))]
        return "\n".join(headers) + "\n" + line
    if line.startswith("#"):
        return line
    parts = line.split("\t")
    if _check_lods(parts, tumor_thresh, normal_thresh) and _check_freqs(parts):
        parts[7] += ";SOMATIC"
    elif parts[6] in (".", "PASS"):
        parts[6] = "REJECT"
    else:
        parts[6] += ";REJECT"
    return "\t".join(parts)
def _clean_freebayes_output(line):
"""Clean FreeBayes output to make post-processing with GATK happy.
XXX Not applied on recent versions which fix issues to be more compatible
with bgzip output, but retained in case of need.
- Remove lines from FreeBayes outputs where REF/ALT are identical:
2 22816178 . G G 0.0339196
or there are multiple duplicate alleles:
4 60594753 . TGAAA T,T
- Remove Type=Int specifications which are not valid VCF and GATK chokes
on.
"""
if line.startswith("#"):
line = line.replace("Type=Int,D", "Type=Integer,D")
return line
else:
parts = line.split("\t")
alleles = [x.strip() for x in parts[4].split(",")] + [parts[3].strip()]
if len(alleles) == len(set(alleles)):
return line
return None
def clean_vcf_output(orig_file, clean_fn, config, name="clean"):
    """Clean a VCF file in-place with the supplied per-line clean function.

    The original file is preserved with an ``.orig`` suffix; a stub file is
    left at the cleaned location pointing back to where the output moved.
    """
    base, ext = utils.splitext_plus(orig_file)
    out_file = "{0}-{1}{2}".format(base, name, ext)
    if utils.file_exists(out_file):
        return
    with open(orig_file) as in_handle:
        with file_transaction(config, out_file) as tx_out_file:
            with open(tx_out_file, "w") as out_handle:
                for line in in_handle:
                    cleaned = clean_fn(line)
                    # A falsy return drops the line entirely.
                    if cleaned:
                        out_handle.write(cleaned)
    move_vcf(orig_file, "{0}.orig".format(orig_file))
    move_vcf(out_file, orig_file)
    with open(out_file, "w") as out_handle:
        out_handle.write("Moved to {0}".format(orig_file))
|
|
# test_repo.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from git.test.lib import (patch,
TestBase,
with_rw_repo,
fixture,
GIT_REPO,
assert_false,
assert_equal,
assert_true,
raises)
from git import *
from git.util import join_path_native
from git.exc import BadObject
from gitdb.util import hex_to_bin, bin_to_hex
import os, sys
import tempfile
import shutil
from cStringIO import StringIO
class TestRepo(TestBase):
    """Integration tests for the git.Repo object: construction, heads/tags,
    commits, rev-parse syntaxes, blame, dirty state, submodules and the
    Git.CatFileContentStream wrapper.

    Most tests run against the shared read-only fixture ``self.rorepo``
    provided by TestBase. Python 2 era code: uses the ``print`` statement,
    ``cStringIO`` and ``os.tmpfile``.
    """
    @raises(InvalidGitRepositoryError)
    def test_new_should_raise_on_invalid_repo_location(self):
        Repo(tempfile.gettempdir())

    @raises(NoSuchPathError)
    def test_new_should_raise_on_non_existant_path(self):
        Repo("repos/foobar")

    def test_repo_creation_from_different_paths(self):
        r_from_gitdir = Repo(self.rorepo.git_dir)
        assert r_from_gitdir.git_dir == self.rorepo.git_dir
        assert r_from_gitdir.git_dir.endswith('.git')
        assert not self.rorepo.git.working_dir.endswith('.git')
        assert r_from_gitdir.git.working_dir == self.rorepo.git.working_dir

    def test_description(self):
        txt = "Test repository"
        self.rorepo.description = txt
        assert_equal(self.rorepo.description, txt)

    def test_heads_should_return_array_of_head_objects(self):
        for head in self.rorepo.heads:
            assert_equal(Head, head.__class__)

    def test_heads_should_populate_head_data(self):
        for head in self.rorepo.heads:
            assert head.name
            assert isinstance(head.commit,Commit)
        # END for each head
        assert isinstance(self.rorepo.heads.master, Head)
        assert isinstance(self.rorepo.heads['master'], Head)

    def test_tree_from_revision(self):
        tree = self.rorepo.tree('0.1.6')
        assert len(tree.hexsha) == 40
        assert tree.type == "tree"
        assert self.rorepo.tree(tree) == tree
        # try from invalid revision that does not exist
        self.failUnlessRaises(BadObject, self.rorepo.tree, 'hello world')

    def test_commit_from_revision(self):
        commit = self.rorepo.commit('0.1.4')
        assert commit.type == 'commit'
        assert self.rorepo.commit(commit) == commit

    def test_commits(self):
        # fixed metadata of a known commit in the fixture repository
        mc = 10
        commits = list(self.rorepo.iter_commits('0.1.6', max_count=mc))
        assert len(commits) == mc
        c = commits[0]
        assert_equal('9a4b1d4d11eee3c5362a4152216376e634bd14cf', c.hexsha)
        assert_equal(["c76852d0bff115720af3f27acdb084c59361e5f6"], [p.hexsha for p in c.parents])
        assert_equal("ce41fc29549042f1aa09cc03174896cf23f112e3", c.tree.hexsha)
        assert_equal("Michael Trier", c.author.name)
        assert_equal("mtrier@gmail.com", c.author.email)
        assert_equal(1232829715, c.authored_date)
        assert_equal(5*3600, c.author_tz_offset)
        assert_equal("Michael Trier", c.committer.name)
        assert_equal("mtrier@gmail.com", c.committer.email)
        assert_equal(1232829715, c.committed_date)
        assert_equal(5*3600, c.committer_tz_offset)
        assert_equal("Bumped version 0.1.6\n", c.message)
        c = commits[1]
        assert isinstance(c.parents, tuple)

    def test_trees(self):
        mc = 30
        num_trees = 0
        for tree in self.rorepo.iter_trees('0.1.5', max_count=mc):
            num_trees += 1
            assert isinstance(tree, Tree)
        # END for each tree
        assert num_trees == mc

    def _assert_empty_repo(self, repo):
        # test all kinds of things with an empty, freshly initialized repo.
        # It should throw good errors
        # entries should be empty
        assert len(repo.index.entries) == 0
        # head is accessible
        assert repo.head
        assert repo.head.ref
        assert not repo.head.is_valid()
        # we can change the head to some other ref
        head_ref = Head.from_path(repo, Head.to_full_path('some_head'))
        assert not head_ref.is_valid()
        repo.head.ref = head_ref
        # is_dirty can handle all kwargs
        for args in ((1, 0, 0), (0, 1, 0), (0, 0, 1)):
            assert not repo.is_dirty(*args)
        # END for each arg
        # we can add a file to the index ( if we are not bare )
        if not repo.bare:
            pass
        # END test repos with working tree

    def test_init(self):
        # exercises Repo.init / clone / clone_from in a temp directory,
        # restoring the working directory afterwards
        prev_cwd = os.getcwd()
        os.chdir(tempfile.gettempdir())
        git_dir_rela = "repos/foo/bar.git"
        del_dir_abs = os.path.abspath("repos")
        git_dir_abs = os.path.abspath(git_dir_rela)
        try:
            # with specific path
            for path in (git_dir_rela, git_dir_abs):
                r = Repo.init(path=path, bare=True)
                assert isinstance(r, Repo)
                assert r.bare == True
                assert os.path.isdir(r.git_dir)
                self._assert_empty_repo(r)
                # test clone
                clone_path = path + "_clone"
                rc = r.clone(clone_path)
                self._assert_empty_repo(rc)
                try:
                    shutil.rmtree(clone_path)
                except OSError:
                    # when relative paths are used, the clone may actually be inside
                    # of the parent directory
                    pass
                # END exception handling
                # try again, this time with the absolute version
                rc = Repo.clone_from(r.git_dir, clone_path)
                self._assert_empty_repo(rc)
                shutil.rmtree(git_dir_abs)
                try:
                    shutil.rmtree(clone_path)
                except OSError:
                    # when relative paths are used, the clone may actually be inside
                    # of the parent directory
                    pass
                # END exception handling
            # END for each path
            os.makedirs(git_dir_rela)
            os.chdir(git_dir_rela)
            r = Repo.init(bare=False)
            r.bare == False  # NOTE(review): no-op comparison -- presumably meant ``assert r.bare == False``; confirm
            self._assert_empty_repo(r)
        finally:
            try:
                shutil.rmtree(del_dir_abs)
            except OSError:
                pass
            os.chdir(prev_cwd)
        # END restore previous state

    def test_bare_property(self):
        self.rorepo.bare

    def test_daemon_export(self):
        orig_val = self.rorepo.daemon_export
        self.rorepo.daemon_export = not orig_val
        assert self.rorepo.daemon_export == ( not orig_val )
        self.rorepo.daemon_export = orig_val
        assert self.rorepo.daemon_export == orig_val

    def test_alternates(self):
        cur_alternates = self.rorepo.alternates
        # empty alternates
        self.rorepo.alternates = []
        assert self.rorepo.alternates == []
        alts = [ "other/location", "this/location" ]
        self.rorepo.alternates = alts
        assert alts == self.rorepo.alternates
        self.rorepo.alternates = cur_alternates

    def test_repr(self):
        path = os.path.join(os.path.abspath(GIT_REPO), '.git')
        assert_equal('<git.Repo "%s">' % path, repr(self.rorepo))

    def test_is_dirty_with_bare_repository(self):
        orig_value = self.rorepo._bare
        self.rorepo._bare = True
        assert_false(self.rorepo.is_dirty())
        self.rorepo._bare = orig_value

    def test_is_dirty(self):
        self.rorepo._bare = False
        for index in (0,1):
            for working_tree in (0,1):
                for untracked_files in (0,1):
                    assert self.rorepo.is_dirty(index, working_tree, untracked_files) in (True, False)
                # END untracked files
            # END working tree
        # END index
        orig_val = self.rorepo._bare
        self.rorepo._bare = True
        assert self.rorepo.is_dirty() == False
        self.rorepo._bare = orig_val

    def test_head(self):
        assert self.rorepo.head.reference.object == self.rorepo.active_branch.object

    def test_index(self):
        index = self.rorepo.index
        assert isinstance(index, IndexFile)

    def test_tag(self):
        assert self.rorepo.tag('refs/tags/0.1.5').commit

    def test_archive(self):
        tmpfile = os.tmpfile()
        self.rorepo.archive(tmpfile, '0.1.5')
        assert tmpfile.tell()

    @patch.object(Git, '_call_process')
    def test_should_display_blame_information(self, git):
        # blame output is mocked from a fixture; asserts exact parsed values
        git.return_value = fixture('blame')
        b = self.rorepo.blame( 'master', 'lib/git.py')
        assert_equal(13, len(b))
        assert_equal( 2, len(b[0]) )
        # assert_equal(25, reduce(lambda acc, x: acc + len(x[-1]), b))
        assert_equal(hash(b[0][0]), hash(b[9][0]))
        c = b[0][0]
        assert_true(git.called)
        assert_equal(git.call_args, (('blame', 'master', '--', 'lib/git.py'), {'p': True}))
        assert_equal('634396b2f541a9f2d58b00be1a07f0c358b999b3', c.hexsha)
        assert_equal('Tom Preston-Werner', c.author.name)
        assert_equal('tom@mojombo.com', c.author.email)
        assert_equal(1191997100, c.authored_date)
        assert_equal('Tom Preston-Werner', c.committer.name)
        assert_equal('tom@mojombo.com', c.committer.email)
        assert_equal(1191997100, c.committed_date)
        assert_equal('initial grit setup', c.message)
        # test the 'lines per commit' entries
        tlist = b[0][1]
        assert_true( tlist )
        assert_true( isinstance( tlist[0], basestring ) )
        assert_true( len( tlist ) < sum( len(t) for t in tlist ) ) # test for single-char bug

    def test_blame_real(self):
        c = 0
        for item in self.rorepo.head.commit.tree.traverse(
                predicate=lambda i, d: i.type == 'blob' and i.path.endswith('.py')):
            c += 1
            b = self.rorepo.blame(self.rorepo.head, item.path)
        #END for each item to traverse
        assert c

    def test_untracked_files(self):
        base = self.rorepo.working_tree_dir
        files = ( join_path_native(base, "__test_myfile"),
                  join_path_native(base, "__test_other_file") )
        num_recently_untracked = 0
        try:
            for fpath in files:
                fd = open(fpath,"wb")
                fd.close()
            # END for each filename
            untracked_files = self.rorepo.untracked_files
            num_recently_untracked = len(untracked_files)
            # assure we have all names - they are relative to the git-dir
            num_test_untracked = 0
            for utfile in untracked_files:
                num_test_untracked += join_path_native(base, utfile) in files
            assert len(files) == num_test_untracked
        finally:
            for fpath in files:
                if os.path.isfile(fpath):
                    os.remove(fpath)
        # END handle files
        assert len(self.rorepo.untracked_files) == (num_recently_untracked - len(files))

    def test_config_reader(self):
        reader = self.rorepo.config_reader()  # all config files
        assert reader.read_only
        reader = self.rorepo.config_reader("repository")  # single config file
        assert reader.read_only

    def test_config_writer(self):
        for config_level in self.rorepo.config_level:
            try:
                writer = self.rorepo.config_writer(config_level)
                assert not writer.read_only
            except IOError:
                # its okay not to get a writer for some configuration files if we
                # have no permissions
                pass
        # END for each config level

    def test_creation_deletion(self):
        # just a very quick test to assure it generally works. There are
        # specialized cases in the test_refs module
        head = self.rorepo.create_head("new_head", "HEAD~1")
        self.rorepo.delete_head(head)
        tag = self.rorepo.create_tag("new_tag", "HEAD~2")
        self.rorepo.delete_tag(tag)
        self.rorepo.config_writer()
        remote = self.rorepo.create_remote("new_remote", "git@server:repo.git")
        self.rorepo.delete_remote(remote)

    def test_comparison_and_hash(self):
        # this is only a preliminary test, more testing done in test_index
        assert self.rorepo == self.rorepo and not (self.rorepo != self.rorepo)
        assert len(set((self.rorepo, self.rorepo))) == 1

    def test_git_cmd(self):
        # test CatFileContentStream, just to be very sure we have no fencepost errors
        # last \n is the terminating newline that it expects
        l1 = "0123456789\n"
        l2 = "abcdefghijklmnopqrstxy\n"
        l3 = "z\n"
        d = "%s%s%s\n" % (l1, l2, l3)
        l1p = l1[:5]
        # full size
        # size is without terminating newline
        def mkfull():
            return Git.CatFileContentStream(len(d)-1, StringIO(d))
        ts = 5
        def mktiny():
            return Git.CatFileContentStream(ts, StringIO(d))
        # readlines no limit
        s = mkfull()
        lines = s.readlines()
        assert len(lines) == 3 and lines[-1].endswith('\n')
        assert s._stream.tell() == len(d)  # must have scrubbed to the end
        # realines line limit
        s = mkfull()
        lines = s.readlines(5)
        assert len(lines) == 1
        # readlines on tiny sections
        s = mktiny()
        lines = s.readlines()
        assert len(lines) == 1 and lines[0] == l1p
        assert s._stream.tell() == ts+1
        # readline no limit
        s = mkfull()
        assert s.readline() == l1
        assert s.readline() == l2
        assert s.readline() == l3
        assert s.readline() == ''
        assert s._stream.tell() == len(d)
        # readline limit
        s = mkfull()
        assert s.readline(5) == l1p
        assert s.readline() == l1[5:]
        # readline on tiny section
        s = mktiny()
        assert s.readline() == l1p
        assert s.readline() == ''
        assert s._stream.tell() == ts+1
        # read no limit
        s = mkfull()
        assert s.read() == d[:-1]
        assert s.read() == ''
        assert s._stream.tell() == len(d)
        # read limit
        s = mkfull()
        assert s.read(5) == l1p
        assert s.read(6) == l1[5:]
        assert s._stream.tell() == 5 + 6 # its not yet done
        # read tiny
        s = mktiny()
        assert s.read(2) == l1[:2]
        assert s._stream.tell() == 2
        assert s.read() == l1[2:ts]
        assert s._stream.tell() == ts+1

    def _assert_rev_parse_types(self, name, rev_obj):
        rev_parse = self.rorepo.rev_parse
        if rev_obj.type == 'tag':
            rev_obj = rev_obj.object
        # tree and blob type
        obj = rev_parse(name + '^{tree}')
        assert obj == rev_obj.tree
        obj = rev_parse(name + ':CHANGES')
        assert obj.type == 'blob' and obj.path == 'CHANGES'
        assert rev_obj.tree['CHANGES'] == obj

    def _assert_rev_parse(self, name):
        """tries multiple different rev-parse syntaxes with the given name
        :return: parsed object"""
        rev_parse = self.rorepo.rev_parse
        orig_obj = rev_parse(name)
        if orig_obj.type == 'tag':
            obj = orig_obj.object
        else:
            obj = orig_obj
        # END deref tags by default
        # try history
        rev = name + "~"
        obj2 = rev_parse(rev)
        assert obj2 == obj.parents[0]
        self._assert_rev_parse_types(rev, obj2)
        # history with number
        ni = 11
        history = [obj.parents[0]]
        for pn in range(ni):
            history.append(history[-1].parents[0])
        # END get given amount of commits
        for pn in range(11):
            rev = name + "~%i" % (pn+1)
            obj2 = rev_parse(rev)
            assert obj2 == history[pn]
            self._assert_rev_parse_types(rev, obj2)
        # END history check
        # parent ( default )
        rev = name + "^"
        obj2 = rev_parse(rev)
        assert obj2 == obj.parents[0]
        self._assert_rev_parse_types(rev, obj2)
        # parent with number
        for pn, parent in enumerate(obj.parents):
            rev = name + "^%i" % (pn+1)
            assert rev_parse(rev) == parent
            self._assert_rev_parse_types(rev, parent)
        # END for each parent
        return orig_obj

    @with_rw_repo('HEAD', bare=False)
    def test_rw_rev_parse(self, rwrepo):
        # verify it does not confuse branches with hexsha ids
        ahead = rwrepo.create_head('aaaaaaaa')
        assert(rwrepo.rev_parse(str(ahead)) == ahead.commit)

    def test_rev_parse(self):
        rev_parse = self.rorepo.rev_parse
        # try special case: This one failed at some point, make sure its fixed
        assert rev_parse("33ebe").hexsha == "33ebe7acec14b25c5f84f35a664803fcab2f7781"
        # start from reference
        num_resolved = 0
        for ref in Reference.iter_items(self.rorepo):
            path_tokens = ref.path.split("/")
            for pt in range(len(path_tokens)):
                path_section = '/'.join(path_tokens[-(pt+1):])
                try:
                    obj = self._assert_rev_parse(path_section)
                    assert obj.type == ref.object.type
                    num_resolved += 1
                except BadObject:
                    print "failed on %s" % path_section
                    # is fine, in case we have something like 112, which belongs to remotes/rname/merge-requests/112
                    pass
                # END exception handling
            # END for each token
        # END for each reference
        assert num_resolved
        # it works with tags !
        tag = self._assert_rev_parse('0.1.4')
        assert tag.type == 'tag'
        # try full sha directly ( including type conversion )
        assert tag.object == rev_parse(tag.object.hexsha)
        self._assert_rev_parse_types(tag.object.hexsha, tag.object)
        # multiple tree types result in the same tree: HEAD^{tree}^{tree}:CHANGES
        rev = '0.1.4^{tree}^{tree}'
        assert rev_parse(rev) == tag.object.tree
        assert rev_parse(rev+':CHANGES') == tag.object.tree['CHANGES']
        # try to get parents from first revision - it should fail as no such revision
        # exists
        first_rev = "33ebe7acec14b25c5f84f35a664803fcab2f7781"
        commit = rev_parse(first_rev)
        assert len(commit.parents) == 0
        assert commit.hexsha == first_rev
        self.failUnlessRaises(BadObject, rev_parse, first_rev+"~")
        self.failUnlessRaises(BadObject, rev_parse, first_rev+"^")
        # short SHA1
        commit2 = rev_parse(first_rev[:20])
        assert commit2 == commit
        commit2 = rev_parse(first_rev[:5])
        assert commit2 == commit
        # todo: dereference tag into a blob 0.1.7^{blob} - quite a special one
        # needs a tag which points to a blob
        # ref^0 returns commit being pointed to, same with ref~0, and ^{}
        tag = rev_parse('0.1.4')
        for token in (('~0', '^0', '^{}')):
            assert tag.object == rev_parse('0.1.4%s' % token)
        # END handle multiple tokens
        # try partial parsing
        max_items = 40
        for i, binsha in enumerate(self.rorepo.odb.sha_iter()):
            assert rev_parse(bin_to_hex(binsha)[:8-(i%2)]).binsha == binsha
            if i > max_items:
                # this is rather slow currently, as rev_parse returns an object
                # which requires accessing packs, it has some additional overhead
                break
        # END for each binsha in repo
        # missing closing brace commit^{tree
        self.failUnlessRaises(ValueError, rev_parse, '0.1.4^{tree')
        # missing starting brace
        self.failUnlessRaises(ValueError, rev_parse, '0.1.4^tree}')
        # REVLOG
        #######
        head = self.rorepo.head
        # need to specify a ref when using the @ syntax
        self.failUnlessRaises(BadObject, rev_parse, "%s@{0}" % head.commit.hexsha)
        # uses HEAD.ref by default
        assert rev_parse('@{0}') == head.commit
        if not head.is_detached:
            refspec = '%s@{0}' % head.ref.name
            assert rev_parse(refspec) == head.ref.commit
            # all additional specs work as well
            assert rev_parse(refspec+"^{tree}") == head.commit.tree
            assert rev_parse(refspec+":CHANGES").type == 'blob'
        #END operate on non-detached head
        # the last position
        assert rev_parse('@{1}') != head.commit
        # position doesn't exist
        self.failUnlessRaises(IndexError, rev_parse, '@{10000}')
        # currently, nothing more is supported
        self.failUnlessRaises(NotImplementedError, rev_parse, "@{1 week ago}")

    def test_repo_odbtype(self):
        target_type = GitDB
        if sys.version_info[1] < 5:
            target_type = GitCmdObjectDB
        assert isinstance(self.rorepo.odb, target_type)

    def test_submodules(self):
        assert len(self.rorepo.submodules) == 1  # non-recursive
        assert len(list(self.rorepo.iter_submodules())) >= 2
        assert isinstance(self.rorepo.submodule("gitdb"), Submodule)
        self.failUnlessRaises(ValueError, self.rorepo.submodule, "doesn't exist")

    @with_rw_repo('HEAD', bare=False)
    def test_submodule_update(self, rwrepo):
        # fails in bare mode
        rwrepo._bare = True
        self.failUnlessRaises(InvalidGitRepositoryError, rwrepo.submodule_update)
        rwrepo._bare = False
        # test create submodule
        sm = rwrepo.submodules[0]
        sm = rwrepo.create_submodule("my_new_sub", "some_path", join_path_native(self.rorepo.working_tree_dir, sm.path))
        assert isinstance(sm, Submodule)
        # note: the rest of this functionality is tested in test_submodule
|
|
"""
Different helpful functions, objects, methods are collected here.
"""
from __future__ import division, print_function, absolute_import
from collections import OrderedDict
import numexpr
import numpy
import pandas
from sklearn.utils.validation import column_or_1d
from sklearn.metrics import roc_curve
def weighted_percentile(array, percentiles, sample_weight=None, array_sorted=False, old_style=False):
    """Compute weighted percentiles of ``array``.

    :param array: values
    :param percentiles: requested quantile positions, each in [0, 1]
    :param sample_weight: optional per-sample weights (uniform when None)
    :param bool array_sorted: set True when ``array`` is already sorted
    :param bool old_style: mimic numpy.percentile's quantile convention
    :return: numpy.array of interpolated percentile values
    """
    array = numpy.array(array)
    percentiles = numpy.array(percentiles)
    assert numpy.all(percentiles >= 0) and numpy.all(percentiles <= 1), 'Percentiles should be in [0, 1]'
    weights = check_sample_weight(array, sample_weight)
    if not array_sorted:
        array, weights = reorder_by_first(array, weights)
    # Weighted quantile position of each sorted sample.
    quantiles = numpy.cumsum(weights) - 0.5 * weights
    if old_style:
        # To be convenient with numpy.percentile
        quantiles -= quantiles[0]
        quantiles /= quantiles[-1]
    else:
        quantiles /= numpy.sum(weights)
    return numpy.interp(percentiles, quantiles, array)
def reorder_by_first(*arrays):
    """Sort by the first array and apply the same permutation to all arrays.

    :return: list of reordered arrays
    """
    arrays = check_arrays(*arrays)
    permutation = numpy.argsort(arrays[0])
    return [array[permutation] for array in arrays]
def check_sample_weight(y_true, sample_weight):
    """Check the sample weights, returning a normalized float array.

    :param y_true: labels (used only for length validation)
    :param sample_weight: None (uniform weights) or array-like of weights
    :return: numpy.array of float weights with the same length as ``y_true``
    :raises AssertionError: when lengths differ
    """
    if sample_weight is None:
        # Uniform weights by default.
        return numpy.ones(len(y_true), dtype=float)
    else:
        # numpy.float was removed in NumPy 1.24; the builtin float is equivalent.
        sample_weight = numpy.array(sample_weight, dtype=float)
        assert len(y_true) == len(sample_weight), \
            "The length of weights is different: not {0}, but {1}".format(len(y_true), len(sample_weight))
        return sample_weight
class Flattener(object):
    """Monotonic transformation flattening a reference distribution.

    The fitted callable maps values so that the reference ``data`` becomes
    uniform on [0, 1].

    Parameters:
    -----------
    :param data: predictions
    :type data: list or numpy.array
    :param sample_weight: weights
    :type sample_weight: None or list or numpy.array

    Example:
    --------
    >>> normalizer = Flattener(signal)
    >>> hist(normalizer(background))
    >>> hist(normalizer(signal))

    :return func: normalization function
    """
    def __init__(self, data, sample_weight=None):
        weights = check_sample_weight(data, sample_weight=sample_weight)
        assert numpy.all(weights >= 0.), 'sample weight must be non-negative'
        values = column_or_1d(data)
        self.data, weights = reorder_by_first(values, weights)
        # Weighted empirical CDF evaluated at the sorted reference points.
        self.predictions = numpy.cumsum(weights) / numpy.sum(weights)

    def __call__(self, data):
        # Piecewise-linear interpolation of the empirical CDF.
        return numpy.interp(data, self.data, self.predictions)
class Binner:
    """Split values into a fixed number of equally-populated bins.

    The constructor computes ``bins_number - 1`` percentile limits from the
    given values; later arrays can then be mapped to bin indices.
    """
    def __init__(self, values, bins_number):
        """Compute the bin limits as equally spaced percentiles of ``values``."""
        percentiles = [i * 100.0 / bins_number for i in range(1, bins_number)]
        self.limits = numpy.percentile(values, percentiles)

    def get_bins(self, values):
        """Return the bin index (0 .. bins_number - 1) for each value."""
        return numpy.searchsorted(self.limits, values)

    def get_bins_dumb(self, values):
        """Same result as get_bins, but computed naively (and more slowly)."""
        counts = numpy.zeros(len(values))
        for limit in self.limits:
            counts += values > limit
        return counts

    def set_limits(self, limits):
        """Override the computed bin limits."""
        self.limits = limits

    def bins_number(self):
        """Return the number of bins (one more than the number of limits)."""
        return len(self.limits) + 1

    def split_into_bins(self, *arrays):
        """Split parallel arrays into bins; the first array is the binning variable.

        :return: list with one entry per bin, each a list of numpy.arrays
        """
        values = arrays[0]
        for array in arrays:
            assert len(array) == len(values), "passed arrays have different length"
        bins = self.get_bins(values)
        return [[numpy.array(array)[bins == index] for array in arrays]
                for index in range(len(self.limits) + 1)]
def calc_ROC(prediction, signal, sample_weight=None, max_points=10000):
    """
    Calculate roc curve, returns limited number of points.

    :param prediction: predictions
    :type prediction: array or list
    :param signal: true labels
    :type signal: array or list
    :param sample_weight: weights
    :type sample_weight: None or array or list
    :param int max_points: maximum of used points on roc curve
    :return: (tpr, tnr), (err_tnr, err_tpr), thresholds
    """
    # uniform weights by default
    sample_weight = numpy.ones(len(signal)) if sample_weight is None else sample_weight
    prediction, signal, sample_weight = check_arrays(prediction, signal, sample_weight)
    assert set(signal) == {0, 1}, "the labels should be 0 and 1, labels are " + str(set(signal))
    fpr, tpr, thresholds = roc_curve(signal, prediction, sample_weight=sample_weight)
    # prepend a "reject everything" point so the curve starts at (0, 0);
    # its threshold is set just above the largest real threshold
    tpr = numpy.insert(tpr, 0, 0.)
    fpr = numpy.insert(fpr, 0, 0.)
    thresholds = numpy.insert(thresholds, 0, thresholds[0] + 1.)
    tnr = 1 - fpr
    weight_bck = sample_weight[signal == 0]
    weight_sig = sample_weight[signal == 1]
    # binomial-style uncertainties using the effective statistics of the weights
    err_tnr = numpy.sqrt(tnr * (1 - tnr) * numpy.sum(weight_bck ** 2)) / numpy.sum(weight_bck)
    err_tpr = numpy.sqrt(tpr * (1 - tpr) * numpy.sum(weight_sig ** 2)) / numpy.sum(weight_sig)
    if len(prediction) > max_points:
        # thin the curve down to max_points, sampling positions uniformly
        # along the (normalized) cumulative progress of the curve
        sum_weights = numpy.cumsum((fpr + tpr) / 2.)
        sum_weights /= sum_weights[-1]
        positions = numpy.searchsorted(sum_weights, numpy.linspace(0, 1, max_points))
        tpr, tnr = tpr[positions], tnr[positions]
        err_tnr, err_tpr = err_tnr[positions], err_tpr[positions]
        thresholds = thresholds[positions]
    return (tpr, tnr), (err_tnr, err_tpr), thresholds
def calc_feature_correlation_matrix(df):
    """
    Calculate the correlation matrix between the DataFrame's features.

    :param pandas.DataFrame df: data
    :return: correlation matrix for dataFrame
    :rtype: numpy.ndarray
    """
    # TODO use weights
    features_as_rows = df.values.T
    return numpy.corrcoef(features_as_rows)
def calc_hist_with_errors(x, weight=None, bins=60, normed=True, x_range=None, ignored_sideband=0.0):
    """
    Calculate data for error bar (for plot pdf with errors)

    :param x: data
    :type x: list or numpy.array
    :param weight: weights
    :type weight: None or list or numpy.array
    :param bins: number of histogram bins (passed to numpy.histogram)
    :param bool normed: if True, normalize the histogram to a density
    :param x_range: (min, max) histogram range; when None it is derived
        from the data percentiles
    :param float ignored_sideband: fraction of each tail dropped when
        deriving x_range from the data
    :return: tuple (x-points (list), y-points (list), y points errors (list), x points errors (list))
    """
    weight = numpy.ones(len(x)) if weight is None else weight
    x, weight = check_arrays(x, weight)
    if x_range is None:
        x_range = numpy.percentile(x, [100 * ignored_sideband, 100 * (1 - ignored_sideband)])
    # FIX: the `normed` keyword was removed from numpy.histogram in numpy
    # 1.24; `density` (available since numpy 1.6) is equivalent here since
    # the bins are equal-width over x_range.
    ans, bins = numpy.histogram(x, bins=bins, density=normed, weights=weight, range=x_range)
    yerr = []
    normalization = 1.0
    if normed:
        # density normalization factor: n_bins / total_weight / range_width
        normalization = float(len(bins) - 1) / float(sum(weight)) / (x_range[1] - x_range[0])
    for i in range(len(bins) - 1):
        # sum of squared weights inside the bin gives the Poisson-like error
        weight_bin = weight[(x > bins[i]) * (x <= bins[i + 1])]
        yerr.append(numpy.sqrt(sum(weight_bin * weight_bin)) * normalization)
    bins_mean = [0.5 * (bins[i] + bins[i + 1]) for i in range(len(ans))]
    xerr = [0.5 * (bins[i + 1] - bins[i]) for i in range(len(ans))]
    return bins_mean, ans, yerr, xerr
def get_efficiencies(prediction, spectator, sample_weight=None, bins_number=20,
                     thresholds=None, errors=False, ignored_sideband=0.0):
    """
    Construct efficiency function dependent on spectator for each threshold

    Different score functions available: Efficiency, Precision, Recall, F1Score,
    and other things from sklearn.metrics

    Parameters:
    -----------
    :param prediction: list of probabilities
    :param spectator: list of spectator's values
    :param sample_weight: optional per-event weights (masked with the data)
    :param bins_number: int, count of bins for plot
    :param thresholds: list of prediction's threshold
        (default=prediction's cuts for which efficiency will be [0.2, 0.4, 0.5, 0.6, 0.8])
    :param bool errors: if True, y/x error estimates are added to each entry
    :param float ignored_sideband: fraction of each spectator tail to drop
    :return:
        if errors=False
            OrderedDict threshold -> (x_values, y_values)
        if errors=True
            OrderedDict threshold -> (x_values, y_values, y_err, x_err)
    All the parts: x_values, y_values, y_err, x_err are numpy.arrays of the same length.
    """
    prediction, spectator = \
        check_arrays(prediction, spectator)
    # Trim the requested sidebands of the spectator distribution.
    spectator_min, spectator_max = numpy.percentile(spectator, [100 * ignored_sideband, 100 * (1. - ignored_sideband)])
    mask = (spectator >= spectator_min) & (spectator <= spectator_max)
    spectator = spectator[mask]
    prediction = prediction[mask]
    bins_number = min(bins_number, len(prediction))
    sample_weight = sample_weight if sample_weight is None else numpy.array(sample_weight)[mask]
    if thresholds is None:
        # Default thresholds: prediction cuts giving fixed global efficiencies.
        thresholds = [weighted_percentile(prediction, percentiles=1 - eff, sample_weight=sample_weight)
                      for eff in [0.2, 0.4, 0.5, 0.6, 0.8]]
    binner = Binner(spectator, bins_number=bins_number)
    bins_data = binner.split_into_bins(spectator, prediction)
    bin_edges = numpy.array([spectator_min] + list(binner.limits) + [spectator_max])
    xerr = numpy.diff(bin_edges) / 2.
    result = OrderedDict()
    for threshold in thresholds:
        x_values = []
        y_values = []
        for num, (masses, probabilities) in enumerate(bins_data):
            # efficiency = fraction of events in the bin above threshold
            y_values.append(numpy.mean(probabilities > threshold))
            if errors:
                # bin centers (matching xerr half-widths)
                x_values.append((bin_edges[num + 1] + bin_edges[num]) / 2.)
            else:
                # mean spectator value inside the bin
                x_values.append(numpy.mean(masses))
        x_values, y_values = check_arrays(x_values, y_values)
        if errors:
            # binomial error estimate on the per-bin efficiency
            result[threshold] = (x_values, y_values, numpy.sqrt(y_values * (1 - y_values) / len(y_values)), xerr)
        else:
            result[threshold] = (x_values, y_values)
    return result
def train_test_split(*arrays, **kw_args):
    """Does the same thing as sklearn's train_test_split, but preserves columns in DataFrames.

    Uses the same parameters: test_size, train_size, random_state, and has the same interface

    :param arrays: arrays to split
    :type arrays: list[numpy.array] or list[pandas.DataFrame]
    :param bool allow_none: default False; when True, a None entry is split
        into (None, None) instead of failing (specially for sample_weight)
    """
    # sklearn.cross_validation was renamed to sklearn.model_selection in
    # 0.18 and removed in 0.20; support both import paths.
    try:
        from sklearn.model_selection import train_test_split as sklearn_split
    except ImportError:
        from sklearn.cross_validation import train_test_split as sklearn_split
    allow_none = kw_args.pop('allow_none', None)
    assert len(arrays) > 0, "at least one array should be passed"
    length = len(arrays[0])
    for array in arrays:
        if array is None:
            # len(None) raised TypeError and defeated allow_none;
            # None entries are handled in the splitting loop below.
            continue
        assert len(array) == length, "different size"
    # Split indices once, then apply the same split to every array.
    train_indices, test_indices = sklearn_split(numpy.arange(length), **kw_args)
    result = []
    for array in arrays:
        if isinstance(array, pandas.DataFrame):
            # positional indexing keeps the DataFrame columns intact
            result.append(array.iloc[train_indices, :])
            result.append(array.iloc[test_indices, :])
        elif (array is None) and allow_none:
            # specially for checking weights
            result.append(None)
            result.append(None)
        else:
            result.append(numpy.array(array)[train_indices])
            result.append(numpy.array(array)[test_indices])
    return result
def train_test_split_group(group_column, *arrays, **kw_args):
    """Does the same thing as train_test_split, but preserves names of columns in DataFrames.

    Uses the same parameters: test_size, train_size, random_state, and has almost the same interface

    :param arrays: arrays to split
    :type arrays: list[numpy.array] or list[pandas.DataFrame]
    :param group_column: array-like of shape [n_samples] with indices of groups,
        events from one group will be kept together (all events in train or all events in test).
        If `group_column` is used, train_size and test_size will refer to number of groups, not events
    :param bool allow_none: default False
        (specially for sample_weight - after splitting train and test of `None` are `None` too)
    """
    # sklearn.cross_validation was renamed to sklearn.model_selection in
    # 0.18 and removed in 0.20; support both import paths.
    try:
        from sklearn.model_selection import train_test_split as sklearn_split
    except ImportError:
        from sklearn.cross_validation import train_test_split as sklearn_split
    allow_none = kw_args.pop('allow_none', None)
    assert len(arrays) > 0, "at least one array should be passed"
    length = len(arrays[0])
    for array in arrays:
        if array is None:
            # len(None) raised TypeError and defeated allow_none;
            # None entries are handled in the splitting loop below.
            continue
        assert len(array) == length, "different size"
    initial_data = numpy.array(group_column)
    assert len(initial_data) == length, "group column must have the same length"
    # Split the *group ids*, then expand the split to per-event boolean masks
    # so each group ends up entirely in train or entirely in test.
    group_ids = numpy.unique(initial_data)
    train_indices, test_indices = sklearn_split(group_ids, **kw_args)
    train_indices = numpy.in1d(initial_data, train_indices)
    test_indices = numpy.in1d(initial_data, test_indices)
    result = []
    for array in arrays:
        if isinstance(array, pandas.DataFrame):
            # positional indexing keeps the DataFrame columns intact
            result.append(array.iloc[train_indices, :])
            result.append(array.iloc[test_indices, :])
        elif (array is None) and allow_none:
            # specially for checking weights
            result.append(None)
            result.append(None)
        else:
            result.append(numpy.array(array)[train_indices])
            result.append(numpy.array(array)[test_indices])
    return result
def get_columns_dict(columns):
    """
    Get (new column: old column) dict expressions

    Each entry is either a plain column name or "new_name: expression".

    :param list[str] columns: columns names
    :rtype: dict
    """
    result = OrderedDict()
    for expr in columns:
        parts = expr.split(':')
        assert len(parts) < 3, 'Error in parsing feature expression {}'.format(expr)
        if len(parts) == 2:
            new_name, expression = parts
            result[new_name.strip()] = expression.strip()
        else:
            # plain column: maps to itself
            result[expr] = expr
    return result
def get_columns_in_df(df, columns):
    """
    Get columns in data frame using *numexpr* evaluation

    :param pandas.DataFrame df: data
    :param columns: necessary columns
    :type columns: None or list[str]
    :return: data frame with pointed columns
    """
    if columns is None:
        return df
    # Evaluate each "new name -> expression" pair against the frame columns.
    evaluated = OrderedDict(
        (new_name, numexpr.evaluate(expression, local_dict=df))
        for new_name, expression in get_columns_dict(columns).items()
    )
    return pandas.DataFrame(evaluated)
def check_arrays(*arrays):
    """Convert the inputs to numpy arrays and check their lengths agree.

    ``None`` entries are passed through unchanged and are excluded from
    the length check.

    :param arrays: sequences (or None) to convert
    :return: list with a numpy.array for each non-None input, None otherwise
    :raises AssertionError: if no arrays are given or the lengths differ
    """
    assert len(arrays) > 0, 'The number of array must be greater than zero'
    checked_arrays = []
    shapes = []
    for arr in arrays:
        if arr is not None:
            checked_arrays.append(numpy.array(arr))
            shapes.append(checked_arrays[-1].shape[0])
        else:
            checked_arrays.append(arr)
    # FIX: the original indexed shapes[0] and raised IndexError when every
    # input was None; a set comparison also reads more directly.
    assert len(set(shapes)) <= 1, 'Different shapes of the arrays {}'.format(shapes)
    return checked_arrays
|
|
import os
import sys
import grammar
class Token(object):
    """A lexical token produced by the Lexer.

    Attributes:
        symbol: the grammar symbol this token matched
        lexeme: the exact text that was matched
        position: (line, column) where the match started
    """

    def __init__(self, symbol, lexeme, position):
        self.symbol = symbol
        self.lexeme = lexeme
        self.position = position

    def __str__(self):
        # e.g.  Identifier 'foo'
        return "%s %s" % (self.symbol.id, repr(self.lexeme))
class Lexer(object):
    """Lexical Analyzer class which generate tokens from string.

    It works by walking the DFA stored in the grammar. Input is buffered
    in 4096-char chunks; ``peek_token`` never consumes input, while
    ``read_token`` consumes it and also folds lexical groups (e.g. nested
    comments) into single tokens.
    """

    def __init__(self, grammar):
        self.grammar = grammar
        # Start unloaded; call load_file/load_string before reading tokens.
        self._load(None, False)

    def load_file(self, file_or_path, encoding=None):
        """ Load a file to lexer.
        File_or_path could be file object or file name.
        """
        if (isinstance(file_or_path, str) or
            isinstance(file_or_path, unicode)):
            import codecs
            if encoding:
                self._load(codecs.open(file_or_path, encoding=encoding), True)
            else:
                self._load(open(file_or_path, "rb"), False)
        else:
            # Already a file-like object; treat it as unicode when an
            # encoding was supplied by the caller.
            self._load(file_or_path, encoding is not None)

    def load_string(self, s):
        """ Load a string to lexer.
        """
        import StringIO
        # FIX: the original used `s is unicode`, which compares `s` with
        # the *type object* itself and is therefore always False; test the
        # type of the argument instead.
        self._load(StringIO.StringIO(s), isinstance(s, unicode))

    def _load(self, file, is_unicode):
        # Reset all buffer/cursor state for a new input source.
        self.file = file
        self.is_unicode = is_unicode
        self.buf = u"" if is_unicode else str()
        self.buf_cur = 0       # index of the read cursor inside self.buf
        self.buf_remain = 0    # number of buffered chars after the cursor
        self.line = 1
        self.column = 1
        self.group_stack = []  # stack of [group, accumulated lexeme, position]

    def _load_buffer(self):
        # shrink buffer
        if self.buf_cur >= 4096:
            self.buf = self.buf[self.buf_cur:]
            self.buf_cur = 0
        # read into buffer
        self.buf += self.file.read(4096)
        self.buf_remain = len(self.buf) - self.buf_cur

    def _consume_buffer(self, n):
        """Advance the read cursor by n chars, updating line/column."""
        # update line, column position
        start = self.buf_cur
        new_line_i = -1
        while True:
            i = self.buf.find("\n", start, self.buf_cur + n)
            if i != -1:
                start = new_line_i = i + 1
                self.line += 1
            else:
                if new_line_i == -1:
                    self.column += n
                else:
                    # column restarts after the last newline consumed
                    self.column = 1 + self.buf_cur + n - new_line_i
                break
        # manipulate buffer
        if n < self.buf_remain:
            self.buf_cur += n
            self.buf_remain -= n
        else:
            self.buf = u"" if self.is_unicode else str()
            self.buf_cur = 0
            self.buf_remain = 0

    @property
    def position(self):
        """(line, column) of the read cursor."""
        return (self.line, self.column)

    def peek_token(self):
        """ peek next token and return it
        it doesn't change any cursor state of lexer.
        """
        state = self.grammar.dfainit
        cur = 0
        hit_symbol = None  # longest acceptable match found so far
        while True:
            if cur < self.buf_remain:  # peek 1 char
                c = self.buf[self.buf_cur + cur]
            else:
                self._load_buffer()
                if cur < self.buf_remain:
                    c = self.buf[self.buf_cur + cur]
                else:
                    break  # if EOF
            cur += 1
            next_index = -1  # find next state
            c_ord = ord(c)
            for (r_min, r_max), target_index, target in state.edges_lookup:
                if c_ord >= r_min and c_ord <= r_max:
                    next_index = target_index
                    next_state = target
                    break
            if next_index == -3:
                continue
            elif next_index == -2:
                hit_cur = cur
                continue
            elif next_index == -1:
                # no transition: stop at the longest match found
                break
            else:
                state = next_state
                if next_state.accept_symbol:  # keep acceptable
                    hit_symbol = next_state.accept_symbol
                    hit_cur = cur
        if hit_symbol:
            lexeme = self.buf[self.buf_cur:self.buf_cur + hit_cur]
            return Token(hit_symbol, lexeme, self.position)
        else:
            if cur == 0:
                return Token(self.grammar.symbol_EOF, "", self.position)
            else:
                # nothing matched: report the scanned text as an error token
                lexeme = self.buf[self.buf_cur:self.buf_cur + cur]
                return Token(self.grammar.symbol_Error, lexeme, self.position)

    def read_token(self):
        """ Read next token and return it.
        It moves a read cursor forward and it processes a lexical group.
        """
        while True:
            token = self.peek_token()
            # check if a start of new group
            if token.symbol.type == grammar.SymbolType.GROUP_START:
                symbol_group = [g for g in self.grammar.symbolgroups.itervalues() if g.start == token.symbol][0]
                if len(self.group_stack) == 0:
                    nest_group = True
                else:
                    nest_group = symbol_group in self.group_stack[-1][0].nesting_groups
            else:
                nest_group = False
            if nest_group:
                # into nested
                self._consume_buffer(len(token.lexeme))
                self.group_stack.append([symbol_group,
                                         token.lexeme, token.position])
            elif len(self.group_stack) == 0:
                # token in plain
                self._consume_buffer(len(token.lexeme))
                return token
            elif self.group_stack[-1][0].end == token.symbol:
                # out of nested
                pop = self.group_stack.pop()
                if pop[0].ending_mode == grammar.EndingModeType.CLOSED:
                    # the closing lexeme belongs to the group text
                    pop[1] = pop[1] + token.lexeme
                    self._consume_buffer(len(token.lexeme))
                if len(self.group_stack) == 0:
                    return Token(pop[0].container, pop[1], pop[2])
                else:
                    # still nested: fold the finished group into its parent
                    self.group_stack[-1][1] = self.group_stack[-1][1] + pop[1]
            elif token.symbol == self.grammar.symbol_EOF:
                # EOF in nested
                return token
            else:
                # token in nested
                top = self.group_stack[-1]
                if top[0].advance_mode == grammar.AdvanceModeType.TOKEN:
                    top[1] = top[1] + token.lexeme
                    self._consume_buffer(len(token.lexeme))
                else:
                    # advance a single character at a time
                    top[1] = top[1] + token.lexeme[0]
                    self._consume_buffer(1)

    def read_token_all(self):
        """ Read all token until EOF.
        If no error return END_OF_FILE, otherwise ERROR.
        """
        ret = []
        while True:
            token = self.read_token()
            ret.append(token)
            if token.symbol.type in (grammar.SymbolType.END_OF_FILE,
                                     grammar.SymbolType.ERROR):
                break
        return ret
|
|
# Copyright (C) 2012 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.system_host_mock import MockSystemHost
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner, Sharder, TestRunInterruptedException
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models.test_run_results import TestRunResults
from webkitpy.layout_tests.models.test_input import TestInput
from webkitpy.layout_tests.models.test_results import TestResult
from webkitpy.layout_tests.port.test import TestPort
TestExpectations = test_expectations.TestExpectations
class FakePrinter(object):
    """Stand-in for the layout-test printer; every callback is a no-op."""

    num_completed = 0
    num_tests = 0

    def print_expected(self, run_results, get_tests_with_result_type):
        """No-op."""

    def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
        """No-op."""

    def print_started_test(self, test_name):
        """No-op."""

    def print_finished_test(self, result, expected, exp_str, got_str):
        """No-op."""

    def write(self, msg):
        """No-op."""

    def write_update(self, msg):
        """No-op."""

    def flush(self):
        """No-op."""
class LockCheckingRunner(LayoutTestRunner):
    """LayoutTestRunner that asserts http-lock bookkeeping during a run."""

    def __init__(self, port, options, printer, tester, http_lock):
        super(LockCheckingRunner, self).__init__(options, port, printer, port.results_directory(), lambda test_name: False)
        self._finished_list_called = False
        self._tester = tester  # the unittest.TestCase used for assertions
        self._should_have_http_lock = http_lock

    def handle_finished_list(self, source, list_name, num_tests, elapsed_time):
        # TODO(qyearsley): This is never called; it should be fixed or removed.
        self._tester.fail('This is never called')
        # NOTE(review): everything below is unreachable — fail() raises.
        if not self._finished_list_called:
            self._tester.assertEqual(list_name, 'locked_tests')
            self._tester.assertTrue(self._remaining_locked_shards)
            self._tester.assertTrue(self._has_http_lock is self._should_have_http_lock)
        super(LockCheckingRunner, self).handle_finished_list(source, list_name, num_tests, elapsed_time)
        if not self._finished_list_called:
            self._tester.assertEqual(self._remaining_locked_shards, [])
            self._tester.assertFalse(self._has_http_lock)
            self._finished_list_called = True
class LayoutTestRunnerTests(unittest.TestCase):
    """Unit tests for LayoutTestRunner failure limits and result summaries."""

    def _runner(self, port=None):
        # FIXME: we shouldn't have to use run_webkit_tests.py to get the options we need.
        options = run_webkit_tests.parse_args(['--platform', 'test-mac-mac10.11'])[0]
        options.child_processes = '1'
        host = MockHost()
        port = port or host.port_factory.get(options.platform, options=options)
        return LockCheckingRunner(port, options, FakePrinter(), self, True)

    def _run_tests(self, runner, tests):
        # Helper: run the given test names through the runner with one worker.
        test_inputs = [TestInput(test, timeout_ms=6000) for test in tests]
        expectations = TestExpectations(runner._port, tests)
        runner.run_tests(expectations, test_inputs, set(), num_workers=1)

    def test_interrupt_if_at_failure_limits(self):
        runner = self._runner()
        runner._options.exit_after_n_failures = None
        # NOTE(review): 'exit_after_n_crashes_or_times' looks like a typo for
        # 'exit_after_n_crashes_or_timeouts' (used below) — confirm.
        runner._options.exit_after_n_crashes_or_times = None
        test_names = ['passes/text.html', 'passes/image.html']
        runner._test_inputs = [TestInput(test_name, timeout_ms=6000) for test_name in test_names]  # pylint: disable=protected-access
        run_results = TestRunResults(TestExpectations(runner._port, test_names), len(test_names))
        run_results.unexpected_failures = 100
        run_results.unexpected_crashes = 50
        run_results.unexpected_timeouts = 50
        # No exception when the exit_after* options are None.
        runner._interrupt_if_at_failure_limits(run_results)
        # No exception when we haven't hit the limit yet.
        runner._options.exit_after_n_failures = 101
        runner._options.exit_after_n_crashes_or_timeouts = 101
        runner._interrupt_if_at_failure_limits(run_results)
        # Interrupt if we've exceeded either limit:
        runner._options.exit_after_n_crashes_or_timeouts = 10
        self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, run_results)
        # Remaining tests must be marked as skipped after an interrupt.
        self.assertEqual(run_results.results_by_name['passes/text.html'].type, test_expectations.SKIP)
        self.assertEqual(run_results.results_by_name['passes/image.html'].type, test_expectations.SKIP)
        runner._options.exit_after_n_crashes_or_timeouts = None
        runner._options.exit_after_n_failures = 10
        self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, run_results)

    def test_update_summary_with_result(self):
        # Reftests expected to be image mismatch should be respected when pixel_tests=False.
        runner = self._runner()
        runner._options.pixel_tests = False
        test = 'failures/expected/reftest.html'
        expectations = TestExpectations(runner._port, tests=[test])
        runner._expectations = expectations
        run_results = TestRunResults(expectations, 1)
        result = TestResult(test_name=test, failures=[test_failures.FailureReftestMismatchDidNotOccur()], reftest_type=['!='])
        runner._update_summary_with_result(run_results, result)
        # '!=' mismatch-did-not-occur is the expected outcome here.
        self.assertEqual(1, run_results.expected)
        self.assertEqual(0, run_results.unexpected)
        run_results = TestRunResults(expectations, 1)
        result = TestResult(test_name=test, failures=[], reftest_type=['=='])
        runner._update_summary_with_result(run_results, result)
        # A clean '==' pass contradicts the expected mismatch -> unexpected.
        self.assertEqual(0, run_results.expected)
        self.assertEqual(1, run_results.unexpected)
class SharderTests(unittest.TestCase):
    """Unit tests for Sharder.shard_tests grouping and locking behavior."""

    # Mix of http/perf tests (which require the server lock) and plain tests.
    test_list = [
        "http/tests/websocket/tests/unicode.htm",
        "animations/keyframes.html",
        "http/tests/security/view-source-no-refresh.html",
        "http/tests/websocket/tests/websocket-protocol-ignored.html",
        "fast/css/display-none-inline-style-change-crash.html",
        "http/tests/xmlhttprequest/supported-xml-content-types.html",
        "dom/html/level2/html/HTMLAnchorElement03.html",
        "dom/html/level2/html/HTMLAnchorElement06.html",
        "perf/object-keys.html",
        "virtual/threaded/dir/test.html",
        "virtual/threaded/fast/foo/test.html",
    ]

    def get_test_input(self, test_file):
        # http/ and perf/ tests need the http lock.
        return TestInput(test_file, requires_lock=(test_file.startswith('http') or test_file.startswith('perf')))

    def get_shards(self, num_workers, fully_parallel, run_singly, test_list=None, max_locked_shards=1):
        """Shard test_list (default: self.test_list); returns (locked, unlocked)."""
        port = TestPort(MockSystemHost())
        self.sharder = Sharder(port.split_test, max_locked_shards)
        test_list = test_list or self.test_list
        return self.sharder.shard_tests([self.get_test_input(test) for test in test_list],
                                        num_workers, fully_parallel, run_singly)

    def assert_shards(self, actual_shards, expected_shard_names):
        """Check shard names and the exact ordering of tests inside each shard."""
        self.assertEqual(len(actual_shards), len(expected_shard_names))
        for i, shard in enumerate(actual_shards):
            expected_shard_name, expected_test_names = expected_shard_names[i]
            self.assertEqual(shard.name, expected_shard_name)
            self.assertEqual([test_input.test_name for test_input in shard.test_inputs],
                             expected_test_names)

    def test_shard_by_dir(self):
        locked, unlocked = self.get_shards(num_workers=2, fully_parallel=False, run_singly=False)
        # Note that although there are tests in multiple dirs that need locks,
        # they are crammed into a single shard in order to reduce the # of
        # workers hitting the server at once.
        self.assert_shards(locked,
                           [('locked_shard_1',
                             ['http/tests/security/view-source-no-refresh.html',
                              'http/tests/websocket/tests/unicode.htm',
                              'http/tests/websocket/tests/websocket-protocol-ignored.html',
                              'http/tests/xmlhttprequest/supported-xml-content-types.html',
                              'perf/object-keys.html'])])
        self.assert_shards(unlocked,
                           [('virtual/threaded/dir', ['virtual/threaded/dir/test.html']),
                            ('virtual/threaded/fast/foo', ['virtual/threaded/fast/foo/test.html']),
                            ('animations', ['animations/keyframes.html']),
                            ('dom/html/level2/html', ['dom/html/level2/html/HTMLAnchorElement03.html',
                                                      'dom/html/level2/html/HTMLAnchorElement06.html']),
                            ('fast/css', ['fast/css/display-none-inline-style-change-crash.html'])])

    def test_shard_every_file(self):
        # fully_parallel puts each non-virtual test in its own '.' shard.
        locked, unlocked = self.get_shards(num_workers=2, fully_parallel=True, max_locked_shards=2, run_singly=False)
        self.assert_shards(locked,
                           [('locked_shard_1',
                             ['http/tests/websocket/tests/unicode.htm',
                              'http/tests/security/view-source-no-refresh.html',
                              'http/tests/websocket/tests/websocket-protocol-ignored.html']),
                            ('locked_shard_2',
                             ['http/tests/xmlhttprequest/supported-xml-content-types.html',
                              'perf/object-keys.html'])])
        self.assert_shards(unlocked,
                           [('virtual/threaded/dir', ['virtual/threaded/dir/test.html']),
                            ('virtual/threaded/fast/foo', ['virtual/threaded/fast/foo/test.html']),
                            ('.', ['animations/keyframes.html']),
                            ('.', ['fast/css/display-none-inline-style-change-crash.html']),
                            ('.', ['dom/html/level2/html/HTMLAnchorElement03.html']),
                            ('.', ['dom/html/level2/html/HTMLAnchorElement06.html'])])

    def test_shard_in_two(self):
        # A single worker gets exactly one locked and one unlocked shard.
        locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False, run_singly=False)
        self.assert_shards(locked,
                           [('locked_tests',
                             ['http/tests/websocket/tests/unicode.htm',
                              'http/tests/security/view-source-no-refresh.html',
                              'http/tests/websocket/tests/websocket-protocol-ignored.html',
                              'http/tests/xmlhttprequest/supported-xml-content-types.html',
                              'perf/object-keys.html'])])
        self.assert_shards(unlocked,
                           [('unlocked_tests',
                             ['animations/keyframes.html',
                              'fast/css/display-none-inline-style-change-crash.html',
                              'dom/html/level2/html/HTMLAnchorElement03.html',
                              'dom/html/level2/html/HTMLAnchorElement06.html',
                              'virtual/threaded/dir/test.html',
                              'virtual/threaded/fast/foo/test.html'])])

    def test_shard_in_two_has_no_locked_shards(self):
        locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False, run_singly=False,
                                           test_list=['animations/keyframe.html'])
        self.assertEqual(len(locked), 0)
        self.assertEqual(len(unlocked), 1)

    def test_shard_in_two_has_no_unlocked_shards(self):
        locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False, run_singly=False,
                                           test_list=['http/tests/websocket/tests/unicode.htm'])
        self.assertEqual(len(locked), 1)
        self.assertEqual(len(unlocked), 0)

    def test_multiple_locked_shards(self):
        # max_locked_shards caps how many locked shards are created.
        locked, _ = self.get_shards(num_workers=4, fully_parallel=False, max_locked_shards=2, run_singly=False)
        self.assert_shards(locked,
                           [('locked_shard_1',
                             ['http/tests/security/view-source-no-refresh.html',
                              'http/tests/websocket/tests/unicode.htm',
                              'http/tests/websocket/tests/websocket-protocol-ignored.html']),
                            ('locked_shard_2',
                             ['http/tests/xmlhttprequest/supported-xml-content-types.html',
                              'perf/object-keys.html'])])
        locked, _ = self.get_shards(num_workers=4, fully_parallel=False, run_singly=False)
        self.assert_shards(locked,
                           [('locked_shard_1',
                             ['http/tests/security/view-source-no-refresh.html',
                              'http/tests/websocket/tests/unicode.htm',
                              'http/tests/websocket/tests/websocket-protocol-ignored.html',
                              'http/tests/xmlhttprequest/supported-xml-content-types.html',
                              'perf/object-keys.html'])])

    def test_virtual_shards(self):
        # With run_singly=False, we try to keep all of the tests in a virtual suite together even
        # when fully_parallel=True, so that we don't restart every time the command line args change.
        _, unlocked = self.get_shards(num_workers=2, fully_parallel=True, max_locked_shards=2, run_singly=False,
                                      test_list=['virtual/foo/bar1.html', 'virtual/foo/bar2.html'])
        self.assert_shards(unlocked,
                           [('virtual/foo', ['virtual/foo/bar1.html', 'virtual/foo/bar2.html'])])
        # But, with run_singly=True, we have to restart every time anyway, so we want full parallelism.
        _, unlocked = self.get_shards(num_workers=2, fully_parallel=True, max_locked_shards=2, run_singly=True,
                                      test_list=['virtual/foo/bar1.html', 'virtual/foo/bar2.html'])
        self.assert_shards(unlocked,
                           [('.', ['virtual/foo/bar1.html']),
                            ('.', ['virtual/foo/bar2.html'])])
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
PXE Driver and supporting meta-classes.
"""
import os
import tempfile
import jinja2
from oslo.config import cfg
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common import image_service as service
from ironic.common import images
from ironic.common import states
from ironic.common import utils
from ironic.drivers import base
from ironic.drivers.modules import deploy_utils
from ironic.openstack.common import context
from ironic.openstack.common import fileutils
from ironic.openstack.common import lockutils
from ironic.openstack.common import log as logging
from ironic.openstack.common import loopingcall
# Configuration options for the PXE deploy driver, registered under [pxe].
pxe_opts = [
    cfg.StrOpt('deploy_kernel',
               help='Default kernel image ID used in deployment phase'),
    cfg.StrOpt('deploy_ramdisk',
               help='Default ramdisk image ID used in deployment phase'),
    cfg.StrOpt('net_config_template',
               default='$pybasedir/ironic/net-dhcp.ubuntu.template',
               help='Template file for injected network config'),
    cfg.StrOpt('pxe_append_params',
               default='nofb nomodeset vga=normal',
               help='additional append parameters for baremetal PXE boot'),
    cfg.StrOpt('pxe_config_template',
               default='$pybasedir/drivers/modules/pxe_config.template',
               help='Template file for PXE configuration'),
    cfg.IntOpt('pxe_deploy_timeout',
               help='Timeout for PXE deployments. Default: 0 (unlimited)',
               default=0),
    cfg.StrOpt('tftp_root',
               default='/tftpboot',
               help='Ironic compute node\'s tftp root path'),
    cfg.StrOpt('images_path',
               default='/var/lib/ironic/images/',
               help='Directory where images are stored on disk'),
    cfg.StrOpt('tftp_master_path',
               default='/tftpboot/master_images',
               help='Directory where master tftp images are stored on disk'),
    cfg.StrOpt('instance_master_path',
               default='/var/lib/ironic/master_images',
               # FIX: the help text was a copy-paste of tftp_master_path's;
               # this directory holds master *instance* images.
               help='Directory where master instance images are stored on '
                    'disk')
]

LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.register_opts(pxe_opts, group='pxe')
CONF.import_opt('use_ipv6', 'ironic.netconf')
def _parse_driver_info(node):
    """Gets the driver-specific Node deployment info.

    This method validates whether the 'driver_info' property of the
    supplied node contains the required information for this driver to
    deploy images to the node.

    :param node: a single Node to validate.
    :returns: A dict with the driver_info values.
    :raises: InvalidParameterValue if required parameters are missing
        or malformed.
    """
    # FIX: the original chained node.get('driver_info', '').get('pxe') and
    # crashed (AttributeError on str / TypeError on None) when either key
    # was absent; default to empty dicts so missing parameters are reported
    # uniformly through InvalidParameterValue below.
    info = node.get('driver_info') or {}
    info = info.get('pxe') or {}
    d_info = {}
    d_info['instance_name'] = info.get('instance_name', None)
    d_info['image_source'] = info.get('image_source', None)
    d_info['deploy_kernel'] = info.get('deploy_kernel',
                                       CONF.pxe.deploy_kernel)
    d_info['deploy_ramdisk'] = info.get('deploy_ramdisk',
                                        CONF.pxe.deploy_ramdisk)
    d_info['root_gb'] = info.get('root_gb', None)

    # Everything collected so far is mandatory.
    missing_info = [label for label in d_info if not d_info[label]]
    if missing_info:
        raise exception.InvalidParameterValue(_(
            "Can not validate PXE bootloader. The following parameters "
            "were not passed to ironic: %s") % missing_info)

    #TODO(ghe): Should we get rid of swap partition?
    d_info['swap_mb'] = info.get('swap_mb', 1)
    d_info['key_data'] = info.get('key_data', None)

    # Numeric parameters must be convertible to int.
    for param in ('root_gb', 'swap_mb'):
        try:
            int(d_info[param])
        except (ValueError, TypeError):
            # TypeError covers non-numeric types such as None/list that the
            # original let escape uncaught.
            raise exception.InvalidParameterValue(_(
                "Can not validate PXE bootloader. Invalid "
                "parameter %s") % param)
    return d_info
def _build_pxe_config(node, pxe_info):
    """Build the PXE config file for a node

    This method builds the PXE boot configuration file for a node,
    given all the required parameters.

    The resulting file has both a "deploy" and "boot" label, which correspond
    to the two phases of booting. This may be extended later.

    :param node: the Node (dict-like) being deployed; 'id' and
        'instance_uuid' are read.
    :param pxe_info: mapping with 'deploy_kernel', 'deploy_ramdisk',
        'kernel' and 'ramdisk' entries whose second element is the
        on-disk path (presumably (uuid, path) pairs — confirm with caller).
    :returns: A formated string with the file content.
    """
    LOG.debug(_("Building PXE config for deployment %s.") % node['id'])
    # Values substituted into the pxe_config_template.
    pxe_options = {
        'deployment_id': node['id'],
        'deployment_key': utils.random_alnum(32),
        'deployment_iscsi_iqn': "iqn-%s" % node['instance_uuid'],
        'deployment_aki_path': pxe_info['deploy_kernel'][1],
        'deployment_ari_path': pxe_info['deploy_ramdisk'][1],
        'aki_path': pxe_info['kernel'][1],
        'ari_path': pxe_info['ramdisk'][1],
        'pxe_append_params': CONF.pxe.pxe_append_params,
    }
    tmpl_path, tmpl_file = os.path.split(CONF.pxe.pxe_config_template)
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path))
    template = env.get_template(tmpl_file)
    # 'ROOT' is rendered back as the literal '{{ ROOT }}' so a later pass
    # can substitute it.
    return template.render({'pxe_options': pxe_options,
                            'ROOT': '{{ ROOT }}'})
def _get_node_mac_addresses(task, node):
    """Get all mac addresses for a node.

    :param task: a TaskManager instance.
    :param node: the Node to act upon.
    :returns: A list of macs address in the format xx:xx:xx:xx:xx:xx,
        or None when the node is not among the task's resources.
    """
    node_id = node['id']
    for resource in task.resources:
        if resource.node.id != node_id:
            continue
        return [port.address for port in resource.ports]
def _get_pxe_mac_path(mac):
    """Convert a MAC address into a PXE config file name.

    :param mac: A mac address string in the format xx:xx:xx:xx:xx:xx.
    :returns: the path to the config file.
    """
    # pxelinux looks up per-MAC configs as "01-<mac with dashes, lowercase>"
    # under pxelinux.cfg/.
    filename = "01-" + mac.replace(":", "-").lower()
    return os.path.join(CONF.pxe.tftp_root, 'pxelinux.cfg', filename)
def _get_pxe_config_file_path(instance_uuid):
    """Generate the path for an instances PXE config file.

    :param instance_uuid: UUID of the instance.
    :returns: <tftp_root>/<instance_uuid>/config
    """
    return os.path.join(CONF.pxe.tftp_root, instance_uuid, 'config')
def _get_image_dir_path(d_info):
    """Generate the dir for an instances disk.

    :param d_info: driver info dict; only 'instance_name' is used.
    :returns: <images_path>/<instance_name>
    """
    return os.path.join(CONF.pxe.images_path, d_info['instance_name'])
def _get_image_file_path(d_info):
    """Generate the full path for an instances disk.

    :param d_info: driver info dict; only 'instance_name' is used.
    :returns: <images_path>/<instance_name>/disk
    """
    return os.path.join(_get_image_dir_path(d_info), 'disk')
@lockutils.synchronized('master_image', 'ironic-')
def _link_master_image(path, dest_path):
    """Create a link from path to dest_path using locking to
    avoid image manipulation during the process.
    """
    # Hard-link only when the master image exists; its link count is used
    # elsewhere (_unlink_master_image) to decide when it can be deleted.
    if os.path.exists(path):
        os.link(path, dest_path)
@lockutils.synchronized('master_image', 'ironic-')
def _unlink_master_image(path):
    """Remove a master image once no deployment hard-links to it remain."""
    #TODO(ghe): keep images for a while (kind of local cache)
    # If an image has been used, it-s likely to be used again
    # With no space problems, we can keep it, so next time
    # only a new link needs to be created.
    # Replace algorithm to look: disk space (trigger)
    # lru, aged...
    # os.statvfs
    # heapq.nlargest(1, [(f, os.stat('./' + f).st_ctime) for f in
    # os.listdir('.') if os.stat('./' + f).st_nlink == 1], key=lambda s: s[1])
    # st_nlink == 1 means only the master file itself remains.
    if os.path.exists(path) and os.stat(path).st_nlink == 1:
        utils.unlink_without_raise(path)
@lockutils.synchronized('master_image', 'ironic-')
def _create_master_image(tmp_path, master_uuid, path):
    """With recently downloaded image, use it as master image, and link to
    instance's uuid. Uses file locking to avoid image manipulation
    during the process.

    :param tmp_path: temporary path of the freshly downloaded image.
    :param master_uuid: destination path of the shared master image.
    :param path: instance-specific path that will hard-link the master.
    """
    # Another downloader may have raced us; only promote to master once.
    if not os.path.exists(master_uuid):
        os.link(tmp_path, master_uuid)
    os.link(master_uuid, path)
    os.unlink(tmp_path)
@lockutils.synchronized('get_image', 'ironic-')
def _download_in_progress(lock_file):
    """Get image file lock to avoid downloading the same image
    simultaneously.

    :param lock_file: path of the lock file to probe/create.
    :returns: False if the lock was acquired by this caller (no download
        in progress), True if another download already holds the lock.
    """
    if not os.path.exists(lock_file):
        # Create the (empty) lock file and close it immediately; the
        # previous code called open() without closing, leaking the
        # file descriptor until garbage collection.
        with open(lock_file, 'w'):
            pass
        return False
    else:
        return True
@lockutils.synchronized('get_image', 'ironic-')
def _remove_download_in_progress_lock(lock_file):
    """Removes image file lock to indicate that image download has finished
    and we can start to use it.

    :param lock_file: path of the lock file to remove (ignored if absent).
    """
    fileutils.delete_if_exists(lock_file)
def _get_image(ctx, path, uuid, master_path=None, image_service=None):
    """Fetch an image to *path*, optionally sharing it via a master copy.

    :param ctx: request context.
    :param path: destination path for the image.
    :param uuid: image reference (Glance uuid/href).
    :param master_path: directory holding shared master images, or None
        to fetch a private, unshared copy.
    :param image_service: image service to fetch from (None for default).
    """
    #TODO(ghe): Revise this logic and document process Bug #1199665
    # When master_path is defined, we save the images in this dir using the
    # image uuid as the file name. Deployments that use these images create
    # a hard link to keep track of this. When the link count of a master
    # image is equal to 1, it can be deleted.
    #TODO(ghe): have hard links and count links the same behaviour in all fs
    #TODO(ghe): timeout and retry for downloads
    def _wait_for_download():
        # Polled by the looping call below; terminates the loop once the
        # concurrent downloader removes the lock file.
        if not os.path.exists(lock_file):
            raise loopingcall.LoopingCallDone()
    # If the download of the image needed is in progress (lock file present)
    # we wait until the lock disappears and create the link.
    if master_path is None:
        #NOTE(ghe): We don't share images between instances/hosts
        images.fetch_to_raw(ctx, uuid, path, image_service)
    else:
        # NOTE(review): master_uuid is already an absolute path, so the
        # os.path.join below effectively returns master_uuid + '.lock'.
        master_uuid = os.path.join(master_path,
                                   service_utils.parse_image_ref(uuid)[0])
        lock_file = os.path.join(master_path, master_uuid + '.lock')
        # Fast path: hard-link an already-downloaded master image, if any.
        _link_master_image(master_uuid, path)
        if not os.path.exists(path):
            fileutils.ensure_tree(master_path)
            if not _download_in_progress(lock_file):
                with fileutils.remove_path_on_error(lock_file):
                    #TODO(ghe): logging when image cannot be created
                    # Download into a temp file first, then atomically
                    # promote it to master and link the instance path.
                    fd, tmp_path = tempfile.mkstemp(dir=master_path)
                    os.close(fd)
                    images.fetch_to_raw(ctx, uuid, tmp_path, image_service)
                    _create_master_image(tmp_path, master_uuid, path)
                _remove_download_in_progress_lock(lock_file)
            else:
                #TODO(ghe): expiration time
                timer = loopingcall.FixedIntervalLoopingCall(
                    _wait_for_download)
                timer.start(interval=1).wait()
                _link_master_image(master_uuid, path)
def _cache_tftp_images(ctx, node, pxe_info):
    """Fetch the necessary kernels and ramdisks for the instance.

    :param ctx: request context.
    :param node: the Node whose driver_info describes the images.
    :param pxe_info: mapping of label -> (image uuid, tftp path) as built
        by _get_tftp_image_info.
    """
    d_info = _parse_driver_info(node)
    fileutils.ensure_tree(
        os.path.join(CONF.pxe.tftp_root, node['instance_uuid']))
    LOG.debug(_("Fetching kernel and ramdisk for instance %s") %
              d_info['instance_name'])
    for label in pxe_info:
        (uuid, path) = pxe_info[label]
        # Skip images already present; masters are shared via hard links.
        if not os.path.exists(path):
            _get_image(ctx, path, uuid, CONF.pxe.tftp_master_path, None)
def _cache_instance_image(ctx, node):
    """Fetch the instance's image from Glance

    This method pulls the relevant AMI and associated kernel and ramdisk,
    and the deploy kernel and ramdisk from Glance, and writes them
    to the appropriate places on local disk.

    Both sets of kernel and ramdisk are needed for PXE booting, so these
    are stored under CONF.pxe.tftp_root.

    At present, the AMI is cached and certain files are injected.
    Debian/ubuntu-specific assumptions are made regarding the injected
    files. In a future revision, this functionality will be replaced by a
    more scalable and os-agnostic approach: the deployment ramdisk will
    fetch from Glance directly, and write its own last-mile configuration.

    :param ctx: request context.
    :param node: the Node to cache the image for.
    :returns: tuple of (image uuid, local image path).
    """
    d_info = _parse_driver_info(node)
    fileutils.ensure_tree(_get_image_dir_path(d_info))
    image_path = _get_image_file_path(d_info)
    uuid = d_info['image_source']
    LOG.debug(_("Fetching image %(ami)s for instance %(name)s") %
              {'ami': uuid, 'name': d_info['instance_name']})
    # Already-cached images are reused as-is.
    if not os.path.exists(image_path):
        _get_image(ctx, image_path, uuid, CONF.pxe.instance_master_path)
    return (uuid, image_path)
def _get_tftp_image_info(node):
    """Generate the paths for tftp files for this instance

    Raises IronicException if
    - instance does not contain kernel_id or ramdisk_id
    - deploy_kernel_id or deploy_ramdisk_id can not be read from
    driver_info and defaults are not set

    :param node: the Node to build image info for.
    :returns: dict mapping label -> [image id, tftp path] for the deploy
        and instance kernel/ramdisk images.
    """
    #TODO(ghe): Called multiples times. Should we store image_info?
    d_info = _parse_driver_info(node)
    image_info = {
        'deploy_kernel': [None, None],
        'deploy_ramdisk': [None, None],
    }
    for label in image_info:
        # Image references may be hrefs; keep only the trailing id segment.
        image_info[label][0] = str(d_info[label]).split('/')[-1]
        image_info[label][1] = os.path.join(CONF.pxe.tftp_root,
                                            node['instance_uuid'], label)
    # The instance kernel/ramdisk ids come from the Glance image properties.
    ctx = context.get_admin_context()
    glance_service = service.Service(version=1, context=ctx)
    iproperties = glance_service.show(d_info['image_source'])['properties']
    for label in ('kernel', 'ramdisk'):
        image_info[label] = [None, None]
        image_info[label][0] = str(iproperties[label + '_id']).split('/')[-1]
        image_info[label][1] = os.path.join(CONF.pxe.tftp_root,
                                            node['instance_uuid'], label)
    return image_info
def _cache_images(node, pxe_info):
    """Prepare all the images for this instance.

    :param node: the Node to act upon.
    :param pxe_info: mapping of label -> (image uuid, tftp path).
    """
    ctx = context.get_admin_context()
    #TODO(ghe): parallelized downloads
    #TODO(ghe): Embedded image client in ramdisk
    # - Get rid of iscsi, image location in baremetal service node and
    # image service, no master image, no image outdated...
    # - security concerns
    _cache_tftp_images(ctx, node, pxe_info)
    _cache_instance_image(ctx, node)
    #TODO(ghe): file injection
    # http://lists.openstack.org/pipermail/openstack-dev/2013-May/008728.html
    # http://lists.openstack.org/pipermail/openstack-dev/2013-July/011769.html
    # _inject_into_image(d_info, network_info, injected_files, admin_password)
def _destroy_images(d_info):
    """Delete instance's image file.

    :param d_info: parsed driver_info for the node.
    """
    image_uuid = service_utils.parse_image_ref(d_info['image_source'])[0]
    utils.unlink_without_raise(_get_image_file_path(d_info))
    utils.rmtree_without_raise(_get_image_dir_path(d_info))
    # Also drop the shared master copy if no other instance links remain
    # (_unlink_master_image checks the hard-link count).
    master_image = os.path.join(CONF.pxe.instance_master_path, image_uuid)
    _unlink_master_image(master_image)
def _create_pxe_config(task, node, pxe_info):
    """Generate pxe configuration file and link mac ports to it for
    tftp booting.

    :param task: a TaskManager instance.
    :param node: the Node to act upon.
    :param pxe_info: mapping of label -> (image uuid, tftp path).
    """
    fileutils.ensure_tree(os.path.join(CONF.pxe.tftp_root,
                                       node['instance_uuid']))
    fileutils.ensure_tree(os.path.join(CONF.pxe.tftp_root,
                                       'pxelinux.cfg'))
    pxe_config_file_path = _get_pxe_config_file_path(node['instance_uuid'])
    pxe_config = _build_pxe_config(node, pxe_info)
    utils.write_to_file(pxe_config_file_path, pxe_config)
    # One per-MAC link per port, so pxelinux finds the config whichever
    # NIC the node boots from.
    for port in _get_node_mac_addresses(task, node):
        mac_path = _get_pxe_mac_path(port)
        # Remove any stale link for this MAC before re-creating it.
        utils.unlink_without_raise(mac_path)
        utils.create_link_without_raise(pxe_config_file_path, mac_path)
class PXEDeploy(base.DeployInterface):
    """PXE Deploy Interface: just a stub until the real driver is ported."""

    def validate(self, node):
        """Validate the driver-specific Node deployment info.

        This method validates whether the 'driver_info' property of the
        supplied node contains the required information for this driver to
        deploy images to the node.

        :param node: a single Node to validate.
        :raises: InvalidParameterValue — presumably raised by
            _parse_driver_info on missing/bad driver_info (the original
            docstring said ":returns:", which the code contradicts).
        """
        _parse_driver_info(node)

    def deploy(self, task, node):
        """Perform start deployment a node.

        Given a node with complete metadata, deploy the indicated image
        to the node.

        :param task: a TaskManager instance.
        :param node: the Node to act upon.
        :returns: deploy state DEPLOYING.
        """
        pxe_info = _get_tftp_image_info(node)
        _create_pxe_config(task, node, pxe_info)
        _cache_images(node, pxe_info)
        return states.DEPLOYING

    def tear_down(self, task, node):
        """Tear down a previous deployment.

        Given a node that has been previously deployed to,
        do all cleanup and tear down necessary to "un-deploy" that node.

        :param task: a TaskManager instance.
        :param node: the Node to act upon.
        :returns: deploy state DELETED.
        """
        #FIXME(ghe): Possible error to get image info if eliminated from glance
        # Retrieve image info and store in db
        # If we keep master images, no need to get the info, we may ignore this
        pxe_info = _get_tftp_image_info(node)
        d_info = _parse_driver_info(node)
        # Remove each tftp image and its master (if unreferenced).
        for label in pxe_info:
            (uuid, path) = pxe_info[label]
            master_path = os.path.join(CONF.pxe.tftp_master_path, uuid)
            utils.unlink_without_raise(path)
            _unlink_master_image(master_path)
        # Remove the PXE config and the per-MAC links pointing at it.
        utils.unlink_without_raise(_get_pxe_config_file_path(
            node['instance_uuid']))
        for port in _get_node_mac_addresses(task, node):
            mac_path = _get_pxe_mac_path(port)
            utils.unlink_without_raise(mac_path)
        utils.rmtree_without_raise(
            os.path.join(CONF.pxe.tftp_root, node['instance_uuid']))
        _destroy_images(d_info)
        return states.DELETED
class PXERescue(base.RescueInterface):
    """PXE Rescue Interface: stub, rescue is not implemented for PXE."""

    def validate(self, node):
        """No-op validation; the stub has no driver_info requirements."""
        pass

    def rescue(self, task, node):
        """Not implemented."""
        pass

    def unrescue(self, task, node):
        """Not implemented."""
        pass
class VendorPassthru(base.VendorInterface):
    """Interface to mix IPMI and PXE vendor-specific interfaces."""

    def _get_deploy_info(self, node, **kwargs):
        """Collect and validate the parameters needed for iSCSI deploy.

        :param node: the Node to act upon.
        :param kwargs: vendor passthru args (address, port, iqn, lun).
        :returns: dict of deployment parameters.
        :raises: InvalidParameterValue if any required parameter is None.
        """
        d_info = _parse_driver_info(node)
        params = {'address': kwargs.get('address'),
                  'port': kwargs.get('port', '3260'),
                  'iqn': kwargs.get('iqn'),
                  'lun': kwargs.get('lun', '1'),
                  'image_path': _get_image_file_path(d_info),
                  'pxe_config_path': _get_pxe_config_file_path(
                      node['instance_uuid']),
                  'root_mb': 1024 * int(d_info['root_gb']),
                  'swap_mb': int(d_info['swap_mb'])
                  }
        missing = [key for key in params.keys() if params[key] is None]
        if missing:
            raise exception.InvalidParameterValue(_(
                "Parameters %s were not passed to ironic"
                " for deploy.") % missing)
        return params

    def validate(self, node, **kwargs):
        """Validate vendor passthru arguments for the requested method.

        :param node: the Node to act upon.
        :param kwargs: must include 'method'.
        :returns: True on success.
        :raises: InvalidParameterValue for unsupported methods or missing
            deploy parameters.
        """
        method = kwargs['method']
        if method == 'pass_deploy_info':
            self._get_deploy_info(node, **kwargs)
        elif method == 'set_boot_device':
            # todo
            pass
        else:
            raise exception.InvalidParameterValue(_(
                "Unsupported method (%s) passed to PXE driver.")
                % method)
        return True

    def _continue_deploy(self, task, node, **kwargs):
        """Resume deployment once the ramdisk reports back its iSCSI target."""
        params = self._get_deploy_info(node, **kwargs)
        ctx = context.get_admin_context()
        node_id = node['uuid']
        err_msg = kwargs.get('error')
        if err_msg:
            LOG.error(_('Node %(node_id)s deploy error message: %(error)s') %
                      {'node_id': node_id, 'error': err_msg})
        LOG.info(_('start deployment for node %(node_id)s, '
                   'params %(params)s') %
                 {'node_id': node_id, 'params': params})
        try:
            node['provision_state'] = states.DEPLOYING
            node.save(ctx)
            deploy_utils.deploy(**params)
        except Exception as e:
            LOG.error(_('deployment to node %s failed') % node_id)
            node['provision_state'] = states.DEPLOYFAIL
            node.save(ctx)
            # NOTE(review): e.message exists on Python 2 exceptions only;
            # under Python 3 this line would raise AttributeError —
            # confirm the targeted runtime.
            raise exception.InstanceDeployFailure(_(
                'Deploy error: "%(error)s" for node %(node_id)s') %
                {'error': e.message, 'node_id': node_id})
        else:
            LOG.info(_('deployment to node %s done') % node_id)
            node['provision_state'] = states.DEPLOYDONE
            node.save(ctx)

    def vendor_passthru(self, task, node, **kwargs):
        """Dispatch a vendor passthru call to the matching handler.

        :param task: a TaskManager instance.
        :param node: the Node to act upon.
        :param kwargs: must include 'method' (validated by validate()).
        """
        method = kwargs['method']
        if method == 'set_boot_device':
            return node.driver.vendor._set_boot_device(
                task, node,
                kwargs.get('device'),
                kwargs.get('persistent'))
        elif method == 'pass_deploy_info':
            self._continue_deploy(task, node, **kwargs)
|
|
"""Support for Rflink devices."""
import asyncio
from collections import defaultdict
import logging
import async_timeout
from rflink.protocol import create_rflink_connection
from serial import SerialException
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_STATE,
CONF_COMMAND,
CONF_DEVICE_ID,
CONF_HOST,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
STATE_ON,
)
from homeassistant.core import CoreState, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.restore_state import RestoreEntity
from .utils import brightness_to_rflink
_LOGGER = logging.getLogger(__name__)

# Attribute exposed on fired HA bus events.
ATTR_EVENT = "event"

# Configuration option keys.
CONF_ALIASES = "aliases"
CONF_GROUP_ALIASES = "group_aliases"
CONF_GROUP = "group"
CONF_NOGROUP_ALIASES = "nogroup_aliases"
CONF_DEVICE_DEFAULTS = "device_defaults"
CONF_AUTOMATIC_ADD = "automatic_add"
CONF_FIRE_EVENT = "fire_event"
CONF_IGNORE_DEVICES = "ignore_devices"
CONF_RECONNECT_INTERVAL = "reconnect_interval"
CONF_SIGNAL_REPETITIONS = "signal_repetitions"
CONF_WAIT_FOR_ACK = "wait_for_ack"
CONF_KEEPALIVE_IDLE = "tcp_keepalive_idle_timer"

# Keys into hass.data for component-wide shared state.
DATA_DEVICE_REGISTER = "rflink_device_register"
DATA_ENTITY_LOOKUP = "rflink_entity_lookup"
DATA_ENTITY_GROUP_LOOKUP = "rflink_entity_group_only_lookup"

# Defaults: reconnect delay and keepalive idle are in seconds.
DEFAULT_RECONNECT_INTERVAL = 10
DEFAULT_SIGNAL_REPETITIONS = 1
DEFAULT_TCP_KEEPALIVE_IDLE_TIMER = 3600

# Seconds to wait for the initial gateway connection.
CONNECTION_TIMEOUT = 10

# HA bus event fired for remote button presses (when fire_event enabled).
EVENT_BUTTON_PRESSED = "button_pressed"

# Keys appearing inside raw rflink event dictionaries.
EVENT_KEY_COMMAND = "command"
EVENT_KEY_ID = "id"
EVENT_KEY_SENSOR = "sensor"
EVENT_KEY_UNIT = "unit"

# Commands that address a whole group of devices at once.
RFLINK_GROUP_COMMANDS = ["allon", "alloff"]

DOMAIN = "rflink"

SERVICE_SEND_COMMAND = "send_command"

# Dispatcher signal names (SIGNAL_HANDLE_EVENT is templated per entity).
SIGNAL_AVAILABILITY = "rflink_device_available"
SIGNAL_HANDLE_EVENT = "rflink_handle_event_{}"
SIGNAL_EVENT = "rflink_event"

# Placeholder entity id used until a newly discovered device is created.
TMP_ENTITY = "tmp.{}"

DEVICE_DEFAULTS_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
        vol.Optional(
            CONF_SIGNAL_REPETITIONS, default=DEFAULT_SIGNAL_REPETITIONS
        ): vol.Coerce(int),
    }
)

CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_PORT): vol.Any(cv.port, cv.string),
                vol.Optional(CONF_HOST): cv.string,
                vol.Optional(CONF_WAIT_FOR_ACK, default=True): cv.boolean,
                vol.Optional(
                    CONF_KEEPALIVE_IDLE, default=DEFAULT_TCP_KEEPALIVE_IDLE_TIMER
                ): int,
                vol.Optional(
                    CONF_RECONNECT_INTERVAL, default=DEFAULT_RECONNECT_INTERVAL
                ): int,
                vol.Optional(CONF_IGNORE_DEVICES, default=[]): vol.All(
                    cv.ensure_list, [cv.string]
                ),
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)

SEND_COMMAND_SCHEMA = vol.Schema(
    {vol.Required(CONF_DEVICE_ID): cv.string, vol.Required(CONF_COMMAND): cv.string}
)
def identify_event_type(event):
    """Look at event to determine type of device.

    Async friendly.
    """
    # Command events take precedence over sensor events, matching the
    # original check order.
    for known_key in (EVENT_KEY_COMMAND, EVENT_KEY_SENSOR):
        if known_key in event:
            return known_key
    return "unknown"
async def async_setup(hass, config):
    """Set up the Rflink component.

    Creates the device/entity lookup tables, registers the send_command
    service, and starts the (auto-reconnecting) gateway connection.
    """
    # Allow entities to register themselves by device_id to be looked up when
    # new rflink events arrive to be handled
    hass.data[DATA_ENTITY_LOOKUP] = {
        EVENT_KEY_COMMAND: defaultdict(list),
        EVENT_KEY_SENSOR: defaultdict(list),
    }
    # Separate lookup for entities that respond to group commands only.
    hass.data[DATA_ENTITY_GROUP_LOOKUP] = {EVENT_KEY_COMMAND: defaultdict(list)}

    # Allow platform to specify function to register new unknown devices
    hass.data[DATA_DEVICE_REGISTER] = {}

    async def async_send_command(call):
        """Send Rflink command."""
        _LOGGER.debug("Rflink command for %s", str(call.data))
        if not (
            await RflinkCommand.send_command(
                call.data.get(CONF_DEVICE_ID), call.data.get(CONF_COMMAND)
            )
        ):
            _LOGGER.error("Failed Rflink command for %s", str(call.data))
        else:
            # Mirror the outgoing command as an event so entities update.
            async_dispatcher_send(
                hass,
                SIGNAL_EVENT,
                {
                    EVENT_KEY_ID: call.data.get(CONF_DEVICE_ID),
                    EVENT_KEY_COMMAND: call.data.get(CONF_COMMAND),
                },
            )

    hass.services.async_register(
        DOMAIN, SERVICE_SEND_COMMAND, async_send_command, schema=SEND_COMMAND_SCHEMA
    )

    @callback
    def event_callback(event):
        """Handle incoming Rflink events.

        Rflink events arrive as dictionaries of varying content
        depending on their type. Identify the events and distribute
        accordingly.
        """
        event_type = identify_event_type(event)
        _LOGGER.debug("event of type %s: %s", event_type, event)

        # Don't propagate non entity events (eg: version string, ack response)
        if event_type not in hass.data[DATA_ENTITY_LOOKUP]:
            _LOGGER.debug("unhandled event of type: %s", event_type)
            return

        # Lookup entities who registered this device id as device id or alias
        event_id = event.get(EVENT_KEY_ID)

        is_group_event = (
            event_type == EVENT_KEY_COMMAND
            and event[EVENT_KEY_COMMAND] in RFLINK_GROUP_COMMANDS
        )
        if is_group_event:
            entity_ids = hass.data[DATA_ENTITY_GROUP_LOOKUP][event_type].get(
                event_id, []
            )
        else:
            entity_ids = hass.data[DATA_ENTITY_LOOKUP][event_type][event_id]
        _LOGGER.debug("entity_ids: %s", entity_ids)
        if entity_ids:
            # Propagate event to every entity matching the device id
            for entity in entity_ids:
                _LOGGER.debug("passing event to %s", entity)
                async_dispatcher_send(hass, SIGNAL_HANDLE_EVENT.format(entity), event)
        elif not is_group_event:
            # If device is not yet known, register with platform (if loaded)
            if event_type in hass.data[DATA_DEVICE_REGISTER]:
                _LOGGER.debug("device_id not known, adding new device")
                # Add bogus event_id first to avoid race if we get another
                # event before the device is created
                # Any additional events received before the device has been
                # created will thus be ignored.
                hass.data[DATA_ENTITY_LOOKUP][event_type][event_id].append(
                    TMP_ENTITY.format(event_id)
                )
                hass.async_create_task(
                    hass.data[DATA_DEVICE_REGISTER][event_type](event)
                )
            else:
                _LOGGER.debug("device_id not known and automatic add disabled")

    # When connecting to tcp host instead of serial port (optional)
    host = config[DOMAIN].get(CONF_HOST)
    # TCP port when host configured, otherwise serial port
    port = config[DOMAIN][CONF_PORT]

    keepalive_idle_timer = None
    # TCP KeepAlive only if this is TCP based connection (not serial)
    if host is not None:
        # TCP KEEPALIVE will be enabled if value > 0
        keepalive_idle_timer = config[DOMAIN][CONF_KEEPALIVE_IDLE]
        if keepalive_idle_timer < 0:
            _LOGGER.error(
                "A bogus TCP Keepalive IDLE timer was provided (%d secs), "
                "it will be disabled. "
                "Recommended values: 60-3600 (seconds)",
                keepalive_idle_timer,
            )
            keepalive_idle_timer = None
        elif keepalive_idle_timer == 0:
            keepalive_idle_timer = None
        elif keepalive_idle_timer <= 30:
            # Allowed, but warn: very aggressive keepalive can drop links.
            _LOGGER.warning(
                "A very short TCP Keepalive IDLE timer was provided (%d secs) "
                "and may produce unexpected disconnections from RFlink device."
                " Recommended values: 60-3600 (seconds)",
                keepalive_idle_timer,
            )

    @callback
    def reconnect(exc=None):
        """Schedule reconnect after connection has been unexpectedly lost."""
        # Reset protocol binding before starting reconnect
        RflinkCommand.set_rflink_protocol(None)

        async_dispatcher_send(hass, SIGNAL_AVAILABILITY, False)

        # If HA is not stopping, initiate new connection
        if hass.state != CoreState.stopping:
            _LOGGER.warning("Disconnected from Rflink, reconnecting")
            hass.async_create_task(connect())

    async def connect():
        """Set up connection and hook it into HA for reconnect/shutdown."""
        _LOGGER.info("Initiating Rflink connection")

        # Rflink create_rflink_connection decides based on the value of host
        # (string or None) if serial or tcp mode should be used

        # Initiate serial/tcp connection to Rflink gateway
        connection = create_rflink_connection(
            port=port,
            host=host,
            keepalive=keepalive_idle_timer,
            event_callback=event_callback,
            disconnect_callback=reconnect,
            loop=hass.loop,
            ignore=config[DOMAIN][CONF_IGNORE_DEVICES],
        )

        try:
            async with async_timeout.timeout(CONNECTION_TIMEOUT):
                transport, protocol = await connection

        except (
            SerialException,
            OSError,
            asyncio.TimeoutError,
        ) as exc:
            reconnect_interval = config[DOMAIN][CONF_RECONNECT_INTERVAL]
            _LOGGER.exception(
                "Error connecting to Rflink, reconnecting in %s", reconnect_interval
            )
            # Connection to Rflink device is lost, make entities unavailable
            async_dispatcher_send(hass, SIGNAL_AVAILABILITY, False)

            hass.loop.call_later(reconnect_interval, reconnect, exc)
            return

        # There is a valid connection to a Rflink device now so
        # mark entities as available
        async_dispatcher_send(hass, SIGNAL_AVAILABILITY, True)

        # Bind protocol to command class to allow entities to send commands
        RflinkCommand.set_rflink_protocol(protocol, config[DOMAIN][CONF_WAIT_FOR_ACK])

        # handle shutdown of Rflink asyncio transport
        hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_STOP, lambda x: transport.close()
        )

        _LOGGER.info("Connected to Rflink")

    hass.async_create_task(connect())
    async_dispatcher_connect(hass, SIGNAL_EVENT, event_callback)
    return True
class RflinkDevice(Entity):
    """Representation of a Rflink device.

    Contains the common logic for Rflink entities.
    """

    # Set by platforms; not used within this base class.
    platform = None
    # None means "state unknown" (see assumed_state below).
    _state = None
    _available = True

    def __init__(
        self,
        device_id,
        initial_event=None,
        name=None,
        aliases=None,
        group=True,
        group_aliases=None,
        nogroup_aliases=None,
        fire_event=False,
        signal_repetitions=DEFAULT_SIGNAL_REPETITIONS,
    ):
        """Initialize the device."""
        # Rflink specific attributes for every component type
        self._initial_event = initial_event
        self._device_id = device_id
        if name:
            self._name = name
        else:
            self._name = device_id
        self._aliases = aliases
        self._group = group
        self._group_aliases = group_aliases
        self._nogroup_aliases = nogroup_aliases
        self._should_fire_event = fire_event
        self._signal_repetitions = signal_repetitions

    @callback
    def handle_event_callback(self, event):
        """Handle incoming event for device type."""
        # Call platform specific event handler
        self._handle_event(event)

        # Propagate changes through ha
        self.async_write_ha_state()

        # Put command onto bus for user to subscribe to
        if self._should_fire_event and identify_event_type(event) == EVENT_KEY_COMMAND:
            self.hass.bus.async_fire(
                EVENT_BUTTON_PRESSED,
                {ATTR_ENTITY_ID: self.entity_id, ATTR_STATE: event[EVENT_KEY_COMMAND]},
            )
            _LOGGER.debug(
                "Fired bus event for %s: %s", self.entity_id, event[EVENT_KEY_COMMAND]
            )

    def _handle_event(self, event):
        """Platform specific event handler."""
        raise NotImplementedError()

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def name(self):
        """Return a name for the device."""
        return self._name

    @property
    def is_on(self):
        """Return true if device is on."""
        if self.assumed_state:
            return False
        return self._state

    @property
    def assumed_state(self):
        """Assume device state until first device event sets state."""
        return self._state is None

    @property
    def available(self):
        """Return True if entity is available."""
        return self._available

    @callback
    def _availability_callback(self, availability):
        """Update availability state."""
        self._available = availability
        self.async_write_ha_state()

    async def async_added_to_hass(self):
        """Register update callback."""
        await super().async_added_to_hass()
        # Remove temporary bogus entity_id if added
        tmp_entity = TMP_ENTITY.format(self._device_id)
        if (
            tmp_entity
            in self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][self._device_id]
        ):
            self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][
                self._device_id
            ].remove(tmp_entity)

        # Register id and aliases
        self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][self._device_id].append(
            self.entity_id
        )
        if self._group:
            self.hass.data[DATA_ENTITY_GROUP_LOOKUP][EVENT_KEY_COMMAND][
                self._device_id
            ].append(self.entity_id)
        # aliases respond to both normal and group commands (allon/alloff)
        if self._aliases:
            for _id in self._aliases:
                self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][_id].append(
                    self.entity_id
                )
                self.hass.data[DATA_ENTITY_GROUP_LOOKUP][EVENT_KEY_COMMAND][_id].append(
                    self.entity_id
                )
        # group_aliases only respond to group commands (allon/alloff)
        if self._group_aliases:
            for _id in self._group_aliases:
                self.hass.data[DATA_ENTITY_GROUP_LOOKUP][EVENT_KEY_COMMAND][_id].append(
                    self.entity_id
                )
        # nogroup_aliases only respond to normal commands
        if self._nogroup_aliases:
            for _id in self._nogroup_aliases:
                self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][_id].append(
                    self.entity_id
                )
        # Unsubscribe from dispatcher signals when the entity is removed.
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass, SIGNAL_AVAILABILITY, self._availability_callback
            )
        )
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                SIGNAL_HANDLE_EVENT.format(self.entity_id),
                self.handle_event_callback,
            )
        )

        # Process the initial event now that the entity is created
        if self._initial_event:
            self.handle_event_callback(self._initial_event)
class RflinkCommand(RflinkDevice):
    """Singleton class to make Rflink command interface available to entities.

    This class is to be inherited by every Entity class that is actionable
    (switches/lights). It exposes the Rflink command interface for these
    entities.

    The Rflink interface is managed as a class level and set during setup (and
    reset on reconnect).
    """

    # Keep repetition tasks to cancel if state is changed before repetitions
    # are sent
    _repetition_task = None

    # Shared asyncio protocol; None while disconnected from the gateway.
    _protocol = None

    @classmethod
    def set_rflink_protocol(cls, protocol, wait_ack=None):
        """Set the Rflink asyncio protocol as a class variable."""
        cls._protocol = protocol
        if wait_ack is not None:
            cls._wait_ack = wait_ack

    @classmethod
    def is_connected(cls):
        """Return connection status."""
        return bool(cls._protocol)

    @classmethod
    async def send_command(cls, device_id, action):
        """Send device command to Rflink and wait for acknowledgement."""
        return await cls._protocol.send_command_ack(device_id, action)

    async def _async_handle_command(self, command, *args):
        """Do bookkeeping for command, send it to rflink and update state."""
        self.cancel_queued_send_commands()

        if command == "turn_on":
            cmd = "on"
            self._state = True
        elif command == "turn_off":
            cmd = "off"
            self._state = False
        elif command == "dim":
            # convert brightness to rflink dim level
            cmd = str(brightness_to_rflink(args[0]))
            self._state = True
        elif command == "toggle":
            cmd = "on"
            # if the state is unknown or false, it gets set as true
            # if the state is true, it gets set as false
            self._state = self._state in [None, False]
        # Cover options for RFlink
        elif command == "close_cover":
            cmd = "DOWN"
            self._state = False
        elif command == "open_cover":
            cmd = "UP"
            self._state = True
        elif command == "stop_cover":
            cmd = "STOP"
            self._state = True

        # Send initial command and queue repetitions.
        # This allows the entity state to be updated quickly and not having to
        # wait for all repetitions to be sent
        await self._async_send_command(cmd, self._signal_repetitions)

        # Update state of entity
        self.async_write_ha_state()

    def cancel_queued_send_commands(self):
        """Cancel queued signal repetition commands.

        For example when user changed state while repetitions are still
        queued for broadcast. Or when an incoming Rflink command (remote
        switch) changes the state.
        """
        # cancel any outstanding tasks from the previous state change
        if self._repetition_task:
            self._repetition_task.cancel()

    async def _async_send_command(self, cmd, repetitions):
        """Send a command for device to Rflink gateway.

        :raises HomeAssistantError: when no gateway connection is bound.
        """
        _LOGGER.debug("Sending command: %s to Rflink device: %s", cmd, self._device_id)

        if not self.is_connected():
            raise HomeAssistantError("Cannot send command, not connected!")

        if self._wait_ack:
            # Puts command on outgoing buffer then waits for Rflink to confirm
            # the command has been sent out.
            await self._protocol.send_command_ack(self._device_id, cmd)
        else:
            # Puts command on outgoing buffer and returns straight away.
            # Rflink protocol/transport handles asynchronous writing of buffer
            # to serial/tcp device. Does not wait for command send
            # confirmation.
            self._protocol.send_command(self._device_id, cmd)

        if repetitions > 1:
            # Schedule remaining repetitions recursively as a task so the
            # caller (and entity state update) is not blocked on them.
            self._repetition_task = self.hass.async_create_task(
                self._async_send_command(cmd, repetitions - 1)
            )
class SwitchableRflinkDevice(RflinkCommand, RestoreEntity):
    """Rflink entity which can switch on/off (eg: light, switch)."""

    async def async_added_to_hass(self):
        """Restore RFLink device state (ON/OFF)."""
        await super().async_added_to_hass()
        if (old_state := await self.async_get_last_state()) is not None:
            self._state = old_state.state == STATE_ON

    def _handle_event(self, event):
        """Adjust state if Rflink picks up a remote command for this device."""
        # A remote state change supersedes any queued repetitions.
        self.cancel_queued_send_commands()

        command = event["command"]
        if command in ["on", "allon"]:
            self._state = True
        elif command in ["off", "alloff"]:
            self._state = False

    async def async_turn_on(self, **kwargs):
        """Turn the device on."""
        await self._async_handle_command("turn_on")

    async def async_turn_off(self, **kwargs):
        """Turn the device off."""
        await self._async_handle_command("turn_off")
|
|
import os
import sys
import unittest
import ray
from ray.rllib import _register_all
from ray import tune
from ray.tune import TuneError, register_trainable
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.schedulers import TrialScheduler, FIFOScheduler
from ray.tune.trial import Trial
from ray.tune.trial_runner import TrialRunner
from ray.tune.resources import Resources
from ray.tune.suggest import BasicVariantGenerator
class TrialRunnerTest(unittest.TestCase):
    def setUp(self):
        """Prime env vars for deterministic trial startup, re-register trainables."""
        # Wait up to five seconds for placement groups when starting a trial
        os.environ["TUNE_PLACEMENT_GROUP_WAIT_S"] = "5"
        # Block for results even when placement groups are pending
        os.environ["TUNE_TRIAL_STARTUP_GRACE_PERIOD"] = "0"
        _register_all()  # re-register the evicted objects
    def tearDown(self):
        """Shut down the Ray cluster started by the test."""
        ray.shutdown()
    def testTrialStatus(self):
        """Trial status follows start/stop/error executor transitions."""
        ray.init(num_cpus=2)

        trial = Trial("__fake")
        trial_executor = RayTrialExecutor()
        self.assertEqual(trial.status, Trial.PENDING)
        trial_executor.start_trial(trial)
        self.assertEqual(trial.status, Trial.RUNNING)
        trial_executor.stop_trial(trial)
        self.assertEqual(trial.status, Trial.TERMINATED)
        # Stopping again with error=True marks the trial as errored.
        trial_executor.stop_trial(trial, error=True)
        self.assertEqual(trial.status, Trial.ERROR)
    def testExperimentTagTruncation(self):
        """Oversized config keys/values must not blow up the trial logdir name."""
        ray.init(num_cpus=2)

        def train(config, reporter):
            reporter(timesteps_total=1)

        trial_executor = RayTrialExecutor()
        register_trainable("f1", train)

        experiments = {
            "foo": {
                "run": "f1",
                "config": {
                    # Deliberately long keys and values to exercise the
                    # experiment-tag truncation logic.
                    "a" * 50: tune.sample_from(lambda spec: 5.0 / 7),
                    "b" * 50: tune.sample_from(lambda spec: "long" * 40)
                },
            }
        }

        for name, spec in experiments.items():
            trial_generator = BasicVariantGenerator()
            trial_generator.add_configurations({name: spec})
            while not trial_generator.is_finished():
                trial = trial_generator.next_trial()
                if not trial:
                    break
                trial_executor.start_trial(trial)
                # The basename of the logdir encodes the experiment tag;
                # it must stay under the filesystem-safe 200-char bound.
                self.assertLessEqual(len(os.path.basename(trial.logdir)), 200)
                trial_executor.stop_trial(trial)
def testExtraResources(self):
ray.init(num_cpus=4, num_gpus=2)
runner = TrialRunner()
kwargs = {
"stopping_criterion": {
"training_iteration": 1
},
"resources": Resources(cpu=1, gpu=0, extra_cpu=3, extra_gpu=1),
}
trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
for t in trials:
runner.add_trial(t)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(trials[1].status, Trial.PENDING)
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.PENDING)
def testCustomResources(self):
ray.init(num_cpus=4, num_gpus=2, resources={"a": 2})
runner = TrialRunner()
kwargs = {
"stopping_criterion": {
"training_iteration": 1
},
"resources": Resources(cpu=1, gpu=0, custom_resources={"a": 2}),
}
trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
for t in trials:
runner.add_trial(t)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(trials[1].status, Trial.PENDING)
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.PENDING)
    def testExtraCustomResources(self):
        """Extra custom resources also gate concurrent scheduling."""
        ray.init(num_cpus=4, num_gpus=2, resources={"a": 2})
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 1
            },
            "resources": Resources(
                cpu=1, gpu=0, extra_custom_resources={"a": 2}),
        }
        trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
        for t in trials:
            runner.add_trial(t)

        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.PENDING)

        runner.step()
        # At most one trial may hold the full "a" allocation at any time.
        self.assertTrue(sum(t.status == Trial.RUNNING for t in trials) < 2)
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.PENDING)
    def testCustomResources2(self):
        """has_resources distinguishes satisfiable from unsatisfiable requests."""
        ray.init(num_cpus=4, num_gpus=2, resources={"a": 2})
        runner = TrialRunner()
        # Requests within the cluster's 2 units of "a" are accepted,
        # whether plain or "extra" custom resources.
        resource1 = Resources(cpu=1, gpu=0, extra_custom_resources={"a": 2})
        self.assertTrue(runner.has_resources(resource1))
        resource2 = Resources(cpu=1, gpu=0, custom_resources={"a": 2})
        self.assertTrue(runner.has_resources(resource2))
        # Requests above the cluster capacity must be rejected.
        resource3 = Resources(cpu=1, gpu=0, custom_resources={"a": 3})
        self.assertFalse(runner.has_resources(resource3))
        resource4 = Resources(cpu=1, gpu=0, extra_custom_resources={"a": 3})
        self.assertFalse(runner.has_resources(resource4))
    def testFractionalGpus(self):
        """Fractional GPU requests pack correctly onto a single GPU.

        Four trials each ask for 0.5 GPU on a 1-GPU cluster, so exactly two
        can run at once and the other two remain PENDING.
        """
        ray.init(num_cpus=4, num_gpus=1)
        runner = TrialRunner()
        kwargs = {
            "resources": Resources(cpu=1, gpu=0.5),
        }
        trials = [
            Trial("__fake", **kwargs),
            Trial("__fake", **kwargs),
            Trial("__fake", **kwargs),
            Trial("__fake", **kwargs)
        ]
        for t in trials:
            runner.add_trial(t)
        # No stopping criterion: the first two trials keep running; the rest
        # never get capacity no matter how many times we step.
        for _ in range(10):
            runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.RUNNING)
        self.assertEqual(trials[2].status, Trial.PENDING)
        self.assertEqual(trials[3].status, Trial.PENDING)
def testResourceNumericalError(self):
resource = Resources(cpu=0.99, gpu=0.99, custom_resources={"a": 0.99})
small_resource = Resources(
cpu=0.33, gpu=0.33, custom_resources={"a": 0.33})
for i in range(3):
resource = Resources.subtract(resource, small_resource)
self.assertTrue(resource.is_nonnegative())
    def testResourceScheduler(self):
        """Trials are scheduled sequentially when each needs the whole GPU.

        With 1 GPU and each trial asking for 1 GPU, the trials run strictly
        one after the other across four runner steps.
        """
        ray.init(num_cpus=4, num_gpus=1)
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 1
            },
            "resources": Resources(cpu=1, gpu=1),
        }
        trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
        for t in trials:
            runner.add_trial(t)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.PENDING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.PENDING)
        runner.step()
        # GPU freed by trial 0; trial 1 can now start.
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.TERMINATED)
    def testMultiStepRun(self):
        """Both trials fit concurrently and keep RUNNING across steps.

        Two GPUs are available, each trial needs one, and the stopping
        criterion (5 iterations) is not reached within four steps.
        """
        ray.init(num_cpus=4, num_gpus=2)
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 5
            },
            "resources": Resources(cpu=1, gpu=1),
        }
        trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
        for t in trials:
            runner.add_trial(t)
        runner.step()
        # First step starts only the first trial.
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.PENDING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.RUNNING)
    def testMultiStepRun2(self):
        """Checks that runner.step throws when overstepping.

        After the single trial terminates there is nothing left to schedule,
        so a further step must raise TuneError.
        """
        ray.init(num_cpus=1)
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 2
            },
            "resources": Resources(cpu=1, gpu=0),
        }
        trials = [Trial("__fake", **kwargs)]
        for t in trials:
            runner.add_trial(t)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        # No remaining work: stepping an idle runner is an error.
        self.assertRaises(TuneError, runner.step)
    def testChangeResources(self):
        """Checks that resource requirements can be changed on fly.

        A custom scheduler stops the trial after its first result, updates
        its resource request from 1 to 2 CPUs, and restarts it; direct
        update_resources on a RUNNING trial must raise.
        """
        os.environ["TUNE_PLACEMENT_GROUP_AUTO_DISABLED"] = "1"
        ray.init(num_cpus=2)
        class ChangingScheduler(FIFOScheduler):
            # After the first training iteration, bounce the trial with a
            # doubled CPU request (stop -> update -> start).
            def on_trial_result(self, trial_runner, trial, result):
                if result["training_iteration"] == 1:
                    executor = trial_runner.trial_executor
                    executor.stop_trial(trial)
                    trial.update_resources(dict(cpu=2, gpu=0))
                    executor.start_trial(trial)
                return TrialScheduler.CONTINUE
        runner = TrialRunner(scheduler=ChangingScheduler())
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 2
            },
            "resources": Resources(cpu=1, gpu=0),
        }
        trials = [Trial("__fake", **kwargs)]
        for t in trials:
            runner.add_trial(t)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(runner.trial_executor._committed_resources.cpu, 1)
        # Changing resources while the trial is RUNNING (outside the
        # scheduler's stop/start dance) is rejected.
        self.assertRaises(
            ValueError, lambda: trials[0].update_resources(dict(cpu=2, gpu=0)))
        runner.step()
        # Scheduler restarted the trial with the new 2-CPU reservation.
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(runner.trial_executor._committed_resources.cpu, 2)
    def testQueueFilling(self):
        """With TUNE_MAX_PENDING_TRIALS_PG=1 the runner queues one extra trial.

        100 samples are configured, but with 4 CPUs and 2 CPUs per trial only
        two can run; the pending cap keeps total created trials at three.
        """
        os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
        ray.init(num_cpus=4)
        def f1(config):
            for i in range(10):
                yield i
        tune.register_trainable("f1", f1)
        search_alg = BasicVariantGenerator()
        search_alg.add_configurations({
            "foo": {
                "run": "f1",
                "num_samples": 100,
                "config": {
                    "a": tune.sample_from(lambda spec: 5.0 / 7),
                    "b": tune.sample_from(lambda spec: "long" * 40)
                },
                "resources_per_trial": {
                    "cpu": 2
                }
            }
        })
        runner = TrialRunner(search_alg=search_alg)
        runner.step()
        runner.step()
        runner.step()
        self.assertEqual(len(runner._trials), 3)
        runner.step()
        # Stepping again must not create a fourth trial.
        self.assertEqual(len(runner._trials), 3)
        self.assertEqual(runner._trials[0].status, Trial.RUNNING)
        self.assertEqual(runner._trials[1].status, Trial.RUNNING)
        self.assertEqual(runner._trials[2].status, Trial.PENDING)
if __name__ == "__main__":
    # Run this module's tests under pytest when invoked directly.
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
|
#!/usr/bin/env python
import breakdancer
from breakdancer import Condition, Effect, Action, Driver
TESTKEY = 'testkey'
######################################################################
# Conditions
######################################################################
class ExistsCondition(Condition):
    """Condition: the test key is present in the state dict."""
    def __call__(self, state):
        return TESTKEY in state
class ExistsAsNumber(Condition):
    """Condition: the test key exists and its value parses as an integer."""
    def __call__(self, state):
        try:
            int(state[TESTKEY])
            return True
        except (KeyError, ValueError, TypeError):
            # Was a bare "except:", which also swallowed SystemExit and
            # KeyboardInterrupt. Only a missing key or a non-numeric value
            # should mean "not a number".
            return False
class MaybeExistsAsNumber(ExistsAsNumber):
    """Condition: the test key is absent, or present with a numeric value."""
    def __call__(self, state):
        return TESTKEY not in state or ExistsAsNumber.__call__(self, state)
class DoesNotExistCondition(Condition):
    """Condition: the test key is absent from the state dict."""
    def __call__(self, state):
        return TESTKEY not in state
class NothingExistsCondition(Condition):
    """Condition: the state dict is completely empty."""
    def __call__(self, state):
        return not bool(state)
######################################################################
# Effects
######################################################################
class StoreEffect(Effect):
    """Effect: store a fixed value (default '0') under the test key."""
    def __init__(self, v='0'):
        self.v = v
    def __call__(self, state):
        state[TESTKEY] = self.v
class DeleteEffect(Effect):
    """Effect: remove the test key; raises KeyError if it is absent."""
    def __call__(self, state):
        del state[TESTKEY]
class FlushEffect(Effect):
    """Effect: wipe the entire state dict."""
    def __call__(self, state):
        state.clear()
class AppendEffect(Effect):
    """Effect: append a fixed suffix to the stored test value."""
    suffix = '-suffix'
    def __call__(self, state):
        # String concatenation in place; KeyError if the key is absent.
        state[TESTKEY] += self.suffix
class PrependEffect(Effect):
    """Effect: prepend a fixed prefix to the stored test value."""
    prefix = 'prefix-'
    def __call__(self, state):
        state[TESTKEY] = self.prefix + state[TESTKEY]
class ArithmeticEffect(Effect):
    """Effect: add `by` to the stored integer value, clamping at zero.

    If the key is absent, stores the default ('0') instead of computing.
    """
    default = '0'
    def __init__(self, by=1):
        self.by = by
    def __call__(self, state):
        if TESTKEY in state:
            # Values are kept as strings; never goes below zero.
            state[TESTKEY] = str(max(0, int(state[TESTKEY]) + self.by))
        else:
            state[TESTKEY] = self.default
######################################################################
# Actions
######################################################################
class Set(Action):
    """Unconditionally store the value; afterwards the key exists."""
    effect = StoreEffect()
    postconditions = [ExistsCondition()]
class Add(Action):
    """Store the value only when the key does not already exist."""
    preconditions = [DoesNotExistCondition()]
    effect = StoreEffect()
    postconditions = [ExistsCondition()]
class Delete(Action):
    """Remove an existing key; afterwards it must be gone."""
    preconditions = [ExistsCondition()]
    effect = DeleteEffect()
    postconditions = [DoesNotExistCondition()]
class Flush(Action):
    """Clear all state; afterwards nothing exists."""
    effect = FlushEffect()
    postconditions = [NothingExistsCondition()]
class Delay(Flush):
    # Same state effect as Flush (expiry empties the state); the driver
    # renders it as a delay() call instead of a flush().
    pass
class Append(Action):
    """Append to an existing value; key must exist before and after."""
    preconditions = [ExistsCondition()]
    effect = AppendEffect()
    postconditions = [ExistsCondition()]
class Prepend(Action):
    """Prepend to an existing value; key must exist before and after."""
    preconditions = [ExistsCondition()]
    effect = PrependEffect()
    postconditions = [ExistsCondition()]
class Incr(Action):
    """Increment an existing numeric value by one."""
    preconditions = [ExistsAsNumber()]
    effect = ArithmeticEffect(1)
    postconditions = [ExistsAsNumber()]
class Decr(Action):
    """Decrement an existing numeric value by one (clamped at zero)."""
    preconditions = [ExistsAsNumber()]
    effect = ArithmeticEffect(-1)
    postconditions = [ExistsAsNumber()]
class IncrWithDefault(Action):
    """Increment, seeding the default value when the key is absent."""
    preconditions = [MaybeExistsAsNumber()]
    effect = ArithmeticEffect(1)
    postconditions = [ExistsAsNumber()]
class DecrWithDefault(Action):
    """Decrement, seeding the default value when the key is absent."""
    preconditions = [MaybeExistsAsNumber()]
    effect = ArithmeticEffect(-1)
    postconditions = [ExistsAsNumber()]
######################################################################
# Driver
######################################################################
class EngineTestAppDriver(Driver):
    """Driver that renders action sequences as a generated C test suite.

    All output goes to stdout via Python 2 print statements; each sequence
    of actions becomes one C test function plus a registry entry.
    """
    def preSuite(self, seq):
        # File prologue, emitted once before any test function.
        print '/* DO NOT EDIT.. GENERATED SOURCE */'
        print ""
        print '#include "testsuite/breakdancer/suite_stubs.h"'
    def testName(self, seq):
        """C identifier for the test covering this action sequence."""
        return 'test_' + '_'.join(a.name for a in seq)
    def startSequence(self, seq):
        # Open the test function; the second parameter is aligned under the
        # first by padding with len(signature-prefix)+1 spaces.
        f = "static enum test_result %s" % self.testName(seq)
        print ("%s(ENGINE_HANDLE *h,\n%sENGINE_HANDLE_V1 *h1) {"
               % (f, " " * (len(f) + 1)))
    def startAction(self, action):
        # Map each abstract action onto the matching C helper call.
        if isinstance(action, Delay):
            s = " delay(expiry+1);"
        elif isinstance(action, Flush):
            s = " flush(h, h1);"
        elif isinstance(action, Delete):
            s = ' del(h, h1);'
        else:
            s = ' %s(h, h1);' % (action.name)
        print s
    def postSuite(self, seq):
        # Emit the engine_test_t registry listing every generated test.
        print """MEMCACHED_PUBLIC_API
engine_test_t* get_tests(void) {
    static engine_test_t tests[] = {
"""
        for seq in sorted(seq):
            print ' {"%s",\n %s,\n test_setup, teardown, NULL},' % (
                ', '.join(a.name for a in seq),
                self.testName(seq))
        print """ {NULL, NULL, NULL, NULL, NULL}
};
return tests;
}"""
    def endSequence(self, seq, state):
        # Close the test function, asserting the final expected state.
        val = state.get(TESTKEY)
        if val:
            print ' checkValue(h, h1, "%s");' % val
        else:
            print ' assertNotExists(h, h1);'
        print " return SUCCESS;"
        print "}"
        print ""
    def endAction(self, action, state, errored):
        # After each action, assert whether it should have errored, and
        # annotate with the model's current value for readability.
        value = state.get(TESTKEY)
        if value:
            vs = ' /* value is "%s"' % value + " */"
        else:
            vs = ' /* value is not defined */'
        if errored:
            print " assertHasError();" + vs
        else:
            print " assertHasNoError();" + vs
if __name__ == '__main__':
    # Discover every Action subclass defined above and generate the suite.
    breakdancer.runTest(breakdancer.findActions(globals().values()),
                        EngineTestAppDriver())
|
|
from __future__ import division
import math
import random
import urllib
import os.path
import sys
import time
import logging
logger = logging.getLogger(__name__)
from PIL import Image, ImageDraw
from PIL.ImageFilter import EDGE_ENHANCE
from molly.maps.osm.models import OSMTile, get_marker_dir
def log2(x):
    """
    Base-2 logarithm of x, computed via the natural log.

    @return: log(x)/log(2)
    """
    natural = math.log(x)
    return natural / math.log(2)
def get_tile_ref(lon_deg, lat_deg, zoom):
    """
    Gets OSM tile co-ordinates for the specified longitude, latitude and zoom
    level (standard Web Mercator "slippy map" tiling).
    @param lon_deg: The longitude, in degrees
    @type lon_deg: float
    @param lat_deg: The latitude, in degrees
    @type lat_deg: float
    @param zoom: The zoom level to get the tile references for
    @type zoom: int
    @return: A tuple of (x, y) fractional co-ordinates for the OSM tile
    """
    scale = 2.0 ** zoom
    lat_rad = lat_deg * math.pi / 180.0
    # Mercator projection of the latitude: ln(tan(lat) + sec(lat)).
    mercator = math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad)))
    x = (lon_deg + 180.0) / 360.0 * scale
    y = (1.0 - mercator / math.pi) / 2.0 * scale
    return (x, y)
def get_tile_geo(xtile, ytile, zoom):
    """
    Gets the longitude and latitude corresponding to a particular set of OSM
    tile co-ordinates (inverse of get_tile_ref).
    @param xtile: The tile x co-ordinate
    @type xtile: float
    @param ytile: The tile y co-ordinate
    @type ytile: float
    @param zoom: The zoom level this tile exists at
    @type zoom: int
    @return: A tuple of (long, lat) co-ordinates for the OSM tile
    """
    scale = 2.0 ** zoom
    lon_deg = xtile / scale * 360.0 - 180.0
    # Inverse Mercator: atan(sinh(...)) recovers the latitude in radians.
    lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / scale)))
    return (lon_deg, lat_rad * 180.0 / math.pi)
def minmax(i):
    """
    Gets the minimum and maximum values in some iterable.

    Starts from (+inf, -inf), so an empty iterable yields (inf, -inf)
    rather than raising.
    @param i: The iterable for the maximum and minimum values to be
              obtained from
    @type i: list
    @return: A tuple (min, max)
    """
    lo = float('inf')
    hi = float('-inf')
    for value in i:
        if value < lo:
            lo = value
        if value > hi:
            hi = value
    return lo, hi
def get_map(points, width, height, filename, zoom=None, lon_center=None,
            lat_center=None, paths=[]):
    """
    Generates a map for the passed in arguments, saving that to filename
    @param points: The points where markers on the map should be added. This
                   should be a list of tuples corresponding to the points where
                   markers should be added. These tuples should be in the form
                   (latitude, longitude, colour, index), where acceptable values
                   of colour are specified in @C{utils.MARKER_COLOURS}, and
                   index is the number to appear on the marker, or None if
                   you want a star to appear
    @type points: list
    @param width: The width of the generated map image, in pixels
    @type width: int
    @param height: The height of the generated map image, in pixels
    @type height: int
    @param filename: The name of the file to write the generated map to
    @type filename: str
    @param zoom: The maximum zoom level which to generate this map at
    @type zoom: int
    @param lon_center: The actual center of the generated map
    @type lon_center: float
    @param lat_center: The actual center of the generated map
    @type lat_center: float
    @param paths: (path, colour) pairs to draw as anti-aliased lines
    @type paths: list
    @return: The (lon_center, lat_center) actually used
    @raise MapGenerationError: if any tile failed to download (the fallback
           tile is used and the partial map is still written first)

    NOTE(review): paths=[] is a mutable default argument; it is only read
    here, but confirm no caller mutates it.
    """
    # NOTE(review): per the docstring, callers pass (lat, lon, ...) tuples,
    # so the lon_/lat_ variable naming below appears swapped — the values are
    # used consistently with p[0]/p[1] throughout, so behavior is coherent.
    lon_min, lon_max = minmax(p[0] for p in points)
    lat_min, lat_max = minmax(p[1] for p in points)
    if not zoom:
        # Pick a zoom that fits the spread of points into the smaller image
        # dimension; a single point gets a fixed close-up zoom.
        size = min(width, height)
        if lat_min != lat_max:
            zoom = int(log2(360/abs(lat_min - lat_max)) + log2(size/256)-1.0)
        else:
            zoom = 16
    # Convert each point to fractional tile coordinates, keeping its colour
    # and marker index.
    points = [(get_tile_ref(p[0], p[1], zoom), p[2], p[3]) for p in points]
    lon_range, lat_range = lon_max - lon_min, lat_min - lat_max
    if not lat_center:
        lon_center, lat_center = (lon_min + lon_max)/2, (lat_min + lat_max)/2
    # Integer tile bounds covering all points (max is made exclusive).
    tx_min, tx_max = map(int, minmax(p[0][0] for p in points))
    ty_min, ty_max = map(int, minmax(p[0][1] for p in points))
    ty_max, tx_max = ty_max+1, tx_max+1
    # Pixel offset of the image origin relative to the tile grid, centred on
    # (lon_center, lat_center); shifted up 10px to leave room for markers.
    cx, cy = get_tile_ref(lon_center, lat_center, zoom)
    oxc = int((cx - tx_min) * 256 - width/2)
    oyc = int((cy - ty_min) * 256 - height/2)
    ox, oy = oxc, oyc-10
    # Expand the tile range so the whole viewport is covered.
    tx_min_ = int(tx_min + ox/256)
    tx_max_ = int(tx_max + (width+ox)/256)
    ty_min_ = int(ty_min + oy/256)
    ty_max_ = int(ty_max + (height+oy)/256)
    tiles = [{ 'ref':(tx, ty) }
        for tx in range(tx_min_, tx_max_) for ty in range(ty_min_, ty_max_)]
    # Create a new blank image for us to add the tiles on to
    image = Image.new('RGBA', (width, height))
    # Keep track of if the image if malformed or not
    malformed = False
    # Lots of different tiles contain the parts we're interested in, so take the
    # parts of those tiles, and copy them into our new image
    for tile in tiles:
        offx = (tile['ref'][0] - tx_min) * 256 - ox
        offy = (tile['ref'][1] - ty_min) * 256 - oy
        # Skip tiles entirely outside the viewport.
        if not (-256 < offx and offx < width and -256 < offy and offy < height):
            continue
        try:
            tile_data = OSMTile.get_data(tile['ref'][0], tile['ref'][1], zoom)
            tile['surface'] = Image.open(tile_data)
        except Exception as e:
            # Substitute the fallback tile and remember to raise at the end.
            logger.exception('Failed to fetch OSM tile')
            tile['surface'] = Image.open(os.path.join(os.path.dirname(__file__), 'fallback', 'fallback.png'))
            malformed = True
        image.paste(tile['surface'], ((tile['ref'][0] - tx_min) * 256 - ox, (tile['ref'][1] - ty_min) * 256 - oy))
    # Now add the paths to the image
    paths_canvas = Image.new('RGBA', (width, height))
    drawing = ImageDraw.Draw(paths_canvas)
    for path, colour in paths:
        # Python 2 tuple-unpacking lambda: project each path coordinate into
        # image pixel space before drawing.
        drawing.line(map(lambda (x,y): (int((x - tx_min) * 256 - ox),
                                        int((y - ty_min) * 256 - oy)),
                         map(lambda x: get_tile_ref(*x, zoom=zoom), path.coords)),
                     fill=colour, width=4)
    paths_canvas = paths_canvas.filter(EDGE_ENHANCE) # Anti-alias
    # 50% transparency
    paths_canvas = Image.blend(paths_canvas, Image.new('RGBA', (width, height)), 0.5)
    image.paste(paths_canvas, None, paths_canvas)
    # Now add the markers to the image
    # Draw top-to-bottom so lower markers overlap the ones above them.
    points.sort(key=lambda p:p[0][1])
    marker_dir = get_marker_dir()
    for (tx, ty), color, index in points:
        # Stars anchor at their centre; numbered pins anchor at the pin tip.
        if index is None:
            off, fn = (10, 10), "%s_star.png" % color
        else:
            off, fn = (10, 25), "%s_%d.png" % (color, index)
        fn = os.path.join(marker_dir, fn)
        marker = Image.open(fn)
        off = (
            int((tx - tx_min) * 256 - off[0] - ox),
            int((ty - ty_min) * 256 - off[1] - oy),
        )
        image.paste(marker, (off[0], off[1]), marker)
    image.save(filename, 'png')
    if malformed:
        raise MapGenerationError((lon_center, lat_center))
    return lon_center, lat_center
class PointSet(set):
    """A set of (x, y) points that also tracks insertion order and the
    running bounding box of its members.

    Note: update() intentionally shadows set.update with single-point
    semantics; it is the bookkeeping hook used by __init__ and add().
    """
    def __init__(self, initial=None):
        """
        @param initial: An initial point set to use
        @type initial: ( (float, float) )
        """
        super(PointSet, self).__init__(initial)
        self._min = (float('inf'), float('inf'))
        self._max = (float('-inf'), float('-inf'))
        self.ordered = []
        # Record order and bounds for every initial point (duplicates in
        # `initial` are appended to `ordered` once per occurrence).
        for point in initial:
            self.update(point)
    def add(self, point):
        """
        Add a point to the set
        @param point: The point to be added
        @type point: (float, float)
        """
        super(PointSet, self).add(point)
        self.update(point)
    def remove(self, point):
        """
        Remove a point from the set
        @param point: The point to be removed
        @type point: (float, float)
        """
        self.ordered.remove(point)
        super(PointSet, self).remove(point)
        # If the removed point sat on either bounding edge, the cached
        # bounds may be stale: rebuild them from the remaining members.
        on_boundary = any(
            point[axis] in (self._min[axis], self._max[axis])
            for axis in range(2))
        if on_boundary:
            self._min = (float('inf'), float('inf'))
            self._max = (float('-inf'), float('-inf'))
            for member in self:
                self._min = (min(self._min[0], member[0]),
                             min(self._min[1], member[1]))
                self._max = (max(self._max[0], member[0]),
                             max(self._max[1], member[1]))
    def update(self, point):
        """
        Record one point in the insertion order and fold it into the
        cached bounding box.
        @param point: The point to be added
        @type point: (float, float)
        """
        self.ordered.append(point)
        self._min = (min(self._min[0], point[0]), min(self._min[1], point[1]))
        self._max = (max(self._max[0], point[0]), max(self._max[1], point[1]))
    def extent(self, zoom):
        """
        Get the bounding box of this set of points, in pixels at the
        given zoom level.
        @param zoom: The zoom level to use
        """
        top_left = get_tile_ref(self._min[0], self._min[1], zoom)
        bottom_right = get_tile_ref(self._max[0], self._max[1], zoom)
        return ((bottom_right[0] - top_left[0]) * 256,
                (top_left[1] - bottom_right[1]) * 256)
    def contained_within(self, box, zoom):
        """
        Check if this pointset fits inside @C{box} at the specified zoom level
        """
        width, height = self.extent(zoom)
        return width <= box[0] and height <= box[1]
def get_fitted_map(centre_point, points, min_points, zoom, width, height,
                   extra_points, paths, filename):
    """
    Given a list of points and some minimum number of points, then a "fitted
    map" is generated, which is one which contains at least @C{min_points}, and
    is at least at the zoom level @C{zoom}, but also contains any other points
    in the list which is inside the bounding area of this minimal map.
    Valid colours in point definitions below are defined in @C{MARKER_COLOURS}
    @param centre_point: A tuple of longitude, latitude and colour corresponding
                         to the "centre" of the map. This is NOT the central
                         latitude/longitude of the generated image, which is
                         simply the middle of the set of points passed in, but
                         simply a special marker which is indicated with a star.
    @type centre_point: (float, float, str) or None
    @param points: An (ordered) list of points to be plotted on the map. These
                   are indicated on the map with numbered markers. This list
                   consists of tuples of longitude, latitude and a string
                   indicating the colours of the markers to be rendered.
    @type points: [(float, float, str)]
    @param min_points: The minimum number of points to be displayed on the
                       resulting map
    @type min_points: int
    @param zoom: A bound on the maximum zoom level to be rendered. If this zoom
                 level is too small to fit @C{min_points} points on it, then the
                 map will be zoomed out further to fit in. If this is None, then
                 this is equivalent to the smallest zoom level.
    @type zoom: int
    @param width: The width of the generated map image, in pixels
    @type width: int
    @param height: The height of the generated map image, in pixels
    @type height: int
    @raise MapGenerationError: If a map can not be generated (normally if the
                               OSM tile server is down)
    @return: (new_points, zoom, lon_center, lat_center)
    """
    # If we haven't been given a zoom, start as close as we can
    if not zoom:
        zoom = 18
    # Usable area, leaving margins for marker images at the edges.
    box = max(64, width - 20), max(64, height-35)
    # Merge consecutive duplicate points, remembering the original indices
    # that collapsed onto each unique point.
    # NOTE(review): the i>1 guard means a duplicate at index 1 is never
    # merged with index 0; possibly intended to be i>=1 — confirm.
    new_points = []
    for i, point in enumerate(points):
        if i>1 and point == new_points[-1][0]:
            new_points[-1][1].append(i)
        else:
            new_points.append( (point, [i]) )
    points = [p[0] for p in new_points]
    # Include extra_points in bounding_box
    points = list(extra_points) + points
    min_points += len(extra_points)
    # Include the central point in the points to be considered
    if centre_point:
        points = [centre_point] + points
    # Get a set of the minimum points
    point_set, points = PointSet(points[:min_points+1]), points[min_points+1:]
    # Zoom out until the entire point set fits inside the generated map
    while not point_set.contained_within(box, zoom):
        zoom -= 1
    # If there are points outside the minimum points, see if they fit inside
    # the bounding box of the minimum points (or the specified zoom level).
    # The while/else pair matters: leaving the loop via `break` (points
    # exhausted) keeps everything; leaving via the condition (last addition
    # no longer fits) rolls back that last point.
    while point_set.contained_within(box, zoom):
        if not points:
            break
        new_point, points = points[0], points[1:]
        point_set.add(new_point)
    else:
        point_set.remove(new_point)
    # Rebuild the marker list: extra points and the centre are unnumbered
    # (star markers, index None); fitted points get 1-based numbers.
    points = [(p[0], p[1], p[2], None) for p in extra_points]
    if centre_point:
        used_points = point_set.ordered[len(extra_points)+1:]
        points.append((centre_point[0], centre_point[1], centre_point[2], None))
    else:
        used_points = point_set.ordered[len(extra_points):]
    for i, point in enumerate(used_points):
        points.append(
            (point[0], point[1], point[2], i+1)
        )
    # Trim the merged-duplicates list to just the points that made the map.
    if centre_point:
        new_points = new_points[:len(point_set)-1]
    else:
        new_points = new_points[:len(point_set)]
    try:
        lon_center, lat_center = get_map(points, width, height, filename, zoom,
                                         paths=paths)
    except MapGenerationError as e:
        # Attach what we computed so the caller can still use the metadata.
        e.metadata = (new_points, zoom, e.metadata[0], e.metadata[1])
        raise
    return new_points, zoom, lon_center, lat_center
class MapGenerationError(Exception):
    """
    Indicates that a map was unable to be successfully generated, but one was
    still attempted to be, in which case the metadata of the generated map can
    be attached to this.
    """
    def __init__(self, metadata=None):
        # Arbitrary metadata about the partially-generated map (e.g. the
        # (lon_center, lat_center) used); callers may overwrite this.
        self.metadata = metadata
if __name__ == '__main__':
    # Manual smoke test: render a sample map around Oxford to foo.png.
    # NOTE(review): RED/GREEN/BLUE are defined but unused below.
    RED, GREEN, BLUE = (1, 0, 0), (0, 0.5, 0), (0.25, 0.25, 1)
    get_map(
        [
            (51.760283, -1.259941, 'blue', None),
            (51.760565, -1.259021, 'red', 1),
            (51.760009, -1.260275, 'green', 2),
            (51.760294, -1.258813, 'red', 3),
            (51.759805, -1.261170, 'green', 4),
            (51.759810, -1.261359, 'red', 5),
            (51.759662, -1.261110, 'green', 6),
            (51.759520, -1.260638, 'red', 7),
            (51.759247, -1.259904, 'green', 8),
            (51.759173, -1.259880, 'red', 9),
        ], 300, 200, 'foo.png')
|
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements inspection commands for buildtool.
1) buildtool.sh collect_bom_versions
2) buildtool.sh collect_artifact_versions
3) buildtool.sh audit_artifact_versions
This will produce files of things to prune.
They should be reviewed; remove the entries for anything you wish to keep.
Then to remove each of the artifacts:
for url in $(cat prune_jars.txt); do
curl -s -u$BINTRAY_USER:$BINTRAY_KEY -X DELETE $url &
done
wait
for url in $(cat prune_debians.txt); do
curl -s -u$BINTRAY_USER:$BINTRAY_KEY -X DELETE $url &
done
wait
for url in $(cat prune_containers.txt); do
gcloud -q container images delete $url --force-delete-tags &
done
wait
for image_name in $(cat prune_images.txt); do
gcloud -q compute images --project $PROJECT delete $image_name &
done
for url in $(cat prune_boms.txt); do
gsutil rm $url
done
"""
from threading import current_thread
from multiprocessing.pool import ThreadPool
import base64
import json
import logging
import os
import re
import sys
import yaml
try:
from urllib2 import urlopen, HTTPError, Request
except ImportError:
from urllib.request import urlopen, Request
from urllib.error import HTTPError
from buildtool import (
CommandFactory,
CommandProcessor,
SemanticVersion,
check_options_set,
check_path_exists,
check_subprocess,
exception_to_message,
maybe_log_exception,
raise_and_log_error,
write_to_path,
ConfigError,
UnexpectedError,
ResponseError)
def my_unicode_representer(self, data):
  """YAML representer encoding Python 2 `unicode` values as utf-8 str."""
  return self.represent_str(data.encode('utf-8'))

# Python 2 only: teach yaml to serialize `unicode` objects.
if sys.version_info[0] == 2:
  yaml.representer.Representer.add_representer(unicode, my_unicode_representer)
# Disable anchors/aliases so dumped documents repeat shared objects inline.
yaml.Dumper.ignore_aliases = lambda *args: True
class CollectBomVersions(CommandProcessor):
"""Determine which artifact versions are in use by which boms.
Ultimately this produces an inverse map of boms. Whereas a bom
maps to a collection of services and their build information, this
produces a map of service build information and which boms they appear in.
Historically, boms were unique builds; however, this wasn't supposed to be
the case, and is no longer the case.
The map is further partitioned into two files where one contains
released boms and the other unreleased boms. Unreleased boms are not
necessarily obsolete.
Emits files:
bom_list.txt: A list of all the boms, released and unreleased
bad_boms.txt: A list of malformed boms with what makes it malformed.
all_bom_sevice_map.yml: The inverse service version mapping of all the boms
released_bom_service_map.yml: The subset of all_bom_service_map for boms
that were released.
unreleased_bom_service_map.yml: The subset of all_bom_service_map for
service versions that only appear in unreleased boms.
nonstandard_boms.txt: A list of boms whose artifactSources do not match
the values specified via options. Unspecified options match anything.
config.yml: The configuration values used to determine standard compliance.
"""
RELEASED_VERSION_MATCHER = re.compile(r'^\d+(?:\.\d+){2}$')
@staticmethod
def url_to_bom_name(url):
"""Given a url to a bom, return the name of the bom."""
name = url
dash = name.rfind('/')
if dash >= 0:
name = name[dash + 1:]
return os.path.splitext(name)[0]
def __init__(self, factory, options, **kwargs):
if options.bintray_org is None != options.bintray_debian_repository is None:
raise_and_log_error(
ConfigError('Either neither or both "bintray_org"'
' and "bintray_debian_repository" should be specified'))
self.__bad_files = {}
self.__non_standard_boms = {}
# We're going to have a bunch of threads each writing into different keys
# in order to deconflict with one another lockless. Then we'll aggregate
# it all together when we're done processing for a single aggregate result.
self.__per_thread_result_map = {}
self.__expect_docker_registry = options.docker_registry
self.__expect_debian_repository = (
'https://dl.bintray.com/%s/%s' % (options.bintray_org,
options.bintray_debian_repository)
if options.bintray_org
else None)
super(CollectBomVersions, self).__init__(
factory, options, **kwargs)
def load_bom_from_url(self, url):
"""Returns the bom specification dict from a gcs url."""
logging.debug('Loading %s', url)
try:
text = check_subprocess('gsutil cat ' + url)
return yaml.safe_load(text)
except Exception as ex:
self.__bad_files[self.url_to_bom_name(url)] = exception_to_message(ex)
maybe_log_exception('load_from_from_url', ex,
action_msg='Skipping %s' % url)
return None
  def extract_bom_info(self, bom):
    """Return a minimal dict identifying this BOM.

    This also includes non-standard config specified by this BOM.
    A bom whose artifactSources deviate from the expected registry or
    debian repository is recorded into __non_standard_boms as a side effect.
    """
    info = {
        'bom_version': bom['version'],
        'bom_timestamp': bom.get('timestamp', 'NotRecorded')
    }
    artifact_sources = bom.get('artifactSources')
    if artifact_sources is None:
      logging.warning('%s does not have artifactSources', bom['version'])
      return info
    def add_if_nonstandard(name, expect):
      # Only deviations are recorded into info (standard values are omitted).
      if artifact_sources[name] != expect:
        logging.warning('%s has nonstandard %s = %s',
                        bom['version'], name, artifact_sources[name])
        info[name] = artifact_sources[name]
    add_if_nonstandard('dockerRegistry', self.__expect_docker_registry)
    add_if_nonstandard('debianRepository', self.__expect_debian_repository)
    # More than the two base keys means at least one nonstandard source.
    if len(info) > 2:
      problems = dict(info)
      del problems['bom_version']
      del problems['bom_timestamp']
      self.__non_standard_boms[bom['version']] = problems
    return info
def analyze_bom(self, bom):
"""Analyzes one bom and breaks it down into this threads result_map.
Boms are processed within a single thread, but multiple boms can be
processed in different threads.
"""
tid = current_thread().name
thread_service_map = self.__per_thread_result_map.get(tid, {})
self.__per_thread_result_map[tid] = thread_service_map
bom_info = self.extract_bom_info(bom)
for name, entry in bom['services'].items():
if name == 'defaultArtifact':
continue
build_version = entry['version']
parts = build_version.split('-', 1)
if len(parts) == 1:
version = parts[0]
buildnum = 'NotRecorded'
else:
version, buildnum = parts
commit = entry.get('commit', 'NotRecorded')
service_record = thread_service_map.get(name)
if service_record is None:
service_record = {}
thread_service_map[name] = service_record
version_map = service_record.get(version)
if version_map is None:
version_map = {}
service_record[version] = version_map
commit_map = version_map.get(commit)
if commit_map is None:
commit_map = {}
version_map[commit] = commit_map
build_list = commit_map.get(buildnum)
if build_list is None:
build_list = []
commit_map[buildnum] = build_list
build_list.append(bom_info)
  def ingest_bom(self, line):
    """Function to ingest a single bom into the result map.

    `line` is a gcs url to a bom file. A bom whose internal version does
    not match its filename is recorded in __bad_files and skipped.
    """
    bom = self.load_bom_from_url(line)
    if not bom:
      return
    try:
      # Sanity check: "<version>.yml" must equal the url's basename.
      if bom['version'] + '.yml' != line[line.rfind('/') + 1:]:
        message = 'BOM version "%s" != filename "%s"' % (bom['version'], line)
        self.__bad_files[self.url_to_bom_name(line.strip())] = message
        logging.warning(message)
        # Raised here so the except below records and logs it uniformly.
        raise_and_log_error(UnexpectedError(message))
      self.analyze_bom(bom)
    except Exception as ex:
      self.__bad_files[self.url_to_bom_name(line.strip())] = (
          exception_to_message(ex))
      maybe_log_exception('analyze_bom', ex,
                          action_msg='Skipping %s' % line)
  def join_result_maps(self):
    """Join the individual thread result maps into a single one.

    This assumes a single threaded environment. Each join_* helper merges
    one level of the service -> version -> commit -> buildnum nesting,
    creating missing containers on the result side as needed.
    """
    def join_buildnums(commit_buildnums, result_buildnums):
      # Leaf level: concatenate bom_info lists and keep them time-ordered.
      for buildnum, info_list in commit_buildnums.items():
        result_info_list = result_buildnums.get(buildnum)
        if result_info_list is None:
          result_info_list = []
          result_buildnums[buildnum] = result_info_list
        result_info_list.extend(info_list)
        result_info_list.sort(key=lambda info: info['bom_timestamp'])
    def join_commits(commit_map, result_commits):
      for commit, commit_buildnums in commit_map.items():
        result_buildnums = result_commits.get(commit)
        if result_buildnums is None:
          result_buildnums = {}
          result_commits[commit] = result_buildnums
        join_buildnums(commit_buildnums, result_buildnums)
    def join_versions(version_map, result_versions):
      for version, commit_map in version_map.items():
        result_commits = result_versions.get(version)
        if result_commits is None:
          result_commits = {}
          result_versions[version] = result_commits
        join_commits(commit_map, result_commits)
    def join_results(thread_results, result_map):
      for name, version_map in thread_results.items():
        result_versions = result_map.get(name)
        if result_versions is None:
          result_versions = {}
          result_map[name] = result_versions
        join_versions(version_map, result_versions)
    result_map = {}
    for thread_results in self.__per_thread_result_map.values():
      join_results(thread_results, result_map)
    return result_map
  def ingest_bom_list(self, bom_list):
    """Ingest each of the boms, fanning out over a thread pool.

    Returns the joined result map after all boms were processed.
    """
    # one_at_a_time forces serial processing (easier debugging).
    max_threads = 1 if self.options.one_at_a_time else 64
    pool = ThreadPool(min(max_threads, len(bom_list)))
    pool.map(self.ingest_bom, bom_list)
    pool.close()
    pool.join()
    return self.join_result_maps()
def list_bom_urls(self, gcs_dir_url_prefix):
"""Get a list of all the bom versions that exist."""
result = check_subprocess('gsutil ls ' + gcs_dir_url_prefix)
return [line for line in result.split('\n')
if line.startswith(gcs_dir_url_prefix) and line.endswith('.yml')]
def _do_command(self):
  """Reads the list of boms, then concurrently processes them.

  Ultimately it will write out the analysis into bom_service_map.yml
  """
  options = self.options
  url_prefix = 'gs://%s/bom/' % options.halyard_bom_bucket
  if options.version_name_prefix:
    # Restrict the listing to bom versions starting with the prefix.
    url_prefix += options.version_name_prefix
  logging.debug('Listing BOM urls')
  results = self.list_bom_urls(url_prefix)
  # Keep a record of every bom url considered by this run.
  write_to_path('\n'.join(sorted(results)),
                os.path.join(self.get_output_dir(), 'bom_list.txt'))
  result_map = self.ingest_bom_list(results)

  path = os.path.join(self.get_output_dir(), 'all_bom_service_map.yml')
  logging.info('Writing bom analysis to %s', path)
  write_to_path(yaml.safe_dump(result_map, default_flow_style=False), path)

  # Emit the released and unreleased partitions as separate files.
  partition_names = ['released', 'unreleased']
  partitions = self.partition_service_map(result_map)
  for index, data in enumerate(partitions):
    path = os.path.join(self.get_output_dir(),
                        partition_names[index] + '_bom_service_map.yml')
    logging.info('Writing bom analysis to %s', path)
    write_to_path(yaml.safe_dump(data, default_flow_style=False), path)

  if self.__bad_files:
    # URLs whose boms could not be ingested at all.
    path = os.path.join(self.get_output_dir(), 'bad_boms.txt')
    logging.warning('Writing %d bad URLs to %s', len(self.__bad_files), path)
    write_to_path(
        yaml.safe_dump(self.__bad_files, default_flow_style=False),
        path)

  if self.__non_standard_boms:
    path = os.path.join(self.get_output_dir(), 'nonstandard_boms.txt')
    logging.warning('Writing %d nonstandard boms to %s',
                    len(self.__non_standard_boms), path)
    write_to_path(
        yaml.safe_dump(self.__non_standard_boms, default_flow_style=False),
        path)

  # Record the configuration used so downstream commands can find it.
  config = {
      'halyard_bom_bucket': options.halyard_bom_bucket
  }
  path = os.path.join(self.get_output_dir(), 'config.yml')
  logging.info('Writing to %s', path)
  write_to_path(yaml.safe_dump(config, default_flow_style=False), path)
def partition_service_map(self, result_map):
  """Split the collected bom info into (released, unreleased) halves.

  Returns a pair of maps keyed by service name, each mirroring the shape
  of result_map (service -> version -> commit -> buildnum -> info list).
  At the version level, an empty partition is recorded as None.
  """
  def split_info_list(info_list):
    # An info entry is "released" when its bom_version matches the
    # released-version pattern. If any entry was released, the build is
    # considered released and its unreleased copies are dropped entirely.
    released = [info for info in info_list
                if self.RELEASED_VERSION_MATCHER.match(info['bom_version'])]
    unreleased = [] if released else list(info_list)
    return released, unreleased

  def split_dict(mapping, split_child):
    # Generic partition of one dict level; keys whose half came back
    # empty are omitted from that half.
    released = {}
    unreleased = {}
    for key, child in mapping.items():
      rel, unrel = split_child(child)
      if rel:
        released[key] = rel
      if unrel:
        unreleased[key] = unrel
    return released, unreleased

  split_buildnums = lambda buildnum_map: split_dict(buildnum_map,
                                                    split_info_list)
  split_commits = lambda commit_map: split_dict(commit_map, split_buildnums)

  def split_versions(version_map):
    rel, unrel = split_dict(version_map, split_commits)
    # Unlike the inner levels, report empty halves as None here.
    return rel or None, unrel or None

  released = {}
  unreleased = {}
  for name, version_map in result_map.items():
    released[name], unreleased[name] = split_versions(version_map)
  return released, unreleased
class CollectBomVersionsFactory(CommandFactory):
  """Factory for the collect_bom_versions command."""

  def __init__(self, **kwargs):
    super(CollectBomVersionsFactory, self).__init__(
        'collect_bom_versions', CollectBomVersions,
        'Find information about bom versions.', **kwargs)

  def init_argparser(self, parser, defaults):
    """Register the command-line arguments this command consumes."""
    super(CollectBomVersionsFactory, self).init_argparser(parser, defaults)
    argument_specs = [
        ('version_name_prefix', None,
         'Prefix for bom version to collect.'),
        ('halyard_bom_bucket', 'halconfig',
         'The bucket managing halyard BOMs and config profiles.'),
        ('docker_registry', None,
         'The expected docker registry in boms.'),
        ('bintray_org', None,
         'The expected bintray organization in boms.'),
        ('bintray_debian_repository', None,
         'The expected bintray debian repository in boms.'),
    ]
    for arg_name, default_value, help_text in argument_specs:
      self.add_argument(parser, arg_name, defaults, default_value,
                        help=help_text)
class CollectArtifactVersions(CommandProcessor):
  """Locate all the existing spinnaker build artifacts.

  Ultimately this produces files mapping all the existing artifact
  builds for each service of a given type. It also looks for consistency
  between the bintray jar and debian builds.

  Emits files:
     <debian_repository>__versions.yml: All the debian build versions
     <jar_repository>__versions.yml: All the jar build versions
     <docker_registry>__versions.yml: All the container build versions
     missing_jars.yml: Bintray debian versions without a corresponding jar
     missing_debians.yml: Bintray jar versions without a corresponding debian
     config.yml: The configuration values used to collect the artifacts
  """

  def __init__(self, factory, options, **kwargs):
    super(CollectArtifactVersions, self).__init__(
        factory, options, **kwargs)
    check_options_set(options,
                      ['docker_registry', 'bintray_org',
                       'bintray_jar_repository', 'bintray_debian_repository'])

    # Optional basic-auth credentials for the bintray API.
    user = os.environ.get('BINTRAY_USER')
    password = os.environ.get('BINTRAY_KEY')
    if user and password:
      user_password = '{user}:{password}'.format(user=user, password=password)
      # BUGFIX: base64.encodestring() was removed in Python 3.9 and inserted
      # line breaks into long values. b64encode emits clean base64 with no
      # trailing newline, so the previous [:-1] trim is no longer needed.
      encoded_auth = base64.b64encode(user_password.encode())
      self.__basic_auth = 'Basic %s' % encoded_auth.decode()
    else:
      self.__basic_auth = None

  def fetch_bintray_url(self, bintray_url):
    """Fetch one bintray API url.

    Returns:
      (headers, content) where content is the decoded json payload.

    Raises:
      ResponseError: if bintray rejected the request.
    """
    request = Request(bintray_url)
    if self.__basic_auth:
      request.add_header('Authorization', self.__basic_auth)
    try:
      response = urlopen(request)
      headers = response.info()
      payload = response.read()
      content = json.loads(payload.decode())
    except HTTPError as ex:
      raise_and_log_error(
          ResponseError('Bintray failure: {}'.format(ex),
                        server='bintray.api'),
          'Failed on url=%s: %s' % (bintray_url, exception_to_message(ex)))
    return headers, content

  def list_bintray_packages(self, subject_repo):
    """Return '<subject_repo>/<package>' for every package in the repo."""
    path = 'repos/%s/packages' % subject_repo
    base_url = 'https://api.bintray.com/' + path
    result = []
    while True:
      # Bintray pages the listing; continue from where the last page ended.
      url = base_url + '?start_pos=%d' % len(result)
      headers, content = self.fetch_bintray_url(url)
      # BUGFIX: HTTP header values are strings, so the previous
      # "len(result) >= total" comparison raised TypeError under Python 3.
      # Coerce the reported total to int before comparing.
      total = int(headers.get('X-RangeLimit-Total', 0))
      result.extend(['%s/%s' % (subject_repo, entry['name'])
                     for entry in content])
      if len(result) >= total:
        break
    return result

  def query_bintray_package_versions(self, package_path):
    """Return (package_name, [versions]) for the given bintray package."""
    path = 'packages/' + package_path
    url = 'https://api.bintray.com/' + path
    _, content = self.fetch_bintray_url(url)
    package_name = package_path[package_path.rfind('/') + 1:]
    return (package_name, content['versions'])

  def difference(self, versions, target):
    """Return the versions that are not present in target."""
    return [version for version in versions if version not in target]

  def find_missing_jar_versions(self, jar_map, debian_map):
    """Return {jar_package: [versions]} of debians lacking a jar build."""
    missing_jars = {}
    prefix = 'spinnaker-'
    for package, versions in debian_map.items():
      # Jar packages usually drop the 'spinnaker-' debian prefix.
      # BUGFIX: previously a package without the prefix left 'key' bound to
      # the previous iteration's value (NameError on the first iteration).
      key = package[len(prefix):] if package.startswith(prefix) else package
      if key not in jar_map:
        key = package
      if key == 'spinnaker-monitoring':
        # The monitoring jar is published under the daemon component name.
        key = 'spinnaker-monitoring-daemon'
      if key not in jar_map:
        # Some debians (e.g. spinnaker-monitoring-third-party) have no jar.
        continue
      missing = self.difference(versions, jar_map.get(key))
      if missing:
        missing_jars[key] = missing
    return missing_jars

  def find_missing_debian_versions(self, jar_map, debian_map):
    """Return {debian_package: [versions]} of jars lacking a debian build.

    Raises:
      ValueError: if a jar package has no corresponding debian package name.
    """
    missing_debians = {}
    for package, versions in jar_map.items():
      key = 'spinnaker-' + package
      if key not in debian_map:
        key = package
      if key not in debian_map:
        if key == 'spinnaker-monitoring':
          key = 'spinnaker-monitoring-daemon'
        else:
          raise ValueError('Unknown DEBIAN "%s"' % package)
      missing = self.difference(versions, debian_map.get(key))
      if missing:
        missing_debians[key] = missing
    return missing_debians

  def collect_bintray_versions(self, pool):
    """Collect and write the jar and debian versions found in bintray.

    Returns:
      (jar_package_map, debian_package_map) of package name to versions.
    """
    options = self.options
    repos = [('jar', options.bintray_jar_repository),
             ('debian', options.bintray_debian_repository)]
    results = []
    for repo_type, bintray_repo in repos:
      subject_repo = '%s/%s' % (options.bintray_org, bintray_repo)
      packages = self.list_bintray_packages(subject_repo)
      package_versions = pool.map(self.query_bintray_package_versions,
                                  packages)

      package_map = {}
      for name, versions in package_versions:
        package_map[name] = versions
      results.append(package_map)

      path = os.path.join(
          self.get_output_dir(),
          '%s__%s_versions.yml' % (bintray_repo, repo_type))
      logging.info('Writing %s versions to %s', bintray_repo, path)
      write_to_path(yaml.safe_dump(package_map,
                                   allow_unicode=True,
                                   default_flow_style=False), path)
    return results[0], results[1]

  def query_gcr_image_versions(self, image):
    """Return (image_name, [tags]) for one container image."""
    options = self.options
    command_parts = ['gcloud',
                     '--format=json',
                     'container images list-tags',
                     image, '--limit 10000']
    if options.gcb_service_account:
      command_parts.extend(['--account', options.gcb_service_account])
    response = check_subprocess(' '.join(command_parts))
    result = []
    for version in json.loads(response):
      result.extend(version['tags'])
    return (image[image.rfind('/') + 1:], result)

  def collect_gcb_versions(self, pool):
    """Collect and write the container versions in the docker registry."""
    options = self.options
    logging.debug('Collecting GCB versions from %s', options.docker_registry)
    command_parts = ['gcloud',
                     '--format=json',
                     'container images list',
                     '--repository', options.docker_registry]
    if options.gcb_service_account:
      logging.debug('Using account %s', options.gcb_service_account)
      command_parts.extend(['--account', options.gcb_service_account])
    response = check_subprocess(' '.join(command_parts))
    images = [entry['name']
              for entry in json.loads(response)]
    image_versions = pool.map(self.query_gcr_image_versions, images)

    image_map = {}
    for name, versions in image_versions:
      image_map[name] = versions

    path = os.path.join(
        self.get_output_dir(),
        options.docker_registry.replace('/', '__') + '__gcb_versions.yml')
    logging.info('Writing %s versions to %s', options.docker_registry, path)
    write_to_path(yaml.safe_dump(image_map,
                                 allow_unicode=True,
                                 default_flow_style=False), path)
    return image_map

  def collect_gce_image_versions(self):
    """Collect and write the spinnaker GCE image versions."""
    options = self.options
    project = options.publish_gce_image_project
    logging.debug('Collecting GCE image versions from %s', project)
    command_parts = ['gcloud', '--format=json',
                     'compute images list', '--project', project,
                     '--filter spinnaker-']
    if options.build_gce_service_account:
      logging.debug('Using account %s', options.build_gce_service_account)
      command_parts.extend(['--account', options.build_gce_service_account])
    response = check_subprocess(' '.join(command_parts))
    images = [entry['name']
              for entry in json.loads(response)]

    image_map = {}
    for name in images:
      # Names look like "spinnaker-<module>-<major>-<minor>-<patch>-<build>";
      # skip anything that does not fit that shape.
      parts = name.split('-', 2)
      if len(parts) != 3:
        logging.warning('Skipping malformed %s', name)
        continue
      _, module, build_version = parts
      parts = build_version.split('-')
      if len(parts) != 4:
        logging.warning('Skipping malformed %s', name)
        continue
      version_list = image_map.setdefault(module, [])
      version_list.append('{}.{}.{}-{}'.format(*parts))

    path = os.path.join(
        self.get_output_dir(), project + '__gce_image_versions.yml')
    logging.info('Writing gce image versions to %s', path)
    write_to_path(yaml.safe_dump(image_map,
                                 allow_unicode=True,
                                 default_flow_style=False), path)
    return image_map

  def _do_command(self):
    """Collect all artifact versions, then cross-check jars vs debians."""
    pool = ThreadPool(16)
    bintray_jars, bintray_debians = self.collect_bintray_versions(pool)
    self.collect_gcb_versions(pool)
    self.collect_gce_image_versions()
    pool.close()
    pool.join()

    missing_jars = self.find_missing_jar_versions(
        bintray_jars, bintray_debians)
    missing_debians = self.find_missing_debian_versions(
        bintray_jars, bintray_debians)
    options = self.options
    for which in [(options.bintray_jar_repository, missing_jars),
                  (options.bintray_debian_repository, missing_debians)]:
      if not which[1]:
        logging.info('%s is all accounted for.', which[0])
        continue
      path = os.path.join(self.get_output_dir(), 'missing_%s.yml' % which[0])
      logging.info('Writing to %s', path)
      write_to_path(
          yaml.safe_dump(which[1], allow_unicode=True,
                         default_flow_style=False),
          path)

    # Record the configuration used so downstream commands can find it.
    config = {
        'bintray_org': options.bintray_org,
        'bintray_jar_repository': options.bintray_jar_repository,
        'bintray_debian_repository': options.bintray_debian_repository,
        'docker_registry': options.docker_registry,
        'googleImageProject': options.publish_gce_image_project
    }
    path = os.path.join(self.get_output_dir(), 'config.yml')
    logging.info('Writing to %s', path)
    write_to_path(yaml.safe_dump(config, default_flow_style=False), path)
class CollectArtifactVersionsFactory(CommandFactory):
  """Factory for the collect_artifact_versions command."""

  def __init__(self, **kwargs):
    super(CollectArtifactVersionsFactory, self).__init__(
        'collect_artifact_versions', CollectArtifactVersions,
        'Find information about artifact jar/debian versions.', **kwargs)

  def init_argparser(self, parser, defaults):
    """Register the command-line arguments this command consumes."""
    super(CollectArtifactVersionsFactory, self).init_argparser(
        parser, defaults)
    argument_specs = [
        ('bintray_org',
         'bintray organization for the jar and debian repositories.'),
        ('bintray_jar_repository',
         'bintray repository in the bintray_org containing published jars.'),
        ('bintray_debian_repository',
         'bintray repository in the bintray_org containing debians.'),
        ('version_name_prefix',
         'Prefix for bintray versions to collect.'),
        ('gcb_service_account',
         'The service account to use when checking gcr images.'),
        ('docker_registry',
         'The GCB service account query image versions from.'),
        ('build_gce_service_account',
         'The service account to use with the gce project.'),
        ('publish_gce_image_project',
         'The GCE project ot collect images from.'),
    ]
    # Every argument here defaults to None.
    for arg_name, help_text in argument_specs:
      self.add_argument(parser, arg_name, defaults, None, help=help_text)
class AuditArtifactVersions(CommandProcessor):
"""Given the collected BOMs and artifacts, separate good from bad.
Ultimately this determines which existing artifacts are in use and which are
not referenced by a bom. It also verifies the integrity of the boms with
regard to the existence of the artifacts they specify. It will emit files
that suggest which specific boms and artifacts can be deleted. The artifacts
in use by the boms suggested for pruning are not included in the prune list.
They will be nominated in the next round.
Emits files:
audit_confirmed_boms.yml: All the boms that have been verified intact.
audit_found_<type>.yml: All the artifacts of <type> that were referenced
by a bom.
audit_missing_<type>.yml: All the artifacts of <type> that were not
referenced by a bom.
audit_unused_<type>.yml: All the artifacts of <type> that were referenced
by a bom but not found to actually exist. These are for documentation.
The audit_invalid_boms.yml file is more useful.
audit_invalid_boms.yml: All the boms whose integrity is suspect along with
the explanation as to why. Usually they are missing artifacts, but
there could be other reasons.
prune_<type>.txt The list of URLs that should be safe to delete for the
given <type> from a strict referential integrity standpoint. There
could be unanticipated uses of these artifacts.
"""
def __init_bintray_versions_helper(self, base_path):
artifact_data_dir = os.path.join(base_path, 'collect_artifact_versions')
debian_paths = []
jar_paths = []
gcr_paths = []
image_paths = []
for filename in os.listdir(artifact_data_dir):
path = os.path.join(artifact_data_dir, filename)
if filename.endswith('__gcb_versions.yml'):
gcr_paths.append(path)
elif filename.endswith('__jar_versions.yml'):
jar_paths.append(path)
elif filename.endswith('__debian_versions.yml'):
debian_paths.append(path)
elif filename.endswith('__gce_image_versions.yml'):
image_paths.append(path)
for name, found in [('jar', jar_paths), ('debian', debian_paths),
('gce image', image_paths), ('gcr image', gcr_paths)]:
if len(found) != 1:
raise_and_log_error(
ConfigError(
'Expected 1 %s version files in "%s": %s' % (
name, artifact_data_dir, found)))
logging.debug('Loading container image versions from "%s"', gcr_paths[0])
with open(gcr_paths[0], 'r') as stream:
self.__container_versions = yaml.safe_load(stream.read())
with open(jar_paths[0], 'r') as stream:
self.__jar_versions = yaml.safe_load(stream.read())
with open(debian_paths[0], 'r') as stream:
self.__debian_versions = yaml.safe_load(stream.read())
with open(image_paths[0], 'r') as stream:
self.__gce_image_versions = yaml.safe_load(stream.read())
def __extract_all_bom_versions(self, bom_map):
result = set([])
for versions in bom_map.values():
if not versions:
continue
for commits in versions.values():
for buildnum in commits.values():
for info_list in buildnum.values():
for info in info_list:
result.add(info['bom_version'])
return result
def __remove_old_bom_versions(self, min_semver, version_to_commit_boms):
"""Remove references to older boms in collected bom info.
Args:
min_semver: [SemanticVersion] minimally acceptable semantic version
version_to_commit_boms: [dict of {commit_id, build_info}]
where build_info is a dictionary mapping buildnum to list of
bom_metadata dictionaries.
Returns:
copy of versions but without build_info referencing older bom_versions.
"""
def list_of_current_bom_meta(min_semver, all_bom_meta):
good_bom_meta = []
for bom_meta in all_bom_meta:
semver = SemanticVersion.make('ignored-' + bom_meta['bom_version'])
if SemanticVersion.compare(semver, min_semver) >= 0:
good_bom_meta.append(bom_meta)
return good_bom_meta
def commit_to_current_bom_meta(min_semver, build_map):
build_info = {}
for buildnum, all_bom_meta in build_map.items():
good_bom_meta = list_of_current_bom_meta(min_semver, all_bom_meta)
if good_bom_meta:
build_info[buildnum] = good_bom_meta
return build_info
result = {}
for version, commit_build_map in version_to_commit_boms.items():
commit_map = {}
for commit_id, orig_build_map in commit_build_map.items():
build_map = commit_to_current_bom_meta(min_semver, orig_build_map)
if build_map:
commit_map[commit_id] = build_map
if commit_map:
result[version] = commit_map
else:
logging.info(
'Dropping version=%s because it bom versions are all too old.',
version)
return result
def __init__(self, factory, options, **kwargs):
if options.prune_min_buildnum_prefix is not None:
# Typically numeric so is interpreted as number from yaml
options.prune_min_buildnum_prefix = str(options.prune_min_buildnum_prefix)
super(AuditArtifactVersions, self).__init__(factory, options, **kwargs)
base_path = os.path.dirname(self.get_output_dir())
self.__init_bintray_versions_helper(base_path)
min_version = options.min_audit_bom_version or '0.0.0'
min_parts = min_version.split('.')
if len(min_parts) < 3:
min_version += '.0' * (3 - len(min_parts))
self.__min_semver = SemanticVersion.make('ignored-' + min_version)
bom_data_dir = os.path.join(base_path, 'collect_bom_versions')
path = os.path.join(bom_data_dir, 'released_bom_service_map.yml')
check_path_exists(path, 'released bom analysis')
with open(path, 'r') as stream:
self.__all_released_boms = {} # forever
self.__current_released_boms = {} # since min_version to audit
for service, versions in yaml.safe_load(stream.read()).items():
if not versions:
# e.g. this service has not yet been released.
logging.info('No versions for service=%s', service)
continue
self.__all_released_boms[service] = versions
self.__current_released_boms[service] = versions
stripped_versions = self.__remove_old_bom_versions(
self.__min_semver, versions)
if stripped_versions:
self.__current_released_boms[service] = stripped_versions
path = os.path.join(bom_data_dir, 'unreleased_bom_service_map.yml')
check_path_exists(path, 'unreleased bom analysis')
with open(path, 'r') as stream:
self.__unreleased_boms = yaml.safe_load(stream.read())
self.__only_bad_and_invalid_boms = False
self.__all_bom_versions = self.__extract_all_bom_versions(
self.__all_released_boms)
self.__all_bom_versions.update(
self.__extract_all_bom_versions(self.__unreleased_boms))
self.__missing_debians = {}
self.__missing_jars = {}
self.__missing_containers = {}
self.__missing_images = {}
self.__found_debians = {}
self.__found_jars = {}
self.__found_containers = {}
self.__found_images = {}
self.__unused_jars = {}
self.__unused_debians = {}
self.__unused_containers = {}
self.__unused_gce_images = {}
self.__invalid_boms = {}
self.__confirmed_boms = set([])
self.__prune_boms = []
self.__prune_jars = {}
self.__prune_debians = {}
self.__prune_containers = {}
self.__prune_gce_images = {}
self.__invalid_versions = {}
def audit_artifacts(self):
self.audit_bom_services(self.__all_released_boms, 'released')
self.audit_bom_services(self.__unreleased_boms, 'unreleased')
self.audit_package(
'jar', self.__jar_versions, self.__unused_jars)
self.audit_package(
'debian', self.__debian_versions, self.__unused_debians)
self.audit_package(
'container', self.__container_versions, self.__unused_containers)
self.audit_package(
'image',
self.__gce_image_versions, self.__unused_gce_images)
def maybe_write_log(what, data):
if not data:
return
path = os.path.join(self.get_output_dir(), 'audit_' + what + '.yml')
logging.info('Writing %s', path)
write_to_path(
yaml.safe_dump(data, allow_unicode=True, default_flow_style=False),
path)
confirmed_boms = self.__all_bom_versions - set(self.__invalid_boms.keys())
unchecked_releases = [
key
for key in self.__all_bom_versions
if (CollectBomVersions.RELEASED_VERSION_MATCHER.match(key)
and SemanticVersion.compare(SemanticVersion.make('ignored-' + key),
self.__min_semver) < 0)]
invalid_releases = {
key: bom
for key, bom in self.__invalid_boms.items()
if (CollectBomVersions.RELEASED_VERSION_MATCHER.match(key)
and SemanticVersion.compare(SemanticVersion.make('ignored-' + key),
self.__min_semver) >= 0)}
confirmed_releases = [
key
for key in confirmed_boms
if (CollectBomVersions.RELEASED_VERSION_MATCHER.match(key)
and SemanticVersion.compare(SemanticVersion.make('ignored-' + key),
self.__min_semver) >= 0)]
maybe_write_log('missing_debians', self.__missing_debians)
maybe_write_log('missing_jars', self.__missing_jars)
maybe_write_log('missing_containers', self.__missing_containers)
maybe_write_log('missing_images', self.__missing_images)
maybe_write_log('found_debians', self.__found_debians)
maybe_write_log('found_jars', self.__found_jars)
maybe_write_log('found_containers', self.__found_containers)
maybe_write_log('found_images', self.__found_images)
maybe_write_log('unused_debians', self.__unused_debians)
maybe_write_log('unused_jars', self.__unused_jars)
maybe_write_log('unused_containers', self.__unused_containers)
maybe_write_log('unused_images', self.__unused_gce_images)
maybe_write_log('invalid_boms', self.__invalid_boms)
maybe_write_log('confirmed_boms', sorted(list(confirmed_boms)))
maybe_write_log('confirmed_releases', sorted(list(confirmed_releases)))
maybe_write_log('invalid_versions', self.__invalid_versions)
maybe_write_log('invalid_releases', invalid_releases)
maybe_write_log('unchecked_releases', unchecked_releases)
def most_recent_version(self, name, versions):
"""Find the most recent version built."""
if not versions:
return None
raw_versions = set([version.split('-')[0] for version in versions])
sem_vers = []
for text in raw_versions:
try:
sem_vers.append(SemanticVersion.make('version-' + text))
except Exception as ex:
bad_list = self.__invalid_versions.get(name, [])
bad_list.append(text)
self.__invalid_versions[name] = bad_list
logging.error('Ignoring invalid %s version "%s": %s', name, text, ex)
return sorted(sem_vers)[-1].to_version()
def test_buildnum(self, buildver):
dash = buildver.rfind('-')
if dash < 0 or not self.options.prune_min_buildnum_prefix:
return True
buildnum = buildver[dash + 1:]
return buildnum < self.options.prune_min_buildnum_prefix
def determine_bom_candidates(self):
path = os.path.join(os.path.dirname(self.get_output_dir()),
'collect_bom_versions', 'bom_list.txt')
candidates = []
with open(path, 'r') as stream:
for line in stream.read().split('\n'):
if line.endswith('-latest-unvalidated.yml'):
continue
bom = CollectBomVersions.url_to_bom_name(line)
if not CollectBomVersions.RELEASED_VERSION_MATCHER.match(bom):
candidates.append(line)
return candidates
def determine_prunings(self):
def filter_from_candidates(newest_version, candidate_version_list):
if self.options.prune_keep_latest_version:
prune_version = lambda ver: not ver.startswith(newest_version)
else:
prune_version = lambda ver: True
if self.options.prune_min_buildnum_prefix:
prune_buildnum = self.test_buildnum
else:
prune_buildnum = lambda ver: True
return [candidate for candidate in candidate_version_list
if prune_version(candidate) and prune_buildnum(candidate)]
self.__prune_boms = [name for name in self.determine_bom_candidates()
if self.test_buildnum(name)]
service_list = set(self.__found_debians.keys())
service_list.update(set(self.__found_containers.keys()))
for name in service_list:
skip_versions = self.__invalid_versions.get(name, [])
for unused_map, prune_map in [
(self.__unused_jars, self.__prune_jars),
(self.__unused_debians, self.__prune_debians),
(self.__unused_gce_images, self.__prune_gce_images),
(self.__unused_containers, self.__prune_containers)]:
unused_list = unused_map.get(name, None)
if unused_list is None:
unused_list = unused_map.get('spinnaker-' + name, [])
if not unused_list and name == 'monitoring-daemon':
# Some repos have 'spinnaker-monitoring', not individual components
unused_list = unused_map.get('spinnaker-monitoring', [])
if unused_list:
name = 'spinnaker-monitoring'
newest_version = self.most_recent_version(name, unused_list)
candidates = filter_from_candidates(newest_version, unused_list)
# We're going to keep malformed versions. These are rare so
# we'll leave it to manual cleanup.
pruned = [version
for version in candidates if not version in skip_versions]
if pruned:
prune_map[name] = sorted(pruned)
def suggest_prunings(self):
path = os.path.join(os.path.dirname(self.get_output_dir()),
'collect_bom_versions', 'config.yml')
with open(path, 'r') as stream:
bom_config = yaml.safe_load(stream.read())
path = os.path.join(os.path.dirname(self.get_output_dir()),
'collect_artifact_versions', 'config.yml')
with open(path, 'r') as stream:
art_config = yaml.safe_load(stream.read())
if self.__prune_boms:
path = os.path.join(self.get_output_dir(), 'prune_boms.txt')
logging.info('Writing to %s', path)
write_to_path('\n'.join(sorted(self.__prune_boms)), path)
jar_repo_path = 'packages/%s/%s' % (
art_config['bintray_org'], art_config['bintray_jar_repository'])
debian_repo_path = 'packages/%s/%s' % (
art_config['bintray_org'], art_config['bintray_debian_repository'])
artifact_prefix_func = {
'jar': lambda name: 'https://api.bintray.com/%s/%s/versions/' % (
jar_repo_path, name),
'debian': lambda name: 'https://api.bintray.com/%s/%s/versions/' % (
debian_repo_path,
name if name == 'spinnaker' else 'spinnaker-' + name),
'container': lambda name: '%s/%s:' % (
art_config['docker_registry'], name),
'image': lambda name: 'spinnaker-%s-' % name
}
artifact_version_func = {
'jar': lambda version: version,
'debian': lambda version: version,
'container': lambda version: version,
'image': lambda version: version.replace('.', '-')
}
for art_type, art_map in [('jar', self.__prune_jars),
('debian', self.__prune_debians),
('container', self.__prune_containers),
('image', self.__prune_gce_images)]:
urls = []
for service, art_list in art_map.items():
prefix = artifact_prefix_func[art_type](service)
version_func = artifact_version_func[art_type]
urls.extend([prefix + version_func(version) for version in art_list])
if urls:
path = os.path.join(self.get_output_dir(), 'prune_%ss.txt' % art_type)
logging.info('Writing to %s', path)
write_to_path('\n'.join(sorted(urls)), path)
def _do_command(self):
self.audit_artifacts()
self.determine_prunings()
self.suggest_prunings()
def audit_container(self, service, build_version, entries):
if service in ['spinnaker', 'monitoring-third-party']:
return True # not applicable
if service in self.__container_versions:
versions = self.__container_versions[service]
elif service in ['monitoring-daemon']:
versions = self.__container_versions.get('monitoring-daemon', [])
else:
versions = []
if build_version in versions:
holder = self.__found_containers.get(service, {})
holder[build_version] = entries
self.__found_containers[service] = holder
return True
holder = self.__missing_containers.get(service, {})
holder[build_version] = entries
self.__missing_containers[service] = holder
logging.warning('Missing %s container %s', service, build_version)
return False
def audit_image(self, service, build_version, entries):
if service in ['spinnaker',
'monitoring-third-party', 'monitoring-daemon']:
return True # not applicable
versions = self.__gce_image_versions.get(service, [])
if build_version in versions:
holder = self.__found_images.get(service, {})
holder[build_version] = entries
self.__found_images[service] = holder
return True
# 20181109: Dont audit images as these are no longer built
# so are not expected anyway.
#
expect_images = False
if not expect_images:
return True
# 20181109: We're leaving this around for the time being
# but it isnt reachable.
holder = self.__missing_images.get(service, {})
holder[build_version] = entries
self.__missing_images[service] = holder
logging.warning('Missing %s gce image %s', service, build_version)
return False
def audit_jar(self, service, build_version, entries):
if service in self.__jar_versions:
versions = self.__jar_versions[service]
elif service in ['monitoring-daemon', 'monitoring-third-party']:
versions = self.__jar_versions.get('spinnaker-monitoring', [])
else:
versions = []
if build_version in versions:
holder = self.__found_jars.get(service, {})
holder[build_version] = entries
self.__found_jars[service] = holder
return True
holder = self.__missing_jars.get(service, {})
holder[build_version] = entries
self.__missing_jars[service] = holder
logging.warning('Missing %s jar %s', service, build_version)
return False
def audit_debian(self, service, build_version, info_list):
versions = []
if service in self.__debian_versions:
key = service
versions = self.__debian_versions[service]
else:
key = 'spinnaker-' + service
if key in self.__debian_versions:
versions = self.__debian_versions[key]
if build_version in versions:
holder = self.__found_debians.get(service, {})
holder[build_version] = info_list
self.__found_debians[service] = holder
return True
holder = self.__missing_debians.get(key, {})
holder[build_version] = info_list
self.__missing_debians[key] = holder
logging.warning('Missing %s debian %s', key, build_version)
return False
def package_in_bom_map(self, service, version, buildnum, service_map):
version_map = service_map.get(service)
if version_map is None:
return False
commit_map = version_map.get(version)
if commit_map is None:
return False
for _, buildnums in commit_map.items():
if buildnum in buildnums:
return True
return False
def audit_package_helper(self, package, version, buildnum, which):
if package in self.__all_released_boms or package in self.__unreleased_boms:
name = package
elif package.startswith('spinnaker-'):
name = package[package.find('-') + 1:]
else:
return False
is_released = self.package_in_bom_map(
name, version, buildnum, self.__all_released_boms)
is_unreleased = self.package_in_bom_map(
name, version, buildnum, self.__unreleased_boms)
if is_released or is_unreleased:
return True
data_list = which.get(package, [])
if buildnum:
data_list.append('%s-%s' % (version, buildnum))
else:
data_list.append(version)
which[package] = data_list
return False
def audit_package(self, kind, packages, which):
logging.info('Auditing %s packages', kind)
for package, versions in packages.items():
if package == 'halyard':
logging.warning('Skipping halyard.')
continue
for build_version in versions:
parts = build_version.split('-', 1)
if len(parts) == 1:
logging.warning('Unexpected %s version %s', package, build_version)
continue
version, buildnum = parts
self.audit_package_helper(package, version, buildnum, which)
def audit_bom_services(self, bom_services, title):
    """Audit every service build referenced by the given BOM service map.

    For each (service, version, commit, buildnum) entry, checks the jar,
    debian, container and image repositories, and records any BOM whose
    referenced artifacts are missing into self.__invalid_boms.
    """
    def add_invalid_boms(jar_ok, deb_ok, container_ok, image_ok,
                         service, version_buildnum, info_list, invalid_boms):
        # Record, per BOM version, which artifact kinds are missing.
        if jar_ok and deb_ok and container_ok and image_ok:
            return
        kind_checks = [(jar_ok, 'jars'), (deb_ok, 'debs'),
                       (container_ok, 'containers'), (image_ok, 'images')]
        for info in info_list:
            bom_version = info['bom_version']
            bom_record = invalid_boms.get(bom_version, {})
            for is_ok, kind in kind_checks:
                if not is_ok:
                    problems = bom_record.get(kind, {})
                    problems[service] = version_buildnum
                    bom_record[kind] = problems
            invalid_boms[bom_version] = bom_record

    def audit_service(service, versions):
        # Walk version -> commit -> buildnum and audit each concrete build.
        for version, commits in versions.items():
            for _, buildnums in commits.items():
                for buildnum, info_list in buildnums.items():
                    version_buildnum = '%s-%s' % (version, buildnum)
                    if service in ['monitoring-daemon', 'monitoring-third-party']:
                        # Uses debians, but not jars so missing jars is ok.
                        jar_ok = True
                    else:
                        jar_ok = self.audit_jar(service, version_buildnum, info_list)
                    deb_ok = self.audit_debian(service, version_buildnum, info_list)
                    gcr_ok = self.audit_container(service, version_buildnum, info_list)
                    image_ok = self.audit_image(service, version_buildnum, info_list)
                    add_invalid_boms(jar_ok, deb_ok, gcr_ok, image_ok,
                                     service, version_buildnum,
                                     info_list, self.__invalid_boms)

    logging.debug('Auditing %s BOMs', title)
    for service, versions in bom_services.items():
        if not versions:
            logging.debug('No versions for %s', service)
            continue
        audit_service(service, versions)
class AuditArtifactVersionsFactory(CommandFactory):
    """Factory wiring the 'audit_artifact_versions' command into the CLI."""

    def __init__(self, **kwargs):
        # Bind the command name, implementation class and help text.
        super(AuditArtifactVersionsFactory, self).__init__(
            'audit_artifact_versions', AuditArtifactVersions,
            'Audit artifact versions in BOMs and vice-versa', **kwargs)

    def init_argparser(self, parser, defaults):
        """Add this command's flags on top of the base factory's flags."""
        super(AuditArtifactVersionsFactory, self).init_argparser(parser, defaults)
        self.add_argument(parser, 'min_audit_bom_version', defaults, None,
                          help='Minimum released bom version to audit.')
        self.add_argument(
            parser, 'prune_min_buildnum_prefix', defaults, None,
            help='Only suggest pruning artifacts with a smaller build number.'
            ' This is actually just a string, not a number so is a string compare.')
        self.add_argument(
            parser, 'prune_keep_latest_version', defaults, False, type=bool,
            help='If true, suggest only artifacts whose version is not the most'
            ' recent version among the boms surveyed.')
def register_commands(registry, subparsers, defaults):
    """Register this module's command factories with the CLI registry."""
    factory_classes = (CollectBomVersionsFactory,
                       CollectArtifactVersionsFactory,
                       AuditArtifactVersionsFactory)
    for factory_class in factory_classes:
        factory_class().register(registry, subparsers, defaults)
|
|
import errno
import os
import selectors
import signal
import socket
import struct
import sys
import threading
from . import connection
from . import process
from .context import reduction
from . import semaphore_tracker
from . import spawn
from . import util
__all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process',
           'set_forkserver_preload']

#
# Module constants
#

# Upper bound on the number of fds passed over the UNIX socket per request.
MAXFDS_TO_SEND = 256
UNSIGNED_STRUCT = struct.Struct('Q')     # large enough for pid_t
#
# Forkserver class
#
class ForkServer(object):
    """State of (and client interface to) the fork server process.

    A single module-level instance is shared; its bound methods are
    re-exported as the module-level API (see the bottom of the module).
    """

    def __init__(self):
        self._forkserver_address = None      # AF_UNIX address of the server
        self._forkserver_alive_fd = None     # write end of the "alive" pipe
        self._inherited_fds = None           # fds received from the parent
        self._lock = threading.Lock()        # guards server startup
        self._preload_modules = ['__main__']

    def set_forkserver_preload(self, modules_names):
        '''Set list of module names to try to load in forkserver process.'''
        # Bug fix: validate the *incoming* list, not the previously stored
        # one -- otherwise a list containing non-strings was accepted.
        if not all(type(mod) is str for mod in modules_names):
            raise TypeError('module_names must be a list of strings')
        self._preload_modules = modules_names

    def get_inherited_fds(self):
        '''Return list of fds inherited from parent process.

        This returns None if the current process was not started by fork
        server.
        '''
        return self._inherited_fds

    def connect_to_new_process(self, fds):
        '''Request forkserver to create a child process.

        Returns a pair of fds (status_r, data_w).  The calling process can
        read the child process's pid and (eventually) its returncode from
        status_r.  The calling process should write to data_w the pickled
        preparation and process data.
        '''
        self.ensure_running()
        # 4 slots are reserved for the bookkeeping fds added below.
        if len(fds) + 4 >= MAXFDS_TO_SEND:
            raise ValueError('too many fds')
        with socket.socket(socket.AF_UNIX) as client:
            client.connect(self._forkserver_address)
            parent_r, child_w = os.pipe()
            child_r, parent_w = os.pipe()
            allfds = [child_r, child_w, self._forkserver_alive_fd,
                      semaphore_tracker.getfd()]
            allfds += fds
            try:
                reduction.sendfds(client, allfds)
                return parent_r, parent_w
            except:
                # On any failure, close our ends too before re-raising.
                os.close(parent_r)
                os.close(parent_w)
                raise
            finally:
                # The child's ends are always closed in this process.
                os.close(child_r)
                os.close(child_w)

    def ensure_running(self):
        '''Make sure that a fork server is running.

        This can be called from any process.  Note that usually a child
        process will just reuse the forkserver started by its parent, so
        ensure_running() will do nothing.
        '''
        with self._lock:
            semaphore_tracker.ensure_running()
            if self._forkserver_alive_fd is not None:
                # Already started (or inherited from the parent).
                return

            cmd = ('from multiprocessing.forkserver import main; ' +
                   'main(%d, %d, %r, **%r)')

            if self._preload_modules:
                # Only forward the preparation data the server needs.
                desired_keys = {'main_path', 'sys_path'}
                data = spawn.get_preparation_data('ignore')
                data = dict((x, y) for (x, y) in data.items()
                            if x in desired_keys)
            else:
                data = {}

            with socket.socket(socket.AF_UNIX) as listener:
                address = connection.arbitrary_address('AF_UNIX')
                listener.bind(address)
                os.chmod(address, 0o600)   # restrict to the owning user
                listener.listen()

                # all client processes own the write end of the "alive" pipe;
                # when they all terminate the read end becomes ready.
                alive_r, alive_w = os.pipe()
                try:
                    fds_to_pass = [listener.fileno(), alive_r]
                    cmd %= (listener.fileno(), alive_r, self._preload_modules,
                            data)
                    exe = spawn.get_executable()
                    args = [exe] + util._args_from_interpreter_flags()
                    args += ['-c', cmd]
                    pid = util.spawnv_passfds(exe, args, fds_to_pass)
                except:
                    os.close(alive_w)
                    raise
                finally:
                    # The server owns its own copy of the read end.
                    os.close(alive_r)
                self._forkserver_address = address
                self._forkserver_alive_fd = alive_w
#
#
#
def main(listener_fd, alive_r, preload, main_path=None, sys_path=None):
    '''Run forkserver.'''
    # NOTE: sys_path is accepted for interface compatibility with the
    # preparation data but is not used in this function body.
    if preload:
        if '__main__' in preload and main_path is not None:
            # Import the parent's main module, flagging the process so
            # import-time machinery knows it runs in an inheriting context.
            process.current_process()._inheriting = True
            try:
                spawn.import_main_path(main_path)
            finally:
                del process.current_process()._inheriting
        for modname in preload:
            # Best effort: preload modules that fail to import are skipped.
            try:
                __import__(modname)
            except ImportError:
                pass

    util._close_stdin()

    # ignoring SIGCHLD means no need to reap zombie processes
    handler = signal.signal(signal.SIGCHLD, signal.SIG_IGN)

    with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \
            selectors.DefaultSelector() as selector:
        _forkserver._forkserver_address = listener.getsockname()

        selector.register(listener, selectors.EVENT_READ)
        selector.register(alive_r, selectors.EVENT_READ)

        while True:
            try:
                # Block until at least one registered fd is readable.
                while True:
                    rfds = [key.fileobj for (key, events) in selector.select()]
                    if rfds:
                        break
                if alive_r in rfds:
                    # EOF because no more client processes left
                    assert os.read(alive_r, 1) == b''
                    raise SystemExit
                assert listener in rfds
                with listener.accept()[0] as s:
                    code = 1
                    if os.fork() == 0:
                        # Child: serve exactly one request, then hard-exit
                        # so it can never fall back into the server loop.
                        try:
                            _serve_one(s, listener, alive_r, handler)
                        except Exception:
                            sys.excepthook(*sys.exc_info())
                            sys.stderr.flush()
                        finally:
                            os._exit(code)
            except OSError as e:
                # A client that disconnected mid-accept is not fatal.
                if e.errno != errno.ECONNABORTED:
                    raise
def _serve_one(s, listener, alive_r, handler):
    """Handle a single fork request; runs in the freshly forked child."""
    # close unnecessary stuff and reset SIGCHLD handler
    listener.close()
    os.close(alive_r)
    signal.signal(signal.SIGCHLD, handler)

    # receive fds from parent process
    fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1)
    s.close()
    assert len(fds) <= MAXFDS_TO_SEND
    # The first four fds are bookkeeping (status pipe ends, alive fd,
    # semaphore tracker fd); everything after them is user-inherited.
    (child_r, child_w, _forkserver._forkserver_alive_fd,
     stfd, *_forkserver._inherited_fds) = fds
    semaphore_tracker._semaphore_tracker._fd = stfd

    # send pid to client processes
    write_unsigned(child_w, os.getpid())

    # reseed random number generator
    if 'random' in sys.modules:
        import random
        random.seed()

    # run process object received over pipe
    code = spawn._main(child_r)

    # write the exit code to the pipe
    write_unsigned(child_w, code)
#
# Read and write unsigned numbers
#
def read_unsigned(fd):
    """Read one UNSIGNED_STRUCT-sized unsigned integer from fd.

    Loops over os.read() until the full struct is assembled; raises
    EOFError if the fd is exhausted first.
    """
    needed = UNSIGNED_STRUCT.size
    chunks = []
    received = 0
    while received < needed:
        chunk = os.read(fd, needed - received)
        if not chunk:
            raise EOFError('unexpected EOF')
        chunks.append(chunk)
        received += len(chunk)
    return UNSIGNED_STRUCT.unpack(b''.join(chunks))[0]
def write_unsigned(fd, n):
    """Write n to fd as one UNSIGNED_STRUCT-sized unsigned integer.

    Retries partial writes until the whole buffer is flushed; a zero-byte
    write would mean a stuck fd, so it is treated as a hard error.
    """
    remaining = UNSIGNED_STRUCT.pack(n)
    while remaining:
        written = os.write(fd, remaining)
        if written == 0:
            raise RuntimeError('should not get here')
        remaining = remaining[written:]
#
#
#
# The single shared server-state instance; its bound methods are the
# module-level public API declared in __all__.
_forkserver = ForkServer()
ensure_running = _forkserver.ensure_running
get_inherited_fds = _forkserver.get_inherited_fds
connect_to_new_process = _forkserver.connect_to_new_process
set_forkserver_preload = _forkserver.set_forkserver_preload
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import itertools
import operator
import six
from heat.common import exception
from heat.engine import function
from heat.engine import properties
__all__ = ['ResourceDefinition']
class ResourceDefinitionCore(object):
    """
    A definition of a resource, independent of any particular template format.
    """

    DELETION_POLICIES = (
        DELETE, RETAIN, SNAPSHOT,
    ) = (
        'Delete', 'Retain', 'Snapshot',
    )

    def __init__(self, name, resource_type, properties=None, metadata=None,
                 depends=None, deletion_policy=None, update_policy=None,
                 description=None):
        """
        Initialise with the parsed definition of a resource.

        Any intrinsic functions present in any of the sections should have been
        parsed into Function objects before constructing the definition.

        :param name: The name of the resource (for use in error messages)
        :param resource_type: The resource type
        :param properties: A dictionary of supplied property values
        :param metadata: The supplied metadata
        :param depends: A list of resource names on which this resource depends
        :param deletion_policy: The deletion policy for the resource
        :param update_policy: A dictionary of supplied update policies
        :param description: A string describing the resource
        """
        depends = depends or []
        self.name = name
        self.resource_type = resource_type
        self.description = description or ''
        self._properties = properties
        self._metadata = metadata
        self._depends = depends
        self._deletion_policy = deletion_policy
        self._update_policy = update_policy
        # The hash accumulates contributions from each supplied section so
        # that definitions comparing equal hash equal; the resource name is
        # deliberately excluded (see __eq__/__hash__).
        self._hash = hash(self.resource_type)
        self._rendering = None

        assert isinstance(self.description, six.string_types)

        if properties is not None:
            assert isinstance(properties, (collections.Mapping,
                                           function.Function))
            self._hash ^= _hash_data(properties)
        if metadata is not None:
            assert isinstance(metadata, (collections.Mapping,
                                         function.Function))
            self._hash ^= _hash_data(metadata)
        # depends must be a non-string sequence (or a Function resolving
        # to one); a bare string would silently iterate per-character.
        assert isinstance(depends, (collections.Sequence,
                                    function.Function))
        assert not isinstance(depends, six.string_types)
        self._hash ^= _hash_data(depends)
        if deletion_policy is not None:
            assert deletion_policy in self.DELETION_POLICIES
            self._hash ^= hash(deletion_policy)
        if update_policy is not None:
            assert isinstance(update_policy, (collections.Mapping,
                                              function.Function))
            self._hash ^= _hash_data(update_policy)

    def freeze(self, **overrides):
        """
        Return a frozen resource definition, with all functions resolved.

        This return a new resource definition with fixed data (containing no
        intrinsic functions). Named arguments passed to this method override
        the values passed as arguments to the constructor.
        """
        def arg_item(attr_name):
            name = attr_name.lstrip('_')
            if name in overrides:
                value = overrides[name]
                # Preserve None when the stored value was also unset and
                # the override is falsy (keeps "absent" sections absent).
                if not value and getattr(self, attr_name) is None:
                    value = None
            else:
                value = function.resolve(getattr(self, attr_name))
            return name, value

        args = ('name', 'resource_type', '_properties', '_metadata',
                '_depends', '_deletion_policy', '_update_policy',
                'description')

        defn = type(self)(**dict(arg_item(a) for a in args))
        defn._frozen = True
        return defn

    def reparse(self, stack, template):
        """
        Reinterpret the resource definition in the context of a new stack.

        This returns a new resource definition, with all of the functions
        parsed in the context of the specified stack and template.
        """
        assert not getattr(self, '_frozen', False
                           ), "Cannot re-parse a frozen definition"

        def reparse_snippet(snippet):
            return template.parse(stack, copy.deepcopy(snippet))

        return type(self)(
            self.name, self.resource_type,
            properties=reparse_snippet(self._properties),
            metadata=reparse_snippet(self._metadata),
            depends=reparse_snippet(self._depends),
            deletion_policy=reparse_snippet(self._deletion_policy),
            update_policy=reparse_snippet(self._update_policy))

    def dep_attrs(self, resource_name):
        """
        Return an iterator over dependent attributes for specified
        resource_name in resources' properties and metadata fields.
        """
        return itertools.chain(function.dep_attrs(self._properties,
                                                  resource_name),
                               function.dep_attrs(self._metadata,
                                                  resource_name))

    def dependencies(self, stack):
        """
        Return the Resource objects in the given stack on which this depends.
        """
        def path(section):
            return '.'.join([self.name, section])

        def get_resource(res_name):
            if res_name not in stack:
                raise exception.InvalidTemplateReference(resource=res_name,
                                                         key=self.name)
            return stack[res_name]

        def strict_func_deps(data, datapath):
            # Only dependencies flagged as strict participate in ordering.
            return six.moves.filter(lambda r: getattr(r, 'strict_dependency',
                                                      True),
                                    function.dependencies(data, datapath))

        return itertools.chain((get_resource(dep) for dep in self._depends),
                               strict_func_deps(self._properties,
                                                path(PROPERTIES)),
                               strict_func_deps(self._metadata,
                                                path(METADATA)))

    def properties(self, schema, context=None):
        """
        Return a Properties object representing the resource properties.

        The Properties object is constructed from the given schema, and may
        require a context to validate constraints.
        """
        return properties.Properties(schema, self._properties or {},
                                     function.resolve, self.name, context,
                                     section=PROPERTIES)

    def deletion_policy(self):
        """
        Return the deletion policy for the resource.

        The policy will be one of those listed in DELETION_POLICIES.
        """
        return function.resolve(self._deletion_policy) or self.DELETE

    def update_policy(self, schema, context=None):
        """
        Return a Properties object representing the resource update policy.

        The Properties object is constructed from the given schema, and may
        require a context to validate constraints.
        """
        return properties.Properties(schema, self._update_policy or {},
                                     function.resolve, self.name, context,
                                     section=UPDATE_POLICY)

    def metadata(self):
        """
        Return the resource metadata.
        """
        return function.resolve(self._metadata) or {}

    def render_hot(self):
        """
        Return a HOT snippet for the resource definition.
        """
        if self._rendering is None:
            attrs = {
                'type': 'resource_type',
                'properties': '_properties',
                'metadata': '_metadata',
                'deletion_policy': '_deletion_policy',
                'update_policy': '_update_policy',
                'depends_on': '_depends',
            }

            def rawattrs():
                """Get an attribute with function objects stripped out."""
                for key, attr in attrs.items():
                    value = getattr(self, attr)
                    if value is not None:
                        yield key, copy.deepcopy(value)

            self._rendering = dict(rawattrs())

        return self._rendering

    def __eq__(self, other):
        """
        Compare this resource definition for equality with another.

        Two resource definitions are considered to be equal if they can be
        generated from the same template snippet. The name of the resource is
        ignored, as are the actual values that any included functions resolve
        to.
        """
        if not isinstance(other, ResourceDefinitionCore):
            return NotImplemented

        return self.render_hot() == other.render_hot()

    def __ne__(self, other):
        """
        Compare this resource definition for inequality with another.

        See __eq__() for the definition of equality.
        """
        equal = self.__eq__(other)
        if equal is NotImplemented:
            return NotImplemented
        return not equal

    def __hash__(self):
        """
        Return a hash value for this resource definition.

        Resource definitions that compare equal will have the same hash. (In
        particular, the resource name is *not* taken into account.) See
        the __eq__() method for the definition of equality.
        """
        return self._hash

    def __repr__(self):
        """
        Return a string representation of the resource definition.
        """
        def arg_repr(arg_name):
            return '='.join([arg_name, repr(getattr(self, '_%s' % arg_name))])

        args = ('properties', 'metadata', 'depends',
                'deletion_policy', 'update_policy')
        data = {
            'classname': type(self).__name__,
            'name': repr(self.name),
            # Fixed: the attribute is resource_type; the class defines no
            # 'type' attribute, so 'self.type' raised AttributeError here.
            'type': repr(self.resource_type),
            'args': ', '.join(arg_repr(n) for n in args)
        }
        return '%(classname)s(%(name)s, %(type)s, %(args)s)' % data
# CFN template snippet keys, used by the backwards-compatible
# ResourceDefinition mapping interface below.
_KEYS = (
    TYPE, PROPERTIES, METADATA, DELETION_POLICY, UPDATE_POLICY,
    DEPENDS_ON, DESCRIPTION,
) = (
    'Type', 'Properties', 'Metadata', 'DeletionPolicy', 'UpdatePolicy',
    'DependsOn', 'Description',
)
class ResourceDefinition(ResourceDefinitionCore, collections.Mapping):
    """
    A resource definition that also acts like a cfn template snippet.

    This class exists only for backwards compatibility with existing resource
    plugins and unit tests; it will at some point be deprecated and then
    replaced with ResourceDefinitionCore.
    """

    def __eq__(self, other):
        """
        Compare this resource definition for equality with another.

        Two resource definitions are considered to be equal if they can be
        generated from the same template snippet. The name of the resource is
        ignored, as are the actual values that any included functions resolve
        to.

        This method can also compare the resource definition to a template
        snippet. In this case, two snippets are considered equal if they
        compare equal in a dictionary comparison. (Specifically, this means
        that intrinsic functions are compared by their results.) This exists
        solely to not break existing unit tests.
        """
        if not isinstance(other, ResourceDefinitionCore):
            if isinstance(other, collections.Mapping):
                # Snippet comparison: materialise self as a dict first.
                return dict(self) == other

        return super(ResourceDefinition, self).__eq__(other)

    def __iter__(self):
        """
        Iterate over the available CFN template keys.

        This is for backwards compatibility with existing code that expects a
        parsed-JSON template snippet.
        """
        yield TYPE
        # Only keys whose sections were actually supplied are exposed.
        if self._properties is not None:
            yield PROPERTIES
        if self._metadata is not None:
            yield METADATA
        if self._deletion_policy is not None:
            yield DELETION_POLICY
        if self._update_policy is not None:
            yield UPDATE_POLICY
        if self._depends:
            yield DEPENDS_ON
        if self.description:
            yield DESCRIPTION

    def __getitem__(self, key):
        """
        Get the specified item from a CFN template snippet.

        This is for backwards compatibility with existing code that expects a
        parsed-JSON template snippet.
        """
        if key == TYPE:
            return self.resource_type
        elif key == PROPERTIES:
            if self._properties is not None:
                return self._properties
        elif key == METADATA:
            if self._metadata is not None:
                return self._metadata
        elif key == DELETION_POLICY:
            if self._deletion_policy is not None:
                return self._deletion_policy
        elif key == UPDATE_POLICY:
            if self._update_policy is not None:
                return self._update_policy
        elif key == DEPENDS_ON:
            if self._depends:
                # A single dependency is unwrapped from its list.
                if len(self._depends) == 1:
                    return self._depends[0]
                return self._depends
        elif key == DESCRIPTION:
            if self.description:
                return self.description

        # Raised for unknown keys AND for known keys whose section is
        # absent, matching the keys yielded by __iter__().
        raise KeyError(key)

    def __len__(self):
        """
        Return the number of available CFN template keys.

        This is for backwards compatibility with existing code that expects a
        parsed-JSON template snippet.
        """
        return len(list(iter(self)))

    def __repr__(self):
        """
        Return a string representation of the resource definition.
        """
        return 'ResourceDefinition %s' % repr(dict(self))
def _hash_data(data):
    """
    Return a stable hash value for an arbitrary parsed-JSON data snippet.

    Sequences and mappings are hashed structurally (recursively); strings
    fall through to the plain hash() even though they are sequences.
    """
    if isinstance(data, function.Function):
        # NOTE(review): presumably the copy resolves/normalises function
        # state before hashing - confirm against function.Function.
        data = copy.deepcopy(data)

    is_string = isinstance(data, six.string_types)
    if not is_string and isinstance(data, collections.Sequence):
        return hash(tuple(_hash_data(item) for item in data))
    if not is_string and isinstance(data, collections.Mapping):
        result = 0
        for key, value in data.items():
            result ^= hash(key) ^ _hash_data(value)
        return result
    return hash(data)
|
|
# -*- coding: utf-8 -*-
"""
Holds the code for cleaning out unwanted tags from the lxml
dom xpath.
"""
from .utils import ReplaceSequence
class DocumentCleaner(object):
    """Strips boilerplate nodes from a parsed HTML document and normalises
    divs/spans into paragraphs, using the parser adapter supplied by the
    configuration object."""

    def __init__(self, config):
        """Set appropriate tag names and regexes of tags to remove
        from the HTML
        """
        self.config = config
        # Parser adapter (DOM access layer) provided by the configuration.
        self.parser = self.config.get_parser()
        # id/class/name fragments identifying boilerplate nodes to delete.
        self.remove_nodes_re = (
            "^side$|combx|retweet|mediaarticlerelated|menucontainer|"
            "navbar|storytopbar-bucket|utility-bar|inline-share-tools"
            "|comment|PopularQuestions|contact|foot|footer|Footer|footnote"
            "|cnn_strycaptiontxt|cnn_html_slideshow|cnn_strylftcntnt"
            "|links|meta$|shoutbox|sponsor"
            "|tags|socialnetworking|socialNetworking|cnnStryHghLght"
            "|cnn_stryspcvbx|^inset$|pagetools|post-attributes"
            "|welcome_form|contentTools2|the_answers"
            "|communitypromo|runaroundLeft|subscribe|vcard|articleheadings"
            "|date|^print$|popup|author-dropdown|tools|socialtools|byline"
            "|konafilter|KonaFilter|breadcrumbs|^fn$|wp-caption-text"
            "|legende|ajoutVideo|timestamp|js_replies"
        )
        # EXSLT namespace enabling re:test() in the XPath queries below.
        self.regexp_namespace = "http://exslt.org/regular-expressions"
        self.nauthy_ids_re = ("//*[re:test(@id, '%s', 'i')]" %
                              self.remove_nodes_re)
        self.nauthy_classes_re = ("//*[re:test(@class, '%s', 'i')]" %
                                  self.remove_nodes_re)
        self.nauthy_names_re = ("//*[re:test(@name, '%s', 'i')]" %
                                self.remove_nodes_re)
        self.div_to_p_re = r"<(a|blockquote|dl|div|img|ol|p|pre|table|ul)"
        self.caption_re = "^caption$"
        self.google_re = " google "
        self.entries_re = "^[^entry-]more.*$"
        self.facebook_re = "[^-]facebook"
        self.facebook_braodcasting_re = "facebook-broadcasting"
        self.twitter_re = "[^-]twitter"
        # Collapses newlines/tabs/whitespace-only lines in extracted text.
        self.tablines_replacements = ReplaceSequence()\
            .create("\n", "\n\n")\
            .append("\t")\
            .append("^\\s+$")

    def clean(self, doc_to_clean):
        """Remove chunks of the DOM as specified
        """
        # The order matters: structural cleanup first, then regex-based
        # node removal, then div/span -> paragraph conversion.
        doc_to_clean = self.clean_body_classes(doc_to_clean)
        doc_to_clean = self.clean_article_tags(doc_to_clean)
        doc_to_clean = self.clean_em_tags(doc_to_clean)
        doc_to_clean = self.remove_drop_caps(doc_to_clean)
        doc_to_clean = self.remove_scripts_styles(doc_to_clean)
        doc_to_clean = self.clean_bad_tags(doc_to_clean)
        doc_to_clean = self.remove_nodes_regex(doc_to_clean, self.caption_re)
        doc_to_clean = self.remove_nodes_regex(doc_to_clean, self.google_re)
        doc_to_clean = self.remove_nodes_regex(doc_to_clean, self.entries_re)
        doc_to_clean = self.remove_nodes_regex(doc_to_clean, self.facebook_re)
        doc_to_clean = self.remove_nodes_regex(doc_to_clean,
                                               self.facebook_braodcasting_re)
        doc_to_clean = self.remove_nodes_regex(doc_to_clean, self.twitter_re)
        doc_to_clean = self.clean_para_spans(doc_to_clean)
        doc_to_clean = self.div_to_para(doc_to_clean, 'div')
        doc_to_clean = self.div_to_para(doc_to_clean, 'span')
        return doc_to_clean

    def clean_body_classes(self, doc):
        """Removes the `class` attribute from the <body> tag because
        if there is a bad match, the entire DOM will be empty!
        """
        elements = self.parser.getElementsByTag(doc, tag="body")
        if elements:
            self.parser.delAttribute(elements[0], attr="class")
        return doc

    def clean_article_tags(self, doc):
        """Strip id/name/class from <article> tags so they survive the
        regex-based removal passes."""
        articles = self.parser.getElementsByTag(doc, tag='article')
        for article in articles:
            for attr in ['id', 'name', 'class']:
                self.parser.delAttribute(article, attr=attr)
        return doc

    def clean_em_tags(self, doc):
        """Unwrap <em> tags that contain no images (keep their text)."""
        ems = self.parser.getElementsByTag(doc, tag='em')
        for node in ems:
            images = self.parser.getElementsByTag(node, tag='img')
            if len(images) == 0:
                self.parser.drop_tag(node)
        return doc

    def remove_drop_caps(self, doc):
        """Unwrap decorative drop-cap spans, preserving their text."""
        items = self.parser.css_select(doc, 'span[class~=dropcap], '
                                            'span[class~=drop_cap]')
        for item in items:
            self.parser.drop_tag(item)
        return doc

    def remove_scripts_styles(self, doc):
        """Delete <script>, <style> and comment nodes entirely."""
        # remove scripts
        scripts = self.parser.getElementsByTag(doc, tag='script')
        for item in scripts:
            self.parser.remove(item)
        # remove styles
        styles = self.parser.getElementsByTag(doc, tag='style')
        for item in styles:
            self.parser.remove(item)
        # remove comments
        comments = self.parser.getComments(doc)
        for item in comments:
            self.parser.remove(item)
        return doc

    def clean_bad_tags(self, doc):
        """Delete nodes whose id/class/name matches remove_nodes_re."""
        # ids
        naughty_list = self.parser.xpath_re(doc, self.nauthy_ids_re)
        for node in naughty_list:
            self.parser.remove(node)
        # class
        naughty_classes = self.parser.xpath_re(doc, self.nauthy_classes_re)
        for node in naughty_classes:
            self.parser.remove(node)
        # name
        naughty_names = self.parser.xpath_re(doc, self.nauthy_names_re)
        for node in naughty_names:
            self.parser.remove(node)
        return doc

    def remove_nodes_regex(self, doc, pattern):
        """Delete nodes whose id or class matches the given regex."""
        for selector in ['id', 'class']:
            reg = "//*[re:test(@%s, '%s', 'i')]" % (selector, pattern)
            naughty_list = self.parser.xpath_re(doc, reg)
            for node in naughty_list:
                self.parser.remove(node)
        return doc

    def clean_para_spans(self, doc):
        """Unwrap <span> elements nested inside paragraphs."""
        spans = self.parser.css_select(doc, 'p span')
        for item in spans:
            self.parser.drop_tag(item)
        return doc

    def get_flushed_buffer(self, replacement_text, doc):
        """Turn accumulated replacement text into a paragraph node."""
        return self.parser.textToPara(replacement_text)

    def replace_walk_left_right(self, kid, kid_text,
                                replacement_text, nodes_to_remove):
        """Collect a text node's content plus any adjacent, not-yet-used
        <a> siblings (left then right) into replacement_text, marking the
        consumed anchors with grv-usedalready and queueing them for
        removal."""
        kid_text_node = kid
        replace_text = self.tablines_replacements.replaceAll(kid_text)
        if len(replace_text) > 1:
            # Walk left over unconsumed anchor siblings.
            prev_node = self.parser.previousSibling(kid_text_node)
            while prev_node is not None \
                    and self.parser.getTag(prev_node) == "a" \
                    and self.parser.getAttribute(
                        prev_node, 'grv-usedalready') != 'yes':
                outer = " " + self.parser.outerHtml(prev_node) + " "
                replacement_text.append(outer)
                nodes_to_remove.append(prev_node)
                self.parser.setAttribute(prev_node, attr='grv-usedalready',
                                         value='yes')
                prev_node = self.parser.previousSibling(prev_node)

            replacement_text.append(replace_text)

            # Walk right over unconsumed anchor siblings.
            next_node = self.parser.nextSibling(kid_text_node)
            while next_node is not None \
                    and self.parser.getTag(next_node) == "a" \
                    and self.parser.getAttribute(
                        next_node, 'grv-usedalready') != 'yes':
                outer = " " + self.parser.outerHtml(next_node) + " "
                replacement_text.append(outer)
                nodes_to_remove.append(next_node)
                self.parser.setAttribute(next_node, attr='grv-usedalready',
                                         value='yes')
                next_node = self.parser.nextSibling(next_node)

    def get_replacement_nodes(self, doc, div):
        """Build the list of nodes that will replace a div's children,
        merging loose text (and adjacent anchors) into paragraph nodes."""
        replacement_text = []
        nodes_to_return = []
        nodes_to_remove = []
        kids = self.parser.childNodesWithText(div)
        for kid in kids:
            # The node is a <p> and already has some replacement text
            if self.parser.getTag(kid) == 'p' and len(replacement_text) > 0:
                new_node = self.get_flushed_buffer(
                    ''.join(replacement_text), doc)
                nodes_to_return.append(new_node)
                replacement_text = []
                nodes_to_return.append(kid)
            # The node is a text node
            elif self.parser.isTextNode(kid):
                kid_text = self.parser.getText(kid)
                self.replace_walk_left_right(kid, kid_text, replacement_text,
                                             nodes_to_remove)
            else:
                nodes_to_return.append(kid)

        # flush out anything still remaining
        if(len(replacement_text) > 0):
            new_node = self.get_flushed_buffer(''.join(replacement_text), doc)
            nodes_to_return.append(new_node)
            replacement_text = []

        for n in nodes_to_remove:
            self.parser.remove(n)

        return nodes_to_return

    def replace_with_para(self, doc, div):
        """Convert the given element in place to a <p> tag."""
        self.parser.replaceTag(div, 'p')

    def div_to_para(self, doc, dom_type):
        """Convert divs/spans to paragraphs: childless (of block tags)
        elements are retagged directly; others get their children rebuilt
        via get_replacement_nodes()."""
        bad_divs = 0
        else_divs = 0
        divs = self.parser.getElementsByTag(doc, tag=dom_type)
        tags = ['a', 'blockquote', 'dl', 'div', 'img', 'ol', 'p',
                'pre', 'table', 'ul']

        for div in divs:
            items = self.parser.getElementsByTags(div, tags)
            if div is not None and len(items) == 0:
                # No block-level children: safe to retag as a paragraph.
                self.replace_with_para(doc, div)
                bad_divs += 1
            elif div is not None:
                # Rebuild the children, merging text runs into paragraphs.
                replaceNodes = self.get_replacement_nodes(doc, div)
                div.clear()

                for c, n in enumerate(replaceNodes):
                    div.insert(c, n)

                else_divs += 1

        return doc
|
|
"""comtypes.server.register - register and unregister a COM object.
Exports the UseCommandLine function. UseCommandLine is called with
the COM object classes that a module exposes. It parses the Windows
command line and takes the appropriate actions.
These command line options are supported:
/regserver - register the classes with COM.
/unregserver - unregister the classes with COM.
/nodebug - remove all logging configuration from the registry.
/l <name>=<level> - configure the logging level for the standard Python logging module,
this option may be used several times.
/f <formatter> - specify the formatter string.
Note: Registering and unregistering the objects does remove logging
entries. Configuring the logging does not change other registry
entries, so it is possible to freeze a comobject with py2exe, register
it, then configure logging afterwards to debug it, and delete the
logging config afterwards.
Sample usage:
Register the COM object:
python mycomobj.py /regserver
Configure logging info:
python mycomobj.py /l comtypes=INFO /l comtypes.server=DEBUG /f %(message)s
Now, debug the object, and when done delete logging info:
python mycomobj.py /nodebug
"""
import sys, os
import _winreg
import logging
import comtypes
from comtypes.typeinfo import LoadTypeLibEx, UnRegisterTypeLib, REGKIND_REGISTER
from comtypes.hresult import *
from comtypes.server import w_getopt
import comtypes.server.inprocserver
from ctypes import windll, c_ulong, c_wchar_p, WinError, sizeof, create_string_buffer
_debug = logging.getLogger(__name__).debug
def get_winerror(exception):
    """Return the Windows error code carried by *exception*.

    Prefers the ``winerror`` attribute; falls back to ``errno`` when the
    exception has no ``winerror`` attribute at all.
    """
    if hasattr(exception, "winerror"):
        return exception.winerror
    return exception.errno
# a SHDeleteKey function, will remove a registry key with all subkeys.
def _non_zero(retval, func, args):
if retval:
raise WinError(retval)
# Bind SHDeleteKeyW (deletes a registry key with all subkeys); errcheck
# makes non-zero return values raise WinError automatically.
SHDeleteKey = windll.shlwapi.SHDeleteKeyW
SHDeleteKey.errcheck = _non_zero
SHDeleteKey.argtypes = c_ulong, c_wchar_p

# Old-Python compatibility: fall back to the 'sets' module when the
# built-in set type is unavailable.
try:
    Set = set
except NameError:
    from sets import Set #as set

# Short display names for the registry root handles, used in debug output.
_KEYS = {_winreg.HKEY_CLASSES_ROOT: "HKCR",
         _winreg.HKEY_LOCAL_MACHINE: "HKLM",
         _winreg.HKEY_CURRENT_USER: "HKCU"}
def _explain(hkey):
    """Map a registry root handle to a short name like 'HKCR'.

    Unknown handles are returned unchanged.
    """
    try:
        return _KEYS[hkey]
    except KeyError:
        return hkey
class Registrar(object):
    """COM class registration.

    The COM class can override what this does by implementing
    _register and/or _unregister class methods. These methods will be
    called with the calling instance of Registrar, and so can call the
    Registrars _register and _unregister methods which do the actual
    work.
    """

    def nodebug(self, cls):
        """Delete logging entries from the registry."""
        clsid = cls._reg_clsid_
        try:
            _debug('DeleteKey( %s\\CLSID\\%s\\Logging"' % \
                   (_explain(_winreg.HKEY_CLASSES_ROOT), clsid))
            hkey = _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, r"CLSID\%s" % clsid)
            _winreg.DeleteKey(hkey, "Logging")
        except WindowsError, detail:
            # winerror 2 is ERROR_FILE_NOT_FOUND: key already absent is fine.
            if get_winerror(detail) != 2:
                raise

    def debug(self, cls, levels, format):
        """Write entries in the registry to setup logging for this clsid."""
        # handlers
        # format
        clsid = cls._reg_clsid_
        _debug('CreateKey( %s\\CLSID\\%s\\Logging"' % \
               (_explain(_winreg.HKEY_CLASSES_ROOT), clsid))
        hkey = _winreg.CreateKey(_winreg.HKEY_CLASSES_ROOT, r"CLSID\%s\Logging" % clsid)
        for item in levels:
            # Validate each "loggername=LEVEL" item: LEVEL must name an
            # integer attribute of the logging module. The parsed values are
            # only checked here; the raw strings are written to the registry.
            name, value = item.split("=")
            v = getattr(logging, value)
            assert isinstance(v, int)
        _debug('SetValueEx(levels, %s)' % levels)
        _winreg.SetValueEx(hkey, "levels", None, _winreg.REG_MULTI_SZ, levels)
        if format:
            _debug('SetValueEx(format, %s)' % format)
            _winreg.SetValueEx(hkey, "format", None, _winreg.REG_SZ, format)
        else:
            # No format requested: remove any previously configured one.
            _debug('DeleteValue(format)')
            try:
                _winreg.DeleteValue(hkey, "format")
            except WindowsError, detail:
                if get_winerror(detail) != 2:
                    raise

    def register(self, cls, executable=None):
        """Register the COM server class."""
        # First, we unregister the object with force=True, to force removal
        # of all registry entries, even if we would not write them.
        # Second, we create new entries.
        # It seems ATL does the same.
        mth = getattr(cls, "_register", None)
        if mth is not None:
            # The class provides its own registration hook; delegate to it.
            mth(self)
        else:
            self._unregister(cls, force=True)
            self._register(cls, executable)

    def _register(self, cls, executable=None):
        # Write every (rootkey, subkey, valuename, value) entry computed by
        # _registry_entries, then register the typelib if the class has one.
        table = self._registry_entries(cls)
        table.sort()
        _debug("Registering %s", cls)
        for hkey, subkey, valuename, value in table:
            _debug ('[%s\\%s]', _explain(hkey), subkey)
            _debug('%s="%s"', valuename or "@", value)
            k = _winreg.CreateKey(hkey, subkey)
            _winreg.SetValueEx(k, valuename, None, _winreg.REG_SZ, str(value))
        tlib = getattr(cls, "_reg_typelib_", None)
        if tlib is not None:
            if hasattr(sys, "frozendllhandle"):
                # Frozen dll server: the typelib lives in the hosting dll.
                dll = self._get_serverdll()
                _debug("LoadTypeLibEx(%s, REGKIND_REGISTER)", dll)
                LoadTypeLibEx(dll, REGKIND_REGISTER)
            else:
                if executable:
                    path = executable
                elif hasattr(sys, "frozen"):
                    path = sys.executable
                else:
                    path = cls._typelib_path_
                _debug("LoadTypeLibEx(%s, REGKIND_REGISTER)", path)
                LoadTypeLibEx(path, REGKIND_REGISTER)
        _debug("Done")

    def unregister(self, cls, force=False):
        """Unregister the COM server class."""
        mth = getattr(cls, "_unregister", None)
        if mth is not None:
            mth(self)
        else:
            self._unregister(cls, force=force)

    def _unregister(self, cls, force=False):
        # If force==False, we only remove those entries that we
        # actually would have written. It seems ATL does the same.
        table = [t[:2] for t in self._registry_entries(cls)]
        # only unique entries
        table = list(set(table))
        table.sort()
        # Reverse order so child keys are deleted before their parents.
        table.reverse()
        _debug("Unregister %s", cls)
        for hkey, subkey in table:
            try:
                if force:
                    # SHDeleteKey removes the key including all subkeys.
                    _debug("SHDeleteKey %s\\%s", _explain(hkey), subkey)
                    SHDeleteKey(hkey, subkey)
                else:
                    _debug("DeleteKey %s\\%s", _explain(hkey), subkey)
                    _winreg.DeleteKey(hkey, subkey)
            except WindowsError, detail:
                # winerror 2 (not found) is expected for absent keys.
                if get_winerror(detail) != 2:
                    raise
        tlib = getattr(cls, "_reg_typelib_", None)
        if tlib is not None:
            try:
                _debug("UnRegisterTypeLib(%s, %s, %s)", *tlib)
                UnRegisterTypeLib(*tlib)
            except WindowsError, detail:
                # A typelib that was never registered or cannot be loaded is
                # not an error during unregistration.
                if not get_winerror(detail) in (TYPE_E_REGISTRYACCESS, TYPE_E_CANTLOADLIBRARY):
                    raise
        _debug("Done")

    def _get_serverdll(self):
        """Return the pathname of the dll hosting the COM object."""
        handle = getattr(sys, "frozendllhandle", None)
        if handle is not None:
            # Frozen (py2exe-style) dll server: ask Windows for the dll path.
            buf = create_string_buffer(260)
            windll.kernel32.GetModuleFileNameA(handle, buf, sizeof(buf))
            return buf[:]
        import _ctypes
        return _ctypes.__file__

    def _get_full_classname(self, cls):
        """Return <modulename>.<classname> for 'cls'."""
        modname = cls.__module__
        if modname == "__main__":
            # Use the script name instead of the unhelpful "__main__".
            modname = os.path.splitext(os.path.basename(sys.argv[0]))[0]
        return "%s.%s" % (modname, cls.__name__)

    def _get_pythonpath(self, cls):
        """Return the filesystem path of the module containing 'cls'."""
        modname = cls.__module__
        dirname = os.path.dirname(sys.modules[modname].__file__)
        return os.path.abspath(dirname)

    def _registry_entries(self, cls):
        """Return a sequence of tuples containing registry entries.

        The tuples must be (key, subkey, name, value).

        Required entries:
        =================
        _reg_clsid_ - a string or GUID instance
        _reg_clsctx_ - server type(s) to register

        Optional entries:
        =================
        _reg_desc_ - a string
        _reg_progid_ - a string naming the progid, typically 'MyServer.MyObject.1'
        _reg_novers_progid_ - version independent progid, typically 'MyServer.MyObject'
        _reg_typelib_ - a tuple (libid, majorversion, minorversion) specifying a typelib.
        _reg_threading_ - a string specifying the threading model

        Note that the first part of the progid string is typically the
        IDL library name of the type library containing the coclass.
        """
        HKCR = _winreg.HKEY_CLASSES_ROOT
        # table format: rootkey, subkey, valuename, value
        table = []
        append = lambda *args: table.append(args)
        # basic entry - names the comobject
        reg_clsid = str(cls._reg_clsid_) # that's the only required attribute for registration
        reg_desc = getattr(cls, "_reg_desc_", "")
        if not reg_desc:
            # Simple minded algorithm to construct a description from
            # the progid:
            reg_desc = getattr(cls, "_reg_novers_progid_", "") or \
                       getattr(cls, "_reg_progid_", "")
            if reg_desc:
                reg_desc = reg_desc.replace(".", " ")
        append(HKCR, "CLSID\\%s" % reg_clsid, "", reg_desc)
        reg_progid = getattr(cls, "_reg_progid_", None)
        if reg_progid:
            # for ProgIDFromCLSID:
            append(HKCR, "CLSID\\%s\\ProgID" % reg_clsid, "", reg_progid) # 1
            # for CLSIDFromProgID
            if reg_desc:
                append(HKCR, reg_progid, "", reg_desc) # 2
            append(HKCR, "%s\\CLSID" % reg_progid, "", reg_clsid) # 3
            reg_novers_progid = getattr(cls, "_reg_novers_progid_", None)
            if reg_novers_progid:
                append(HKCR, "CLSID\\%s\\VersionIndependentProgID" % reg_clsid, # 1a
                       "", reg_novers_progid)
                if reg_desc:
                    append(HKCR, reg_novers_progid, "", reg_desc) # 2a
                append(HKCR, "%s\\CurVer" % reg_novers_progid, "", reg_progid) #
                append(HKCR, "%s\\CLSID" % reg_novers_progid, "", reg_clsid) # 3a
        clsctx = getattr(cls, "_reg_clsctx_", 0)
        if clsctx & comtypes.CLSCTX_LOCAL_SERVER \
               and not hasattr(sys, "frozendllhandle"):
            exe = sys.executable
            if " " in exe:
                exe = '"%s"' % exe
            if not hasattr(sys, "frozen"):
                if not __debug__:
                    exe = "%s -O" % exe
                script = os.path.abspath(sys.modules[cls.__module__].__file__)
                if " " in script:
                    script = '"%s"' % script
                append(HKCR, "CLSID\\%s\\LocalServer32" % reg_clsid, "", "%s %s" % (exe, script))
            else:
                append(HKCR, "CLSID\\%s\\LocalServer32" % reg_clsid, "", "%s" % exe)
        # Register InprocServer32 only when run from script or from
        # py2exe dll server, not from py2exe exe server.
        if clsctx & comtypes.CLSCTX_INPROC_SERVER \
               and getattr(sys, "frozen", None) in (None, "dll"):
            append(HKCR, "CLSID\\%s\\InprocServer32" % reg_clsid,
                   "", self._get_serverdll())
            # only for non-frozen inproc servers the PythonPath/PythonClass is needed.
            if not hasattr(sys, "frozendllhandle") \
                   or not comtypes.server.inprocserver._clsid_to_class:
                append(HKCR, "CLSID\\%s\\InprocServer32" % reg_clsid,
                       "PythonClass", self._get_full_classname(cls))
                append(HKCR, "CLSID\\%s\\InprocServer32" % reg_clsid,
                       "PythonPath", self._get_pythonpath(cls))
            reg_threading = getattr(cls, "_reg_threading_", None)
            if reg_threading is not None:
                append(HKCR, "CLSID\\%s\\InprocServer32" % reg_clsid,
                       "ThreadingModel", reg_threading)
        reg_tlib = getattr(cls, "_reg_typelib_", None)
        if reg_tlib is not None:
            append(HKCR, "CLSID\\%s\\Typelib" % reg_clsid, "", reg_tlib[0])
        return table
################################################################
def register(cls):
    """Module-level convenience: register *cls* via a throwaway Registrar."""
    registrar = Registrar()
    registrar.register(cls)
def unregister(cls):
    """Module-level convenience: unregister *cls* via a throwaway Registrar."""
    registrar = Registrar()
    registrar.unregister(cls)
def UseCommandLine(*classes):
    """Drive (un)registration and logging setup of *classes* from sys.argv.

    Recognized options: -regserver, -unregserver, -embedding, -nodebug,
    -f <logformat>, -l <loggername=level>.
    Returns 1 if any option was processed, 0 if there was nothing to do.
    """
    usage = """Usage: %s [-regserver] [-unregserver] [-nodebug] [-f logformat] [-l loggername=level]""" % sys.argv[0]
    opts, args = w_getopt.w_getopt(sys.argv[1:],
                                   "regserver unregserver embedding l: f: nodebug")
    if not opts:
        sys.stderr.write(usage + "\n")
        return 0 # nothing for us to do
    levels = []
    format = None
    nodebug = False
    runit = False
    # First pass: act on registration options immediately, accumulate the
    # logging-related options for the passes below.
    for option, value in opts:
        if option == "regserver":
            for cls in classes:
                register(cls)
        elif option == "unregserver":
            for cls in classes:
                unregister(cls)
        elif option == "embedding":
            runit = True
        elif option == "f":
            format = value
        elif option == "l":
            levels.append(value)
        elif option == "nodebug":
            nodebug = True
    if levels or format is not None:
        for cls in classes:
            Registrar().debug(cls, levels, format)
    if nodebug:
        for cls in classes:
            Registrar().nodebug(cls)
    if runit:
        # -embedding: start serving the COM objects from this process.
        import comtypes.server.localserver
        comtypes.server.localserver.run(classes)
    return 1 # we have done something
if __name__ == "__main__":
    # When run as a script, dispatch on the command-line options (with no
    # classes given, this just prints usage for unrecognized invocations).
    UseCommandLine()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2009, MARIMORE Inc Tokyo, Japan.
# Contributed by
# Iqbal Abdullah <iqbal@marimore.co.jp>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the MARIMORE Inc nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This module defines helper functions
"""
__author__ = "Iqbal Abdullah <iqbal@marimore.co.jp>"
__date__ = "$LastChangedDate$"
__version__ = "$LastChangedRevision$"
import sys, datetime
import inspect
def import_object(qualified_name):
    """
    import_object() will return the module/function/class which is specified
    in qualified_name

    @type qualified_name: string
    @param qualified_name: The fully qualified package path to the target
    @rtype: object or None
    @return: An object to the target module/function/class or None if an exception
             occurred
    """
    parent_namespace = ".".join(qualified_name.split(".")[:-1])
    target_namespace = qualified_name.split(".")[-1]
    if parent_namespace == "":
        # This is the top module
        parent_namespace = target_namespace
    try:
        __import__(parent_namespace)
        m = sys.modules[parent_namespace]
        if parent_namespace == qualified_name:
            # Top-level module requested: return the module object itself.
            # (The old code did getattr(module, module_name) here, which
            # always raised AttributeError and made top-level lookups fail.)
            return m
        return getattr(m, target_namespace)
    except Exception as e:
        # print() call form works under both Python 2 and Python 3.
        print("Exception occurred 02: import_object(): %s" % (e))
        return None
def log_syslog(ident, message, priority="LOG_NOTICE", facility="LOG_USER"):
    """
    Writes log messages via the system syslog(). This is for UNIX based systems only.

    @type facility: string
    @param facility: One of LOG_KERN, LOG_USER, LOG_MAIL, LOG_DAEMON, LOG_AUTH,
                     LOG_LPR, LOG_NEWS, LOG_UUCP, LOG_CRON, LOG_LOCAL0..LOG_LOCAL7.
                     Unrecognized names fall back to LOG_USER (default).
    @type priority: string
    @param priority: One of LOG_EMERG, LOG_ALERT, LOG_CRIT, LOG_ERR, LOG_WARNING,
                     LOG_NOTICE, LOG_INFO, LOG_DEBUG.
                     Unrecognized names fall back to LOG_NOTICE (default).
    @type ident: string
    @param ident: The prepended ident for your message. Usually the filename.
    @type message: string
    @param message: The message you want to log
    @return: None on success, -1 when the syslog module is unavailable
             (i.e. on non-UNIX platforms).
    """
    try:
        import syslog
    except ImportError:
        # Not a UNIX platform; signal failure the way callers expect.
        # (Narrowed from a bare except: only the import can fail here.)
        return -1
    # Resolve the symbolic facility name via a lookup table instead of the
    # old if/elif chain. This also fixes a crash: a bare "LOG_LOCAL" (with
    # no trailing digit) used to raise IndexError at facility[9]; now it
    # falls back to LOG_USER like every other unrecognized name.
    facilities = {
        "LOG_KERN": syslog.LOG_KERN,
        "LOG_MAIL": syslog.LOG_MAIL,
        "LOG_DAEMON": syslog.LOG_DAEMON,
        "LOG_AUTH": syslog.LOG_AUTH,
        "LOG_LPR": syslog.LOG_LPR,
        "LOG_NEWS": syslog.LOG_NEWS,
        "LOG_UUCP": syslog.LOG_UUCP,
        "LOG_CRON": syslog.LOG_CRON,
    }
    for n in range(8):
        facilities["LOG_LOCAL%d" % n] = getattr(syslog, "LOG_LOCAL%d" % n)
    fac = facilities.get(facility, syslog.LOG_USER)
    priorities = {
        "LOG_EMERG": syslog.LOG_EMERG,
        "LOG_ALERT": syslog.LOG_ALERT,
        "LOG_CRIT": syslog.LOG_CRIT,
        "LOG_ERR": syslog.LOG_ERR,
        "LOG_WARNING": syslog.LOG_WARNING,
        "LOG_INFO": syslog.LOG_INFO,
        "LOG_DEBUG": syslog.LOG_DEBUG,
    }
    prio = priorities.get(priority, syslog.LOG_NOTICE)
    syslog.openlog(ident, 0, fac)
    syslog.syslog(prio, message)
def log_syslogn(message, ident=None, priority="LOG_NOTICE", facility="LOG_USER"):
    """
    Shortcut to log to LOCALn. The function signature should be similar to log_syslog
    except that message is swapped to be the first so ident would be optional.
    If ident is None (or empty), use the inspect module to derive the calling
    function's qualified name as the ident.
    """
    def get_ident(frame):
        # Build "module.Class.method()" or "module.function()" from a stack
        # frame record (as returned by inspect.stack()).
        frame_info = inspect.getframeinfo(frame[0])
        if 'self' in frame[0].f_locals:
            instance = frame[0].f_locals['self']
        else:
            instance = None
        mod = inspect.getmodule(frame[0])
        filename, lineno, function, code_context, index = frame_info
        if instance:
            ident = '%s.%s.%s()' % (mod.__name__, instance.__class__.__name__,
                                    function.strip())
        else:
            if mod.__name__ == '__main__':
                # "__main__" is unhelpful; use the script filename instead.
                modname = filename
            else:
                modname = mod.__name__
            ident = '%s.%s()' % (modname, function.strip())
        return ident
    if not ident:
        frame = None
        frame_info = None
        try:
            # stack()[2]: skip this function and the log_syslogN wrapper so
            # the ident names the wrapper's caller.
            frame = inspect.stack()[2]
            ident = get_ident(frame)
        except Exception:
            ident = ''
        finally:
            # Drop frame references promptly to avoid reference cycles
            # (see the inspect module documentation).
            del frame
            del frame_info
    # Collapse all whitespace (including newlines from indented multi-line
    # literals) so the log message is always a single line.
    messages = [line.strip() for line in message.split()]
    message1 = " ".join(messages)
    log_syslog(ident, message1, priority, facility=facility)
def log_syslog0(message, ident=None, priority="LOG_NOTICE", facility="LOG_LOCAL0"):
    # Convenience wrapper: log_syslogn defaulting to the LOG_LOCAL0 facility.
    log_syslogn(message, ident=ident, priority=priority, facility=facility)
def log_syslog1(message, ident=None, priority="LOG_NOTICE", facility="LOG_LOCAL1"):
    # Convenience wrapper: log_syslogn defaulting to the LOG_LOCAL1 facility.
    log_syslogn(message, ident=ident, priority=priority, facility=facility)
def log_syslog2(message, ident=None, priority="LOG_NOTICE", facility="LOG_LOCAL2"):
    # Convenience wrapper: log_syslogn defaulting to the LOG_LOCAL2 facility.
    log_syslogn(message, ident=ident, priority=priority, facility=facility)
def log_syslog3(message, ident=None, priority="LOG_NOTICE", facility="LOG_LOCAL3"):
    # Convenience wrapper: log_syslogn defaulting to the LOG_LOCAL3 facility.
    log_syslogn(message, ident=ident, priority=priority, facility=facility)
def log_syslog4(message, ident=None, priority="LOG_NOTICE", facility="LOG_LOCAL4"):
    # Convenience wrapper: log_syslogn defaulting to the LOG_LOCAL4 facility.
    log_syslogn(message, ident=ident, priority=priority, facility=facility)
def uniqify_list(seq, idfun=None):
    """
    Uniqify a list while preserving order, i.e.
    [1,2,2,2,3,4,5,6,6,6,6] becomes [1,2,3,4,5,6].

    You can also specify a function to transform the data before comparing:
    >>> a=list('ABeeE')
    >>> uniqify_list(a)
    ['A','B','e','E']
    >>> uniqify_list(a, lambda x: x.lower())
    ['A','B','e']

    Taken from http://www.peterbe.com/plog/uniqifiers-benchmark
    @type seq: list
    @param seq: The list which you want to uniqify
    @type idfun: a function, defaults to None
    @param idfun: A function which you want to process the data with
    @rtype: list
    @return: A list which has been uniqified
    """
    if idfun is None:
        idfun = lambda x: x
    seen = set()
    unique = []
    for element in seq:
        marker = idfun(element)
        if marker not in seen:
            seen.add(marker)
            unique.append(element)
    return unique
def calculate_time(base_time, diff_seconds, reverse=False):
    """
    Calculates a new time if you add diff_seconds to base_time

    @type base_time: datetime object
    @param base_time: The base time which you want to do the calculation on
    @type diff_seconds: signed int
    @param diff_seconds: The time difference in seconds. Use a negative value if the time is in the past
    @type reverse: bool
    @param reverse: Reverse calculation; Instead of adding diff_seconds to base_time, you do deduction.
                    This is useful in cases where you have the localtime and the delta but you want to
                    find the UTC.
    @rtype: datetime object
    @return: A datetime object representing the calculated time
    """
    offset = datetime.timedelta(seconds=diff_seconds)
    if reverse:
        return base_time - offset
    return base_time + offset
if __name__ == '__main__':
    # Ad-hoc manual smoke checks (note: Python 2 print statements).
    #log_syslog("test", "test test")
    #import_object('test')
    print uniqify_list(list('123123454324332ABCAABCabcaabcrt'))
    print calculate_time(datetime.datetime.now(), -60)
    print calculate_time(datetime.datetime.now(), -60, True)
|
|
"""An XML Reader is the SAX 2 name for an XML parser. XML Parsers
should be based on this code. """
import handler
from _exceptions import SAXNotSupportedException, SAXNotRecognizedException
# ===== XMLREADER =====
class XMLReader:
    """Interface for reading an XML document using callbacks.

    XMLReader is the interface that an XML parser's SAX2 driver must
    implement. This interface allows an application to set and query
    features and properties in the parser, to register event handlers
    for document processing, and to initiate a document parse.

    All SAX interfaces are assumed to be synchronous: the parse
    methods must not return until parsing is complete, and readers
    must wait for an event-handler callback to return before reporting
    the next event."""

    def __init__(self):
        # Install default (no-op) handlers so a reader works even if the
        # application never registers its own.
        self._cont_handler = handler.ContentHandler()
        self._dtd_handler = handler.DTDHandler()
        self._ent_handler = handler.EntityResolver()
        self._err_handler = handler.ErrorHandler()

    def parse(self, source):
        "Parse an XML document from a system identifier or an InputSource."
        raise NotImplementedError("This method must be implemented!")

    def getContentHandler(self):
        "Returns the current ContentHandler."
        return self._cont_handler

    def setContentHandler(self, handler):
        "Registers a new object to receive document content events."
        self._cont_handler = handler

    def getDTDHandler(self):
        "Returns the current DTD handler."
        return self._dtd_handler

    def setDTDHandler(self, handler):
        "Register an object to receive basic DTD-related events."
        self._dtd_handler = handler

    def getEntityResolver(self):
        "Returns the current EntityResolver."
        return self._ent_handler

    def setEntityResolver(self, resolver):
        "Register an object to resolve external entities."
        self._ent_handler = resolver

    def getErrorHandler(self):
        "Returns the current ErrorHandler."
        return self._err_handler

    def setErrorHandler(self, handler):
        "Register an object to receive error-message events."
        self._err_handler = handler

    def setLocale(self, locale):
        """Allow an application to set the locale for errors and warnings.

        SAX parsers are not required to provide localization for errors
        and warnings; if they cannot support the requested locale,
        however, they must raise a SAX exception. Applications may
        request a locale change in the middle of a parse."""
        raise SAXNotSupportedException("Locale support not implemented")

    def getFeature(self, name):
        "Looks up and returns the state of a SAX2 feature."
        raise SAXNotRecognizedException("Feature '%s' not recognized" % name)

    def setFeature(self, name, state):
        "Sets the state of a SAX2 feature."
        raise SAXNotRecognizedException("Feature '%s' not recognized" % name)

    def getProperty(self, name):
        "Looks up and returns the value of a SAX2 property."
        raise SAXNotRecognizedException("Property '%s' not recognized" % name)

    def setProperty(self, name, value):
        "Sets the value of a SAX2 property."
        raise SAXNotRecognizedException("Property '%s' not recognized" % name)
class IncrementalParser(XMLReader):
    """This interface adds three extra methods to the XMLReader
    interface that allow XML parsers to support incremental
    parsing. Support for this interface is optional, since not all
    underlying XML parsers support this functionality.

    When the parser is instantiated it is ready to begin accepting
    data from the feed method immediately. After parsing has been
    finished with a call to close the reset method must be called to
    make the parser ready to accept new data, either from feed or
    using the parse method.

    Note that these methods must _not_ be called during parsing, that
    is, after parse has been called and before it returns.

    By default, the class also implements the parse method of the XMLReader
    interface using the feed, close and reset methods of the
    IncrementalParser interface as a convenience to SAX 2.0 driver
    writers."""

    def __init__(self, bufsize=2**16):
        # bufsize: chunk size used by parse() when draining the byte stream.
        self._bufsize = bufsize
        XMLReader.__init__(self)

    def parse(self, source):
        # Default parse() built on top of feed()/close(): drain the byte
        # stream in bufsize chunks until read() returns the "" EOF sentinel.
        import saxutils  # Python 2 implicit relative import (sibling module).
        source = saxutils.prepare_input_source(source)
        self.prepareParser(source)
        file = source.getByteStream()
        buffer = file.read(self._bufsize)
        while buffer != "":
            self.feed(buffer)
            buffer = file.read(self._bufsize)
        self.close()

    def feed(self, data):
        """This method gives the raw XML data in the data parameter to
        the parser and makes it parse the data, emitting the
        corresponding events. It is allowed for XML constructs to be
        split across several calls to feed.

        feed may raise SAXException."""
        raise NotImplementedError("This method must be implemented!")

    def prepareParser(self, source):
        """This method is called by the parse implementation to allow
        the SAX 2.0 driver to prepare itself for parsing."""
        raise NotImplementedError("prepareParser must be overridden!")

    def close(self):
        """This method is called when the entire XML document has been
        passed to the parser through the feed method, to notify the
        parser that there are no more data. This allows the parser to
        do the final checks on the document and empty the internal
        data buffer.

        The parser will not be ready to parse another document until
        the reset method has been called.

        close may raise SAXException."""
        raise NotImplementedError("This method must be implemented!")

    def reset(self):
        """This method is called after close has been called to reset
        the parser so that it is ready to parse new documents. The
        results of calling parse or feed after close without calling
        reset are undefined."""
        raise NotImplementedError("This method must be implemented!")
# ===== LOCATOR =====
class Locator:
    """Associates a SAX event with a document location.

    A locator object returns valid results only during calls to
    DocumentHandler methods; at any other time, the results are
    unpredictable. This base implementation knows nothing and reports
    "unknown" for every query."""

    def getColumnNumber(self):
        """Column number where the current event ends, or -1 if unknown."""
        return -1

    def getLineNumber(self):
        """Line number where the current event ends, or -1 if unknown."""
        return -1

    def getPublicId(self):
        """Public identifier for the current event, or None if unknown."""
        return None

    def getSystemId(self):
        """System identifier for the current event, or None if unknown."""
        return None
# ===== INPUTSOURCE =====
class InputSource:
    """Encapsulation of the information needed by the XMLReader to
    read entities.

    An instance may carry a public identifier, a system identifier, a
    byte stream (possibly with character encoding information) and/or
    a character stream of an entity.

    Applications create instances of this class for use in the
    XMLReader.parse method and for returning from
    EntityResolver.resolveEntity.

    An InputSource belongs to the application; the XMLReader is not
    allowed to modify InputSource objects passed to it from the
    application, although it may make copies and modify those."""

    def __init__(self, system_id=None):
        self.__system_id = system_id
        self.__public_id = None
        self.__encoding = None
        self.__bytefile = None
        self.__charfile = None

    def setPublicId(self, public_id):
        """Set the public identifier of this InputSource."""
        self.__public_id = public_id

    def getPublicId(self):
        """Return the public identifier of this InputSource."""
        return self.__public_id

    def setSystemId(self, system_id):
        """Set the system identifier of this InputSource."""
        self.__system_id = system_id

    def getSystemId(self):
        """Return the system identifier of this InputSource."""
        return self.__system_id

    def setEncoding(self, encoding):
        """Set the character encoding of this InputSource.

        The encoding must be a string acceptable for an XML encoding
        declaration (see section 4.3.3 of the XML recommendation).

        The encoding attribute is ignored if the InputSource also
        contains a character stream."""
        self.__encoding = encoding

    def getEncoding(self):
        """Return the character encoding of this InputSource."""
        return self.__encoding

    def setByteStream(self, bytefile):
        """Set the byte stream (a Python file-like object which does
        not perform byte-to-character conversion) for this input source.

        The SAX parser ignores this if there is also a character stream,
        but it will use a byte stream in preference to opening a URI
        connection itself.

        If the application knows the character encoding of the byte
        stream, it should set it with the setEncoding method."""
        self.__bytefile = bytefile

    def getByteStream(self):
        """Return the byte stream for this input source.

        The getEncoding method will return the character encoding for
        this byte stream, or None if unknown."""
        return self.__bytefile

    def setCharacterStream(self, charfile):
        """Set the character stream for this input source. (The stream
        must be a Python 2.0 Unicode-wrapped file-like that performs
        conversion to Unicode strings.)

        If there is a character stream specified, the SAX parser will
        ignore any byte stream and will not attempt to open a URI
        connection to the system identifier."""
        self.__charfile = charfile

    def getCharacterStream(self):
        """Return the character stream for this input source."""
        return self.__charfile
# ===== ATTRIBUTESIMPL =====
class AttributesImpl:
    """Non-namespace-aware implementation of the SAX Attributes interface.

    Wraps a plain {name: value} mapping; qualified names and plain names
    are identical in this implementation."""

    def __init__(self, attrs):
        """attrs should be of the form {name : value}."""
        self._attrs = attrs

    def getLength(self):
        """Return the number of attributes."""
        return len(self._attrs)

    def getType(self, name):
        """All attributes are reported as CDATA in this implementation."""
        return "CDATA"

    def getValue(self, name):
        """Return the value of attribute *name* (raises KeyError if absent)."""
        return self._attrs[name]

    def getValueByQName(self, name):
        # Qualified name == plain name in the non-NS implementation.
        return self._attrs[name]

    def getNameByQName(self, name):
        if name not in self._attrs:
            # Modernized from the Python 2-only "raise KeyError, name" form;
            # semantics are identical.
            raise KeyError(name)
        return name

    def getQNameByName(self, name):
        if name not in self._attrs:
            raise KeyError(name)
        return name

    def getNames(self):
        return self._attrs.keys()

    def getQNames(self):
        return self._attrs.keys()

    def __len__(self):
        return len(self._attrs)

    def __getitem__(self, name):
        return self._attrs[name]

    def keys(self):
        return self._attrs.keys()

    def has_key(self, name):
        # Kept for backward compatibility with dict-style (Python 2) callers.
        return name in self._attrs

    def __contains__(self, name):
        return name in self._attrs

    def get(self, name, alternative=None):
        return self._attrs.get(name, alternative)

    def copy(self):
        # Note: shallow copy — the underlying attrs mapping is shared.
        return self.__class__(self._attrs)

    def items(self):
        return self._attrs.items()

    def values(self):
        return self._attrs.values()
# ===== ATTRIBUTESNSIMPL =====
class AttributesNSImpl(AttributesImpl):
    """Namespace-aware implementation of the SAX Attributes interface."""

    def __init__(self, attrs, qnames):
        """attrs should be of the form {(ns_uri, lname): value, ...}.
        qnames of the form {(ns_uri, lname): qname, ...}."""
        self._attrs = attrs
        self._qnames = qnames

    def getValueByQName(self, name):
        # Linear scan: reverse-map the qualified name to its (uri, lname) key.
        for (nsname, qname) in self._qnames.items():
            if qname == name:
                return self._attrs[nsname]
        # Modernized from the Python 2-only "raise KeyError, name" form;
        # semantics are identical.
        raise KeyError(name)

    def getNameByQName(self, name):
        for (nsname, qname) in self._qnames.items():
            if qname == name:
                return nsname
        raise KeyError(name)

    def getQNameByName(self, name):
        return self._qnames[name]

    def getQNames(self):
        return self._qnames.values()

    def copy(self):
        # Shallow copy — both underlying mappings are shared.
        return self.__class__(self._attrs, self._qnames)
def _test():
    # Smoke test: just instantiate the abstract interface classes.
    XMLReader()
    IncrementalParser()
    Locator()
if __name__ == "__main__":
    # Run the instantiation smoke test when executed directly.
    _test()
|
|
# -*- coding: utf-8 -*-
# Copyright 2010-2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A library to operate version definition file.
This script has two functionalities, both of which relate to the version definition file.
1. Generate version definition file from template and given parameters.
To generate version definition file, use GenerateVersionFileFromTemplate
method.
2. Parse (generated) version definition file.
To parse, use MozcVersion class.
Typically version definition file is ${PROJECT_ROOT}/mozc_version.txt
(Not in the repository because it is generated by this script)
Typically version template file is ${PROJECT_ROOT}/mozc_version_template.txt,
which is in the repository.
The syntax of template is written in the template file.
"""
# TODO(matsuzaki): MozcVersion class should have factory method which takes
# file path and we should remove all the module methods instead to
# simplify the design. Currently I'd keep this design to reduce
# client side's change.
import datetime
import logging
import optparse
import os
import re
import sys
# Maps a target platform name to the digit that replaces the last character
# of the REVISION property (see _GetRevisionForPlatform).
TARGET_PLATFORM_TO_DIGIT = {
    'Windows': '0',
    'Mac': '1',
    'Linux': '2',
    'Android': '3',
    'NaCl': '4',
}

# Keys recognized in the version template/definition file; also the set of
# @PLACEHOLDER@ names understood by _GetVersionInFormat.
VERSION_PROPERTIES = [
    'MAJOR',
    'MINOR',
    'BUILD',
    'REVISION',
    'ANDROID_VERSION_CODE',
    'FLAG',
    'TARGET_PLATFORM',
    'ANDROID_APPLICATION_ID',
    'ANDROID_SERVICE_NAME',
    'NACL_DICTIONARY_VERSION',
    'ANDROID_ARCH',
]

# Epoch used to compute the "daily" BUILD number (days since 2009-05-24).
MOZC_EPOCH = datetime.date(2009, 5, 24)
def _GetRevisionForPlatform(revision, target_platform):
    """Returns the revision string tweaked for the target platform.

    The last character of the revision is replaced by the platform digit
    from TARGET_PLATFORM_TO_DIGIT. Exits the process (status 1) when the
    revision is missing or the platform is unknown.

    Args:
      revision: the REVISION string from the template, '' or None.
      target_platform: a key of TARGET_PLATFORM_TO_DIGIT.
    Returns:
      The platform-specific revision string ('' is passed through).
    """
    if revision is None:
        logging.critical('REVISION property is not found in the template file')
        sys.exit(1)
    last_digit = TARGET_PLATFORM_TO_DIGIT.get(target_platform, None)
    if last_digit is None:
        # Fixed typo in the log message ("Accetable" -> "Acceptable").
        logging.critical('target_platform %s is invalid. Acceptable ones are %s',
                         target_platform, TARGET_PLATFORM_TO_DIGIT.keys())
        sys.exit(1)
    if not revision:
        # An empty revision is passed through untouched.
        return revision
    if last_digit:
        # Digits are strings, so '0' (Windows) is truthy and taken here too.
        return revision[0:-1] + last_digit
    # If not supported, just use the specified version.
    return revision
def _ParseVersionTemplateFile(template_path, target_platform,
                              android_application_id, android_arch):
    """Parses a version definition file.

    Args:
      template_path: A filename which has the version definition.
      target_platform: The target platform on which the programs run.
      android_application_id: Android application id.
      android_arch: Android architecture (arm, x86, mips)
    Returns:
      A dictionary generated from the template file.
    """
    template_dict = {}
    with open(template_path) as template_file:
        for line in template_file:
            matchobj = re.match(r'(\w+)=(.*)', line.strip())
            if matchobj:
                var = matchobj.group(1)
                val = matchobj.group(2)
                if var in template_dict:
                    # Fixed typo ("Dupulicate") and the missing space between
                    # the two concatenated string literals.
                    logging.warning(('Duplicate key: "%s". Later definition "%s" '
                                     'overrides earlier one "%s".'),
                                    var, val, template_dict[var])
                template_dict[var] = val
    # Some properties need to be tweaked.
    template_dict['REVISION'] = _GetRevisionForPlatform(
        template_dict.get('REVISION', None), target_platform)
    num_of_days = datetime.date.today().toordinal() - MOZC_EPOCH.toordinal()
    if template_dict['BUILD'] == 'daily':
        # "daily" builds are numbered by days elapsed since the Mozc epoch.
        template_dict['BUILD'] = str(num_of_days)
        template_dict.setdefault('FLAG', 'CONTINUOUS')
    else:
        template_dict.setdefault('FLAG', 'RELEASE')
    template_dict['ANDROID_VERSION_CODE'] = (
        str(_GetAndroidVersionCode(int(template_dict['BUILD']), android_arch)))
    template_dict['TARGET_PLATFORM'] = target_platform
    template_dict['ANDROID_APPLICATION_ID'] = android_application_id
    template_dict['ANDROID_SERVICE_NAME'] = (
        'org.mozc.android.inputmethod.japanese.MozcService')
    template_dict['ANDROID_ARCH'] = android_arch
    return template_dict
def _GetAndroidVersionCode(base_version_code, arch):
"""Gets version code based on base version code and architecture.
Args:
base_version_code: is typically equal to the field BUILD in mozc_version.txt
arch: Android's architecture (e.g., x86, arm, mips)
Returns:
version code (int)
Raises:
RuntimeError: arch is unexpected one or base_version_code is too big.
Version code format:
0005BBBBBA
A: ABI (0: Fat, 6: x86_64, 5:arm64, 4:mips64, 3: x86, 2: armeabi-v7a, 1:mips)
B: ANDROID_VERSION_CODE
Note:
- Prefix 5 is introduced because of historical reason.
Previously ANDROID_VERSION_CODE (B) was placed after ABI (A) but
it's found that swpping the order is reasonable.
Previously version code for x86 was always greater than that for armeabi.
Therefore version-check rule like "Version code of update must be greater
than that of previous" cannot be introduced.
"""
arch_to_abi_code = {
'x86_64': 6,
'arm64': 5,
'mips64': 4,
'x86': 3,
'arm': 2,
'mips': 1,
}
abi_code = arch_to_abi_code.get(arch)
if abi_code is None:
raise RuntimeError('Unexpected architecture; %s' % arch)
if base_version_code >= 10000:
raise RuntimeError('Version code is greater than 10000. '
'It is time to revisit version code scheme.')
return int('5%05d%d' % (base_version_code, abi_code))
def _GetVersionInFormat(properties, version_format):
  """Returns the version string based on the specified format.

  format can contains @MAJOR@, @MINOR@, @BUILD@ and @REVISION@ which are
  replaced by the corresponding entries of the property dictionary.

  Args:
    properties: a property dictionary. Typically gotten from
        _ParseVersionTemplateFile method.
    version_format: a string which contains version patterns.

  Returns:
    Return the version string in the format of format.
  """
  rendered = version_format
  for prop_name in VERSION_PROPERTIES:
    placeholder = '@%s@' % prop_name
    # Missing properties render as the empty string.
    rendered = rendered.replace(placeholder, properties.get(prop_name, ''))
  return rendered
def GenerateVersionFileFromTemplate(template_path,
                                    output_path,
                                    version_format,
                                    target_platform,
                                    android_application_id='',
                                    android_arch='arm'):
  """Generates version file from template file and given parameters.

  Args:
    template_path: A path to template file.
    output_path: A path to generated version file.
      If already exists and the content will not be updated, nothing is done
      (the timestamp is not updated).
    version_format: A string which contains version patterns.
    target_platform: The target platform on which the programs run.
    android_application_id: Android application id.
    android_arch: Android architecture (arm, x86, mips)
  """
  properties = _ParseVersionTemplateFile(template_path, target_platform,
                                         android_application_id,
                                         android_arch)
  new_content = _GetVersionInFormat(properties, version_format)

  # Read the current file (if any) so we can skip the write when nothing
  # changed, keeping the output's timestamp stable for build tools.
  previous_content = ''
  if os.path.exists(output_path):
    with open(output_path) as existing_file:
      previous_content = existing_file.read()

  if new_content != previous_content:
    with open(output_path, 'w') as output_file:
      output_file.write(new_content)
def GenerateVersionFile(version_template_path, version_path, target_platform,
                        android_application_id, android_arch):
  """Reads the version template file and stores it into version_path.

  This doesn't update the "version_path" if nothing will be changed to
  reduce unnecessary build caused by file timestamp.

  Args:
    version_template_path: a file name which contains the template of version.
    version_path: a file name to be stored the official version.
    target_platform: target platform name. c.f. --target_platform option
    android_application_id: [Android Only] application id
        (e.g. org.mozc.android).
    android_arch: Android architecture (arm, x86, mips)
  """
  # Every key is emitted as "KEY=@KEY@\n"; the placeholders are then filled
  # in by GenerateVersionFileFromTemplate.
  keys = ['MAJOR', 'MINOR', 'BUILD', 'REVISION', 'ANDROID_VERSION_CODE',
          'FLAG', 'TARGET_PLATFORM', 'ANDROID_APPLICATION_ID',
          'ANDROID_SERVICE_NAME', 'NACL_DICTIONARY_VERSION', 'ANDROID_ARCH']
  version_format = ''.join('%s=@%s@\n' % (key, key) for key in keys)
  GenerateVersionFileFromTemplate(
      version_template_path,
      version_path,
      version_format,
      target_platform=target_platform,
      android_application_id=android_application_id,
      android_arch=android_arch)
class MozcVersion(object):
  """A class to parse and maintain the version definition data.

  Note that this class is not intended to parse "template" file but to
  "generated" file.

  Typical usage is;
    GenerateVersionFileFromTemplate(template_path, version_path, format)
    version = MozcVersion(version_path)
  """

  def __init__(self, path):
    """Parses a version definition file.

    Args:
      path: A filename which has the version definition.
        If the file is not existent, empty properties are prepared instead.
    """
    self._properties = {}
    if not os.path.isfile(path):
      return
    # Use a context manager; the original leaked the handle from open(path).
    with open(path) as definition_file:
      for line in definition_file:
        matchobj = re.match(r'(\w+)=(.*)', line.strip())
        if matchobj:
          var = matchobj.group(1)
          val = matchobj.group(2)
          # First definition wins; later duplicates are ignored.
          if var not in self._properties:
            self._properties[var] = val
    # Check mandatory properties.
    for key in VERSION_PROPERTIES:
      if key not in self._properties:
        # Don't raise error nor exit.
        # Error handling is the client's responsibility.
        logging.warning('Mandatory key "%s" does not exist in %s', key, path)

  def IsDevChannel(self):
    """Returns true if the parsed version is dev-channel."""
    # Use .get() so a missing REVISION (e.g. nonexistent version file, which
    # the constructor deliberately tolerates) yields False instead of
    # raising KeyError.
    revision = self._properties.get('REVISION')
    return revision is not None and len(revision) >= 3 and revision[-3] == '1'

  def GetTargetPlatform(self):
    """Returns the target platform.

    Returns:
      A string for target platform.
      If the version file is not existent, None is returned.
    """
    return self._properties.get('TARGET_PLATFORM', None)

  def GetVersionString(self):
    """Returns the normal version info string.

    Returns:
      a string in format of "MAJOR.MINOR.BUILD.REVISION"
    """
    return self.GetVersionInFormat('@MAJOR@.@MINOR@.@BUILD@.@REVISION@')

  def GetVersionInFormat(self, version_format):
    """Returns the version string based on the specified format."""
    return _GetVersionInFormat(self._properties, version_format)

  def GetAndroidArch(self):
    """Returns Android architecture."""
    return self._properties.get('ANDROID_ARCH', None)
def main():
  """Generates version file based on the default format.

  Generated file is mozc_version.txt compatible.
  """
  parser = optparse.OptionParser(usage='Usage: %prog ')
  # Declarative table of (flags, kwargs) keeps the option wiring compact.
  option_specs = [
      (('--template_path',),
       {'dest': 'template_path',
        'help': 'Path to a template version file.'}),
      (('--output',),
       {'dest': 'output',
        'help': 'Path to the output version file.'}),
      (('--target_platform',),
       {'dest': 'target_platform',
        'help': 'Target platform of the version info.'}),
      (('--android_application_id',),
       {'dest': 'android_application_id',
        'default': 'my.application.id',
        'help': 'Specifies the application id (Android Only).'}),
      (('--android_arch',),
       {'dest': 'android_arch',
        'default': 'arm',
        'help': 'Specifies Android architecture (arm, x86, mips) '
                '(Android Only)'}),
  ]
  for flags, kwargs in option_specs:
    parser.add_option(*flags, **kwargs)

  (options, leftover_args) = parser.parse_args()
  assert not leftover_args, 'Unexpected arguments.'
  assert options.template_path, 'No --template_path was specified.'
  assert options.output, 'No --output was specified.'
  assert options.target_platform, 'No --target_platform was specified.'

  GenerateVersionFile(
      version_template_path=options.template_path,
      version_path=options.output,
      target_platform=options.target_platform,
      android_application_id=options.android_application_id,
      android_arch=options.android_arch)
# Script entry point: generate the version file from command-line options.
if __name__ == '__main__':
  main()
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import errno
import json
import logging
import os
import pkgutil
import threading
import xml.etree.ElementTree as ET
from abc import abstractmethod
from collections import OrderedDict, defaultdict, namedtuple
import six
from twitter.common.collections import OrderedSet
from pants.backend.jvm.subsystems.jar_dependency_management import (JarDependencyManagement,
PinnedJarArtifactSet)
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.base.generator import Generator, TemplateData
from pants.base.revision import Revision
from pants.build_graph.target import Target
from pants.ivy.bootstrapper import Bootstrapper
from pants.java.jar.exclude import Exclude
from pants.java.jar.jar_dependency import JarDependency
from pants.java.jar.jar_dependency_utils import M2Coordinate, ResolvedJar
from pants.java.util import execute_runner
from pants.util.dirutil import safe_concurrent_creation, safe_mkdir, safe_open
from pants.util.fileutil import atomic_copy
class IvyResolutionStep(object):
  """Ivy specific class for describing steps of performing resolution."""

  # NB(nh): This class is the base class for the ivy resolve and fetch steps.
  # It also specifies the abstract methods that define the components of resolution steps.

  def __init__(self, confs, hash_name, pinned_artifacts, soft_excludes, ivy_cache_dir,
               global_ivy_workdir):
    """
    :param confs: A tuple of string ivy confs to resolve for.
    :param hash_name: A unique string name for this resolve.
    :param pinned_artifacts: A tuple of "artifact-alikes" to force the versions of.
    :param soft_excludes: A flag marking whether to pass excludes to Ivy or to apply them after the
                          fact.
    :param ivy_cache_dir: The cache directory used by Ivy for this resolution step.
    :param global_ivy_workdir: The workdir that all ivy outputs live in.
    """
    self.confs = confs
    self.hash_name = hash_name
    self.pinned_artifacts = pinned_artifacts
    self.soft_excludes = soft_excludes
    self.ivy_cache_dir = ivy_cache_dir
    self.global_ivy_workdir = global_ivy_workdir
    # Pre-compute, per conf, where this step's ivy report lives in the workdir.
    self.workdir_reports_by_conf = {c: self.resolve_report_path(c) for c in confs}

  @abstractmethod
  def required_load_files_exist(self):
    """The files required to load a previous resolve exist."""

  @abstractmethod
  def required_exec_files_exist(self):
    """The files to do a resolve exist."""

  @abstractmethod
  def load(self, targets):
    """Loads the result of a resolve or fetch."""

  @abstractmethod
  def exec_and_load(self, executor, extra_args, targets, jvm_options, workunit_name,
                    workunit_factory):
    """Runs the resolve or fetch and loads the result, returning it."""

  @property
  def workdir(self):
    # Per-resolve working directory, namespaced by this resolve's hash name.
    return os.path.join(self.global_ivy_workdir, self.hash_name)

  @property
  def symlink_classpath_filename(self):
    # Classpath file whose entries point into the stable symlink directory.
    return os.path.join(self.workdir, 'classpath')

  @property
  def ivy_cache_classpath_filename(self):
    # Classpath file whose entries point directly into the ivy cache.
    return '{}.raw'.format(self.symlink_classpath_filename)

  @property
  def frozen_resolve_file(self):
    # JSON file recording resolved coordinates, consumed by later fetches.
    return os.path.join(self.workdir, 'resolution.json')

  @property
  def symlink_dir(self):
    # Directory of symlinks into the ivy cache, shared across resolves.
    return os.path.join(self.global_ivy_workdir, 'jars')

  @abstractmethod
  def ivy_xml_path(self):
    """Ivy xml location."""

  @abstractmethod
  def resolve_report_path(self, conf):
    """Location of the resolve report in the workdir."""

  def _construct_and_load_symlink_map(self):
    # Delegates to IvyUtils to create/load symlinks for cached artifacts;
    # returns (artifact_paths, symlink_map).
    artifact_paths, symlink_map = IvyUtils.construct_and_load_symlink_map(
        self.symlink_dir,
        self.ivy_cache_dir,
        self.ivy_cache_classpath_filename,
        self.symlink_classpath_filename)
    return artifact_paths, symlink_map

  def _call_ivy(self, executor, extra_args, ivyxml, jvm_options, hash_name_for_report,
                workunit_factory, workunit_name):
    # Thin wrapper over IvyUtils.do_resolve using this step's configuration.
    IvyUtils.do_resolve(executor,
                        extra_args,
                        ivyxml,
                        jvm_options,
                        self.workdir_reports_by_conf,
                        self.confs,
                        self.ivy_cache_dir,
                        self.ivy_cache_classpath_filename,
                        hash_name_for_report,
                        workunit_factory,
                        workunit_name)
class IvyFetchStep(IvyResolutionStep):
  """Resolves ivy artifacts using the coordinates from a previous resolve."""

  # Sentinel distinguishing "loading raised" from a legitimate None return
  # (FrozenResolution.load_from_file returns None for a missing file).
  _LOAD_FAILED = object()

  def required_load_files_exist(self):
    return (all(os.path.isfile(report) for report in self.workdir_reports_by_conf.values()) and
            os.path.isfile(self.ivy_cache_classpath_filename) and
            os.path.isfile(self.frozen_resolve_file))

  def resolve_report_path(self, conf):
    return os.path.join(self.workdir, 'fetch-report-{}.xml'.format(conf))

  @property
  def ivy_xml_path(self):
    return os.path.join(self.workdir, 'fetch-ivy.xml')

  def required_exec_files_exist(self):
    return os.path.isfile(self.frozen_resolve_file)

  def _load_frozen_resolutions(self, targets):
    """Loads the frozen resolutions for targets, or _LOAD_FAILED on error.

    Shared by load() and exec_and_load(), which previously duplicated this
    try/except block verbatim.
    """
    try:
      return FrozenResolution.load_from_file(self.frozen_resolve_file, targets)
    except Exception as e:
      logger.debug('Failed to load {}: {}'.format(self.frozen_resolve_file, e))
      return self._LOAD_FAILED

  def load(self, targets):
    """Loads the result of a previous fetch, or NO_RESOLVE_RUN_RESULT."""
    frozen_resolutions = self._load_frozen_resolutions(targets)
    if frozen_resolutions is self._LOAD_FAILED:
      return NO_RESOLVE_RUN_RESULT
    return self._load_from_fetch(frozen_resolutions)

  def exec_and_load(self, executor, extra_args, targets, jvm_options, workunit_name,
                    workunit_factory):
    """Runs a fetch against the frozen resolution and loads the result."""
    frozen_resolutions = self._load_frozen_resolutions(targets)
    if frozen_resolutions is self._LOAD_FAILED:
      return NO_RESOLVE_RUN_RESULT
    self._do_fetch(executor, extra_args, frozen_resolutions, jvm_options,
                   workunit_name, workunit_factory)
    result = self._load_from_fetch(frozen_resolutions)
    if not result.all_linked_artifacts_exist():
      raise IvyResolveMappingError(
          'Some artifacts were not linked to {} for {}'.format(self.global_ivy_workdir,
                                                               result))
    return result

  def _load_from_fetch(self, frozen_resolutions):
    # Builds the symlink map and wraps everything in a fetch-aware result.
    artifact_paths, symlink_map = self._construct_and_load_symlink_map()
    return IvyFetchResolveResult(artifact_paths,
                                 symlink_map,
                                 self.hash_name,
                                 self.workdir_reports_by_conf,
                                 frozen_resolutions)

  def _do_fetch(self, executor, extra_args, frozen_resolution, jvm_options, workunit_name,
                workunit_factory):
    # It's important for fetches to have a different ivy report from resolves as their
    # contents differ.
    hash_name_for_report = '{}-fetch'.format(self.hash_name)
    ivyxml = self.ivy_xml_path
    self._prepare_ivy_xml(frozen_resolution, ivyxml, hash_name_for_report)
    self._call_ivy(executor, extra_args, ivyxml, jvm_options, hash_name_for_report,
                   workunit_factory, workunit_name)

  def _prepare_ivy_xml(self, frozen_resolution, ivyxml, resolve_hash_name_for_report):
    # NB(nh): Our ivy.xml ensures that we always get the default configuration, even if it's not
    # part of the requested confs.
    default_resolution = frozen_resolution.get('default')
    if default_resolution is None:
      raise IvyUtils.IvyError("Couldn't find the frozen resolution for the 'default' ivy conf.")
    try:
      jars = default_resolution.jar_dependencies
      IvyUtils.generate_fetch_ivy(jars, ivyxml, self.confs, resolve_hash_name_for_report)
    except Exception as e:
      raise IvyUtils.IvyError('Failed to prepare ivy resolve: {}'.format(e))
class IvyResolveStep(IvyResolutionStep):
  """Resolves ivy artifacts and produces a cacheable file containing the resulting coordinates."""

  def required_load_files_exist(self):
    # A previous resolve is loadable when the raw classpath file and every
    # per-conf report are present.
    report_files = self.workdir_reports_by_conf.values()
    return (os.path.isfile(self.ivy_cache_classpath_filename) and
            all(os.path.isfile(report) for report in report_files))

  def resolve_report_path(self, conf):
    return os.path.join(self.workdir, 'resolve-report-{}.xml'.format(conf))

  @property
  def ivy_xml_path(self):
    return os.path.join(self.workdir, 'resolve-ivy.xml')

  def load(self, targets):
    """Loads the artifacts and symlink map of a completed resolve."""
    artifact_paths, link_map = self._construct_and_load_symlink_map()
    return IvyResolveResult(artifact_paths,
                            link_map,
                            self.hash_name,
                            self.workdir_reports_by_conf)

  def exec_and_load(self, executor, extra_args, targets, jvm_options,
                    workunit_name, workunit_factory):
    """Runs the resolve, verifies the links, freezes the result and returns it."""
    self._do_resolve(executor, extra_args, targets, jvm_options, workunit_name, workunit_factory)
    result = self.load(targets)
    if not result.all_linked_artifacts_exist():
      raise IvyResolveMappingError(
          'Some artifacts were not linked to {} for {}'.format(self.global_ivy_workdir,
                                                               result))
    # Persist the resolved coordinates so later runs can fetch instead.
    FrozenResolution.dump_to_file(self.frozen_resolve_file,
                                  result.get_frozen_resolutions_by_conf(targets))
    return result

  def _do_resolve(self, executor, extra_args, targets, jvm_options, workunit_name, workunit_factory):
    safe_mkdir(self.workdir)
    report_hash_name = '{}-resolve'.format(self.hash_name)
    ivyxml = self.ivy_xml_path
    self._prepare_ivy_xml(targets, ivyxml, report_hash_name)
    self._call_ivy(executor, extra_args, ivyxml, jvm_options, report_hash_name,
                   workunit_factory, workunit_name)

  def _prepare_ivy_xml(self, targets, ivyxml, hash_name):
    # TODO(John Sirois): merge the code below into IvyUtils or up here; either way, better
    # diagnostics can be had in `IvyUtils.generate_ivy` if this is done.
    # See: https://github.com/pantsbuild/pants/issues/2239
    jars, global_excludes = IvyUtils.calculate_classpath(targets)
    if self.soft_excludes:
      # Don't pass global excludes to ivy when using soft excludes.
      global_excludes = []
    IvyUtils.generate_ivy(targets, jars, global_excludes, ivyxml, self.confs,
                          hash_name, self.pinned_artifacts)
class FrozenResolution(object):
  """Contains the abstracted results of a resolve.

  With this we can do a simple fetch.
  """
  # TODO(nh): include full dependency graph in here.
  # So that we can inject it into the build graph if we want to.

  class MissingTarget(Exception):
    """Thrown when a loaded resolution has a target spec for a target that doesn't exist."""

  def __init__(self):
    # target -> OrderedSet of coordinates resolved for that target.
    self.target_to_resolved_coordinates = defaultdict(OrderedSet)
    # Every coordinate seen across all targets, in insertion order.
    self.all_resolved_coordinates = OrderedSet()
    # coordinate -> extra jar attributes (e.g. url / base_path).
    self.coordinate_to_attributes = OrderedDict()

  @property
  def jar_dependencies(self):
    # Reconstructs JarDependency objects from the recorded coordinates plus
    # any saved per-coordinate attributes.
    return [
        JarDependency(c.org, c.name, c.rev, classifier=c.classifier, ext=c.ext,
                      **self.coordinate_to_attributes.get(c, {}))
        for c in self.all_resolved_coordinates]

  def add_resolved_jars(self, target, resolved_jars):
    # Records the coordinate of each resolved jar for this target.
    coords = [j.coordinate for j in resolved_jars]
    self.add_resolution_coords(target, coords)

    # Assuming target is a jar library.
    for j in target.jar_dependencies:
      url = j.get_url(relative=True)
      if url:
        self.coordinate_to_attributes[j.coordinate] = {'url': url, 'base_path': j.base_path}
      else:
        self.coordinate_to_attributes[j.coordinate] = {}

  def add_resolution_coords(self, target, coords):
    # Indexes each coordinate both per-target and globally.
    for c in coords:
      self.target_to_resolved_coordinates[target].add(c)
      self.all_resolved_coordinates.add(c)

  def target_spec_to_coordinate_strings(self):
    # JSON-friendly view: target address spec -> list of coordinate strings.
    return {t.address.spec: [str(c) for c in coordinates]
            for t, coordinates in self.target_to_resolved_coordinates.items()}

  def __repr__(self):
    return 'FrozenResolution(\n target_to_resolved_coordinates\n {}\n all\n {}'.format(
        '\n '.join(': '.join([t.address.spec,
                              '\n '.join(str(c) for c in cs)])
                   for t,cs in self.target_to_resolved_coordinates.items()),
        '\n '.join(str(c) for c in self.coordinate_to_attributes.keys())
    )

  def __eq__(self, other):
    return (type(self) == type(other) and
            self.all_resolved_coordinates == other.all_resolved_coordinates and
            self.target_to_resolved_coordinates == other.target_to_resolved_coordinates)

  def __ne__(self, other):
    return not self == other

  @classmethod
  def load_from_file(cls, filename, targets):
    """Deserializes a conf -> FrozenResolution dict from filename.

    Returns None if the file does not exist.  Raises cls.MissingTarget when a
    serialized target spec has no matching target in `targets`.
    """
    if not os.path.exists(filename):
      return None

    with open(filename) as f:
      # Using OrderedDict here to maintain insertion order of dict entries.
      from_file = json.load(f, object_pairs_hook=OrderedDict)
    result = {}
    target_lookup = {t.address.spec: t for t in targets}
    for conf, serialized_resolution in from_file.items():
      resolution = FrozenResolution()

      def m2_for(c):
        return M2Coordinate.from_string(c)

      for coord, attr_dict in serialized_resolution['coord_to_attrs'].items():
        m2 = m2_for(coord)
        resolution.coordinate_to_attributes[m2] = attr_dict

      for spec, coord_strs in serialized_resolution['target_to_coords'].items():
        t = target_lookup.get(spec, None)
        if t is None:
          raise cls.MissingTarget('Cannot find target for address {} in frozen resolution'
                                  .format(spec))
        resolution.add_resolution_coords(t, [m2_for(c) for c in coord_strs])
      result[conf] = resolution
    return result

  @classmethod
  def dump_to_file(cls, filename, resolutions_by_conf):
    """Serializes conf -> FrozenResolution to filename as JSON, atomically."""
    res = {}
    for conf, resolution in resolutions_by_conf.items():
      res[conf] = OrderedDict([
          ['target_to_coords',resolution.target_spec_to_coordinate_strings()],
          ['coord_to_attrs', OrderedDict([str(c), attrs]
                                         for c, attrs in resolution.coordinate_to_attributes.items())]
      ])

    # safe_concurrent_creation writes to a temp file and renames into place.
    with safe_concurrent_creation(filename) as tmp_filename:
      with open(tmp_filename, 'wb') as f:
        json.dump(res, f)
class IvyResolveResult(object):
  """The result of an Ivy resolution.

  The result data includes the list of resolved artifacts, the relationships between those artifacts
  and the targets that requested them and the hash name of the resolve.
  """

  def __init__(self, resolved_artifact_paths, symlink_map, resolve_hash_name, reports_by_conf):
    self._reports_by_conf = reports_by_conf
    self.resolved_artifact_paths = resolved_artifact_paths
    self.resolve_hash_name = resolve_hash_name
    # ivy-cache path -> stable symlink path, used to rewrite resolved jars.
    self._symlink_map = symlink_map

  @property
  def has_resolved_artifacts(self):
    """The requested targets have a resolution associated with them."""
    return self.resolve_hash_name is not None

  def all_linked_artifacts_exist(self):
    """All of the artifact paths for this resolve point to existing files."""
    if not self.has_resolved_artifacts:
      return False
    for path in self.resolved_artifact_paths:
      if not os.path.isfile(path):
        return False
    else:
      # for/else: the loop completed without finding a missing file.
      return True

  def report_for_conf(self, conf):
    """Returns the path to the ivy report for the provided conf.

    Returns None if there is no path.
    """
    return self._reports_by_conf.get(conf)

  def get_frozen_resolutions_by_conf(self, targets):
    # Builds a FrozenResolution per conf from the parsed ivy reports.
    frozen_resolutions_by_conf = OrderedDict()
    for conf in self._reports_by_conf:
      frozen_resolution = FrozenResolution()
      for target, resolved_jars in self.resolved_jars_for_each_target(conf, targets):
        frozen_resolution.add_resolved_jars(target, resolved_jars)
      frozen_resolutions_by_conf[conf] = frozen_resolution
    return frozen_resolutions_by_conf

  def resolved_jars_for_each_target(self, conf, targets):
    """Yields the resolved jars for each passed JarLibrary.

    If there is no report for the requested conf, yields nothing.

    :param conf: The ivy conf to load jars for.
    :param targets: The collection of JarLibrary targets to find resolved jars for.
    :yield: target, resolved_jars
    :raises IvyTaskMixin.UnresolvedJarError
    """
    ivy_info = self._ivy_info_for(conf)
    if not ivy_info:
      return
    jar_library_targets = [t for t in targets if isinstance(t, JarLibrary)]
    # Memo shared across targets so the dependency graph walk isn't repeated.
    ivy_jar_memo = {}
    for target in jar_library_targets:
      # Add the artifacts from each dependency module.
      resolved_jars = self._resolved_jars_with_symlinks(conf, ivy_info, ivy_jar_memo,
                                                       self._jar_dependencies_for_target(conf,
                                                                                         target),
                                                       target)
      yield target, resolved_jars

  def _jar_dependencies_for_target(self, conf, target):
    # Hook point: overridden to look up frozen coordinates for fetches.
    return target.jar_dependencies

  def _ivy_info_for(self, conf):
    report_path = self._reports_by_conf.get(conf)
    return IvyUtils.parse_xml_report(conf, report_path)

  def _new_resolved_jar_with_symlink_path(self, conf, target, resolved_jar_without_symlink):
    def candidate_cache_paths():
      # There is a focus on being lazy here to avoid `os.path.realpath` when we can.
      yield resolved_jar_without_symlink.cache_path
      yield os.path.realpath(resolved_jar_without_symlink.cache_path)

    for cache_path in candidate_cache_paths():
      pants_path = self._symlink_map.get(cache_path)
      if pants_path:
        break
    else:
      # Neither the raw nor the realpath'd cache path is in the symlink map.
      raise IvyResolveMappingError(
          'Jar {resolved_jar} in {spec} not resolved to the ivy '
          'symlink map in conf {conf}.'
          .format(spec=target.address.spec,
                  resolved_jar=resolved_jar_without_symlink.cache_path,
                  conf=conf))

    return ResolvedJar(coordinate=resolved_jar_without_symlink.coordinate,
                       pants_path=pants_path,
                       cache_path=resolved_jar_without_symlink.cache_path)

  def _resolved_jars_with_symlinks(self, conf, ivy_info, ivy_jar_memo, coordinates, target):
    # Expands coordinates transitively, then rewrites each jar path through
    # the symlink map.
    raw_resolved_jars = ivy_info.get_resolved_jars_for_coordinates(coordinates,
                                                                   memo=ivy_jar_memo)
    resolved_jars = [self._new_resolved_jar_with_symlink_path(conf, target, raw_resolved_jar)
                     for raw_resolved_jar in raw_resolved_jars]
    return resolved_jars
class IvyFetchResolveResult(IvyResolveResult):
  """A resolve result that uses the frozen resolution to look up dependencies."""

  def __init__(self, resolved_artifact_paths, symlink_map, resolve_hash_name, reports_by_conf,
               frozen_resolutions):
    super(IvyFetchResolveResult, self).__init__(resolved_artifact_paths, symlink_map,
                                                resolve_hash_name, reports_by_conf)
    self._frozen_resolutions = frozen_resolutions

  def _jar_dependencies_for_target(self, conf, target):
    # The frozen resolution already recorded each target's coordinates, so
    # consult it rather than the target's declared jar_dependencies.
    frozen = self._frozen_resolutions[conf]
    return frozen.target_to_resolved_coordinates.get(target, ())
# Singleton result used when a resolve could not be loaded or run
# (resolve_hash_name=None makes has_resolved_artifacts False).
NO_RESOLVE_RUN_RESULT = IvyResolveResult([], {}, None, {})

# A module parsed from an ivy report: its ref, artifact path, and callers.
IvyModule = namedtuple('IvyModule', ['ref', 'artifact', 'callers'])

# Attributes of a dependency entry in an ivy report.
Dependency = namedtuple('DependencyAttributes',
                        ['org', 'name', 'rev', 'mutable', 'force', 'transitive'])

# Attributes of an artifact entry in an ivy report.
Artifact = namedtuple('Artifact', ['name', 'type_', 'ext', 'url', 'classifier'])

logger = logging.getLogger(__name__)


class IvyResolveMappingError(Exception):
  """Raised when there is a failure mapping the ivy resolve results to pants objects."""
class IvyModuleRef(object):
  """Identifies an ivy module by (org, name, rev, classifier, ext).

  :API: public
  """
  # latest.integration is ivy magic meaning "just get the latest version"
  _ANY_REV = 'latest.integration'

  def __init__(self, org, name, rev, classifier=None, ext=None):
    self.org = org
    self.name = name
    self.rev = rev
    self.classifier = classifier
    # A falsy ext (None or '') defaults to 'jar'.
    self.ext = ext if ext else 'jar'
    # Identity tuple used for equality, hashing, str and repr.
    self._id = (self.org, self.name, self.rev, self.classifier, self.ext)

  def __eq__(self, other):
    if not isinstance(other, IvyModuleRef):
      return False
    return self._id == other._id

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    return hash(self._id)

  def __str__(self):
    fields = [field or '' for field in self._id]
    return 'IvyModuleRef({})'.format(':'.join(fields))

  def __repr__(self):
    return ('IvyModuleRef(org={!r}, name={!r}, rev={!r}, classifier={!r}, ext={!r})'
            .format(*self._id))

  def __cmp__(self, other):
    # We can't just re-use __repr__ or __str_ because we want to order rev last
    return cmp((self.org, self.name, self.classifier, self.ext, self.rev),
               (other.org, other.name, other.classifier, other.ext, other.rev))

  @property
  def caller_key(self):
    """This returns an identifier for an IvyModuleRef that only retains the caller org and name.

    Ivy represents dependees as `<caller/>`'s with just org and name and rev information.
    This method returns a `<caller/>` representation of the current ref.
    """
    return IvyModuleRef(org=self.org, name=self.name, rev=self._ANY_REV)

  @property
  def unversioned(self):
    """This returns an identifier for an IvyModuleRef without version information.

    It's useful because ivy might return information about a different version of a dependency than
    the one we request, and we want to ensure that all requesters of any version of that dependency
    are able to learn about it.
    """
    return IvyModuleRef(org=self.org, name=self.name, rev=self._ANY_REV,
                        classifier=self.classifier, ext=self.ext)
class IvyInfo(object):
  """Parsed information from an ivy report for one conf.

  :API: public
  """

  def __init__(self, conf):
    self._conf = conf
    self.modules_by_ref = {}  # Map from ref to referenced module.
    self.refs_by_unversioned_refs = {}  # Map from unversioned ref to the resolved versioned ref
    # Map from ref of caller to refs of modules required by that caller.
    self._deps_by_caller = defaultdict(OrderedSet)
    # Map from _unversioned_ ref to OrderedSet of IvyArtifact instances.
    self._artifacts_by_ref = defaultdict(OrderedSet)

  def add_module(self, module):
    """Indexes a module from the report by ref, unversioned ref and callers.

    :raises IvyResolveMappingError: if the module's ref (versioned or
      unversioned) was already recorded.
    """
    if not module.artifact:
      # Module was evicted, so do not record information about it
      return

    ref_unversioned = module.ref.unversioned
    if ref_unversioned in self.refs_by_unversioned_refs:
      raise IvyResolveMappingError('Already defined module {}, as rev {}!'
                                   .format(ref_unversioned, module.ref.rev))
    if module.ref in self.modules_by_ref:
      raise IvyResolveMappingError('Already defined module {}, would be overwritten!'
                                   .format(module.ref))
    self.refs_by_unversioned_refs[ref_unversioned] = module.ref
    self.modules_by_ref[module.ref] = module

    for caller in module.callers:
      self._deps_by_caller[caller.caller_key].add(module.ref)
    self._artifacts_by_ref[ref_unversioned].add(module.artifact)

  def _do_traverse_dependency_graph(self, ref, collector, memo, visited):
    # Use a membership test rather than truthiness so memoized *empty* sets
    # are honored: `if memo.get(ref):` (the previous form) silently
    # recomputed any ref whose accumulated set was empty.
    if ref in memo:
      return memo[ref]

    if ref in visited:
      # Ivy allows for circular dependencies
      # If we're here, that means we're resolving something that
      # transitively depends on itself
      return set()

    visited.add(ref)
    acc = collector(ref)
    # NB(zundel): ivy does not return deps in a consistent order for the same module for
    # different resolves.  Sort them to get consistency and prevent cache invalidation.
    # See https://github.com/pantsbuild/pants/issues/2607
    deps = sorted(self._deps_by_caller.get(ref.caller_key, ()))
    for dep in deps:
      acc.update(self._do_traverse_dependency_graph(dep, collector, memo, visited))

    memo[ref] = acc
    return acc

  def traverse_dependency_graph(self, ref, collector, memo=None):
    """Traverses module graph, starting with ref, collecting values for each ref into the sets
    created by the collector function.

    :param ref an IvyModuleRef to start traversing the ivy dependency graph
    :param collector a function that takes a ref and returns a new set of values to collect for
           that ref, which will also be updated with all the dependencies accumulated values
    :param memo is a dict of ref -> set that memoizes the results of each node in the graph.
           If provided, allows for retaining cache across calls.
    :returns the accumulated set for ref
    """
    # Normalize to the version ivy actually resolved, if it differs.
    resolved_ref = self.refs_by_unversioned_refs.get(ref.unversioned)
    if resolved_ref:
      ref = resolved_ref
    if memo is None:
      memo = dict()
    visited = set()
    return self._do_traverse_dependency_graph(ref, collector, memo, visited)

  def get_resolved_jars_for_coordinates(self, coordinates, memo=None):
    """Collects jars for the passed coordinates.

    Because artifacts are only fetched for the "winning" version of a module, the artifacts
    will not always represent the version originally declared by the library.

    This method is transitive within the passed coordinates dependencies.

    :param coordinates collections.Iterable: Collection of coordinates to collect transitive
                                             resolved jars for.
    :param memo: See `traverse_dependency_graph`.
    :returns: All the artifacts for all of the jars for the provided coordinates,
              including transitive dependencies.
    :rtype: list of :class:`pants.java.jar.ResolvedJar`
    """
    def to_resolved_jar(jar_ref, jar_path):
      return ResolvedJar(coordinate=M2Coordinate(org=jar_ref.org,
                                                 name=jar_ref.name,
                                                 rev=jar_ref.rev,
                                                 classifier=jar_ref.classifier,
                                                 ext=jar_ref.ext),
                         cache_path=jar_path)

    resolved_jars = OrderedSet()

    def create_collection(dep):
      return OrderedSet([dep])

    for jar in coordinates:
      # Non-default confs are modeled by ivy as classifiers on the artifact.
      classifier = jar.classifier if self._conf == 'default' else self._conf
      jar_module_ref = IvyModuleRef(jar.org, jar.name, jar.rev, classifier, jar.ext)
      for module_ref in self.traverse_dependency_graph(jar_module_ref, create_collection, memo):
        for artifact_path in self._artifacts_by_ref[module_ref.unversioned]:
          resolved_jars.add(to_resolved_jar(module_ref, artifact_path))
    return resolved_jars

  def __repr__(self):
    return 'IvyInfo(conf={}, refs={})'.format(self._conf, self.modules_by_ref.keys())
class IvyUtils(object):
  """Useful methods related to interaction with ivy.

  :API: public
  """
  # Protects ivy executions.
  _ivy_lock = threading.RLock()

  # Protect writes to the global map of jar path -> symlinks to that jar.
  _symlink_map_lock = threading.Lock()

  # Org used for targets synthesized inside the repo's own build.
  INTERNAL_ORG_NAME = 'internal'

  class IvyError(Exception):
    """Indicates an error preparing an ivy operation."""

  class IvyResolveReportError(IvyError):
    """Indicates that an ivy report cannot be found."""

  class IvyResolveConflictingDepsError(IvyError):
    """Indicates two or more locally declared dependencies conflict."""

  class BadRevisionError(IvyError):
    """Indicates an unparseable version number."""
  @staticmethod
  def _generate_exclude_template(exclude):
    # Renders an Exclude as the TemplateData consumed by the ivy.xml template.
    return TemplateData(org=exclude.org, name=exclude.name)
  @staticmethod
  def _generate_override_template(jar):
    # Renders a pinned jar as an override entry for the ivy.xml template.
    return TemplateData(org=jar.org, module=jar.name, version=jar.rev)
@staticmethod
def _load_classpath_from_cachepath(path):
if not os.path.exists(path):
return []
else:
with safe_open(path, 'r') as cp:
return filter(None, (path.strip() for path in cp.read().split(os.pathsep)))
@classmethod
def do_resolve(cls, executor, extra_args, ivyxml, jvm_options, workdir_report_paths_by_conf,
confs, ivy_cache_dir, ivy_cache_classpath_filename, resolve_hash_name,
workunit_factory, workunit_name):
"""Execute Ivy with the given ivy.xml and copies all relevant files into the workdir.
This method does an Ivy resolve, which may be either a Pants resolve or a Pants fetch depending
on whether there is an existing frozen resolution.
After it is run, the Ivy reports are copied into the workdir at the paths specified by
workdir_report_paths_by_conf along with a file containing a list of all the requested artifacts
and their transitive dependencies.
:param executor: A JVM executor to use to invoke ivy.
:param extra_args: Extra arguments to pass to ivy.
:param ivyxml: The input ivy.xml containing the dependencies to resolve.
:param jvm_options: A list of jvm option strings to use for the ivy invoke, or None.
:param workdir_report_paths_by_conf: A dict mapping confs to report paths in the workdir.
:param confs: The confs used in the resolve.
:param resolve_hash_name: The hash to use as the module name for finding the ivy report file.
:param workunit_factory: A workunit factory for the ivy invoke, or None.
:param workunit_name: A workunit name for the ivy invoke, or None.
"""
ivy = Bootstrapper.default_ivy(bootstrap_workunit_factory=workunit_factory)
with safe_concurrent_creation(ivy_cache_classpath_filename) as raw_target_classpath_file_tmp:
extra_args = extra_args or []
args = ['-cachepath', raw_target_classpath_file_tmp] + extra_args
with cls._ivy_lock:
cls._exec_ivy(ivy, confs, ivyxml, args,
jvm_options=jvm_options,
executor=executor,
workunit_name=workunit_name,
workunit_factory=workunit_factory)
if not os.path.exists(raw_target_classpath_file_tmp):
raise cls.IvyError('Ivy failed to create classpath file at {}'
.format(raw_target_classpath_file_tmp))
cls._copy_ivy_reports(workdir_report_paths_by_conf, confs, ivy_cache_dir, resolve_hash_name)
logger.debug('Moved ivy classfile file to {dest}'
.format(dest=ivy_cache_classpath_filename))
@classmethod
def _copy_ivy_reports(cls, workdir_report_paths_by_conf, confs, ivy_cache_dir, resolve_hash_name):
for conf in confs:
ivy_cache_report_path = IvyUtils.xml_report_path(ivy_cache_dir, resolve_hash_name,
conf)
workdir_report_path = workdir_report_paths_by_conf[conf]
try:
atomic_copy(ivy_cache_report_path,
workdir_report_path)
except IOError as e:
raise cls.IvyError('Failed to copy report into workdir from {} to {}: {}'
.format(ivy_cache_report_path, workdir_report_path, e))
@classmethod
def _exec_ivy(cls, ivy, confs, ivyxml, args, jvm_options, executor,
workunit_name, workunit_factory):
ivy = ivy or Bootstrapper.default_ivy()
ivy_args = ['-ivy', ivyxml]
ivy_args.append('-confs')
ivy_args.extend(confs)
ivy_args.extend(args)
ivy_jvm_options = list(jvm_options)
# Disable cache in File.getCanonicalPath(), makes Ivy work with -symlink option properly on ng.
ivy_jvm_options.append('-Dsun.io.useCanonCaches=false')
runner = ivy.runner(jvm_options=ivy_jvm_options, args=ivy_args, executor=executor)
try:
with ivy.resolution_lock:
result = execute_runner(runner, workunit_factory=workunit_factory,
workunit_name=workunit_name)
if result != 0:
raise IvyUtils.IvyError('Ivy returned {result}. cmd={cmd}'.format(result=result,
cmd=runner.cmd))
except runner.executor.Error as e:
raise IvyUtils.IvyError(e)
@classmethod
def construct_and_load_symlink_map(cls, symlink_dir, ivy_cache_dir,
ivy_cache_classpath_filename, symlink_classpath_filename):
# Make our actual classpath be symlinks, so that the paths are uniform across systems.
# Note that we must do this even if we read the raw_target_classpath_file from the artifact
# cache. If we cache the target_classpath_file we won't know how to create the symlinks.
with IvyUtils._symlink_map_lock:
# A common dir for symlinks into the ivy2 cache. This ensures that paths to jars
# in artifact-cached analysis files are consistent across systems.
# Note that we have one global, well-known symlink dir, again so that paths are
# consistent across builds.
symlink_map = cls._symlink_cachepath(ivy_cache_dir,
ivy_cache_classpath_filename,
symlink_dir,
symlink_classpath_filename)
classpath = cls._load_classpath_from_cachepath(symlink_classpath_filename)
return classpath, symlink_map
@classmethod
def _symlink_cachepath(cls, ivy_cache_dir, inpath, symlink_dir, outpath):
"""Symlinks all paths listed in inpath that are under ivy_cache_dir into symlink_dir.
If there is an existing symlink for a file under inpath, it is used rather than creating
a new symlink. Preserves all other paths. Writes the resulting paths to outpath.
Returns a map of path -> symlink to that path.
"""
safe_mkdir(symlink_dir)
# The ivy_cache_dir might itself be a symlink. In this case, ivy may return paths that
# reference the realpath of the .jar file after it is resolved in the cache dir. To handle
# this case, add both the symlink'ed path and the realpath to the jar to the symlink map.
real_ivy_cache_dir = os.path.realpath(ivy_cache_dir)
symlink_map = OrderedDict()
inpaths = cls._load_classpath_from_cachepath(inpath)
paths = OrderedSet([os.path.realpath(path) for path in inpaths])
for path in paths:
if path.startswith(real_ivy_cache_dir):
symlink_map[path] = os.path.join(symlink_dir, os.path.relpath(path, real_ivy_cache_dir))
else:
# This path is outside the cache. We won't symlink it.
symlink_map[path] = path
# Create symlinks for paths in the ivy cache dir.
for path, symlink in six.iteritems(symlink_map):
if path == symlink:
# Skip paths that aren't going to be symlinked.
continue
safe_mkdir(os.path.dirname(symlink))
try:
os.symlink(path, symlink)
except OSError as e:
# We don't delete and recreate the symlink, as this may break concurrently executing code.
if e.errno != errno.EEXIST:
raise
# (re)create the classpath with all of the paths
with safe_open(outpath, 'w') as outfile:
outfile.write(':'.join(OrderedSet(symlink_map.values())))
return dict(symlink_map)
@classmethod
def xml_report_path(cls, cache_dir, resolve_hash_name, conf):
"""The path to the xml report ivy creates after a retrieve.
:API: public
:param string cache_dir: The path of the ivy cache dir used for resolves.
:param string resolve_hash_name: Hash from the Cache key from the VersionedTargetSet used for
resolution.
:param string conf: The ivy conf name (e.g. "default").
:returns: The report path.
:rtype: string
"""
return os.path.join(cache_dir, '{}-{}-{}.xml'.format(IvyUtils.INTERNAL_ORG_NAME,
resolve_hash_name, conf))
@classmethod
def parse_xml_report(cls, conf, path):
"""Parse the ivy xml report corresponding to the name passed to ivy.
:API: public
:param string conf: the ivy conf name (e.g. "default")
:param string path: The path to the ivy report file.
:returns: The info in the xml report.
:rtype: :class:`IvyInfo`
:raises: :class:`IvyResolveMappingError` if no report exists.
"""
if not os.path.exists(path):
raise cls.IvyResolveReportError('Missing expected ivy output file {}'.format(path))
logger.debug("Parsing ivy report {}".format(path))
ret = IvyInfo(conf)
etree = ET.parse(path)
doc = etree.getroot()
for module in doc.findall('dependencies/module'):
org = module.get('organisation')
name = module.get('name')
for revision in module.findall('revision'):
rev = revision.get('name')
callers = []
for caller in revision.findall('caller'):
callers.append(IvyModuleRef(caller.get('organisation'),
caller.get('name'),
caller.get('callerrev')))
for artifact in revision.findall('artifacts/artifact'):
classifier = artifact.get('extra-classifier')
ext = artifact.get('ext')
ivy_module_ref = IvyModuleRef(org=org, name=name, rev=rev,
classifier=classifier, ext=ext)
artifact_cache_path = artifact.get('location')
ivy_module = IvyModule(ivy_module_ref, artifact_cache_path, tuple(callers))
ret.add_module(ivy_module)
return ret
@classmethod
def generate_ivy(cls, targets, jars, excludes, ivyxml, confs, resolve_hash_name=None,
pinned_artifacts=None, jar_dep_manager=None):
if not resolve_hash_name:
resolve_hash_name = Target.maybe_readable_identify(targets)
return cls._generate_resolve_ivy(jars, excludes, ivyxml, confs, resolve_hash_name, pinned_artifacts,
jar_dep_manager)
@classmethod
def _generate_resolve_ivy(cls, jars, excludes, ivyxml, confs, resolve_hash_name, pinned_artifacts=None,
jar_dep_manager=None):
org = IvyUtils.INTERNAL_ORG_NAME
name = resolve_hash_name
extra_configurations = [conf for conf in confs if conf and conf != 'default']
jars_by_key = OrderedDict()
for jar in jars:
jars = jars_by_key.setdefault((jar.org, jar.name), [])
jars.append(jar)
manager = jar_dep_manager or JarDependencyManagement.global_instance()
artifact_set = PinnedJarArtifactSet(pinned_artifacts) # Copy, because we're modifying it.
for jars in jars_by_key.values():
for i, dep in enumerate(jars):
direct_coord = M2Coordinate.create(dep)
managed_coord = artifact_set[direct_coord]
if direct_coord.rev != managed_coord.rev:
# It may be necessary to actually change the version number of the jar we want to resolve
# here, because overrides do not apply directly (they are exclusively transitive). This is
# actually a good thing, because it gives us more control over what happens.
coord = manager.resolve_version_conflict(managed_coord, direct_coord, force=dep.force)
jars[i] = dep.copy(rev=coord.rev)
elif dep.force:
# If this dependency is marked as 'force' and there is no version conflict, use the normal
# pants behavior for 'force'.
artifact_set.put(direct_coord)
dependencies = [cls._generate_jar_template(jars) for jars in jars_by_key.values()]
# As it turns out force is not transitive - it only works for dependencies pants knows about
# directly (declared in BUILD files - present in generated ivy.xml). The user-level ivy docs
# don't make this clear [1], but the source code docs do (see isForce docs) [2]. I was able to
# edit the generated ivy.xml and use the override feature [3] though and that does work
# transitively as you'd hope.
#
# [1] http://ant.apache.org/ivy/history/2.3.0/settings/conflict-managers.html
# [2] https://svn.apache.org/repos/asf/ant/ivy/core/branches/2.3.0/
# src/java/org/apache/ivy/core/module/descriptor/DependencyDescriptor.java
# [3] http://ant.apache.org/ivy/history/2.3.0/ivyfile/override.html
overrides = [cls._generate_override_template(_coord) for _coord in artifact_set]
excludes = [cls._generate_exclude_template(exclude) for exclude in excludes]
template_data = TemplateData(
org=org,
module=name,
extra_configurations=extra_configurations,
dependencies=dependencies,
excludes=excludes,
overrides=overrides)
template_relpath = os.path.join('templates', 'ivy_utils', 'ivy.xml.mustache')
cls._write_ivy_xml_file(ivyxml, template_data, template_relpath)
@classmethod
def generate_fetch_ivy(cls, jars, ivyxml, confs, resolve_hash_name):
"""Generates an ivy xml with all jars marked as intransitive using the all conflict manager."""
org = IvyUtils.INTERNAL_ORG_NAME
name = resolve_hash_name
extra_configurations = [conf for conf in confs if conf and conf != 'default']
# Use org name _and_ rev so that we can have dependencies with different versions. This will
# allow for batching fetching if we want to do that.
jars_by_key = OrderedDict()
for jar in jars:
jars_by_key.setdefault((jar.org, jar.name, jar.rev), []).append(jar)
dependencies = [cls._generate_fetch_jar_template(_jars) for _jars in jars_by_key.values()]
template_data = TemplateData(org=org,
module=name,
extra_configurations=extra_configurations,
dependencies=dependencies)
template_relpath = os.path.join('templates', 'ivy_utils', 'ivy_fetch.xml.mustache')
cls._write_ivy_xml_file(ivyxml, template_data, template_relpath)
@classmethod
def _write_ivy_xml_file(cls, ivyxml, template_data, template_relpath):
template_text = pkgutil.get_data(__name__, template_relpath)
generator = Generator(template_text, lib=template_data)
with safe_open(ivyxml, 'w') as output:
generator.write(output)
@classmethod
def calculate_classpath(cls, targets):
"""Creates a consistent classpath and list of excludes for the passed targets.
It also modifies the JarDependency objects' excludes to contain all the jars excluded by
provides.
:param iterable targets: List of targets to collect JarDependencies and excludes from.
:returns: A pair of a list of JarDependencies, and a set of excludes to apply globally.
"""
jars = OrderedDict()
global_excludes = set()
provide_excludes = set()
targets_processed = set()
# Support the ivy force concept when we sanely can for internal dep conflicts.
# TODO(John Sirois): Consider supporting / implementing the configured ivy revision picking
# strategy generally.
def add_jar(jar):
# TODO(John Sirois): Maven allows for depending on an artifact at one rev and one of its
# attachments (classified artifacts) at another. Ivy does not, allow this, the dependency
# can carry only 1 rev and that hosts multiple artifacts for that rev. This conflict
# resolution happens at the classifier level, allowing skew in a
# multi-artifact/multi-classifier dependency. We only find out about the skew later in
# `_generate_jar_template` below which will blow up with a conflict. Move this logic closer
# together to get a more clear validate, then emit ivy.xml then resolve flow instead of the
# spread-out validations happening here.
# See: https://github.com/pantsbuild/pants/issues/2239
coordinate = (jar.org, jar.name, jar.classifier)
existing = jars.get(coordinate)
jars[coordinate] = jar if not existing else cls._resolve_conflict(existing=existing,
proposed=jar)
def collect_jars(target):
if isinstance(target, JarLibrary):
for jar in target.jar_dependencies:
add_jar(jar)
def collect_excludes(target):
target_excludes = target.payload.get_field_value('excludes')
if target_excludes:
global_excludes.update(target_excludes)
def collect_provide_excludes(target):
if not target.is_exported:
return
logger.debug('Automatically excluding jar {}.{}, which is provided by {}'.format(
target.provides.org, target.provides.name, target))
provide_excludes.add(Exclude(org=target.provides.org, name=target.provides.name))
def collect_elements(target):
targets_processed.add(target)
collect_jars(target)
collect_excludes(target)
collect_provide_excludes(target)
for target in targets:
target.walk(collect_elements, predicate=lambda target: target not in targets_processed)
# If a source dep is exported (ie, has a provides clause), it should always override
# remote/binary versions of itself, ie "round trip" dependencies.
# TODO: Move back to applying provides excludes as target-level excludes when they are no
# longer global.
if provide_excludes:
additional_excludes = tuple(provide_excludes)
new_jars = OrderedDict()
for coordinate, jar in jars.items():
new_jars[coordinate] = jar.copy(excludes=jar.excludes + additional_excludes)
jars = new_jars
return jars.values(), global_excludes
@classmethod
def _resolve_conflict(cls, existing, proposed):
if existing.rev is None:
return proposed
if proposed.rev is None:
return existing
if proposed == existing:
if proposed.force:
return proposed
return existing
elif existing.force and proposed.force:
raise cls.IvyResolveConflictingDepsError('Cannot force {}#{};{} to both rev {} and {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev
))
elif existing.force:
logger.debug('Ignoring rev {} for {}#{};{} already forced to {}'.format(
proposed.rev, proposed.org, proposed.name, proposed.classifier or '', existing.rev
))
return existing
elif proposed.force:
logger.debug('Forcing {}#{};{} from {} to {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev
))
return proposed
else:
if Revision.lenient(proposed.rev) > Revision.lenient(existing.rev):
logger.debug('Upgrading {}#{};{} from rev {} to {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev,
))
return proposed
else:
return existing
@classmethod
def _generate_jar_template(cls, jars):
global_dep_attributes = set(Dependency(org=jar.org,
name=jar.name,
rev=jar.rev,
mutable=jar.mutable,
force=jar.force,
transitive=jar.transitive)
for jar in jars)
if len(global_dep_attributes) != 1:
# TODO: Need to provide information about where these came from - could be
# far-flung JarLibrary targets. The jars here were collected from targets via
# `calculate_classpath` above so executing this step there instead may make more
# sense.
conflicting_dependencies = sorted(str(g) for g in global_dep_attributes)
raise cls.IvyResolveConflictingDepsError('Found conflicting dependencies:\n\t{}'
.format('\n\t'.join(conflicting_dependencies)))
jar_attributes = global_dep_attributes.pop()
excludes = set()
for jar in jars:
excludes.update(jar.excludes)
any_have_url = False
artifacts = OrderedDict()
for jar in jars:
ext = jar.ext
url = jar.get_url()
if url:
any_have_url = True
classifier = jar.classifier
artifact = Artifact(name=jar.name,
type_=ext or 'jar',
ext=ext,
url=url,
classifier=classifier)
artifacts[(ext, url, classifier)] = artifact
template = TemplateData(
org=jar_attributes.org,
module=jar_attributes.name,
version=jar_attributes.rev,
mutable=jar_attributes.mutable,
force=jar_attributes.force,
transitive=jar_attributes.transitive,
artifacts=artifacts.values(),
any_have_url=any_have_url,
excludes=[cls._generate_exclude_template(exclude) for exclude in excludes])
return template
@classmethod
def _generate_fetch_jar_template(cls, jars):
global_dep_attributes = set(Dependency(org=jar.org,
name=jar.name,
rev=jar.rev,
transitive=False,
mutable=jar.mutable,
force=True)
for jar in jars)
if len(global_dep_attributes) != 1:
# If we batch fetches and assume conflict manager all, we could ignore these.
# Leaving this here for now.
conflicting_dependencies = sorted(str(g) for g in global_dep_attributes)
raise cls.IvyResolveConflictingDepsError('Found conflicting dependencies:\n\t{}'
.format('\n\t'.join(conflicting_dependencies)))
jar_attributes = global_dep_attributes.pop()
any_have_url = False
artifacts = OrderedDict()
for jar in jars:
ext = jar.ext
url = jar.get_url()
if url:
any_have_url = True
classifier = jar.classifier
artifact = Artifact(name=jar.name,
type_=ext or 'jar',
ext=ext,
url=url,
classifier=classifier)
artifacts[(ext, url, classifier)] = artifact
template = TemplateData(
org=jar_attributes.org,
module=jar_attributes.name,
version=jar_attributes.rev,
mutable=jar_attributes.mutable,
artifacts=artifacts.values(),
any_have_url=any_have_url,
excludes=[])
return template
|
|
import importlib.util
import json
import os
from functools import lru_cache
from os.path import join
import pytest
from .scripts import standalone
# Known-good test images keyed by a short nickname.  Each entry carries the
# keyword arguments consumed by `create_cache_directories()` and passed through
# to `standalone.main()`: registry/repo/tag/digest identify the image,
# `image_source` is where to pull from, and `schema_version` selects the
# manifest schema.
# NOTE(review): "centos8" and "alpine2.6" use bare "1"/"2" schema versions
# while the rest use "v2" -- presumably intentional; verify against the
# analyzer's schema handling.
pre_baked_images = {
    "centos8": {
        "registry": "docker.io",
        "repo": "centos",
        "tag": "centos8",
        "digest": "sha256:85313b812ad747dd19cf18078795b576cc4ae9cd2ca2ccccd7b5c12722b2effd",
        "image_source": "registry",
        "schema_version": "2",
    },
    "alpine2.6": {
        "registry": "docker.io",
        "repo": "alpine",
        "tag": "2.6",
        "digest": "sha256:e9cec9aec697d8b9d450edd32860ecd363f2f3174c8338beb5f809422d182c63",
        "image_source": "registry",
        "schema_version": "1",
    },
    "lean": {
        "registry": "docker.io",
        "repo": "anchore/test_images",
        "tag": "lean",
        "digest": "sha256:8d0e40d8e013bb0cda3d279b5021c473885c079e94010fd2208235d56982486f",
        "image_source": "registry",
        "schema_version": "v2",
    },
    "py38": {
        "registry": "docker.io",
        "repo": "anchore/test_images",
        "tag": "py38",
        "digest": "sha256:65e79fb7397ed96bd84656a664ac9978057930d90b2d5fde5e92a58adbee657c",
        "image_source": "registry",
        "schema_version": "v2",
    },
    "npm": {
        "registry": "docker.io",
        "repo": "anchore/test_images",
        "tag": "npm",
        "digest": "sha256:905a2bf5f3adf8ba8f1d4391cfb4a3e6bd671e0b2ec2f488071679a5f578c7d7",
        "image_source": "registry",
        "schema_version": "v2",
    },
    "java": {
        "registry": "docker.io",
        "repo": "anchore/test_images",
        "tag": "java",
        "digest": "sha256:9f453a37ea62976dd0f6b8ca4da2010cc01c3988f2e8c290044576d936bae710",
        "image_source": "registry",
        "schema_version": "v2",
    },
    "go": {
        "registry": "docker.io",
        "repo": "anchore/test_images",
        "tag": "engine-analyzer-golang-a8b30f2",
        "digest": "sha256:d7efe8ef45def7a7aa6571de3cc5857281b1d7dc5477e7e0cbff6ccb2d5f5f8c",
        "image_source": "registry",
        "schema_version": "v2",
    },
    "stretch-slim": {
        "registry": "docker.io",
        "repo": "anchore/test_images",
        "tag": "debian-stretch-slim",
        "digest": "sha256:cd74be1a65a7c7f07aa9952f622097a6452012fea741fbdade0e763edaa55ba0",
        "image_source": "registry",
        "schema_version": "v2",
    },
    "rpm": {
        "registry": "docker.io",
        "repo": "anchore/test_images",
        "tag": "centos8",
        "digest": "sha256:96d136c9cbaf22d73010e3e79e748e7772143fd9a584f8898d2f122cc5da1206",
        "image_source": "registry",
        "schema_version": "v2",
    },
    "busybox": {
        "registry": "docker.io",
        "repo": "busybox",
        "tag": "1.32.0-glibc",
        "digest": "sha256:6e6d13055ed81b7144afaad15150fc137d4f639482beb311aaa097bc57e3cb80",
        "image_source": "registry",
        "schema_version": "v2",
    },
    # skopeo inspect --override-os linux docker://anchore/test_images@sha256:bf25131f6f6ba5ca531b2075424bfb25c36cc01f8e83cc3c759c404870a64e38 --raw
    "bin": {
        "registry": "docker.io",
        "repo": "anchore/test_images",
        "tag": "bin",
        "digest": "sha256:bf25131f6f6ba5ca531b2075424bfb25c36cc01f8e83cc3c759c404870a64e38",
        "image_source": "registry",
        "schema_version": "v2",
    },
    # skopeo inspect --override-os linux docker://anchore/test_images@sha256:bfbc9520743a4601da82c24958e194d55e45b8cab7c5b466f6ac81c90308749f --raw
    "ownership-overlap": {
        "registry": "docker.io",
        "repo": "anchore/test_images",
        "tag": "ownership-overlap",
        "digest": "sha256:bfbc9520743a4601da82c24958e194d55e45b8cab7c5b466f6ac81c90308749f",
        "image_source": "registry",
        "schema_version": "v2",
    },
    "suids": {
        "registry": "docker.io",
        "repo": "anchore/test_images",
        "tag": "suids",
        "digest": "sha256:1d0df8e380b947e9f76a1082cc550c3634dbbcfeb78e4c4874eeb149f377326d",
        "image_source": "registry",
        "schema_version": "v2",
    },
    "secrets": {
        "registry": "docker.io",
        "repo": "anchore/test_images",
        "tag": "secrets",
        "digest": "sha256:0be667e0698fb204d2a6eaf42be8bf15db7edaf256c07e40caecbbcdbf6aad52",
        "image_source": "registry",
        "schema_version": "v2",
    },
}
def create_cache_directories(
    registry=None,
    repo=None,
    digest=None,
    image_source=None,
    schema_version=None,
    cache_root=None,
    **kw
):
    """
    Ensure the on-disk cache directory tree for one image exists and return it.

    Layout (relative to ``cache_root``)::

        <image_source>/<registry>/<repo>/<bare digest>/<schema_version>

    Creation is idempotent: existing directories are left untouched.
    """
    # Keep only the hex part of the digest: the analyzer splits paths on ':',
    # so leaving the "sha256:" prefix in would produce a broken cache path.
    bare_digest = digest.split(":")[-1]
    relative_parts = (image_source, registry, repo, bare_digest, schema_version)
    cache_path = join(cache_root, "/".join(str(part) for part in relative_parts))
    os.makedirs(cache_path, exist_ok=True)
    return cache_path
@pytest.fixture
def hints_image(monkeypatch, tmpdir):
    """
    Return a callable that analyzes `image` with a given anchore_hints.json.

    Expensive by design: the hints-file feature forces a full analysis on
    every call, so callers should stick to the smallest images available.
    """
    def run(contents, image):
        work_dir = tmpdir.strpath
        hints_path = os.path.join(work_dir, "anchore_hints.json")
        with open(hints_path, "w") as handle:
            json.dump(contents, handle)
        # Point the analyzer at the hints file we just wrote.
        monkeypatch.setenv("ANCHORE_TEST_HINTSFILE", hints_path)
        standalone.main(
            work_dir=work_dir,
            localconfig={"services": {"analyzer": {"enable_hints": True}}},
            **pre_baked_images[image]
        )
        spec = importlib.util.spec_from_file_location(
            "functional_results", join(work_dir, "result.py")
        )
        results_module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(results_module)
        # The standalone script writes `result = [<analysis>]` into result.py;
        # unwrap and return the single item.
        return results_module.result[0]

    return run
@pytest.fixture(scope="session")
def analyzed_data(request):
    @lru_cache(maxsize=10)
    def retrieve_cache(image="centos8"):
        """
        Return the analyzer result for `image`, caching it on disk.

        The cache path is composed from the image's arguments::

            <image_source>/<registry>/<repo>/<split digest>/<schema_version>

        e.g. ``registry/docker.io/centos/85a8df7bk3j28d7f0asd8/2``.  On a cache
        miss the (slow) analyzer runs and writes a ``result.py`` module, which
        is then loaded and returned.  Clear with ``pytest --cache-clear``; the
        on-disk location is ``{ROOT}/.pytest_cache/d/analyzer``.
        """
        kwargs = pre_baked_images[image]
        cache_root = request.config.cache.makedir("analyzer").strpath
        cache_path = create_cache_directories(cache_root=cache_root, **kwargs)
        results_path = join(cache_path, "result.py")
        # Cache miss: run the expensive analysis to produce result.py.
        if not os.path.exists(results_path):
            standalone.main(work_dir=cache_path, **kwargs)
        spec = importlib.util.spec_from_file_location(
            "functional_results", results_path
        )
        results_module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(results_module)
        # result.py contains `result = [<analysis>]`; unwrap the single item.
        return results_module.result[0]

    return retrieve_cache
|
|
#!/usr/bin/env python
################################################################################
# AUTHOR: Miguel A. Ibarra <miguelib@ufl.edu>
# DESCRIPTION: Take a a wide format file and perform a random forest analysis.
################################################################################
import os
import logging
import argparse
import warnings
from argparse import RawDescriptionHelpFormatter
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from matplotlib.backends.backend_pdf import PdfPages
from secimtools.dataManager import logger as sl
from secimtools.dataManager.interface import wideToDesign
from secimtools.visualManager.module_bar import quickHBar
from secimtools.visualManager.manager_color import colorHandler
from secimtools.visualManager.manager_figure import figureHandler
def getOptions(myopts=None):
    """ Function to pull in arguments.

    :param list myopts: Optional list of argument strings to parse instead of
        ``sys.argv`` (useful for testing/programmatic use). ``None`` keeps
        argparse's default behavior of reading ``sys.argv``.
    :returns: Parsed, path-normalized arguments.
    :rtype: argparse.Namespace
    """
    description = """ Random Forest """
    parser = argparse.ArgumentParser(description=description,
                                formatter_class=RawDescriptionHelpFormatter)
    # Standard Input
    standard = parser.add_argument_group(title='Standard input',
                                description='Standard input for SECIM tools.')
    standard.add_argument("-i", "--input", dest="input", action='store',
                        required=True, help="Input dataset in wide format.")
    standard.add_argument("-d", "--design", dest="design", action='store',
                        required=True, help="Design file.")
    standard.add_argument("-id", "--ID", dest="uniqID", action='store',
                        required=True, help="Name of the column with unique"\
                        " identifiers on wide file.")
    standard.add_argument("-g", "--group", dest="group", action='store',
                        required=True, help="Group/treatment identifier in "\
                        "design file.")
    standard.add_argument("-l","--levels",dest="levels",action="store",
                        required=False, default=False, help="Different groups to"\
                        " sort by separeted by commas.")
    # Tool Input
    tool = parser.add_argument_group(title='Tool specific input',
                                description='Optional/Specific input for the tool.')
    tool.add_argument("-s","--snum", dest="snum", action='store', type=int,
                        required=False,default=1000,help="Number of estimators.")
    tool.add_argument("-n","--num", dest="num", action='store', type=int,
                        required=False,default=20,help="Number of varibles to"\
                        "plot ont Variable Importance Plot")
    # Tool Output
    output = parser.add_argument_group(title='Required output')
    output.add_argument("-o","--out", dest="oname", action='store',
                        required=True, help="Output file name.")
    output.add_argument("-o2","--out2", dest="oname2", action='store',
                        required=True, help="Output file name.")
    output.add_argument("-f","--figure",dest="figure",action="store",
                        required=False,help="Name of output file to store "\
                        "feature importance plots for the model")
    # Plot Options
    plot = parser.add_argument_group(title='Plot options')
    plot.add_argument("-pal","--palette",dest="palette",action='store',required=False,
                        default="sequential", help="Name of the palette to use.")
    plot.add_argument("-col","--color",dest="color",action="store",required=False,
                        default="Blues_9", help="Name of a valid color"
                        " scheme on the selected palette")
    # BUG FIX: `myopts` was previously accepted but never forwarded, so the
    # parser always read sys.argv. parse_args(None) preserves old CLI behavior.
    args = parser.parse_args(myopts)
    # Standardize paths
    args.input = os.path.abspath(args.input)
    args.oname = os.path.abspath(args.oname)
    args.design = os.path.abspath(args.design)
    args.oname2 = os.path.abspath(args.oname2)
    # BUG FIX: --figure is optional; os.path.abspath(None) raised TypeError when
    # it was omitted. NOTE(review): main() still requires a figure path in
    # practice (PdfPages) -- confirm whether --figure should be required.
    if args.figure:
        args.figure = os.path.abspath(args.figure)
    if args.levels:
        args.levels = args.levels.split(",")
    return(args)
def plotVarImportance(palette, data, pdf, var):
    """
    Plot the top `var` most important random forest features as a horizontal
    bar chart (importance shown as a percentage) and append it to the PDF.

    :Arguments:
        :type palette: colorHandler
        :param palette: Palette used to color the bars; its colormap is
            chomped in place (start=3..palette.number).

        :type data: pandas.DataFrame
        :param data: Feature importances with "feature" and
            "ranked_importance" columns, ordered most-important first.

        :type pdf: matplotlib.backends.backend_pdf.PdfPages
        :param pdf: PDF object to save the generated figure into.

        :type var: int
        :param var: Number of top features to plot.

    :Returns:
        Nothing; the figure is written into `pdf`.
    """
    # Keep only the first `var` rows (input is sorted most-important first)
    data=data[:var]
    # Re-sort ascending so the most important feature lands at the top of the
    # horizontal bar chart
    data=data.sort_values(by="ranked_importance", ascending=True, axis=0)
    # Creating a figure handler instance
    fh = figureHandler(proj='2d', figsize=(8,8))
    # Chomp palette (drop the lightest colors at the start of the colormap)
    palette.chompColors(start=3,end=palette.number)
    # Get color list, one color per bar, mapped from importance values
    colors = palette.getColorsCmapPalette(data["ranked_importance"])
    # Multiply by 100 to get percentages instead of proportions
    data["ranked_importance"] = data["ranked_importance"]*100
    # Creating plot
    quickHBar(ax=fh.ax[0], values=data["ranked_importance"],
            xticks=data["feature"], colors=colors, lw=0)
    # Formatting axis
    fh.formatAxis(figTitle="Variable Importance Plot", xTitle="%", grid=False,
                yTitle="Features")
    # Adding figure to pdf
    fh.addToPdf(dpi=600,pdfPages=pdf)
def runRFC(data, group, revertStr, origStr, nStim):
    """
    Run a Random Forest Classifier on the data and build output tables.

    :Arguments:
        :type data: pandas.DataFrame
        :param data: Samples x features table that also contains the group
            column; columns containing NaNs are dropped before fitting.

        :type group: str
        :param group: Name of the classification (group) column in `data`.

        :type revertStr: callable
        :param revertStr: Maps a cleaned string back to its unfiltered form
            (see interface.wideToDesign for details).

        :type origStr: dict-like
        :param origStr: Lookup from cleaned column names to original names.

        :type nStim: int
        :param nStim: Number of estimators (trees) for the forest.

    :Returns:
        :rtype: tuple
        :return: (df_rev, data, df_importance) -- importances with reverted
            names, transformed data joined with the classes, and importances
            with cleaned names sorted by descending importance.
    """
    # Drop columns with NaNs from data.
    data.dropna(axis=1, inplace=True)
    # Pull classifications out of dataset.
    classes = data[group].copy()
    # Remove class column from data.
    data.drop(group, axis=1, inplace=True)
    # Build Random Forest classifier
    # NOTE(review): no random_state is set, so feature importances vary
    # between runs -- confirm nondeterminism is acceptable here.
    rfc_model = RandomForestClassifier(n_estimators=nStim)
    rfc_model.fit(data, classes)
    # Identify features and creating a dataFrame for it
    df_importance = pd.DataFrame([data.columns, rfc_model.feature_importances_],
                            index=['feature', 'ranked_importance']).T
    # Sort the dataFrame by importance
    df_importance = df_importance.sort_values(by="ranked_importance", axis=0,
                            ascending=False)
    # Get unfiltered names for importance (look at interface for more detail)
    df_rev = df_importance.applymap(lambda x: revertStr(x))
    # Reorder data columns by descending feature importance
    data = data[df_importance["feature"].tolist()]
    # Restore the original (uncleaned) column names
    reverted_columns = [origStr[x] for x in data.columns]
    data.columns = reverted_columns
    # Convert Series to dataFrame
    df_classes = pd.DataFrame(classes)
    # Reset index on classifications DataFrame
    df_classes.reset_index(inplace=True)
    # Join classifications to the transformed data
    # NOTE(review): assumes the reset index column is named 'sampleID' --
    # verify against wideToDesign's design index naming.
    data = df_classes.join(data, on='sampleID')
    return(df_rev, data, df_importance)
def main(args):
    """Perform the Random Forest analysis.

    Loads the wide/design data, drops missing values, runs a Random Forest
    classifier on the transposed data, writes a variable-importance plot to a
    PDF, and exports the transformed data and importance tables as TSV.

    :Arguments:
        :type args: argparse.Namespace
        :param args: Parsed command-line options (input, design, uniqID,
            group, levels, palette, color, snum, num, figure, oname, oname2).
    """
    # Set the color palette
    palette = colorHandler(pal=args.palette, col=args.color)
    logger.info("Using {0} color scheme from {1} palette".format(args.color, args.palette))
    # Import data through interface
    dat = wideToDesign(args.input, args.design, args.uniqID, args.group,
                        anno=args.levels, clean_string=True, logger=logger)
    # Cleaning from missing data
    dat.dropMissing()
    # Select remaining sample ids for dataframe filtering
    sample_ids = dat.wide.index.tolist()
    # Grab the group column to extract classes from
    sample_ids.append(dat.group)
    data = dat.transpose().loc[:, sample_ids]
    # Run Random Forest Classifier on data.
    logger.info('Creating classifier')
    df_rev, df_transf, df_importance = runRFC(data, dat.group, dat.revertStr, dat.origString, nStim=args.snum)
    # Plot feature importances
    logger.info('Plotting Variable Importance Plot')
    with PdfPages(args.figure) as pdfOut:
        plotVarImportance(palette, data=df_importance, pdf=pdfOut, var=args.num)
    # Exporting Transformed data and df_rev data
    logger.info('Exporting data to TSV format')
    df_transf.to_csv(args.oname, index=False, sep='\t', float_format="%.4f")
    df_rev.to_csv(args.oname2, index=False, sep='\t')
if __name__ == '__main__':
    """Tool is called on the command-line"""
    # Parse command-line options and set up the root logger for the run.
    args = getOptions()
    logger = logging.getLogger()
    sl.setLogger(logger)
    # Record the key input parameters before doing any work.
    logger.info("Importing data with following parameters: "\
        "\n\tWide: {0}"\
        "\n\tDesign: {1}"\
        "\n\tUnique ID: {2}"\
        "\n\tGroup Column: {3}".format(args.input, args.design,
            args.uniqID, args.group))
    # Silence noisy DeprecationWarnings coming from dependencies.
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    main(args)
|
|
# This file implements a MultiVolumeVisual class that can be used to show
# multiple volumes simultaneously. It is derived from the original VolumeVisual
# class in vispy.visuals.volume, which is releaed under a BSD license included
# here:
#
# ===========================================================================
# Vispy is licensed under the terms of the (new) BSD license:
#
# Copyright (c) 2015, authors of Vispy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Vispy Development Team nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===========================================================================
#
# This modified version is released under the BSD license given in the LICENSE
# file in this repository.
from __future__ import absolute_import, division, print_function
from distutils.version import LooseVersion
from collections import defaultdict
import numpy as np
from glue.external import six
from glue.utils import iterate_chunks
from ..extern.vispy.gloo import Texture3D, TextureEmulated3D, VertexBuffer, IndexBuffer
from ..extern.vispy.visuals import VolumeVisual, Visual
from ..extern.vispy.visuals.shaders import Function
from ..extern.vispy.color import get_colormap, Color
from ..extern.vispy.scene.visuals import create_visual_node
from .shaders import get_frag_shader, VERT_SHADER
# True when the installed NumPy predates 1.13 (consumers of this flag are not
# visible in this part of the file).
NUMPY_LT_1_13 = LooseVersion(np.__version__) < LooseVersion('1.13')
class NoFreeSlotsError(Exception):
    """Raised when every available volume slot is already allocated."""
class MultiVolumeVisual(VolumeVisual):
    """
    Displays multiple 3D volumes simultaneously.
    Parameters
    ----------
    volumes : list of tuples
        The volumes to show. Each tuple should contain three elements: the data
        array, the clim values, and the colormap to use. The clim values should
        be either a 2-element tuple, or None.
    emulate_texture : bool
        Use 2D textures to emulate a 3D texture. OpenGL ES 2.0 compatible,
        but has lower performance on desktop platforms.
    n_volume_max : int
        Absolute maximum number of volumes that can be shown.

    NOTE(review): ``__init__`` takes no ``volumes`` argument; volumes are
    added later via :meth:`allocate`/:meth:`set_data`. The ``volumes`` entry
    above looks inherited from the original VolumeVisual docstring -- confirm
    and update.
    """
    def __init__(self, n_volume_max=16, emulate_texture=False, bgcolor='white', resolution=256):
        # Choose texture class
        tex_cls = TextureEmulated3D if emulate_texture else Texture3D
        self._n_volume_max = n_volume_max
        self._vol_shape = (resolution, resolution, resolution)
        self._need_vertex_update = True
        # Bounds of the currently shown slice; stays None until
        # _update_slice_transform is called, which gates data uploads.
        self._data_bounds = None
        self.resolution = resolution
        # We deliberately don't use super here because we don't want to call
        # VolumeVisual.__init__
        Visual.__init__(self, vcode=VERT_SHADER, fcode="")
        # Per-label state dicts: 'index', 'clim', 'cmap', 'data', 'layer',
        # 'multiply' keys are filled in lazily by the setters below.
        self.volumes = defaultdict(dict)
        # We turn on clipping straight away - the following variable is needed
        # by _update_shader
        self._clip_data = True
        # Set up initial shader so that we can start setting shader variables
        # that don't depend on what volumes are actually active.
        self._update_shader()
        # Set initial clipping parameters
        self.shared_program['u_clip_min'] = [0, 0, 0]
        self.shared_program['u_clip_max'] = [1, 1, 1]
        # Set up texture vertices - note that these variables are required by
        # the parent VolumeVisual class.
        self._vertices = VertexBuffer()
        self._texcoord = VertexBuffer(
            np.array([[0, 0, 0],
                      [1, 0, 0],
                      [0, 1, 0],
                      [1, 1, 0],
                      [0, 0, 1],
                      [1, 0, 1],
                      [0, 1, 1],
                      [1, 1, 1]], dtype=np.float32))
        self._draw_mode = 'triangle_strip'
        self._index_buffer = IndexBuffer()
        self.shared_program['a_position'] = self._vertices
        self.shared_program['a_texcoord'] = self._texcoord
        # Only show back faces of cuboid. This is required because if we are
        # inside the volume, then the front faces are outside of the clipping
        # box and will not be drawn.
        # NOTE(review): cull_face=False disables face culling entirely, which
        # contradicts the comment above -- confirm the intended GL state.
        self.set_gl_state('translucent', cull_face=False)
        # Set up the underlying volume shape and define textures
        self._vol_shape = (resolution, resolution, resolution)
        self.shared_program['u_shape'] = self._vol_shape
        self.textures = []
        for i in range(n_volume_max):
            # Set up texture object
            self.textures.append(tex_cls(self._vol_shape, interpolation='linear',
                                         wrapping='clamp_to_edge'))
            # Pass texture object to shader program
            self.shared_program['u_volumetex_{0}'.format(i)] = self.textures[i]
            # Make sure all textures are disabled
            self.shared_program['u_enabled_{0}'.format(i)] = 0
            self.shared_program['u_weight_{0}'.format(i)] = 1
        # Don't use downsampling initially (1 means show 1:1 resolution)
        self.shared_program['u_downsample'] = 1.
        # Set up texture sampler
        self.shared_program.frag['sampler_type'] = self.textures[0].glsl_sampler_type
        self.shared_program.frag['sample'] = self.textures[0].glsl_sample
        # Set initial background color
        self.shared_program['u_bgcolor'] = Color(bgcolor).rgba
        # Prevent additional attributes from being added
        try:
            self.freeze()
        except AttributeError:  # Older versions of VisPy
            pass
    def _update_shader(self, force=False):
        """Rebuild the fragment shader for the current set of volumes.

        The shader is re-uploaded only when its source actually changed (or
        when ``force`` is True) to avoid needless GPU uploads.
        """
        shader = get_frag_shader(self.volumes, clipped=self._clip_data,
                                 n_volume_max=self._n_volume_max)
        # We only actually update the shader in OpenGL if the code has changed
        # to avoid any overheads in uploading the new shader code
        if force or getattr(self, '_shader_cache', None) != shader:
            self.shared_program.frag = shader
            self._shader_cache = shader
    # The following methods change things which require the shader code to be updated
    def allocate(self, label):
        """Reserve a free texture slot for ``label`` (disabled until enabled).

        Raises ``ValueError`` if the label already exists and
        ``NoFreeSlotsError`` (via ``_free_slot_index``) if all slots are used.
        """
        if label in self.volumes:
            raise ValueError("Label {0} already exists".format(label))
        index = self._free_slot_index
        self.volumes[label] = {}
        self.volumes[label]['index'] = index
        self.shared_program['u_enabled_{0}'.format(index)] = 0
        self._update_shader()
    def deallocate(self, label):
        """Release the slot held by ``label`` (no-op if not allocated)."""
        if label not in self.volumes:
            return  # layer already deallocated
        self.disable(label)
        self.volumes.pop(label)
        self._update_shader()
    def set_clip(self, clip_data, clip_limits):
        """Enable/disable clipping and, when enabling, set the clip box.

        ``clip_limits`` is a 6-element sequence: min xyz then max xyz.
        """
        self._clip_data = int(clip_data)
        if clip_data:
            self.shared_program['u_clip_min'] = clip_limits[:3]
            self.shared_program['u_clip_max'] = clip_limits[3:]
        self._update_shader()
    def set_multiply(self, label, label_other):
        """Record that volume ``label`` should be multiplied by ``label_other``."""
        self.volumes[label]['multiply'] = label_other
        self._update_shader()
    # The following methods don't require any changes to the shader code, so we
    # don't update the shader after setting the OpenGL variables.
    def enable(self, label):
        """Turn on rendering of the volume associated with ``label``."""
        index = self.volumes[label]['index']
        self.shared_program['u_enabled_{0}'.format(index)] = 1
    def disable(self, label):
        """Turn off rendering of the volume associated with ``label``."""
        index = self.volumes[label]['index']
        self.shared_program['u_enabled_{0}'.format(index)] = 0
    def downsample(self):
        """Render at a reduced sampling rate.

        True division is in effect here (the module imports ``division`` from
        ``__future__``), so the step is a float.
        """
        min_dimension = min(self._vol_shape)
        self.shared_program['u_downsample'] = min_dimension / 20
    def upsample(self):
        """Restore full 1:1 sampling resolution."""
        self.shared_program['u_downsample'] = 1.
    def set_background(self, color):
        """Set the background color used by the fragment shader."""
        self.shared_program['u_bgcolor'] = Color(color).rgba
    def set_resolution(self, resolution):
        """Change the (cubic) texture resolution.

        NOTE(review): ``u_shape`` is set to the reversed shape here but to the
        unreversed shape in ``__init__``; for a cubic volume both are equal,
        so this is harmless -- confirm which order is intended.
        """
        self.resolution = resolution
        self._vol_shape = (resolution, resolution, resolution)
        self.shared_program['u_shape'] = self._vol_shape[::-1]
    def set_cmap(self, label, cmap):
        """Assign a colormap (name or Colormap object) to ``label``."""
        if isinstance(cmap, six.string_types):
            cmap = get_colormap(cmap)
        self.volumes[label]['cmap'] = cmap
        index = self.volumes[label]['index']
        self.shared_program.frag['cmap{0:d}'.format(index)] = Function(cmap.glsl_map)
    def set_clim(self, label, clim):
        """Set the (lo, hi) normalization limits for ``label``.

        Triggers a re-upload of the data when data is already attached.
        """
        # Avoid setting the same limits again
        if 'clim' in self.volumes[label] and self.volumes[label]['clim'] == clim:
            return
        self.volumes[label]['clim'] = clim
        if 'data' in self.volumes[label]:
            self._update_scaled_data(label)
    def set_weight(self, label, weight):
        """Set the blending weight for the volume of ``label``."""
        index = self.volumes[label]['index']
        self.shared_program['u_weight_{0:d}'.format(index)] = weight
    def set_data(self, label, data, layer=None):
        """Attach a data object (and optional layer) to ``label``.

        ``set_clim`` must have been called for this label first, since the
        upload normalizes against the limits.
        """
        if 'clim' not in self.volumes[label]:
            raise ValueError("set_clim should be called before set_data")
        # Avoid adding the same data again
        if 'data' in self.volumes[label] and self.volumes[label]['data'] is data:
            return
        self.volumes[label]['data'] = data
        self.volumes[label]['layer'] = layer
        self._update_scaled_data(label)
    def _update_scaled_data(self, label):
        """Normalize the current slice of ``label`` and upload it chunk-wise."""
        # If the data slice hasn't been set yet, we should stop here
        if self._data_bounds is None:
            return
        index = self.volumes[label]['index']
        clim = self.volumes[label].get('clim', None)
        data = self.volumes[label]['data']
        # With certain graphics cards, sending the data in one chunk to OpenGL
        # causes artifacts in the rendering - see e.g.
        # https://github.com/vispy/vispy/issues/1412
        # To avoid this, we process the data in chunks. Since we need to do
        # this, we can also do the copy and renormalization on the chunk to
        # avoid excessive memory usage.
        # To start off we need to tell the texture about the new shape
        self.shared_program['u_volumetex_{0:d}'.format(index)].resize(data.shape)
        # Determine the chunk shape - the value of 128 as the minimum value
        # is arbitrary but appears to work nicely. We can reduce that in future
        # if needed.
        # NOTE(review): `data` is assumed to provide
        # compute_fixed_resolution_buffer() (a glue data object) -- confirm.
        sliced_data = data.compute_fixed_resolution_buffer(self._data_bounds)
        chunk_shape = [min(x, 128, self.resolution) for x in sliced_data.shape]
        # FIXME: shouldn't be needed!
        zeros = np.zeros(self._vol_shape, dtype=np.float32)
        self.shared_program['u_volumetex_{0:d}'.format(index)].set_data(zeros)
        # Now loop over chunks
        for view in iterate_chunks(sliced_data.shape, chunk_shape=chunk_shape):
            chunk = sliced_data[view]
            chunk = chunk.astype(np.float32)
            if clim is not None:
                chunk -= clim[0]
                chunk *= 1 / (clim[1] - clim[0])
            # PERF: nan_to_num doesn't actually help memory usage as it runs
            # isnan internally, and it's slower, so we just use the following
            # method. In future we could do this directly with a C extension.
            chunk[np.isnan(chunk)] = 0.
            offset = tuple([s.start for s in view])
            if chunk.size == 0:
                continue
            self.shared_program['u_volumetex_{0:d}'.format(index)].set_data(chunk, offset=offset)
    def label_for_layer(self, layer):
        """Return the label whose stored layer is ``layer`` (implicitly None
        when no label matches)."""
        for label in self.volumes:
            if 'layer' in self.volumes[label]:
                if self.volumes[label]['layer'] is layer:
                    return label
    @property
    def has_free_slots(self):
        """Whether at least one of the ``n_volume_max`` slots is unused."""
        try:
            self._free_slot_index
        except NoFreeSlotsError:
            return False
        else:
            return True
    @property
    def _free_slot_index(self):
        """Lowest unused slot index; raises ``NoFreeSlotsError`` when full."""
        indices = [self.volumes[label]['index'] for label in self.volumes]
        for index in range(self._n_volume_max):
            if index not in indices:
                return index
        raise NoFreeSlotsError("No free slots")
    def _update_slice_transform(self, x_min, x_max, y_min, y_max, z_min, z_max):
        """Update the data bounds and transform; re-upload data when changed."""
        # TODO: simplify this to get bounds, for FRB
        x_step = (x_max - x_min) / self.resolution
        y_step = (y_max - y_min) / self.resolution
        z_step = (z_max - z_min) / self.resolution
        # Bounds are stored z, y, x -- matching the texture axis order.
        data_bounds = [(z_min, z_max, self.resolution),
                       (y_min, y_max, self.resolution),
                       (x_min, x_max, self.resolution)]
        # We should stop at this point if the bounds are the same as before
        if data_bounds == self._data_bounds:
            return
        else:
            self._data_bounds = data_bounds
        self.transform.inner.scale = [x_step, y_step, z_step]
        self.transform.inner.translate = [x_min, y_min, z_min]
        # We need to update the data in OpenGL if the slice has changed
        for label in self.volumes:
            self._update_scaled_data(label)
        # The following is needed to make sure that VisPy recognizes the changes
        # to the transforms.
        self.transform._update_shaders()
        self.transform.update()
    @property
    def enabled(self):
        """List of booleans, one per slot, saying which volumes are enabled."""
        return [self.shared_program['u_enabled_{0}'.format(i)] == 1
                for i in range(self._n_volume_max)]
    def draw(self):
        """Draw the visual; skipped entirely when no volume is enabled.

        NOTE(review): all exceptions from the parent draw are swallowed,
        which can hide real GL errors -- confirm this is intentional.
        """
        if not any(self.enabled):
            return
        else:
            try:
                super(MultiVolumeVisual, self).draw()
            except Exception:
                pass
# Scene-graph node class wrapping MultiVolumeVisual (standard VisPy pattern).
MultiVolume = create_visual_node(MultiVolumeVisual)
|
|
#!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
"""
import sys
import os
from models import *
from groupdocs.FileStream import FileStream
from groupdocs.ApiClient import ApiException
class SystemApi(object):
    """Client for the GroupDocs "system" endpoints (plans, subscriptions,
    countries/states and billing address).

    NOTE: this class was originally auto generated by the swagger code
    generator. The repetitive request plumbing has been factored into the
    private helpers ``_check_kwargs`` and ``_invoke``; the public method
    signatures and observable behavior are unchanged. ``== None`` checks were
    replaced with ``is None`` and the Python-2-only ``iteritems()`` loop with
    a 2/3-compatible iteration.
    """

    def __init__(self, apiClient):
        # apiClient must provide toPathValue/callAPI/deserialize.
        self.apiClient = apiClient
        self.__basePath = "https://dev-api.groupdocs.com/v2.0"

    @property
    def basePath(self):
        """Base URL prepended to every resource path."""
        return self.__basePath

    @basePath.setter
    def basePath(self, value):
        self.__basePath = value

    @staticmethod
    def _check_kwargs(kwargs, allParams, methodName):
        """Raise TypeError for any keyword argument not in ``allParams``.

        Mirrors the generated code: since every name in ``allParams`` is a
        positional parameter, any extra keyword argument is rejected.
        """
        for key in kwargs:
            if key not in allParams:
                raise TypeError("Got an unexpected keyword argument '%s' to method %s" % (key, methodName))

    def _invoke(self, resourcePath, method, pathParams, responseType, postData=None):
        """Substitute path parameters, perform the API call and deserialize.

        :param resourcePath: path template with ``{name}`` placeholders.
        :param method: HTTP verb ('GET' or 'PUT').
        :param pathParams: dict of placeholder name -> value.
        :param responseType: model name passed to ``deserialize``.
        :param postData: request body, or None.
        :return: deserialized response object, or None on empty response.
        """
        resourcePath = resourcePath.replace('*', '').replace('{format}', 'json')
        for name, value in pathParams.items():
            replacement = str(self.apiClient.toPathValue(value))
            resourcePath = resourcePath.replace('{' + name + '}', replacement)
        # The generated code always sent empty query and header params.
        response = self.apiClient.callAPI(self.basePath, resourcePath, method,
                                          {}, postData, {})
        if not response:
            return None
        return self.apiClient.deserialize(response, responseType)

    def GetUserPlan(self, callerId, **kwargs):
        """Get user plan

        Args:
            callerId, str: User GUID (required)

        Returns: GetPlanResponse
        """
        if callerId is None:
            raise ApiException(400, "missing required parameters")
        self._check_kwargs(kwargs, ['callerId'], 'GetUserPlan')
        return self._invoke('/system/{callerId}/plan', 'GET',
                            {'callerId': callerId}, 'GetPlanResponse')

    def GetUserSubscriptionPlan(self, callerId, **kwargs):
        """Get user subscription plan

        Args:
            callerId, str: User GUID (required)

        Returns: GetUserSubscriptionPlanResponse
        """
        if callerId is None:
            raise ApiException(400, "missing required parameters")
        self._check_kwargs(kwargs, ['callerId'], 'GetUserSubscriptionPlan')
        return self._invoke('/system/{callerId}/subscription', 'GET',
                            {'callerId': callerId},
                            'GetUserSubscriptionPlanResponse')

    def GetSubscriptionPlans(self, callerId, family, **kwargs):
        """Get subscription plans

        Args:
            callerId, str: User GUID (required)
            family, str: Product Family Name (required)

        Returns: GetSubscriptionPlansResponse
        """
        if callerId is None or family is None:
            raise ApiException(400, "missing required parameters")
        self._check_kwargs(kwargs, ['callerId', 'family'], 'GetSubscriptionPlans')
        # NOTE(review): the '{invalidate}' placeholder below is never
        # substituted (same as the generated code) -- the literal text is
        # sent in the query string. Preserved for behavioral compatibility.
        return self._invoke('/system/{callerId}/plans/{family}?invalidate={invalidate}',
                            'GET', {'callerId': callerId, 'family': family},
                            'GetSubscriptionPlansResponse')

    def SetSubscriptionPlan(self, userId, productId, body, **kwargs):
        """Set subscription plan user plan

        Args:
            userId, str: User GUID (required)
            productId, str: Product ID (required)
            body, SubscriptionPlanInfo: Subscription Plan (required)

        Returns: SetUserSubscriptionPlanResponse
        """
        if userId is None or productId is None or body is None:
            raise ApiException(400, "missing required parameters")
        self._check_kwargs(kwargs, ['userId', 'productId', 'body'], 'SetSubscriptionPlan')
        return self._invoke('/system/{userId}/subscriptions/{productId}', 'PUT',
                            {'userId': userId, 'productId': productId},
                            'SetUserSubscriptionPlanResponse', postData=body)

    def GetCountries(self, callerId, **kwargs):
        """Get countries

        Args:
            callerId, str: User GUID (required)

        Returns: GetCountriesResponse
        """
        if callerId is None:
            raise ApiException(400, "missing required parameters")
        self._check_kwargs(kwargs, ['callerId'], 'GetCountries')
        return self._invoke('/system/{callerId}/countries', 'GET',
                            {'callerId': callerId}, 'GetCountriesResponse')

    def GetStates(self, callerId, countryName, **kwargs):
        """Get states

        Args:
            callerId, str: User GUID (required)
            countryName, str: Country Name (required)

        Returns: GetStatesResponse
        """
        if callerId is None or countryName is None:
            raise ApiException(400, "missing required parameters")
        self._check_kwargs(kwargs, ['callerId', 'countryName'], 'GetStates')
        return self._invoke('/system/{callerId}/countries/{countryName}/states',
                            'GET',
                            {'callerId': callerId, 'countryName': countryName},
                            'GetStatesResponse')

    def SetBillingAddress(self, userId, body, **kwargs):
        """Set user billing address

        Args:
            userId, str: User GUID (required)
            body, BillingAddressInfo: Billing Address (required)

        Returns: GetBillingAddressResponse
        """
        if userId is None or body is None:
            raise ApiException(400, "missing required parameters")
        self._check_kwargs(kwargs, ['userId', 'body'], 'SetBillingAddress')
        return self._invoke('/system/{userId}/billingaddress', 'PUT',
                            {'userId': userId},
                            'GetBillingAddressResponse', postData=body)
|
|
"""
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
    """Accuracy must be non-decreasing in `n_candidates`, and the largest
    setting must strictly beat the smallest."""
    n_candidates_values = np.array([.1, 50, 500])
    n_samples = 100
    n_features = 10
    n_iter = 10
    n_points = 5
    rng = np.random.RandomState(42)
    accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
    X = rng.rand(n_samples, n_features)
    for i, n_candidates in enumerate(n_candidates_values):
        forest = LSHForest(n_candidates=n_candidates)
        forest.fit(X)
        for _ in range(n_iter):
            point = X[rng.randint(0, n_samples)]
            approx = forest.kneighbors(point, n_neighbors=n_points,
                                       return_distance=False)
            exact_dists = pairwise_distances(point, X, metric='cosine')
            true_ranks = np.argsort(exact_dists)[0, :n_points]
            n_shared = np.intersect1d(true_ranks, approx).shape[0]
            # Accumulate the recall of this query against the exact ranking
            accuracies[i] += n_shared / float(n_points)
        # Average recall over all trials for this n_candidates value
        accuracies[i] /= float(n_iter)
    # Accuracy should grow (weakly) with the candidate pool size
    assert_true(np.all(np.diff(accuracies) >= 0),
                msg="Accuracies are not non-decreasing.")
    # And the spread must be strictly positive
    assert_true(np.ptp(accuracies) > 0,
                msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
    """Accuracy must be non-decreasing in `n_estimators`, and the largest
    forest must strictly beat the smallest."""
    n_estimators = np.array([1, 10, 100])
    n_samples = 100
    n_features = 10
    n_iter = 10
    n_points = 5
    rng = np.random.RandomState(42)
    accuracies = np.zeros(n_estimators.shape[0], dtype=float)
    X = rng.rand(n_samples, n_features)
    for i, n_trees in enumerate(n_estimators):
        forest = LSHForest(n_candidates=500, n_estimators=n_trees)
        forest.fit(X)
        for _ in range(n_iter):
            point = X[rng.randint(0, n_samples)]
            approx = forest.kneighbors(point, n_neighbors=n_points,
                                       return_distance=False)
            exact_dists = pairwise_distances(point, X, metric='cosine')
            true_ranks = np.argsort(exact_dists)[0, :n_points]
            n_shared = np.intersect1d(true_ranks, approx).shape[0]
            # Accumulate the recall of this query against the exact ranking
            accuracies[i] += n_shared / float(n_points)
        # Average recall over all trials for this forest size
        accuracies[i] /= float(n_iter)
    # Accuracy should grow (weakly) with the number of trees
    assert_true(np.all(np.diff(accuracies) >= 0),
                msg="Accuracies are not non-decreasing.")
    # And the spread must be strictly positive
    assert_true(np.ptp(accuracies) > 0,
                msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
    # Checks whether desired number of neighbors are returned.
    # It is guaranteed to return the requested number of neighbors
    # if `min_hash_match` is set to 0. Returned distances should be
    # in ascending order.
    n_samples = 12
    n_features = 2
    n_iter = 10
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)
    lshf = LSHForest(min_hash_match=0)
    # Test unfitted estimator
    assert_raises(ValueError, lshf.kneighbors, X[0])
    lshf.fit(X)
    for i in range(n_iter):
        # NOTE(review): randint's upper bound is exclusive, so n_neighbors can
        # be 0 here -- this relies on LSHForest accepting n_neighbors=0.
        n_neighbors = rng.randint(0, n_samples)
        query = X[rng.randint(0, n_samples)]
        neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
                                    return_distance=False)
        # Desired number of neighbors should be returned.
        assert_equal(neighbors.shape[1], n_neighbors)
    # Multiple points: one row of output per query
    n_queries = 5
    queries = X[rng.randint(0, n_samples, n_queries)]
    distances, neighbors = lshf.kneighbors(queries,
                                           n_neighbors=1,
                                           return_distance=True)
    assert_equal(neighbors.shape[0], n_queries)
    assert_equal(distances.shape[0], n_queries)
    # Test only neighbors (return_distance=False path)
    neighbors = lshf.kneighbors(queries, n_neighbors=1,
                                return_distance=False)
    assert_equal(neighbors.shape[0], n_queries)
    # Test random point(not in the data set)
    query = rng.randn(n_features)
    lshf.kneighbors(query, n_neighbors=1,
                    return_distance=False)
    # Test n_neighbors at initialization (default is 5)
    neighbors = lshf.kneighbors(query, return_distance=False)
    assert_equal(neighbors.shape[1], 5)
    # Test `neighbors` has an integer dtype
    assert_true(neighbors.dtype.kind == 'i',
                msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether Returned distances are less than `radius`
    # At least one point should be returned when the `radius` is set
    # to mean distance from the considering point to other points in
    # the database.
    # Moreover, this test compares the radius neighbors of LSHForest
    # with the `sklearn.neighbors.NearestNeighbors`.
    n_samples = 12
    n_features = 2
    n_iter = 10
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)
    lshf = LSHForest()
    # Test unfitted estimator
    assert_raises(ValueError, lshf.radius_neighbors, X[0])
    lshf.fit(X)
    for i in range(n_iter):
        # Select a random point in the dataset as the query
        query = X[rng.randint(0, n_samples)]
        # At least one neighbor should be returned when the radius is the
        # mean distance from the query to the points of the dataset.
        mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
        neighbors = lshf.radius_neighbors(query, radius=mean_dist,
                                          return_distance=False)
        # One object-dtype array entry per query (here: a single query)
        assert_equal(neighbors.shape, (1,))
        assert_equal(neighbors.dtype, object)
        assert_greater(neighbors[0].shape[0], 0)
        # All distances to points in the results of the radius query should
        # be less than mean_dist
        distances, neighbors = lshf.radius_neighbors(query,
                                                     radius=mean_dist,
                                                     return_distance=True)
        assert_array_less(distances[0], mean_dist)
    # Multiple points
    n_queries = 5
    queries = X[rng.randint(0, n_samples, n_queries)]
    distances, neighbors = lshf.radius_neighbors(queries,
                                                 return_distance=True)
    # dists and inds should not be 1D arrays or arrays of variable lengths
    # hence the use of the object dtype.
    assert_equal(distances.shape, (n_queries,))
    assert_equal(distances.dtype, object)
    assert_equal(neighbors.shape, (n_queries,))
    assert_equal(neighbors.dtype, object)
    # Compare with exact neighbor search
    query = X[rng.randint(0, n_samples)]
    mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
    nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
    distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
    distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
    # Radius-based queries do not sort the result points and the order
    # depends on the method, the random_state and the dataset order. Therefore
    # we need to sort the results ourselves before performing any comparison.
    sorted_dists_exact = np.sort(distances_exact[0])
    sorted_dists_approx = np.sort(distances_approx[0])
    # Distances to exact neighbors are less than or equal to approximate
    # counterparts as the approximate radius query might have missed some
    # closer neighbors.
    assert_true(np.all(np.less_equal(sorted_dists_exact,
                                     sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
    """Exact vs. approximate radius queries must agree on boundary points."""
    X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
    n_points = len(X)

    # Reference exact model, plus an LSHForest parameterized so that it is
    # guaranteed to be exact on this toy dataset.
    exact_nn = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
    approx_nn = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)

    # Query aligned with the first axis.
    query = [1., 0.]

    # Exact cosine distances from the query to the four dataset points.
    dists = pairwise_distances(query, X, metric='cosine').ravel()
    # Point 0 is almost aligned with the query (tiny angle): distance ~ 0.
    assert_almost_equal(dists[0], 0, decimal=5)
    # Point 1 forms a 45 degree angle with the query.
    assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
    # Point 2 is orthogonal to the query: distance exactly one.
    assert_almost_equal(dists[2], 1)
    # Point 3 is almost anti-parallel: distance close to the maximum of 2.
    assert_almost_equal(dists[3], 2, decimal=5)

    def check_radius(radius, expected_idx, expected_dists):
        # Both models must return the same index set and (sorted) distances.
        e_dists, e_idx = exact_nn.radius_neighbors(query, radius=radius)
        a_dists, a_idx = approx_nn.radius_neighbors(query, radius=radius)
        assert_array_equal(np.sort(e_idx[0]), expected_idx)
        assert_array_equal(np.sort(a_idx[0]), expected_idx)
        assert_array_almost_equal(np.sort(e_dists[0]), expected_dists)
        assert_array_almost_equal(np.sort(a_dists[0]), expected_dists)

    # With radius one, every sample but the last is returned; the third
    # sample lies exactly on the query boundary.
    check_radius(1, [0, 1, 2], dists[:-1])

    # Shrinking the radius by one ulp rejects the boundary point.
    eps = np.finfo(np.float64).eps
    check_radius(1 - eps, [0, 1], dists[:-2])
def test_distances():
    """Checks that returned neighbors are ordered from closest to farthest."""
    n_samples = 12
    n_features = 2
    n_iter = 10
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)
    lshf = LSHForest()
    lshf.fit(X)
    for i in range(n_iter):
        # Draw at least one neighbor: randint(0, n_samples) could yield
        # n_neighbors=0, which would make the monotonicity assertion
        # below vacuous (empty diff).
        n_neighbors = rng.randint(1, n_samples)
        query = X[rng.randint(0, n_samples)]
        distances, neighbors = lshf.kneighbors(query,
                                               n_neighbors=n_neighbors,
                                               return_distance=True)
        # Returned neighbors should be from closest to farthest, that is
        # increasing distance values.
        assert_true(np.all(np.diff(distances[0]) >= 0))
    # Note: the radius_neighbors method does not guarantee the order of
    # the results.
def test_fit():
    """`fit` must populate every fitted attribute consistently."""
    n_samples = 12
    n_features = 2
    n_estimators = 5
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)
    forest = LSHForest(n_estimators=n_estimators)
    forest.fit(X)
    # The training data is stored verbatim.
    assert_array_equal(X, forest._fit_X)
    # One hash function g(p) per tree.
    assert_equal(n_estimators, len(forest.hash_functions_))
    # Each hash function projects onto 32 components (hash length).
    assert_equal(32, forest.hash_functions_[0].components_.shape[0])
    # One tree per estimator.
    assert_equal(n_estimators, len(forest.trees_))
    # Every tree has an entry for every training point.
    assert_equal(n_samples, len(forest.trees_[0]))
    # One array of original indices per tree.
    assert_equal(n_estimators, len(forest.original_indices_))
    # Each index array covers every training point.
    assert_equal(n_samples, len(forest.original_indices_[0]))
def test_partial_fit():
    """Checks that inserting an array is consistent with the fitted data.

    `partial_fit` must set all attribute values correctly, both on an
    unfitted estimator and when extending an already fitted one.
    """
    n_samples = 12
    n_samples_partial_fit = 3
    n_features = 2
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)
    X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
    lshf = LSHForest()
    # On an unfitted estimator, partial_fit behaves like a plain fit.
    lshf.partial_fit(X)
    assert_array_equal(X, lshf._fit_X)
    lshf.fit(X)
    # Inserting data of the wrong dimensionality must fail.  Use the
    # seeded rng rather than the global numpy state so the test stays
    # reproducible.
    assert_raises(ValueError, lshf.partial_fit,
                  rng.randn(n_samples_partial_fit, n_features - 1))
    lshf.partial_fit(X_partial_fit)
    # _fit_X grows by the number of inserted samples...
    assert_equal(lshf._fit_X.shape[0],
                 n_samples + n_samples_partial_fit)
    # ...and so does each tree's array of original indices...
    assert_equal(len(lshf.original_indices_[0]),
                 n_samples + n_samples_partial_fit)
    # ...and each tree itself.
    assert_equal(len(lshf.trees_[1]),
                 n_samples + n_samples_partial_fit)
def test_hash_functions():
    """Checks the randomness of the hash functions.

    The variance and mean of each projection matrix should differ from
    those of the flattened collection of all projection matrices; if the
    hash functions were seeded identically they would all be equal.
    """
    n_samples = 12
    n_features = 2
    n_estimators = 5
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)
    forest = LSHForest(n_estimators=n_estimators,
                       random_state=rng.randint(0, np.iinfo(np.int32).max))
    forest.fit(X)
    projections = [forest.hash_functions_[i].components_
                   for i in range(n_estimators)]
    for i in range(n_estimators):
        assert_not_equal(np.var(projections),
                         np.var(forest.hash_functions_[i].components_))
    for i in range(n_estimators):
        assert_not_equal(np.mean(projections),
                         np.mean(forest.hash_functions_[i].components_))
def test_candidates():
    """Checks the handling of insufficient candidates.

    Covers both the zero-candidate case and the case where fewer
    candidates than requested neighbors are found; the user must be
    warned and the result padded from unselected indices.
    """
    X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
                        [6, 10, 2]], dtype=np.float32)
    X_test = np.array([7, 10, 3], dtype=np.float32)

    def expected_message(n_neighbors, min_hash_match):
        # Exact warning text emitted by LSHForest.kneighbors.
        return ("Number of candidates is not sufficient to retrieve"
                " %i neighbors with"
                " min_hash_match = %i. Candidates are filled up"
                " uniformly from unselected"
                " indices." % (n_neighbors, min_hash_match))

    # (32, 3): zero candidates; (31, 5): fewer candidates than neighbors.
    for min_hash_match, n_neighbors in [(32, 3), (31, 5)]:
        forest = LSHForest(min_hash_match=min_hash_match)
        forest.fit(X_train)
        assert_warns_message(UserWarning,
                             expected_message(n_neighbors, min_hash_match),
                             forest.kneighbors, X_test,
                             n_neighbors=n_neighbors)
        distances, neighbors = forest.kneighbors(X_test,
                                                 n_neighbors=n_neighbors)
        assert_equal(distances.shape[1], n_neighbors)
def test_graphs():
    """Smoke tests for kneighbors_graph / radius_neighbors_graph."""
    n_features = 3
    rng = np.random.RandomState(42)
    for n_samples in [5, 10, 20]:
        X = rng.rand(n_samples, n_features)
        forest = LSHForest(min_hash_match=0)
        forest.fit(X)
        knn_graph = forest.kneighbors_graph(X)
        rnn_graph = forest.radius_neighbors_graph(X)
        # Both graphs must be square with one row/column per sample.
        assert_equal(knn_graph.shape[0], n_samples)
        assert_equal(knn_graph.shape[1], n_samples)
        assert_equal(rnn_graph.shape[0], n_samples)
        assert_equal(rnn_graph.shape[1], n_samples)
def test_sparse_input():
    """Sparse and dense inputs must produce the same neighbors."""
    # Note: a fixed random state in sp.rand is not supported by older
    # scipy versions; the test should succeed regardless of the draw.
    X1 = sp.rand(50, 100)
    X2 = sp.rand(10, 100)
    forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
    forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)

    # kneighbors: distances and indices must match exactly.
    d_sp, i_sp = forest_sparse.kneighbors(X2, return_distance=True)
    d_de, i_de = forest_dense.kneighbors(X2.A, return_distance=True)
    assert_almost_equal(d_sp, d_de)
    assert_almost_equal(i_sp, i_de)

    # radius_neighbors returns object arrays of per-query results, so
    # compare row by row.
    d_sp, i_sp = forest_sparse.radius_neighbors(X2,
                                                return_distance=True)
    d_de, i_de = forest_dense.radius_neighbors(X2.A,
                                               return_distance=True)
    assert_equal(d_sp.shape, d_de.shape)
    for row_sp, row_de in zip(d_sp, d_de):
        assert_almost_equal(row_sp, row_de)
    for row_sp, row_de in zip(i_sp, i_de):
        assert_almost_equal(row_sp, row_de)
|
|
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for soc.logic.helper.timeline.
"""
import unittest
from datetime import datetime
from datetime import timedelta
from soc.logic.helper import timeline
from soc.models import timeline as timeline_model
from soc.modules.seeder.logic.seeder import logic as seeder_logic
class TimelineTest(unittest.TestCase):
  """Unit tests for the timeline helper functions.
  """
  def setUp(self):
    # Seed a fresh Timeline entity; each test overwrites its DateTime
    # properties relative to the current UTC time.
    self.timeline = seeder_logic.seed(timeline_model.Timeline)
  def testIsBeforePeriod(self):
    """Tests if a correct bool is returned if the current DateTime is before
    a given period_start.
    """
    # Program is yet to start.
    self.timeline.program_start = datetime.utcnow() + timedelta(10)
    self.assertTrue(timeline.isBeforePeriod(self.timeline, 'program'))
    # Program has already started.
    self.timeline.program_start = datetime.utcnow() - timedelta(10)
    self.assertFalse(timeline.isBeforePeriod(self.timeline, 'program'))
    # Student signup period is yet to start.
    self.timeline.student_signup_start = datetime.utcnow() + timedelta(10)
    self.assertTrue(timeline.isBeforePeriod(self.timeline, 'student_signup'))
    # Student signup period has already started.
    self.timeline.student_signup_start = datetime.utcnow() - timedelta(10)
    self.assertFalse(timeline.isBeforePeriod(self.timeline, 'student_signup'))
    # An event that is not in the timeline is never "before".
    self.assertFalse(timeline.isBeforePeriod(self.timeline, 'other_event'))
  def testIsBeforeEvent(self):
    """Tests if a correct bool is returned if current DateTime
    is before a given event.
    """
    # Program has not started.
    self.timeline.program_start = datetime.utcnow() + timedelta(20)
    self.assertTrue(timeline.isBeforeEvent(self.timeline, 'program_start'))
    # Program has already started.
    self.timeline.program_start = datetime.utcnow() - timedelta(20)
    self.assertFalse(timeline.isBeforeEvent(self.timeline, 'program_start'))
    # Program has not ended.
    self.timeline.program_end = datetime.utcnow() + timedelta(20)
    self.assertTrue(timeline.isBeforeEvent(self.timeline, 'program_end'))
    # Program has ended.
    self.timeline.program_end = datetime.utcnow() - timedelta(20)
    self.assertFalse(timeline.isBeforeEvent(self.timeline, 'program_end'))
    # The deadline to announce accepted organizations has not passed.
    self.timeline.accepted_organization_announced_deadline = (datetime.utcnow()
                                                              + timedelta(20))
    self.assertTrue(timeline.isBeforeEvent(
        self.timeline, "accepted_organization_announced_deadline"))
    # The deadline to announce accepted organizations has passed.
    self.timeline.accepted_organization_announced_deadline = (datetime.utcnow()
                                                              - timedelta(20))
    self.assertFalse(timeline.isBeforeEvent(
        self.timeline, "accepted_organization_announced_deadline"))
    # Student sign up period has not started.
    self.timeline.student_signup_start = datetime.utcnow() + timedelta(20)
    self.assertTrue(timeline.isBeforeEvent(self.timeline,
                                           'student_signup_start'))
    # Student sign up period has already started.
    self.timeline.student_signup_start = datetime.utcnow() - timedelta(20)
    self.assertFalse(timeline.isBeforeEvent(self.timeline,
                                            'student_signup_start'))
    # Student sign up period has not ended.
    self.timeline.student_signup_end = datetime.utcnow() + timedelta(20)
    self.assertTrue(timeline.isBeforeEvent(self.timeline,
                                           'student_signup_end'))
    # Student sign up period has already ended.
    self.timeline.student_signup_end = datetime.utcnow() - timedelta(20)
    self.assertFalse(timeline.isBeforeEvent(self.timeline,
                                            'student_signup_end'))
    # An event that is not in the timeline is never "before".
    self.assertFalse(timeline.isBeforeEvent(self.timeline, 'other_event'))
  def testIsActivePeriod(self):
    """Tests if a correct boolean is returned if the current DateTime is
    between period_start and period_end.
    """
    # Program is currently running.
    self.timeline.program_start = datetime.utcnow() - timedelta(10)
    self.timeline.program_end = datetime.utcnow() + timedelta(10)
    self.assertTrue(timeline.isActivePeriod(self.timeline, 'program'))
    # Program will start in the future.
    self.timeline.program_start = datetime.utcnow() + timedelta(10)
    self.timeline.program_end = datetime.utcnow() + timedelta(20)
    self.assertFalse(timeline.isActivePeriod(self.timeline, 'program'))
    # Program has already ended.
    self.timeline.program_start = datetime.utcnow() - timedelta(20)
    self.timeline.program_end = datetime.utcnow() - timedelta(10)
    self.assertFalse(timeline.isActivePeriod(self.timeline, 'program'))
    # Student sign up period will start in the future.
    self.timeline.student_signup_start = datetime.utcnow() + timedelta(10)
    self.timeline.student_signup_end = datetime.utcnow() + timedelta(30)
    self.assertFalse(timeline.isActivePeriod(self.timeline, 'student_signup'))
    # Student sign up period has started but not ended.
    self.timeline.student_signup_start = datetime.utcnow() - timedelta(10)
    self.timeline.student_signup_end = datetime.utcnow() + timedelta(20)
    self.assertTrue(timeline.isActivePeriod(self.timeline,
                                            'student_signup'))
    # Student sign up period has already ended.
    self.timeline.student_signup_start = datetime.utcnow() - timedelta(30)
    self.timeline.student_signup_end = datetime.utcnow() - timedelta(20)
    self.assertFalse(timeline.isActivePeriod(self.timeline,
                                             'student_signup'))
    # An event that is not in the timeline is never active.
    self.assertFalse(timeline.isActivePeriod(self.timeline, 'other_event'))
  def testActivePeriod(self):
    """Tests if the start and end of a specified period is returned.
    """
    start = datetime(2011, 4, 3)
    end = datetime(2020, 4, 3)
    self.timeline.program_start = start
    self.timeline.program_end = end
    actual = timeline.activePeriod(self.timeline, 'program')
    expected = (start, end)
    self.assertEqual(actual, expected)
    start = datetime(2011, 7, 4)
    end = datetime(2021, 7, 5)
    self.timeline.student_signup_start = start
    self.timeline.student_signup_end = end
    actual = timeline.activePeriod(self.timeline, 'student_signup')
    expected = (start, end)
    self.assertEqual(actual, expected)
    # An event that is not in the timeline yields (None, None).
    expected = (None, None)
    actual = timeline.activePeriod(self.timeline, 'some_other_event')
    self.assertEqual(actual, expected)
  def testIsAfterPeriod(self):
    """Tests if True is returned if current DateTime is after period_end.
    """
    # Program has ended.
    self.timeline.program_end = datetime.utcnow() - timedelta(10)
    self.assertTrue(timeline.isAfterPeriod(self.timeline, 'program'))
    # Program has not ended.
    self.timeline.program_end = datetime.utcnow() + timedelta(10)
    self.assertFalse(timeline.isAfterPeriod(self.timeline, 'program'))
    # Student sign up has ended.
    self.timeline.student_signup_end = datetime.utcnow() - timedelta(10)
    self.assertTrue(timeline.isAfterPeriod(self.timeline, 'student_signup'))
    # Student sign up has not ended.
    self.timeline.student_signup_end = datetime.utcnow() + timedelta(10)
    self.assertFalse(timeline.isAfterPeriod(self.timeline, 'student_signup'))
    # An event that is not in the timeline is never "after".
    self.assertFalse(timeline.isAfterPeriod(self.timeline, 'some_other_event'))
  def testIsAfterEvent(self):
    """Tests if True is returned if current DateTime is after the given event.
    """
    # Program has started.
    self.timeline.program_start = datetime.utcnow() - timedelta(10)
    self.assertTrue(timeline.isAfterEvent(self.timeline, 'program_start'))
    # Program is yet to start.
    self.timeline.program_start = datetime.utcnow() + timedelta(10)
    self.assertFalse(timeline.isAfterEvent(self.timeline, 'program_start'))
    # Program has ended.
    self.timeline.program_start = datetime.utcnow() - timedelta(30)
    self.timeline.program_end = datetime.utcnow() - timedelta(20)
    self.assertTrue(timeline.isAfterEvent(self.timeline, 'program_end'))
    # The deadline to announce accepted organizations has not passed.
    self.timeline.accepted_organization_announced_deadline = (datetime.utcnow()
                                                              + timedelta(20))
    self.assertFalse(timeline.isAfterEvent(
        self.timeline, "accepted_organization_announced_deadline"))
    # The deadline to announce accepted organizations has passed.
    self.timeline.accepted_organization_announced_deadline = (datetime.utcnow()
                                                              - timedelta(20))
    self.assertTrue(timeline.isAfterEvent(
        self.timeline, "accepted_organization_announced_deadline"))
    # Student sign up period has not started.
    self.timeline.student_signup_start = datetime.utcnow() + timedelta(20)
    self.assertFalse(timeline.isAfterEvent(self.timeline,
                                           'student_signup_start'))
    # Student sign up period has already started.
    self.timeline.student_signup_start = datetime.utcnow() - timedelta(20)
    self.assertTrue(timeline.isAfterEvent(self.timeline,
                                          'student_signup_start'))
    # Student sign up period has not ended.
    self.timeline.student_signup_end = datetime.utcnow() + timedelta(20)
    self.assertFalse(timeline.isAfterEvent(self.timeline,
                                           'student_signup_end'))
    # Student sign up period has already ended.
    self.timeline.student_signup_end = datetime.utcnow() - timedelta(20)
    self.assertTrue(timeline.isAfterEvent(self.timeline,
                                          'student_signup_end'))
    # An event that is not in the timeline is never "after".
    self.assertFalse(timeline.isAfterEvent(self.timeline, 'some_other_event'))
  def testGetDateTimeByname(self):
    """Tests that a DateTime property with a given name is returned.
    """
    self.timeline.program_start = datetime(2011, 7, 1)
    # The name is available in the timeline.
    name = 'program_start'
    entity = self.timeline
    expected = self.timeline.program_start
    actual = timeline.getDateTimeByName(entity, name)
    self.assertEqual(actual, expected)
    self.timeline.program_end = datetime(2012, 7, 4)
    name = 'program_end'
    entity = self.timeline
    expected = self.timeline.program_end
    actual = timeline.getDateTimeByName(entity, name)
    self.assertEqual(actual, expected)
    self.timeline.student_signup_start = datetime(2011, 9, 5)
    name = 'student_signup_start'
    entity = self.timeline
    expected = self.timeline.student_signup_start
    actual = timeline.getDateTimeByName(entity, name)
    self.assertEqual(actual, expected)
    self.timeline.student_signup_end = datetime(2011, 12, 4)
    name = 'student_signup_end'
    entity = self.timeline
    expected = self.timeline.student_signup_end
    actual = timeline.getDateTimeByName(entity, name)
    self.assertEqual(actual, expected)
    self.timeline.accepted_organization_announced_deadline = datetime(2011, 5, 4)
    name = 'accepted_organization_announced_deadline'
    entity = self.timeline
    expected = self.timeline.accepted_organization_announced_deadline
    actual = timeline.getDateTimeByName(entity, name)
    self.assertEqual(actual, expected)
    # The name is not available in the timeline; None is expected.
    name = 'some_name'
    entity = self.timeline
    expected = None
    actual = timeline.getDateTimeByName(entity, name)
    self.assertEqual(expected, actual)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""ResNets, implemented in Gluon."""
from __future__ import division
__all__ = ['ResNetV1', 'ResNetV2',
'BasicBlockV1', 'BasicBlockV2',
'BottleneckV1', 'BottleneckV2',
'resnet18_v1', 'resnet34_v1', 'resnet50_v1', 'resnet101_v1', 'resnet152_v1',
'resnet18_v2', 'resnet34_v2', 'resnet50_v2', 'resnet101_v2', 'resnet152_v2',
'get_resnet']
from ....context import cpu
from ...block import HybridBlock
from ... import nn
# Helpers
def _conv3x3(channels, stride, in_channels):
    """Return a bias-free 3x3 convolution with padding 1 (a BatchNorm
    always follows it in this file, so the bias would be redundant)."""
    conv = nn.Conv2D(channels, kernel_size=3, strides=stride, padding=1,
                     use_bias=False, in_channels=in_channels)
    return conv
# Blocks
class BasicBlockV1(HybridBlock):
    r"""BasicBlock V1 from `"Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385>`_ paper.
    This is used for ResNet V1 for 18, 34 layers.
    Parameters
    ----------
    channels : int
        Number of output channels.
    stride : int
        Stride size.
    downsample : bool, default False
        Whether to downsample the input.
    in_channels : int, default 0
        Number of input channels. Default is 0, to infer from the graph.
    """
    def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
        super(BasicBlockV1, self).__init__(**kwargs)
        # Main branch: conv3x3 -> BN -> ReLU -> conv3x3 -> BN.
        # (V1 "post-activation": the last ReLU is applied after the
        # residual sum in hybrid_forward.)
        self.body = nn.HybridSequential(prefix='')
        self.body.add(_conv3x3(channels, stride, in_channels))
        self.body.add(nn.BatchNorm())
        self.body.add(nn.Activation('relu'))
        self.body.add(_conv3x3(channels, 1, channels))
        self.body.add(nn.BatchNorm())
        if downsample:
            # Projection shortcut: strided 1x1 conv + BN to match the
            # main branch's shape when it changes.
            self.downsample = nn.HybridSequential(prefix='')
            self.downsample.add(nn.Conv2D(channels, kernel_size=1, strides=stride,
                                          use_bias=False, in_channels=in_channels))
            self.downsample.add(nn.BatchNorm())
        else:
            # Identity shortcut.
            self.downsample = None
    def hybrid_forward(self, F, x):
        """Return relu(body(x) + shortcut(x))."""
        residual = x
        x = self.body(x)
        if self.downsample:
            residual = self.downsample(residual)
        # ReLU after the element-wise sum (ResNet V1 ordering).
        x = F.Activation(residual+x, act_type='relu')
        return x
class BottleneckV1(HybridBlock):
    r"""Bottleneck V1 from `"Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385>`_ paper.
    This is used for ResNet V1 for 50, 101, 152 layers.
    Parameters
    ----------
    channels : int
        Number of output channels.
    stride : int
        Stride size.
    downsample : bool, default False
        Whether to downsample the input.
    in_channels : int, default 0
        Number of input channels. Default is 0, to infer from the graph.
    """
    def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
        super(BottleneckV1, self).__init__(**kwargs)
        # Main branch: 1x1 reduce (channels//4) -> 3x3 (strided) ->
        # 1x1 expand (channels), each followed by BN; the final ReLU is
        # applied after the residual sum in hybrid_forward.
        self.body = nn.HybridSequential(prefix='')
        # NOTE(review): unlike _conv3x3 and the downsample conv, these
        # 1x1 convs do not set use_bias=False -- confirm whether that is
        # intentional before "fixing" it (changing it would invalidate
        # any previously serialized parameters).
        self.body.add(nn.Conv2D(channels//4, kernel_size=1, strides=1))
        self.body.add(nn.BatchNorm())
        self.body.add(nn.Activation('relu'))
        self.body.add(_conv3x3(channels//4, stride, channels//4))
        self.body.add(nn.BatchNorm())
        self.body.add(nn.Activation('relu'))
        self.body.add(nn.Conv2D(channels, kernel_size=1, strides=1))
        self.body.add(nn.BatchNorm())
        if downsample:
            # Projection shortcut: strided 1x1 conv + BN.
            self.downsample = nn.HybridSequential(prefix='')
            self.downsample.add(nn.Conv2D(channels, kernel_size=1, strides=stride,
                                          use_bias=False, in_channels=in_channels))
            self.downsample.add(nn.BatchNorm())
        else:
            # Identity shortcut.
            self.downsample = None
    def hybrid_forward(self, F, x):
        """Return relu(body(x) + shortcut(x))."""
        residual = x
        x = self.body(x)
        if self.downsample:
            residual = self.downsample(residual)
        # ReLU after the element-wise sum (ResNet V1 ordering).
        x = F.Activation(x + residual, act_type='relu')
        return x
class BasicBlockV2(HybridBlock):
    r"""BasicBlock V2 from
    `"Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027>`_ paper.
    This is used for ResNet V2 for 18, 34 layers.
    Parameters
    ----------
    channels : int
        Number of output channels.
    stride : int
        Stride size.
    downsample : bool, default False
        Whether to downsample the input.
    in_channels : int, default 0
        Number of input channels. Default is 0, to infer from the graph.
    """
    def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
        super(BasicBlockV2, self).__init__(**kwargs)
        # Pre-activation layout: BN -> ReLU -> conv, twice.
        self.bn1 = nn.BatchNorm()
        self.conv1 = _conv3x3(channels, stride, in_channels)
        self.bn2 = nn.BatchNorm()
        self.conv2 = _conv3x3(channels, 1, channels)
        if downsample:
            # Projection shortcut: strided 1x1 conv (no BN in V2 blocks).
            self.downsample = nn.Conv2D(channels, 1, stride, use_bias=False,
                                        in_channels=in_channels)
        else:
            # Identity shortcut.
            self.downsample = None
    def hybrid_forward(self, F, x):
        """Return conv2(relu(bn2(conv1(relu(bn1(x)))))) + shortcut."""
        residual = x
        x = self.bn1(x)
        x = F.Activation(x, act_type='relu')
        if self.downsample:
            # The projection consumes the pre-activated tensor, not the
            # raw input, as in the pre-activation paper.
            residual = self.downsample(x)
        x = self.conv1(x)
        x = self.bn2(x)
        x = F.Activation(x, act_type='relu')
        x = self.conv2(x)
        # No activation after the sum in V2.
        return x + residual
class BottleneckV2(HybridBlock):
    r"""Bottleneck V2 from
    `"Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027>`_ paper.
    This is used for ResNet V2 for 50, 101, 152 layers.
    Parameters
    ----------
    channels : int
        Number of output channels.
    stride : int
        Stride size.
    downsample : bool, default False
        Whether to downsample the input.
    in_channels : int, default 0
        Number of input channels. Default is 0, to infer from the graph.
    """
    def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
        super(BottleneckV2, self).__init__(**kwargs)
        # Pre-activation layout: BN -> ReLU -> conv, three times:
        # 1x1 reduce (channels//4) -> 3x3 (strided) -> 1x1 expand.
        self.bn1 = nn.BatchNorm()
        self.conv1 = nn.Conv2D(channels//4, kernel_size=1, strides=1, use_bias=False)
        self.bn2 = nn.BatchNorm()
        self.conv2 = _conv3x3(channels//4, stride, channels//4)
        self.bn3 = nn.BatchNorm()
        self.conv3 = nn.Conv2D(channels, kernel_size=1, strides=1, use_bias=False)
        if downsample:
            # Projection shortcut: strided 1x1 conv (no BN in V2 blocks).
            self.downsample = nn.Conv2D(channels, 1, stride, use_bias=False,
                                        in_channels=in_channels)
        else:
            # Identity shortcut.
            self.downsample = None
    def hybrid_forward(self, F, x):
        """Pre-activation bottleneck forward; shortcut added at the end."""
        residual = x
        x = self.bn1(x)
        x = F.Activation(x, act_type='relu')
        if self.downsample:
            # The projection consumes the pre-activated tensor, not the
            # raw input, as in the pre-activation paper.
            residual = self.downsample(x)
        x = self.conv1(x)
        x = self.bn2(x)
        x = F.Activation(x, act_type='relu')
        x = self.conv2(x)
        x = self.bn3(x)
        x = F.Activation(x, act_type='relu')
        x = self.conv3(x)
        # No activation after the sum in V2.
        return x + residual
# Nets
class ResNetV1(HybridBlock):
    r"""ResNet V1 model from
    `"Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385>`_ paper.
    Parameters
    ----------
    block : HybridBlock
        Class for the residual block. Options are BasicBlockV1, BottleneckV1.
    layers : list of int
        Numbers of layers in each block
    channels : list of int
        Numbers of channels in each block. Length should be one larger than layers list.
    classes : int, default 1000
        Number of classification classes.
    thumbnail : bool, default False
        Enable thumbnail (small-image stem without the 7x7 conv and pooling).
    """
    def __init__(self, block, layers, channels, classes=1000, thumbnail=False, **kwargs):
        super(ResNetV1, self).__init__(**kwargs)
        # channels[0] is the stem width; channels[1:] are the per-stage widths.
        assert len(layers) == len(channels) - 1
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            if thumbnail:
                # Small-image stem: a single 3x3 conv, no pooling.
                self.features.add(_conv3x3(channels[0], 1, 3))
            else:
                # ImageNet stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool.
                self.features.add(nn.Conv2D(channels[0], 7, 2, 3, use_bias=False,
                                            in_channels=3))
                self.features.add(nn.BatchNorm())
                self.features.add(nn.Activation('relu'))
                self.features.add(nn.MaxPool2D(3, 2, 1))
            for i, num_layer in enumerate(layers):
                # Every stage after the first halves the spatial resolution.
                stride = 1 if i == 0 else 2
                self.features.add(self._make_layer(block, num_layer, channels[i+1],
                                                   stride, i+1, in_channels=channels[i]))
            # Classifier head: global average pooling + dense layer.
            self.classifier = nn.HybridSequential(prefix='')
            self.classifier.add(nn.GlobalAvgPool2D())
            self.classifier.add(nn.Flatten())
            self.classifier.add(nn.Dense(classes, in_units=channels[-1]))
    def _make_layer(self, block, layers, channels, stride, stage_index, in_channels=0):
        """Build one stage of `layers` residual blocks."""
        layer = nn.HybridSequential(prefix='stage%d_'%stage_index)
        with layer.name_scope():
            # The first block carries the stride and, when the channel
            # count changes, a projection shortcut.
            layer.add(block(channels, stride, channels != in_channels, in_channels=in_channels,
                            prefix=''))
            for _ in range(layers-1):
                layer.add(block(channels, 1, False, in_channels=channels, prefix=''))
        return layer
    def hybrid_forward(self, F, x):
        """Run the feature extractor then the classifier head."""
        x = self.features(x)
        x = self.classifier(x)
        return x
class ResNetV2(HybridBlock):
    r"""ResNet V2 model from
    `"Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027>`_ paper.
    Parameters
    ----------
    block : HybridBlock
        Class for the residual block. Options are BasicBlockV2, BottleneckV2.
    layers : list of int
        Numbers of layers in each block
    channels : list of int
        Numbers of channels in each block. Length should be one larger than layers list.
    classes : int, default 1000
        Number of classification classes.
    thumbnail : bool, default False
        Enable thumbnail (small-image stem without the 7x7 conv and pooling).
    """
    def __init__(self, block, layers, channels, classes=1000, thumbnail=False, **kwargs):
        super(ResNetV2, self).__init__(**kwargs)
        # channels[0] is the stem width; channels[1:] are the per-stage widths.
        assert len(layers) == len(channels) - 1
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            # Non-affine BatchNorm applied to the raw input.
            self.features.add(nn.BatchNorm(scale=False, center=False))
            if thumbnail:
                # Small-image stem: a single 3x3 conv, no pooling.
                self.features.add(_conv3x3(channels[0], 1, 3))
            else:
                # ImageNet stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool.
                self.features.add(nn.Conv2D(channels[0], 7, 2, 3, use_bias=False,
                                            in_channels=3))
                self.features.add(nn.BatchNorm())
                self.features.add(nn.Activation('relu'))
                self.features.add(nn.MaxPool2D(3, 2, 1))
            in_channels = channels[0]
            for i, num_layer in enumerate(layers):
                # Every stage after the first halves the spatial resolution.
                stride = 1 if i == 0 else 2
                self.features.add(self._make_layer(block, num_layer, channels[i+1],
                                                   stride, i+1, in_channels=in_channels))
                in_channels = channels[i+1]
            # Classifier head: final BN + ReLU (V2 blocks end without an
            # activation), then global average pooling + dense layer.
            self.classifier = nn.HybridSequential(prefix='')
            self.classifier.add(nn.BatchNorm())
            self.classifier.add(nn.Activation('relu'))
            self.classifier.add(nn.GlobalAvgPool2D())
            self.classifier.add(nn.Flatten())
            self.classifier.add(nn.Dense(classes, in_units=in_channels))
    def _make_layer(self, block, layers, channels, stride, stage_index, in_channels=0):
        """Build one stage of `layers` residual blocks."""
        layer = nn.HybridSequential(prefix='stage%d_'%stage_index)
        with layer.name_scope():
            # The first block carries the stride and, when the channel
            # count changes, a projection shortcut.
            layer.add(block(channels, stride, channels != in_channels, in_channels=in_channels,
                            prefix=''))
            for _ in range(layers-1):
                layer.add(block(channels, 1, False, in_channels=channels, prefix=''))
        return layer
    def hybrid_forward(self, F, x):
        """Run the feature extractor then the classifier head."""
        x = self.features(x)
        x = self.classifier(x)
        return x
# Specification: depth -> (block type, blocks per stage, channel widths).
# The channel list has one more entry than the layer list: its first
# element is the stem width, the rest are the four stage widths.
resnet_spec = {18: ('basic_block', [2, 2, 2, 2], [64, 64, 128, 256, 512]),
               34: ('basic_block', [3, 4, 6, 3], [64, 64, 128, 256, 512]),
               50: ('bottle_neck', [3, 4, 6, 3], [64, 256, 512, 1024, 2048]),
               101: ('bottle_neck', [3, 4, 23, 3], [64, 256, 512, 1024, 2048]),
               152: ('bottle_neck', [3, 8, 36, 3], [64, 256, 512, 1024, 2048])}
# Network classes and block-name -> block-class tables, indexed by (version - 1).
resnet_net_versions = [ResNetV1, ResNetV2]
resnet_block_versions = [{'basic_block': BasicBlockV1, 'bottle_neck': BottleneckV1},
                         {'basic_block': BasicBlockV2, 'bottle_neck': BottleneckV2}]
# Constructor
def get_resnet(version, num_layers, pretrained=False, ctx=cpu(), **kwargs):
    r"""ResNet V1 model from `"Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385>`_ paper.
    ResNet V2 model from `"Identity Mappings in Deep Residual Networks"
    <https://arxiv.org/abs/1603.05027>`_ paper.
    Parameters
    ----------
    version : int
        Version of ResNet. Options are 1, 2.
    num_layers : int
        Numbers of layers. Options are 18, 34, 50, 101, 152.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.

    Returns
    -------
    HybridBlock
        The requested ResNet network.

    Raises
    ------
    ValueError
        If ``version`` or ``num_layers`` is not a supported option.
    """
    # Validate explicitly: without these checks an unsupported depth
    # raises an opaque KeyError, and version=0 would silently index -1
    # and return a ResNetV2 instead of failing.
    if num_layers not in resnet_spec:
        raise ValueError(
            "Invalid number of layers: %d. Options are %s." % (
                num_layers, str(sorted(resnet_spec.keys()))))
    if version not in (1, 2):
        raise ValueError(
            "Invalid resnet version: %s. Options are 1 and 2." % str(version))
    block_type, layers, channels = resnet_spec[num_layers]
    resnet_class = resnet_net_versions[version-1]
    block_class = resnet_block_versions[version-1][block_type]
    net = resnet_class(block_class, layers, channels, **kwargs)
    if pretrained:
        # Deferred import keeps model_store off the import path when
        # pretrained weights are not requested.
        from ..model_store import get_model_file
        net.load_params(get_model_file('resnet%d_v%d'%(num_layers, version)), ctx=ctx)
    return net
def resnet18_v1(**kwargs):
    r"""ResNet-18 V1 model from `"Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385>`_ paper.
    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.

    Returns
    -------
    HybridBlock
        The constructed ResNetV1 network.
    """
    return get_resnet(1, 18, **kwargs)
def resnet34_v1(**kwargs):
    r"""ResNet-34 V1 model from `"Deep Residual Learning for Image Recognition"
    <http://arxiv.org/abs/1512.03385>`_ paper.
    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.

    Returns
    -------
    HybridBlock
        The constructed ResNetV1 network.
    """
    return get_resnet(1, 34, **kwargs)
def resnet50_v1(**kwargs):
    r"""Construct a 50-layer ResNet V1 network, from `"Deep Residual
    Learning for Image Recognition" <http://arxiv.org/abs/1512.03385>`_.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    version, num_layers = 1, 50
    return get_resnet(version, num_layers, **kwargs)
def resnet101_v1(**kwargs):
    r"""Construct a 101-layer ResNet V1 network, from `"Deep Residual
    Learning for Image Recognition" <http://arxiv.org/abs/1512.03385>`_.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    version, num_layers = 1, 101
    return get_resnet(version, num_layers, **kwargs)
def resnet152_v1(**kwargs):
    r"""Construct a 152-layer ResNet V1 network, from `"Deep Residual
    Learning for Image Recognition" <http://arxiv.org/abs/1512.03385>`_.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    version, num_layers = 1, 152
    return get_resnet(version, num_layers, **kwargs)
def resnet18_v2(**kwargs):
    r"""Construct an 18-layer ResNet V2 network, from `"Identity Mappings in
    Deep Residual Networks" <https://arxiv.org/abs/1603.05027>`_.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    version, num_layers = 2, 18
    return get_resnet(version, num_layers, **kwargs)
def resnet34_v2(**kwargs):
    r"""Construct a 34-layer ResNet V2 network, from `"Identity Mappings in
    Deep Residual Networks" <https://arxiv.org/abs/1603.05027>`_.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    version, num_layers = 2, 34
    return get_resnet(version, num_layers, **kwargs)
def resnet50_v2(**kwargs):
    r"""Construct a 50-layer ResNet V2 network, from `"Identity Mappings in
    Deep Residual Networks" <https://arxiv.org/abs/1603.05027>`_.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    version, num_layers = 2, 50
    return get_resnet(version, num_layers, **kwargs)
def resnet101_v2(**kwargs):
    r"""Construct a 101-layer ResNet V2 network, from `"Identity Mappings in
    Deep Residual Networks" <https://arxiv.org/abs/1603.05027>`_.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    version, num_layers = 2, 101
    return get_resnet(version, num_layers, **kwargs)
def resnet152_v2(**kwargs):
    r"""Construct a 152-layer ResNet V2 network, from `"Identity Mappings in
    Deep Residual Networks" <https://arxiv.org/abs/1603.05027>`_.

    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    version, num_layers = 2, 152
    return get_resnet(version, num_layers, **kwargs)
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import stat
from oslo_log import log as logging
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.datastore.postgresql.service import PgSqlApp
from trove.guestagent.strategies.backup import base
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
WAL_ARCHIVE_DIR = CONF.postgresql.wal_archive_location
class PgDump(base.BackupRunner):
    """Backup strategy that streams a full logical dump via ``pg_dumpall``."""

    __strategy_name__ = 'pg_dump'

    @property
    def cmd(self):
        # Dump everything, then pipe through the configured compression and
        # encryption stages supplied by the runner.
        dump_cmd = 'sudo -u postgres pg_dumpall '
        return '{}{}{}'.format(dump_cmd, self.zip_cmd, self.encrypt_cmd)
class PgBaseBackupUtil(object):
    """Helpers for locating pg_basebackup artifacts in the WAL archive dir."""

    def most_recent_backup_wal(self, pos=0):
        """
        Return the WAL file for the most recent backup.

        :param pos: index into the reverse-sorted list of .backup files.
        :return: WAL file name string, or None when no backup exists yet.
        """
        mrb = self.most_recent_backup_file(pos=pos)
        if not mrb:
            # No .backup history file exists yet (e.g. before the first base
            # backup completes); the old code crashed calling None.split().
            return None
        return mrb.split(".")[0]

    def most_recent_backup_file(self, pos=0):
        """
        Look for the most recent .backup file that basebackup creates.

        :return: a string like 000000010000000000000006.00000168.backup,
                 or None when no such file exists.
        """
        # Raw string with an escaped dot: the old pattern's bare '.' before
        # 'backup' matched any character.
        walre = re.compile(r"[0-9A-F]{24}.*\.backup")
        backups = sorted(
            (f for f in os.listdir(WAL_ARCHIVE_DIR) if walre.search(f)),
            reverse=True)
        if not backups:
            return None
        return backups[pos]

    def log_files_since_last_backup(self, pos=0):
        """Return the WAL files since the provided last backup
        pg_archivebackup depends on alphanumeric sorting to decide wal order,
        so we'll do so too:
        https://github.com/postgres/postgres/blob/REL9_4_STABLE/contrib
        /pg_archivecleanup/pg_archivecleanup.c#L122
        """
        last_wal = self.most_recent_backup_wal(pos=pos)
        d = os.listdir(WAL_ARCHIVE_DIR)
        # Lazy logging arguments instead of eager string interpolation.
        LOG.info("Using %s for most recent wal file", last_wal)
        LOG.info("wal archive dir contents %s", str(d))
        walre = re.compile(r"^[0-9A-F]{24}$")
        if last_wal is None:
            # No prior backup on record, so every archived WAL segment is
            # "new". NOTE(review): the old code raised TypeError comparing
            # f >= None here -- confirm this fallback matches intent.
            return [f for f in d if walre.search(f)]
        return [f for f in d
                if walre.search(f) and f >= last_wal]
class PgBaseBackup(base.BackupRunner, PgBaseBackupUtil):
    """Base backups are taken with the pg_basebackup filesystem-level backup
    tool pg_basebackup creates a copy of the binary files in the PostgreSQL
    cluster data directory and enough WAL segments to allow the database to
    be brought back to a consistent state. Associated with each backup is a
    log location, normally indicated by the WAL file name and the position
    inside the file.
    """
    __strategy_name__ = 'pg_basebackup'

    def __init__(self, *args, **kwargs):
        self._app = None
        super(PgBaseBackup, self).__init__(*args, **kwargs)
        # Metadata fields; populated by base_backup_metadata() or by the
        # backup commands themselves (see check_process()).
        self.label = None
        self.stop_segment = None
        self.start_segment = None
        self.start_wal_file = None
        self.stop_wal_file = None
        self.checkpoint_location = None
        self.mrb = None

    @property
    def app(self):
        # Lazily construct the PostgreSQL application helper on first use.
        if self._app is None:
            self._app = self._build_app()
        return self._app

    def _build_app(self):
        return PgSqlApp()

    @property
    def cmd(self):
        # NOTE(review): -h is given the run directory; for PostgreSQL, -h
        # with a path selects a Unix-socket directory -- presumably intended.
        cmd = "pg_basebackup -h %s -U %s --pgdata=-" \
              " --label=%s --format=tar --xlog " % \
              (self.app.pgsql_run_dir, self.app.ADMIN_USER,
               self.base_filename)
        return cmd + self.zip_cmd + self.encrypt_cmd

    def base_backup_metadata(self, f):
        """Parse the contents of the .backup file.

        Stores the parsed fields on self and returns them as a dict.
        """
        meta = {}
        # Make the history file world-readable so we can open it.
        operating_system.chmod(f, FileMode(add=[stat.S_IROTH]), as_root=True)
        # Raw strings: '\(' in a plain string literal is an invalid escape
        # sequence (SyntaxWarning on modern Python).
        start_re = re.compile(r"START WAL LOCATION: (.*) \(file (.*)\)")
        stop_re = re.compile(r"STOP WAL LOCATION: (.*) \(file (.*)\)")
        checkpt_re = re.compile(r"CHECKPOINT LOCATION: (.*)")
        label_re = re.compile(r"LABEL: (.*)")
        with open(f, 'r') as base_metadata:
            lines = "\n".join(base_metadata.readlines())
            match = start_re.search(lines)
            if match:
                self.start_segment = meta['start-segment'] = match.group(1)
                self.start_wal_file = meta['start-wal-file'] = match.group(2)
            match = stop_re.search(lines)
            if match:
                self.stop_segment = meta['stop-segment'] = match.group(1)
                self.stop_wal_file = meta['stop-wal-file'] = match.group(2)
            match = checkpt_re.search(lines)
            if match:
                self.checkpoint_location \
                    = meta['checkpoint-location'] = match.group(1)
            match = label_re.search(lines)
            if match:
                self.label = meta['label'] = match.group(1)
        return meta

    def check_process(self):
        """If any of the below variables were not set by either metadata()
        or direct retrieval from the pgsql backup commands, then something
        has gone wrong
        """
        if not self.start_segment or not self.start_wal_file:
            LOG.info("Unable to determine starting WAL file/segment")
            return False
        if not self.stop_segment or not self.stop_wal_file:
            LOG.info("Unable to determine ending WAL file/segment")
            return False
        if not self.label:
            LOG.info("No backup label found")
            return False
        return True

    def metadata(self):
        """pg_basebackup may complete, and we arrive here before the
        history file is written to the wal archive. So we need to
        handle two possibilities:
        - this is the first backup, and no history file exists yet
        - this isn't the first backup, and so the history file we retrieve
        isn't the one we just ran!
        """
        def _metadata_found():
            # Keep polling until the history file for *our* label appears.
            LOG.debug("Polling for backup metadata... ")
            self.mrb = self.most_recent_backup_file()
            if not self.mrb:
                LOG.debug("No history files found!")
                return False
            meta = self.base_backup_metadata(
                os.path.join(WAL_ARCHIVE_DIR, self.mrb))
            # Lazy logging arguments instead of eager interpolation.
            LOG.debug("Label to pg_basebackup: %s label found: %s",
                      self.base_filename, meta['label'])
            LOG.info(_("Metadata for backup: %s."), str(meta))
            return meta['label'] == self.base_filename

        try:
            utils.poll_until(_metadata_found, sleep_time=5, time_out=60)
        except exception.PollTimeOut:
            raise RuntimeError(_("Timeout waiting for backup metadata for"
                                 " backup %s") % self.base_filename)
        return self.base_backup_metadata(
            os.path.join(WAL_ARCHIVE_DIR, self.mrb))

    def _run_post_backup(self):
        """Get rid of WAL data we don't need any longer"""
        arch_cleanup_bin = os.path.join(self.app.pgsql_extra_bin_dir,
                                        "pg_archivecleanup")
        f = os.path.basename(self.most_recent_backup_file())
        cmd_full = " ".join((arch_cleanup_bin, WAL_ARCHIVE_DIR, f))
        out, err = utils.execute("sudo", "su", "-", self.app.pgsql_owner,
                                 "-c", "%s" % cmd_full)
class PgBaseBackupIncremental(PgBaseBackup):
    """To restore an incremental backup from a previous backup, in PostgreSQL,
    is effectively to replay the WAL entries to a designated point in time.
    All that is required is the most recent base backup, and all WAL files
    """

    def __init__(self, *args, **kwargs):
        # Lazy logging arguments instead of eager string interpolation.
        LOG.info("Incr instantiated with args/kwargs %s %s ",
                 str(args), str(kwargs))
        # An incremental backup is meaningless without a parent backup.
        if not kwargs.get('parent_location'):
            raise AttributeError('Parent missing!')
        super(PgBaseBackupIncremental, self).__init__(*args, **kwargs)
        self.parent_location = kwargs.get('parent_location')
        self.parent_checksum = kwargs.get('parent_checksum')

    def _run_pre_backup(self):
        # Bracket the backup window so we know which WAL segments belong
        # to this incremental run.
        self.backup_label = self.base_filename
        self.start_segment = self.app.pg_start_backup(self.backup_label)
        self.start_wal_file = self.app.pg_xlogfile_name(self.start_segment)
        self.stop_segment = self.app.pg_stop_backup()
        # We have to hack this because self.command is
        # initialized in the base class before we get here, which is
        # when we will know exactly what WAL files we want to archive
        self.command = self._cmd()

    def _cmd(self):
        # TODO(atomic77) Store the list of files in a var but this should
        # be written to a file to ensure we don't get into cmd line
        # overflow issues when this file list gets bigger
        wal_file_list = self.log_files_since_last_backup(pos=1)
        LOG.info("Got wal file list: %s", str(wal_file_list))
        c = 'sudo tar -cf - -C {wal_dir} {wal_list} '.format(
            wal_dir=WAL_ARCHIVE_DIR,
            wal_list=" ".join(wal_file_list))
        return c + self.zip_cmd + self.encrypt_cmd

    def metadata(self):
        """Extend the base metadata with the parent backup's identity."""
        _meta = super(PgBaseBackupIncremental, self).metadata()
        LOG.info(_("Metadata grabbed from super class: %s."), str(_meta))
        _meta.update({
            'parent_location': self.parent_location,
            'parent_checksum': self.parent_checksum,
        })
        LOG.info("Returning metadata for incr: %s", str(_meta))
        return _meta
|
|
#Copyright 2016 Tim Wentlau.
#Distributed under the MIT License. See LICENSE in root of project.
"""
Sensor handling in Kervi is split in two parts.
The first part is sensor device drivers that handles physical access to sensor hardware.
The second part is the Sensor class that reads a sensor device and triggers events, store readings to DB.
"""
import time
from kervi.controllers import Controller
from kervi.core.utility.thread import KerviThread
from kervi.spine import Spine
from kervi.values import NumberValue, ColorValue, KerviValue
from kervi.config import Configuration
#from kervi.actions import action
#from kervi.settings import Settings
class Sensor(Controller):
    r"""
    Sensor is a class that exposes a sensor device as a KerviValue.
    The Sensor class polls the associated sensor device and updates it self when
    the value of the sensor device change.
    It is possible to link to other KerviValues and dashboards.
    Some sensor devices are multi dimensional and each dimension are reached by a numeric index of the sensor it self.

    :param sensor_id:
        Id of the sensor. This id is never displayed it is used to reference the sensor in code.
    :type sensor_id: ``str``

    :param name:
        Name of the sensor.
    :type name: ``str``

    :param device:
        The sensor device that should be monitored. Could be one of the sensors from the kervi device library
        or a sensor device driver that inherits from kervi.hal.SensorDeviceDriver
    :type device: ``SensorDeviceDriver``

    :Keyword Arguments:
        * **polling_interval** (``float``) -- Polling interval in seconds. Zero disables polling.
        * **use_thread** (``bool``) -- Poll via an internal thread (default True).
    """
    def __init__(self, sensor_id, name, device=None, **kwargs):
        Controller.__init__(self, sensor_id, name, **kwargs)
        # NOTE(review): although *device* defaults to None, the code below
        # dereferences it unconditionally -- a device is effectively required.
        self._device = device
        self._sub_sensors = []
        self._dimensions = 1
        # index >= 0 marks this instance as the sub-sensor for one dimension
        # of a multi dimensional device; -1 means top-level sensor.
        self._index = kwargs.pop("index", -1)
        self._enabled = None
        # Pop use_thread so sub-sensor construction below cannot receive it
        # twice (explicitly and via **kwargs), which raised TypeError before.
        self._use_thread = kwargs.pop("use_thread", True)
        if device.value_type == "color":
            # NOTE(review): the color branch registers the output under the
            # literal id "value" while the number branch uses sensor_id --
            # looks inconsistent; confirm before changing.
            self._sensor_value = self.outputs.add("value", name, ColorValue)
        elif device.value_type == "number":
            self._sensor_value = self.outputs.add(sensor_id, name, NumberValue)
        else:
            raise ValueError("Can not handle device value type: " + device.value_type)
        if self._device:
            # Mirror the device metadata onto the exposed value.
            self.value_type = self._device.type
            self.value_unit = self._device.unit
            self._sensor_value.type = self._device.type
            self._sensor_value.unit = self._device.unit
            self._sensor_value.min = self._device.min
            self._sensor_value.max = self._device.max
            if self._index == -1:
                self._dimensions = self._device.dimensions
                self._dimension_labels = self._device.dimension_labels
            if self._dimensions > 1:
                # Create one child Sensor per dimension; they share this
                # device and are fed by this instance, so no own threads.
                count = 0
                for label in self._dimension_labels:
                    sub_sensor = Sensor(
                        self.component_id + "." + label,
                        label,
                        self._device,
                        use_thread=False,
                        index=count,
                        **kwargs
                    )
                    sub_sensor.value_unit = self.value_unit
                    self._sub_sensors += [
                        sub_sensor
                    ]
                    count += 1
        if self._use_thread:
            self._sensor_thread = _SensorThread(self, kwargs.get("polling_interval", 1))
        else:
            self._sensor_thread = None

    def __getitem__(self, sub_sensor):
        # A one dimensional sensor is its own (only) dimension.
        if self._dimensions == 1:
            return self
        return self._sub_sensors[sub_sensor]

    @property
    def enabled(self):
        # _enabled is tri-state: None (not started yet), True or False.
        return self._enabled == True

    @enabled.setter
    def enabled(self, value):
        self._enabled = value
        if self._dimensions > 1:
            for dimension in range(0, self._dimensions):
                # Bug fix: previously assigned the non-existent ``enable``
                # attribute, silently leaving sub-sensors untouched.
                self._sub_sensors[dimension].enabled = value

    @property
    def polling_interval(self):
        """
        The polling interval of the sensor in seconds.

        :type: ``float``
        """
        return self._sensor_thread.reading_interval

    @polling_interval.setter
    def polling_interval(self, interval):
        # Bug fix: the setter function was named ``reading_interval`` which
        # left ``polling_interval`` itself read-only.
        self._sensor_thread.reading_interval = interval

    # Backwards compatible alias: older code used ``sensor.reading_interval``.
    reading_interval = polling_interval

    @property
    def sensor_id(self):
        # Alias for the underlying component id.
        return self.component_id

    @property
    def device(self):
        """
        The device that is linked to this sensor class.
        It may be a hardware device from the kervi device library
        or a class that inherits from kervi.hal.SensorDeviceDriver
        """
        return self._device

    @device.setter
    def device(self, device):
        self._device = device

    def link_to_dashboard(self, dashboard_id=None, panel_id=None, **kwargs):
        r"""
        Links the sensor to a dashboard.

        :param dashboard_id: Id of the dashboard to link to.
            Enter a * if the sensor should be linked to all dashboards.
        :type dashboard_id: ``str``

        :param panel_id: Id of the panel to link to.
            This is the id of a panel you have added your self to a dashboard or one of the
            system panels *sys-header*, *header* or *footer*
        :type panel_id: ``str``

        :Keyword Arguments:
            * **link_to_header** (``str``) -- Link this input to header of the panel.
            * **label_icon** (``str``) -- Icon that should be displayed together with label. All Font Awesome icons are valid just enter the name of the icon without *fa-*
            * **label** (``str``) -- Label text, default value is the name of the sensor.
            * **flat** (``bool``) -- Flat look and feel.
            * **inline** (``bool``) -- Display value, sparkline and label in its actual size otherwise it occupys the entire with of the panel
            * **type** (``str``) -- One of the following values *radial_gauge*, *vertical_gauge*, *horizontal_gauge*, *chart* or *value*.
            * **show_sparkline** (``bool``) -- Show a sparkline next to the value.
            * **icon** (``bool``) -- Icon to show. All Font Awesome icons are valid just enter the name of the icon without *fa-*.
            * **show_value** (``bool``) -- Show the numeric value and unit.
        """
        if self._dimensions == 1:
            self._sensor_value.link_to_dashboard(dashboard_id, panel_id, **kwargs)
        else:
            # Multi dimensional sensors link each dimension individually.
            for dimension in range(0, self._dimensions):
                self._sub_sensors[dimension].link_to_dashboard(dashboard_id, panel_id, **kwargs)

    def controller_start(self):
        # Enable on first start unless the application already chose a state.
        if self._enabled is None:
            self.enabled = True

    def _new_sensor_reading(self, sensor_value):
        """
        Call this method to signal a new sensor reading.
        This method handles DB storage and triggers different events.

        :param sensor_value:
            New value to be stored in the system.
        """
        if not self._active and not self._enabled:
            return
        if self._dimensions > 1:
            # Fan a multi dimensional reading out to per-dimension sensors.
            for dimension in range(0, self._dimensions):
                value = sensor_value[dimension]
                self._sub_sensors[dimension]._new_sensor_reading(value)
        else:
            self._sensor_value.value = sensor_value

    def _read_sensor(self):
        # Called by _SensorThread on every polling tick.
        self._new_sensor_reading(self._device.read_value())

    @property
    def log_values(self):
        """
        Set to true if the values should be logged to DB.
        If false Kervi will hold a small cache in memory
        for the last reading to be used in sparklines and real time charts.
        """
        return self._sensor_value._log_values

    @log_values.setter
    def log_values(self, value):
        self._sensor_value._log_values = value

    def add_value_event(self, event_value, func, event_type=None, parameters=None, **kwargs):
        """
        Add a function that is called when the value reach or pass the event_value.

        :param event_value:
            A single value or range specified as a tuple.
            If it is a range the function specified in func is called when the value enters the range.
        :type event_value: ``float``, ``string``, ``boolean`` or a tuple of these types.

        :param func:
            Function or lambda expression to be called.
            This function will receive the dynamcic value as a parameter.

        :param event_type:
            String with the value "warning" of "error" or None (default).
            If warning or error is specified the value or range are shown in UI.
        :type event_type: ``str``
        """
        self._sensor_value.add_value_event(event_value, func, event_type, parameters, **kwargs)

    def add_normal_range(self, value, message=None, func=None, **kwargs):
        # Range events delegate to the sensor value's range handler.
        self._sensor_value.add_value_event(value, self._sensor_value._handle_range_event, parameters=[message, func, 3], **kwargs)

    def add_warning_range(self, value, message=None, func=None, **kwargs):
        self._sensor_value.add_value_event(value, self._sensor_value._handle_range_event, event_type="warning", parameters=[message, func, 2], **kwargs)

    def add_error_range(self, value, message=None, func=None, **kwargs):
        self._sensor_value.add_value_event(value, self._sensor_value._handle_range_event, event_type="error", parameters=[message, func, 1], **kwargs)

    @property
    def delta(self):
        """
        Enter how much a the value should change before it triggers changes events and updates links.

        :type: ``float``
        """
        return self._sensor_value.delta

    @delta.setter
    def delta(self, value):
        self._sensor_value.delta = value

    @property
    def value(self):
        # Latest reading exposed through the underlying KerviValue.
        return self._sensor_value.value

    @property
    def max(self):
        """
        Maximum value.

        :type: ``float``
        """
        return self._sensor_value.max_value

    @max.setter
    def max(self, value):
        self._sensor_value.max_value = value

    @property
    def min(self):
        """
        Minimum value.

        :type: ``float``
        """
        return self._sensor_value.min_value

    @min.setter
    def min(self, value):
        self._sensor_value.min_value = value

    @property
    def unit(self):
        """
        Metric Unit of value.

        :type: ``str``
        """
        return self._sensor_value.unit

    @unit.setter
    def unit(self, value):
        self._sensor_value.unit = value

    @property
    def display_unit(self):
        """
        Display unit of value.

        :type: ``str``
        """
        return self._sensor_value.display_unit

    @display_unit.setter
    def display_unit(self, value):
        self._sensor_value.display_unit = value

    def set_ui_parameter(self, name, value):
        # Pass UI hints straight through to the underlying value.
        self._sensor_value.set_ui_parameter(name, value)
class _SensorThread(KerviThread):
    r"""
    Internal polling thread used by :class:`Sensor`.

    Repeatedly reads each attached sensor and sleeps *reading_interval*
    seconds between sweeps.

    :param sensors:
        A single sensor or a list of sensors to poll.

    :param reading_interval:
        Seconds to wait between polling sweeps.
    :type reading_interval: ``float``
    """
    def __init__(self, sensors, reading_interval=1):
        KerviThread.__init__(self)
        self.spine = Spine()
        if self.spine:
            # Start/stop together with the rest of the application's threads.
            self.spine.register_command_handler("startThreads", self._start_command)
            self.spine.register_command_handler("stopThreads", self._stop_command)
        self.alive = False
        self.reading_interval = reading_interval
        # Normalize to a list so _step can always iterate.
        self.sensors = sensors if hasattr(sensors, "__len__") else [sensors]

    def new_sensor_reading(self, value, sensor_idx=0):
        # Forward an externally produced reading to one of the sensors.
        self.sensors[sensor_idx]._new_sensor_reading(value)

    def _step(self):
        # One polling sweep: read every sensor, run the hook, then wait.
        for item in self.sensors:
            item._read_sensor()
        self.sensor_step()
        time.sleep(self.reading_interval)

    def sensor_step(self):
        # Hook for subclasses; intentionally a no-op here.
        pass

    def _start_command(self):
        if self.alive:
            return
        self.alive = True
        KerviThread.start(self)

    def _stop_command(self):
        if not self.alive:
            return
        self.alive = False
        self.stop()
|
|
#!/usr/bin/env python3
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Kickstart helper functions used to build kickstart files."""
import logging
import os
from string import Template
class RepoString(object):
    """Creates a yum.conf repository section statement for a kickstart file.

    See the yum.conf man pages for more information about formatting
    requirements.

    The attributes listed are the minimum data set for a repo section.

    Attributes:
      head: The header that should be used for the repo section.
      name: The name as it will appear in yum.
      baseurl: The url for the repo.
      enabled: Set to 1 to enable.
      gpgcheck: Set to 1 to enable.
      repo_gpgcheck: Set to 0 to disable.
      gpgkey: URLs pointing to GPG keys.
    """

    url_root = 'https://packages.cloud.google.com/yum/repos'

    gpgkey_list = [
        'https://packages.cloud.google.com/yum/doc/yum-key.gpg',
        'https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg'
    ]

    # New repos should be added here. Create a dict for your repo below.
    # This dict should contain the following:
    # head: The header that should be used for the repo section.
    # name: The name as it will appear in yum.
    # url_branch: This is combined with url_root (defined in the class) and
    # repo_version to create the repo's baseurl. You must include a string
    # formatter '%s' to place the repo_version in your URL.
    # e.g. /google-compute-engine-%s-x86_64-unstable
    # filename: This is the location the yum.conf section file will live on the
    # image.
    repodict = {
        'stable': {
            'head': '[google-compute-engine]',
            'name': 'Google Compute Engine',
            'url_branch': '/google-compute-engine-%s-x86_64-stable',
            'filename': '/etc/yum.repos.d/google-cloud.repo'
        },
        'sdk': {
            'head': '[google-cloud-sdk]',
            'name': 'Google Cloud SDK',
            'url_branch': '/cloud-sdk-%s-x86_64',
            'filename': '/etc/yum.repos.d/google-cloud.repo'
        },
        'unstable': {
            'head': '[google-compute-engine-unstable]',
            'name': 'Google Compute Engine Unstable',
            'url_branch': '/google-compute-engine-%s-x86_64-unstable',
            'filename': '/etc/yum.repos.d/google-cloud-unstable.repo'
        },
        'staging': {
            'head': '[google-compute-engine-staging]',
            'name': 'Google Compute Engine Staging',
            'url_branch': '/google-compute-engine-%s-x86_64-staging',
            'filename': '/etc/yum.repos.d/google-cloud-staging.repo'
        }
    }

    def __init__(self, repo_version, repo):
        """Initializes RepoString with attributes passes as arguments.

        Args:
          repo_version: string; expects 'el7', 'el8'.

          repo: string; used to specify which dict in repodict to use to assemble
                the yum.conf repo segment.

        Returns:
          An initialized RepoString object.
        """
        super().__init__()
        self.repo = repo
        self.repo_version = repo_version
        # Assemble the section fields once; __str__ renders them in order.
        self.yumseg = {
            'head': self.repodict[self.repo]['head'],
            'name': self.repodict[self.repo]['name'],
            'baseurl': self.GetBaseURL(self.repodict[self.repo]['url_branch']),
            'enabled': '1',
            'gpgcheck': '1',
            'repo_gpgcheck': '0',
            'gpgkey': self.gpgkey_list,
        }

    def __str__(self):
        """Render the repo section as a shell 'tee' heredoc statement.

        Returns:
          string; a 'tee -a <filename> << EOM ... EOM' statement appending
          the yum.conf repository section to the target file.
        """
        # Fixed field order required by the yum.conf section format.
        keylist = ['head',
                   'name',
                   'baseurl',
                   'enabled',
                   'gpgcheck',
                   'repo_gpgcheck',
                   'gpgkey']
        yum_repo_list = (
            [('tee -a %s << EOM' % self.repodict[self.repo]['filename']), ])
        for key in keylist:
            if key == 'head':
                # The section header carries no 'key=' prefix.
                yum_repo_list.append(self.yumseg[key])
            elif key == 'gpgkey':
                # Multiple keys continue on indented lines.
                yum_repo_list.append('%s=%s' %
                                     (key, '\n '.join(self.gpgkey_list)))
            else:
                yum_repo_list.append('%s=%s' % (key, self.yumseg[key]))
        yum_repo_list.append('EOM')
        return '\n'.join(yum_repo_list)

    def GetBaseURL(self, url_branch):
        """Assembles the baseurl attribute of RepoString.

        Processes the string formatting in url_branch then combines it with
        url_root to create the baseurl.

        Args:
          url_branch: string; this is combined with url_root and repo_version to
                      create the repo's baseurl. You must include a string
                      formatter '%s' to place the repo_version in your URL.
                      e.g. /google-compute-engine-%s-x86_64-unstable

        Returns:
          string; baseurl
        """
        return self.url_root + (url_branch % self.repo_version)
def BuildKsConfig(release, google_cloud_repo, byos, sap):
    """Builds kickstart config from shards.

    Args:
      release: string; image from metadata.
      google_cloud_repo: string; expects 'stable', 'unstable', or 'staging'.
      byos: bool; true if using a BYOS RHEL license.
      sap: bool; true if building RHEL for SAP.

    Returns:
      string; a valid kickstart config.
    """
    ks_post = []
    major = 0
    minor = 0
    rhel = release.startswith('rhel')
    # Minor-pinned releases look like 'rhel-7-9' / 'rhel-8-4'.
    if release.startswith('rhel-7-') or release.startswith('rhel-8-'):
        # Take everything after the second dash so two-digit minors
        # ('rhel-8-10') survive; the old release[-1] kept only one digit.
        minor = release.split('-', 2)[2]
    if release.startswith('rhel-7') or release.startswith('centos-7'):
        major = 7
    if (release.startswith('rhel-8') or release.startswith('centos-8')
        or release.startswith('centos-stream-8')
        or release.startswith('almalinux-8')
        or release.startswith('rocky-linux-8')):
        major = 8
    # NOTE(review): an unrecognized release leaves major == 0 and the code
    # below will look for 'el0' shards -- confirm callers only pass
    # supported releases.
    el_version = f'el{major}'
    # Options and packages.
    if rhel:
        ks_options = FetchConfigPart(f'rhel-{major}-options.cfg')
    else:
        ks_options = FetchConfigPart(f'{release}-options.cfg')
    ks_packages = FetchConfigPart(f'{el_version}-packages.cfg')
    # Repos post.
    ks_post.append(BuildReposPost(el_version, google_cloud_repo))
    # RHEL specific posts.
    if rhel:
        pkg = 'yum' if major == 7 else 'dnf'
        # Why do we only do this for SAP? Does the ordering matter?
        # Minor version post.
        if sap and minor:
            templ = Template(FetchConfigPart('rhel-minor-post.cfg'))
            ks_post.append(templ.substitute(pkg=pkg, minor=minor, major=major))
        # RHEL common post.
        templ = Template(FetchConfigPart('rhel-post.cfg'))
        majors = f'{major}-sap' if sap else major
        ks_post.append(templ.substitute(pkg=pkg, major=majors))
        # SAP post.
        if sap:
            ks_post.append(FetchConfigPart(f'rhel-{major}-sap-post.cfg'))
    # Common posts.
    ks_post.append(FetchConfigPart(f'{el_version}-post.cfg'))
    # RHEL BYOS post is a cleanup action and has to be after the common post.
    if byos:
        ks_post.append(FetchConfigPart('rhel-byos-post.cfg'))
    # Common cleanup post.
    ks_post.append(FetchConfigPart('cleanup.cfg'))
    ks_file = [ks_options, ks_packages, "\n".join(ks_post)]
    logging.info("Kickstart file: \n%s", ks_file)
    # Return the joined kickstart file as a string.
    return "\n".join(ks_file)
def BuildReposPost(repo_version, google_cloud_repo):
    """Creates a kickstart post macro with repos needed by GCE.

    Args:
      repo_version: string; expects 'el7', or 'el8'.
      google_cloud_repo: string; expects 'stable', 'unstable', or 'staging'

    Returns:
      string; a complete %post macro that can be added to a kickstart file,
      consisting of one 'tee -a ... << EOM' repo section per selected repo.
      The values for enabled, gpgcheck, repo_gpgcheck, and gpgkey are
      constants; head, name, and baseurl come from RepoString's repodict.
    """
    # Every image gets the stable compute repo; EL7/EL8 images also get the
    # Cloud SDK repo. The unstable and staging repos are opt-in via
    # google_cloud_repo.
    repolist = ['stable']
    if repo_version in ('el7', 'el8'):
        repolist.append('sdk')
    if google_cloud_repo == 'unstable':
        repolist.append('unstable')
    if google_cloud_repo == 'staging':
        repolist.append('staging')
    sections = [str(RepoString(repo_version, repo)) for repo in repolist]
    return '\n'.join(['%post'] + sections + ['%end'])
def FetchConfigPart(config_file):
    """Read one kickstart shard from the files/kickstart directory.

    Args:
        config_file: string; name of a kickstart file shard located in
            the 'kickstart' directory.

    Returns:
        string; the shard's contents, newlines included.
    """
    shard_path = os.path.join('files', 'kickstart', config_file)
    with open(shard_path) as handle:
        return handle.read()
|
|
#
# Handler library for Linux IaaS
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
JSON def:
HandlerEnvironment.json
[{
"name": "ExampleHandlerLinux",
"seqNo": "seqNo",
"version": "1.0",
"handlerEnvironment": {
"logFolder": "<your log folder location>",
"configFolder": "<your config folder location>",
"statusFolder": "<your status folder location>",
"heartbeatFile": "<your heartbeat file location>",
}
}]
Example ./config/1.settings
"{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"1BE9A13AA1321C7C515EF109746998BAB6D86FD1","protectedSettings":
"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/Xv1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==","publicSettings":{"port":"3000"}}}]}"
Example HeartBeat
{
"version": 1.0,
"heartbeat" : {
"status": "ready",
"code": 0,
"Message": "Sample Handler running. Waiting for a new configuration from user."
}
}
Example Status Report:
[{"version":"1.0","timestampUTC":"2014-05-29T04:20:13Z","status":{"name":"Chef Extension Handler","operation":"chef-client-run","status":"success","code":0,"formattedMessage":{"lang":"en-US","message":"Chef-client run success"}}}]
"""
import fnmatch
import glob
import os
import os.path
import re
import shutil
import string
import subprocess
import sys
import imp
import base64
import json
import tempfile
import time
from Common import *
from os.path import join
from Utils.WAAgentUtil import waagent
from waagent import LoggerInit
import logging
import logging.handlers
DateTimeFormat = "%Y-%m-%dT%H:%M:%SZ"
class HandlerContext:
    """Per-run state of the extension handler (name, version, paths, seq no).

    Additional attributes (_config_dir, _seq_no, _status_file, ...) are
    attached later by HandlerUtility.try_parse_context().
    """

    def __init__(self, name):
        # Version starts at a placeholder; it is overwritten once
        # HandlerEnvironment.json has been parsed.
        self._name = name
        self._version = '0.0'
class HandlerUtility:
    """Utility layer between the Azure disk-encryption extension and waagent.

    Responsibilities visible here: parse HandlerEnvironment.json and the
    N.settings config files, track/persist sequence numbers ('mrseq'),
    decrypt protectedSettings, write status reports, and archive/restore
    old configuration files.

    NOTE(review): this class is Python 2 only -- it uses dict.has_key(),
    basestring, and relies on filter() over a str returning a str.
    """

    def __init__(self, log, error, short_name):
        # log/error are callables (normally waagent.Log / waagent.Error);
        # they are rebound to waagent's logger in _change_log_file().
        self._log = log
        self._error = error
        self._short_name = short_name
        # Filled in later by the handler's main script.
        self.patching = None
        self.disk_util = None
        self.find_last_nonquery_operation = False
        self.config_archive_folder = '/var/lib/azure_disk_encryption_archive'

    def _get_log_prefix(self):
        # Every log line is prefixed "[name-version]".
        return '[%s-%s]' % (self._context._name, self._context._version)

    def _get_current_seq_no(self, config_folder):
        """Return the sequence number of the most recently modified
        *.settings file under config_folder, or -1 if none exists."""
        seq_no = -1
        cur_seq_no = -1
        freshest_time = None
        for subdir, dirs, files in os.walk(config_folder):
            for file in files:
                try:
                    if file.endswith('.settings'):
                        cur_seq_no = int(os.path.basename(file).split('.')[0])
                        if freshest_time == None:
                            freshest_time = os.path.getmtime(join(config_folder, file))
                            seq_no = cur_seq_no
                        else:
                            current_file_m_time = os.path.getmtime(join(config_folder, file))
                            if current_file_m_time > freshest_time:
                                freshest_time = current_file_m_time
                                seq_no = cur_seq_no
                except ValueError:
                    # File name was not "<number>.settings"; ignore it.
                    continue
        return seq_no

    def get_last_seq(self):
        """Return the sequence number persisted in 'mrseq' (the last one
        this handler fully processed), or -1 if not recorded."""
        if os.path.isfile('mrseq'):
            seq = waagent.GetFileContents('mrseq')
            if seq:
                return int(seq)
        return -1

    def get_latest_seq(self):
        """Return the highest sequence number among the *.settings files.

        NOTE(review): max() raises ValueError when no settings file
        exists -- callers appear to assume at least one is present.
        """
        settings_files = glob.glob(os.path.join(self._context._config_dir, '*.settings'))
        settings_files = [os.path.basename(f) for f in settings_files]
        seq_nums = [int(re.findall(r'(\d+)\.settings', f)[0]) for f in settings_files]
        return max(seq_nums)

    def get_current_seq(self):
        # _seq_no is stored as a string; normalize to int for comparison.
        return int(self._context._seq_no)

    def same_seq_as_last_run(self):
        # True when the current sequence number matches the persisted one.
        return self.get_current_seq() == self.get_last_seq()

    def exit_if_same_seq(self, exit_status=None):
        """Exit(0) if the current sequence number was already processed.

        If exit_status (dict with operation/status/status_code/message)
        is given, a final status report is written before exiting.
        """
        current_seq = int(self._context._seq_no)
        last_seq = self.get_last_seq()
        if current_seq == last_seq:
            self.log("the sequence numbers are same, so skipping daemon"+
                     ", current=" +
                     str(current_seq) +
                     ", last=" +
                     str(last_seq))
            if exit_status:
                self.do_status_report(exit_status['operation'],
                                      exit_status['status'],
                                      exit_status['status_code'],
                                      exit_status['message'])
            sys.exit(0)

    def log(self, message):
        self._log(self._get_log_prefix() + ': ' + message)

    def error(self, message):
        self._error(self._get_log_prefix() + ': ' + message)

    def _parse_config(self, ctxt):
        """Parse a .settings JSON string; decrypt protectedSettings in place.

        Returns the parsed config dict with handlerSettings
        ['protectedSettings'] replaced by the decrypted JSON object, or
        None when ctxt is not valid JSON.
        """
        config = None
        try:
            config = json.loads(ctxt)
        except:
            self.error('JSON exception decoding ' + ctxt)
        if config == None:
            self.error("JSON error processing settings file:" + ctxt)
        else:
            handlerSettings = config['runtimeSettings'][0]['handlerSettings']
            # Python 2 only: dict.has_key() was removed in Python 3.
            if handlerSettings.has_key('protectedSettings') and \
                    handlerSettings.has_key("protectedSettingsCertThumbprint") and \
                    handlerSettings['protectedSettings'] is not None and \
                    handlerSettings["protectedSettingsCertThumbprint"] is not None:
                protectedSettings = handlerSettings['protectedSettings']
                thumb = handlerSettings['protectedSettingsCertThumbprint']
                # The guest agent drops the certificate/key pair for this
                # thumbprint into its lib directory.
                cert = waagent.LibDir + '/' + thumb + '.crt'
                pkey = waagent.LibDir + '/' + thumb + '.prv'
                # Write the encrypted blob to a temp file so the shell
                # pipeline below can read it.
                f = tempfile.NamedTemporaryFile(delete=False)
                f.close()
                waagent.SetFileContents(f.name, config['runtimeSettings'][0]['handlerSettings']['protectedSettings'])
                cleartxt = None
                # base64-decode, then S/MIME-decrypt with the private key.
                cleartxt = waagent.RunGetOutput(self.patching.base64_path + " -d " + f.name + " | " + self.patching.openssl_path + " smime -inform DER -decrypt -recip " + cert + " -inkey " + pkey)[1]
                if cleartxt == None:
                    self.error("OpenSSh decode error using thumbprint " + thumb)
                    # NOTE(review): missing 'self.' before do_exit -- this
                    # path would raise NameError instead of exiting cleanly.
                    do_exit(1, self.operation,'error','1', self.operation + ' Failed')
                jctxt = ''
                try:
                    jctxt = json.loads(cleartxt)
                except:
                    self.error('JSON exception decoding ' + cleartxt)
                handlerSettings['protectedSettings'] = jctxt
                self.log('Config decoded correctly.')
        return config

    def do_parse_context(self, operation):
        """Parse the handler environment and settings for *operation*;
        report success and exit when no settings file is found."""
        self.operation = operation
        _context = self.try_parse_context()
        if not _context:
            self.log("no settings file found")
            self.do_exit(0,
                         'QueryEncryptionStatus',
                         CommonVariables.extension_success_status,
                         str(CommonVariables.success),
                         'No operation found, find_last_nonquery_operation={0}'.format(self.find_last_nonquery_operation))
        return _context

    def try_parse_context(self):
        """Populate self._context from HandlerEnvironment.json plus the
        newest .settings file.

        When find_last_nonquery_operation is set, sequence numbers are
        walked downwards until a non-QueryEncryptionStatus operation is
        found. Returns the HandlerContext, or None on any failure.
        """
        self._context = HandlerContext(self._short_name)
        handler_env = None
        config = None
        ctxt = None
        code = 0
        # get the HandlerEnvironment.json. According to the extension handler
        # spec, it is always in the ./ directory
        self.log('cwd is ' + os.path.realpath(os.path.curdir))
        handler_env_file = './HandlerEnvironment.json'
        if not os.path.isfile(handler_env_file):
            self.error("Unable to locate " + handler_env_file)
            return None
        ctxt = waagent.GetFileContents(handler_env_file)
        if ctxt == None :
            self.error("Unable to read " + handler_env_file)
        try:
            handler_env = json.loads(ctxt)
        except:
            pass
        if handler_env == None :
            self.log("JSON error processing " + handler_env_file)
            return None
        # The file may contain a one-element list wrapping the object.
        if type(handler_env) == list:
            handler_env = handler_env[0]
        self.log("Parsing context, find_last_nonquery_operation={0}".format(self.find_last_nonquery_operation))
        self._context._name = handler_env['name']
        self._context._version = str(handler_env['version'])
        self._context._config_dir = handler_env['handlerEnvironment']['configFolder']
        self._context._log_dir = handler_env['handlerEnvironment']['logFolder']
        self._context._log_file = os.path.join(handler_env['handlerEnvironment']['logFolder'],'extension.log')
        self._change_log_file()
        self._context._status_dir = handler_env['handlerEnvironment']['statusFolder']
        self._context._heartbeat_file = handler_env['handlerEnvironment']['heartbeatFile']
        self._context._seq_no = self._get_current_seq_no(self._context._config_dir)
        if self._context._seq_no < 0:
            self.error("Unable to locate a .settings file!")
            return None
        self._context._seq_no = str(self._context._seq_no)
        encryption_operation = None
        while not encryption_operation:
            self.log('Parsing context for sequence number: ' + self._context._seq_no)
            self._context._settings_file = os.path.join(self._context._config_dir, self._context._seq_no + '.settings')
            self.log("setting file path is" + self._context._settings_file)
            ctxt = None
            ctxt = waagent.GetFileContents(self._context._settings_file)
            if ctxt == None :
                error_msg = 'Unable to read ' + self._context._settings_file + '. '
                self.error(error_msg)
                # Fall back to the previous sequence number when possible.
                if int(self._context._seq_no) > 0:
                    self._context._seq_no = str(int(self._context._seq_no) - 1)
                    continue
                return None
            else:
                if self.operation is not None and self.operation.lower() == "enable":
                    # we should keep the current status file
                    # self.backup_settings_status_file(self._context._seq_no)
                    pass
                self._context._config = self._parse_config(ctxt)
                public_settings_str = self._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')
                # publicSettings may arrive as a JSON string or as an
                # already-parsed object. (basestring is Python 2 only.)
                if isinstance(public_settings_str, basestring):
                    public_settings = json.loads(public_settings_str)
                else:
                    public_settings = public_settings_str
                encryption_operation = public_settings.get(CommonVariables.EncryptionEncryptionOperationKey)
                self.log("Encryption operation: {0}".format(encryption_operation))
                if self.find_last_nonquery_operation and encryption_operation == CommonVariables.QueryEncryptionStatus:
                    self.log("find_last_nonquery_operation was True and encryption_operation was query")
                    if int(self._context._seq_no) <= 0:
                        self.log("reached zero, returning")
                        return None
                    self.log("decrementing sequence number")
                    # Clearing encryption_operation keeps the loop going on
                    # the next-lower sequence number.
                    encryption_operation = None
                    self._context._seq_no = str(int(self._context._seq_no) - 1)
        return self._context

    def _change_log_file(self):
        """Redirect waagent logging into the extension's own log file."""
        self.log("Change log file to " + self._context._log_file)
        LoggerInit(self._context._log_file,'/dev/stdout')
        self._log = waagent.Log
        self._error = waagent.Error

    def save_seq(self):
        # Persist the current sequence number so reruns can be skipped.
        self.set_last_seq(self._context._seq_no)
        self.log("set most recent sequence number to " + self._context._seq_no)

    def set_last_seq(self, seq):
        waagent.SetFileContents('mrseq', str(seq))

    def redo_last_status(self):
        """Copy the previous sequence's status file over the latest one,
        re-publishing the old status under the new sequence number."""
        latest_seq = str(self.get_latest_seq())
        self._context._status_file = os.path.join(self._context._status_dir, latest_seq + '.status')
        previous_seq = str(self.get_latest_seq() - 1)
        previous_status_file = os.path.join(self._context._status_dir, previous_seq + '.status')
        shutil.copy2(previous_status_file, self._context._status_file)
        self.log("[StatusReport ({0})] Copied {1} to {2}".format(latest_seq, previous_status_file, self._context._status_file))

    def redo_current_status(self):
        """Re-write the current status file from its own parsed contents
        (refreshes timestamp/substatus via do_status_report)."""
        stat_rept = waagent.GetFileContents(self._context._status_file)
        stat = json.loads(stat_rept)
        self.do_status_report(stat[0]["status"]["operation"],
                              stat[0]["status"]["status"],
                              stat[0]["status"]["code"],
                              stat[0]["status"]["formattedMessage"]["message"])

    def do_status_report(self, operation, status, status_code, message):
        """Write a status report JSON file for the latest sequence number.

        A substatus carrying the current encryption state is added when
        disk_util is available. (Python 2 only: filter() over a str
        returns a str, which is then ascii-encoded.)
        """
        latest_seq = str(self.get_latest_seq())
        self._context._status_file = os.path.join(self._context._status_dir, latest_seq + '.status')
        # Strip non-printable characters so the agent can parse the JSON.
        message = filter(lambda c: c in string.printable, message)
        message = message.encode('ascii', 'ignore')
        self.log("[StatusReport ({0})] op: {1}".format(latest_seq, operation))
        self.log("[StatusReport ({0})] status: {1}".format(latest_seq, status))
        self.log("[StatusReport ({0})] code: {1}".format(latest_seq, status_code))
        self.log("[StatusReport ({0})] msg: {1}".format(latest_seq, message))
        tstamp = time.strftime(DateTimeFormat, time.gmtime())
        stat = [{
            "version" : self._context._version,
            "timestampUTC" : tstamp,
            "status" : {
                "name" : self._context._name,
                "operation" : operation,
                "status" : status,
                "code" : status_code,
                "formattedMessage" : {
                    "lang" : "en-US",
                    "message" : message
                }
            }
        }]
        if self.disk_util:
            encryption_status = self.disk_util.get_encryption_status()
            self.log("[StatusReport ({0})] substatus: {1}".format(latest_seq, encryption_status))
            substat = [{
                "name" : self._context._name,
                "operation" : operation,
                "status" : status,
                "code" : status_code,
                "formattedMessage" : {
                    "lang" : "en-US",
                    "message" : encryption_status
                }
            }]
            stat[0]["status"]["substatus"] = substat
            # Surface the reboot requirement prominently in the top-level
            # message when OS-disk encryption is pending a restart.
            if "VMRestartPending" in encryption_status:
                stat[0]["status"]["formattedMessage"]["message"] = "OS disk successfully encrypted, please reboot the VM"
        stat_rept = json.dumps(stat)
        # rename all other status files, or the WALA would report the wrong
        # status file.
        # because the wala choose the status file with the highest sequence
        # number to report.
        if self._context._status_file:
            with open(self._context._status_file,'w+') as f:
                f.write(stat_rept)

    def backup_settings_status_file(self, _seq_no):
        """Rename every other .settings file (dots -> underscores) so only
        the current sequence's file keeps the .settings suffix."""
        self.log("current seq no is " + _seq_no)
        for subdir, dirs, files in os.walk(self._context._config_dir):
            for file in files:
                try:
                    if file.endswith('.settings') and file != (_seq_no + ".settings"):
                        new_file_name = file.replace(".","_")
                        os.rename(join(self._context._config_dir, file), join(self._context._config_dir, new_file_name))
                except Exception as e:
                    # Best effort: a failed rename is logged, not fatal.
                    self.log("failed to rename the settings file.")

    def do_exit(self, exit_code, operation, status, code, message):
        """Write a final status report (best effort) and exit the process."""
        try:
            self.do_status_report(operation, status, code, message)
        except Exception as e:
            self.log("Can't update status: " + str(e))
        sys.exit(exit_code)

    def get_handler_settings(self):
        # Shorthand for the handlerSettings dict of the parsed config.
        return self._context._config['runtimeSettings'][0]['handlerSettings']

    def get_protected_settings(self):
        return self.get_handler_settings().get('protectedSettings')

    def get_public_settings(self):
        return self.get_handler_settings().get('publicSettings')

    def archive_old_configs(self):
        """Copy all .settings files and 'mrseq' into the archive folder."""
        if not os.path.exists(self.config_archive_folder):
            os.makedirs(self.config_archive_folder)
        # Walk from the config dir's parent so 'mrseq' (which lives next to
        # the config dir) is picked up as well.
        for root, dirs, files in os.walk(os.path.join(self._context._config_dir, '..')):
            for file in files:
                if file.endswith('.settings') or file == 'mrseq':
                    src = os.path.join(root, file)
                    dest = os.path.join(self.config_archive_folder, file)
                    self.log("Copying {0} to {1}".format(src, dest))
                    shutil.copy2(src, dest)

    def restore_old_configs(self):
        """Copy archived .settings files (and 'mrseq') back into place."""
        if not os.path.exists(self.config_archive_folder):
            return
        for root, dirs, files in os.walk(self.config_archive_folder):
            for file in files:
                if file.endswith('.settings'):
                    src = os.path.join(root, file)
                    dest = os.path.join(self._context._config_dir, file)
                    self.log("Copying {0} to {1}".format(src, dest))
                    shutil.copy2(src, dest)
                if file == 'mrseq':
                    # mrseq lives in the config dir's parent directory.
                    src = os.path.join(root, file)
                    dest = os.path.join(os.path.join(self._context._config_dir, '..'), file)
                    self.log("Copying {0} to {1}".format(src, dest))
                    shutil.copy2(src, dest)
|
|
#!/usr/bin/env python
"""Anonymises patient data, creates two copies, performs poisson resampling on one
to produce a simulated half time image and removes the odd numbered frames in the
other to simulate a half angle image"""
# Graham Arden, May 2009
# import the necessary libraries
import dicom
import os
import shutil
import time
import numpy
import Tkinter,tkFileDialog
# Function to generate a unique UID, based on time
def generate_new_UID(sleep_seconds=2):
    """Generate a unique DICOM UID from the current clock time.

    Args:
        sleep_seconds: seconds to pause after generating the UID (default 2,
            matching the original behaviour) so that consecutive calls see
            different timestamps and therefore produce distinct UIDs.
            Parameterised so callers/tests can skip the wait.

    Returns:
        string; "<prefix>.<seconds>.1<fraction>".
    """
    UID_prefix = "1.2.826.0.1.3680043.8.707"  # change this if you are not me!
    currentTime = (str(time.time()))
    time1, time2 = currentTime.split(".")
    # The ".1" prefix on the fractional part guarantees the final UID
    # component cannot start with 0 (invalid in a DICOM UID component).
    UID = UID_prefix + "." + time1 + ".1" + time2
    time.sleep(sleep_seconds)  # ensure the next call sees a different time
    return UID
# Function to generate a simulated half time image using Poisson resampling.
def half_time(oldImage):
    """Simulate a half-duration acquisition by Poisson-resampling counts.

    Each voxel count is halved (floor integer division, exactly as the
    original per-voxel Python 2 loop computed) and used as the mean of a
    Poisson draw, modelling the counting statistics of an acquisition of
    half the length.

    Args:
        oldImage: integer numpy array of raw counts; any shape is accepted
            (the original required exactly 3 dimensions).

    Returns:
        numpy array of the same shape and dtype with resampled counts.
    """
    newImage = numpy.zeros_like(oldImage)
    # Vectorised replacement for the original triple Python loop: a single
    # C-level Poisson draw over the whole volume instead of one call per
    # voxel. Assigning into zeros_like preserves the input dtype.
    newImage[...] = numpy.random.poisson(oldImage // 2)
    return newImage
# Function to generate a simulated half angle image by stripping out odd numbered frames.
def half_angle(oldImage):
    """Return only the even-indexed frames of oldImage (frames, rows, cols),
    simulating an acquisition with half the angular sampling.

    Python 2 only: uses print statements, and '/' here is integer floor
    division on the int frame count.
    """
    print "Old image was.....", oldImage.shape
    originalNumberOfFrames=oldImage.shape[0]
    newNumberOfFrames=originalNumberOfFrames/2
    numberOfRows=oldImage.shape[1]
    numberOfColumns=oldImage.shape[2]
    # Indices of the frames to keep: 0, 2, 4, ...
    index = range(0,originalNumberOfFrames,2)
    # NOTE(review): this zeros array is immediately rebound by numpy.take
    # on the next line, so the allocation is dead code.
    newImage=numpy.zeros((newNumberOfFrames,numberOfRows,numberOfColumns), int)
    newImage = numpy.take(oldImage, index, axis=0)
    print "New image is......", newImage.shape
    return newImage
# Same again, this time for gated images
#def half_angle_gated(oldImage):
# newImage = numpy.zeros(len(oldImage[0]/2), len(oldImage[1], len(oldImage[2], int)
# a = range(0,512)
# b = []
# [b.extend(a[i:i + 8]) for i in range (0, len(a), 16)]
# Generate a random order for the suffix
def random_suffix(numberOfChoices):
    """Return the first numberOfChoices letters of A-Z in shuffled order."""
    from random import shuffle
    letters = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')[:numberOfChoices]
    shuffle(letters)
    return letters
# Anonymises all the other headers which may identify the patient
def anonymise_other_headers(ds):
    """Blank non-essential identifying headers on the DICOM dataset *ds*.

    Each header is cleared only when present. Two typos in the original
    are fixed here: the birth-date presence check was spelled
    'PatientsBithDate' (so the birth date was never blanked), and the
    other-names value was assigned to a misspelled 'therPatientNames'
    attribute (leaving OtherPatientNames intact).
    """
    if "PatientsBirthDate" in ds:
        ds.PatientsBirthDate = ""
    if "PatientsSex" in ds:
        ds.PatientsSex = ""
    if "OtherPatientIDs" in ds:
        ds.OtherPatientIDs = ""
    if "OtherPatientNames" in ds:
        ds.OtherPatientNames = ""
    if "PatientsSize" in ds:
        ds.PatientsSize = ""
    if "PatientsWeight" in ds:
        ds.PatientsWeight = ""
    if "EthnicGroup" in ds:
        ds.EthnicGroup = ""
    if "Occupation" in ds:
        ds.Occupation = ""
    # Private vendor tag that may also carry patient information.
    if (0x0009, 0x1040) in ds:
        ds[0x0009, 0x1040].value = ""
# Select the directory containing our files:
# (Tkinter/tkFileDialog are the Python 2 module names.)
root = Tkinter.Tk()
root.withdraw()  # hide the empty Tk root window; only the dialog is wanted
fileDirectory = tkFileDialog.askdirectory(parent=root, title='Select the directory to save files in..', initialdir="C:\Documents and Settings\ardeng\My Documents\RR_Project\Bone")
detailsFile=os.path.join(fileDirectory,'File_details_YGC_bone.txt')
# Check if the file File_Details.txt exists, if not create it
if os.path.isfile(detailsFile):
    print "File_details.txt exists, appending details"
    textFile = open(detailsFile, "r") #open the details file in read-only mode
    #Read the last patient number in the file and add 1 to it
    allLines=textFile.readlines()
    lastLine=allLines[-1]
    allLastLine=lastLine.split()
    lastNumber = int(allLastLine[0])
    patientNumber=str(lastNumber + 1).zfill(4)
    textFile.close()
else:
    print "File_details.txt does not exist, I will create it"
    patientNumber="0001" # Set the patient number to 0001
    textFile = open(detailsFile, "w", 1)  # buffering=1 -> line-buffered
    # Write the fixed-width column header for the details table.
    textFile.write("Details of files ")
    textFile.write("\n")
    textFile.write("---------------- ")
    textFile.write("\n")
    textFile.write("\n")
    textFile.write ("%-15s%-15s%-14s%-14s%-14s%-14s" %("Patient No.", "Original ID", "Original", "Half Time", "Original", "Half Angle"))
    textFile.write("\n")
    textFile.write("\n")
    textFile.close()
textFile = open(detailsFile, "a") # open the details file in append mode in order to add new information at the end.
# Create directories we wish to save the modified images into
halfTimeImageDirectory=os.path.join(fileDirectory,patientNumber,"Half_time")
halfAngleImageDirectory=os.path.join(fileDirectory,patientNumber,"Half_angle")
os.makedirs(halfTimeImageDirectory)
os.makedirs(halfAngleImageDirectory)
# Generate new names and patient IDs (giving them random suffixes) so the
# original and resampled copies cannot be told apart by name alone.
suffixList_HT = random_suffix(2) # generates random order
# Original half time files
newPatientID_HT_orig = "ZZT" + patientNumber + suffixList_HT[0]
newPatientName_HT_orig = "ZZT_Patient" + patientNumber + suffixList_HT[0]
HT_orig_UID = generate_new_UID()
time.sleep(2)
print "wait, generating UIDs!"
# modified half time files
newPatientID_HT = "ZZT" + patientNumber + suffixList_HT[1]
newPatientName_HT = "ZZT_Patient" + patientNumber + suffixList_HT[1]
HT_UID = generate_new_UID()
time.sleep(2)
print ".."
#---------------------------------------------------------------------------------------------------------------------------------------------
suffixList_HA = random_suffix(2) # generates random order
# Original half angle files
newPatientID_HA_orig = "ZZS" + patientNumber + suffixList_HA[0]
newPatientName_HA_orig = "ZZS_Patient" + patientNumber + suffixList_HA[0]
HA_orig_UID = generate_new_UID()
time.sleep(2)
print "..."
# modified half angle files
newPatientID_HA = "ZZS" + patientNumber + suffixList_HA[1]
newPatientName_HA = "ZZS_Patient" + patientNumber + suffixList_HA[1]
HA_UID = generate_new_UID()
#----------------------------------------------------------------------------------------------------------------------------------------------
# Select the SPECT image (origSPECTImage)
root = Tkinter.Tk()
root.withdraw()
origSPECTImage = tkFileDialog.askopenfilename(title='Choose the SPECT file', initialdir="C:\Documents and Settings\ardeng\My Documents\RR_Project\Bone")
path,fi=os.path.split(origSPECTImage)
# dicom.ReadFile is the pre-1.0 pydicom API.
dsOrig = dicom.ReadFile(origSPECTImage)
# write these details (original ID plus all four new IDs) to file
textFile.write ("%-15s%-15s%-14s%-14s%-14s%-14s" %(str(patientNumber), str(dsOrig.PatientID),str(newPatientID_HT_orig), str(newPatientID_HT), str(newPatientID_HA_orig), str(newPatientID_HA)))
textFile.write("\n")
textFile.close()
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Unmodified version for half time study: a straight copy with new IDs/UIDs.
newHalfTimeSPECTFileName_unmodified=newPatientID_HT_orig+"_"+fi # These will be our unaltered files
newHalfTimeSPECTFile_unmodified=os.path.join(halfTimeImageDirectory,newHalfTimeSPECTFileName_unmodified)
shutil.copy(origSPECTImage, newHalfTimeSPECTFile_unmodified)
ds = dicom.ReadFile(newHalfTimeSPECTFile_unmodified)
ds.PatientsName=newPatientName_HT_orig
ds.PatientID=newPatientID_HT_orig
anonymise_other_headers(ds)
ds.StudyInstanceUID=HT_orig_UID
ds.SeriesInstanceUID=generate_new_UID()
time.sleep(2)
print "...."
ds.save_as(newHalfTimeSPECTFile_unmodified)
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Modified Half Time SPECT image: Poisson-resampled to simulate half the
# acquisition time.
newHalfTimeSPECTFileName_modified=newPatientID_HT+"_"+fi # These will be our modified files
newHalfTimeSPECTFile_modified=os.path.join(halfTimeImageDirectory,newHalfTimeSPECTFileName_modified)
shutil.copy(origSPECTImage, newHalfTimeSPECTFile_modified)
ds = dicom.ReadFile(newHalfTimeSPECTFile_modified)
ds.PatientsName=newPatientName_HT
ds.PatientID=newPatientID_HT
anonymise_other_headers(ds)
ds.StudyInstanceUID=HT_UID
ds.SeriesInstanceUID=generate_new_UID()
time.sleep(2)
print "....."
oldSPECT = ds.PixelArray
print "Poisson sampling image - This may take some time!"
newSPECT = half_time(oldSPECT)
maximumValue = int(newSPECT.max())
# need to change some other headers to reflect the fact we have halved the time
if "LargestImagePixelValue" in ds:
    ds.LargestImagePixelValue = maximumValue
if "LargestPixelValueinSeries" in ds:
    ds.LargestPixelValueinSeries = maximumValue
if "ActualFrameDuration" in ds:
    # Python 2 '/' floors for integer durations.
    newFrameDuration = int((ds.ActualFrameDuration/2))
    ds.ActualFrameDuration = newFrameDuration
ds.PixelData = newSPECT.tostring() # need to write values back to PixelData rather than PixelArray
ds.save_as(newHalfTimeSPECTFile_modified)
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Unmodified version for half angle study: again a straight copy.
newHalfAngleSPECTFileName_unmodified=newPatientID_HA_orig+"_"+fi # These will be our unaltered files
newHalfAngleSPECTFile_unmodified=os.path.join(halfAngleImageDirectory,newHalfAngleSPECTFileName_unmodified)
shutil.copy(origSPECTImage, newHalfAngleSPECTFile_unmodified)
ds = dicom.ReadFile(newHalfAngleSPECTFile_unmodified)
ds.PatientsName=newPatientName_HA_orig
ds.PatientID=newPatientID_HA_orig
anonymise_other_headers(ds)
ds.StudyInstanceUID=HA_orig_UID
ds.SeriesInstanceUID=generate_new_UID()
time.sleep(2)
print "......."
ds.save_as(newHalfAngleSPECTFile_unmodified)
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Modified Half Angle SPECT image: every second frame removed, plus the
# angular bookkeeping headers updated to match.
newHalfAngleSPECTFileName_modified=newPatientID_HA+"_"+fi # These will be our modified files
newHalfAngleSPECTFile_modified=os.path.join(halfAngleImageDirectory,newHalfAngleSPECTFileName_modified)
shutil.copy(origSPECTImage, newHalfAngleSPECTFile_modified)
ds = dicom.ReadFile(newHalfAngleSPECTFile_modified)
ds.PatientsName=newPatientName_HA
ds.PatientID=newPatientID_HA
anonymise_other_headers(ds)
ds.StudyInstanceUID=HA_UID
ds.SeriesInstanceUID=generate_new_UID()
time.sleep(2)
print "........"
# now create the actual half angle image....
print "Creating the half angle image"
oldSPECT = ds.PixelArray
newSPECT=half_angle(oldSPECT)
ds.PixelData = newSPECT.tostring() # need to write values back to PixelData rather than PixelArray
#need to modify some other headers to reflect fact we have taken out half the frames
ds.NumberofFrames=ds.NumberofFrames/2
# Edit EnergyWindowVector, DetectorVector and RotationVector to take out every second frame
# Note, all need to be list rather than arrays, hence use of 'tolist()'
headerIndex = range(0, len(ds.EnergyWindowVector), 2)
ds.EnergyWindowVector= (numpy.take(ds.EnergyWindowVector, headerIndex)).tolist()
ds.DetectorVector=(numpy.take(ds.DetectorVector, headerIndex)).tolist()
ds.RotationVector=(numpy.take(ds.RotationVector, headerIndex)).tolist()
# Edit AngularStep and NumberOfFramesInRotation: half the frames over the
# same arc means double the step and half the frames per rotation.
angularStep = ds[0x0054, 0x0052][0][0x0018, 0x1144].value
angularStep = angularStep*2
ds[0x0054, 0x0052][0][0x0018, 0x1144].value = angularStep
numberOfFramesInRotation = ds[0x0054, 0x0052][0][0x0054, 0x0053].value
numberOfFramesInRotation = numberOfFramesInRotation/2
ds[0x0054, 0x0052][0][0x0054, 0x0053].value = numberOfFramesInRotation
# Edit AngularViewVector
# NOTE(review): the quarter-length index list is duplicated and reused --
# presumably to cover two detector heads; confirm against the scanner data.
angularViewVectorIndex = range(0,(len(ds.AngularViewVector))/4)
angularViewVectorIndex.extend(angularViewVectorIndex)
newAngularViewVector = (numpy.take(ds.AngularViewVector, angularViewVectorIndex)).tolist()
ds.AngularViewVector = newAngularViewVector
# Assume Tomo View Offsets are in groups of six and remove every alternate group of six
completeIndex=range(0,len(ds[0x0055, 0x1022][0][0x0013, 0x101e].value))
index=[]
# Keep six offsets, skip six, repeat (list comprehension used for its side effect).
[index.extend(completeIndex[i:i + 6]) for i in range (0, len(completeIndex), 12)]
tomoViewOffset = ds[0x0055, 0x1022][0][0x0013, 0x101e].value
newTomoViewOffset = (numpy.take(tomoViewOffset, index)).tolist()
ds[0x0055, 0x1022][0][0x0013, 0x101e].value=newTomoViewOffset
ds.save_as(newHalfAngleSPECTFile_modified)
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Select the CT images (origCTImages): each CT slice is copied four times,
# once per study (HT/HA, original/modified), with matching IDs and UIDs.
root = Tkinter.Tk()
root.withdraw()
origCTImages = tkFileDialog.askopenfilenames(title='Choose the CT files', initialdir="C:\Documents and Settings\ardeng\My Documents\RR_Project\Bone")
print ".........."
# One fresh SeriesInstanceUID per output series, shared by all its slices.
halfTimeUnmodifiedSeriesInstanceUID=generate_new_UID()
time.sleep(2)
print "..........."
halfTimeModifiedSeriesInstanceUID=generate_new_UID()
time.sleep(2)
print "............"
halfAngleUnmodifiedSeriesInstanceUID=generate_new_UID()
time.sleep(2)
print "............."
halfAngleModifiedSeriesInstanceUID=generate_new_UID()
for i in range(0, len(origCTImages)):
    path,fi=os.path.split(origCTImages[i])
    newHalfTimeFileName_unmodified=newPatientID_HT_orig+"_"+fi # These will be our unaltered files
    newHalfTimeFile_unmodified=os.path.join(halfTimeImageDirectory,newHalfTimeFileName_unmodified)
    shutil.copy(origCTImages[i], newHalfTimeFile_unmodified)
    ds = dicom.ReadFile(newHalfTimeFile_unmodified)
    ds.PatientsName=newPatientName_HT_orig
    ds.PatientID=newPatientID_HT_orig
    ds.StudyInstanceUID=HT_orig_UID
    anonymise_other_headers(ds)
    ds.SeriesInstanceUID=halfTimeUnmodifiedSeriesInstanceUID
    ds.save_as(newHalfTimeFile_unmodified)
    newHalfTimeFileName_modified=newPatientID_HT+"_"+fi # These will be our modified files
    newHalfTimeFile_modified=os.path.join(halfTimeImageDirectory,newHalfTimeFileName_modified)
    shutil.copy(origCTImages[i], newHalfTimeFile_modified)
    ds = dicom.ReadFile(newHalfTimeFile_modified)
    ds.PatientsName=newPatientName_HT
    ds.PatientID=newPatientID_HT
    ds.StudyInstanceUID=HT_UID
    anonymise_other_headers(ds)
    ds.SeriesInstanceUID=halfTimeModifiedSeriesInstanceUID
    ds.save_as(newHalfTimeFile_modified)
    newHalfAngleFileName_unmodified=newPatientID_HA_orig+"_"+fi # These will be our unaltered files
    newHalfAngleFile_unmodified=os.path.join(halfAngleImageDirectory,newHalfAngleFileName_unmodified)
    shutil.copy(origCTImages[i], newHalfAngleFile_unmodified)
    ds = dicom.ReadFile(newHalfAngleFile_unmodified)
    ds.PatientsName=newPatientName_HA_orig
    ds.PatientID=newPatientID_HA_orig
    ds.StudyInstanceUID=HA_orig_UID
    anonymise_other_headers(ds)
    ds.SeriesInstanceUID=halfAngleUnmodifiedSeriesInstanceUID
    ds.save_as(newHalfAngleFile_unmodified)
    newHalfAngleFileName_modified=newPatientID_HA+"_"+fi # These will be our modified files
    newHalfAngleFile_modified=os.path.join(halfAngleImageDirectory,newHalfAngleFileName_modified)
    shutil.copy(origCTImages[i], newHalfAngleFile_modified)
    ds = dicom.ReadFile(newHalfAngleFile_modified)
    ds.PatientsName=newPatientName_HA
    ds.PatientID=newPatientID_HA
    ds.StudyInstanceUID=HA_UID
    anonymise_other_headers(ds)
    ds.SeriesInstanceUID=halfAngleModifiedSeriesInstanceUID
    ds.save_as(newHalfAngleFile_modified)
# Select the Attenuation Map image and copy it into all four studies,
# mirroring the CT handling above.
root = Tkinter.Tk()
root.withdraw()
origAttMapImage = tkFileDialog.askopenfilename(title='Choose the Attenuation Map file', initialdir="C:\Documents and Settings\ardeng\My Documents\RR_Project\Bone")
print "..............."
# Fresh SeriesInstanceUIDs again (the CT ones are rebound/replaced here).
halfTimeUnmodifiedSeriesInstanceUID=generate_new_UID()
time.sleep(2)
print "................"
halfTimeModifiedSeriesInstanceUID=generate_new_UID()
time.sleep(2)
print "................."
halfAngleUnmodifiedSeriesInstanceUID=generate_new_UID()
time.sleep(2)
print ".................."
halfAngleModifiedSeriesInstanceUID=generate_new_UID()
path,fi=os.path.split(origAttMapImage)
newHalfTimeFileName_unmodified=newPatientID_HT_orig+"_"+fi # These will be our unaltered files
newHalfTimeFile_unmodified=os.path.join(halfTimeImageDirectory,newHalfTimeFileName_unmodified)
shutil.copy(origAttMapImage, newHalfTimeFile_unmodified)
ds = dicom.ReadFile(newHalfTimeFile_unmodified)
ds.PatientsName=newPatientName_HT_orig
ds.PatientID=newPatientID_HT_orig
ds.StudyInstanceUID=HT_orig_UID
anonymise_other_headers(ds)
ds.SeriesInstanceUID=halfTimeUnmodifiedSeriesInstanceUID
ds.save_as(newHalfTimeFile_unmodified)
newHalfTimeFileName_modified=newPatientID_HT+"_"+fi # These will be our modified files
newHalfTimeFile_modified=os.path.join(halfTimeImageDirectory,newHalfTimeFileName_modified)
shutil.copy(origAttMapImage, newHalfTimeFile_modified)
ds = dicom.ReadFile(newHalfTimeFile_modified)
ds.PatientsName=newPatientName_HT
ds.PatientID=newPatientID_HT
ds.StudyInstanceUID=HT_UID
anonymise_other_headers(ds)
ds.SeriesInstanceUID=halfTimeModifiedSeriesInstanceUID
ds.save_as(newHalfTimeFile_modified)
newHalfAngleFileName_unmodified=newPatientID_HA_orig+"_"+fi # These will be our unaltered files
newHalfAngleFile_unmodified=os.path.join(halfAngleImageDirectory,newHalfAngleFileName_unmodified)
shutil.copy(origAttMapImage, newHalfAngleFile_unmodified)
ds = dicom.ReadFile(newHalfAngleFile_unmodified)
ds.PatientsName=newPatientName_HA_orig
ds.PatientID=newPatientID_HA_orig
ds.StudyInstanceUID=HA_orig_UID
anonymise_other_headers(ds)
ds.SeriesInstanceUID=halfAngleUnmodifiedSeriesInstanceUID
ds.save_as(newHalfAngleFile_unmodified)
newHalfAngleFileName_modified=newPatientID_HA+"_"+fi # These will be our modified files
newHalfAngleFile_modified=os.path.join(halfAngleImageDirectory,newHalfAngleFileName_modified)
shutil.copy(origAttMapImage, newHalfAngleFile_modified)
ds = dicom.ReadFile(newHalfAngleFile_modified)
ds.PatientsName=newPatientName_HA
ds.PatientID=newPatientID_HA
ds.StudyInstanceUID=HA_UID
anonymise_other_headers(ds)
ds.SeriesInstanceUID=halfAngleModifiedSeriesInstanceUID
ds.save_as(newHalfAngleFile_modified)
print "...................Finished!"
# Close the text file
# NOTE(review): missing parentheses -- 'close' is only referenced, not
# called. Harmless here since textFile was already closed earlier.
textFile.close
|
|
"""test_raspberry_pi.py - Test routines and sample code for communicating via LAN or RS232 with NEC large-screen displays
using the NEC PD SDK.
Revision: 180220
"""
#
#
# Copyright (C) 2016-18 NEC Display Solutions, Ltd
# written by Will Hollingworth <whollingworth at necdisplay.com>
# See LICENSE.rst for details.
#
from __future__ import print_function
from builtins import input
import logging
from nec_pd_sdk.nec_pd_sdk import NECPD
from nec_pd_sdk.protocol import PDError
from nec_pd_sdk.protocol import PDUnexpectedReplyError
from nec_pd_sdk.constants import *
from nec_pd_sdk.opcode_decoding import *
def reverse_dict(d):
    """Return a new dict mapping each value of *d* back to its key.

    If several keys share the same value, the key that iterates last wins
    (same behaviour as the original zip()-based implementation).  Replaces
    the redundant list()/zip() wrapping with a dict comprehension.
    """
    return {value: key for key, value in d.items()}
def do_main_tests(pd):
    """Exercise the main read/write helpers of an open NECPD connection.

    Prints the result of each call; any PDError / PDUnexpectedReplyError
    aborts the remaining tests and is reported rather than raised.
    @param pd: an open NECPD connection with a destination monitor ID set
    """
    try:
        print("Testing: helper_asset_data_read")
        value = pd.helper_asset_data_read()
        print("helper_asset_data_read value: ", value)
        print("Testing: command_model_name_read")
        value = pd.command_model_name_read()
        print("command_model_name_read value:", value)
        print("Testing: command_serial_number_read")
        value = pd.command_serial_number_read()
        print("command_serial_number_read value:", value)
        print("Testing: command_lan_mac_address_read")
        value = pd.command_lan_mac_address_read()
        print("command_lan_mac_address_read value:", value[0])
        print("Testing: command_ip_address_read")
        value = pd.command_ip_address_read()
        print("command_ip_address_read value:", value[0])
        print("Testing: helper_get_power_on_hours")
        print("power on hours: ", pd.helper_get_power_on_hours())
        print("Testing: helper_get_total_operating_hours")
        print("total operating hours: ", pd.helper_get_total_operating_hours())
        print("Testing: helper_get_temperature_sensor_values")
        print("helper_get_temperature_sensor_values: ", pd.helper_get_temperature_sensor_values())
        print("Testing: helper_get_fan_statuses")
        print("helper_get_fan_statuses: ", pd.helper_get_fan_statuses())
        print("Testing: command_power_status_read")
        state = pd.command_power_status_read()
        # map the numeric power state back to its symbolic name
        pd_power_states_rev = reverse_dict(PD_POWER_STATES)
        if state in pd_power_states_rev:
            print("power state: ", pd_power_states_rev[state])
        else:
            print("power state: Unknown state")
        print("Testing: helper_firmware_versions_list")
        text_list = pd.helper_firmware_versions_list()
        ver_num = 0
        for text in text_list:
            ver_num += 1
            print("helper_firmware_versions_list: FW#", ver_num, "=", text)
        print("Testing: helper_set_parameter_as_percentage")
        reply = pd.helper_set_parameter_as_percentage(OPCODE_PICTURE__BRIGHTNESS, 50)
        print("helper_set_parameter_as_percentage result:", reply.result, "opcode:", hex(reply.opcode), "type:",
              reply.type, "max_value:", reply.max_value, "current_value:", reply.current_value)
        print("Testing: command_get_parameter")
        reply = pd.command_get_parameter(OPCODE_PICTURE__BRIGHTNESS)
        print("command_get_parameter result:", reply.result, "opcode:", hex(reply.opcode), "type:", reply.type,
              "max_value:", reply.max_value, "current_value:", reply.current_value)
        print("Testing: command_set_parameter")
        # write back the value just read, so the display state is unchanged
        reply = pd.command_set_parameter(OPCODE_PICTURE__BRIGHTNESS, reply.current_value)
        print("command_set_parameter result:", reply.result, "opcode:", hex(reply.opcode), "type:", reply.type,
              "max_value:", reply.max_value, "current_value:", reply.current_value)
        print("Testing: helper_self_diagnosis_status_text")
        text_list = pd.helper_self_diagnosis_status_text()
        print("Diagnostics:", text_list)
        print("Testing: helper_timing_report_text")
        text_list = pd.helper_timing_report_text()
        print("helper_timing_report_text:", text_list)
        print("Testing: helper_asset_data_read")
        text_list = pd.helper_asset_data_read()
        print("helper_asset_data_read:", text_list)
        print("Testing: helper_date_and_time_read")
        value, daylight_savings = pd.helper_date_and_time_read()
        print("helper_date_and_time_read.datetime:", str(value), "daylight_savings:", daylight_savings)
        # try reading all of the opcodes that we know about
        for x in get_opcode_list():
            reply = pd.command_get_parameter(x)
            print("command_get_parameter Opcode 0x", '%04x' % reply.opcode, sep='', end="")
            name = opcode_to_nice_name(reply.opcode)
            if name is not None:
                print(" ('", name, "') ", sep='', end="")
            else:
                print(" (not in opcode_values_to_name_dict!)")
            print("Result:", reply.result, end="")
            if reply.result == 0:  # opcode is supported
                print(" (Supported) ", end="")
                # print(" result:", reply.result, end="")
                print("Type:", reply.type, end="")
                if reply.type == 0:
                    print(" (Set Parameter) ", end="")
                else:
                    print(" (Momentary) ", end="")
                print("Max:", reply.max_value, "Current:", reply.current_value, end="")
                name = opcode_value_to_nice_value_name(reply.opcode, reply.current_value)
                if name is not None:
                    print(" ('", name, "')", sep='', end="")
            else:
                print(" (Unsupported) ", end="")
            print("")
    except PDUnexpectedReplyError as msg:
        print("PDUnexpectedReplyError:", msg)
    except PDError as msg:
        print("PDError:", msg)
    return
def main():
    """Interactive entry point: choose a port/IP, connect, run do_main_tests()."""
    # logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(funcName)s - %(message)s')
    logging.basicConfig(level=logging.WARNING, format='%(asctime)s - %(levelname)s - %(funcName)s - %(message)s')
    # load the opcode dictionary from file
    load_opcode_dict()
    print("Presets: ")
    print("'1' = COM1")
    print("'2' = COM2")
    print("'3' = COM3")
    print("'4' = COM4")
    print("'5' = /dev/ttyS0 (Raspberry Pi 3)")
    print("'6' = /dev/ttyAMA0 (Raspberry Pi 1&2)")
    print("'7' = 192.168.0.10 (Default IP)")
    port = input("or Enter an IP address or COM port name: ")
    if len(port) == 0:
        # NOTE(review): empty input falls back to 192.168.1.140 although the
        # menu advertises 192.168.0.10 as the "Default IP" — confirm intended.
        port = '192.168.1.140'
    elif len(port) == 1:
        # a single character selects one of the presets listed above
        if port[0] == '1':
            port = "COM1"
        elif port[0] == '2':
            port = "COM2"
        elif port[0] == '3':
            port = "COM3"
        elif port[0] == '4':
            port = "COM4"
        elif port[0] == '5':
            port = "/dev/ttyS0"
        elif port[0] == '6':
            port = "/dev/ttyAMA0"
        elif port[0] == '7':
            port = "192.168.0.10"
        else:
            print("Unknown option.")
            return
    print("Using port:", port)
    try:
        pd = NECPD.open(port)
        # NOTE(review): monitor_id stays a string unless empty (then int 1);
        # presumably helper_set_destination_monitor_id accepts both — confirm.
        monitor_id = input("Enter the Monitor ID (1-100, A-J or ALL (Enter for 1): ")
        if len(monitor_id) == 0:
            monitor_id = 1
        pd.helper_set_destination_monitor_id(monitor_id)
        try:
            do_main_tests(pd)
            print("Testing: Finished!")
        finally:
            # make sure to always close
            pd.close()
    except PDError as msg:
        print("PDError:", msg)
    return
# Script entry point: run the interactive tests, then exit the interpreter.
if __name__ == '__main__':
    main()
    exit()
|
|
from os import path
import subprocess
import inflection
import jinja2
import xmltodict
HERE = path.dirname(path.abspath(__file__))
VENDOR_EXTENSIONS = ['KHR', 'EXT', 'NV']
CUSTOM_FUNCTIONS = ('vkGetInstanceProcAddr', 'vkGetDeviceProcAddr',
'vkMapMemory', 'vkGetPipelineCacheData')
NULL_MEMBERS = ('pNext', 'pAllocator', 'pUserData')
def get_enum_names(vk):
    """Collect the names of every <enums> block in the registry."""
    names = set()
    for enums_node in vk['registry']['enums']:
        names.add(enums_node['@name'])
    return names
def get_handle_names(vk):
    """Collect the names of non-alias handle types.

    Handles carry their name as a child element ('name'), not as an
    attribute, hence the key without the '@' prefix.
    """
    return set(
        type_node['name']
        for type_node in vk['registry']['types']['type']
        if type_node.get('@category', None) == 'handle'
        and not type_node.get('@alias')
    )
def get_struct_names(vk):
    """Collect the names of all struct types (name is an XML attribute)."""
    names = set()
    for type_node in vk['registry']['types']['type']:
        if type_node.get('@category', None) == 'struct':
            names.add(type_node['@name'])
    return names
def get_union_names(vk):
    """Collect the names of all union types.

    Bug fix: unions, like structs, carry their name in the '@name'
    attribute (xmltodict prefixes XML attributes with '@'); the previous
    code read the child-element key 'name', which union nodes do not
    have, raising KeyError.
    """
    return {s['@name'] for s in vk['registry']['types']['type']
            if s.get('@category', None) == 'union'}
def parse_constant(constant, ext_number=0):
    """Convert a registry <enum> constant node to its numeric/string value.

    @param constant: xmltodict node with one of '@bitpos', '@value', '@offset'
    @param ext_number: extension number, used for '@offset'-based values
    @return: hex string for bitpos, raw string for value, int for offset;
        None when none of the three keys is present
    """
    if '@bitpos' in constant:
        bit = int(constant['@bitpos'], 0)
        return '0x%08x' % (1 << bit)
    if '@value' in constant:
        return constant['@value']
    if '@offset' in constant:
        # extension enums: base + (ext_number - 1) * block_size + offset
        base = 1000000000
        block_size = 1000
        result = base + (ext_number - 1) * block_size
        result += int(constant['@offset'])
        if constant.get('@dir') == '-':
            result = -result
        return result
def model_typedefs(vk, model):
    """Fill the model with typedefs

    model['typedefs'] = {'name': 'type', ...}
    """
    model['typedefs'] = {}
    # bitmasks and basetypes
    bitmasks = [x for x in vk['registry']['types']['type']
                if x.get('@category') == 'bitmask']
    basetypes = [x for x in vk['registry']['types']['type']
                 if x.get('@category') == 'basetype']
    for typedef in bitmasks + basetypes:
        if not typedef.get('type'):
            continue
        model['typedefs'][typedef['name']] = typedef['type']
    # handles
    handles = [x for x in vk['registry']['types']['type']
               if x.get('@category') == 'handle']
    for handle in handles:
        if 'name' not in handle or 'type' not in handle:
            continue
        n = handle['name']
        t = handle['type']
        if t == 'VK_DEFINE_HANDLE':
            # dispatchable handles are pointers to an opaque struct
            model['typedefs']['struct %s_T' % n] = '*%s' % n
        if t == 'VK_DEFINE_NON_DISPATCHABLE_HANDLE':
            # Bug fix: this branch previously re-tested 'VK_DEFINE_HANDLE',
            # so dispatchable handles were also typedef'd to uint64_t and
            # non-dispatchable handles received no typedef at all.
            model['typedefs'][n] = 'uint64_t'
    # custom platform dependent types
    for name in ['Display', 'xcb_connection_t', 'wl_display', 'wl_surface',
                 'MirConnection', 'MirSurface', 'ANativeWindow',
                 'SECURITY_ATTRIBUTES']:
        model['typedefs'][name] = 'struct %s' % name
    model['typedefs'].update({
        'Window': 'uint32_t', 'VisualID': 'uint32_t',
        'xcb_window_t': 'uint32_t', 'xcb_visualid_t': 'uint32_t'
    })
def model_enums(vk, model):
    """Fill the model with enums
    model['enums'] = {'name': {'item_name': 'item_value'...}, ...}
    """
    model['enums'] = {}
    # init enums dict so even enum types without values are present
    enums_type = [x['@name'] for x in vk['registry']['types']['type']
                  if x.get('@category') == 'enum']
    for name in enums_type:
        model['enums'][name] = {}
    # create enums
    enums = [x for x in vk['registry']['enums']
             if x.get('@type') in ('enum', 'bitmask')]
    for enum in enums:
        name = enum['@name']
        t = enum.get('@type')
        # enum may have no enums (because of extension)
        if not enum.get('enum'):
            continue
        if t in ('enum', 'bitmask'):
            # add attr to enum
            for attr in enum['enum']:
                if '@bitpos' in attr:
                    num_val = int(attr['@bitpos'], 0)
                    num_val = 1 << num_val
                    val = '0x%08x' % num_val
                elif '@value' in attr:
                    val = attr['@value']
                # NOTE(review): an entry with neither @bitpos nor @value
                # (e.g. an @alias entry) silently reuses the previous
                # iteration's `val`, or raises NameError on the first pass —
                # confirm vk.xml never produces such entries here.
                model['enums'][name][attr['@name']] = val
            # Add computed values (BEGIN/END_RANGE, MAX_ENUM) mirroring vulkan.h
            def ext_name(name, extension):
                if extension:
                    return name + '_' + extension
                return name
            # NOTE(review): VENDOR_EXTENSIONS holds upper-case suffixes but is
            # matched against name.lower(), so this can never match — upstream
            # presumably used lower-case entries; confirm before relying on it.
            extension = next(iter([x for x in VENDOR_EXTENSIONS
                                   if name.lower().endswith(x)]), '').upper()
            standard_name = inflection.underscore(name).upper()
            if extension:
                standard_name = standard_name.split(extension)[0][:-1]
            if t == 'bitmask':
                # NOTE(review): ext_name(x, '_MAX_ENUM') yields a double
                # underscore ('X__MAX_ENUM') — verify against the template.
                en = ext_name(standard_name, '_MAX_ENUM')
                model['enums'][name][en] = 0x7FFFFFFF
            else:
                values = [int(x, 0) for x in model['enums'][name].values()]
                begin_attr = ext_name(standard_name, '_BEGIN_RANGE')
                end_attr = ext_name(standard_name, '_END_RANGE')
                size_attr = ext_name(standard_name, '_RANGE_SIZE')
                max_attr = ext_name(standard_name, '_MAX_ENUM')
                model['enums'][name][begin_attr] = min(values)
                model['enums'][name][end_attr] = max(values)
                model['enums'][name][size_attr] = max(values) - min(values) + 1
                model['enums'][name][max_attr] = 0x7FFFFFFF
    # Enums contributed by core feature blocks:
    # value = base + (extnumber - 1) * blocksize + offset
    ext_base = 1000000000
    ext_blocksize = 1000
    for feature in vk['registry']['feature']:
        for require in feature['require']:
            if not 'enum' in require:
                continue
            for enum in require['enum']:
                if not '@extnumber' in enum:
                    continue
                n1 = int(enum['@extnumber'])
                n2 = int(enum['@offset'])
                extend = enum['@extends']
                val = ext_base + (n1 - 1) * ext_blocksize + n2
                model['enums'][extend][enum['@name']] = val
def model_macros(vk, model):
    """Fill the model with macros
    model['macros'] = {'name': value, ...}
    """
    model['macros'] = {}
    # API constants live in the <enums> blocks whose @type is neither
    # 'bitmask' nor 'enum'; only the first such block is consumed below.
    macros = [x for x in vk['registry']['enums']
              if x.get('@type') not in ('bitmask', 'enum')]
    # TODO: Check these values — C literals translated to Python equivalents
    special_values = {'1000.0f': '1000.0',
                      '(~0U)': 0xffffffff,
                      '(~0ULL)': -1,
                      '(~0U-1)': 0xfffffffe,
                      '(~0U-2)': 0xfffffffd}
    for macro in macros[0]['enum']:
        if '@name' not in macro or '@value' not in macro:
            continue
        name = macro['@name']
        value = macro['@value']
        if value in special_values:
            value = special_values[value]
        model['macros'][name] = value
    # Extension Macros: the extension name itself plus its enum constants
    for ext in get_extensions_filtered(vk):
        model['macros'][ext['@name']] = 1
        for req in ext['require']:
            # NOTE(review): a <require> block without <enum> children would
            # raise KeyError here — confirm vk.xml always provides one.
            for enum in req['enum']:
                ename = enum['@name']
                evalue = parse_constant(enum, int(ext['@number']))
                # don't erase existing macros
                if ename in model['macros']:
                    continue
                if enum.get('@extends') == 'VkResult':
                    # extension-added result codes belong in the VkResult enum
                    model['enums']['VkResult'][ename] = evalue
                else:
                    model['macros'][ename] = evalue
def model_funcpointers(vk, model):
    """Map each PFN_* function-pointer type to the struct that embeds it.

    model['funcpointers'] = {'pfn_name': 'struct_name'}
    If several structs contain the same pointer type, the last one wins
    (same as the original implementation).
    """
    model['funcpointers'] = {}
    all_types = vk['registry']['types']['type']
    pointer_names = [t['name'] for t in all_types
                     if t.get('@category') == 'funcpointer']
    struct_nodes = [t for t in all_types
                    if t.get('@category') == 'struct']
    for pfn_name in pointer_names:
        for struct in struct_nodes:
            if 'member' not in struct:
                continue
            for member in struct['member']:
                if member['type'] == pfn_name:
                    model['funcpointers'][pfn_name] = struct['@name']
def model_exceptions(vk, model):
    """Split VkResult codes into success exceptions and error exceptions.

    model['exceptions'] = {val: 'name', ...}   (non-VK_SUCCESS success codes)
    model['errors'] = {val: 'name', ...}       (error codes)
    Requires model['enums']['VkResult'] to be filled first.
    """
    model['exceptions'] = {}
    model['errors'] = {}
    all_codes = model['enums']['VkResult']
    success_names = set()
    error_names = set()
    # gather every code referenced by any command's success/error lists
    for command in vk['registry']['commands']['command']:
        success_names.update(command.get('@successcodes', '').split(','))
        error_names.update(command.get('@errorcodes', '').split(','))
    for key, value in all_codes.items():
        # skip VK_SUCCESS itself and the synthetic VK_RESULT_* range entries
        if key.startswith('VK_RESULT') or key == 'VK_SUCCESS':
            continue
        nice_name = inflection.camelize(key.lower())
        if key in success_names:
            model['exceptions'][value] = nice_name
        elif key in error_names:
            model['errors'][value] = nice_name
        else:
            print('Warning: return code %s unused' % key)
def model_constructors(vk, model):
    """Fill the model with constructors

    model['constructors'] = [{'name': 'x', 'members': [{'name': 'y', ...}]}, ...]
    One entry per struct/union type that declares members.
    """
    def _member_len(member):
        # Usable '@len' expression for a member, or None when absent or
        # when it is only a latex formula / 'null-terminated' marker.
        raw = member.get('@len')
        if not raw:
            return None
        if ',' in raw:
            raw = raw.split(',')[0]
        if 'latex' in raw or 'null-terminated' in raw:
            return None
        return raw

    model['constructors'] = []
    for type_node in vk['registry']['types']['type']:
        if type_node.get('@category') not in {'struct', 'union'}:
            continue
        if 'member' not in type_node:
            continue
        member_entries = [{'name': m['name'],
                           'type': m['type'],
                           'default': m.get('@values'),
                           'len': _member_len(m)}
                          for m in type_node['member']]
        model['constructors'].append({'name': type_node['@name'],
                                      'members': member_entries})
def model_functions(vk, model):
    """Fill the model with functions

    Produces model['functions'], one dict per non-alias, non-custom command,
    with flags describing how the wrapper should allocate output parameters.
    Requires model['alias'] and model['constructors'] to be filled first.
    """
    def get_vk_extension_functions():
        # Names of every command contributed by an extension, plus aliases.
        names = set()
        for extension in get_extensions_filtered(vk):
            for req in extension['require']:
                if 'command' not in req:
                    continue
                for command in req['command']:
                    cn = command['@name']
                    names.add(cn)
                    # add alias command too
                    for alias, n in model['alias'].items():
                        if n == cn:
                            names.add(alias)
        return names

    def has_count_param(command):
        # check if a params type is uint32_t* (out-count of the
        # two-call enumeration idiom)
        for param in command['param']:
            if param['type'] + param.get('#text', '') == 'uint32_t*':
                return True
        return False

    def member_has_str(name):
        # True if the constructor named `name` has any char-typed member
        c = next(iter([x for x in model['constructors']
                       if x['name'] == name]), None)
        if c and any(['char' in x['type'] for x in c['members']]):
            return True
        return False

    def format_member(member):
        # NOTE(review): type_name is computed but never used — the returned
        # 'type' is the bare member['type']; confirm against the template
        # whether the pointer-qualified name was meant to be returned.
        type_name = member['type']
        if '#text' in member:
            text = member['#text'].replace('const ', '').strip()
            type_name += ' ' + text
        return {'name': member['name'],
                'type': member['type'],
                'none': member['name'] in NULL_MEMBERS,
                'force_array': True if '@len' in member else False,
                'to_create': False,
                'has_str': member_has_str(member['type'])}

    def format_return_member(member):
        t = member['type']
        static_count = None
        if '@len' in member:
            # see vkCreateGraphicsPipelines for example
            static_count = member['@len']
            if '::' in static_count:
                lens = member['@len'].split('::')
                static_count = lens[0]+'.'+lens[1]
        # see vkGetRayTracingShaderGroupHandlesNV for example
        if '@len' in member and 'dataSize' in member['@len']:
            t = 'uint64_t'
        is_handle = t in get_handle_names(vk)
        is_enum = t in get_enum_names(vk)
        is_struct = t in get_struct_names(vk)
        return {'name': member['name'],
                'type': t,
                'handle': is_handle,
                'enum': is_enum,
                'struct': is_struct,
                'static_count': static_count,
                'has_str': member_has_str(member['type'])}

    # Commands whose final parameter is an output the wrapper must allocate,
    # with explicit exceptions to both rules.
    ALLOCATE_PREFIX = ('vkCreate', 'vkGet', 'vkEnumerate', 'vkAllocate',
                       'vkMap', 'vkAcquire')
    ALLOCATE_EXCEPTION = ('vkGetFenceStatus', 'vkGetEventStatus',
                          'vkGetQueryPoolResults',
                          'vkGetPhysicalDeviceXlibPresentationSupportKHR')
    COUNT_EXCEPTION = ('vkAcquireNextImageKHR', 'vkEnumerateInstanceVersion')
    model['functions'] = []
    # NOTE(review): 'extension_functions' is initialised but never appended
    # to; membership is tracked via each function's 'is_extension' flag.
    model['extension_functions'] = []
    functions = [f for f in vk['registry']['commands']['command']]
    extension_function_names = get_vk_extension_functions()
    for function in functions:
        if '@alias' in function:
            continue
        fname = function['proto']['name']
        ftype = function['proto']['type']
        if fname in CUSTOM_FUNCTIONS:
            continue
        # normalise single-param commands to a list
        if type(function['param']) is not list:
            function['param'] = [function['param']]
        count_param = has_count_param(function)
        if fname in COUNT_EXCEPTION:
            count_param = None
        is_allocate = any([fname.startswith(a) for a in ALLOCATE_PREFIX])
        is_count = is_allocate and count_param
        if fname in ALLOCATE_EXCEPTION or ftype == 'VkBool32':
            is_allocate = is_count = False
        members = []
        for member in function['param']:
            members.append(format_member(member))
        return_member = None
        if is_allocate:
            # last parameter is the allocated output
            return_member = format_return_member(function['param'][-1])
            members[-1]['to_create'] = True
        if is_count:
            # two-call idiom: the count parameter is created by the wrapper too
            members[-2]['to_create'] = True
        f = {
            'name': fname,
            'members': members,
            'allocate': is_allocate,
            'count': is_count,
            'return_boolean': True if ftype == 'VkBool32' else False,
            'return_result': True if ftype == 'VkResult' else False,
            'return_member': return_member,
            'is_extension': fname in extension_function_names
        }
        model['functions'].append(f)
def model_ext_functions(vk, model):
    """Fill the model with extension functions, keyed by extension type.

    model['ext_functions'] = {'instance': {name: resolved_name}, 'device': {...}}
    Aliased commands map to their original (non-alias) name.
    """
    model['ext_functions'] = {'instance': {}, 'device': {}}
    # invert the alias mapping for easier lookup
    aliased_by = {target: source for source, target in model['alias'].items()}
    for extension in get_extensions_filtered(vk):
        for req in extension['require']:
            if not req.get('command'):
                continue
            ext_type = extension['@type']
            for cmd in req['command']:
                cmd_name = cmd['@name']
                model['ext_functions'][ext_type][cmd_name] = \
                    aliased_by.get(cmd_name, cmd_name)
def model_alias(vk, model):
    """Fill the model with alias since V1

    model['alias'] maps the alias target (old name) to the aliasing entry's
    own name, for both handle types and commands.
    """
    model['alias'] = {}
    registry = vk['registry']
    # handle types
    for type_node in registry['types']['type']:
        if type_node.get('@category', None) != 'handle':
            continue
        target = type_node.get('@alias')
        if target:
            model['alias'][target] = type_node['@name']
    # commands
    for command in registry['commands']['command']:
        target = command.get('@alias')
        if target:
            model['alias'][target] = command['@name']
def init():
    """Parse vk.xml (located next to this script) into a nested dict.

    force_list guarantees 'enum', 'command' and 'member' nodes are always
    lists, even when a parent happens to contain a single child.
    """
    with open(path.join(HERE, 'vk.xml')) as f:
        xml = f.read()
    return xmltodict.parse(xml, force_list=('enum', 'command', 'member'))
def get_extensions_filtered(vk):
    """Return registry extensions, skipping placeholder 'DO_NOT_USE' entries."""
    kept = []
    for ext in vk['registry']['extensions']['extension']:
        if 'DO_NOT_USE' in ext.get('@name', 'dummy'):
            continue
        kept.append(ext)
    return kept
def format_vk(vk):
    """Normalise vk before using it: force each extension's 'require' to a list."""
    for ext in get_extensions_filtered(vk):
        if not isinstance(ext['require'], list):
            ext['require'] = [ext['require']]
def generate_py():
    """Generate the python output file

    Parses vk.xml, builds the intermediate model dict (aliases are modelled
    first because later steps look them up), then renders the jinja2
    template into vulkan/_vulkan.py.
    """
    model = {}
    vk = init()
    format_vk(vk)
    model_alias(vk, model)
    model_typedefs(vk, model)
    model_enums(vk, model)
    model_macros(vk, model)
    model_funcpointers(vk, model)
    model_exceptions(vk, model)
    model_constructors(vk, model)
    model_functions(vk, model)
    model_ext_functions(vk, model)
    env = jinja2.Environment(
        autoescape=False,
        trim_blocks=True,
        lstrip_blocks=True,
        loader=jinja2.FileSystemLoader(HERE)
    )
    out_file = path.join(HERE, path.pardir, 'vulkan', '_vulkan.py')
    with open(out_file, 'w') as out:
        out.write(env.get_template('vulkan.template.py').render(model=model))
def generate_cdef():
    """Generate the cdef output file

    Runs the C preprocessor over vulkan.h (with fake libc headers so no real
    system includes are needed) and writes the flattened result to
    vulkan/vulkan.cdef.h for cffi consumption.  Raises CalledProcessError
    if cpp fails.
    """
    include_libc_path = path.join(HERE, 'fake_libc_include')
    include_vulkan_path = path.join(HERE, 'vulkan_include')
    out_file = path.join(HERE, path.pardir, 'vulkan', 'vulkan.cdef.h')
    header = path.join(include_vulkan_path, 'vulkan.h')
    # -P strips #line markers; the -D flags enable every window-system
    # specific section of the header
    command = ['cpp',
               '-std=c99',
               '-P',
               '-nostdinc',
               '-I' + include_libc_path,
               '-I' + include_vulkan_path,
               '-o' + out_file,
               '-DVK_USE_PLATFORM_XCB_KHR',
               '-DVK_USE_PLATFORM_WAYLAND_KHR',
               '-DVK_USE_PLATFORM_ANDROID_KHR',
               '-DVK_USE_PLATFORM_WIN32_KHR',
               '-DVK_USE_PLATFORM_XLIB_KHR',
               header]
    subprocess.run(command, check=True)
def main():
    """Main function to generate files (C cdef first, then the Python wrapper)"""
    generate_cdef()
    generate_py()
# Script entry point.
if __name__ == '__main__':
    main()
|
|
# -*- coding: utf-8 -*-
from sklearn.linear_model import LinearRegression, Ridge
from sklearn import svm
from SuperGLU.Classifiers.Metrics import calcR2
from SuperGLU.Classifiers.ModelFeature import FeatureSelector, ModelFeature
# Builder for Classifiers
#------------------------------
class ClassifierBuilder(object):
    """
    Class for building a classifier from data, which creates both the feature set and
    the trained classifier from the available data.
    """

    def __init__(self, modelMaker, features=None, params=None, modelParams=None):
        """
        Initialize the Classifier Builder
        @param modelMaker: A callable class that creates a trainable model with
            certain features and expected model params
        @param features: A list of feature types/selectors, where one feature type
            might make many features (e.g., "every n-tuple of words that occurs")
        @param params: Parameters for evaluating feature types
        @param modelParams: Parameters to pass in to the model maker (e.g., update
            rates, convergence criteria)
        """
        if features is None: features = []
        if params is None: params = {}
        if modelParams is None: modelParams = {}
        self._modelMaker = modelMaker
        self._featureTypes = features
        self._params = params
        self._modelParams = modelParams

    # ------------------------------------------------
    # Functions to Generate a Set of Possible Features
    # ------------------------------------------------
    def getFeatureTypes(self):
        """ The names of the types of features to create """
        return [x.getName() for x in self._featureTypes]

    def selectAllFeatures(self, inputs, outputs=None):
        """ Identify the set of possible features across all feature types """
        allFeatures = []
        for featureType in self._featureTypes:
            features = self.selectFeatures(featureType, inputs, outputs)
            allFeatures.extend(features)
        # Bug fix: the original re-sorted only the LAST feature type's
        # features and discarded the result (dead statement).  Ordering is
        # handled in makeModel(), which sorts the features by name.
        return allFeatures

    def selectFeatures(self, featureType, inputs, outputs=None):
        """ Identify the set of possible features for a given feature type """
        if isinstance(featureType, FeatureSelector):
            features = featureType(inputs, outputs, self._params)
        elif isinstance(featureType, ModelFeature):
            features = [featureType]
        else:
            raise TypeError('Invalid feature selector or feature, got: %s'%(featureType,))
        return features

    # -------------------------------------------------
    # Functions to Calculate a Matrix of Feature Values
    # -------------------------------------------------
    def calculateFeatureValues(self, inputs, outputs=None):
        """ Calculate the matrix of feature values to use for training """
        features = self.selectAllFeatures(inputs, outputs)
        inputFeatureSets = []
        for inp in inputs:
            inpVals = [f(inp) for f in features]
            inputFeatureSets.append(inpVals)
        return inputFeatureSets

    def makeFeatureTable(self, inputs, outputs):
        """ Create a table of features, where the outputs are added as a final column """
        features = self.selectAllFeatures(inputs, outputs)
        inputs = self.calculateFeatureValues(inputs, outputs)
        header = [f.getName() for f in features] + ['Output']
        outRecords = [header]
        for i, inp in enumerate(inputs):
            outRecord = inp + [outputs[i]]
            outRecords.append(outRecord)
        return outRecords

    # -------------------------------------------------
    # Functions to Train Models based on Data
    # -------------------------------------------------
    def makeModel(self, features, inputs, outputs):
        """ Create a trained model from the given features, inputs, and outputs """
        # sort features by name so the model's column order is deterministic
        featureVals = sorted([(f.getName(), f) for f in features])
        featureVals = [f for name, f in featureVals]
        model = self._modelMaker(featureVals, dict(self._modelParams))
        model.train(inputs, outputs)
        return model

    def __call__(self, inputs, outputs):
        """ Run the Classifier Builder on data, selecting features and training a model with them """
        allFeatures = self.selectAllFeatures(inputs, outputs)
        model = self.makeModel(allFeatures, inputs, outputs)
        return model
# Stored Classifiers
#------------------------------
class ClassifierModel(object):
    """ Wrapper/base class for models that are trained based on some features """

    def __init__(self, features, params=None):
        """
        Initialize the model
        @param features: The set of features to calculate, based on some input data
        @param params: Parameters needed to train the model
        """
        self._params = {} if params is None else dict(params)
        self._features = features

    def __call__(self, value):
        """
        Turn an instance into features, then predict the output
        @param value: Any valid object that the features can process
        @return: Predicted output label
        """
        return self._predict(self.makeFeatures(value))

    def train(self, inputs, outputs):
        """
        Train a classifier model
        @param inputs: Input instances
        @param outputs: Output labels
        """
        featureRows = [self.makeFeatures(item) for item in inputs]
        self._train(featureRows, outputs)

    def getFeatureNames(self):
        """ Return the names of all features """
        return [feature.getName() for feature in self._features]

    def getFeatureImportances(self):
        """ Return the relative importance weights of all features, if available """
        return [(featureName, '?') for featureName in self.getFeatureNames()]

    def makeFeatures(self, anInput):
        """ Turn an instance input into a vector of features """
        return [feature(anInput) for feature in self._features]

    def _predict(self, features):
        """ Predict an output label based on feature values """
        raise NotImplementedError

    def _train(self, features, outputs):
        """ Train some classifier model (no-op in the base class) """
        pass

    def calcR2(self, inputs, outputs):
        """ Calculate a basic R2 value for the given input-output set """
        predictions = [self(item) for item in inputs]
        return calcR2(predictions, outputs)

    def score(self, inputs, outputs):
        """ Calculate a score for this model's predictions based on the input-output set """
        return self.calcR2(inputs, outputs)
class LinearClassModel(ClassifierModel):
    """ Wrapper for a linear (ridge) regression model with optional output clamping """

    def __init__(self, features, params=None):
        """
        Initialize the classifier
        @param features: The set of features to calculate on each input
        @param params: Model parameters; 'ValMin' (output floor) and 'ValMax'
            (output ceiling) are consumed here, the rest go to the estimator
        """
        params = {} if params is None else dict(params)
        self._valMin = params.pop('ValMin', None)
        self._valMax = params.pop('ValMax', None)
        super(LinearClassModel, self).__init__(features, params)
        self._model = None

    def getFeatureImportances(self):
        """ Return feature importances, which are the coefficients """
        importances = []
        for index, featureName in enumerate(self.getFeatureNames()):
            importances.append((featureName, self._model.coef_[index]))
        return importances

    def _train(self, features, outputs):
        """ Fit a ridge regression model (L2-regularised linear regression) """
        estimator = Ridge(**self._params)
        estimator.fit(features, outputs)
        self._model = estimator

    def _predict(self, features):
        """ Predict labels for the feature vector, applying the min-max as bounds """
        prediction = float(self._model.predict([features]))
        if self._valMin is not None and prediction < self._valMin:
            prediction = self._valMin
        if self._valMax is not None and prediction > self._valMax:
            prediction = self._valMax
        return prediction
class SVMModel(ClassifierModel):
    """ Wrapper for an SVM model """
    DEFAULT_KERNEL = 'linear'
    DEFAULT_C = 1.0
    # NOTE(review): class_weight='auto' was removed in modern scikit-learn
    # in favour of 'balanced' — confirm the installed sklearn version.
    DEFAULT_WEIGHT = 'auto'

    def __init__(self, features, params=None):
        """
        Initialize the SVM Model wrapper
        @param features: The set of features to calculate on each input
        @param params: Model parameters; 'OutputTransform' (a transform applied
            to training outputs, e.g. binning) is consumed here, the rest are
            passed to the SVC estimator
        """
        # Bug fix: params=None previously crashed on params.pop(); also copy
        # the dict so the caller's params are not mutated.
        if params is None: params = {}
        params = dict(params)
        self._outputTransform = params.pop('OutputTransform', lambda x: x)
        super(SVMModel, self).__init__(features, params)
        self._model = None
        self._loadDefaultParams(False)

    def _loadDefaultParams(self, overwrite=False):
        """ Merge class-level defaults into self._params (optionally overwriting) """
        # Bug fix: DEFAULT_WEIGHT was defined but ignored (a literal 'auto'
        # was used instead).
        params = {'kernel': self.DEFAULT_KERNEL,
                  'C': self.DEFAULT_C,
                  'class_weight': self.DEFAULT_WEIGHT}
        for k, v in params.items():
            if overwrite or k not in self._params:
                self._params[k] = v

    def _train(self, features, outputs):
        """ Train an SVM model, transforming the training outputs first """
        outputs = [self._outputTransform(o) for o in outputs]
        # Bug fix: previously hardcoded kernel/C/class_weight, silently
        # ignoring self._params and _loadDefaultParams.  The defaults are
        # identical, so behaviour is unchanged unless params were supplied.
        est = svm.SVC(**self._params)
        est.fit(features, outputs)
        self._model = est

    def _predict(self, features):
        """ Predict a value using the SVM model """
        return float(self._model.predict([features]))
|
|
import logging
import os
import re
from urllib.parse import urlsplit
class Whitelist():
    def __init__(self, config):
        """Load every configured white/benign list file into a list attribute.

        @param config: a ConfigParser(-like) mapping with 'Whitelists' and
            'Benignlists' sections whose values are file paths
        """
        # Initiate logging.
        self.logger = logging.getLogger()
        # Save the config. This should be a ConfigParser object.
        self.config = config
        # This will read your config file and create class variables
        # named <section>_<key>. For example, if your config file has
        # a section named "Whitelists" and a key in that section named
        # "ip" with a path to your IP whitelist file, this code will
        # read that file, create a list out of its lines, and assign it
        # to a variable as self.Whitelists_ip.
        sections_to_parse = ["Whitelists", "Benignlists"]
        for section in sections_to_parse:
            for key in self.config[section]:
                section_key = section + "_" + key
                if not hasattr(self, section_key):
                    self.logger.debug("Loading white/benign list: " + self.config[section][key])
                    try:
                        with open(self.config[section][key]) as f:
                            lines = f.read().splitlines()
                        # Remove any lines that begin with #.
                        lines = [line for line in lines if not line.startswith("#")]
                        # Remove any blank lines.
                        lines = [line for line in lines if line]
                        # Store the lines list at self.<section>_<key>
                        setattr(self, section_key, lines)
                    except Exception:
                        # On failure the attribute is simply left unset; the
                        # is_*_ checks below treat a missing list as "no match".
                        self.logger.exception("Error loading white/benign list: " + self.config[section][key])
# Override __get/setstate__ in case someone
# wants to pickle an object of this class.
def __getstate__(self):
d = dict(self.__dict__)
if "logger" in d:
del d["logger"]
return d
def __setstate__(self, d):
self.__dict__.update(d)
def is_valid_tld(self, domain):
if hasattr(self, 'Whitelists_valid_tlds'):
if any(domain.endswith(tld) for tld in self.Whitelists_valid_tlds):
return True
else:
return False
else:
return True
def is_domain_url_shortener(self, domain):
if hasattr(self, "Whitelists_shortlinks"):
if domain in self.Whitelists_shortlinks:
return True
else:
return False
else:
return False
def is_tor_node(self, ip):
if hasattr(self, "Benignlists_tor_nodes"):
if ip in self.Benignlists_tor_nodes:
return True
else:
return False
else:
return False
def is_ip_whitelisted(self, ip):
if hasattr(self, "Whitelists_ip"):
if ip in self.Whitelists_ip:
return True
else:
for regex in self.Whitelists_ip:
pattern = re.compile(regex)
if pattern.search(ip):
return True
else:
return False
else:
return False
def is_ip_benign(self, ip):
if hasattr(self, "Benignlists_ip"):
if ip in self.Benignlists_ip:
return True
else:
for regex in self.Benignlists_ip:
pattern = re.compile(regex)
if pattern.search(ip):
return True
else:
return False
else:
return False
def is_domain_whitelisted(self, domain):
if hasattr(self, "Whitelists_domain"):
for regex in self.Whitelists_domain:
pattern = re.compile(regex)
if pattern.search(domain):
return True
else:
return False
else:
return False
def is_domain_benign(self, domain):
if hasattr(self, "Benignlists_domain"):
for regex in self.Benignlists_domain:
pattern = re.compile(regex)
if pattern.search(domain):
return True
else:
return False
else:
return False
def is_file_path_whitelisted(self, file_path):
if hasattr(self, "Whitelists_filepath"):
for regex in self.Whitelists_filepath:
pattern = re.compile(regex)
if pattern.search(file_path):
return True
else:
return False
else:
return False
def is_file_path_benign(self, file_path):
if hasattr(self, "Benignlists_filepath"):
for regex in self.Benignlists_filepath:
pattern = re.compile(regex)
if pattern.search(file_path):
return True
else:
return False
else:
return False
def is_file_name_whitelisted(self, file_name):
if hasattr(self, "Whitelists_filename"):
for regex in self.Whitelists_filename:
pattern = re.compile(regex)
if pattern.search(file_name):
return True
else:
return False
else:
return False
def is_file_name_benign(self, file_name):
if hasattr(self, "Benignlists_filename"):
for regex in self.Benignlists_filename:
pattern = re.compile(regex)
if pattern.search(file_name):
return True
else:
return False
else:
return False
def is_email_whitelisted(self, email):
if hasattr(self, "Whitelists_email"):
for regex in self.Whitelists_email:
pattern = re.compile(regex)
if pattern.search(email):
return True
else:
return False
else:
return False
def is_email_benign(self, email):
    """Return True if *email* matches any regex in ``Benignlists_email``."""
    if not hasattr(self, "Benignlists_email"):
        return False
    # Try every pattern; only report a miss once all of them have failed
    # (previously the loop returned after testing just the first pattern).
    return any(re.search(regex, email) for regex in self.Benignlists_email)
def is_md5_whitelisted(self, md5):
    """Return True if *md5* matches any regex in ``Whitelists_md5``."""
    if not hasattr(self, "Whitelists_md5"):
        return False
    # Try every pattern; only report a miss once all of them have failed
    # (previously the loop returned after testing just the first pattern).
    return any(re.search(regex, md5) for regex in self.Whitelists_md5)
def is_md5_benign(self, md5):
    """Return True if *md5* matches any regex in ``Benignlists_md5``."""
    if not hasattr(self, "Benignlists_md5"):
        return False
    # Try every pattern; only report a miss once all of them have failed
    # (previously the loop returned after testing just the first pattern).
    return any(re.search(regex, md5) for regex in self.Benignlists_md5)
def is_sha1_whitelisted(self, sha1):
    """Return True if *sha1* matches any regex in ``Whitelists_sha1``."""
    if not hasattr(self, "Whitelists_sha1"):
        return False
    # Try every pattern; only report a miss once all of them have failed
    # (previously the loop returned after testing just the first pattern).
    return any(re.search(regex, sha1) for regex in self.Whitelists_sha1)
def is_sha1_benign(self, sha1):
    """Return True if *sha1* matches any regex in ``Benignlists_sha1``."""
    if not hasattr(self, "Benignlists_sha1"):
        return False
    # Try every pattern; only report a miss once all of them have failed
    # (previously the loop returned after testing just the first pattern).
    return any(re.search(regex, sha1) for regex in self.Benignlists_sha1)
def is_sha256_whitelisted(self, sha256):
    """Return True if *sha256* matches any regex in ``Whitelists_sha256``."""
    if not hasattr(self, "Whitelists_sha256"):
        return False
    # Try every pattern; only report a miss once all of them have failed
    # (previously the loop returned after testing just the first pattern).
    return any(re.search(regex, sha256) for regex in self.Whitelists_sha256)
def is_sha256_benign(self, sha256):
    """Return True if *sha256* matches any regex in ``Benignlists_sha256``."""
    if not hasattr(self, "Benignlists_sha256"):
        return False
    # Try every pattern; only report a miss once all of them have failed
    # (previously the loop returned after testing just the first pattern).
    return any(re.search(regex, sha256) for regex in self.Benignlists_sha256)
def is_registry_whitelisted(self, reg_key):
    """Return True if *reg_key* matches any regex in ``Whitelists_registry``."""
    if not hasattr(self, "Whitelists_registry"):
        return False
    # Try every pattern; only report a miss once all of them have failed
    # (previously the loop returned after testing just the first pattern).
    return any(re.search(regex, reg_key) for regex in self.Whitelists_registry)
def is_registry_benign(self, reg_key):
    """Return True if *reg_key* matches any regex in ``Benignlists_registry``."""
    if not hasattr(self, "Benignlists_registry"):
        return False
    # Try every pattern; only report a miss once all of them have failed
    # (previously the loop returned after testing just the first pattern).
    return any(re.search(regex, reg_key) for regex in self.Benignlists_registry)
def is_url_whitelisted(self, url):
    """Return True if *url*'s host is whitelisted (as domain or IP) or the
    full URL matches any regex in ``Whitelists_url``.
    """
    # Parse the URL so we can first see if the domain or IP is whitelisted.
    # (The original duplicated this assignment: ``parsed_url = parsed_url =``.)
    parsed_url = urlsplit(url)
    if parsed_url.netloc:
        if self.is_domain_whitelisted(parsed_url.netloc):
            return True
        if self.is_ip_whitelisted(parsed_url.netloc):
            return True
    # Now check if the URI path is whitelisted.
    # TODO...
    # Finally, see if the URL as a whole is whitelisted. Every pattern is
    # tried; a miss is only reported after all patterns have failed.
    if not hasattr(self, "Whitelists_url"):
        return False
    return any(re.search(regex, url) for regex in self.Whitelists_url)
def is_url_benign(self, url):
    """Return True if *url* matches any regex in ``Benignlists_url``."""
    if not hasattr(self, "Benignlists_url"):
        return False
    # Try every pattern; only report a miss once all of them have failed
    # (previously the loop returned after testing just the first pattern).
    return any(re.search(regex, url) for regex in self.Benignlists_url)
def is_mutex_whitelisted(self, mutex):
    """Return True if *mutex* is listed verbatim or matches any regex in
    ``Whitelists_mutex``.
    """
    if not hasattr(self, "Whitelists_mutex"):
        return False
    if mutex in self.Whitelists_mutex:
        return True
    # Try every pattern; only report a miss once all of them have failed
    # (previously the loop returned after testing just the first pattern).
    return any(re.search(regex, mutex) for regex in self.Whitelists_mutex)
def is_mutex_benign(self, mutex):
    """Return True if *mutex* is listed verbatim or matches any regex in
    ``Benignlists_mutex``.
    """
    if not hasattr(self, "Benignlists_mutex"):
        return False
    if mutex in self.Benignlists_mutex:
        return True
    # Try every pattern; only report a miss once all of them have failed
    # (previously the loop returned after testing just the first pattern).
    return any(re.search(regex, mutex) for regex in self.Benignlists_mutex)
def is_thing_whitelisted(self, thing):
    """Check *thing* against every whitelist category, short-circuiting on
    the first hit, exactly like the original or-chain.
    """
    checks = (self.is_ip_whitelisted,
              self.is_domain_whitelisted,
              self.is_file_path_whitelisted,
              self.is_file_name_whitelisted,
              self.is_email_whitelisted,
              self.is_md5_whitelisted,
              self.is_sha1_whitelisted,
              self.is_sha256_whitelisted,
              self.is_registry_whitelisted,
              self.is_url_whitelisted,
              self.is_mutex_whitelisted)
    # Mirror or-chain semantics: return the first truthy result, otherwise
    # the value produced by the final check.
    result = False
    for check in checks:
        result = check(thing)
        if result:
            break
    return result
def is_thing_benign(self, thing):
    """Check *thing* against every benign-list category, short-circuiting on
    the first hit, exactly like the original or-chain.
    """
    checks = (self.is_ip_benign,
              self.is_domain_benign,
              self.is_file_path_benign,
              self.is_file_name_benign,
              self.is_email_benign,
              self.is_md5_benign,
              self.is_sha1_benign,
              self.is_sha256_benign,
              self.is_registry_benign,
              self.is_url_benign,
              self.is_mutex_benign)
    # Mirror or-chain semantics: return the first truthy result, otherwise
    # the value produced by the final check.
    result = False
    for check in checks:
        result = check(thing)
        if result:
            break
    return result
|
|
# Authors: Marijn van Vliet <w.m.vanvliet@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from .constants import FIFF
from .proj import _has_eeg_average_ref_proj, make_eeg_average_ref_proj
from .pick import pick_types
from .base import _BaseRaw
from ..evoked import Evoked
from ..epochs import Epochs
from ..utils import logger
def _apply_reference(inst, ref_from, ref_to=None, copy=True):
    """Apply a custom EEG referencing scheme.

    Calculates a reference signal by taking the mean of a set of channels and
    applies the reference to another set of channels. Input data can be in the
    form of Raw, Epochs or Evoked.

    Parameters
    ----------
    inst : instance of Raw | Epochs | Evoked
        Data containing the EEG channels and reference channel(s).
    ref_from : list of str
        The names of the channels to use to construct the reference. If an
        empty list is specified, the data is assumed to already have a proper
        reference and MNE will not attempt any re-referencing of the data.
    ref_to : list of str | None
        The names of the channels to apply the reference to. By default,
        all EEG channels are chosen.
    copy : bool
        Specifies whether the data will be copied (True) or modified in place
        (False). Defaults to True.

    Returns
    -------
    inst : instance of Raw | Epochs | Evoked
        The data with EEG channels rereferenced.
    ref_data : array, shape (n_times,)
        Array of reference data subtracted from EEG channels.

    Notes
    -----
    1. Do not use this function to apply an average reference. By default, an
       average reference projection has already been added upon loading raw
       data.
    2. If the reference is applied to any EEG channels, this function removes
       any pre-existing average reference projections.
    3. During source localization, the EEG signal should have an average
       reference.
    4. The data must be preloaded.

    See Also
    --------
    set_eeg_reference : Convenience function for creating an EEG reference.
    set_bipolar_reference : Convenience function for creating a bipolar
                            reference.
    """
    # Check to see that data is preloaded (Evoked is always in memory).
    if not isinstance(inst, Evoked) and not inst.preload:
        raise RuntimeError('Data needs to be preloaded. Use '
                           'preload=True (or string) in the constructor.')
    eeg_idx = pick_types(inst.info, eeg=True, meg=False, ref_meg=False)
    if ref_to is None:
        # Default: re-reference all EEG channels.
        ref_to = [inst.ch_names[i] for i in eeg_idx]
    if copy:
        inst = inst.copy()
    # After referencing, existing SSPs might not be valid anymore.
    projs = inst.info['projs']
    # BUG FIX: the original deleted entries by index while enumerating the
    # live list, which skips the projection following each deletion.
    # Iterate over a snapshot and remove from the live list instead.
    for proj in list(projs):
        if (not proj['active'] and
                len([ch for ch in (ref_from + ref_to)
                     if ch in proj['data']['col_names']]) > 0):
            # Remove any average reference projections, apply any other types
            if proj['desc'] == 'Average EEG reference' or \
                    proj['kind'] == FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF:
                logger.info('Removing existing average EEG reference '
                            'projection.')
                projs.remove(proj)
            else:
                logger.info(
                    'Inactive signal space projection (SSP) operators are '
                    'present that operate on sensors involved in the current '
                    'referencing scheme. Applying them now. Be aware that '
                    'after re-referencing, these operators will be invalid.')
                inst.apply_proj()
                break
    # Translate channel names into indices.
    ref_from = [inst.ch_names.index(ch) for ch in ref_from]
    ref_to = [inst.ch_names.index(ch) for ch in ref_to]
    if isinstance(inst, Evoked):
        data = inst.data
    else:
        data = inst._data
    # Compute reference as the mean over the ref_from channels and subtract.
    if len(ref_from) > 0:
        ref_data = data[..., ref_from, :].mean(-2)
        if isinstance(inst, Epochs):
            data[:, ref_to, :] -= ref_data[:, np.newaxis, :]
        else:
            data[ref_to] -= ref_data
    else:
        ref_data = None
    # If the reference touches EEG electrodes, note in the info that a non-CAR
    # has been applied.
    if len(np.intersect1d(ref_to, eeg_idx)) > 0:
        inst.info['custom_ref_applied'] = True
    return inst, ref_data
def add_reference_channels(inst, ref_channels, copy=True):
    """Add reference channels to data that consists of all zeros.

    Adds reference channels to data that were not included during recording.
    This is useful when you need to re-reference your data to different
    channel. These added channels will consist of all zeros.

    Parameters
    ----------
    inst : instance of Raw | Epochs | Evoked
        Instance of Raw or Epochs with EEG channels and reference channel(s).
    ref_channels : str | list of str
        Name of the electrode(s) which served as the reference in the
        recording. If a name is provided, a corresponding channel is added
        and its data is set to 0. This is useful for later re-referencing.
    copy : bool
        Specifies whether the data will be copied (True) or modified in place
        (False). Defaults to True.

    Returns
    -------
    inst : instance of Raw | Epochs | Evoked
        Data with added EEG reference channels.
    """
    # Check to see that data is preloaded
    if not isinstance(inst, Evoked) and not inst.preload:
        raise RuntimeError('Data needs to be preloaded.')
    if isinstance(ref_channels, str):
        ref_channels = [ref_channels]
    elif not isinstance(ref_channels, list):
        raise ValueError("`ref_channels` should be either str or list of str. "
                         "%s was provided." % type(ref_channels))
    for ch in ref_channels:
        if ch in inst.info['ch_names']:
            raise ValueError("Channel %s already specified in inst." % ch)
    if copy:
        inst = inst.copy()
    # Append one all-zero row (per reference channel) to the data array.
    if isinstance(inst, Evoked):
        data = inst.data
        refs = np.zeros((len(ref_channels), data.shape[1]))
        data = np.vstack((data, refs))
        inst.data = data
    elif isinstance(inst, _BaseRaw):
        data = inst._data
        refs = np.zeros((len(ref_channels), data.shape[1]))
        data = np.vstack((data, refs))
        inst._data = data
    elif isinstance(inst, Epochs):
        data = inst._data
        x, y, z = data.shape
        refs = np.zeros((x * len(ref_channels), z))
        data = np.vstack((data.reshape((x * y, z), order='F'), refs))
        data = data.reshape(x, y + len(ref_channels), z, order='F')
        inst._data = data
    else:
        raise TypeError("inst should be Raw, Epochs, or Evoked instead of %s."
                        % type(inst))
    nchan = len(inst.info['ch_names'])
    # BUG FIX: the original used ``if ch in ref_channels`` with a leftover
    # loop variable, so at most one channel-info entry was ever appended.
    # Create one entry per requested reference channel instead.
    for i, ch in enumerate(ref_channels):
        chan_info = {'ch_name': ch,
                     'coil_type': FIFF.FIFFV_COIL_EEG,
                     'kind': FIFF.FIFFV_EEG_CH,
                     'logno': nchan + 1 + i,
                     'scanno': nchan + 1 + i,
                     'cal': 1,
                     'range': 1.,
                     'unit_mul': 0.,
                     'unit': FIFF.FIFF_UNIT_V,
                     'coord_frame': FIFF.FIFFV_COORD_HEAD,
                     'eeg_loc': np.zeros(3),
                     'loc': np.zeros(12)}
        inst.info['chs'].append(chan_info)
    inst.info['ch_names'].extend(ref_channels)
    inst.info['nchan'] = len(inst.info['ch_names'])
    if isinstance(inst, _BaseRaw):
        # Unity calibration for the synthetic zero channels.
        inst._cals = np.hstack((inst._cals, [1] * len(ref_channels)))
    return inst
def set_eeg_reference(inst, ref_channels=None, copy=True):
    """Rereference EEG channels to new reference channel(s).

    If multiple reference channels are specified, they will be averaged. If
    no reference channels are specified, an average reference will be applied.

    Parameters
    ----------
    inst : instance of Raw | Epochs | Evoked
        Instance of Raw or Epochs with EEG channels and reference channel(s).
    ref_channels : list of str | None
        The names of the channels to use to construct the reference. If None is
        specified here, an average reference will be applied in the form of an
        SSP projector. If an empty list is specified, the data is assumed to
        already have a proper reference and MNE will not attempt any
        re-referencing of the data. Defaults to an average reference (None).
    copy : bool
        Specifies whether the data will be copied (True) or modified in place
        (False). Defaults to True.

    Returns
    -------
    inst : instance of Raw | Epochs | Evoked
        Data with EEG channels re-referenced.
    ref_data : array
        Array of reference data subtracted from EEG channels.

    Notes
    -----
    1. If a reference is requested that is not the average reference, this
       function removes any pre-existing average reference projections.
    2. During source localization, the EEG signal should have an average
       reference.
    3. In order to apply a reference other than an average reference, the data
       must be preloaded.

    .. versionadded:: 0.9.0

    See Also
    --------
    set_bipolar_reference : Convenience function for creating bipolar
                            references.
    """
    # Explicit reference channels given: delegate to the generic helper.
    if ref_channels is not None:
        logger.info('Applying a custom EEG reference.')
        return _apply_reference(inst, ref_channels, copy=copy)
    # Average reference (CAR) requested.
    if _has_eeg_average_ref_proj(inst.info['projs']):
        logger.warning('An average reference projection was already '
                       'added. The data has been left untouched.')
        return inst, None
    # Add an (inactive) average-reference projector; no data is modified.
    inst.info['custom_ref_applied'] = False
    inst.add_proj(make_eeg_average_ref_proj(inst.info, activate=False))
    return inst, None
def set_bipolar_reference(inst, anode, cathode, ch_name=None, ch_info=None,
                          copy=True):
    """Rereference selected channels using a bipolar referencing scheme.

    A bipolar reference takes the difference between two channels (the anode
    minus the cathode) and adds it as a new virtual channel. The original
    channels will be dropped.

    Multiple anodes and cathodes can be specified, in which case multiple
    virtual channels will be created. The 1st cathode will be subtracted
    from the 1st anode, the 2nd cathode from the 2nd anode, etc.

    By default, the virtual channels will be annotated with channel info of
    the anodes, their locations set to (0, 0, 0) and coil types set to
    EEG_BIPOLAR.

    Parameters
    ----------
    inst : instance of Raw | Epochs | Evoked
        Data containing the unreferenced channels.
    anode : str | list of str
        The name(s) of the channel(s) to use as anode in the bipolar reference.
    cathode : str | list of str
        The name(s) of the channel(s) to use as cathode in the bipolar
        reference.
    ch_name : str | list of str | None
        The channel name(s) for the virtual channel(s) containing the resulting
        signal. By default, bipolar channels are named after the anode and
        cathode, but it is recommended to supply a more meaningful name.
    ch_info : dict | list of dict | None
        This parameter can be used to supply a dictionary (or a dictionary for
        each bipolar channel) containing channel information to merge in,
        overwriting the default values. Defaults to None.
    copy : bool
        Whether to operate on a copy of the data (True) or modify it in-place
        (False). Defaults to True.

    Returns
    -------
    inst : instance of Raw | Epochs | Evoked
        Data with the specified channels re-referenced.

    Notes
    -----
    1. If the anodes contain any EEG channels, this function removes
       any pre-existing average reference projections.
    2. During source localization, the EEG signal should have an average
       reference.
    3. The data must be preloaded.

    .. versionadded:: 0.9.0

    See Also
    --------
    set_eeg_reference : Convenience function for creating an EEG reference.
    """
    # Normalize anode/cathode to parallel lists of equal length.
    if not isinstance(anode, list):
        anode = [anode]
    if not isinstance(cathode, list):
        cathode = [cathode]
    if len(anode) != len(cathode):
        raise ValueError('Number of anodes must equal the number of cathodes.')
    # Default virtual channel names: "<anode>-<cathode>".
    if ch_name is None:
        ch_name = ['%s-%s' % ac for ac in zip(anode, cathode)]
    elif not isinstance(ch_name, list):
        ch_name = [ch_name]
    if len(ch_name) != len(anode):
        raise ValueError('Number of channel names must equal the number of '
                         'anodes/cathodes.')
    # Check for duplicate channel names (it is allowed to give the name of the
    # anode or cathode channel, as they will be replaced).
    for ch, a, c in zip(ch_name, anode, cathode):
        if ch not in [a, c] and ch in inst.ch_names:
            raise ValueError('There is already a channel named "%s", please '
                             'specify a different name for the bipolar '
                             'channel using the ch_name parameter.' % ch)
    if ch_info is None:
        ch_info = [{} for an in anode]
    elif not isinstance(ch_info, list):
        ch_info = [ch_info]
    if len(ch_info) != len(anode):
        raise ValueError('Number of channel info dictionaries must equal the '
                         'number of anodes/cathodes.')
    # Merge specified and anode channel information dictionaries
    new_ch_info = []
    for an, ci in zip(anode, ch_info):
        new_info = inst.info['chs'][inst.ch_names.index(an)].copy()
        # Set channel location and coil type
        if 'eeg_loc' in new_info:
            new_info['eeg_loc'] = np.zeros((3, 2))
        new_info['loc'] = np.zeros(12)
        new_info['coil_type'] = FIFF.FIFFV_COIL_EEG_BIPOLAR
        # User-supplied info overrides the defaults derived from the anode.
        new_info.update(ci)
        new_ch_info.append(new_info)
    if copy:
        inst = inst.copy()
    # Perform bipolar referencing: subtract the cathode from the anode in
    # place, then rename the anode channel to the virtual channel name.
    for an, ca, name, info in zip(anode, cathode, ch_name, new_ch_info):
        inst, _ = _apply_reference(inst, [ca], [an], copy=False)
        an_idx = inst.ch_names.index(an)
        inst.info['chs'][an_idx] = info
        inst.info['chs'][an_idx]['ch_name'] = name
        inst.info['ch_names'][an_idx] = name
        logger.info('Bipolar channel added as "%s".' % name)
    # Drop cathode channels
    inst.drop_channels(cathode)
    return inst
|
|
# Author: Kyle Kastner
# License: BSD 3-clause
# THEANO_FLAGS="optimizer=None,compute_test_value=raise" python tanh_rnn.py
import numpy as np
import theano
import theano.tensor as T
from scipy import linalg
class sgd(object):
    """Plain stochastic gradient descent.

    Only here for API conformity with other optimizers.
    """
    def __init__(self, params):
        # Stateless; the parameter list is accepted only for interface
        # compatibility with stateful optimizers.
        pass

    def updates(self, params, grads, learning_rate):
        """Return (param, param - learning_rate * grad) pairs, one per param."""
        return [(param, param - learning_rate * grad)
                for param, grad in zip(params, grads)]
def np_zeros(shape):
    """Build a numpy array of zeros in theano's configured float dtype."""
    return np.zeros(shape, dtype=theano.config.floatX)
def np_rand(shape, rng):
    """Build a numpy array of small uniform noise in [-0.005, 0.005)."""
    centered = rng.rand(*shape) - 0.5
    return (0.01 * centered).astype(theano.config.floatX)
def np_ortho(shape, rng, name=None):
    """Build a numpy array of orthonormal values (left singular vectors of
    a random Gaussian matrix). *name* is accepted for API compatibility.
    """
    gaussian = rng.randn(*shape)
    u, _, _ = linalg.svd(gaussian)
    return u.astype(theano.config.floatX)
minibatch_size = 4
# number of input units
n_in = 1
# number of hidden units
n_hid = 10
# number of output units
n_out = 1
# Generate sinewaves offset in phase
n_timesteps = 50
# Triangle-shaped modulation signal: linear ramp 0 -> 5 over the first half,
# mirrored back down over the second half.
control = np.zeros(n_timesteps)
control = np.linspace(0, 5, len(control) // 2)
control = np.concatenate((control, control[::-1]))
def frequency_modulation(modulation_signal, carrier_freq=100.,
                         sampling_freq=44100., modulation_strength=1.):
    """Frequency-modulate a sine carrier with a 1D modulation signal.

    The instantaneous phase deviation is the running integral of the
    modulating signal (approximated by a cumulative sum / sampling_freq).
    """
    assert modulation_signal.ndim == 1
    time = np.arange(len(modulation_signal)) / sampling_freq
    phase_dev = np.cumsum(modulation_signal) / sampling_freq
    phase = carrier_freq * time + modulation_strength * phase_dev
    return np.sin(2. * np.pi * phase)
# Build the dataset: one FM sine, replicated across the minibatch and tiled
# along time so the network sees several periods.
full_sines = frequency_modulation(control, carrier_freq=.1,
                                  sampling_freq=n_timesteps)
full_sines = full_sines[:, None]
full_sines = np.concatenate([full_sines] * minibatch_size, axis=-1).astype(
    theano.config.floatX)
full_sines = full_sines[:, :, None]
full_sines = np.concatenate((full_sines, full_sines), axis=0)
full_sines = np.concatenate((full_sines, full_sines), axis=0)
n_full = 4 * n_timesteps
# Setup dataset and initial hidden vector of zeros
all_sines = full_sines[:2 * n_timesteps]
all_sines = all_sines.astype(theano.config.floatX)
# Next-step prediction: targets are the inputs shifted forward one timestep.
X = all_sines[:-1]
y = all_sines[1:]
h_init = np_zeros((minibatch_size, n_hid))
# input (where first dimension is time)
X_sym = T.tensor3()
# target (where first dimension is time)
y_sym = T.tensor3()
# initial hidden state of the RNN
h0 = T.dmatrix()
# tag.test_value is crucial for debugging! Run the script with
# THEANO_FLAGS="compute_test_value=raise,optimizer=None"
# for cleaner debugging
X_sym.tag.test_value = X[:10]
y_sym.tag.test_value = y[:10]
h0.tag.test_value = h_init
# Using checked versions should allow for both float32 and float64 theano flags
X_check = T.cast(X_sym, theano.config.floatX)
y_check = T.cast(y_sym, theano.config.floatX)
h0_check = T.cast(h0, theano.config.floatX)
# Setup weights
random_state = np.random.RandomState(1999)
# Orthogonal initialization is good for recurrent (square) matrices!
# Advances in Optimizing Recurrent Networks
# Bengio, Boulanger-Lewandowski, Pascanu
# Section 2
# http://arxiv.org/pdf/1212.0901v2.pdf
W_hid_np = np_ortho((n_hid, n_hid), random_state)
W_in_np = np_rand((n_in, n_hid), random_state)
b_in_np = np_zeros((n_hid,))
W_out_np = np_rand((n_hid, n_out), random_state)
b_out_np = np_zeros((n_out,))
# hidden to hidden weights
W_hid = theano.shared(W_hid_np, borrow=True)
# input to hidden layer weights
W_in = theano.shared(W_in_np, borrow=True)
# input to hidden bias
b_in = theano.shared(b_in_np, borrow=True)
# hidden to output layer weights
W_out = theano.shared(W_out_np, borrow=True)
# hidden to output bias
b_out = theano.shared(b_out_np, borrow=True)
# Begin model
# linear projection from input "into" recurrent
# do it here (outside scan) to better exploit parallelism
proj_X = T.dot(X_check, W_in) + b_in
theano.printing.Print("proj_X.shape")(proj_X.shape)
# recurrent function (using tanh activation function)
def step(in_t, h_tm1, W_hid):
    # One RNN timestep: the input projection (proj_X) is precomputed, so
    # only the recurrent term is added here before the nonlinearity.
    h_t = T.tanh(in_t + T.dot(h_tm1, W_hid))
    theano.printing.Print("h_t.shape")(h_t.shape)
    return h_t
# Another debug tip - try calling your scan function directly to debug internals
# step(X_check[0], h0_check, W_in, W_hid)
# the hidden state `h` for the entire sequence
h, _ = theano.scan(step,
                   sequences=[proj_X],
                   outputs_info=[h0_check],
                   non_sequences=[W_hid])
theano.printing.Print("h.shape")(h.shape)
# linear output activation
y_hat = T.dot(h, W_out) + b_out
theano.printing.Print("y_hat.shape")(y_hat.shape)
# Parameters of the model
params = [W_in, b_in, W_hid, W_out, b_out]
# error between output and target (summed squared error)
cost = ((y_check - y_hat) ** 2).sum()
# gradients on the weights using BPTT
grads = T.grad(cost, params)
# Use stochastic gradient descent to optimize
opt = sgd(params)
learning_rate = 0.001
updates = opt.updates(params, grads, learning_rate)
# By returning h we can train while preserving hidden state from previous
# samples. This can allow for truncated backprop through time (TBPTT)!
fit_function = theano.function([X_sym, y_sym, h0], [cost, h], updates=updates)
predict_function = theano.function([X_sym, h0], [y_hat, h])
epochs = 2000
# Print 50 status updates along with last
status_points = list(range(epochs))
status_points = status_points[::epochs // 50] + [status_points[-1]]
for i in range(epochs):
    # Shuffle the minibatch ordering each epoch.
    ii = np.arange(minibatch_size).astype("int32")
    random_state.shuffle(ii)
    Xi = X[:, ii]
    yi = y[:, ii]
    pred, _ = predict_function(Xi, h_init)
    err, _ = fit_function(Xi, yi, h_init)
    if i in status_points:
        print("Epoch %i: err %f" % (i, err))
# Run on self generations: seed with real data, then feed predictions back in.
n_seed = n_timesteps // 2
X_grow = X[:n_seed]
for i in range(n_timesteps // 4, n_full):
    p, _ = predict_function(X_grow, h_init)
    # take last prediction only
    X_grow = np.concatenate((X_grow, p[-1][None]))
# Non-interactive backend so the script can run headless.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
f, axarr1 = plt.subplots(minibatch_size)
for i in range(minibatch_size):
    # -1 to have the same dims
    axarr1[i].plot(full_sines[:-1, i, 0], color="steelblue")
plt.savefig('groundtruth.png')
plt.close()
f, axarr2 = plt.subplots(minibatch_size)
for i in range(minibatch_size):
    axarr2[i].plot(p[:, i, 0], color="darkred")
plt.savefig('generated.png')
plt.close()
|
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import unittest
from makani.avionics.bootloader import program_ui
import mock
class ProgramUiTest(unittest.TestCase):
  """Tests for program_ui with AIO traffic, dialogs, and subprocesses mocked."""

  dialog_ok = program_ui.ui_helper.dialog.Dialog.OK
  dialog_cancel = program_ui.ui_helper.dialog.Dialog.CANCEL

  def setUp(self):
    # Patch AioClient class and its Recv method.
    # NOTE(review): the string target assumes 'program_ui' is importable as a
    # top-level module in the test environment -- confirm against the runner.
    self._patch_aio = mock.patch(target='program_ui.aio.AioClient', spec=True)
    self._patch_aio.start()
    self._patch_aio_recv = program_ui.aio.AioClient.return_value.Recv
    self._patch_aio_recv.side_effect = socket.timeout('No socket allowed.')
    # Patch UserInterface class and its Menu method.
    self._patch_ui = mock.patch(target='program_ui.ui_helper.UserInterface',
                                spec=True)
    self._patch_ui.start()
    self._patch_ui_menu = program_ui.ui_helper.UserInterface.return_value.Menu
    self._patch_ui_menu.side_effect = [(self.dialog_ok, '')]
    self._patch_ui_input = program_ui.ui_helper.UserInterface.return_value.Input
    self._patch_ui_input.side_effect = [(self.dialog_ok, '')]
    # Patch RunProcess method.
    self._patch_proc_runner = mock.patch(
        target='program_ui.process_helper.RunProcess',
        spec=True)
    self._patch_proc_runner.start()
    # Replace ProgramWrapper's GetFirmwareFiles method. This removes tms570-bin
    # dependency.
    program_ui.ProgramWrapper.GetFirmwareFiles = mock.Mock(
        return_value=['calib_file1', 'calib_file2'])
    # Replace DetectAio with network failure response.
    program_ui.DetectAio = mock.Mock(return_value=(tuple(), tuple()))

  def ProgramArgumentChecker(self, mocked_call, exact_arguments):
    """Assert program.py was invoked exactly once with the given args."""
    self.assertEqual(mocked_call.call_count, 1)
    self.assertTrue(mocked_call.call_args[0][0][1].endswith('program.py'))
    self.assertEqual(mocked_call.call_args[0][0][2:], exact_arguments)

  def PrepareForProgram(self):
    # Set return of calls to RunProcess (likely calling program.py).
    program_ui.process_helper.RunProcess.return_value = (0, '', '')
    program_ui.process_helper.RunProcess.reset_mock()
    # Reset UserInterface mock.
    self._patch_ui_menu.reset_mock()

  def GetNodeFromSnake(self, node_snake):
    """Look up an AIO node by its snake_name; raise if not found."""
    assert isinstance(node_snake, str)
    for node in program_ui._NETWORK_CONFIG.aio_nodes:
      if getattr(node, 'snake_name', '') == node_snake:
        return node
    # BUG FIX: the original passed node_snake as a second RuntimeError
    # argument (logging style), producing a tuple instead of a message.
    raise RuntimeError('Invalid node name: %s.' % node_snake)

  def PreloadInput(self, answers):
    self._patch_ui_input.side_effect = [(self.dialog_ok, answer) for answer in
                                        answers]

  def PreloadMenu(self, answers):
    self._patch_ui_menu.side_effect = [(self.dialog_ok, answer) for answer in
                                       answers]

  def PreloadMenuNodeSelect(self, node):
    self.PreloadMenu([node.label_name, str(node.enum_value)])

  def CreateProgramWrapper(self, node_snake):
    """Build a ProgramWrapper via the mocked menus and verify the offers."""
    node = self.GetNodeFromSnake(node_snake)
    category = node.label_name
    # DetectAio will fail, user will select node_snake.
    self.PreloadMenuNodeSelect(node)
    p1 = program_ui.ProgramWrapper()
    # Verify that category was offered.
    menu_1_options = p1.dialog_ui.Menu.call_args_list[0][0]
    codenames, desc = zip(*menu_1_options[1])
    self.assertIn(category, codenames)
    # Verify that node_snake was offered.
    menu_2_options = p1.dialog_ui.Menu.call_args_list[1][0]
    codenames, desc = zip(*menu_2_options[1])
    self.assertIn(str(node.enum_value), codenames)
    self.assertIn(node_snake, desc)
    self.assertEqual(codenames.index(str(node.enum_value)),
                     desc.index(node.snake_name))
    return p1

  def SetIsSubset(self, subset, superset):
    self.assertTrue(subset.issubset(superset),
                    '{} not found in {}.'.format(subset, superset))

  def testGlobals(self):
    self.assertTrue(hasattr(program_ui.program, 'SERIAL_PARAM_FILE_TEMPLATE'),
                    'Missing global variable: SERIAL_PARAM_FILE_TEMPLATE.')

  def testProgramApplication(self):
    self.PrepareForProgram()
    node = self.GetNodeFromSnake('servo_a1')
    # Create a ProgramWrapper instance.
    p1 = self.CreateProgramWrapper(node.snake_name)
    # Test ProgramApplication.
    p1.ProgramApplication()
    self.ProgramArgumentChecker(program_ui.process_helper.RunProcess,
                                exact_arguments=[node.snake_name])

  def testProgramBootloader(self):
    self.PrepareForProgram()
    node = self.GetNodeFromSnake('servo_a1')
    # Create a ProgramWrapper instance.
    p1 = self.CreateProgramWrapper(node.snake_name)
    # Test ProgramBootloader.
    p1.ProgramBootloader()
    self.ProgramArgumentChecker(program_ui.process_helper.RunProcess,
                                exact_arguments=[node.snake_name,
                                                 '--bootloader'])

  def testProgramRename(self):
    self.PrepareForProgram()
    node = self.GetNodeFromSnake('servo_a1')
    node_rename_to = self.GetNodeFromSnake('servo_a2')
    # Create a ProgramWrapper instance.
    p1 = self.CreateProgramWrapper(node.snake_name)
    # RenameNode: Select rename_to Node.
    self.PreloadMenuNodeSelect(node_rename_to)
    # RenameNode: Setup Recv mock to return version data.
    mock_header = mock.Mock()
    mock_header.version = 0xFFFF
    self._patch_aio_recv.side_effect = None
    self._patch_aio_recv.return_value = (node_rename_to.ip, mock_header, None)
    # Test RenameNode.
    p1.RenameNode()
    self.ProgramArgumentChecker(program_ui.process_helper.RunProcess,
                                exact_arguments=[node.snake_name, '--rename_to',
                                                 node_rename_to.snake_name])

  def testProgramSerial(self):
    self.PrepareForProgram()
    node = self.GetNodeFromSnake('servo_a1')
    # Create a ProgramWrapper instance.
    p1 = self.CreateProgramWrapper(node.snake_name)
    # ProgramSerial: Select hardware type and type in serial.
    hardware_type_rev = ['aio', 'rev_ab']
    serial_number = 'my_test_serial'
    self.PreloadMenu(hardware_type_rev)
    self.PreloadInput([serial_number])
    # Test ProgramSerial.
    p1.ProgramSerial()
    self.ProgramArgumentChecker(
        program_ui.process_helper.RunProcess,
        exact_arguments=([node.snake_name, '--serial'] + hardware_type_rev +
                         [serial_number]))

  def testProgramCalib(self):
    self.PrepareForProgram()
    node = self.GetNodeFromSnake('motor_sbo')
    # Create a ProgramWrapper instance.
    p1 = self.CreateProgramWrapper(node.snake_name)
    # ProgramCalib: Select calibration.
    calib = p1.GetCalibNames()[0]
    self.PreloadMenu([calib])
    # Test ProgramCalib.
    p1.ProgramCalib()
    self.ProgramArgumentChecker(
        program_ui.process_helper.RunProcess,
        exact_arguments=([node.snake_name, '--calib', calib]))

  def testNodeSelectMenu(self):
    self._patch_ui_menu.reset_mock()
    self.CreateProgramWrapper('servo_a1')
    self._patch_ui_menu.reset_mock()
    self.CreateProgramWrapper('cs_a')

  def testGetSerialRevisions(self):
    self.SetIsSubset({'rev_ab', 'rev_ac', 'rev_ad', 'rev_ba'},
                     program_ui.GetSerialRevisions())
    self.SetIsSubset({'gin_a2', 'gin_a3'},
                     program_ui.GetSerialRevisions(serial_type='motor'))
    self.SetIsSubset({'rev_ac', 'rev_ad_clk8'},
                     program_ui.GetSerialRevisions(serial_type='cs'))

  def testGetSerialTypes(self):
    self.SetIsSubset({'aio', 'motor', 'cs'}, program_ui.GetSerialTypes())

  def tearDown(self):
    # BUG FIX: the original called start() here, re-activating the patcher
    # instead of stopping it; all three patchers must be stopped.
    self._patch_proc_runner.stop()
    self._patch_ui.stop()
    self._patch_aio.stop()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
|
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the MongoDB storage controller for queues.
Field Mappings:
In order to reduce the disk / memory space used,
field names will be, most of the time, the first
letter of their long name.
"""
from oslo_log import log as logging
from oslo_utils import timeutils
from pymongo.collection import ReturnDocument
import pymongo.errors
from zaqar.common import decorators
from zaqar.i18n import _
from zaqar import storage
from zaqar.storage import errors
from zaqar.storage.mongodb import utils
LOG = logging.getLogger(__name__)
# NOTE(kgriffs): E.g.: 'queuecontroller:exists:5083853/my-queue'
_QUEUE_CACHE_PREFIX = 'queuecontroller:'
# NOTE(kgriffs): This causes some race conditions, but they are
# harmless. If a queue was deleted, but we are still returning
# that it exists, some messages may get inserted without the
# client getting an error. In this case, those messages would
# be orphaned and expire eventually according to their TTL.
#
# What this means for the client is that they have a bug; they
# deleted a queue and then immediately tried to post messages
# to it. If they keep trying to use the queue, they will
# eventually start getting an error, once the cache entry
# expires, which should clue them in on what happened.
#
# TODO(kgriffs): Make dynamic?
_QUEUE_CACHE_TTL = 5
def _queue_exists_key(queue, project=None):
# NOTE(kgriffs): Use string concatenation for performance,
# also put project first since it is guaranteed to be
# unique, which should reduce lookup time.
return _QUEUE_CACHE_PREFIX + 'exists:' + str(project) + '/' + queue
class QueueController(storage.Queue):
    """Implements queue resource operations using MongoDB.

    Queues are scoped by project, which is prefixed to the
    queue name.

    ::

        Queues:

            Name            Field
            ---------------------
            name         ->   p_q
            msg counter  ->     c
            metadata     ->     m

        Message Counter:

            Name          Field
            -------------------
            value        ->   v
            modified ts  ->   t
    """

    def __init__(self, *args, **kwargs):
        super(QueueController, self).__init__(*args, **kwargs)
        # Cache backend consumed by the @decorators.caches wrapper on
        # _exists() below.
        self._cache = self.driver.cache
        self._collection = self.driver.queues_database.queues
        # NOTE(flaper87): This creates a unique index for
        # project and name. Using project as the prefix
        # allows for querying by project and project+name.
        # This is also useful for retrieving the queues list for
        # a specific project, for example. Order matters!
        # NOTE(wanghao): pymongo has removed the ensure_index since 4.0.0.
        # So we need to update ensure_index to create_index.
        self._collection.create_index([('p_q', 1)], unique=True)

    # ----------------------------------------------------------------------
    # Helpers
    # ----------------------------------------------------------------------

    def _get_counter(self, name, project=None):
        """Retrieves the current message counter value for a given queue.

        This helper is used to generate monotonic pagination
        markers that are saved as part of the message
        document.

        Note 1: Markers are scoped per-queue and so are *not*
            globally unique or globally ordered.

        Note 2: If two or more requests to this method are made
            in parallel, this method will return the same counter
            value. This is done intentionally so that the caller
            can detect a parallel message post, allowing it to
            mitigate race conditions between producer and
            observer clients.

        :param name: Name of the queue to which the counter is scoped
        :param project: Queue's project
        :returns: current message counter as an integer
        :raises QueueDoesNotExist: if the queue document is not found
        """
        # Fetch only the counter value; '_id': 0 keeps the result minimal.
        doc = self._collection.find_one(_get_scoped_query(name, project),
                                        projection={'c.v': 1, '_id': 0})
        if doc is None:
            raise errors.QueueDoesNotExist(name, project)
        return doc['c']['v']

    def _inc_counter(self, name, project=None, amount=1, window=None):
        """Increments the message counter and returns the new value.

        :param name: Name of the queue to which the counter is scoped
        :param project: Queue's project name
        :param amount: (Default 1) Amount by which to increment the counter
        :param window: (Default None) A time window, in seconds, that
            must have elapsed since the counter was last updated, in
            order to increment the counter.
        :returns: Updated message counter value, or None if window
            was specified, and the counter has already been updated
            within the specified time period.
        :raises QueueDoesNotExist: if not found
        """
        now = timeutils.utcnow_ts()
        update = {'$inc': {'c.v': amount}, '$set': {'c.t': now}}
        query = _get_scoped_query(name, project)
        if window is not None:
            # Only match counters idle for at least `window` seconds;
            # 'c.t' holds the last-modified UNIX timestamp.
            threshold = now - window
            query['c.t'] = {'$lt': threshold}
        # Retry the atomic find-and-update for as long as the connection
        # keeps flapping; AutoReconnect is logged and the call re-issued.
        while True:
            try:
                doc = self._collection.find_one_and_update(
                    query, update, return_document=ReturnDocument.AFTER,
                    projection={'c.v': 1, '_id': 0})
                break
            except pymongo.errors.AutoReconnect:
                LOG.exception('Auto reconnect failure')
        if doc is None:
            if window is None:
                # NOTE(kgriffs): Since we did not filter by a time window,
                # the queue should have been found and updated. Perhaps
                # the queue has been deleted?
                message = _(u'Failed to increment the message '
                            u'counter for queue %(name)s and '
                            u'project %(project)s')
                message %= dict(name=name, project=project)
                LOG.warning(message)
                raise errors.QueueDoesNotExist(name, project)
            # NOTE(kgriffs): Assume the queue existed, but the counter
            # was recently updated, causing the range query on 'c.t' to
            # exclude the record.
            return None
        return doc['c']['v']

    # ----------------------------------------------------------------------
    # Interface
    # ----------------------------------------------------------------------

    def _get(self, name, project=None):
        """Return the queue's metadata, or {} if the queue does not exist."""
        try:
            return self.get_metadata(name, project)
        except errors.QueueDoesNotExist:
            return {}

    def _list(self, project=None, kfilter={}, marker=None,
              limit=storage.DEFAULT_QUEUES_PER_PAGE, detailed=False,
              name=None):
        """Generator: yields a HookedCursor of queues, then the next marker.

        NOTE: `kfilter={}` is a mutable default argument; it is only read
        here (passed through to utils.scoped_query), never mutated.
        """
        query = utils.scoped_query(marker, project, name, kfilter)
        projection = {'p_q': 1, '_id': 0}
        if detailed:
            projection['m'] = 1
        cursor = self._collection.find(query, projection=projection)
        cursor = cursor.limit(limit).sort('p_q')
        marker_name = {}
        ntotal = self._collection.count_documents(query, limit=limit)

        def normalizer(record):
            # Strip the project prefix and remember the last name consumed,
            # so the second yield below can expose it as the page marker.
            queue = {'name': utils.descope_queue_name(record['p_q'])}
            marker_name['next'] = queue['name']
            if detailed:
                queue['metadata'] = record['m']
            return queue

        yield utils.HookedCursor(cursor, normalizer, ntotal=ntotal)
        # Falsy ({}) if the cursor was never consumed; the marker otherwise.
        yield marker_name and marker_name['next']

    @utils.raises_conn_error
    @utils.retries_on_autoreconnect
    def get_metadata(self, name, project=None):
        """Return the metadata document ('m') for the given queue.

        :raises QueueDoesNotExist: if the queue is not found
        """
        queue = self._collection.find_one(_get_scoped_query(name, project),
                                          projection={'m': 1, '_id': 0})
        if queue is None:
            raise errors.QueueDoesNotExist(name, project)
        return queue.get('m', {})

    @utils.raises_conn_error
    # @utils.retries_on_autoreconnect
    def _create(self, name, metadata=None, project=None):
        """Insert the queue document; True on success, False on duplicate."""
        # NOTE(flaper87): If the connection fails after it was called
        # and we retry to insert the queue, we could end up returning
        # `False` because of the `DuplicatedKeyError` although the
        # queue was indeed created by this API call.
        #
        # TODO(kgriffs): Commented out `retries_on_autoreconnect` for
        # now due to the above issue, since creating a queue is less
        # important to make super HA.
        try:
            # NOTE(kgriffs): Start counting at 1, and assume the first
            # message ever posted will succeed and set t to a UNIX
            # "modified at" timestamp.
            counter = {'v': 1, 't': 0}
            scoped_name = utils.scope_queue_name(name, project)
            self._collection.insert_one(
                {'p_q': scoped_name, 'm': metadata or {},
                 'c': counter})
        except pymongo.errors.DuplicateKeyError:
            return False
        else:
            return True

    # NOTE(kgriffs): Only cache when it exists; if it doesn't exist, and
    # someone creates it, we want it to be immediately visible.
    @utils.raises_conn_error
    @utils.retries_on_autoreconnect
    @decorators.caches(_queue_exists_key, _QUEUE_CACHE_TTL, lambda v: v)
    def _exists(self, name, project=None):
        """Return True if the queue exists (result cached for the TTL)."""
        query = _get_scoped_query(name, project)
        return self._collection.find_one(query) is not None

    @utils.raises_conn_error
    @utils.retries_on_autoreconnect
    def set_metadata(self, name, metadata, project=None):
        """Replace the queue's metadata document.

        :raises QueueDoesNotExist: if no queue matched the update
        """
        rst = self._collection.update_one(_get_scoped_query(name, project),
                                          {'$set': {'m': metadata}})
        if rst.matched_count == 0:
            raise errors.QueueDoesNotExist(name, project)

    @utils.raises_conn_error
    @utils.retries_on_autoreconnect
    @_exists.purges
    def _delete(self, name, project=None):
        # @_exists.purges invalidates the cached _exists() entry for this
        # queue so the deletion becomes visible once the cache is purged.
        self._collection.delete_one(_get_scoped_query(name, project))

    @utils.raises_conn_error
    @utils.retries_on_autoreconnect
    def _stats(self, name, project=None):
        # No-op in this driver: no per-queue stats are computed here.
        pass

    @utils.raises_conn_error
    @utils.retries_on_autoreconnect
    def _calculate_resource_count(self, project=None):
        """Return the number of queues in the given project."""
        query = utils.scoped_query(None, project, None, {})
        return self._collection.count_documents(query)
def _get_scoped_query(name, project):
    """Build the MongoDB filter matching a single project-scoped queue."""
    scoped_name = utils.scope_queue_name(name, project)
    return {'p_q': scoped_name}
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.channel_v1.types import common
from google.cloud.channel_v1.types import offers
from google.cloud.channel_v1.types import products
from google.protobuf import timestamp_pb2 # type: ignore
# proto-plus module registry: binds every message class named in the
# manifest to the "google.cloud.channel.v1" proto package. __all__ at the
# bottom of the module is derived from this manifest.
__protobuf__ = proto.module(
    package="google.cloud.channel.v1",
    manifest={
        "Entitlement",
        "Parameter",
        "AssociationInfo",
        "ProvisionedService",
        "CommitmentSettings",
        "RenewalSettings",
        "TrialSettings",
        "TransferableSku",
        "TransferEligibility",
    },
)
class Entitlement(proto.Message):
    r"""An entitlement is a representation of a customer's ability to
    use a service.

    Attributes:
        name (str):
            Output only. Resource name of an entitlement in the form:
            accounts/{account_id}/customers/{customer_id}/entitlements/{entitlement_id}.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time at which the
            entitlement is created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time at which the
            entitlement is updated.
        offer (str):
            Required. The offer resource name for which the entitlement
            is to be created. Takes the form:
            accounts/{account_id}/offers/{offer_id}.
        commitment_settings (google.cloud.channel_v1.types.CommitmentSettings):
            Commitment settings for a commitment-based
            Offer. Required for commitment based offers.
        provisioning_state (google.cloud.channel_v1.types.Entitlement.ProvisioningState):
            Output only. Current provisioning state of
            the entitlement.
        provisioned_service (google.cloud.channel_v1.types.ProvisionedService):
            Output only. Service provisioning details for
            the entitlement.
        suspension_reasons (Sequence[google.cloud.channel_v1.types.Entitlement.SuspensionReason]):
            Output only. Enumerable of all current
            suspension reasons for an entitlement.
        purchase_order_id (str):
            Optional. This purchase order (PO)
            information is for resellers to use for their
            company tracking usage. If a purchaseOrderId
            value is given, it appears in the API responses
            and shows up in the invoice. The property
            accepts up to 80 plain text characters.
        trial_settings (google.cloud.channel_v1.types.TrialSettings):
            Output only. Settings for trial offers.
        association_info (google.cloud.channel_v1.types.AssociationInfo):
            Association information to other
            entitlements.
        parameters (Sequence[google.cloud.channel_v1.types.Parameter]):
            Extended entitlement parameters. When creating an
            entitlement, valid parameter names and values are defined in
            the
            [Offer.parameter_definitions][google.cloud.channel.v1.Offer.parameter_definitions].
            The response may include the following output-only
            Parameters:

            - assigned_units: The number of licenses assigned to users.
            - max_units: The maximum assignable units for a flexible
              offer.
            - num_units: The total commitment for commitment-based
              offers.
    """

    class ProvisioningState(proto.Enum):
        r"""Indicates the current provisioning state of the entitlement."""
        PROVISIONING_STATE_UNSPECIFIED = 0
        ACTIVE = 1
        # Values 2-4 are skipped; presumably reserved/retired upstream --
        # confirm against the source .proto before reusing them.
        SUSPENDED = 5

    class SuspensionReason(proto.Enum):
        r"""Suspension reason for an entitlement if
        [provisioning_state][google.cloud.channel.v1.Entitlement.provisioning_state]
        = SUSPENDED.
        """
        SUSPENSION_REASON_UNSPECIFIED = 0
        RESELLER_INITIATED = 1
        TRIAL_ENDED = 2
        RENEWAL_WITH_TYPE_CANCEL = 3
        PENDING_TOS_ACCEPTANCE = 4
        OTHER = 100

    # Field numbers are sparse (1, 5, 6, 8, 12, ...); the gaps presumably
    # correspond to fields reserved upstream and must be kept as-is.
    # Message types given as strings ("CommitmentSettings", ...) are
    # resolved lazily against this module's manifest.
    name = proto.Field(proto.STRING, number=1,)
    create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,)
    update_time = proto.Field(proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp,)
    offer = proto.Field(proto.STRING, number=8,)
    commitment_settings = proto.Field(
        proto.MESSAGE, number=12, message="CommitmentSettings",
    )
    provisioning_state = proto.Field(proto.ENUM, number=13, enum=ProvisioningState,)
    provisioned_service = proto.Field(
        proto.MESSAGE, number=16, message="ProvisionedService",
    )
    suspension_reasons = proto.RepeatedField(
        proto.ENUM, number=18, enum=SuspensionReason,
    )
    purchase_order_id = proto.Field(proto.STRING, number=19,)
    trial_settings = proto.Field(proto.MESSAGE, number=21, message="TrialSettings",)
    association_info = proto.Field(proto.MESSAGE, number=23, message="AssociationInfo",)
    parameters = proto.RepeatedField(proto.MESSAGE, number=26, message="Parameter",)
class Parameter(proto.Message):
    r"""Definition for extended entitlement parameters.

    Attributes:
        name (str):
            Name of the parameter.
        value (google.cloud.channel_v1.types.Value):
            Value of the parameter.
        editable (bool):
            Output only. Specifies whether this parameter is allowed to
            be changed. For example, for a Google Workspace Business
            Starter entitlement in commitment plan, num_units is
            editable when entitlement is active.
    """

    name = proto.Field(proto.STRING, number=1,)
    # Value is the shared wrapper type from google.cloud.channel_v1.types.common.
    value = proto.Field(proto.MESSAGE, number=2, message=common.Value,)
    editable = proto.Field(proto.BOOL, number=3,)
class AssociationInfo(proto.Message):
    r"""Association links that an entitlement has to other
    entitlements.

    Attributes:
        base_entitlement (str):
            The name of the base entitlement, for which
            this entitlement is an add-on.
    """

    base_entitlement = proto.Field(proto.STRING, number=1,)
class ProvisionedService(proto.Message):
    r"""Service provisioned for an entitlement.

    Attributes:
        provisioning_id (str):
            Output only. Provisioning ID of the
            entitlement. For Google Workspace, this is the
            underlying Subscription ID. For Google Cloud
            Platform, this is the Billing Account ID of the
            billing subaccount.".
        product_id (str):
            Output only. The product pertaining to the
            provisioning resource as specified in the Offer.
        sku_id (str):
            Output only. The SKU pertaining to the
            provisioning resource as specified in the Offer.
    """

    provisioning_id = proto.Field(proto.STRING, number=1,)
    product_id = proto.Field(proto.STRING, number=2,)
    sku_id = proto.Field(proto.STRING, number=3,)
class CommitmentSettings(proto.Message):
    r"""Commitment settings for commitment-based offers.

    Attributes:
        start_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Commitment start timestamp.
        end_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Commitment end timestamp.
        renewal_settings (google.cloud.channel_v1.types.RenewalSettings):
            Optional. Renewal settings applicable for a
            commitment-based Offer.
    """

    start_time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
    end_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
    renewal_settings = proto.Field(proto.MESSAGE, number=4, message="RenewalSettings",)
class RenewalSettings(proto.Message):
    r"""Renewal settings for renewable Offers.

    Attributes:
        enable_renewal (bool):
            If false, the plan will be completed at the
            end date.
        resize_unit_count (bool):
            If true and enable_renewal = true, the unit (for example
            seats or licenses) will be set to the number of active units
            at renewal time.
        payment_plan (google.cloud.channel_v1.types.PaymentPlan):
            Describes how a reseller will be billed.
        payment_cycle (google.cloud.channel_v1.types.Period):
            Describes how frequently the reseller will be
            billed, such as once per month.
    """

    enable_renewal = proto.Field(proto.BOOL, number=1,)
    resize_unit_count = proto.Field(proto.BOOL, number=2,)
    # Enum/message types below come from google.cloud.channel_v1.types.offers.
    payment_plan = proto.Field(proto.ENUM, number=5, enum=offers.PaymentPlan,)
    payment_cycle = proto.Field(proto.MESSAGE, number=6, message=offers.Period,)
class TrialSettings(proto.Message):
    r"""Settings for trial offers.

    Attributes:
        trial (bool):
            Determines if the entitlement is in a trial or not:

            - ``true`` - The entitlement is in trial.
            - ``false`` - The entitlement is not in trial.
        end_time (google.protobuf.timestamp_pb2.Timestamp):
            Date when the trial ends. The value is in milliseconds using
            the UNIX Epoch format. See an example `Epoch
            converter <https://www.epochconverter.com>`__.
    """

    trial = proto.Field(proto.BOOL, number=1,)
    end_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
class TransferableSku(proto.Message):
    r"""TransferableSku represents information a reseller needs to
    view existing provisioned services for a customer that they do
    not own. Read-only.

    Attributes:
        transfer_eligibility (google.cloud.channel_v1.types.TransferEligibility):
            Describes the transfer eligibility of a SKU.
        sku (google.cloud.channel_v1.types.Sku):
            The SKU pertaining to the provisioning
            resource as specified in the Offer.
        legacy_sku (google.cloud.channel_v1.types.Sku):
            Optional. The customer to transfer has an
            entitlement with the populated legacy SKU.
    """

    transfer_eligibility = proto.Field(
        proto.MESSAGE, number=9, message="TransferEligibility",
    )
    # Sku comes from google.cloud.channel_v1.types.products (see imports).
    sku = proto.Field(proto.MESSAGE, number=11, message=products.Sku,)
    legacy_sku = proto.Field(proto.MESSAGE, number=12, message=products.Sku,)
class TransferEligibility(proto.Message):
    r"""Specifies transfer eligibility of a SKU.

    Attributes:
        is_eligible (bool):
            Whether reseller is eligible to transfer the
            SKU.
        description (str):
            Localized description if reseller is not
            eligible to transfer the SKU.
        ineligibility_reason (google.cloud.channel_v1.types.TransferEligibility.Reason):
            Specified the reason for ineligibility.
    """

    class Reason(proto.Enum):
        r"""Reason of ineligibility."""
        REASON_UNSPECIFIED = 0
        PENDING_TOS_ACCEPTANCE = 1
        SKU_NOT_ELIGIBLE = 2
        SKU_SUSPENDED = 3

    is_eligible = proto.Field(proto.BOOL, number=1,)
    description = proto.Field(proto.STRING, number=2,)
    ineligibility_reason = proto.Field(proto.ENUM, number=3, enum=Reason,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import urllib
import zlib
import zipfile
import math
import sys
import json
import time
import bz2
import gzip
import binascii
import requests
import random
from subprocess import *
import subprocess
import threading
import MySQLdb # See http://stackoverflow.com/questions/372885/how-do-i-connect-to-a-mysql-database-in-python
import MySQLdb.cursors
# This script will download the data from the highlander database, create a json from it, then upload it to
# the cgs system, which is, for now, constituted of a hbase database where it will save the data.
# Why do we download the data from highlander instead of using directly the parser from dbBuilder?
# Because the current dbBuilder.tojson does not give as much information as we would like for the benchmarks, that's all.
# Configuration for the user
# NOTE(review): database and web credentials are hard-coded in plain text
# below; they should be moved to environment variables or a config file
# kept out of version control.
highlander_host = "highlander.usr.hydra.vub.ac.be"
highlander_database = "Iridia"
highlander_user = "iridia"
highlander_password = "iri.2742"
local_host = "127.0.0.1"
local_database = "highlander_chromosomes"
local_user = "root"
local_password = "Olgfe65grgr"
current_server_url = 'http://62.210.254.52'
cluster_url = 'http://insilicodb.ulb.ac.be:8888'
# Single authenticated HTTP session reused for every request to the cluster.
# NOTE: logging in happens at import time (module-level side effect).
querySession = requests.Session()
info = {'username':'gdegols','password':'z9FNeTrQJYaemAtyUVva'}
r = querySession.post(cluster_url+'/accounts/login/',data=info)
target_database = "hbase" # "hbase" or "impala_text"
global_upload_state = False # If False, we download the data. If True, we upload the data previously downloaded.
# Returns True when the given sample was already processed for the given
# phase (download or upload), based on the per-target progress file.
def isSampleDone(sample_name, current_upload_state):
    """Check the progress file for *sample_name*.

    :param sample_name: sample identifier (e.g. 'NA00101'); str()-ed before use
    :param current_upload_state: phase flag; part of the progress file name
    :returns: True if the sample is recorded as done, False otherwise
    """
    path = ('cluster_' + target_database + '_samples_done_'
            + str(current_upload_state) + '.txt')
    if not os.path.isfile(path):
        return False
    sample_name = str(sample_name)
    # BUG FIX: the original opened the file inside a list comprehension and
    # never closed it (leaked handle); 'with' closes it deterministically,
    # and streaming line-by-line lets us stop at the first match.
    with open(path) as tracker:
        for line in tracker:
            sample = line.strip()
            if sample and sample_name == sample:
                return True
    return False
def addSampleDone(sample, current_upload_state):
    """Append *sample* to the per-target progress file so later runs skip it."""
    tracker_path = ('cluster_' + target_database + '_samples_done_'
                    + str(current_upload_state) + '.txt')
    with open(tracker_path, 'a') as tracker:
        tracker.write(str(sample) + '\r\n')
def fieldsToCheck():
    """Load api.json and return (highlander->json name map, ordered field list).

    api.json maps keys 'c0', 'c1', ... to field descriptors, each holding a
    'highlander' (source column) and a 'json' (target key) name.
    """
    with open('api.json', 'rb') as f:
        fields = json.loads(f.read())
    # Recreate the original column order from the 'c<i>' keys.
    ordered_fields = [fields['c' + str(i)] for i in xrange(0, len(fields))]
    # Flat highlander -> json lookup; makes the mapping ~40% faster than
    # scanning the descriptors each time.
    new_fields = {}
    for field in fields.values():
        new_fields[field['highlander']] = field['json']
    return new_fields, ordered_fields
# This function returns the genotype (0/0, 1/1, 0/1, 1/0 only) from a "highlander variant"
def genotypeFromVariant(variant):
    """Return a random phased genotype string consistent with the variant's zygosity.

    'Homozygous' rows get '1|1' or '0|0'; anything else gets '0|1' or '1|0',
    each with probability 0.5.
    """
    first_branch = random.random() < 0.5
    if variant['zygosity'] == 'Homozygous':
        return '1|1' if first_branch else '0|0'
    return '0|1' if first_branch else '1|0'
# This function is in charge to create an adapted json for the benchmarks
def tojsonForBenchmarks(variants, patient):
    """Convert highlander rows into the dict-of-dicts json used by the benchmarks.

    Returns {0: {...}, 1: {...}, ...}, one entry per variant, keyed by the
    json field names from api.json plus a few derived fields.
    """
    fields, ordered_fields = fieldsToCheck()
    data = {}
    for idx, variant in enumerate(variants):
        entry = {}
        # Copy every highlander column that has a json counterpart,
        # normalizing ';' to ',' since ';' is used as a separator downstream.
        for highlander_field in variant:
            if highlander_field in fields:
                entry[fields[highlander_field]] = str(variant[highlander_field]).replace(';', ',')
        # Derived fields that are not straight column copies.
        entry['readGroupSets.readGroups.sampleId'] = patient  # variant['project_id']
        entry['variants.fileformat'] = 'VCFv4.1'
        ref = variant['allelic_depth_ref']
        alt = variant['allelic_depth_alt']
        if ref and alt:
            entry['variants.calls.info.confidence_by_depth'] = ref + "," + alt
        elif ref:
            entry['variants.calls.info.confidence_by_depth'] = ref
        elif alt:
            entry['variants.calls.info.confidence_by_depth'] = alt
        entry['variants.info.insert_date'] = int(time.time())
        entry['variants.calls.genotype'] = genotypeFromVariant(variant)
        data[idx] = entry
    return data
# This function is in charge to create an adapted tsv for the benchmarks
def totsvForBenchmarks(variants, patient):
    """Convert highlander rows for one patient into ';'-separated text.

    Each output line is '<rowkey>;<field0>;<field1>;...' where the field
    order comes from api.json (see fieldsToCheck). Lines are joined by '\\n'.
    NOTE: the variant dicts are mutated in place (derived fields are added),
    matching the original behavior.
    """
    fields, ordered_fields = fieldsToCheck()
    # Map each highlander column name to its position in the output line.
    fields_map = {}
    for field_id in xrange(0, len(ordered_fields)):
        fields_map[ordered_fields[field_id]['highlander']] = field_id
    tsv = []
    for variant in variants:
        # Derived fields that are not straight column copies.
        if variant['allelic_depth_ref'] and variant['allelic_depth_alt']:
            variant['genotype_likelihood_hom_ref,genotype_likelihood_het,genotype_likelihood_hom_alt'] = variant['allelic_depth_ref'] + "," + variant['allelic_depth_alt']
        elif variant['allelic_depth_ref']:
            variant['genotype_likelihood_hom_ref,genotype_likelihood_het,genotype_likelihood_hom_alt'] = variant['allelic_depth_ref']
        elif variant['allelic_depth_alt']:
            variant['genotype_likelihood_hom_ref,genotype_likelihood_het,genotype_likelihood_hom_alt'] = variant['allelic_depth_alt']
        variant['insert_date'] = int(time.time())
        variant['special_genotype'] = genotypeFromVariant(variant)
        variant['special_fileformat'] = 'VCFv4.1'
        # We create the row-key
        rowkey = str(variant['project_id']) + '-' + str(variant['chr']) + '-' \
                 + str(variant['pos']) + '-' + str(variant['reference']) + '-' \
                 + str(variant['alternative'])
        # Fill a pre-sized list, then join once; this was measured as the
        # fastest of several approaches (~11.5s per patient).
        val = [''] * len(ordered_fields)
        for field_name, field_place in fields_map.iteritems():
            try:
                if variant[field_name]:
                    if field_name != 'unisnp_ids' and field_name != 'dbsnp_id_141' and field_name != 'dbsnp_id_137':
                        val[field_place] = str(variant[field_name])
                    else:
                        # These id columns may themselves contain ';', which
                        # is our separator, so normalize to ','.
                        val[field_place] = str(variant[field_name]).replace(';',',')
            except KeyError:
                # Column absent from this row: leave the slot empty.
                pass
        # BUG FIX: the separator between the rowkey and the first field was
        # missing (line was rowkey + 'v0;v1;...'); every earlier variant of
        # this code emitted ';' before each field value.
        tsv.append(rowkey + ';' + ';'.join(val))
    return '\n'.join(tsv)
# This function save the current variants for later
def saveForLater(cur, patient, benchmark_table):
    """Download one patient's variants, convert to json, gzip them to the web root.

    :param cur: open MySQLdb cursor on the highlander database
    :param patient: patient identifier (e.g. 'NA00101')
    :param benchmark_table: highlander table to read from
    :returns: False when fewer than 40 000 rows were found, True otherwise
    """
    # NOTE(review): the query is built by string concatenation; 'patient'
    # and 'benchmark_table' come from this script's own loop today, but
    # switch to parameterized queries if inputs ever become external.
    # We download the variants for the given patient
    print(patient+": Downloading data."),
    st = time.time()
    cur.execute("SELECT * FROM "+benchmark_table+" WHERE patient = '"+patient+"' ORDER BY id") # 15s
    print("Time: "+str(round(time.time()-st,2))+"s.")
    if cur.rowcount < 40000:
        print(patient+": Probably incomplete data found (rows = "+str(cur.rowcount)+" < 40 000), we stop here.")
        return False
    # We convert the variants to a json object
    print(patient+": Converting data ("+str(cur.rowcount)+" lines)."),
    st = time.time()
    variants = tojsonForBenchmarks(cur.fetchall(), patient)
    print("Time: "+str(round(time.time()-st,2))+"s.")
    # For test only
    # return testIfCompressWorthIt(variants)
    # We save the file to the current web server
    print(patient+": Saving compressed data. "),
    server_directory = '/var/www/html/cgs-41gre4gre4htrhtrthtjhty'
    st = time.time()
    t = json.dumps(variants)
    f = gzip.open(server_directory+'/hbase_upload_'+str(patient)+'_'+benchmark_table+'.txt.gz', 'wb')
    f.write(t)
    f.close()
    print("Time: "+str(round(time.time()-st,2))+"s.")
    return True
class patientToTSV(threading.Thread):
    """Worker thread: download one patient's variants and append their TSV
    text to the shared gzip file handle provided via setF()."""

    # Per-thread parameters, injected through the setters below before start().
    m_patient = None
    m_f = None
    m_benchmark_table = None

    def setPatient(self, patient):
        self.m_patient = patient

    def setF(self, f):
        # Shared output handle; several workers may write to it concurrently.
        self.m_f = f

    def setBenchmarkTable(self, benchmark_table):
        self.m_benchmark_table = benchmark_table

    def run(self):
        patient = self.m_patient
        f = self.m_f
        benchmark_table = self.m_benchmark_table
        st = time.time()
        # Each worker opens its own MySQL connexion rather than sharing one.
        connexion = MySQLdb.connect(host= highlander_host, user=highlander_user, passwd=highlander_password,db=highlander_database, cursorclass=MySQLdb.cursors.DictCursor, compress=False)
        cur = connexion.cursor()
        t = patient+": Downloading data. "
        st = time.time()
        cur.execute("SELECT * FROM "+benchmark_table+" WHERE patient = '"+patient+"' ORDER BY id") # 15s
        print(t+ "Time: "+str(round(time.time()-st,2))+"s.")
        if cur.rowcount < 40000:
            print(patient+": Probably incomplete data found (rows = "+str(cur.rowcount)+" < 40 000), we stop here.")
            return False
        data = cur.fetchall()
        cur.close()
        # We convert the variants to a tsv text
        # NOTE(review): cur.rowcount is read below after cur.close(); it
        # appears to work with MySQLdb, but confirm before upgrading drivers.
        t = patient+": Converting data ("+str(cur.rowcount)+" lines). "
        st = time.time()
        variants = totsvForBenchmarks(data, patient)
        print(t+"Time: "+str(round(time.time()-st,2))+"s.")
        # We save the file to the current web server
        # NOTE(review): several workers write to the same gzip handle; the
        # interleaving of whole-call writes is nondeterministic -- confirm
        # downstream consumers tolerate arbitrary patient order.
        print(patient+": Saving (compressed) tsv data. "),
        st = time.time()
        f.write(variants)
        print("Time: "+str(round(time.time()-st,2))+"s.")
        connexion.close()
# This function should be only use for benchmarks purposes as we don't use json anymore
def saveToTSV(cur, sample, last_sample, benchmark_table):
    """Download samples [sample; last_sample[ and append their TSV to one gzip file.

    Work is fanned out to patientToTSV worker threads, max_threads at a time;
    the shared gzip handle is passed to every worker. Always returns True.

    NOTE: *cur* is kept for interface compatibility but is no longer used:
    the original sequential code path that used it was dead code behind
    'if True: ... else: ...' and has been removed (no behavior change).
    """
    print("Saving samples ["+str(sample)+";"+str(last_sample)+"[")
    # We open the single output file all worker threads append to.
    server_directory = '/var/www/html/cgs-41gre4gre4htrhtrthtjhty'
    patient = 'NA'+(str(sample).zfill(5))
    f = gzip.open(server_directory+'/hbase_upload_'+str(patient)+'_'+benchmark_table+'.tsv.gz', 'wb')
    threads = []
    max_threads = 4
    st_init = time.time()
    for sample in xrange(sample, last_sample):
        patient = 'NA'+(str(sample).zfill(5))
        if len(threads) == 0:
            st_init = time.time()
        t = patientToTSV()
        t.setDaemon(True)
        t.setF(f)
        t.setBenchmarkTable(benchmark_table)
        t.setPatient(patient)
        t.start()
        threads.append(t)
        # Wait for the current batch to finish before launching the next one.
        if len(threads) >= max_threads:
            for t in threads:
                t.join()
            print(str(max_threads)+" samples done in "+str(time.time()-st_init)+"s")
            st_init = time.time()
            threads = []
    # Join the last (possibly partial) batch.
    if len(threads) >= 1:
        for t in threads:
            t.join()
        print(str(max_threads)+" samples done in "+str(time.time()-st_init)+"s")
        st_init = time.time()
        threads = []
    f.close()
    return True
# This method is in charge to upload a json of variants to hbase ZZEr4RfUy1ZWmri
# We don't use compression as it is not really necessary as this script is executed from a server: at least 10Mo/s, * 36000 = 350Go/10h.
def uploadToHbase(patient, benchmark_table):
    """Ask the cluster (over HTTP) to pull and import this patient's file.

    Retries on unparsable responses; after 3 failed attempts, deletes the
    source file and exits the whole script.

    NOTE(review): 'database' is not defined anywhere in this script (only
    highlander_database / local_database / target_database exist), so this
    function raises NameError as written -- confirm which name was intended.
    NOTE(review): 'server_directory' (used in the attempts>=3 cleanup) is a
    local variable of saveForLater/saveToTSV, not a global; the cleanup
    would also fail with NameError.
    NOTE(review): if the very first response fails to parse, 'result' is
    unbound when json.dumps(result) runs below -- another latent NameError.
    """
    # We make a query to the cluster, asking him to download the file
    info = {'database':database,'variants':current_server_url+'/cgs-41gre4gre4htrhtrthtjhty/hbase_upload_'+str(patient)+'_'+benchmark_table+'.txt','patient':patient}
    upload_state = False
    attempts = 0
    while not upload_state is True:
        r = querySession.get(cluster_url+'/variants/benchmarks/variant/import/'+benchmark_table,params=info)
        # We check the content
        try:
            result = json.loads(r.text)
            upload_state = True
        except:
            # Unparsable response: keep the raw body for post-mortem.
            with open('logs/error_upload_'+str(patient)+'_'+database+'_'+benchmark_table+'.txt', 'w') as outfile:
                outfile.write(r.text)
            upload_state = False
        if not upload_state is True or str(result['status']) != '0':
            print(patient+" Problem while uploading data. Result saved in logs/error_upload_"+str(patient)+"_"+database+"_"+benchmark_table+".txt")
            attempts += 1
            if attempts >= 3:
                os.remove(server_directory+'/hbase_upload_'+str(patient)+'_'+benchmark_table+'.txt')
                sys.exit('A problem occurred during the downloading... Please check your logs.')
            # Forces the while condition to end the loop after this pass.
            upload_state = True
        # We save the result of the query to log (especially the execution time, but I'm lazzy so it will be the complete json directly)
        with open('logs/success_upload_'+database+'_'+benchmark_table+'.txt', 'a') as outfile:
            outfile.write(str(patient)+" : "+json.dumps(result)+"\n")
    # We delete the file previously generated -> not needed anymore
    # os.remove(server_directory+'/hbase_upload_'+str(patient)+'_'+benchmark_table+'.txt')
    return True
# This method allows to easily see if it is worth it to compress the data
def testIfCompressWorthIt(variants):
    """Benchmark helper: time json dump/write and a bz2-vs-gzip round trip.

    Results are printed, never returned; keep the statement order as-is,
    since each time.time() pair brackets exactly the operation measured.
    """
    st = time.time()
    t = json.dumps(variants)
    print("Json dump: "+str(time.time()-st)+"s ("+str(len(t)/1024)+"ko).")
    # We save the uncompress text
    st = time.time()
    with open('/var/www/html/benchmarks/hbase_upload.txt', 'w') as outfile:
        outfile.write(t)
        #json.dump(variants, outfile, sort_keys = True, ensure_ascii=False)
    print("Json write: "+str(time.time()-st)+"s.")
    # Flip this constant to re-run the bz2 measurement below.
    method = "gzip"
    if method == "bz2": # -> not worth it, it takes around 45s to compress 65Mo (->1.6Mo which was great), huge cpu usage for only 1 core. We could try to parallelized the stuff by compressing different files simultaneously but it's boring.
        # We save the compressed text
        st = time.time()
        compressed = bz2.compress(t)
        print("Json compress: "+str(time.time()-st)+"s.")
        st = time.time()
        with open('/var/www/html/benchmarks/hbase_upload.txt.bzip2', 'w') as outfile:
            outfile.write(compressed)
            #outfile.write(binascii.hexlify(compressed))
            #json.dump(variants, outfile, sort_keys = True, ensure_ascii=False)
        print("Json write: "+str(time.time()-st)+"s ("+str(len(t)/1024)+"ko).")
        st = time.time()
        with open('/var/www/html/benchmarks/hbase_upload.txt.bzip2', 'rb') as infile:
            compressedRead = infile.read()
        print("Json read compressed: "+str(time.time()-st)+"s ("+str(len(compressedRead)/1024)+"ko).")
        st = time.time()
        decompressed = bz2.decompress(compressedRead)
        print("Json decompress: "+str(time.time()-st)+"s ("+str(len(decompressed)/1024)+"ko).")
    elif method == "gzip": # -> interesting, around 6s to compress 65Mo to 2.6Mo.
        # We save the compressed text
        st = time.time()
        f = gzip.open('/var/www/html/benchmarks/hbase_upload.txt.gz', 'wb')
        f.write(t)
        f.close()
        print("Json compress and write: "+str(time.time()-st)+"s ("+str(os.path.getsize('/var/www/html/benchmarks/hbase_upload.txt.gz')/1024)+"ko).")
        st = time.time()
        f = gzip.open('/var/www/html/benchmarks/hbase_upload.txt.gz', 'rb')
        decompressed = f.read()
        f.close()
        print("Json read and decompress: "+str(time.time()-st)+"s ("+str(len(decompressed)/1024)+"ko).")
    return True
# We connect to the db
highlander_connexion = MySQLdb.connect(host= highlander_host, user=highlander_user, passwd=highlander_password,db=highlander_database, cursorclass=MySQLdb.cursors.DictCursor, compress=False)
# sudo ip add add dev tun0 172.31.236.177/24 broadcast 172.31.236.178
# We count the data available in each analysis
analyses = [('small', 200, '20_2015_04_01_benchmarks_small'), ('medium', 1000,'21_2015_04_01_benchmarks_medium')]#,('big',5000,'22_2015_04_01_benchmarks_big'),('huge',25000,'23_2015_04_01_benchmarks_huge')]
starting_sample = 100
# Main driver. For each analysis we either download+convert samples to disk
# (global_upload_state False, batched 1000 samples at a time through
# saveToTSV), or upload previously generated files one sample at a time.
for analysis in analyses:
    cur = highlander_connexion.cursor()
    # Not a good idead to use the following query as the table is huge...
    #cur.execute("SELECT DISTINCT(patient) FROM "+analysis[0])
    #samples = cur.fetchall()
    # For each sample we will download the data, then create a json from it, and upload it to hbase
    if global_upload_state is False:
        increment = 1000
    else:
        increment = 1
    for sample in xrange(starting_sample + 1, starting_sample + analysis[1], increment):
        current_sample = 'NA'+(str(sample).zfill(5))
        # Clamp the batch size so the last batch stays inside this analysis.
        increment = max(1,min(increment, starting_sample + analysis[1] - sample))
        if isSampleDone(current_sample, global_upload_state):
            continue
        if global_upload_state is False:
            # We download the data from Highlander
            #if saveForLater(cur, current_sample, analysis[0]):
            if saveToTSV(cur, sample, sample+increment, analysis[0]):
                addSampleDone(current_sample, False)
            else:
                break
            continue
        elif isSampleDone(current_sample, False):
            # If we are in the upload state, we upload the data if it was previously downloaded
            print(current_sample+": Uploading data."),
            st = time.time()
            if uploadToHbase(current_sample, analysis[0]):
                # BUG FIX: addSampleDone() requires the upload-state flag as a
                # second argument; it was called with one argument here, which
                # raised TypeError at runtime. We are in the upload phase
                # (global_upload_state is True on this path), so record the
                # sample as done for upload_state=True.
                addSampleDone(current_sample, True)
                print("Time: "+str(round(time.time()-st,2))+"s.")
            else:
                print("Time: "+str(round(time.time()-st,2))+"s.")
                print(current_sample+": Uploading data -> Failed. ")
        else:
            print(current_sample+": variants not previously downloaded.")
            continue
    starting_sample += analysis[1] + 1
print("The end!")
# We close the connexion
highlander_connexion.close()
|
|
from django.contrib import auth
from django.core import validators
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.manager import EmptyManager
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
import datetime
import urllib
# Sentinel stored in User.password to mark accounts that can never log in
# with a password (see User.set_unusable_password / has_usable_password).
UNUSABLE_PASSWORD = '!' # This will never be a valid hash
# Ensure the builtin ``set`` exists; pre-2.4 Pythons need the sets module.
try:
    set
except NameError:
    from sets import Set as set # Python 2.3 fallback
def get_hexdigest(algorithm, salt, raw_password):
    """
    Returns a string of the hexdigest of the given plaintext password and salt
    using the given algorithm ('md5', 'sha1' or 'crypt').
    """
    raw_password = smart_str(raw_password)
    salt = smart_str(salt)
    if algorithm == 'crypt':
        # crypt is only available on some platforms (typically Unix).
        try:
            import crypt
        except ImportError:
            raise ValueError('"crypt" password algorithm not supported in this environment')
        return crypt.crypt(raw_password, salt)
    # The remaining algorithms go through hashlib when it exists (Python
    # 2.5+); otherwise fall back to the legacy md5/sha modules.
    try:
        import hashlib
    except ImportError:
        hashlib = None
    if hashlib is not None:
        if algorithm == 'md5':
            return hashlib.md5(salt + raw_password).hexdigest()
        if algorithm == 'sha1':
            return hashlib.sha1(salt + raw_password).hexdigest()
    else:
        if algorithm == 'md5':
            import md5
            return md5.new(salt + raw_password).hexdigest()
        if algorithm == 'sha1':
            import sha
            return sha.new(salt + raw_password).hexdigest()
    raise ValueError("Got unknown password algorithm type in password.")
def check_password(raw_password, enc_password):
    """
    Returns a boolean of whether the raw_password was correct. Handles
    encryption formats behind the scenes.
    """
    # Stored passwords look like '[algo]$[salt]$[hexdigest]'.
    algorithm, salt, expected = enc_password.split('$')
    return get_hexdigest(algorithm, salt, raw_password) == expected
class SiteProfileNotAvailable(Exception):
    """Raised by User.get_profile() when no site profile is configured
    (missing AUTH_PROFILE_MODULE) or the profile model cannot be loaded."""
class Permission(models.Model):
    """The permissions system provides a way to assign permissions to specific users and groups of users.
    The permission system is used by the Django admin site, but may also be useful in your own code. The Django admin site uses permissions as follows:
    - The "add" permission limits the user's ability to view the "add" form and add an object.
    - The "change" permission limits a user's ability to view the change list, view the "change" form and change an object.
    - The "delete" permission limits the ability to delete an object.
    Permissions are set globally per type of object, not per specific object instance. It is possible to say "Mary may change news stories," but it's not currently possible to say "Mary may change news stories, but only the ones she created herself" or "Mary may only change news stories that have a certain status or publication date."
    Three basic permissions -- add, change and delete -- are automatically created for each Django model.
    """
    # Human-readable name shown in the admin.
    name = models.CharField(_('name'), max_length=50)
    # Model this permission applies to (per model type, not per instance).
    content_type = models.ForeignKey(ContentType)
    # Machine name; unique together with content_type (see Meta below).
    codename = models.CharField(_('codename'), max_length=100)
    class Meta:
        verbose_name = _('permission')
        verbose_name_plural = _('permissions')
        unique_together = (('content_type', 'codename'),)
        ordering = ('content_type', 'codename')
    def __unicode__(self):
        # e.g. u"app_label | content type | permission name"
        return u"%s | %s | %s" % (self.content_type.app_label, self.content_type, self.name)
class Group(models.Model):
    """Groups are a generic way of categorizing users to apply permissions, or some other label, to those users. A user can belong to any number of groups.
    A user in a group automatically has all the permissions granted to that group. For example, if the group Site editors has the permission can_edit_home_page, any user in that group will have that permission.
    Beyond permissions, groups are a convenient way to categorize users to apply some label, or extended functionality, to them. For example, you could create a group 'Special users', and you could write code that would do special things to those users -- such as giving them access to a members-only portion of your site, or sending them members-only e-mail messages.
    """
    # Unique display name of the group.
    name = models.CharField(_('name'), max_length=80, unique=True)
    # Permissions granted to every member of this group.
    permissions = models.ManyToManyField(Permission, verbose_name=_('permissions'), blank=True, filter_interface=models.HORIZONTAL)
    class Meta:
        verbose_name = _('group')
        verbose_name_plural = _('groups')
        ordering = ('name',)
    class Admin:
        search_fields = ('name',)
    def __unicode__(self):
        return self.name
class UserManager(models.Manager):
    """Manager with helpers to create regular users, superusers and
    random passwords."""

    def create_user(self, username, email, password=None):
        "Creates and saves a User with the given username, e-mail and password."
        now = datetime.datetime.now()
        # Positional construction mirrors the field order declared on User:
        # (id, username, first_name, last_name, email, password, is_staff,
        #  is_active, is_superuser, last_login, date_joined).
        user = self.model(None, username, '', '', email.strip().lower(), 'placeholder', False, True, False, now, now)
        if password:
            user.set_password(password)
        else:
            # No password given: mark the account as having no usable password.
            user.set_unusable_password()
        user.save()
        return user

    def create_superuser(self, username, email, password):
        "Creates and saves a superuser (staff, active, all permissions)."
        u = self.create_user(username, email, password)
        u.is_staff = True
        u.is_active = True
        u.is_superuser = True
        u.save()
        # Fix: return the created user for parity with create_user();
        # previously callers received None.
        return u

    def make_random_password(self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789'):
        "Generates a random password with the given length and given allowed_chars"
        # Note that default value of allowed_chars does not have "I" or letters
        # that look like it -- just to avoid confusion.
        from random import choice
        return ''.join([choice(allowed_chars) for i in range(length)])
class User(models.Model):
    """Users within the Django authentication system are represented by this model.
    Username and password are required. Other fields are optional.
    """
    # --- Identity and credentials ---
    username = models.CharField(_('username'), max_length=30, unique=True, validator_list=[validators.isAlphaNumeric], help_text=_("Required. 30 characters or fewer. Alphanumeric characters only (letters, digits and underscores)."))
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=30, blank=True)
    email = models.EmailField(_('e-mail address'), blank=True)
    # Stored as '[algo]$[salt]$[hexdigest]' (see set_password/check_password).
    password = models.CharField(_('password'), max_length=128, help_text=_("Use '[algo]$[salt]$[hexdigest]' or use the <a href=\"password/\">change password form</a>."))
    # --- Status flags ---
    is_staff = models.BooleanField(_('staff status'), default=False, help_text=_("Designates whether the user can log into this admin site."))
    is_active = models.BooleanField(_('active'), default=True, help_text=_("Designates whether this user should be treated as active. Unselect this instead of deleting accounts."))
    is_superuser = models.BooleanField(_('superuser status'), default=False, help_text=_("Designates that this user has all permissions without explicitly assigning them."))
    last_login = models.DateTimeField(_('last login'), default=datetime.datetime.now)
    date_joined = models.DateTimeField(_('date joined'), default=datetime.datetime.now)
    # --- Relations ---
    groups = models.ManyToManyField(Group, verbose_name=_('groups'), blank=True,
        help_text=_("In addition to the permissions manually assigned, this user will also get all permissions granted to each group he/she is in."))
    user_permissions = models.ManyToManyField(Permission, verbose_name=_('user permissions'), blank=True, filter_interface=models.HORIZONTAL)
    objects = UserManager()
    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')
        ordering = ('username',)
    class Admin:
        fields = (
            (None, {'fields': ('username', 'password')}),
            (_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),
            (_('Permissions'), {'fields': ('is_staff', 'is_active', 'is_superuser', 'user_permissions')}),
            (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
            (_('Groups'), {'fields': ('groups',)}),
        )
        list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff')
        list_filter = ('is_staff', 'is_superuser')
        search_fields = ('username', 'first_name', 'last_name', 'email')
    def __unicode__(self):
        return self.username
    def get_absolute_url(self):
        # URL-quote the username since it appears in the path.
        return "/users/%s/" % urllib.quote(smart_str(self.username))
    def is_anonymous(self):
        "Always returns False. This is a way of comparing User objects to anonymous users."
        return False
    def is_authenticated(self):
        """Always return True. This is a way to tell if the user has been authenticated in templates.
        """
        return True
    def get_full_name(self):
        "Returns the first_name plus the last_name, with a space in between."
        full_name = u'%s %s' % (self.first_name, self.last_name)
        return full_name.strip()
    def set_password(self, raw_password):
        """Hash raw_password with sha1 and a fresh 5-character salt, and
        store it in '[algo]$[salt]$[hexdigest]' format. Does not save."""
        import random
        algo = 'sha1'
        salt = get_hexdigest(algo, str(random.random()), str(random.random()))[:5]
        hsh = get_hexdigest(algo, salt, raw_password)
        self.password = '%s$%s$%s' % (algo, salt, hsh)
    def check_password(self, raw_password):
        """
        Returns a boolean of whether the raw_password was correct. Handles
        encryption formats behind the scenes.
        """
        # Backwards-compatibility check. Older passwords won't include the
        # algorithm or salt.
        if '$' not in self.password:
            is_correct = (self.password == get_hexdigest('md5', '', raw_password))
            if is_correct:
                # Convert the password to the new, more secure format.
                self.set_password(raw_password)
                self.save()
            return is_correct
        return check_password(raw_password, self.password)
    def set_unusable_password(self):
        # Sets a value that will never be a valid hash
        self.password = UNUSABLE_PASSWORD
    def has_usable_password(self):
        """Returns False if set_unusable_password() was called for this user."""
        return self.password != UNUSABLE_PASSWORD
    def get_group_permissions(self):
        """
        Returns a list of permission strings that this user has through
        his/her groups. This method queries all available auth backends.
        """
        permissions = set()
        for backend in auth.get_backends():
            if hasattr(backend, "get_group_permissions"):
                permissions.update(backend.get_group_permissions(self))
        return permissions
    def get_all_permissions(self):
        """Returns the set of permission strings this user has, aggregated
        across all auth backends that support get_all_permissions."""
        permissions = set()
        for backend in auth.get_backends():
            if hasattr(backend, "get_all_permissions"):
                permissions.update(backend.get_all_permissions(self))
        return permissions
    def has_perm(self, perm):
        """
        Returns True if the user has the specified permission. This method
        queries all available auth backends, but returns immediately if any
        backend returns True. Thus, a user who has permission from a single
        auth backend is assumed to have permission in general.
        """
        # Inactive users have no permissions.
        if not self.is_active:
            return False
        # Superusers have all permissions.
        if self.is_superuser:
            return True
        # Otherwise we need to check the backends.
        for backend in auth.get_backends():
            if hasattr(backend, "has_perm"):
                if backend.has_perm(self, perm):
                    return True
        return False
    def has_perms(self, perm_list):
        """Returns True if the user has each of the specified permissions."""
        for perm in perm_list:
            if not self.has_perm(perm):
                return False
        return True
    def has_module_perms(self, app_label):
        """
        Returns True if the user has any permissions in the given app
        label. Uses pretty much the same logic as has_perm, above.
        """
        if not self.is_active:
            return False
        if self.is_superuser:
            return True
        for backend in auth.get_backends():
            if hasattr(backend, "has_module_perms"):
                if backend.has_module_perms(self, app_label):
                    return True
        return False
    def get_and_delete_messages(self):
        """Returns this user's queued message strings and deletes them."""
        messages = []
        for m in self.message_set.all():
            messages.append(m.message)
            m.delete()
        return messages
    def email_user(self, subject, message, from_email=None):
        "Sends an e-mail to this User."
        from django.core.mail import send_mail
        send_mail(subject, message, from_email, [self.email])
    def get_profile(self):
        """
        Returns site-specific profile for this user. Raises
        SiteProfileNotAvailable if this site does not allow profiles.
        """
        # Cached per instance; the profile model is resolved lazily from
        # settings.AUTH_PROFILE_MODULE ('app_label.model_name').
        if not hasattr(self, '_profile_cache'):
            from django.conf import settings
            if not settings.AUTH_PROFILE_MODULE:
                raise SiteProfileNotAvailable
            try:
                app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
                model = models.get_model(app_label, model_name)
                self._profile_cache = model._default_manager.get(user__id__exact=self.id)
            except (ImportError, ImproperlyConfigured):
                raise SiteProfileNotAvailable
        return self._profile_cache
class Message(models.Model):
    """
    The message system is a lightweight way to queue messages for given
    users. A message is associated with a User instance (so it is only
    applicable for registered users). There's no concept of expiration or
    timestamps. Messages are created by the Django admin after successful
    actions. For example, "The poll Foo was created successfully." is a
    message.
    """
    # Recipient; messages are read and cleared via
    # User.get_and_delete_messages().
    user = models.ForeignKey(User)
    message = models.TextField(_('message'))
    def __unicode__(self):
        return self.message
class AnonymousUser(object):
    """Represents the absence of an authenticated user.

    Exposes the read-only surface of ``User`` (identity attributes, the
    ``groups``/``user_permissions`` managers, permission checks) but is
    never persisted: mutating operations raise NotImplementedError and
    every permission check fails.
    """
    id = None
    username = ''
    is_staff = False
    is_active = False
    is_superuser = False
    _groups = EmptyManager()
    _user_permissions = EmptyManager()

    def __init__(self):
        pass

    def _get_groups(self):
        return self._groups
    groups = property(_get_groups)

    def _get_user_permissions(self):
        return self._user_permissions
    user_permissions = property(_get_user_permissions)

    def __unicode__(self):
        return 'AnonymousUser'

    def __str__(self):
        return unicode(self).encode('utf-8')

    def __eq__(self, rhs):
        # Every AnonymousUser compares equal to any other AnonymousUser.
        return isinstance(rhs, self.__class__)

    def __ne__(self, rhs):
        return not self.__eq__(rhs)

    def __hash__(self):
        # All instances are interchangeable, so they share one hash value.
        return 1

    # Persistence and credential mutation are unsupported for anonymous users.
    def save(self):
        raise NotImplementedError

    def delete(self):
        raise NotImplementedError

    def set_password(self, raw_password):
        raise NotImplementedError

    def check_password(self, raw_password):
        raise NotImplementedError

    # Anonymous users never hold permissions or queued messages.
    def has_perm(self, perm):
        return False

    def has_module_perms(self, module):
        return False

    def get_and_delete_messages(self):
        return []

    def is_anonymous(self):
        return True

    def is_authenticated(self):
        return False
|
|
"""Proximal Policy Optimization (clip objective)."""
from copy import deepcopy
import torch
import torch.optim as optim
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from torch.distributions import kl_divergence
import time
import numpy as np
import os
import ray
from rl.envs import WrapEnv
class PPOBuffer:
    """
    A buffer for storing trajectory data and calculating returns for the policy
    and critic updates.

    This container is intentionally not optimized w.r.t. to memory allocation
    speed because such allocation is almost never a bottleneck for policy
    gradient.

    On the other hand, experience buffers are a frequent source of
    off-by-one errors and other bugs in policy gradient implementations, so
    this code is optimized for clarity and readability, at the expense of being
    (very) marginally slower than some other implementations.
    (Premature optimization is the root of all evil).
    """
    def __init__(self, gamma=0.99, lam=0.95, use_gae=False):
        """
        Args:
            gamma: discount factor used by finish_path().
            lam: GAE lambda (stored; not used by finish_path here).
            use_gae: caller's GAE flag (stored; not used by finish_path here).
        """
        self.states = []
        self.actions = []
        self.rewards = []
        self.values = []
        self.returns = []
        self.ep_returns = [] # for logging
        self.ep_lens = []
        self.gamma, self.lam = gamma, lam
        # Fix: the use_gae argument was previously accepted but silently
        # dropped; keep it so callers can inspect the configuration.
        self.use_gae = use_gae
        # ptr: one past the last stored timestep;
        # path_idx: start index of the trajectory currently being collected.
        self.ptr, self.path_idx = 0, 0

    def store(self, state, action, reward, value):
        """
        Append one timestep of agent-environment interaction to the buffer.
        """
        # TODO: make sure these dimensions really make sense
        self.states += [state.squeeze(0)]
        self.actions += [action.squeeze(0)]
        self.rewards += [reward.squeeze(0)]
        self.values += [value.squeeze(0)]
        self.ptr += 1

    def finish_path(self, last_val=None):
        """
        Close out the current trajectory: compute discounted returns for the
        slice [path_idx, ptr), bootstrapping from last_val (or zero when the
        episode terminated / no value is supplied).
        """
        if last_val is None:
            last_val = np.zeros(shape=(1,))
        path = slice(self.path_idx, self.ptr)
        rewards = self.rewards[path]
        returns = []
        R = last_val.squeeze(0).copy()
        for reward in reversed(rewards):
            # ``gamma * R + reward`` allocates a fresh array each step, so the
            # appended entries are independent snapshots (no aliasing of R).
            R = self.gamma * R + reward
            returns.append(R)
        # Fix: build back-to-front with append + reverse instead of
        # insert(0, ...), which was O(k^2) in the trajectory length
        # (resolves the original TODO/BUG comments here).
        returns.reverse()
        self.returns += returns
        self.ep_returns += [np.sum(rewards)]
        self.ep_lens += [len(rewards)]
        self.path_idx = self.ptr

    def get(self):
        """Return (states, actions, returns, values) as parallel lists."""
        return(
            self.states,
            self.actions,
            self.returns,
            self.values
        )
class PPO:
    """Proximal Policy Optimization with the clipped surrogate objective.

    Rollouts are collected into PPOBuffer objects (optionally in parallel
    via ray workers) and the actor/critic are optimized jointly with Adam
    over several epochs of shuffled minibatches.
    """
    def __init__(self,
                 args=None,
                 gamma=None,
                 lam=None,
                 lr=None,
                 eps=None,
                 entropy_coeff=None,
                 clip=None,
                 epochs=None,
                 minibatch_size=None,
                 num_steps=None):
        # NOTE(review): every hyperparameter is read from the ``args`` dict;
        # the remaining keyword parameters are accepted but ignored.
        self.env_name = args['env']
        self.gamma = args['gamma']          # reward discount
        self.lam = args['lam']              # GAE lambda (forwarded to PPOBuffer)
        self.lr = args['lr']                # Adam learning rate
        self.eps = args['eps']              # Adam epsilon; also reused below to
                                            # stabilize advantage normalization
        self.entropy_coeff = args['entropy_coeff']
        self.clip = args['clip']            # PPO surrogate clipping parameter
        self.minibatch_size = args['minibatch_size']
        self.epochs = args['epochs']
        self.num_steps = args['num_steps']  # timesteps per gradient estimate
        self.max_traj_len = args['max_traj_len']
        self.name = args['name']
        self.use_gae = args['use_gae']
        self.n_proc = args['num_procs']
        self.grad_clip = args['max_grad_norm']
        # Start the ray runtime for parallel sampling.
        ray.init()

    @staticmethod
    def add_arguments(parser):
        """Register all PPO hyperparameters on an argparse parser."""
        parser.add_argument("--n_itr", type=int, default=10000,
                            help="Number of iterations of the learning algorithm")
        parser.add_argument("--lr", type=float, default=3e-4,
                            help="Adam learning rate")
        parser.add_argument("--eps", type=float, default=1e-5,
                            help="Adam epsilon (for numerical stability)")
        parser.add_argument("--lam", type=float, default=0.95,
                            help="Generalized advantage estimate discount")
        parser.add_argument("--gamma", type=float, default=0.99,
                            help="MDP discount")
        parser.add_argument("--entropy_coeff", type=float, default=0.0,
                            help="Coefficient for entropy regularization")
        parser.add_argument("--clip", type=float, default=0.2,
                            help="Clipping parameter for PPO surrogate loss")
        parser.add_argument("--minibatch_size", type=int, default=64,
                            help="Batch size for PPO updates")
        parser.add_argument("--epochs", type=int, default=10,
                            help="Number of optimization epochs per PPO update")
        parser.add_argument("--num_steps", type=int, default=5096,
                            help="Number of sampled timesteps per gradient estimate")
        parser.add_argument("--use_gae", type=bool, default=True,
                            help="Whether or not to calculate returns using Generalized Advantage Estimation")
        parser.add_argument("--num_procs", type=int, default=1,
                            help="Number of threads to train on")
        parser.add_argument("--max_grad_norm", type=float, default=0.5,
                            help="Value to clip gradients at.")
        parser.add_argument("--max_traj_len", type=int, default=1000,
                            help="Max episode horizon")

    def save(self, policy):
        """Serialize the policy (tagged with env_name) under ./trained_models."""
        policy.env_name = self.env_name
        save_path = os.path.join("./trained_models", "ppo")
        try:
            os.makedirs(save_path)
        except OSError:
            # Directory already exists.
            pass
        filetype = ".pt" # pytorch model
        # NOTE(review): the model is written into ./trained_models directly;
        # the ``save_path`` subdirectory created above is never used.
        torch.save(policy, os.path.join("./trained_models", self.name + filetype))

    @ray.remote
    @torch.no_grad()
    def sample(self, env_fn, policy, min_steps, max_traj_len, deterministic=False):
        """
        Sample at least min_steps number of total timesteps, truncating
        trajectories only if they exceed max_traj_len number of timesteps
        """
        # ray.remote turns this into a remote function rather than a bound
        # method, which is why sample_parallel passes ``self`` explicitly.
        env = WrapEnv(env_fn) # TODO
        memory = PPOBuffer(self.gamma, self.lam)
        num_steps = 0
        while num_steps < min_steps:
            state = torch.Tensor(env.reset())
            done = False
            value = 0
            traj_len = 0
            while not done and traj_len < max_traj_len:
                value, action = policy.act(state, deterministic)
                next_state, reward, done, _ = env.step(action.numpy())
                memory.store(state.numpy(), action.numpy(), reward, value.numpy())
                state = torch.Tensor(next_state)
                traj_len += 1
                num_steps += 1
            value, _ = policy.act(state)
            # Bootstrap the tail of a truncated trajectory with the critic's
            # value; a genuinely terminal state bootstraps with zero.
            memory.finish_path(last_val=(not done) * value.numpy())
        return memory

    def sample_parallel(self, env_fn, policy, min_steps, max_traj_len, deterministic=False):
        """Collect experience with n_proc sample() workers and merge the
        resulting buffers into a single PPOBuffer."""
        worker = self.sample
        args = (self, env_fn, policy, min_steps, max_traj_len, deterministic)
        # Don't bother launching another process for a single thread: call the
        # undecorated function directly through ray's ``_function`` attribute.
        if self.n_proc > 1:
            result = ray.get([worker.remote(*args) for _ in range(self.n_proc)])
        else:
            result = [worker._function(*args)]
        # O(n) concatenation of the per-worker buffers.
        def merge(buffers):
            merged = PPOBuffer(self.gamma, self.lam)
            for buf in buffers:
                merged.states += buf.states
                merged.actions += buf.actions
                merged.rewards += buf.rewards
                merged.values += buf.values
                merged.returns += buf.returns
                merged.ep_returns += buf.ep_returns
                merged.ep_lens += buf.ep_lens
            return merged
        return merge(result)

    def train(self,
              env_fn,
              policy,
              n_itr,
              logger=None):
        """Run n_itr iterations of sample-then-optimize PPO training."""
        old_policy = deepcopy(policy)
        optimizer = optim.Adam(policy.parameters(), lr=self.lr, eps=self.eps)
        start_time = time.time()
        for itr in range(n_itr):
            print("********** Iteration {} ************".format(itr))
            sample_start = time.time()
            batch = self.sample_parallel(env_fn, policy, self.num_steps, self.max_traj_len)
            print("time elapsed: {:.2f} s".format(time.time() - start_time))
            print("sample time elapsed: {:.2f} s".format(time.time() - sample_start))
            observations, actions, returns, values = map(torch.Tensor, batch.get())
            # Advantages are returns minus the critic baseline, normalized for
            # optimization stability (eps guards against zero std).
            advantages = returns - values
            advantages = (advantages - advantages.mean()) / (advantages.std() + self.eps)
            minibatch_size = self.minibatch_size or advantages.numel()
            print("timesteps in batch: %i" % advantages.numel())
            # Snapshot the behavior policy for the importance-ratio denominator.
            old_policy.load_state_dict(policy.state_dict())  # WAY faster than deepcopy
            optimizer_start = time.time()
            for _ in range(self.epochs):
                losses = []
                sampler = BatchSampler(
                    SubsetRandomSampler(range(advantages.numel())),
                    minibatch_size,
                    drop_last=True
                )
                for indices in sampler:
                    indices = torch.LongTensor(indices)
                    obs_batch = observations[indices]
                    action_batch = actions[indices]
                    return_batch = returns[indices]
                    advantage_batch = advantages[indices]
                    values, pdf = policy.evaluate(obs_batch)
                    # TODO, move this outside loop?
                    with torch.no_grad():
                        _, old_pdf = old_policy.evaluate(obs_batch)
                        old_log_probs = old_pdf.log_prob(action_batch).sum(-1, keepdim=True)
                    log_probs = pdf.log_prob(action_batch).sum(-1, keepdim=True)
                    # Importance ratio pi_new / pi_old, computed in log space.
                    ratio = (log_probs - old_log_probs).exp()
                    cpi_loss = ratio * advantage_batch
                    clip_loss = ratio.clamp(1.0 - self.clip, 1.0 + self.clip) * advantage_batch
                    # Clipped surrogate objective: pessimistic minimum of the
                    # unclipped and clipped terms.
                    actor_loss = -torch.min(cpi_loss, clip_loss).mean()
                    critic_loss = 0.5 * (return_batch - values).pow(2).mean()
                    entropy_penalty = -self.entropy_coeff * pdf.entropy().mean()
                    # TODO: add ability to optimize critic and actor seperately, with different learning rates
                    optimizer.zero_grad()
                    (actor_loss + critic_loss + entropy_penalty).backward()
                    # Clip the gradient norm to prevent "unlucky" minibatches
                    # from causing pathological updates.
                    torch.nn.utils.clip_grad_norm_(policy.parameters(), self.grad_clip)
                    optimizer.step()
                    losses.append([actor_loss.item(),
                                   pdf.entropy().mean().item(),
                                   critic_loss.item(),
                                   ratio.mean().item()])
                # TODO: add verbosity arguments to suppress this
                print(' '.join(["%g"%x for x in np.mean(losses, axis=0)]))
                # Early stopping: abort remaining epochs once the policy has
                # drifted too far from the sampling policy.
                # NOTE(review): KL here is measured on the last minibatch only.
                if kl_divergence(pdf, old_pdf).mean() > 0.02:
                    print("Max kl reached, stopping optimization early.")
                    break
            print("optimizer time elapsed: {:.2f} s".format(time.time() - optimizer_start))
            if logger is not None:
                # Deterministic evaluation rollouts plus diagnostics over the
                # whole training batch.
                evaluate_start = time.time()
                test = self.sample_parallel(env_fn, policy, 800 // self.n_proc, self.max_traj_len, deterministic=True)
                print("evaluate time elapsed: {:.2f} s".format(time.time() - evaluate_start))
                _, pdf = policy.evaluate(observations)
                _, old_pdf = old_policy.evaluate(observations)
                entropy = pdf.entropy().mean().item()
                kl = kl_divergence(pdf, old_pdf).mean().item()
                logger.record("Return (test)", np.mean(test.ep_returns))
                logger.record("Return (batch)", np.mean(batch.ep_returns))
                logger.record("Mean Eplen", np.mean(batch.ep_lens))
                logger.record("Mean KL Div", kl)
                logger.record("Mean Entropy", entropy)
                logger.dump()
            # TODO: add option for how often to save model
            if itr % 10 == 0:
                self.save(policy)
|
|
from __future__ import absolute_import
import os
import time
import zipfile
from six import BytesIO, text_type
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from sentry.testutils import APITestCase, TestCase
from sentry.models import debugfile, File, ProjectDebugFile, DifMeta
# This is obviously a freely generated UUID and not the checksum UUID.
# This is permissible if users want to send different UUIDs
PROGUARD_UUID = text_type("6dc7fdb0-d2fb-4c8e-9d6b-bb1aa98929b1")
PROGUARD_SOURCE = b"""\
org.slf4j.helpers.Util$ClassContextSecurityManager -> org.a.b.g$a:
65:65:void <init>() -> <init>
67:67:java.lang.Class[] getClassContext() -> getClassContext
65:65:void <init>(org.slf4j.helpers.Util$1) -> <init>
"""
class DebugFileTest(TestCase):
    """Tests for ProjectDebugFile deletion and debug-id/feature lookups."""

    def test_delete_dif(self):
        """Deleting a dif removes both the dif row and its backing File."""
        dif = self.create_dif_file(
            debug_id="dfb8e43a-f242-3d73-a453-aeb6a777ef75-feedface", features=["debug", "unwind"]
        )
        dif_pk, file_pk = dif.id, dif.file.id
        dif.delete()
        assert not ProjectDebugFile.objects.filter(id=dif_pk).exists()
        assert not File.objects.filter(id=file_pk).exists()

    def test_find_dif_by_debug_id(self):
        """The newest dif per debug id wins; unknown ids are absent."""
        id_a = "dfb8e43a-f242-3d73-a453-aeb6a777ef75"
        id_b = "19bd7a09-3e31-4911-a5cd-8e829b845407"
        id_c = "7d402821-fae6-4ebc-bbb2-152f8e3b3352"
        self.create_dif_file(debug_id=id_a)  # older duplicate, should be shadowed
        newest_a = self.create_dif_file(debug_id=id_a)
        only_b = self.create_dif_file(debug_id=id_b)
        found = ProjectDebugFile.objects.find_by_debug_ids(
            project=self.project, debug_ids=[id_a, id_b, id_c]
        )
        assert found[id_a].id == newest_a.id
        assert found[id_b].id == only_b.id
        assert id_c not in found

    def test_find_dif_by_feature(self):
        """A single required feature filters out difs lacking it."""
        id_a = "dfb8e43a-f242-3d73-a453-aeb6a777ef75"
        id_b = "19bd7a09-3e31-4911-a5cd-8e829b845407"
        id_c = "7d402821-fae6-4ebc-bbb2-152f8e3b3352"
        self.create_dif_file(debug_id=id_a, features=["debug"])  # older duplicate
        matched_a = self.create_dif_file(debug_id=id_a, features=["debug"])
        self.create_dif_file(debug_id=id_a, features=["unwind"])  # lacks "debug"
        only_b = self.create_dif_file(debug_id=id_b)
        found = ProjectDebugFile.objects.find_by_debug_ids(
            project=self.project, debug_ids=[id_a, id_b, id_c], features=["debug"]
        )
        assert found[id_a].id == matched_a.id
        assert found[id_b].id == only_b.id
        assert id_c not in found

    def test_find_dif_by_features(self):
        """All requested features must be present on the matched dif."""
        id_a = "dfb8e43a-f242-3d73-a453-aeb6a777ef75"
        id_b = "19bd7a09-3e31-4911-a5cd-8e829b845407"
        id_c = "7d402821-fae6-4ebc-bbb2-152f8e3b3352"
        with_both = self.create_dif_file(debug_id=id_a, features=["debug", "unwind"])
        self.create_dif_file(debug_id=id_a, features=["debug"])
        self.create_dif_file(debug_id=id_a, features=["unwind"])
        only_b = self.create_dif_file(debug_id=id_b)
        found = ProjectDebugFile.objects.find_by_debug_ids(
            project=self.project,
            debug_ids=[id_a, id_b, id_c],
            features=["debug", "unwind"],
        )
        assert found[id_a].id == with_both.id
        assert found[id_b].id == only_b.id
        assert id_c not in found

    def test_find_legacy_dif_by_features(self):
        id_a = "dfb8e43a-f242-3d73-a453-aeb6a777ef75"
        self.create_dif_file(debug_id=id_a)
        newest = self.create_dif_file(debug_id=id_a)
        # XXX: If no file in a group has features, the newest one is chosen,
        # regardless of the required feature set.
        found = ProjectDebugFile.objects.find_by_debug_ids(
            project=self.project, debug_ids=[id_a], features=["debug"]
        )
        assert found[id_a].id == newest.id

    def test_find_dif_miss_by_features(self):
        """A dif with an explicitly empty feature set never matches."""
        missing_id = "dfb8e43a-f242-3d73-a453-aeb6a777ef75"
        self.create_dif_file(debug_id=missing_id, features=[])
        found = ProjectDebugFile.objects.find_by_debug_ids(
            project=self.project, debug_ids=[missing_id], features=["debug"]
        )
        assert missing_id not in found
class CreateDebugFileTest(APITestCase):
    """Tests for debugfile.create_dif_from_id creation/dedup behavior."""

    @property
    def file_path(self):
        # Mach-O dSYM fixture checked into the test tree.
        return os.path.join(os.path.dirname(__file__), "fixtures", "crash.dsym")

    def create_dif(self, fileobj=None, file=None, **kwargs):
        """Create a dif with sensible defaults; kwargs override DifMeta fields."""
        args = {
            "file_format": "macho",
            "arch": "x86_64",
            "debug_id": "67e9247c-814e-392b-a027-dbde6748fcbf",
            "data": {"features": ["debug"]},
            "path": "crash.dsym",
        }
        args.update(kwargs)
        return debugfile.create_dif_from_id(
            self.project, DifMeta(**args), fileobj=fileobj, file=file
        )

    def test_create_dif_from_file(self):
        file = self.create_file(
            name="crash.dsym", checksum="dc1e3f3e411979d336c3057cce64294f3420f93a"
        )
        dif, created = self.create_dif(file=file)
        assert created
        assert dif is not None
        assert dif.file.type == "project.dif"
        assert "Content-Type" in dif.file.headers
        assert ProjectDebugFile.objects.filter(id=dif.id).exists()

    def test_create_dif_from_fileobj(self):
        # Fix: open the dSYM in binary mode -- it is a Mach-O binary, and
        # text-mode reads would fail to decode it under Python 3.
        with open(self.file_path, "rb") as f:
            dif, created = self.create_dif(fileobj=f)
        assert created
        assert dif is not None
        assert dif.file.type == "project.dif"
        assert "Content-Type" in dif.file.headers
        assert ProjectDebugFile.objects.filter(id=dif.id).exists()

    def test_keep_disjoint_difs(self):
        # Two difs with non-overlapping feature sets must both be kept.
        file = self.create_file(
            name="crash.dsym", checksum="dc1e3f3e411979d336c3057cce64294f3420f93a"
        )
        dif1, created1 = self.create_dif(file=file, data={"features": ["unwind"]})
        file = self.create_file(
            name="crash.dsym", checksum="2b92c5472f4442a27da02509951ea2e0f529511c"
        )
        dif2, created2 = self.create_dif(file=file, data={"features": ["debug"]})
        assert created1 and created2
        assert ProjectDebugFile.objects.filter(id=dif1.id).exists()
        assert ProjectDebugFile.objects.filter(id=dif2.id).exists()

    def test_keep_overlapping_difs(self):
        # Partially overlapping feature sets are still both kept.
        file = self.create_file(
            name="crash.dsym", checksum="dc1e3f3e411979d336c3057cce64294f3420f93a"
        )
        dif1, created1 = self.create_dif(file=file, data={"features": ["symtab", "unwind"]})
        file = self.create_file(
            name="crash.dsym", checksum="2b92c5472f4442a27da02509951ea2e0f529511c"
        )
        dif2, created2 = self.create_dif(file=file, data={"features": ["symtab", "debug"]})
        assert created1 and created2
        assert ProjectDebugFile.objects.filter(id=dif1.id).exists()
        assert ProjectDebugFile.objects.filter(id=dif2.id).exists()

    def test_keep_latest_dif(self):
        file = self.create_file(
            name="crash.dsym", checksum="dc1e3f3e411979d336c3057cce64294f3420f93a"
        )
        dif1, created1 = self.create_dif(file=file, data={"features": ["debug", "unwind"]})
        file = self.create_file(
            name="crash.dsym", checksum="2b92c5472f4442a27da02509951ea2e0f529511c"
        )
        dif2, created2 = self.create_dif(file=file, data={"features": ["debug"]})
        file = self.create_file(
            name="crash.dsym", checksum="3c60980275c4adc81a657f6aae00e11ed528b538"
        )
        dif3, created3 = self.create_dif(file=file, data={"features": []})
        # XXX: dif2 and dif3 would actually be redundant, but since they are more
        # recent than dif1 we keep both of them. This assumes that newer uploads
        # might contain more specific debug information and should therefore
        # receive precedence over older ones.
        assert created1 and created2 and created3
        assert ProjectDebugFile.objects.filter(id=dif1.id).exists()
        assert ProjectDebugFile.objects.filter(id=dif2.id).exists()
        assert ProjectDebugFile.objects.filter(id=dif3.id).exists()

    def test_skip_redundant_dif(self):
        # Fix: binary mode here as well (see test_create_dif_from_fileobj).
        with open(self.file_path, "rb") as f:
            dif1, created1 = self.create_dif(fileobj=f)
        with open(self.file_path, "rb") as f:
            dif2, created2 = self.create_dif(fileobj=f)
        assert created1
        assert not created2
        assert dif1 == dif2

    def test_remove_redundant_dif(self):
        # A newer dif with the same feature set replaces the older one.
        file = self.create_file(
            name="crash.dsym", checksum="dc1e3f3e411979d336c3057cce64294f3420f93a"
        )
        dif1, created1 = self.create_dif(file=file, data={"features": ["debug"]})
        file = self.create_file(
            name="crash.dsym", checksum="2b92c5472f4442a27da02509951ea2e0f529511c"
        )
        dif2, created2 = self.create_dif(file=file, data={"features": ["debug"]})
        assert created1 and created2
        assert not ProjectDebugFile.objects.filter(id=dif1.id).exists()
        assert ProjectDebugFile.objects.filter(id=dif2.id).exists()
class DebugFilesClearTest(APITestCase):
    def test_simple_cache_clear(self):
        """clear_old_entries evicts cached DIFs only after they have aged out."""
        project = self.create_project(name="foo")
        url = reverse(
            "sentry-api-0-dsym-files",
            kwargs={"organization_slug": project.organization.slug, "project_slug": project.slug},
        )
        self.login_as(user=self.user)

        # Build an in-memory zip with one proguard mapping and one ignored file.
        buf = BytesIO()
        with zipfile.ZipFile(buf, "w") as archive:
            archive.writestr("proguard/%s.txt" % PROGUARD_UUID, PROGUARD_SOURCE)
            archive.writestr("ignored-file.txt", b"This is just some stuff")

        response = self.client.post(
            url,
            {
                "file": SimpleUploadedFile(
                    "symbols.zip", buf.getvalue(), content_type="application/zip"
                )
            },
            format="multipart",
        )
        assert response.status_code == 201, response.content
        assert len(response.data) == 1
        uploaded = response.data[0]
        assert uploaded["headers"] == {"Content-Type": "text/x-proguard+plain"}
        assert uploaded["sha1"] == "e6d3c5185dac63eddfdc1a5edfffa32d46103b44"
        assert uploaded["uuid"] == PROGUARD_UUID
        assert uploaded["objectName"] == "proguard-mapping"
        assert uploaded["cpuName"] == "any"
        assert uploaded["symbolType"] == "proguard"

        difs = ProjectDebugFile.difcache.fetch_difs(
            project=project, debug_ids=[PROGUARD_UUID], features=["mapping"]
        )
        assert len(difs) == 1
        assert os.path.isfile(difs[PROGUARD_UUID])

        # Clearing immediately must not evict a fresh cache entry.
        ProjectDebugFile.difcache.clear_old_entries()
        assert os.path.isfile(difs[PROGUARD_UUID])

        # Pretend 48 hours have elapsed while clearing, then restore the clock.
        real_time = time.time
        time.time = lambda: real_time() + 60 * 60 * 48
        try:
            ProjectDebugFile.difcache.clear_old_entries()
        finally:
            time.time = real_time

        # The entry is now old enough to have been evicted.
        assert not os.path.isfile(difs[PROGUARD_UUID])
|
|
# -*- coding: utf-8 -*-
import os
import httplib as http
from flask import request
from flask import send_from_directory
from framework import status
from framework import sentry
from framework.auth import cas
from framework.routing import Rule
from framework.flask import redirect
from framework.sessions import session
from framework.routing import WebRenderer
from framework.exceptions import HTTPError
from framework.auth import get_display_name
from framework.routing import xml_renderer
from framework.routing import json_renderer
from framework.routing import process_rules
from framework.auth import views as auth_views
from framework.routing import render_mako_string
from framework.auth.core import _get_current_user
from website import util
from website import settings
from website import language
from website.util import paths
from website.util import sanitize
from website import landing_pages as landing_page_views
from website import views as website_views
from website.citations import views as citation_views
from website.search import views as search_views
from website.oauth import views as oauth_views
from website.profile import views as profile_views
from website.project import views as project_views
from website.addons.base import views as addon_views
from website.discovery import views as discovery_views
from website.conferences import views as conference_views
from website.notifications import views as notification_views
def get_globals():
    """Context variables that are available for every template rendered by
    OSFWebRenderer.

    Evaluated once per request: looks up the current user and builds the
    template context of user fields, settings flags, URL helpers, and
    sanitization utilities. Several entries call side-effectful helpers
    (``status.pop_status_messages`` drains the message queue; the CAS URL
    helpers read ``request.url``), so this must run inside a request context.
    """
    user = _get_current_user()
    return {
        'user_name': user.username if user else '',
        'user_full_name': user.fullname if user else '',
        'user_id': user._primary_key if user else '',
        'user_url': user.url if user else '',
        'user_gravatar': profile_views.current_user_gravatar(size=25)['gravatar_url'] if user else '',
        'user_api_url': user.api_url if user else '',
        'display_name': get_display_name(user.fullname) if user else '',
        'use_cdn': settings.USE_CDN_FOR_CLIENT_LIBS,
        'piwik_host': settings.PIWIK_HOST,
        'piwik_site_id': settings.PIWIK_SITE_ID,
        'sentry_dsn_js': settings.SENTRY_DSN_JS if sentry.enabled else None,
        'dev_mode': settings.DEV_MODE,
        'allow_login': settings.ALLOW_LOGIN,
        'cookie_name': settings.COOKIE_NAME,
        # NOTE: pop_status_messages consumes the queued flash messages.
        'status': status.pop_status_messages(),
        'domain': settings.DOMAIN,
        'disk_saving_mode': settings.DISK_SAVING_MODE,
        'language': language,
        'web_url_for': util.web_url_for,
        'api_url_for': util.api_url_for,
        'api_v2_url': util.api_v2_url,  # URL function for templates
        'api_v2_base': util.api_v2_url(''),  # Base url used by JS api helper
        'sanitize': sanitize,
        # Escapes quotes so values can be embedded in single/double-quoted JS strings.
        'js_str': lambda x: x.replace("'", r"\'").replace('"', r'\"'),
        'sjson': lambda s: sanitize.safe_json(s),
        'webpack_asset': paths.webpack_asset,
        'waterbutler_url': settings.WATERBUTLER_URL,
        'login_url': cas.get_login_url(request.url, auto=True),
        'access_token': session.data.get('auth_user_access_token') or '',
        'auth_url': cas.get_login_url(request.url),
        'profile_url': cas.get_profile_url(),
    }
class OsfWebRenderer(WebRenderer):
    """Render a Mako template with OSF context vars.

    :param trust: Optional. If ``False``, markup-safe escaping will be enabled
    """
    def __init__(self, *args, **kwargs):
        # Inject the OSF-wide template context provider before delegating.
        kwargs.update(data=get_globals)
        super(OsfWebRenderer, self).__init__(*args, **kwargs)
#: Renderer for views that only redirect or raise an error; no template body
#: is ever rendered, so an empty template name is used.
notemplate = OsfWebRenderer('', render_mako_string)
# Static files (robots.txt, etc.)
def favicon():
    """Serve ``favicon.ico`` from the static assets folder."""
    return send_from_directory(
        settings.STATIC_FOLDER, 'favicon.ico', mimetype='image/vnd.microsoft.icon'
    )
def robots():
    """Serves the robots.txt file."""
    # Prefer a deployment-local override when one is present on disk.
    local_override = os.path.join(settings.STATIC_FOLDER, 'robots.local.txt')
    robots_file = 'robots.local.txt' if os.path.exists(local_override) else 'robots.txt'
    return send_from_directory(settings.STATIC_FOLDER, robots_file, mimetype='text/plain')
def goodbye():
    """Render the post-logout page; authenticated users are bounced home."""
    user = _get_current_user()
    if user:
        # Still logged in -- go to the dashboard instead of the goodbye page.
        return redirect(util.web_url_for('dashboard'))
    status.push_status_message(language.LOGOUT, 'success')
    return {}
def make_url_map(app):
    """Set up all the routes for the OSF app.

    Registers web (Mako-rendered) and API (JSON/XML) rules on ``app`` via
    ``process_rules``, grouped by feature area. Must be called exactly once
    at startup.

    :param app: A Flask/Werkzeug app to bind the rules to.
    """
    # Set default views to 404, using URL-appropriate renderers
    process_rules(app, [
        Rule('/<path:_>', ['get', 'post'], HTTPError(http.NOT_FOUND),
             OsfWebRenderer('', render_mako_string)),
        Rule('/api/v1/<path:_>', ['get', 'post'],
             HTTPError(http.NOT_FOUND), json_renderer),
    ])

    ### GUID ###
    process_rules(app, [
        Rule(
            [
                '/<guid>/',
                '/<guid>/<path:suffix>',
            ],
            ['get', 'post', 'put', 'patch', 'delete'],
            website_views.resolve_guid,
            notemplate,
        ),
        Rule(
            [
                '/api/v1/<guid>/',
                '/api/v1/<guid>/<path:suffix>',
            ],
            ['get', 'post', 'put', 'patch', 'delete'],
            website_views.resolve_guid,
            json_renderer,
        ),
    ])

    # Static files
    process_rules(app, [
        Rule('/favicon.ico', 'get', favicon, json_renderer),
        Rule('/robots.txt', 'get', robots, json_renderer),
    ])

    ### Base ###
    process_rules(app, [
        Rule('/dashboard/', 'get', website_views.dashboard, OsfWebRenderer('dashboard.mako')),
        Rule('/reproducibility/', 'get',
             website_views.reproducibility, OsfWebRenderer('', render_mako_string)),
        Rule('/about/', 'get', website_views.redirect_about, json_renderer,),
        Rule('/howosfworks/', 'get', website_views.redirect_howosfworks, json_renderer,),
        Rule('/faq/', 'get', {}, OsfWebRenderer('public/pages/faq.mako')),
        Rule('/getting-started/', 'get', {}, OsfWebRenderer('public/pages/getting_started.mako')),
        Rule('/explore/', 'get', {}, OsfWebRenderer('public/explore.mako')),
        Rule(['/messages/', '/help/'], 'get', {}, OsfWebRenderer('public/comingsoon.mako')),
        Rule(
            '/view/<meeting>/',
            'get',
            conference_views.conference_results,
            OsfWebRenderer('public/pages/meeting.mako'),
        ),
        Rule(
            '/view/<meeting>/plain/',
            'get',
            conference_views.conference_results,
            OsfWebRenderer('public/pages/meeting_plain.mako'),
            endpoint_suffix='__plain',
        ),
        Rule(
            '/api/v1/view/<meeting>/',
            'get',
            conference_views.conference_data,
            json_renderer,
        ),
        Rule(
            '/meetings/',
            'get',
            conference_views.conference_view,
            OsfWebRenderer('public/pages/meeting_landing.mako'),
        ),
        Rule(
            '/presentations/',
            'get',
            conference_views.redirect_to_meetings,
            json_renderer,
        ),
        Rule('/news/', 'get', {}, OsfWebRenderer('public/pages/news.mako')),
    ])

    # Site-wide API routes
    process_rules(app, [
        Rule(
            '/citations/styles/',
            'get',
            citation_views.list_citation_styles,
            json_renderer,
        ),
    ], prefix='/api/v1')

    process_rules(app, [
        Rule(
            [
                '/project/<pid>/<addon>/settings/disable/',
                '/project/<pid>/node/<nid>/<addon>/settings/disable/',
            ],
            'post',
            addon_views.disable_addon,
            json_renderer,
        ),
        Rule(
            '/profile/<uid>/<addon>/settings/',
            'get',
            addon_views.get_addon_user_config,
            json_renderer,
        ),
    ], prefix='/api/v1')

    # OAuth
    process_rules(app, [
        Rule(
            '/oauth/connect/<service_name>/',
            'get',
            oauth_views.oauth_connect,
            json_renderer,
        ),
        Rule(
            '/oauth/callback/<service_name>/',
            'get',
            oauth_views.oauth_callback,
            OsfWebRenderer('util/oauth_complete.mako'),
        ),
    ])
    process_rules(app, [
        Rule(
            [
                '/oauth/accounts/<external_account_id>/',
            ],
            'delete',
            oauth_views.oauth_disconnect,
            json_renderer,
        )
    ], prefix='/api/v1')

    process_rules(app, [
        Rule('/dashboard/get_nodes/', 'get', website_views.get_dashboard_nodes, json_renderer),
        Rule(
            [
                '/dashboard/<nid>',
                '/dashboard/',
            ],
            'get', website_views.get_dashboard, json_renderer),
    ], prefix='/api/v1')

    ### Metadata ###
    process_rules(app, [
        Rule(
            [
                '/project/<pid>/comments/',
                '/project/<pid>/node/<nid>/comments/',
            ],
            'get',
            project_views.comment.list_comments,
            json_renderer,
        ),
        Rule(
            [
                '/project/<pid>/comments/discussion/',
                '/project/<pid>/node/<nid>/comments/discussion/',
            ],
            'get',
            project_views.comment.comment_discussion,
            json_renderer,
        ),
        Rule(
            [
                '/project/<pid>/comment/',
                '/project/<pid>/node/<nid>/comment/',
            ],
            'post',
            project_views.comment.add_comment,
            json_renderer,
        ),
        Rule(
            [
                '/project/<pid>/comment/<cid>/',
                '/project/<pid>/node/<nid>/comment/<cid>/',
            ],
            'put',
            project_views.comment.edit_comment,
            json_renderer,
        ),
        Rule(
            [
                '/project/<pid>/comment/<cid>/',
                '/project/<pid>/node/<nid>/comment/<cid>/',
            ],
            'delete',
            project_views.comment.delete_comment,
            json_renderer,
        ),
        Rule(
            [
                '/project/<pid>/comment/<cid>/undelete/',
                '/project/<pid>/node/<nid>/comment/<cid>/undelete/',
            ],
            'put',
            project_views.comment.undelete_comment,
            json_renderer,
        ),
        Rule(
            [
                '/project/<pid>/comments/timestamps/',
                '/project/<pid>/node/<nid>/comments/timestamps/',
            ],
            'put',
            project_views.comment.update_comments_timestamp,
            json_renderer,
        ),
        Rule(
            [
                '/project/<pid>/comment/<cid>/report/',
                '/project/<pid>/node/<nid>/comment/<cid>/report/',
            ],
            'post',
            project_views.comment.report_abuse,
            json_renderer,
        ),
        Rule(
            [
                '/project/<pid>/comment/<cid>/unreport/',
                '/project/<pid>/node/<nid>/comment/<cid>/unreport/',
            ],
            'post',
            project_views.comment.unreport_abuse,
            json_renderer,
        ),
        Rule(
            [
                '/project/<pid>/citation/',
                '/project/<pid>/node/<nid>/citation/',
            ],
            'get',
            citation_views.node_citation,
            json_renderer,
        ),
    ], prefix='/api/v1')

    ### Forms ###
    process_rules(app, [
        Rule('/forms/registration/', 'get', website_views.registration_form, json_renderer),
        Rule('/forms/signin/', 'get', website_views.signin_form, json_renderer),
        Rule('/forms/forgot_password/', 'get', website_views.forgot_password_form, json_renderer),
        Rule('/forms/reset_password/', 'get', website_views.reset_password_form, json_renderer),
    ], prefix='/api/v1')

    ### Discovery ###
    process_rules(app, [
        Rule('/explore/activity/', 'get', discovery_views.activity,
             OsfWebRenderer('public/pages/active_nodes.mako')),
    ])

    ### Auth ###
    # Web
    process_rules(app, [
        Rule(
            '/confirm/<uid>/<token>/',
            'get',
            auth_views.confirm_email_get,
            # View will either redirect or display error message
            OsfWebRenderer('error.mako', render_mako_string)
        ),
        Rule(
            '/resetpassword/<verification_key>/',
            ['get', 'post'],
            auth_views.reset_password,
            OsfWebRenderer('public/resetpassword.mako', render_mako_string)
        ),
        # Resend confirmation URL linked to in CAS login page
        Rule(
            '/resend/',
            ['get', 'post'],
            auth_views.resend_confirmation,
            OsfWebRenderer('resend.mako', render_mako_string)
        ),
        # TODO: Remove `auth_register_post`
        Rule('/register/', 'post', auth_views.auth_register_post,
             OsfWebRenderer('public/login.mako')),
        Rule('/api/v1/register/', 'post', auth_views.register_user, json_renderer),
        Rule(['/login/', '/account/'], 'get',
             auth_views.auth_login, OsfWebRenderer('public/login.mako')),
        Rule('/login/first/', 'get', auth_views.auth_login,
             OsfWebRenderer('public/login.mako'),
             endpoint_suffix='__first', view_kwargs={'first': True}),
        Rule('/logout/', 'get', auth_views.auth_logout, notemplate),
        Rule('/forgotpassword/', 'get', auth_views.forgot_password_get,
             OsfWebRenderer('public/forgot_password.mako')),
        Rule('/forgotpassword/', 'post', auth_views.forgot_password_post,
             OsfWebRenderer('public/login.mako')),
        Rule([
            '/midas/', '/summit/', '/accountbeta/', '/decline/'
        ], 'get', auth_views.auth_registerbeta, OsfWebRenderer('', render_mako_string)),
        Rule('/login/connected_tools/',
             'get',
             landing_page_views.connected_tools,
             OsfWebRenderer('public/login_landing.mako')),
        Rule('/login/enriched_profile/',
             'get',
             landing_page_views.enriched_profile,
             OsfWebRenderer('public/login_landing.mako')),
    ])

    ### Profile ###
    # Web
    process_rules(app, [
        Rule(
            '/profile/',
            'get',
            profile_views.profile_view,
            OsfWebRenderer('profile.mako', trust=False)
        ),
        Rule(
            '/profile/<uid>/',
            'get',
            profile_views.profile_view_id,
            OsfWebRenderer('profile.mako', trust=False)
        ),
        Rule(
            ["/user/merge/"],
            'get',
            auth_views.merge_user_get,
            OsfWebRenderer("merge_accounts.mako", trust=False)
        ),
        Rule(
            ["/user/merge/"],
            'post',
            auth_views.merge_user_post,
            OsfWebRenderer("merge_accounts.mako", trust=False)
        ),
        # Route for claiming and setting email and password.
        # Verification token must be querystring argument
        Rule(
            ['/user/<uid>/<pid>/claim/'],
            ['get', 'post'],
            project_views.contributor.claim_user_form,
            OsfWebRenderer('claim_account.mako', trust=False)
        ),
        Rule(
            ['/user/<uid>/<pid>/claim/verify/<token>/'],
            ['get', 'post'],
            project_views.contributor.claim_user_registered,
            OsfWebRenderer('claim_account_registered.mako', trust=False)
        ),
        Rule(
            '/settings/',
            'get',
            profile_views.user_profile,
            OsfWebRenderer('profile/settings.mako', trust=False),
        ),
        Rule(
            '/settings/account/',
            'get',
            profile_views.user_account,
            OsfWebRenderer('profile/account.mako', trust=False),
        ),
        Rule(
            '/settings/account/password',
            'post',
            profile_views.user_account_password,
            OsfWebRenderer('profile/account.mako', trust=False),
        ),
        Rule(
            '/settings/addons/',
            'get',
            profile_views.user_addons,
            OsfWebRenderer('profile/addons.mako', trust=False),
        ),
        Rule(
            '/settings/notifications/',
            'get',
            profile_views.user_notifications,
            OsfWebRenderer('profile/notifications.mako', trust=False),
        ),
        # TODO: Uncomment once outstanding issues with this feature are addressed
        # Rule(
        #     '/@<twitter_handle>/',
        #     'get',
        #     profile_views.redirect_to_twitter,
        #     OsfWebRenderer('error.mako', render_mako_string, trust=False)
        # ),
    ])

    # API
    process_rules(app, [
        Rule('/profile/', 'get', profile_views.profile_view, json_renderer),
        Rule('/profile/', 'put', profile_views.update_user, json_renderer),
        Rule('/resend/', 'put', profile_views.resend_confirmation, json_renderer),
        Rule('/profile/<uid>/', 'get', profile_views.profile_view_id, json_renderer),

        # Used by profile.html
        Rule('/profile/<uid>/edit/', 'post', profile_views.edit_profile, json_renderer),
        Rule('/profile/<uid>/public_projects/', 'get',
             profile_views.get_public_projects, json_renderer),
        Rule('/profile/<uid>/public_components/', 'get',
             profile_views.get_public_components, json_renderer),

        Rule('/profile/<user_id>/summary/', 'get',
             profile_views.get_profile_summary, json_renderer),
        Rule('/user/<uid>/<pid>/claim/email/', 'post',
             project_views.contributor.claim_user_post, json_renderer),

        Rule(
            '/profile/export/',
            'post',
            profile_views.request_export,
            json_renderer,
        ),
        Rule(
            '/profile/deactivate/',
            'post',
            profile_views.request_deactivation,
            json_renderer,
        ),

        Rule(
            [
                '/profile/gravatar/',
                '/users/gravatar/',
                '/profile/gravatar/<size>',
                '/users/gravatar/<size>',
            ],
            'get',
            profile_views.current_user_gravatar,
            json_renderer,
        ),
        Rule(
            [
                '/profile/<uid>/gravatar/',
                '/users/<uid>/gravatar/',
                '/profile/<uid>/gravatar/<size>',
                '/users/<uid>/gravatar/<size>',
            ],
            'get',
            profile_views.get_gravatar,
            json_renderer,
        ),

        # Rules for user profile configuration
        Rule('/settings/names/', 'get', profile_views.serialize_names, json_renderer),
        Rule('/settings/names/', 'put', profile_views.unserialize_names, json_renderer),
        Rule('/settings/names/impute/', 'get', profile_views.impute_names, json_renderer),

        Rule(
            [
                '/settings/social/',
                '/settings/social/<uid>/',
            ],
            'get',
            profile_views.serialize_social,
            json_renderer,
        ),
        Rule(
            [
                '/settings/jobs/',
                '/settings/jobs/<uid>/',
            ],
            'get',
            profile_views.serialize_jobs,
            json_renderer,
        ),
        Rule(
            [
                '/settings/schools/',
                '/settings/schools/<uid>/',
            ],
            'get',
            profile_views.serialize_schools,
            json_renderer,
        ),
        Rule(
            [
                '/settings/social/',
                '/settings/social/<uid>/',
            ],
            'put',
            profile_views.unserialize_social,
            json_renderer
        ),
        Rule(
            [
                '/settings/jobs/',
                '/settings/jobs/<uid>/',
            ],
            'put',
            profile_views.unserialize_jobs,
            json_renderer
        ),
        Rule(
            [
                '/settings/schools/',
                '/settings/schools/<uid>/',
            ],
            'put',
            profile_views.unserialize_schools,
            json_renderer
        ),
    ], prefix='/api/v1',)

    ### Search ###
    # Web
    process_rules(app, [
        Rule('/search/', 'get', {}, OsfWebRenderer('search.mako')),
        Rule('/share/', 'get', {}, OsfWebRenderer('share_search.mako')),
        Rule('/share/registration/', 'get', {'register': settings.SHARE_REGISTRATION_URL}, OsfWebRenderer('share_registration.mako')),
        Rule('/share/help/', 'get', {'help': settings.SHARE_API_DOCS_URL}, OsfWebRenderer('share_api_docs.mako')),
        Rule('/share_dashboard/', 'get', {}, OsfWebRenderer('share_dashboard.mako')),
        Rule('/share/atom/', 'get', search_views.search_share_atom, xml_renderer),
        Rule('/api/v1/user/search/', 'get', search_views.search_contributor, json_renderer),
        Rule(
            '/api/v1/search/node/',
            'post',
            project_views.node.search_node,
            json_renderer,
        ),
    ])

    # API
    process_rules(app, [
        Rule(['/search/', '/search/<type>/'], ['get', 'post'], search_views.search_search, json_renderer),
        Rule('/search/projects/', 'get', search_views.search_projects_by_title, json_renderer),
        Rule('/share/search/', ['get', 'post'], search_views.search_share, json_renderer),
        Rule('/share/stats/', 'get', search_views.search_share_stats, json_renderer),
        Rule('/share/providers/', 'get', search_views.search_share_providers, json_renderer),
    ], prefix='/api/v1')

    # Project

    # Web
    process_rules(app, [
        Rule('/', 'get', website_views.index, OsfWebRenderer('index.mako')),
        Rule('/goodbye/', 'get', goodbye, OsfWebRenderer('index.mako')),

        Rule([
            '/project/<pid>/',
            '/project/<pid>/node/<nid>/',
        ], 'get', project_views.node.view_project, OsfWebRenderer('project/project.mako')),

        # Create a new subproject/component
        Rule('/project/<pid>/newnode/', 'post', project_views.node.project_new_node,
             OsfWebRenderer('', render_mako_string)),

        # # TODO: Add API endpoint for tags
        # Rule('/tags/<tag>/', 'get', project_views.tag.project_tag, OsfWebRenderer('tags.mako')),

        Rule('/api/v1/folder/<nid>', 'post', project_views.node.folder_new_post, json_renderer),
        Rule('/project/new/<pid>/beforeTemplate/', 'get',
             project_views.node.project_before_template, json_renderer),

        Rule(
            [
                '/project/<pid>/contributors/',
                '/project/<pid>/node/<nid>/contributors/',
            ],
            'get',
            project_views.node.node_contributors,
            OsfWebRenderer('project/contributors.mako'),
        ),

        Rule(
            [
                '/project/<pid>/settings/',
                '/project/<pid>/node/<nid>/settings/',
            ],
            'get',
            project_views.node.node_setting,
            OsfWebRenderer('project/settings.mako')
        ),

        # Permissions
        Rule(
            [
                '/project/<pid>/permissions/<permissions>/',
                '/project/<pid>/node/<nid>/permissions/<permissions>/',
            ],
            'post',
            project_views.node.project_set_privacy,
            OsfWebRenderer('project/project.mako')
        ),

        ### Logs ###

        # View forks
        Rule([
            '/project/<pid>/forks/',
            '/project/<pid>/node/<nid>/forks/',
        ], 'get', project_views.node.node_forks, OsfWebRenderer('project/forks.mako')),

        # Registrations
        Rule([
            '/project/<pid>/register/',
            '/project/<pid>/node/<nid>/register/',
        ], 'get', project_views.register.node_register_page,
            OsfWebRenderer('project/register.mako')),
        Rule([
            '/project/<pid>/register/<template>/',
            '/project/<pid>/node/<nid>/register/<template>/',
        ], 'get', project_views.register.node_register_template_page,
            OsfWebRenderer('project/register.mako')),

        Rule([
            '/project/<pid>/registrations/',
            '/project/<pid>/node/<nid>/registrations/',
        ], 'get', project_views.node.node_registrations,
            OsfWebRenderer('project/registrations.mako')),

        Rule([
            '/project/<pid>/retraction/',
            '/project/<pid>/node/<nid>/retraction/',
        ], 'get', project_views.register.node_registration_retraction_get,
            OsfWebRenderer('project/retract_registration.mako')),
        Rule([
            '/project/<pid>/retraction/approve/<token>/',
            '/project/<pid>/node/<nid>/retraction/approve/<token>/',
        ], 'get', project_views.register.node_registration_retraction_approve,
            OsfWebRenderer('error.mako', render_mako_string)),
        Rule([
            '/project/<pid>/retraction/disapprove/<token>/',
            '/project/<pid>/node/<nid>/retraction/disapprove/<token>/',
        ], 'get', project_views.register.node_registration_retraction_disapprove,
            OsfWebRenderer('error.mako', render_mako_string)),

        Rule([
            '/project/<pid>/embargo/approve/<token>/',
            '/project/<pid>/node/<nid>/embargo/approve/<token>/',
        ], 'get', project_views.register.node_registration_embargo_approve,
            OsfWebRenderer('error.mako', render_mako_string)),
        Rule([
            '/project/<pid>/embargo/disapprove/<token>/',
            '/project/<pid>/node/<nid>/embargo/disapprove/<token>/',
        ], 'get', project_views.register.node_registration_embargo_disapprove,
            OsfWebRenderer('error.mako', render_mako_string)),

        Rule(
            '/ids/<category>/<path:value>/',
            'get',
            project_views.register.get_referent_by_identifier,
            notemplate,
        ),

        # Statistics
        Rule([
            '/project/<pid>/statistics/',
            '/project/<pid>/node/<nid>/statistics/',
        ], 'get', project_views.node.project_statistics,
            OsfWebRenderer('project/statistics.mako')),

        ### Files ###

        # Note: Web endpoint for files view must pass `mode` = `page` to
        # include project view data and JS includes
        Rule(
            [
                '/project/<pid>/files/',
                '/project/<pid>/node/<nid>/files/',
            ],
            'get',
            project_views.file.collect_file_trees,
            OsfWebRenderer('project/files.mako'),
            view_kwargs={'mode': 'page'},
        ),
        Rule(
            [
                '/project/<pid>/files/<provider>/<path:path>/',
                '/project/<pid>/node/<nid>/files/<provider>/<path:path>/',
            ],
            'get',
            addon_views.addon_view_or_download_file,
            OsfWebRenderer('project/view_file.mako')
        ),
        Rule(
            [
                # Legacy Addon view file paths
                '/project/<pid>/<provider>/files/<path:path>/',
                '/project/<pid>/node/<nid>/<provider>/files/<path:path>/',

                '/project/<pid>/<provider>/files/<path:path>/download/',
                '/project/<pid>/node/<nid>/<provider>/files/<path:path>/download/',

                # Legacy routes for `download_file`
                '/project/<pid>/osffiles/<fid>/download/',
                '/project/<pid>/node/<nid>/osffiles/<fid>/download/',

                # Legacy routes for `view_file`
                '/project/<pid>/osffiles/<fid>/',
                '/project/<pid>/node/<nid>/osffiles/<fid>/',

                # Note: Added these old URLs for backwards compatibility with
                # hard-coded links.
                '/project/<pid>/osffiles/download/<fid>/',
                '/project/<pid>/node/<nid>/osffiles/download/<fid>/',
                '/project/<pid>/files/<fid>/',
                '/project/<pid>/node/<nid>/files/<fid>/',
                '/project/<pid>/files/download/<fid>/',
                '/project/<pid>/node/<nid>/files/download/<fid>/',

                # Legacy routes for `download_file_by_version`
                '/project/<pid>/osffiles/<fid>/version/<vid>/download/',
                '/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/download/',

                # Note: Added these old URLs for backwards compatibility with
                # hard-coded links.
                '/project/<pid>/osffiles/<fid>/version/<vid>/',
                '/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/',
                '/project/<pid>/osffiles/download/<fid>/version/<vid>/',
                '/project/<pid>/node/<nid>/osffiles/download/<fid>/version/<vid>/',
                '/project/<pid>/files/<fid>/version/<vid>/',
                '/project/<pid>/node/<nid>/files/<fid>/version/<vid>/',
                '/project/<pid>/files/download/<fid>/version/<vid>/',
                '/project/<pid>/node/<nid>/files/download/<fid>/version/<vid>/',
            ],
            'get',
            addon_views.addon_view_or_download_file_legacy,
            OsfWebRenderer('project/view_file.mako'),
        ),
        Rule(
            [
                # api/v1 Legacy routes for `download_file`
                '/api/v1/project/<pid>/osffiles/<fid>/',
                '/api/v1/project/<pid>/node/<nid>/osffiles/<fid>/',
                '/api/v1/project/<pid>/files/download/<fid>/',
                '/api/v1/project/<pid>/node/<nid>/files/download/<fid>/',

                #api/v1 Legacy routes for `download_file_by_version`
                '/api/v1/project/<pid>/osffiles/<fid>/version/<vid>/',
                '/api/v1/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/',
                '/api/v1/project/<pid>/files/download/<fid>/version/<vid>/',
                '/api/v1/project/<pid>/node/<nid>/files/download/<fid>/version/<vid>/',
            ],
            'get',
            addon_views.addon_view_or_download_file_legacy,
            json_renderer
        ),
    ])

    # API
    process_rules(app, [
        Rule(
            '/email/meeting/',
            'post',
            conference_views.meeting_hook,
            json_renderer,
        ),
        Rule('/mailchimp/hooks/', 'get', profile_views.mailchimp_get_endpoint, json_renderer),

        Rule('/mailchimp/hooks/', 'post', profile_views.sync_data_from_mailchimp, json_renderer),

        # Create project, used by projectCreator.js
        Rule('/project/new/', 'post', project_views.node.project_new_post, json_renderer),

        Rule([
            '/project/<pid>/contributors_abbrev/',
            '/project/<pid>/node/<nid>/contributors_abbrev/',
        ], 'get', project_views.contributor.get_node_contributors_abbrev, json_renderer),

        Rule('/tags/<tag>/', 'get', project_views.tag.project_tag, json_renderer),

        Rule([
            '/project/<pid>/',
            '/project/<pid>/node/<nid>/',
        ], 'get', project_views.node.view_project, json_renderer),

        Rule([
            '/project/<pid>/expand/',
            '/project/<pid>/node/<nid>/expand/',
        ], 'post', project_views.node.expand, json_renderer),

        Rule([
            '/project/<pid>/collapse/',
            '/project/<pid>/node/<nid>/collapse/',
        ], 'post', project_views.node.collapse, json_renderer),

        Rule(
            [
                '/project/<pid>/pointer/',
                '/project/<pid>/node/<nid>/pointer/',
            ],
            'get',
            project_views.node.get_pointed,
            json_renderer,
        ),
        Rule(
            [
                '/project/<pid>/pointer/',
                '/project/<pid>/node/<nid>/pointer/',
            ],
            'post',
            project_views.node.add_pointers,
            json_renderer,
        ),
        Rule(
            [
                '/pointer/',
            ],
            'post',
            project_views.node.add_pointer,
            json_renderer,
        ),
        Rule(
            [
                '/pointers/move/',
            ],
            'post',
            project_views.node.move_pointers,
            json_renderer,
        ),
        Rule(
            [
                '/project/<pid>/pointer/',
                # BUG FIX: was '/project/<pid>/node/<nid>pointer/' (missing the
                # slash before "pointer"), which made the nested-node DELETE
                # pointer route unmatchable. Now matches the GET/POST siblings.
                '/project/<pid>/node/<nid>/pointer/',
            ],
            'delete',
            project_views.node.remove_pointer,
            json_renderer,
        ),
        Rule(
            [
                '/folder/<pid>/pointer/<pointer_id>',
            ],
            'delete',
            project_views.node.remove_pointer_from_folder,
            json_renderer,
        ),
        Rule(
            [
                '/folder/<pid>/pointers/',
            ],
            'delete',
            project_views.node.remove_pointers_from_folder,
            json_renderer,
        ),
        Rule(
            [
                '/folder/<pid>',
            ],
            'delete',
            project_views.node.delete_folder,
            json_renderer,
        ),
        Rule('/folder/', 'put', project_views.node.add_folder, json_renderer),
        Rule([
            '/project/<pid>/get_summary/',
            '/project/<pid>/node/<nid>/get_summary/',
        ], 'get', project_views.node.get_summary, json_renderer),

        Rule([
            '/project/<pid>/get_children/',
            '/project/<pid>/node/<nid>/get_children/',
        ], 'get', project_views.node.get_children, json_renderer),
        Rule([
            '/project/<pid>/get_folder_pointers/'
        ], 'get', project_views.node.get_folder_pointers, json_renderer),
        Rule([
            '/project/<pid>/get_forks/',
            '/project/<pid>/node/<nid>/get_forks/',
        ], 'get', project_views.node.get_forks, json_renderer),
        Rule([
            '/project/<pid>/get_registrations/',
            '/project/<pid>/node/<nid>/get_registrations/',
        ], 'get', project_views.node.get_registrations, json_renderer),
        Rule('/log/<log_id>/', 'get', project_views.log.get_log, json_renderer),
        Rule([
            '/project/<pid>/log/',
            '/project/<pid>/node/<nid>/log/',
        ], 'get', project_views.log.get_logs, json_renderer),
        Rule([
            '/project/<pid>/get_contributors/',
            '/project/<pid>/node/<nid>/get_contributors/',
        ], 'get', project_views.contributor.get_contributors, json_renderer),
        Rule([
            '/project/<pid>/get_contributors_from_parent/',
            '/project/<pid>/node/<nid>/get_contributors_from_parent/',
        ], 'get', project_views.contributor.get_contributors_from_parent, json_renderer),

        # Reorder contributors
        Rule(
            [
                '/project/<pid>/contributors/manage/',
                '/project/<pid>/node/<nid>/contributors/manage/',
            ],
            # NOTE(review): sole uppercase method string in this file; the
            # routing layer appears to accept it, so it is preserved as-is.
            'POST',
            project_views.contributor.project_manage_contributors,
            json_renderer,
        ),

        Rule([
            '/project/<pid>/get_most_in_common_contributors/',
            '/project/<pid>/node/<nid>/get_most_in_common_contributors/',
        ], 'get', project_views.contributor.get_most_in_common_contributors, json_renderer),
        Rule([
            '/project/<pid>/get_recently_added_contributors/',
            '/project/<pid>/node/<nid>/get_recently_added_contributors/',
        ], 'get', project_views.contributor.get_recently_added_contributors, json_renderer),
        Rule([
            '/project/<pid>/get_editable_children/',
            '/project/<pid>/node/<nid>/get_editable_children/',
        ], 'get', project_views.node.get_editable_children, json_renderer),

        # Private Link
        Rule([
            '/project/<pid>/private_link/',
            '/project/<pid>/node/<nid>/private_link/',
        ], 'post', project_views.node.project_generate_private_link_post, json_renderer),

        Rule([
            '/project/<pid>/private_link/edit/',
            '/project/<pid>/node/<nid>/private_link/edit/',
        ], 'put', project_views.node.project_private_link_edit, json_renderer),

        Rule([
            '/project/<pid>/private_link/',
            '/project/<pid>/node/<nid>/private_link/',
        ], 'delete', project_views.node.remove_private_link, json_renderer),

        Rule([
            '/project/<pid>/private_link/',
            '/project/<pid>/node/<nid>/private_link/',
        ], 'get', project_views.node.private_link_table, json_renderer),

        # Create, using existing project as a template
        Rule([
            '/project/new/<nid>/',
        ], 'post', project_views.node.project_new_from_template, json_renderer),

        # Update
        Rule(
            [
                '/project/<pid>/',
                '/project/<pid>/node/<nid>/',
            ],
            'put',
            project_views.node.update_node,
            json_renderer,
        ),

        # Remove
        Rule(
            [
                '/project/<pid>/',
                '/project/<pid>/node/<nid>/',
            ],
            'delete',
            project_views.node.component_remove,
            json_renderer,
        ),

        # Reorder components
        Rule('/project/<pid>/reorder_components/', 'post',
             project_views.node.project_reorder_components, json_renderer),

        # Edit node
        Rule([
            '/project/<pid>/edit/',
            '/project/<pid>/node/<nid>/edit/',
        ], 'post', project_views.node.edit_node, json_renderer),

        # Add / remove tags
        Rule([
            '/project/<pid>/tags/',
            '/project/<pid>/node/<nid>/tags/',
            '/project/<pid>/tags/<tag>/',
            '/project/<pid>/node/<nid>/tags/<tag>/',
        ], 'post', project_views.tag.project_add_tag, json_renderer),
        Rule([
            '/project/<pid>/tags/',
            '/project/<pid>/node/<nid>/tags/',
            '/project/<pid>/tags/<tag>/',
            '/project/<pid>/node/<nid>/tags/<tag>/',
        ], 'delete', project_views.tag.project_remove_tag, json_renderer),

        # Add / remove contributors
        Rule([
            '/project/<pid>/contributors/',
            '/project/<pid>/node/<nid>/contributors/',
        ], 'post', project_views.contributor.project_contributors_post, json_renderer),
        Rule([
            '/project/<pid>/beforeremovecontributors/',
            '/project/<pid>/node/<nid>/beforeremovecontributors/',
        ], 'post', project_views.contributor.project_before_remove_contributor, json_renderer),
        # TODO(sloria): should be a delete request to /contributors/
        Rule([
            '/project/<pid>/removecontributors/',
            '/project/<pid>/node/<nid>/removecontributors/',
        ], 'post', project_views.contributor.project_removecontributor, json_renderer),

        # Forks
        Rule(
            [
                '/project/<pid>/fork/before/',
                '/project/<pid>/node/<nid>/fork/before/',
            ], 'get', project_views.node.project_before_fork, json_renderer,
        ),
        Rule(
            [
                '/project/<pid>/fork/',
                '/project/<pid>/node/<nid>/fork/',
            ], 'post', project_views.node.node_fork_page, json_renderer,
        ),
        Rule(
            [
                '/project/<pid>/pointer/fork/',
                '/project/<pid>/node/<nid>/pointer/fork/',
            ], 'post', project_views.node.fork_pointer, json_renderer,
        ),

        # View forks
        Rule([
            '/project/<pid>/forks/',
            '/project/<pid>/node/<nid>/forks/',
        ], 'get', project_views.node.node_forks, json_renderer),

        # Registrations
        Rule([
            '/project/<pid>/beforeregister/',
            # NOTE(review): missing trailing slash is inconsistent with the
            # sibling route above; kept as-is since changing the URL would
            # change routing behavior -- confirm before normalizing.
            '/project/<pid>/node/<nid>/beforeregister',
        ], 'get', project_views.register.project_before_register, json_renderer),
        Rule([
            '/project/<pid>/register/<template>/',
            '/project/<pid>/node/<nid>/register/<template>/',
        ], 'get', project_views.register.node_register_template_page, json_renderer),
        Rule([
            '/project/<pid>/retraction/',
            '/project/<pid>/node/<nid>/retraction/'
        ], 'post', project_views.register.node_registration_retraction_post, json_renderer),

        Rule([
            '/project/<pid>/register/<template>/',
            '/project/<pid>/node/<nid>/register/<template>/',
        ], 'post', project_views.register.node_register_template_page_post, json_renderer),

        Rule(
            [
                '/project/<pid>/identifiers/',
                '/project/<pid>/node/<nid>/identifiers/',
            ],
            'get',
            project_views.register.node_identifiers_get,
            json_renderer,
        ),
        Rule(
            [
                '/project/<pid>/identifiers/',
                '/project/<pid>/node/<nid>/identifiers/',
            ],
            'post',
            project_views.register.node_identifiers_post,
            json_renderer,
        ),

        # Statistics
        Rule([
            '/project/<pid>/statistics/',
            '/project/<pid>/node/<nid>/statistics/',
        ], 'get', project_views.node.project_statistics, json_renderer),

        # Permissions
        Rule([
            '/project/<pid>/permissions/<permissions>/',
            '/project/<pid>/node/<nid>/permissions/<permissions>/',
        ], 'post', project_views.node.project_set_privacy, json_renderer),

        Rule([
            '/project/<pid>/permissions/beforepublic/',
            '/project/<pid>/node/<nid>/permissions/beforepublic/',
        ], 'get', project_views.node.project_before_set_public, json_renderer),

        ### Wiki ###

        ### Watching ###
        Rule([
            '/project/<pid>/watch/',
            '/project/<pid>/node/<nid>/watch/'
        ], 'post', project_views.node.watch_post, json_renderer),

        Rule([
            '/project/<pid>/unwatch/',
            '/project/<pid>/node/<nid>/unwatch/'
        ], 'post', project_views.node.unwatch_post, json_renderer),

        Rule([
            '/project/<pid>/togglewatch/',
            '/project/<pid>/node/<nid>/togglewatch/'
        ], 'post', project_views.node.togglewatch_post, json_renderer),

        Rule([
            '/watched/logs/'
        ], 'get', website_views.watched_logs_get, json_renderer),
        ### Accounts ###
        Rule([
            '/user/merge/'
        ], 'post', auth_views.merge_user_post, json_renderer),

        # Combined files
        Rule(
            [
                '/project/<pid>/files/',
                '/project/<pid>/node/<nid>/files/'
            ],
            'get',
            project_views.file.collect_file_trees,
            json_renderer,
        ),

        # Endpoint to fetch Rubeus.JS/Hgrid-formatted data
        Rule(
            [
                '/project/<pid>/files/grid/',
                '/project/<pid>/node/<nid>/files/grid/'
            ],
            'get',
            project_views.file.grid_data,
            json_renderer
        ),

        # Settings

        Rule(
            '/files/auth/',
            'get',
            addon_views.get_auth,
            json_renderer,
        ),

        Rule(
            [
                '/project/<pid>/waterbutler/logs/',
                '/project/<pid>/node/<nid>/waterbutler/logs/',
            ],
            'put',
            addon_views.create_waterbutler_log,
            json_renderer,
        ),
        Rule(
            [
                '/registration/<pid>/callbacks/',
            ],
            'put',
            project_views.register.registration_callbacks,
            json_renderer,
        ),
        Rule(
            '/settings/addons/',
            'post',
            profile_views.user_choose_addons,
            json_renderer,
        ),

        Rule(
            '/settings/notifications/',
            'get',
            profile_views.user_notifications,
            json_renderer,
        ),

        Rule(
            '/settings/notifications/',
            'post',
            profile_views.user_choose_mailing_lists,
            json_renderer,
        ),

        Rule(
            '/subscriptions/',
            'get',
            notification_views.get_subscriptions,
            json_renderer,
        ),

        Rule(
            [
                '/project/<pid>/subscriptions/',
                '/project/<pid>/node/<nid>/subscriptions/'
            ],
            'get',
            notification_views.get_node_subscriptions,
            json_renderer,
        ),

        Rule(
            '/subscriptions/',
            'post',
            notification_views.configure_subscription,
            json_renderer,
        ),

        Rule(
            [
                '/project/<pid>/settings/addons/',
                '/project/<pid>/node/<nid>/settings/addons/',
            ],
            'post',
            project_views.node.node_choose_addons,
            json_renderer,
        ),

        Rule(
            [
                '/project/<pid>/settings/comments/',
                '/project/<pid>/node/<nid>/settings/comments/',
            ],
            'post',
            project_views.node.configure_comments,
            json_renderer,
        ),

        # Invite Users
        Rule(
            [
                '/project/<pid>/invite_contributor/',
                '/project/<pid>/node/<nid>/invite_contributor/'
            ],
            'post',
            project_views.contributor.invite_contributor_post,
            json_renderer
        ),
    ], prefix='/api/v1')

    # Set up static routing for addons
    # NOTE: We use nginx to serve static addon assets in production
    addon_base_path = os.path.abspath('website/addons')
    if settings.DEV_MODE:
        @app.route('/static/addons/<addon>/<path:filename>')
        def addon_static(addon, filename):
            addon_path = os.path.join(addon_base_path, addon, 'static')
            return send_from_directory(addon_path, filename)
|
|
u'''
Created on Sep 13, 2011
@author: Mark V Systems Limited
(c) Copyright 2011 Mark V Systems Limited, All rights reserved.
'''
import os, io, sys, json
from collections import defaultdict
from arelle import XbrlConst
from arelle.ModelObject import ModelObject
from arelle.ModelValue import QName
from arelle.ModelFormulaObject import Aspect
from arelle.ModelRenderingObject import (ModelEuTable, ModelTable, ModelBreakdown,
ModelEuAxisCoord, ModelDefinitionNode, ModelClosedDefinitionNode, ModelRuleDefinitionNode,
ModelRelationshipDefinitionNode, ModelSelectionDefinitionNode, ModelFilterDefinitionNode,
ModelConceptRelationshipDefinitionNode, ModelDimensionRelationshipDefinitionNode,
ModelCompositionDefinitionNode, ModelTupleDefinitionNode, StructuralNode,
ROLLUP_NOT_ANALYZED, CHILDREN_BUT_NO_ROLLUP, CHILD_ROLLUP_FIRST, CHILD_ROLLUP_LAST,
OPEN_ASPECT_ENTRY_SURROGATE)
from arelle.PrototypeInstanceObject import FactPrototype
RENDER_UNITS_PER_CHAR = 16 # nominal screen units per char for wrapLength computation and adjustment
class ResolutionException(Exception):
    """Raised when a table's axis/breakdown structure cannot be resolved.

    Carries an error ``code``, a %-style ``message`` template and the keyword
    arguments used to fill it in, so callers (see resolveAxesStructure) can
    re-log it via modelXbrl.error(code, message, **kwargs).
    """
    def __init__(self, code, message, **kwargs):
        self.kwargs = kwargs
        self.code = code
        self.message = message
        # Pre-format args so str()/logging of the exception shows the full text.
        self.args = ( self.__repr__(), )
    def __repr__(self):
        # _() is the gettext translation hook installed globally by arelle.
        return _(u'[{0}] exception {1}').format(self.code, self.message % self.kwargs)
def resolveAxesStructure(view, viewTblELR):
    """Resolve the x/y/z axis structure for a rendering view.

    ``viewTblELR`` may be either a table model object (ModelEuTable/ModelTable)
    or an extended link role URI.  On success delegates to
    resolveTableAxesStructure; on failure returns (None, None, None, None).
    Side effects: sets view.axisSubtreeRelSet and (for ELR input)
    view.roledefinition.
    """
    if isinstance(viewTblELR, (ModelEuTable, ModelTable)):
        # called with a modelTable instead of an ELR
        # find an ELR for this table object
        table = viewTblELR
        for rel in view.modelXbrl.relationshipSet((XbrlConst.tableBreakdown, XbrlConst.tableBreakdownMMDD, XbrlConst.tableBreakdown201305, XbrlConst.tableBreakdown201301, XbrlConst.tableAxis2011)).fromModelObject(table):
            # find relationships in table's linkrole
            view.axisSubtreeRelSet = view.modelXbrl.relationshipSet((XbrlConst.tableBreakdownTree, XbrlConst.tableBreakdownTreeMMDD, XbrlConst.tableBreakdownTree201305, XbrlConst.tableDefinitionNodeSubtree, XbrlConst.tableDefinitionNodeSubtreeMMDD, XbrlConst.tableDefinitionNodeSubtree201305, XbrlConst.tableDefinitionNodeSubtree201301, XbrlConst.tableAxisSubtree2011), rel.linkrole)
            # resolve using the first table->breakdown relationship found
            return resolveTableAxesStructure(view, table,
                                             view.modelXbrl.relationshipSet((XbrlConst.tableBreakdown, XbrlConst.tableBreakdownMMDD, XbrlConst.tableBreakdown201305, XbrlConst.tableBreakdown201301, XbrlConst.tableAxis2011), rel.linkrole))
        # no relationships from table found
        return (None, None, None, None)
    # called with an ELR or list of ELRs
    tblAxisRelSet = view.modelXbrl.relationshipSet(XbrlConst.euTableAxis, viewTblELR)
    if len(tblAxisRelSet.modelRelationships) > 0:
        # EU rendering arcroles found in this linkrole
        view.axisSubtreeRelSet = view.modelXbrl.relationshipSet(XbrlConst.euAxisMember, viewTblELR)
    else: # try 2011 roles
        tblAxisRelSet = view.modelXbrl.relationshipSet((XbrlConst.tableBreakdown, XbrlConst.tableBreakdownMMDD, XbrlConst.tableBreakdown201305, XbrlConst.tableBreakdown201301, XbrlConst.tableAxis2011), viewTblELR)
        view.axisSubtreeRelSet = view.modelXbrl.relationshipSet((XbrlConst.tableBreakdownTree, XbrlConst.tableBreakdownTreeMMDD, XbrlConst.tableBreakdownTree201305, XbrlConst.tableDefinitionNodeSubtree, XbrlConst.tableDefinitionNodeSubtreeMMDD, XbrlConst.tableDefinitionNodeSubtree201305, XbrlConst.tableDefinitionNodeSubtree201301, XbrlConst.tableAxisSubtree2011), viewTblELR)
    if tblAxisRelSet is None or len(tblAxisRelSet.modelRelationships) == 0:
        view.modelXbrl.modelManager.addToLog(_(u"no table relationships for {0}").format(viewTblELR))
        return (None, None, None, None)
    # table name: prefer the role type's definition, fall back to the ELR basename
    modelRoleTypes = view.modelXbrl.roleTypes.get(viewTblELR)
    if modelRoleTypes is not None and len(modelRoleTypes) > 0:
        view.roledefinition = modelRoleTypes[0].definition
        if view.roledefinition is None or view.roledefinition == u"":
            view.roledefinition = os.path.basename(viewTblELR)
    try:
        # resolve the first root table concept only
        for table in tblAxisRelSet.rootConcepts:
            return resolveTableAxesStructure(view, table, tblAxisRelSet)
    except ResolutionException, ex:
        # Python 2 except syntax; log the resolution failure and fall through
        view.modelXbrl.error(ex.code, ex.message, exc_info=True, **ex.kwargs);
    return (None, None, None, None)
def resolveTableAxesStructure(view, table, tblAxisRelSet):
    """Build the structural-node trees for a table's x, y and z axes.

    Initializes the view's layout counters (header rows/columns, widths,
    rollup state), expands each top-level breakdown/definition node per axis
    disposition (z first, so z-selection variables are available to x/y
    expressions), then derives the first data row/column positions.

    Returns (tblAxisRelSet, xTopStructuralNode, yTopStructuralNode,
    zTopStructuralNode); any axis without a top-level node is None.
    """
    # reset per-table layout state on the view
    view.dataCols = 0
    view.dataRows = 0
    view.colHdrNonStdRoles = []
    view.colHdrDocRow = False
    view.colHdrCodeRow = False
    view.colHdrRows = 0
    view.rowHdrNonStdRoles = []
    view.rowHdrCols = 0
    view.rowHdrColWidth = [0,]
    view.rowNonAbstractHdrSpanMin = [0,]
    view.rowHdrDocCol = False
    view.rowHdrCodeCol = False
    view.zAxisRows = 0
    view.aspectModel = table.aspectModel
    view.zmostOrdCntx = None
    view.modelTable = table  # (was assigned twice; once is sufficient)
    view.topRollup = {u"x": ROLLUP_NOT_ANALYZED, u"y": ROLLUP_NOT_ANALYZED}
    view.aspectEntryObjectId = 0
    view.rendrCntx = table.renderingXPathContext
    xTopStructuralNode = yTopStructuralNode = zTopStructuralNode = None
    # must be cartesian product of top level relationships
    tblAxisRels = tblAxisRelSet.fromModelObject(table)
    facts = table.filteredFacts(view.rendrCntx, view.modelXbrl.factsInInstance) # apply table filters
    view.breakdownNodes = defaultdict(list) # breakdown nodes
    for tblAxisRel in tblAxisRels:
        definitionNode = tblAxisRel.toModelObject
        addBreakdownNode(view, tblAxisRel.axisDisposition, definitionNode)
    # do z's first to set variables needed by x and y axes expressions
    for disposition in (u"z", u"x", u"y"):
        for i, tblAxisRel in enumerate(tblAxisRels):
            definitionNode = tblAxisRel.toModelObject
            if (tblAxisRel.axisDisposition == disposition and
                isinstance(definitionNode, (ModelEuAxisCoord, ModelBreakdown, ModelDefinitionNode))):
                if disposition == u"x" and xTopStructuralNode is None:
                    xTopStructuralNode = StructuralNode(None, definitionNode, definitionNode, view.zmostOrdCntx, tableNode=table, rendrCntx=view.rendrCntx)
                    xTopStructuralNode.hasOpenNode = False
                    if isinstance(definitionNode,(ModelBreakdown, ModelClosedDefinitionNode)) and definitionNode.parentChildOrder is not None:
                        view.xTopRollup = CHILD_ROLLUP_LAST if definitionNode.parentChildOrder == u"children-first" else CHILD_ROLLUP_FIRST
                    expandDefinition(view, xTopStructuralNode, definitionNode, definitionNode, 1, disposition, facts, i, tblAxisRels)
                    view.dataCols = xTopStructuralNode.leafNodeCount
                    break
                elif disposition == u"y" and yTopStructuralNode is None:
                    yTopStructuralNode = StructuralNode(None, definitionNode, definitionNode, view.zmostOrdCntx, tableNode=table, rendrCntx=view.rendrCntx)
                    yTopStructuralNode.hasOpenNode = False
                    if isinstance(definitionNode,(ModelBreakdown, ModelClosedDefinitionNode)) and definitionNode.parentChildOrder is not None:
                        view.yAxisChildrenFirst.set(definitionNode.parentChildOrder == u"children-first")
                        view.yTopRollup = CHILD_ROLLUP_LAST if definitionNode.parentChildOrder == u"children-first" else CHILD_ROLLUP_FIRST
                    expandDefinition(view, yTopStructuralNode, definitionNode, definitionNode, 1, disposition, facts, i, tblAxisRels)
                    view.dataRows = yTopStructuralNode.leafNodeCount
                    break
                elif disposition == u"z" and zTopStructuralNode is None:
                    zTopStructuralNode = StructuralNode(None, definitionNode, definitionNode, tableNode=table, rendrCntx=view.rendrCntx)
                    zTopStructuralNode._choiceStructuralNodes = []
                    zTopStructuralNode.hasOpenNode = False
                    expandDefinition(view, zTopStructuralNode, definitionNode, definitionNode, 1, disposition, facts, i, tblAxisRels)
                    break
    u'''
    def jsonDefaultEncoder(obj):
        if isinstance(obj, StructuralNode):
            return {'1StructNode': str(obj),
                    '2Depth': obj.structuralDepth,
                    '2Group': obj.breakdownNode(view.tblELR).genLabel(),
                    '3Label': obj.header() or obj.xlinkLabel,
                    '4ChildNodes': obj.childStructuralNodes}
        raise TypeError("Type {} is not supported for json output".format(type(obj).__name__))
    with io.open(r"c:\temp\test.json", 'wt') as fh:
        json.dump({"x":xTopStructuralNode, "y":yTopStructuralNode, "z":zTopStructuralNode},
                  fh,
                  sort_keys=True,
                  ensure_ascii=False,
                  indent=2,
                  default=jsonDefaultEncoder)
    '''
    view.colHdrTopRow = view.zAxisRows + 1 # need rest if combobox used (2 if view.zAxisRows else 1)
    for i in xrange(view.rowHdrCols):
        if view.rowNonAbstractHdrSpanMin[i]:
            # FIX: subtract the widths of the columns the span covers (index j),
            # not the first column's width repeated (the generator previously
            # summed rowHdrColWidth[i] while iterating j).
            lastRowMinWidth = view.rowNonAbstractHdrSpanMin[i] - sum(view.rowHdrColWidth[j] for j in xrange(i, view.rowHdrCols - 1))
            if lastRowMinWidth > view.rowHdrColWidth[view.rowHdrCols - 1]:
                view.rowHdrColWidth[view.rowHdrCols - 1] = lastRowMinWidth
    #view.rowHdrColWidth = (60,60,60,60,60,60,60,60,60,60,60,60,60,60)
    # use as wraplength for all row hdr name columns 200 + fixed indent and abstract mins (not incl last name col)
    view.rowHdrWrapLength = 200 + sum(view.rowHdrColWidth[:view.rowHdrCols + 1])
    view.dataFirstRow = view.colHdrTopRow + view.colHdrRows + len(view.colHdrNonStdRoles)
    view.dataFirstCol = 1 + view.rowHdrCols + len(view.rowHdrNonStdRoles)
    #view.dataFirstRow = view.colHdrTopRow + view.colHdrRows + view.colHdrDocRow + view.colHdrCodeRow
    #view.dataFirstCol = 1 + view.rowHdrCols + view.rowHdrDocCol + view.rowHdrCodeCol
    #for i in range(view.dataFirstRow + view.dataRows):
    #    view.gridView.rowconfigure(i)
    #for i in range(view.dataFirstCol + view.dataCols):
    #    view.gridView.columnconfigure(i)
    # organize hdrNonStdRoles so code (if any) is after documentation (if any)
    for hdrNonStdRoles in (view.colHdrNonStdRoles, view.rowHdrNonStdRoles):
        iCodeRole = -1
        for i, hdrNonStdRole in enumerate(hdrNonStdRoles):
            if u'code' in os.path.basename(hdrNonStdRole).lower():
                iCodeRole = i
                break
        # hdrNonStdRole still holds the 'code' role found by the loop above
        if iCodeRole >= 0 and len(hdrNonStdRoles) > 1 and iCodeRole < len(hdrNonStdRoles) - 1:
            del hdrNonStdRoles[iCodeRole]
            hdrNonStdRoles.append(hdrNonStdRole)
    if view.topRollup[u"x"]:
        view.xAxisChildrenFirst.set(view.topRollup[u"x"] == CHILD_ROLLUP_LAST)
    if view.topRollup[u"y"]:
        view.yAxisChildrenFirst.set(view.topRollup[u"y"] == CHILD_ROLLUP_LAST)
    return (tblAxisRelSet, xTopStructuralNode, yTopStructuralNode, zTopStructuralNode)
def sortkey(obj):
    """Sort key: model objects order by document object index, others by value."""
    return obj.objectIndex if isinstance(obj, ModelObject) else obj
def addBreakdownNode(view, disposition, node):
    """Record a ModelBreakdown under its axis disposition, without duplicates."""
    if not isinstance(node, ModelBreakdown):
        return
    breakdowns = view.breakdownNodes[disposition]
    if node not in breakdowns:
        breakdowns.append(node)
def expandDefinition(view, structuralNode, breakdownNode, definitionNode, depth, axisDisposition, facts, i=None, tblAxisRels=None, processOpenDefinitionNode=True):
    """Recursively expand one definition node into structural nodes.

    Walks closed subtree relationships and open (relationship / selection /
    filter / tuple / rule) definition nodes, appending child StructuralNodes
    to ``structuralNode`` and updating the view's header/width bookkeeping.
    ``axisDisposition`` is u"x", u"y" or u"z"; ``facts`` is the (possibly
    pre-filtered) fact set passed down to open nodes.  ``i``/``tblAxisRels``
    drive cartesian-product expansion across sibling top-level breakdowns.
    NOTE(review): except/raise statements below use Python-2-only syntax.
    """
    subtreeRelationships = view.axisSubtreeRelSet.fromModelObject(definitionNode)
    def checkLabelWidth(structuralNode, checkBoundFact=False):
        # track widest header word per row-header column (y axis only)
        if axisDisposition == u"y":
            # messages can't be evaluated, just use the text portion of format string
            label = structuralNode.header(lang=view.lang,
                                          returnGenLabel=not checkBoundFact,
                                          returnMsgFormatString=not checkBoundFact)
            if label:
                # need to get more exact word length in screen units
                widestWordLen = max(len(w) * RENDER_UNITS_PER_CHAR for w in label.split())
                # abstract only pertains to subtree of closed nodes but not cartesian products or open nodes
                while structuralNode.depth >= len(view.rowHdrColWidth):
                    view.rowHdrColWidth.append(0)
                if definitionNode.isAbstract or not subtreeRelationships: # isinstance(definitionNode, ModelOpenDefinitionNode):
                    if widestWordLen > view.rowHdrColWidth[structuralNode.depth]:
                        view.rowHdrColWidth[structuralNode.depth] = widestWordLen
                else:
                    if widestWordLen > view.rowNonAbstractHdrSpanMin[structuralNode.depth]:
                        view.rowNonAbstractHdrSpanMin[structuralNode.depth] = widestWordLen
    if structuralNode and isinstance(definitionNode, (ModelBreakdown, ModelEuAxisCoord, ModelDefinitionNode)):
        try:
            #cartesianProductNestedArgs = (view, depth, axisDisposition, facts, tblAxisRels, i)
            ordCardinality, ordDepth = definitionNode.cardinalityAndDepth(structuralNode)
            if (not definitionNode.isAbstract and
                isinstance(definitionNode, ModelClosedDefinitionNode) and
                ordCardinality == 0):
                view.modelXbrl.error(u"xbrlte:closedDefinitionNodeZeroCardinality",
                    _(u"Closed definition node %(xlinkLabel)s does not contribute at least one structural node"),
                    modelObject=(view.modelTable,definitionNode), xlinkLabel=definitionNode.xlinkLabel, axis=definitionNode.localName)
            nestedDepth = depth + ordDepth
            # HF test
            # list (not tuple) so the facts slot [3] can be replaced for open-node partitions below
            cartesianProductNestedArgs = [view, nestedDepth, axisDisposition, facts, tblAxisRels, i]
            if axisDisposition == u"z":
                if depth == 1: # choices (combo boxes) don't add to z row count
                    view.zAxisRows += 1
            elif axisDisposition == u"x":
                if ordDepth:
                    if nestedDepth - 1 > view.colHdrRows: view.colHdrRows = nestedDepth - 1
                    u'''
                    if not view.colHdrDocRow:
                        if definitionNode.header(role="http://www.xbrl.org/2008/role/documentation",
                                                 lang=view.lang):
                            view.colHdrDocRow = True
                    if not view.colHdrCodeRow:
                        if definitionNode.header(role="http://www.eurofiling.info/role/2010/coordinate-code"):
                            view.colHdrCodeRow = True
                    '''
                hdrNonStdRoles = view.colHdrNonStdRoles
            elif axisDisposition == u"y":
                if ordDepth:
                    #if not definitionNode.isAbstract:
                    #    view.dataRows += ordCardinality
                    if nestedDepth - 1 > view.rowHdrCols:
                        view.rowHdrCols = nestedDepth - 1
                        for j in xrange(1 + ordDepth):
                            view.rowHdrColWidth.append(RENDER_UNITS_PER_CHAR) # min width for 'tail' of nonAbstract coordinate
                            view.rowNonAbstractHdrSpanMin.append(0)
                    checkLabelWidth(structuralNode, checkBoundFact=False)
                    u'''
                    if not view.rowHdrDocCol:
                        if definitionNode.header(role="http://www.xbrl.org/2008/role/documentation",
                                                 lang=view.lang):
                            view.rowHdrDocCol = True
                    if not view.rowHdrCodeCol:
                        if definitionNode.header(role="http://www.eurofiling.info/role/2010/coordinate-code"):
                            view.rowHdrCodeCol = True
                    '''
                hdrNonStdRoles = view.rowHdrNonStdRoles
            if axisDisposition in (u"x", u"y"):
                # collect non-standard generated label roles matching the view language (or code roles)
                hdrNonStdPosition = -1 # where a match last occurred
                for rel in view.modelXbrl.relationshipSet(XbrlConst.elementLabel).fromModelObject(definitionNode):
                    if rel.toModelObject is not None and rel.toModelObject.role != XbrlConst.genStandardLabel:
                        labelLang = rel.toModelObject.xmlLang
                        labelRole = rel.toModelObject.role
                        if (labelLang == view.lang or labelLang.startswith(view.lang) or view.lang.startswith(labelLang)
                            or (u"code" in labelRole)):
                            labelRole = rel.toModelObject.role
                            if labelRole in hdrNonStdRoles:
                                hdrNonStdPosition = hdrNonStdRoles.index(labelRole)
                            else:
                                hdrNonStdRoles.insert(hdrNonStdPosition + 1, labelRole)
            isCartesianProductExpanded = False
            if not isinstance(definitionNode, ModelFilterDefinitionNode):
                isCartesianProductExpanded = True
                # note: reduced set of facts should always be passed to subsequent open nodes
                for axisSubtreeRel in subtreeRelationships:
                    childDefinitionNode = axisSubtreeRel.toModelObject
                    if childDefinitionNode.isRollUp:
                        structuralNode.rollUpStructuralNode = StructuralNode(structuralNode, breakdownNode, childDefinitionNode, )
                        if not structuralNode.childStructuralNodes: # first sub ordinate is the roll up
                            structuralNode.subtreeRollUp = CHILD_ROLLUP_FIRST
                        else:
                            structuralNode.subtreeRollUp = CHILD_ROLLUP_LAST
                        if not view.topRollup.get(axisDisposition):
                            view.topRollup[axisDisposition] = structuralNode.subtreeRollUp
                    else:
                        if (isinstance(definitionNode, (ModelBreakdown, ModelCompositionDefinitionNode)) and
                            isinstance(childDefinitionNode, ModelRelationshipDefinitionNode)): # append list products to composititionAxes subObjCntxs
                            childStructuralNode = structuralNode
                        else:
                            childStructuralNode = StructuralNode(structuralNode, breakdownNode, childDefinitionNode) # others are nested structuralNode
                            if axisDisposition != u"z":
                                structuralNode.childStructuralNodes.append(childStructuralNode)
                        if axisDisposition != u"z":
                            expandDefinition(view, childStructuralNode, breakdownNode, childDefinitionNode, depth+ordDepth, axisDisposition, facts, i, tblAxisRels) #recurse
                            if not (isinstance(childStructuralNode.definitionNode, ModelFilterDefinitionNode)
                                    and any([node.isEntryPrototype(default=False) for node in childStructuralNode.childStructuralNodes])) :
                                # To be computed only if the structural node is not an open node
                                cartesianProductExpander(childStructuralNode, *cartesianProductNestedArgs)
                        else:
                            childStructuralNode.indent = depth - 1
                            if structuralNode.choiceStructuralNodes is not None:
                                structuralNode.choiceStructuralNodes.append(childStructuralNode)
                            expandDefinition(view, childStructuralNode, breakdownNode, childDefinitionNode, depth + 1, axisDisposition, facts) #recurse
                # required when switching from abstract to roll up to determine abstractness
                #if not structuralNode.subtreeRollUp and structuralNode.childStructuralNodes and definitionNode.tag.endswith("Node"):
                #    structuralNode.subtreeRollUp = CHILDREN_BUT_NO_ROLLUP
            #if not hasattr(structuralNode, "indent"): # probably also for multiple open axes
            if processOpenDefinitionNode:
                if isinstance(definitionNode, ModelRelationshipDefinitionNode):
                    structuralNode.isLabeled = False
                    selfStructuralNodes = {} if definitionNode.axis.endswith(u'-or-self') else None
                    # NOTE(review): if the first rel yielded is a list, relChildStructuralNode
                    # is unbound when passed to addRelationships — presumably relationships()
                    # always yields a non-list first; confirm against ModelRenderingObject.
                    for rel in definitionNode.relationships(structuralNode):
                        if not isinstance(rel, list):
                            relChildStructuralNode = addRelationship(breakdownNode, definitionNode, rel, structuralNode, cartesianProductNestedArgs, selfStructuralNodes)
                        else:
                            addRelationships(breakdownNode, definitionNode, rel, relChildStructuralNode, cartesianProductNestedArgs)
                    if axisDisposition == u"z":
                        # if definitionNode is first structural node child remove it
                        if structuralNode.choiceStructuralNodes and structuralNode.choiceStructuralNodes[0].definitionNode == definitionNode:
                            del structuralNode.choiceStructuralNodes[0]
                        # flatten hierarchy of nested structural nodes into choice nodes (for single listbox)
                        def flattenChildNodesToChoices(childStructuralNodes, indent):
                            while childStructuralNodes:
                                choiceStructuralNode = childStructuralNodes.pop(0)
                                choiceStructuralNode.indent = indent
                                structuralNode.choiceStructuralNodes.append(choiceStructuralNode)
                                flattenChildNodesToChoices(choiceStructuralNode.childStructuralNodes, indent + 1)
                        if structuralNode.childStructuralNodes:
                            flattenChildNodesToChoices(structuralNode.childStructuralNodes, 0)
                    # set up by definitionNode.relationships
                    if isinstance(definitionNode, ModelConceptRelationshipDefinitionNode):
                        if (definitionNode._sourceQname != XbrlConst.qnXfiRoot and
                            definitionNode._sourceQname not in view.modelXbrl.qnameConcepts):
                            view.modelXbrl.error(u"xbrlte:invalidConceptRelationshipSource",
                                _(u"Concept relationship rule node %(xlinkLabel)s source %(source)s does not refer to an existing concept."),
                                modelObject=definitionNode, xlinkLabel=definitionNode.xlinkLabel, source=definitionNode._sourceQname)
                    elif isinstance(definitionNode, ModelDimensionRelationshipDefinitionNode):
                        dim = view.modelXbrl.qnameConcepts.get(definitionNode._dimensionQname)
                        if dim is None or not dim.isExplicitDimension:
                            view.modelXbrl.error(u"xbrlte:invalidExplicitDimensionQName",
                                _(u"Dimension relationship rule node %(xlinkLabel)s dimension %(dimension)s does not refer to an existing explicit dimension."),
                                modelObject=definitionNode, xlinkLabel=definitionNode.xlinkLabel, dimension=definitionNode._dimensionQname)
                        domMbr = view.modelXbrl.qnameConcepts.get(definitionNode._sourceQname)
                        if domMbr is None or not domMbr.isDomainMember:
                            view.modelXbrl.error(u"xbrlte:invalidDimensionRelationshipSource",
                                _(u"Dimension relationship rule node %(xlinkLabel)s source %(source)s does not refer to an existing domain member."),
                                modelObject=definitionNode, xlinkLabel=definitionNode.xlinkLabel, source=definitionNode._sourceQname)
                    # NOTE(review): _NUM_TYPES is not defined in this module's visible
                    # imports — presumably imported/defined elsewhere; verify.
                    # NOTE(review): the error code below carries a trailing space —
                    # likely unintended ("xbrlte:relationshipNodeTooManyGenerations ").
                    if (definitionNode._axis in (u"child", u"child-or-self", u"parent", u"parent-or-self", u"sibling", u"sibling-or-self") and
                        (not isinstance(definitionNode._generations, _NUM_TYPES) or definitionNode._generations > 1)):
                        view.modelXbrl.error(u"xbrlte:relationshipNodeTooManyGenerations ",
                            _(u"Relationship rule node %(xlinkLabel)s formulaAxis %(axis)s implies a single generation tree walk but generations %(generations)s is greater than one."),
                            modelObject=definitionNode, xlinkLabel=definitionNode.xlinkLabel, axis=definitionNode._axis, generations=definitionNode._generations)
                elif isinstance(definitionNode, ModelSelectionDefinitionNode):
                    structuralNode.setHasOpenNode()
                    structuralNode.isLabeled = False
                    isCartesianProductExpanded = True
                    varQn = definitionNode.variableQname
                    if varQn:
                        selections = sorted(structuralNode.evaluate(definitionNode, definitionNode.evaluate) or [],
                                            key=lambda obj:sortkey(obj))
                        if isinstance(selections, (list,set,tuple)) and len(selections) > 1:
                            for selection in selections: # nested choices from selection list
                                childStructuralNode = StructuralNode(structuralNode, breakdownNode, definitionNode, contextItemFact=selection)
                                childStructuralNode.variables[varQn] = selection
                                childStructuralNode.indent = 0
                                if axisDisposition == u"z":
                                    structuralNode.choiceStructuralNodes.append(childStructuralNode)
                                    childStructuralNode.zSelection = True
                                else:
                                    structuralNode.childStructuralNodes.append(childStructuralNode)
                                    expandDefinition(view, childStructuralNode, breakdownNode, definitionNode, depth, axisDisposition, facts, processOpenDefinitionNode=False) #recurse
                                    cartesianProductExpander(childStructuralNode, *cartesianProductNestedArgs)
                        else:
                            # single selection: bind the variable, no child nodes
                            structuralNode.variables[varQn] = selections
                elif isinstance(definitionNode, ModelFilterDefinitionNode):
                    structuralNode.setHasOpenNode()
                    structuralNode.isLabeled = False
                    isCartesianProductExpanded = True
                    structuralNode.abstract = True # spanning ordinate acts as a subtitle
                    filteredFactsPartitions = structuralNode.evaluate(definitionNode,
                                                                     definitionNode.filteredFactsPartitions,
                                                                     evalArgs=(facts,))
                    if structuralNode._rendrCntx.formulaOptions.traceVariableFilterWinnowing:
                        view.modelXbrl.info(u"table:trace",
                            _(u"Filter node %(xlinkLabel)s facts partitions: %(factsPartitions)s"),
                            modelObject=definitionNode, xlinkLabel=definitionNode.xlinkLabel,
                            factsPartitions=unicode(filteredFactsPartitions))
                    # only for fact entry (true if no parent open nodes or all are on entry prototype row)
                    if axisDisposition != u"z":
                        childList = structuralNode.childStructuralNodes
                        if structuralNode.isEntryPrototype(default=True):
                            # NOTE(review): loop variable i shadows the parameter i;
                            # benign here since cartesianProductNestedArgs captured it above.
                            for i in xrange(getattr(view, u"openBreakdownLines",
                                                    # for file output, 1 entry row if no facts
                                                    0 if filteredFactsPartitions else 1)):
                                view.aspectEntryObjectId += 1
                                filteredFactsPartitions.append([FactPrototype(view, {u"aspectEntryObjectId": OPEN_ASPECT_ENTRY_SURROGATE + unicode(view.aspectEntryObjectId)})])
                                if structuralNode.isEntryPrototype(default=False):
                                    break # only one node per cartesian product under outermost nested open entry row
                    else:
                        childList = structuralNode.choiceStructuralNodes
                    for factsPartition in filteredFactsPartitions:
                        childStructuralNode = StructuralNode(structuralNode, breakdownNode, definitionNode, contextItemFact=factsPartition[0])
                        childStructuralNode.indent = 0
                        childStructuralNode.depth -= 1 # for label width; parent is merged/invisible
                        childList.append(childStructuralNode)
                        checkLabelWidth(childStructuralNode, checkBoundFact=True)
                        #expandDefinition(view, childStructuralNode, breakdownNode, definitionNode, depth, axisDisposition, factsPartition, processOpenDefinitionNode=False) #recurse
                        cartesianProductNestedArgs[3] = factsPartition
                        # note: reduced set of facts should always be passed to subsequent open nodes
                        if subtreeRelationships:
                            for axisSubtreeRel in subtreeRelationships:
                                child2DefinitionNode = axisSubtreeRel.toModelObject
                                child2StructuralNode = StructuralNode(childStructuralNode, breakdownNode, child2DefinitionNode) # others are nested structuralNode
                                childStructuralNode.childStructuralNodes.append(child2StructuralNode)
                                expandDefinition(view, child2StructuralNode, breakdownNode, child2DefinitionNode, depth+ordDepth, axisDisposition, factsPartition) #recurse
                                cartesianProductExpander(child2StructuralNode, *cartesianProductNestedArgs)
                        else:
                            cartesianProductExpander(childStructuralNode, *cartesianProductNestedArgs)
                    # sort by header (which is likely to be typed dim value, for example)
                    childList.sort(key=lambda childStructuralNode:
                                   childStructuralNode.header(lang=view.lang,
                                                              returnGenLabel=False,
                                                              returnMsgFormatString=False)
                                   or u'') # exception on trying to sort if header returns None
                    # TBD if there is no abstract 'sub header' for these subOrdCntxs, move them in place of parent structuralNode
                elif isinstance(definitionNode, ModelTupleDefinitionNode):
                    structuralNode.abstract = True # spanning ordinate acts as a subtitle
                    matchingTupleFacts = structuralNode.evaluate(definitionNode,
                                                                 definitionNode.filteredFacts,
                                                                 evalArgs=(facts,))
                    for tupleFact in matchingTupleFacts:
                        childStructuralNode = StructuralNode(structuralNode, breakdownNode, definitionNode, contextItemFact=tupleFact)
                        childStructuralNode.indent = 0
                        structuralNode.childStructuralNodes.append(childStructuralNode)
                        expandDefinition(view, childStructuralNode, breakdownNode, definitionNode, depth, axisDisposition, [tupleFact]) #recurse
                    # sort by header (which is likely to be typed dim value, for example)
                    if (structuralNode.childStructuralNodes and
                        any(sOC.header(lang=view.lang) for sOC in structuralNode.childStructuralNodes)):
                        structuralNode.childStructuralNodes.sort(key=lambda childStructuralNode: childStructuralNode.header(lang=view.lang) or u'')
                elif isinstance(definitionNode, ModelRuleDefinitionNode):
                    # validate constant (non-variable) aspect values against the DTS
                    for constraintSet in definitionNode.constraintSets.values():
                        for aspect in constraintSet.aspectsCovered():
                            if not constraintSet.aspectValueDependsOnVars(aspect):
                                if aspect == Aspect.CONCEPT:
                                    conceptQname = definitionNode.aspectValue(view.rendrCntx, Aspect.CONCEPT)
                                    concept = view.modelXbrl.qnameConcepts.get(conceptQname)
                                    if concept is None or not concept.isItem or concept.isDimensionItem or concept.isHypercubeItem:
                                        view.modelXbrl.error(u"xbrlte:invalidQNameAspectValue",
                                            _(u"Rule node %(xlinkLabel)s specifies concept %(concept)s does not refer to an existing primary item concept."),
                                            modelObject=definitionNode, xlinkLabel=definitionNode.xlinkLabel, concept=conceptQname)
                                elif isinstance(aspect, QName):
                                    dim = view.modelXbrl.qnameConcepts.get(aspect)
                                    memQname = definitionNode.aspectValue(view.rendrCntx, aspect)
                                    mem = view.modelXbrl.qnameConcepts.get(memQname)
                                    if dim is None or not dim.isDimensionItem:
                                        view.modelXbrl.error(u"xbrlte:invalidQNameAspectValue",
                                            _(u"Rule node %(xlinkLabel)s specifies dimension %(concept)s does not refer to an existing dimension concept."),
                                            modelObject=definitionNode, xlinkLabel=definitionNode.xlinkLabel, concept=aspect)
                                    if isinstance(memQname, QName) and (mem is None or not mem.isDomainMember):
                                        view.modelXbrl.error(u"xbrlte:invalidQNameAspectValue",
                                            _(u"Rule node %(xlinkLabel)s specifies domain member %(concept)s does not refer to an existing domain member concept."),
                                            modelObject=definitionNode, xlinkLabel=definitionNode.xlinkLabel, concept=memQname)
            if axisDisposition == u"z":
                # apply the user's (or default) choice selection for this z ordinate
                if structuralNode.choiceStructuralNodes:
                    choiceNodeIndex = view.zOrdinateChoices.get(definitionNode, 0)
                    if isinstance(choiceNodeIndex, dict): # aspect entry for open node
                        structuralNode.aspects = choiceNodeIndex
                        structuralNode.choiceNodeIndex = -1
                    elif choiceNodeIndex < len(structuralNode.choiceStructuralNodes):
                        structuralNode.choiceNodeIndex = choiceNodeIndex
                    else:
                        structuralNode.choiceNodeIndex = 0
                view.zmostOrdCntx = structuralNode
            if not isCartesianProductExpanded or (axisDisposition == u"z" and structuralNode.choiceStructuralNodes is not None):
                cartesianProductExpander(structuralNode, *cartesianProductNestedArgs)
            if not structuralNode.childStructuralNodes: # childless root ordinate, make a child to iterate in producing table
                # NOTE(review): result unused here — presumably the StructuralNode
                # constructor links itself to the parent; confirm.
                subOrdContext = StructuralNode(structuralNode, breakdownNode, definitionNode)
        except ResolutionException, ex:
            # Python 2 syntax; re-raise preserving the original traceback
            if sys.version[0] >= u'3':
                #import traceback
                #traceback.print_tb(ex.__traceback__)
                raise ex, None, ex.__traceback__ # provide original traceback information
            else:
                raise ex
        except Exception, ex:
            # wrap any other failure as a ResolutionException for uniform handling
            e = ResolutionException(u"arelle:resolutionException",
                                    _(u"Exception in resolution of definition node %(node)s: %(error)s"),
                                    modelObject=definitionNode, node=definitionNode.qname, error=unicode(ex)
                                    )
            if sys.version[0] >= u'3':
                raise e, None, ex.__traceback__ # provide original traceback information
            else:
                raise e
def cartesianProductExpander(childStructuralNode, view, depth, axisDisposition, facts, tblAxisRels, i):
    """Expand the next same-disposition sibling breakdown under this node.

    Scans the table-level relationships after position ``i`` for the first
    definition node on the same axis disposition and recurses into
    expandDefinition for it, producing the cartesian product of breakdowns.
    A no-op when ``i`` is None.
    """
    if i is None:
        return
    # recurse table relationships for cartesian product
    for offset, siblingRel in enumerate(tblAxisRels[i+1:]):
        targetNode = siblingRel.toModelObject
        if not (isinstance(targetNode, (ModelEuAxisCoord, ModelDefinitionNode)) and
                axisDisposition == siblingRel.axisDisposition):
            continue
        #addBreakdownNode(view, axisDisposition, targetNode)
        #if targetNode.cardinalityAndDepth(childStructuralNode)[1] or axisDisposition == "z":
        nextDepth = depth
        if axisDisposition == u"z":
            nestedNode = StructuralNode(childStructuralNode, targetNode, targetNode)
            nestedNode._choiceStructuralNodes = [] # this is a breakdown node
            nestedNode.indent = 0 # separate breakdown not indented]
            nextDepth = 0 # cartesian next z is also depth 0
            childStructuralNode.childStructuralNodes.append(nestedNode)
        else: # non-ordinate composition
            nestedNode = childStructuralNode
        # predefined axes need facts sub-filtered
        if isinstance(childStructuralNode.definitionNode, ModelClosedDefinitionNode):
            matchingFacts = childStructuralNode.evaluate(childStructuralNode.definitionNode,
                                                         childStructuralNode.definitionNode.filteredFacts,
                                                         evalArgs=(facts,))
        else:
            matchingFacts = facts
        subOrdTblCntx = nestedNode
        # returns whether there were no structural node results
        subOrdTblCntx.abstract = True # can't be abstract across breakdown
        expandDefinition(view, subOrdTblCntx, targetNode, targetNode,
                         nextDepth, # depth + (0 if axisDisposition == 'z' else 1),
                         axisDisposition, matchingFacts, offset + i + 1, tblAxisRels) #cartesian product
        break
def addRelationship(breakdownNode, relDefinitionNode, rel, structuralNode, cartesianProductNestedArgs, selfStructuralNodes=None):
    """Add one relationship's target as a structural node under structuralNode.

    For '-or-self' axes, ``selfStructuralNodes`` caches a parent node per
    source concept QName so siblings sharing a source nest under it.  Binds
    the definition node's variable/concept QNames and covered aspect on the
    new node, tags period start/end preferred labels, then continues
    cartesian-product expansion.  Returns the new node, or None when the
    definition node covers no aspect here.
    """
    variableQname = relDefinitionNode.variableQname
    conceptQname = relDefinitionNode.conceptQname
    coveredAspect = relDefinitionNode.coveredAspect(structuralNode)
    if not coveredAspect:
        return None
    if selfStructuralNodes is not None:
        fromConceptQname = rel.fromModelObject.qname
        # is there an ordinate for this root object?
        if fromConceptQname in selfStructuralNodes:
            childStructuralNode = selfStructuralNodes[fromConceptQname]
        else:
            childStructuralNode = StructuralNode(structuralNode, breakdownNode, relDefinitionNode)
            structuralNode.childStructuralNodes.append(childStructuralNode)
            selfStructuralNodes[fromConceptQname] = childStructuralNode
            if variableQname:
                childStructuralNode.variables[variableQname] = []
            if conceptQname:
                childStructuralNode.variables[conceptQname] = fromConceptQname
            childStructuralNode.aspects[coveredAspect] = fromConceptQname
        relChildStructuralNode = StructuralNode(childStructuralNode, breakdownNode, relDefinitionNode)
        childStructuralNode.childStructuralNodes.append(relChildStructuralNode)
    else:
        relChildStructuralNode = StructuralNode(structuralNode, breakdownNode, relDefinitionNode)
        structuralNode.childStructuralNodes.append(relChildStructuralNode)
    preferredLabel = rel.preferredLabel
    if preferredLabel == XbrlConst.periodStartLabel:
        relChildStructuralNode.tagSelector = u"table.periodStart"
    # FIX: was a duplicated periodStartLabel comparison, which made the
    # periodEnd branch unreachable; test the period-end label here.
    elif preferredLabel == XbrlConst.periodEndLabel:
        relChildStructuralNode.tagSelector = u"table.periodEnd"
    if variableQname:
        relChildStructuralNode.variables[variableQname] = rel
    toConceptQname = rel.toModelObject.qname
    if conceptQname:
        relChildStructuralNode.variables[conceptQname] = toConceptQname
    relChildStructuralNode.aspects[coveredAspect] = toConceptQname
    cartesianProductExpander(relChildStructuralNode, *cartesianProductNestedArgs)
    return relChildStructuralNode
def addRelationships(breakdownNode, relDefinitionNode, rels, structuralNode, cartesianProductNestedArgs):
    """Add a possibly nested list of relationships as structural nodes.

    Non-list entries become children of structuralNode (via addRelationship);
    nested lists recurse under the most recent non-list entry's node, with a
    placeholder node created when a list appears before any relationship.
    """
    childStructuralNode = None # parent for any nested sub-lists
    for rel in rels:
        if isinstance(rel, list):
            if childStructuralNode is None:
                # nested list with no preceding relationship: synthesize a parent
                childStructuralNode = StructuralNode(structuralNode, breakdownNode, relDefinitionNode)
                structuralNode.childStructuralNodes.append(childStructuralNode)
            addRelationships(breakdownNode, relDefinitionNode, rel, childStructuralNode, cartesianProductNestedArgs)
        else:
            # first entry can be parent of nested list relationships
            childStructuralNode = addRelationship(breakdownNode, relDefinitionNode, rel, structuralNode, cartesianProductNestedArgs)
|
|
import numpy as np
import tensorflow as tf
import gym
import logz
import scipy.signal
def normc_initializer(std=1.0):
    """
    Return a TF variable initializer drawing Gaussian samples whose columns
    are rescaled to have L2 norm `std`.
    """
    def _initializer(shape, dtype=None, partition_info=None): #pylint: disable=W0613
        sample = np.random.randn(*shape).astype(np.float32)
        col_norms = np.sqrt(np.square(sample).sum(axis=0, keepdims=True))
        return tf.constant(sample * (std / col_norms))
    return _initializer
def dense(x, size, name, weight_init=None):
    """
    Dense (fully connected) layer: returns x @ W + b with variables
    scoped as `name`/w and `name`/b.
    """
    in_dim = x.get_shape()[1]
    w = tf.get_variable(name + "/w", [in_dim, size], initializer=weight_init)
    b = tf.get_variable(name + "/b", [size], initializer=tf.zeros_initializer())
    return tf.matmul(x, w) + b
def fancy_slice_2d(X, inds0, inds1):
    """
    Symbolic equivalent of numpy's X[inds0, inds1] for a 2-D tensor:
    gathers one element per (row, column) index pair.
    """
    rows = tf.cast(inds0, tf.int64)
    cols = tf.cast(inds1, tf.int64)
    # Flatten X and convert (row, col) pairs into flat offsets.
    n_cols = tf.cast(tf.shape(X), tf.int64)[1]
    flat = tf.reshape(X, [-1])
    return tf.gather(flat, rows * n_cols + cols)
def discount(x, gamma):
    """
    Compute discounted sum of future values:
    out[i] = in[i] + gamma * in[i+1] + gamma^2 * in[i+2] + ...
    Implemented as an IIR filter run over the reversed sequence.
    """
    reversed_x = x[::-1]
    running = scipy.signal.lfilter([1], [1, -gamma], reversed_x, axis=0)
    return running[::-1]
def explained_variance_1d(ypred, y):
    """
    Fraction of y's variance explained by ypred:
    1 - Var[y - ypred] / Var[y]; NaN when Var[y] is zero.
    https://www.quora.com/What-is-the-meaning-proportion-of-variance-explained-in-linear-regression
    """
    assert y.ndim == 1 and ypred.ndim == 1
    vary = np.var(y)
    if vary == 0:
        return np.nan
    return 1 - np.var(y - ypred) / vary
def categorical_sample_logits(logits):
    """
    Samples (symbolically) from categorical distribution, where logits is a NxK
    matrix specifying N categorical distributions with K categories
    specifically, exp(logits) / sum( exp(logits), axis=1 ) is the
    probabilities of the different classes
    Cleverly uses gumbell trick, based on
    https://github.com/tensorflow/tensorflow/issues/456
    """
    U = tf.random_uniform(tf.shape(logits))
    # Gumbel-max: argmax over logits + Gumbel noise samples the categorical.
    # Fix: `dimension=` is a deprecated alias in tf.argmax; use `axis=`.
    return tf.argmax(logits - tf.log(-tf.log(U)), axis=1)
def pathlength(path):
    """Number of timesteps in a trajectory dict (one reward per step)."""
    rewards = path["reward"]
    return len(rewards)
class LinearValueFunction(object):
    """Ridge-regularized linear baseline over [1, X, X^2/2] features."""
    coef = None  # weight vector; stays None until fit() is called

    def fit(self, X, y):
        """Least-squares fit of the feature map against targets y."""
        feats = self.preproc(X)
        gram = feats.T.dot(feats)
        diag = np.arange(feats.shape[1])
        gram[diag, diag] += 1e-3  # a little ridge regression
        self.coef = np.linalg.solve(gram, feats.T.dot(y))

    def predict(self, X):
        """Predicted values for X; all zeros before the first fit."""
        if self.coef is None:
            return np.zeros(X.shape[0])
        return self.preproc(X).dot(self.coef)

    def preproc(self, X):
        """Feature map: bias column, raw observations, half their squares."""
        bias = np.ones([X.shape[0], 1])
        return np.concatenate([bias, X, np.square(X) / 2.0], axis=1)
class NnValueFunction(object):
    """Neural-network value-function baseline (homework placeholder).

    main_pendulum constructs this as NnValueFunction(ob_dim=ob_dim, **vf_params),
    so an implementation is expected to accept those keyword arguments and to
    provide fit(X, y) / predict(X) like LinearValueFunction — TODO implement.
    """
    pass # YOUR CODE HERE
def lrelu(x, leak=0.2):
    """Leaky ReLU expressed as a linear combination of x and |x|.

    For x > 0 the weights sum to 1 (identity); for x < 0 they subtract
    to `leak`, giving leak * x.
    """
    pos_w = 0.5 * (1 + leak)
    neg_w = 0.5 * (1 - leak)
    return pos_w * x + neg_w * abs(x)
def main_cartpole(n_iter=100, gamma=1.0, min_timesteps_per_batch=1000, stepsize=1e-2, animate=True, logdir=None):
    """Train a REINFORCE agent with a linear value-function baseline on CartPole-v0.

    Args:
        n_iter: number of policy-update iterations.
        gamma: discount factor fed to discount().
        min_timesteps_per_batch: rollouts are collected until this many steps.
        stepsize: Adam learning rate for the policy update.
        animate: render the first episode every 10th iteration.
        logdir: passed to logz.configure_output_dir for diagnostics output.
    """
    env = gym.make("CartPole-v0")
    ob_dim = env.observation_space.shape[0]
    num_actions = env.action_space.n
    logz.configure_output_dir(logdir)
    vf = LinearValueFunction()
    # Symbolic variables have the prefix sy_, to distinguish them from the numerical values
    # that are computed later in these function
    sy_ob_no = tf.placeholder(shape=[None, ob_dim], name="ob", dtype=tf.float32) # batch of observations
    sy_ac_n = tf.placeholder(shape=[None], name="ac", dtype=tf.int32) # batch of actions taken by the policy, used for policy gradient computation
    sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32) # advantage function estimate
    sy_h1 = lrelu(dense(sy_ob_no, 32, "h1", weight_init=normc_initializer(1.0))) # hidden layer
    sy_logits_na = dense(sy_h1, num_actions, "final", weight_init=normc_initializer(0.05)) # "logits", describing probability distribution of final layer
    # we use a small initialization for the last layer, so the initial policy has maximal entropy
    sy_oldlogits_na = tf.placeholder(shape=[None, num_actions], name='oldlogits', dtype=tf.float32) # logits BEFORE update (just used for KL diagnostic)
    sy_logp_na = tf.nn.log_softmax(sy_logits_na) # logprobability of actions
    sy_sampled_ac = categorical_sample_logits(sy_logits_na)[0] # sampled actions, used for defining the policy (NOT computing the policy gradient)
    sy_n = tf.shape(sy_ob_no)[0]
    sy_logprob_n = fancy_slice_2d(sy_logp_na, tf.range(sy_n), sy_ac_n) # log-prob of actions taken -- used for policy gradient calculation
    # The following quantities are just used for computing KL and entropy, JUST FOR DIAGNOSTIC PURPOSES >>>>
    sy_oldlogp_na = tf.nn.log_softmax(sy_oldlogits_na)
    sy_oldp_na = tf.exp(sy_oldlogp_na)
    sy_kl = tf.reduce_sum(sy_oldp_na * (sy_oldlogp_na - sy_logp_na)) / tf.to_float(sy_n)
    sy_p_na = tf.exp(sy_logp_na)
    sy_ent = tf.reduce_sum( - sy_p_na * sy_logp_na) / tf.to_float(sy_n)
    # <<<<<<<<<<<<<
    sy_surr = - tf.reduce_mean(sy_adv_n * sy_logprob_n) # Loss function that we'll differentiate to get the policy gradient ("surr" is for "surrogate loss")
    sy_stepsize = tf.placeholder(shape=[], dtype=tf.float32) # Symbolic, in case you want to change the stepsize during optimization. (We're not doing that currently)
    update_op = tf.train.AdamOptimizer(sy_stepsize).minimize(sy_surr)
    tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
    # use single thread. on such a small problem, multithreading gives you a slowdown
    # this way, we can better use multiple cores for different experiments
    sess = tf.Session(config=tf_config)
    sess.__enter__() # equivalent to `with sess:`
    tf.global_variables_initializer().run() #pylint: disable=E1101
    total_timesteps = 0
    for i in range(n_iter):
        print("********** Iteration %i ************"%i)
        # Collect paths until we have enough timesteps
        timesteps_this_batch = 0
        paths = []
        while True:
            ob = env.reset()
            terminated = False
            obs, acs, rewards = [], [], []
            animate_this_episode=(len(paths)==0 and (i % 10 == 0) and animate)
            while True:
                if animate_this_episode:
                    env.render()
                obs.append(ob)
                ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no : ob[None]})
                acs.append(ac)
                ob, rew, done, _ = env.step(ac)
                rewards.append(rew)
                if done:
                    break
            path = {"observation" : np.array(obs), "terminated" : terminated,
                    "reward" : np.array(rewards), "action" : np.array(acs)}
            paths.append(path)
            timesteps_this_batch += pathlength(path)
            if timesteps_this_batch > min_timesteps_per_batch:
                break
        total_timesteps += timesteps_this_batch
        # Estimate advantage function
        vtargs, vpreds, advs = [], [], []
        for path in paths:
            rew_t = path["reward"]
            return_t = discount(rew_t, gamma)
            vpred_t = vf.predict(path["observation"])
            adv_t = return_t - vpred_t
            advs.append(adv_t)
            vtargs.append(return_t)
            vpreds.append(vpred_t)
        # Build arrays for policy update
        ob_no = np.concatenate([path["observation"] for path in paths])
        ac_n = np.concatenate([path["action"] for path in paths])
        adv_n = np.concatenate(advs)
        standardized_adv_n = (adv_n - adv_n.mean()) / (adv_n.std() + 1e-8)
        vtarg_n = np.concatenate(vtargs)
        vpred_n = np.concatenate(vpreds)
        vf.fit(ob_no, vtarg_n)
        # Policy update
        _, oldlogits_na = sess.run([update_op, sy_logits_na], feed_dict={sy_ob_no:ob_no, sy_ac_n:ac_n, sy_adv_n:standardized_adv_n, sy_stepsize:stepsize})
        kl, ent = sess.run([sy_kl, sy_ent], feed_dict={sy_ob_no:ob_no, sy_oldlogits_na:oldlogits_na})
        # Log diagnostics
        logz.log_tabular("EpRewMean", np.mean([path["reward"].sum() for path in paths]))
        logz.log_tabular("EpLenMean", np.mean([pathlength(path) for path in paths]))
        logz.log_tabular("KLOldNew", kl)
        logz.log_tabular("Entropy", ent)
        logz.log_tabular("EVBefore", explained_variance_1d(vpred_n, vtarg_n))
        logz.log_tabular("EVAfter", explained_variance_1d(vf.predict(ob_no), vtarg_n))
        logz.log_tabular("TimestepsSoFar", total_timesteps)
        # If you're overfitting, EVAfter will be way larger than EVBefore.
        # Note that we fit value function AFTER using it to compute the advantage function to avoid introducing bias
        logz.dump_tabular()
def main_pendulum(logdir, seed, n_iter, gamma, min_timesteps_per_batch, initial_stepsize, desired_kl, vf_type, vf_params, animate=False):
    """Train a policy-gradient agent on Pendulum-v0 (homework scaffold).

    NOTE(review): the bare name YOUR_CODE_HERE below is scaffolding and
    raises NameError at runtime; the graph construction and rollout/update
    sections must be implemented before this function can run.
    Uses an adaptive stepsize driven by the observed KL divergence.
    """
    tf.set_random_seed(seed)
    np.random.seed(seed)
    env = gym.make("Pendulum-v0")
    ob_dim = env.observation_space.shape[0]
    ac_dim = env.action_space.shape[0]
    logz.configure_output_dir(logdir)
    if vf_type == 'linear':
        vf = LinearValueFunction(**vf_params)
    elif vf_type == 'nn':
        vf = NnValueFunction(ob_dim=ob_dim, **vf_params)
    # Placeholder: build policy network, placeholders (sy_adv_n, sy_logprob_n),
    # and the kl/ent diagnostics here.
    YOUR_CODE_HERE
    sy_surr = - tf.reduce_mean(sy_adv_n * sy_logprob_n) # Loss function that we'll differentiate to get the policy gradient ("surr" is for "surrogate loss")
    sy_stepsize = tf.placeholder(shape=[], dtype=tf.float32) # Symbolic, in case you want to change the stepsize during optimization. (We're not doing that currently)
    update_op = tf.train.AdamOptimizer(sy_stepsize).minimize(sy_surr)
    sess = tf.Session()
    sess.__enter__() # equivalent to `with sess:`
    tf.global_variables_initializer().run() #pylint: disable=E1101
    total_timesteps = 0
    stepsize = initial_stepsize
    for i in range(n_iter):
        print("********** Iteration %i ************"%i)
        # Placeholder: collect rollouts, compute advantages, run the update;
        # must define kl, ent, paths, vpred_n, vtarg_n, ob_no for the code below.
        YOUR_CODE_HERE
        # Adaptive stepsize: shrink when KL overshoots the target, grow when it undershoots.
        if kl > desired_kl * 2:
            stepsize /= 1.5
            print('stepsize -> %s'%stepsize)
        elif kl < desired_kl / 2:
            stepsize *= 1.5
            print('stepsize -> %s'%stepsize)
        else:
            print('stepsize OK')
        # Log diagnostics
        logz.log_tabular("EpRewMean", np.mean([path["reward"].sum() for path in paths]))
        logz.log_tabular("EpLenMean", np.mean([pathlength(path) for path in paths]))
        logz.log_tabular("KLOldNew", kl)
        logz.log_tabular("Entropy", ent)
        logz.log_tabular("EVBefore", explained_variance_1d(vpred_n, vtarg_n))
        logz.log_tabular("EVAfter", explained_variance_1d(vf.predict(ob_no), vtarg_n))
        logz.log_tabular("TimestepsSoFar", total_timesteps)
        # If you're overfitting, EVAfter will be way larger than EVBefore.
        # Note that we fit value function AFTER using it to compute the advantage function to avoid introducing bias
        logz.dump_tabular()
def main_pendulum1(d):
    """Helper for multiprocessing.Pool.map: unpack a kwargs dict into main_pendulum."""
    return main_pendulum(**d)
if __name__ == "__main__":
    if 1:
        # Default entry point: train CartPole with the settings defined above.
        main_cartpole(logdir=None) # when you want to start collecting results, set the logdir
    if 0:
        # Disabled experiment sweep: pendulum runs over 3 seeds x 2 value-function
        # types, fanned out across worker processes.
        general_params = dict(gamma=0.97, animate=False, min_timesteps_per_batch=2500, n_iter=300, initial_stepsize=1e-3)
        params = [
            dict(logdir='/tmp/ref/linearvf-kl2e-3-seed0', seed=0, desired_kl=2e-3, vf_type='linear', vf_params={}, **general_params),
            dict(logdir='/tmp/ref/nnvf-kl2e-3-seed0', seed=0, desired_kl=2e-3, vf_type='nn', vf_params=dict(n_epochs=10, stepsize=1e-3), **general_params),
            dict(logdir='/tmp/ref/linearvf-kl2e-3-seed1', seed=1, desired_kl=2e-3, vf_type='linear', vf_params={}, **general_params),
            dict(logdir='/tmp/ref/nnvf-kl2e-3-seed1', seed=1, desired_kl=2e-3, vf_type='nn', vf_params=dict(n_epochs=10, stepsize=1e-3), **general_params),
            dict(logdir='/tmp/ref/linearvf-kl2e-3-seed2', seed=2, desired_kl=2e-3, vf_type='linear', vf_params={}, **general_params),
            dict(logdir='/tmp/ref/nnvf-kl2e-3-seed2', seed=2, desired_kl=2e-3, vf_type='nn', vf_params=dict(n_epochs=10, stepsize=1e-3), **general_params),
        ]
        import multiprocessing
        p = multiprocessing.Pool()
        p.map(main_pendulum1, params)
|
|
# Django
from django.db import models
# Third-party apps
import jsonfield # http://pypi.python.org/pypi/django-jsonfield/
from lxml import etree # http://lxml.de/
# Internal
from .log import default_logger as logger
from .utils.introspection import ModelFactory
from .utils.serializers import deserialize_function
from .utils.xmlhelper import XMLHelper
from .utils import sum_dicts
class Mapping(models.Model):
    """A mapping configuration.

    ``data_map`` is a JSON structure keyed by element path; each entry
    describes which Django models an XML element maps onto and how its
    sub-elements fill the model fields.
    """
    data_map = jsonfield.JSONField(default='{}') # need a default value
    label = models.CharField(max_length=255, unique=True) # label for reference

    def __unicode__(self):
        return u'%s' % (
            self.label,
        )

    @property
    def log_desc(self):
        # Common prefix used by every log message emitted by this instance.
        return u'<Mapping: %s>' % (self,)

    def load_xml(self, xml, root_path=None):
        """Loads a piece of XML in the DB, i.e. map XML data to a Django Model.
        Args:
            xml: a string being the XML data to load
            root_path: the root (dotted path) of the XML data. Not mandatory but needed when the XML is not the root as defined in the mapping.
                e.g. If you defined a mapping for rss.channel.item
                and the XML you are passing actually starts with the channel element, you must then set root_path to rss.channel
        Returns:
            A dict summarizing the number of objects created per element-mapping
            (an empty dict if the XML cannot be parsed).
        """
        log_desc = '%s - Loading XML' % (self.log_desc,)
        try:
            # Parse the XML
            root = etree.fromstring(xml, parser=etree.XMLParser())
        except Exception as e:
            logger.error('%s => XML cannot be parsed. [KO]\n%s' % (log_desc, e))
            # BUG FIX: previously returned 0, contradicting the documented
            # dict return type and breaking load_xml_chunks(), which feeds
            # this result to sum_dicts().
            return {}
        nb_created = {k: 0 for k in self.data_map.keys()}
        # For each element-mapping
        for e_path, conf in self.data_map.iteritems():
            nb_created[e_path] = nb_created[e_path] + self._map_elements_by_path(e_path, conf, root, root_path)
        logger.info('%s => %s' % (log_desc, ' ; '.join(['%s: %s objects created' % (k, v) for (k, v) in nb_created.items()])))
        return nb_created

    def load_xml_chunks(self, xml_chunks, root_path):
        """Loads a collection of XML chunks being all of the same kind.
        Args:
            xml_chunks: a list of XML string data to load
            root_path: the root (dotted path) of the XML data. Not mandatory but needed when the XML is not the root as defined in the mapping.
                e.g. If you defined a mapping for rss.channel.item
                and the XML you are passing actually starts with the channel element, you must then set root_path to rss.channel
        Returns:
            A dict summarizing the number of objects created per element-mapping.
        TODO: Make it more efficient instead of a simple loop.
        """
        log_desc = '%s - Loading XML chunks' % (self.log_desc,)
        logger.info('%s => start' % (log_desc,))
        nb_created = {}
        for xml in xml_chunks:
            nb_created = sum_dicts(nb_created, self.load_xml(xml, root_path))
        logger.info('%s => end' % (log_desc,))
        return nb_created

    def _map_elements_by_path(self, path, conf, node, node_path):
        """Maps all the elements matching the path in the node with the mapping configuration.
        Args:
            path: the path of the elements to seek
            conf: the mapping configuration
            node: the node from which to seek
            node_path: the path of the node
        Returns:
            The number of Models created in the DB for all the found elements.
        """
        # Get the configuration
        get_id = conf.get('get_id', None)
        models = conf.get('models', None)
        if models is None:
            # BUG FIX: this error path referenced the local `log_desc` before
            # it was assigned (NameError); log with the instance prefix instead.
            logger.error('%s => No models found in the configuration. [KO]\nconfiguration=%s' % (self.log_desc, conf))
            return 0
        log_desc = '%s - Mapping all the elements matching path=%s to %s Models' % (self.log_desc, path, len(models))
        # Get all the matching elements
        elems = XMLHelper.get_elements(path, node, node_path)
        # Log if no elements were found.
        if not elems:
            logger.warning('%s => No elements found. node_path=%s' % (log_desc, node_path))
            return 0
        nb_created = 0
        for elem in elems:
            nb_created = nb_created + self._map_element(elem, models, get_id)
        nb_elems = len(elems)
        # Each element is expected to produce one object per configured model.
        nb_targeted = nb_elems * len(models)
        logger.info('%s => Found: %s, Targeted Objects: %s, Created Objects: %s %s' % (
            log_desc,
            nb_elems,
            nb_targeted,
            nb_created,
            (nb_targeted == nb_created and ['[OK]'] or ['=> numbers different [KO]'])[0]
            )
        )
        return nb_created

    def _map_element(self, element, models, get_id=None):
        """Maps an element to several models.
        Args:
            element: an XML element
            models: the models to mapped
            get_id: the function to use to calculate the ID of the element to identify it amongst the other.
        Returns:
            The number of Models created in the DB for the passed element.
        """
        elem_id = '(id:%s) ' % (self._resolve_get_id(get_id)(element),)
        status = {k: '[KO]' for k in models.keys()}
        nb_created = 0
        for app_model, fields in models.iteritems():
            try:
                ins = self._map_to_model(element, app_model, fields)
                status[app_model] = 'pk=%s' % (ins.pk)
                nb_created = nb_created + 1
                # Typo fix in log message: '[0K]' -> '[OK]'.
                logger.info('%s - Mapping the element %sto the Model %s with fields %s => object created, pk=%s [OK]' % (
                    self.log_desc,
                    elem_id,
                    app_model,
                    fields,
                    ins.pk,
                    )
                )
            except Exception as err:
                # Typo fix in log message: '[K0]' -> '[KO]'.
                logger.error('%s - Mapping the element %sto the Model %s with fields %s => Cannot be mapped. [KO]\n%s' % (
                    self.log_desc,
                    elem_id,
                    app_model,
                    fields,
                    err,
                    )
                )
        logger.info('%s - Element %smapped to %s Models => %s' % (
            self.log_desc,
            elem_id,
            len(models),
            ' ; '.join(['%s: %s' % (k, v) for (k, v) in status.items()]),
            )
        )
        return nb_created

    def _map_to_model(self, element, app_model, fields):
        """Maps an element to a Model.
        Args:
            element: the XML element to map
            app_model: the model to map defined by: app_label.model_name
            fields: the fields mapping
        Returns:
            The instance of the created Model.
        """
        ins = ModelFactory.create(app_model)
        self._map_to_fields(element, ins, fields)
        ins.save()
        return ins

    def _map_to_fields(self, element, ins, fields):
        """Maps an element to the fields.
        Args:
            element: the XML element to map
            ins: the instance of the created Model
            fields: the fields mapping
        """
        for field, configuration in fields.items():
            if isinstance(configuration, basestring):
                # Single dotted path: copy the unescaped text of that element.
                setattr(ins, field, XMLHelper.get_text_unescape(element, configuration))
            elif isinstance(configuration, list):
                # Several paths: join their texts with a space.
                values = (XMLHelper.get_text_unescape(element, v) for v in configuration)
                setattr(ins, field, ' '.join(values))
            elif isinstance(configuration, dict):
                pass # TODO: handles advanced transformers

    def _resolve_get_id(self, get_id):
        """Resolves which function should be used to calculate the ID of an element.
        Args:
            get_id: a function/method or a string to use an inner element.
        Returns:
            A function that will take an element and returns an ID.
        """
        # Try to deserialize it
        try:
            return deserialize_function(get_id)
        except:
            pass
        # Deserialization could not figure it out
        # so let's assume it is a tag and we want to use the text of the element
        if isinstance(get_id, basestring):
            return lambda x: XMLHelper.get_text(x, get_id)
        # Nothing works, returns the get_id
        return lambda x: get_id
|
|
from __future__ import absolute_import
import contextlib
import errno
import os
from mercurial.node import sha1nodeconstants
from mercurial import (
error,
extensions,
match as matchmod,
pycompat,
scmutil,
util,
)
from mercurial.interfaces import (
dirstate as intdirstate,
util as interfaceutil,
)
from . import gitutil
pygit2 = gitutil.get_pygit2()
def readpatternfile(orig, filepath, warn, sourceinfo=False):
    """Wrap mercurial.match.readpatternfile to understand .gitignore syntax.

    Files other than .gitignore / info/exclude are delegated to the wrapped
    implementation. For git ignore files, returns (patterns, warnings);
    negated patterns (leading '!') are unsupported and become warnings.
    """
    if not (b'info/exclude' in filepath or filepath.endswith(b'.gitignore')):
        # BUG FIX: forward the caller's sourceinfo instead of hard-coding False.
        return orig(filepath, warn, sourceinfo=sourceinfo)
    result = []
    warnings = []
    with open(filepath, b'rb') as fp:
        for l in fp:
            l = l.strip()
            # Skip blanks and comments.
            if not l or l.startswith(b'#'):
                continue
            if l.startswith(b'!'):
                # Negation cannot be expressed as an hg ignore pattern.
                warnings.append(b'unsupported ignore pattern %s' % l)
                continue
            if l.startswith(b'/'):
                # A leading slash anchors the pattern at the repository root.
                result.append(b'rootglob:' + l[1:])
            else:
                result.append(b'relglob:' + l)
    return result, warnings
extensions.wrapfunction(matchmod, b'readpatternfile', readpatternfile)
# Translation table from pygit2 status flags to single-letter state codes
# consumed by __getitem__/__contains__ below. Presumably these mirror hg
# dirstate states ('n' normal, 'a' added, 'r' removed, 'm' needs-merge,
# '?' unknown/ignored) — TODO confirm against mercurial's dirstate docs.
# Empty when pygit2 is unavailable.
_STATUS_MAP = {}
if pygit2:
    _STATUS_MAP = {
        pygit2.GIT_STATUS_CONFLICTED: b'm',
        pygit2.GIT_STATUS_CURRENT: b'n',
        pygit2.GIT_STATUS_IGNORED: b'?',
        pygit2.GIT_STATUS_INDEX_DELETED: b'r',
        pygit2.GIT_STATUS_INDEX_MODIFIED: b'n',
        pygit2.GIT_STATUS_INDEX_NEW: b'a',
        pygit2.GIT_STATUS_INDEX_RENAMED: b'a',
        pygit2.GIT_STATUS_INDEX_TYPECHANGE: b'n',
        pygit2.GIT_STATUS_WT_DELETED: b'r',
        pygit2.GIT_STATUS_WT_MODIFIED: b'n',
        pygit2.GIT_STATUS_WT_NEW: b'?',
        pygit2.GIT_STATUS_WT_RENAMED: b'a',
        pygit2.GIT_STATUS_WT_TYPECHANGE: b'n',
        pygit2.GIT_STATUS_WT_UNREADABLE: b'?',
        pygit2.GIT_STATUS_INDEX_MODIFIED | pygit2.GIT_STATUS_WT_MODIFIED: b'm',
    }
@interfaceutil.implementer(intdirstate.idirstate)
class gitdirstate(object):
    """Expose mercurial's dirstate interface on top of a pygit2 repository.

    State is read from and written to the git index rather than a .hg
    dirstate file. Many operations are best-effort or unimplemented — see
    the TODO comments throughout.
    """
    def __init__(self, ui, root, gitrepo):
        self._ui = ui
        self._root = os.path.dirname(root)
        self.git = gitrepo
        self._plchangecallbacks = {}
        # TODO: context.poststatusfixup is bad and uses this attribute
        self._dirty = False
    def p1(self):
        # First parent is whatever commit HEAD resolves to.
        try:
            return self.git.head.peel().id.raw
        except pygit2.GitError:
            # Typically happens when peeling HEAD fails, as in an
            # empty repository.
            return sha1nodeconstants.nullid
    def p2(self):
        # TODO: MERGE_HEAD? something like that, right?
        return sha1nodeconstants.nullid
    def setparents(self, p1, p2=None):
        # Only single-parent (non-merge) states are supported.
        if p2 is None:
            p2 = sha1nodeconstants.nullid
        assert p2 == sha1nodeconstants.nullid, b'TODO merging support'
        self.git.head.set_target(gitutil.togitnode(p1))
    @util.propertycache
    def identity(self):
        # Stat of the git index file, cached for change detection.
        return util.filestat.frompath(
            os.path.join(self._root, b'.git', b'index')
        )
    def branch(self):
        # Git branches are not mapped onto hg branches; everything is 'default'.
        return b'default'
    def parents(self):
        # TODO how on earth do we find p2 if a merge is in flight?
        return self.p1(), sha1nodeconstants.nullid
    def __iter__(self):
        return (pycompat.fsencode(f.path) for f in self.git.index)
    def items(self):
        for ie in self.git.index:
            yield ie.path, None # value should be a DirstateItem
    # py2,3 compat forward
    iteritems = items
    def __getitem__(self, filename):
        # Return the single-letter state for filename; '?' when git does
        # not know the file.
        try:
            gs = self.git.status_file(filename)
        except KeyError:
            return b'?'
        return _STATUS_MAP[gs]
    def __contains__(self, filename):
        try:
            gs = self.git.status_file(filename)
            return _STATUS_MAP[gs] != b'?'
        except KeyError:
            return False
    def status(self, match, subrepos, ignored, clean, unknown):
        listclean = clean
        # TODO handling of clean files - can we get that from git.status()?
        modified, added, removed, deleted, unknown, ignored, clean = (
            [],
            [],
            [],
            [],
            [],
            [],
            [],
        )
        gstatus = self.git.status()
        for path, status in gstatus.items():
            path = pycompat.fsencode(path)
            if not match(path):
                continue
            if status == pygit2.GIT_STATUS_IGNORED:
                if path.endswith(b'/'):
                    continue
                ignored.append(path)
            elif status in (
                pygit2.GIT_STATUS_WT_MODIFIED,
                pygit2.GIT_STATUS_INDEX_MODIFIED,
                pygit2.GIT_STATUS_WT_MODIFIED
                | pygit2.GIT_STATUS_INDEX_MODIFIED,
            ):
                modified.append(path)
            elif status == pygit2.GIT_STATUS_INDEX_NEW:
                added.append(path)
            elif status == pygit2.GIT_STATUS_WT_NEW:
                unknown.append(path)
            elif status == pygit2.GIT_STATUS_WT_DELETED:
                deleted.append(path)
            elif status == pygit2.GIT_STATUS_INDEX_DELETED:
                removed.append(path)
            else:
                raise error.Abort(
                    b'unhandled case: status for %r is %r' % (path, status)
                )
        if listclean:
            # Anything in the index that git reported nothing about is clean.
            observed = set(
                modified + added + removed + deleted + unknown + ignored
            )
            index = self.git.index
            index.read()
            for entry in index:
                path = pycompat.fsencode(entry.path)
                if not match(path):
                    continue
                if path in observed:
                    continue # already in some other set
                if path[-1] == b'/':
                    continue # directory
                clean.append(path)
        # TODO are we really always sure of status here?
        return (
            False,
            scmutil.status(
                modified, added, removed, deleted, unknown, ignored, clean
            ),
        )
    def flagfunc(self, buildfallback):
        # TODO we can do better
        return buildfallback()
    def getcwd(self):
        # TODO is this a good way to do this?
        return os.path.dirname(
            os.path.dirname(pycompat.fsencode(self.git.path))
        )
    def normalize(self, path):
        normed = util.normcase(path)
        assert normed == path, b"TODO handling of case folding: %s != %s" % (
            normed,
            path,
        )
        return path
    @property
    def _checklink(self):
        return util.checklink(os.path.dirname(pycompat.fsencode(self.git.path)))
    def copies(self):
        # TODO support copies?
        return {}
    # # TODO what the heck is this
    _filecache = set()
    def pendingparentchange(self):
        # TODO: we need to implement the context manager bits and
        # correctly stage/revert index edits.
        return False
    def write(self, tr):
        # TODO: call parent change callbacks
        if tr:
            # Defer the index write until the transaction flushes its
            # pending changes.
            def writeinner(category):
                self.git.index.write()
            tr.addpending(b'gitdirstate', writeinner)
        else:
            self.git.index.write()
    def pathto(self, f, cwd=None):
        if cwd is None:
            cwd = self.getcwd()
        # TODO core dirstate does something about slashes here
        assert isinstance(f, bytes)
        r = util.pathto(self._root, cwd, f)
        return r
    def matches(self, match):
        # Yield every index path accepted by the matcher.
        for x in self.git.index:
            p = pycompat.fsencode(x.path)
            if match(p):
                yield p
    def set_clean(self, f, parentfiledata=None):
        """Mark a file normal and clean."""
        # TODO: for now we just let libgit2 re-stat the file. We can
        # clearly do better.
    def set_possibly_dirty(self, f):
        """Mark a file normal, but possibly dirty."""
        # TODO: for now we just let libgit2 re-stat the file. We can
        # clearly do better.
    def walk(self, match, subrepos, unknown, ignored, full=True):
        # TODO: we need to use .status() and not iterate the index,
        # because the index doesn't force a re-walk and so `hg add` of
        # a new file without an intervening call to status will
        # silently do nothing.
        r = {}
        cwd = self.getcwd()
        for path, status in self.git.status().items():
            if path.startswith('.hg/'):
                continue
            path = pycompat.fsencode(path)
            if not match(path):
                continue
            # TODO construct the stat info from the status object?
            try:
                s = os.stat(os.path.join(cwd, path))
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                continue
            r[path] = s
        return r
    def savebackup(self, tr, backupname):
        # TODO: figure out a strategy for saving index backups.
        pass
    def restorebackup(self, tr, backupname):
        # TODO: figure out a strategy for saving index backups.
        pass
    def set_tracked(self, f):
        # Returns False when the file was already tracked, True when added.
        uf = pycompat.fsdecode(f)
        if uf in self.git.index:
            return False
        index = self.git.index
        index.read()
        index.add(uf)
        index.write()
        return True
    def add(self, f):
        index = self.git.index
        index.read()
        index.add(pycompat.fsdecode(f))
        index.write()
    def drop(self, f):
        index = self.git.index
        index.read()
        fs = pycompat.fsdecode(f)
        if fs in index:
            index.remove(fs)
            index.write()
    def set_untracked(self, f):
        index = self.git.index
        index.read()
        fs = pycompat.fsdecode(f)
        if fs in index:
            index.remove(fs)
            index.write()
            return True
        return False
    def remove(self, f):
        index = self.git.index
        index.read()
        index.remove(pycompat.fsdecode(f))
        index.write()
    def copied(self, path):
        # TODO: track copies?
        return None
    def prefetch_parents(self):
        # TODO
        pass
    def update_file(self, *args, **kwargs):
        # TODO
        pass
    @contextlib.contextmanager
    def parentchange(self):
        # TODO: track this maybe?
        yield
    def addparentchangecallback(self, category, callback):
        # TODO: should this be added to the dirstate interface?
        self._plchangecallbacks[category] = callback
    def clearbackup(self, tr, backupname):
        # TODO
        pass
    def setbranch(self, branch):
        raise error.Abort(
            b'git repos do not support branches. try using bookmarks'
        )
|
|
# -*- coding: Latin-1 -*-
"""peutils, Portable Executable utilities module
Copyright (c) 2005-2013 Ero Carrera <ero.carrera@gmail.com>
All rights reserved.
For detailed copyright information see the file COPYING in
the root of the distribution archive.
"""
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import range
from past.utils import old_div
from builtins import object
import os
import re
import string
import urllib.request, urllib.parse, urllib.error
import pefile
__author__ = 'Ero Carrera'
__version__ = pefile.__version__
__contact__ = 'ero.carrera@gmail.com'
class SignatureDatabase(object):
"""This class loads and keeps a parsed PEiD signature database.
Usage:
sig_db = SignatureDatabase('/path/to/signature/file')
and/or
sig_db = SignatureDatabase()
sig_db.load('/path/to/signature/file')
Signature databases can be combined by performing multiple loads.
The filename parameter can be a URL too. In that case the
signature database will be downloaded from that location.
"""
def __init__(self, filename=None, data=None):
# RegExp to match a signature block
#
self.parse_sig = re.compile(
'\[(.*?)\]\s+?signature\s*=\s*(.*?)(\s+\?\?)*\s*ep_only\s*=\s*(\w+)(?:\s*section_start_only\s*=\s*(\w+)|)', re.S)
# Signature information
#
# Signatures are stored as trees using dictionaries
# The keys are the byte values while the values for
# each key are either:
#
# - Other dictionaries of the same form for further
# bytes in the signature
#
# - A dictionary with a string as a key (packer name)
# and None as value to indicate a full signature
#
self.signature_tree_eponly_true = dict ()
self.signature_count_eponly_true = 0
self.signature_tree_eponly_false = dict ()
self.signature_count_eponly_false = 0
self.signature_tree_section_start = dict ()
self.signature_count_section_start = 0
# The depth (length) of the longest signature
#
self.max_depth = 0
self.__load(filename=filename, data=data)
def generate_section_signatures(self, pe, name, sig_length=512):
"""Generates signatures for all the sections in a PE file.
If the section contains any data a signature will be created
for it. The signature name will be a combination of the
parameter 'name' and the section number and its name.
"""
section_signatures = list()
for idx, section in enumerate(pe.sections):
if section.SizeOfRawData < sig_length:
continue
#offset = pe.get_offset_from_rva(section.VirtualAddress)
offset = section.PointerToRawData
sig_name = '%s Section(%d/%d,%s)' % (
name, idx + 1, len(pe.sections),
''.join([c for c in section.Name if c in string.printable]))
section_signatures.append(
self.__generate_signature(
pe, offset, sig_name, ep_only=False,
section_start_only=True,
sig_length=sig_length) )
return '\n'.join(section_signatures)+'\n'
def generate_ep_signature(self, pe, name, sig_length=512):
"""Generate signatures for the entry point of a PE file.
Creates a signature whose name will be the parameter 'name'
and the section number and its name.
"""
offset = pe.get_offset_from_rva(pe.OPTIONAL_HEADER.AddressOfEntryPoint)
return self.__generate_signature(
pe, offset, name, ep_only=True, sig_length=sig_length)
def __generate_signature(self, pe, offset, name, ep_only=False,
section_start_only=False, sig_length=512):
data = pe.__data__[offset:offset+sig_length]
signature_bytes = ' '.join(['%02x' % ord(c) for c in data])
if ep_only == True:
ep_only = 'true'
else:
ep_only = 'false'
if section_start_only == True:
section_start_only = 'true'
else:
section_start_only = 'false'
signature = '[%s]\nsignature = %s\nep_only = %s\nsection_start_only = %s\n' % (
name, signature_bytes, ep_only, section_start_only)
return signature
def match(self, pe, ep_only=True, section_start_only=False):
"""Matches and returns the exact match(es).
If ep_only is True the result will be a string with
the packer name. Otherwise it will be a list of the
form (file_ofsset, packer_name). Specifying where
in the file the signature was found.
"""
matches = self.__match(pe, ep_only, section_start_only)
# The last match (the most precise) from the
# list of matches (if any) is returned
#
if matches:
if ep_only == False:
# Get the most exact match for each list of matches
# at a given offset
#
return [(match[0], match[1][-1]) for match in matches]
return matches[1][-1]
return None
def match_all(self, pe, ep_only=True, section_start_only=False):
"""Matches and returns all the likely matches."""
matches = self.__match(pe, ep_only, section_start_only)
if matches:
if ep_only == False:
# Get the most exact match for each list of matches
# at a given offset
#
return matches
return matches[1]
return None
def __match(self, pe, ep_only, section_start_only):
# Load the corresponding set of signatures
# Either the one for ep_only equal to True or
# to False
#
if section_start_only is True:
# Fetch the data of the executable as it'd
# look once loaded in memory
#
try :
data = pe.__data__
except Exception as excp :
raise
# Load the corresponding tree of signatures
#
signatures = self.signature_tree_section_start
# Set the starting address to start scanning from
#
scan_addresses = [section.PointerToRawData for section in pe.sections]
elif ep_only is True:
# Fetch the data of the executable as it'd
# look once loaded in memory
#
try :
data = pe.get_memory_mapped_image()
except Exception as excp :
raise
# Load the corresponding tree of signatures
#
signatures = self.signature_tree_eponly_true
# Fetch the entry point of the PE file and the data
# at the entry point
#
ep = pe.OPTIONAL_HEADER.AddressOfEntryPoint
# Set the starting address to start scanning from
#
scan_addresses = [ep]
else:
data = pe.__data__
signatures = self.signature_tree_eponly_false
scan_addresses = range( len(data) )
# For each start address, check if any signature matches
#
matches = []
for idx in scan_addresses:
result = self.__match_signature_tree(
signatures,
data[idx:idx+self.max_depth])
if result:
matches.append( (idx, result) )
# Return only the matched items found at the entry point if
# ep_only is True (matches will have only one element in that
# case)
#
if ep_only is True:
if matches:
return matches[0]
return matches
def match_data(self, code_data, ep_only=True, section_start_only=False):
data = code_data
scan_addresses = [ 0 ]
# Load the corresponding set of signatures
# Either the one for ep_only equal to True or
# to False
#
if section_start_only is True:
# Load the corresponding tree of signatures
#
signatures = self.signature_tree_section_start
# Set the starting address to start scanning from
#
elif ep_only is True:
# Load the corresponding tree of signatures
#
signatures = self.signature_tree_eponly_true
# For each start address, check if any signature matches
#
matches = []
for idx in scan_addresses:
result = self.__match_signature_tree(
signatures,
data[idx:idx+self.max_depth])
if result:
matches.append( (idx, result) )
# Return only the matched items found at the entry point if
# ep_only is True (matches will have only one element in that
# case)
#
if ep_only is True:
if matches:
return matches[0]
return matches
    def __match_signature_tree(self, signature_tree, data, depth = 0):
        """Recursive function to find matches along the signature tree.

        signature_tree is the part of the tree left to walk
        data is the data being checked against the signature tree
        depth keeps track of how far we have gone down the tree

        Returns a list of name-lists, one list per signature that matched.
        """
        # NOTE(review): ord() below assumes `data` is a str of single-char
        # elements; on Python 3 slicing bytes yields ints, so ord() would
        # raise TypeError — confirm what callers actually pass.
        matched_names = list ()
        match = signature_tree
        # Walk the bytes in the data and match them
        # against the signature
        #
        for idx, byte in enumerate ( [ord (b) for b in data] ):
            # If the tree is exhausted...
            #
            if match is None :
                break
            # Get the next byte in the tree
            #
            match_next = match.get(byte, None)
            # If None is among the values for the key
            # it means that a signature in the database
            # ends here and that there's an exact match.
            #
            if None in list(match.values()):
                # idx represent how deep we are in the tree
                #
                #names = [idx+depth]
                names = list()
                # For each of the item pairs we check
                # if it has an element other than None,
                # if not then we have an exact signature
                #
                for item in list(match.items()):
                    if item[1] is None :
                        names.append (item[0])
                matched_names.append(names)
            # If a wildcard is found keep scanning the signature
            # ignoring the byte.
            #
            if '??' in match :
                # Recurse down the wildcard branch against the remainder of
                # the data; any matches found there are merged in.
                match_tree_alternate = match.get ('??', None)
                data_remaining = data[idx + 1 :]
                if data_remaining:
                    matched_names.extend(
                        self.__match_signature_tree(
                            match_tree_alternate, data_remaining, idx+depth+1))
            match = match_next
        # If we have any more packer name in the end of the signature tree
        # add them to the matches
        #
        if match is not None and None in list(match.values()):
            #names = [idx + depth + 1]
            names = list()
            for item in list(match.items()) :
                if item[1] is None:
                    names.append(item[0])
            matched_names.append(names)
        return matched_names
def load(self , filename=None, data=None):
"""Load a PEiD signature file.
Invoking this method on different files combines the signatures.
"""
self.__load(filename=filename, data=data)
def __load(self, filename=None, data=None):
if filename is not None:
# If the path does not exist, attempt to open a URL
#
if not os.path.exists(filename):
try:
sig_f = urllib.request.urlopen(filename)
sig_data = sig_f.read()
sig_f.close()
except IOError:
# Let this be raised back to the user...
raise
else:
# Get the data for a file
#
try:
sig_f = open( filename, 'rt' )
sig_data = sig_f.read()
sig_f.close()
except IOError:
# Let this be raised back to the user...
raise
else:
sig_data = data
# If the file/URL could not be read or no "raw" data
# was provided there's nothing else to do
#
if not sig_data:
return
# Helper function to parse the signature bytes
#
def to_byte(value) :
if value == '??' or value == '?0' :
return value
return int (value, 16)
# Parse all the signatures in the file
#
matches = self.parse_sig.findall(sig_data)
# For each signature, get the details and load it into the
# signature tree
#
for packer_name, signature, superfluous_wildcards, ep_only, section_start_only in matches:
ep_only = ep_only.strip().lower()
signature = signature.replace('\\n', '').strip()
signature_bytes = [to_byte(b) for b in signature.split()]
if ep_only == 'true':
ep_only = True
else:
ep_only = False
if section_start_only == 'true':
section_start_only = True
else:
section_start_only = False
depth = 0
if section_start_only is True:
tree = self.signature_tree_section_start
self.signature_count_section_start += 1
else:
if ep_only is True :
tree = self.signature_tree_eponly_true
self.signature_count_eponly_true += 1
else :
tree = self.signature_tree_eponly_false
self.signature_count_eponly_false += 1
for idx, byte in enumerate (signature_bytes) :
if idx+1 == len(signature_bytes):
tree[byte] = tree.get( byte, dict() )
tree[byte][packer_name] = None
else :
tree[byte] = tree.get ( byte, dict() )
tree = tree[byte]
depth += 1
if depth > self.max_depth:
self.max_depth = depth
def is_valid( pe ):
    """Check whether *pe* is a valid PE file. Unimplemented placeholder."""
    pass
def is_suspicious( pe ):
    """Gather heuristic indicators that *pe* may be tampered with or packed.

    Indicators currently examined:
      * relocation entries overlapping the entry point
      * long runs of (near-)sequential relocations
      * warnings reported by ``pe.get_warnings()`` (parsing problems,
        W&X sections, uncommon field values, unusual entry point,
        suspicious imports)

    The indicators are not yet combined into a verdict, so the function
    always returns None (unfinished upstream).
    """
    relocations_overlap_entry_point = False
    sequential_relocs = 0

    # If relocation data is found and the entries go over the entry point,
    # and also are very continuous or point outside section's boundaries,
    # it might imply that an obfuscation trick is being used or the
    # relocations are corrupt (maybe intentionally).
    if hasattr(pe, 'DIRECTORY_ENTRY_BASERELOC'):
        for base_reloc in pe.DIRECTORY_ENTRY_BASERELOC:
            last_reloc_rva = None
            for reloc in base_reloc.entries:
                if reloc.rva <= pe.OPTIONAL_HEADER.AddressOfEntryPoint <= reloc.rva + 4:
                    relocations_overlap_entry_point = True
                if last_reloc_rva is not None and last_reloc_rva <= reloc.rva <= last_reloc_rva + 4:
                    sequential_relocs += 1
                last_reloc_rva = reloc.rva

    # TODO: import tables or strings pointed into the header, or into the
    # gap between the PE header and the first section, are suspicious.
    warnings_while_parsing = False

    if pe.get_warnings():
        # Fixed: this was previously a bare no-op expression statement
        # ("warnings_while_parsing") instead of an assignment.
        warnings_while_parsing = True

    # TODO: string-density and entropy-based heuristics; compressed data in
    # a driver would be especially suspicious.
    pass
def is_probably_packed( pe ):
    """Return True if there is a high likelihood that the file is packed.

    Each section's entropy is measured; if sections that look compressed
    (entropy > 7.4) account for more than 20% of the data up to the end of
    the last section, the file is assumed to be packed or to contain
    compressed data (e.g. an installer).  Overlay data past the last
    section is not taken into account.
    """
    # Length of the data up to the end of the last section in the file.
    total_pe_data_length = len(pe.trim())
    if not total_pe_data_length:
        # Fixed: degenerate/empty file previously raised ZeroDivisionError
        # below whenever it still declared sections.
        return False

    # The 7.4 threshold is empirical, based on looking at a few files
    # packed by different packers.
    total_compressed_data = 0
    for section in pe.sections:
        if section.get_entropy() > 7.4:
            total_compressed_data += len(section.get_data())

    # The numerator is already a float, so plain division is identical to
    # the old past.utils.old_div() call and drops that dependency.  The
    # ratio only grows, so one final check replaces the per-section test.
    return (1.0 * total_compressed_data) / total_pe_data_length > .2
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Base operator for SQL to GCS operators.
"""
import abc
import json
import warnings
from tempfile import NamedTemporaryFile
import unicodecsv as csv
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.utils.decorators import apply_defaults
class BaseSQLToGCSOperator(BaseOperator, metaclass=abc.ABCMeta):
    """
    Base operator: dump the result of a SQL query to Google Cloud Storage
    as JSON or CSV files, optionally with a BigQuery schema file.

    :param sql: The SQL to execute.
    :type sql: str
    :param bucket: The bucket to upload to.
    :type bucket: str
    :param filename: The filename to use as the object name when uploading
        to Google Cloud Storage. A {} should be specified in the filename
        to allow the operator to inject file numbers in cases where the
        file is split due to size.
    :type filename: str
    :param schema_filename: If set, the filename to use as the object name
        when uploading a .json file containing the BigQuery schema fields
        for the table that was dumped from the database.
    :type schema_filename: str
    :param approx_max_file_size_bytes: This operator supports the ability
        to split large table dumps into multiple files (see notes in the
        filename param docs above). This param allows developers to specify the
        file size of the splits. Check https://cloud.google.com/storage/quotas
        to see the maximum allowed file size for a single object.
    :type approx_max_file_size_bytes: long
    :param export_format: Desired format of files to be exported.
    :type export_format: str
    :param field_delimiter: The delimiter to be used for CSV files.
    :type field_delimiter: str
    :param gzip: Option to compress file for upload (does not apply to schemas).
    :type gzip: bool
    :param schema: The schema to use, if any. Should be a list of dict or
        a str. Pass a string if using Jinja template, otherwise, pass a list of
        dict. Examples could be seen: https://cloud.google.com/bigquery/docs
        /schemas#specifying_a_json_schema_file
    :type schema: str or list
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: str
    :param google_cloud_storage_conn_id: (Deprecated) The connection ID used to connect to Google Cloud
        Platform. This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :type google_cloud_storage_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to
        work, the service account making the request must have domain-wide
        delegation enabled.
    :param parameters: a parameters dict that is substituted at query runtime.
    :type parameters: dict
    """
    template_fields = ('sql', 'bucket', 'filename', 'schema_filename', 'schema', 'parameters')
    template_ext = ('.sql',)
    ui_color = '#a0e08c'

    @apply_defaults
    def __init__(self,  # pylint: disable=too-many-arguments
                 sql,
                 bucket,
                 filename,
                 schema_filename=None,
                 approx_max_file_size_bytes=1900000000,
                 export_format='json',
                 field_delimiter=',',
                 gzip=False,
                 schema=None,
                 parameters=None,
                 gcp_conn_id='google_cloud_default',
                 google_cloud_storage_conn_id=None,
                 delegate_to=None,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        if google_cloud_storage_conn_id:
            warnings.warn(
                "The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
            gcp_conn_id = google_cloud_storage_conn_id
        self.sql = sql
        self.bucket = bucket
        self.filename = filename
        self.schema_filename = schema_filename
        self.approx_max_file_size_bytes = approx_max_file_size_bytes
        self.export_format = export_format.lower()
        self.field_delimiter = field_delimiter
        self.gzip = gzip
        self.schema = schema
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        # Fixed: ``self.parameters`` was previously assigned twice.
        self.parameters = parameters

    def execute(self, context):
        """Run the query, write the results locally and upload them to GCS."""
        cursor = self.query()
        files_to_upload = self._write_local_data_files(cursor)
        # If a schema is set, create a BQ schema JSON file.
        if self.schema_filename:
            files_to_upload.append(self._write_local_schema_file(cursor))
        # Flush all files before uploading.
        for tmp_file in files_to_upload:
            tmp_file['file_handle'].flush()
        self._upload_to_gcs(files_to_upload)
        # Close all temp file handles (NamedTemporaryFile deletes on close).
        for tmp_file in files_to_upload:
            tmp_file['file_handle'].close()

    def convert_types(self, schema, col_type_dict, row):
        """Convert values from DBAPI to output-friendly formats."""
        return [
            self.convert_type(value, col_type_dict.get(name))
            for name, value in zip(schema, row)
        ]

    def _write_local_data_files(self, cursor):
        """
        Takes a cursor, and writes results to a local file.

        :return: A list of dictionaries, one per file split, each holding
            the GCS object name, the local file handle and the MIME type.
        """
        schema = list(map(lambda schema_tuple: schema_tuple[0], cursor.description))
        col_type_dict = self._get_col_type_dict()
        file_no = 0
        tmp_file_handle = NamedTemporaryFile(delete=True)
        if self.export_format == 'csv':
            file_mime_type = 'text/csv'
        else:
            file_mime_type = 'application/json'
        files_to_upload = [{
            'file_name': self.filename.format(file_no),
            'file_handle': tmp_file_handle,
            'file_mime_type': file_mime_type
        }]

        if self.export_format == 'csv':
            csv_writer = self._configure_csv_file(tmp_file_handle, schema)

        for row in cursor:
            # Convert datetime objects to utc seconds, and decimals to floats.
            # Convert binary type object to string encoded with base64.
            row = self.convert_types(schema, col_type_dict, row)

            if self.export_format == 'csv':
                csv_writer.writerow(row)
            else:
                row_dict = dict(zip(schema, row))
                # TODO validate that row isn't > 2MB. BQ enforces a hard row size of 2MB.
                tmp_file_handle.write(json.dumps(row_dict, sort_keys=True).encode('utf-8'))
                # Append newline to make dumps BigQuery compatible.
                tmp_file_handle.write(b'\n')

            # Stop if the file exceeds the file size limit and start a new split.
            if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
                file_no += 1
                tmp_file_handle = NamedTemporaryFile(delete=True)
                files_to_upload.append({
                    'file_name': self.filename.format(file_no),
                    'file_handle': tmp_file_handle,
                    'file_mime_type': file_mime_type
                })
                if self.export_format == 'csv':
                    csv_writer = self._configure_csv_file(tmp_file_handle, schema)
        return files_to_upload

    def _configure_csv_file(self, file_handle, schema):
        """Configure a csv writer with the file_handle and write schema
        as headers for the new file.
        """
        csv_writer = csv.writer(file_handle, encoding='utf-8',
                                delimiter=self.field_delimiter)
        csv_writer.writerow(schema)
        return csv_writer

    @abc.abstractmethod
    def query(self):
        """Execute DBAPI query."""

    @abc.abstractmethod
    def field_to_bigquery(self, field):
        """Convert a DBAPI field to BigQuery schema format."""

    @abc.abstractmethod
    def convert_type(self, value, schema_type):
        """Convert a value from DBAPI to output-friendly formats."""

    def _get_col_type_dict(self):
        """
        Return a dict of column name and column type based on self.schema if not None.
        """
        schema = []
        if isinstance(self.schema, str):
            schema = json.loads(self.schema)
        elif isinstance(self.schema, list):
            schema = self.schema
        elif self.schema is not None:
            # Fixed: the two literals previously concatenated without a
            # separating space ("type.Should").
            self.log.warning('Using default schema due to unexpected type. '
                             'Should be a string or list.')

        col_type_dict = {}
        try:
            col_type_dict = {col['name']: col['type'] for col in schema}
        except KeyError:
            self.log.warning('Using default schema due to missing name or type. Please '
                             'refer to: https://cloud.google.com/bigquery/docs/schemas'
                             '#specifying_a_json_schema_file')
        return col_type_dict

    def _write_local_schema_file(self, cursor):
        """
        Takes a cursor, and writes the BigQuery schema for the results to a
        local file system.

        :return: A dictionary holding the GCS object name, the local file
            handle and the MIME type for the .json schema file.
        """
        schema = [self.field_to_bigquery(field) for field in cursor.description]

        self.log.info('Using schema for %s: %s', self.schema_filename, schema)
        tmp_schema_file_handle = NamedTemporaryFile(delete=True)
        tmp_schema_file_handle.write(json.dumps(schema, sort_keys=True).encode('utf-8'))
        schema_file_to_upload = {
            'file_name': self.schema_filename,
            'file_handle': tmp_schema_file_handle,
            'file_mime_type': 'application/json',
        }
        return schema_file_to_upload

    def _upload_to_gcs(self, files_to_upload):
        """
        Upload all of the file splits (and optionally the schema .json file) to
        Google Cloud Storage.
        """
        hook = GCSHook(
            google_cloud_storage_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to)
        for tmp_file in files_to_upload:
            # Fixed: the gzip option was applied only to the schema file,
            # the inverse of the documented behaviour ("does not apply to
            # schemas").  Data files are now gzipped, the schema never is.
            hook.upload(self.bucket, tmp_file.get('file_name'),
                        tmp_file.get('file_handle').name,
                        mime_type=tmp_file.get('file_mime_type'),
                        gzip=self.gzip if tmp_file.get('file_name') != self.schema_filename else False)
|
|
"""
A selection of cross-compatible functions for Python 2 and 3.
This module exports useful functions for 2/3 compatible code:
* bind_method: binds functions to classes
* ``native_str_to_bytes`` and ``bytes_to_native_str``
* ``native_str``: always equal to the native platform string object (because
this may be shadowed by imports from future.builtins)
* lists: lrange(), lmap(), lzip(), lfilter()
* iterable method compatibility:
- iteritems, iterkeys, itervalues
- viewitems, viewkeys, viewvalues
These use the original method if available, otherwise they use items,
keys, values.
* types:
* text_type: unicode in Python 2, str in Python 3
    * binary_type: str in Python 2, bytes in Python 3
* string_types: basestring in Python 2, str in Python 3
* bchr(c):
Take an integer and make a 1-character byte string
* bord(c)
Take the result of indexing on a byte string and make an integer
* tobytes(s)
Take a text string, a byte string, or a sequence of characters taken
from a byte string, and make a byte string.
* raise_from()
* raise_with_traceback()
This module also defines these decorators:
* ``python_2_unicode_compatible``
* ``with_metaclass``
* ``implements_iterator``
Some of the functions in this module come from the following sources:
* Jinja2 (BSD licensed: see
https://github.com/mitsuhiko/jinja2/blob/master/LICENSE)
* Pandas compatibility module pandas.compat
* six.py by Benjamin Peterson
* Django
"""
import types
import sys
import numbers
import functools
import copy
import inspect
# Interpreter-version flags used throughout this module to select the
# appropriate implementation at import time.
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
PY26 = sys.version_info[0:2] == (2, 6)
PY27 = sys.version_info[0:2] == (2, 7)
# True when running under PyPy (any version).
PYPY = hasattr(sys, 'pypy_translation_info')
def python_2_unicode_compatible(cls):
    """
    Class decorator for Python 2/3 string compatibility.

    Apply to a class whose ``__str__`` returns unicode text: under Python 2
    that method is re-bound as ``__unicode__`` and ``__str__`` is replaced
    with a UTF-8-encoding shim; under Python 3 the class is returned
    untouched.  The implementation comes from django.utils.encoding.
    """
    if PY3:
        return cls
    cls.__unicode__ = cls.__str__
    cls.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return cls
def with_metaclass(meta, *bases):
    """
    Create a base class with metaclass *meta* (from jinja2/_compat.py, BSD).

    Usage::

        class Form(with_metaclass(FormType, BaseForm)):
            pass

    A throwaway metaclass builds one dummy intermediate class; when that
    dummy is subclassed, the real metaclass *meta* takes over and builds the
    subclass directly from *bases*, so — unlike six.with_metaclass — no
    dummy class ends up in the final MRO.  ``__call__``/``__init__`` are
    pinned back to ``type``'s to satisfy internal type checks.
    """
    class _SelfReplacingMeta(meta):
        __call__ = type.__call__
        __init__ = type.__init__

        def __new__(mcls, name, this_bases, namespace):
            if this_bases is None:
                # First instantiation: produce the dummy placeholder class.
                return type.__new__(mcls, name, (), namespace)
            # Subclassing the placeholder: hand off to the real metaclass.
            return meta(name, bases, namespace)

    return _SelfReplacingMeta('temporary_class', None, {})
# Definitions from pandas.compat and six.py follow:
# Byte/char helpers and the canonical type names/tuples for each major
# Python version.
if PY3:
    def bchr(s):
        # Take an integer and make a 1-character byte string.
        return bytes([s])
    def bstr(s):
        # Coerce text (via latin-1) or any bytes-like value to bytes.
        if isinstance(s, str):
            return bytes(s, 'latin-1')
        else:
            return bytes(s)
    def bord(s):
        # Indexing a bytes object already yields an int on Py3.
        return s
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes
else:
    # Python 2
    def bchr(s):
        return chr(s)
    def bstr(s):
        return str(s)
    def bord(s):
        return ord(s)
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
###
# tobytes(): accept a text string, a byte string, or a sequence of
# characters taken from a byte string, and return a byte string.
if PY3:
    def tobytes(s):
        if isinstance(s, bytes):
            return s
        else:
            if isinstance(s, str):
                return s.encode('latin-1')
            else:
                return bytes(s)
else:
    # Python 2
    def tobytes(s):
        if isinstance(s, unicode):
            return s.encode('latin-1')
        else:
            return ''.join(s)
tobytes.__doc__ = """
    Encodes to latin-1 (where the first 256 chars are the same as
    ASCII.)
    """
# Conversions between native strings, bytes and text for each version.
if PY3:
    def native_str_to_bytes(s, encoding='utf-8'):
        return s.encode(encoding)
    def bytes_to_native_str(b, encoding='utf-8'):
        return b.decode(encoding)
    def text_to_native_str(t, encoding=None):
        # On Py3 the native string type is already text.
        return t
else:
    # Python 2
    def native_str_to_bytes(s, encoding=None):
        from future.types import newbytes    # to avoid a circular import
        return newbytes(s)
    def bytes_to_native_str(b, encoding=None):
        return native(b)
    def text_to_native_str(t, encoding='ascii'):
        """
        Use this to create a Py2 native string when "from __future__ import
        unicode_literals" is in effect.
        """
        return unicode(t).encode(encoding)
native_str_to_bytes.__doc__ = """
    On Py3, returns an encoded string.
    On Py2, returns a newbytes type, ignoring the ``encoding`` argument.
    """
# lrange/lzip/lmap/lfilter: list-producing versions of range/zip/map/filter,
# matching the Python 2 builtins' behaviour on both versions.
if PY3:
    # list-producing versions of the major Python iterating functions
    def lrange(*args, **kwargs):
        return list(range(*args, **kwargs))
    def lzip(*args, **kwargs):
        return list(zip(*args, **kwargs))
    def lmap(*args, **kwargs):
        return list(map(*args, **kwargs))
    def lfilter(*args, **kwargs):
        return list(filter(*args, **kwargs))
else:
    import __builtin__
    # Python 2-builtin ranges produce lists
    lrange = __builtin__.range
    lzip = __builtin__.zip
    lmap = __builtin__.map
    lfilter = __builtin__.filter
def isidentifier(s, dotted=False):
    '''
    A function equivalent to the str.isidentifier method on Py3.

    With ``dotted=True`` every dot-separated component of *s* must itself
    be a valid identifier.
    '''
    if dotted:
        return all(isidentifier(component) for component in s.split('.'))
    if PY3:
        return s.isidentifier()
    # Python 2 has no str.isidentifier; approximate it with a regex.
    import re
    return bool(re.match(r"[a-zA-Z_][a-zA-Z0-9_]*$", s))
def viewitems(obj, **kwargs):
    """
    Iterate over dictionary items with the same set-like behaviour on
    Py2.7 as on Py3: prefers ``viewitems`` when the mapping provides one,
    falling back to ``items``.  *kwargs* are forwarded to the method.
    """
    method = getattr(obj, "viewitems", None)
    if method:
        return method(**kwargs)
    return obj.items(**kwargs)
def viewkeys(obj, **kwargs):
    """
    Iterate over dictionary keys with the same set-like behaviour on
    Py2.7 as on Py3: prefers ``viewkeys`` when the mapping provides one,
    falling back to ``keys``.  *kwargs* are forwarded to the method.
    """
    method = getattr(obj, "viewkeys", None)
    if method:
        return method(**kwargs)
    return obj.keys(**kwargs)
def viewvalues(obj, **kwargs):
    """
    Iterate over dictionary values with the same set-like behaviour on
    Py2.7 as on Py3: prefers ``viewvalues`` when the mapping provides one,
    falling back to ``values``.  *kwargs* are forwarded to the method.
    """
    method = getattr(obj, "viewvalues", None)
    if method:
        return method(**kwargs)
    return obj.values(**kwargs)
def iteritems(obj, **kwargs):
    """Pre-2.7-compatible item iteration; otherwise prefer viewitems().

    Uses ``obj.iteritems`` when present, else ``obj.items``; *kwargs* are
    forwarded to the method.
    """
    method = getattr(obj, "iteritems", None)
    if method:
        return method(**kwargs)
    return obj.items(**kwargs)
def iterkeys(obj, **kwargs):
    """Pre-2.7-compatible key iteration; otherwise prefer viewkeys().

    Uses ``obj.iterkeys`` when present, else ``obj.keys``; *kwargs* are
    forwarded to the method.
    """
    method = getattr(obj, "iterkeys", None)
    if method:
        return method(**kwargs)
    return obj.keys(**kwargs)
def itervalues(obj, **kwargs):
    """Pre-2.7-compatible value iteration; otherwise prefer viewvalues().

    Uses ``obj.itervalues`` when present, else ``obj.values``; *kwargs* are
    forwarded to the method.
    """
    method = getattr(obj, "itervalues", None)
    if method:
        return method(**kwargs)
    return obj.values(**kwargs)
def bind_method(cls, name, func):
    """Bind *func* to *cls* under *name*, Python 2 and 3 compatible.

    Parameters
    ----------
    cls : type
        class to receive bound method
    name : basestring
        name of method on class instance
    func : function
        function to be bound as method

    Returns
    -------
    None
    """
    if PY3:
        setattr(cls, name, func)
    else:
        # Only Python 2 distinguishes bound from unbound methods.
        setattr(cls, name, types.MethodType(func, None, cls))
def getexception():
    """Return the exception instance currently being handled (or None)."""
    _, current_exc, _ = sys.exc_info()
    return current_exc
def _get_caller_globals_and_locals():
"""
Returns the globals and locals of the calling frame.
Is there an alternative to frame hacking here?
"""
caller_frame = inspect.stack()[2]
myglobals = caller_frame[0].f_globals
mylocals = caller_frame[0].f_locals
return myglobals, mylocals
def _repr_strip(mystring):
"""
Returns the string without any initial or final quotes.
"""
r = repr(mystring)
if r.startswith("'") and r.endswith("'"):
return r[1:-1]
else:
return r
# Exception-raising helpers emulating the other major version's syntax.
# The Py3 branch uses exec() because "raise ... from ..." is a syntax error
# on Py2; the Py2 branch uses exec() because "raise tp, value, tb" is a
# syntax error on Py3.
if PY3:
    def raise_from(exc, cause):
        """
        Equivalent to:

            raise EXCEPTION from CAUSE

        on Python 3. (See PEP 3134).
        """
        myglobals, mylocals = _get_caller_globals_and_locals()

        # We pass the exception and cause along with other globals
        # when we exec():
        myglobals = myglobals.copy()
        myglobals['__python_future_raise_from_exc'] = exc
        myglobals['__python_future_raise_from_cause'] = cause
        execstr = "raise __python_future_raise_from_exc from __python_future_raise_from_cause"
        exec(execstr, myglobals, mylocals)

    def raise_(tp, value=None, tb=None):
        """
        A function that matches the Python 2.x ``raise`` statement. This
        allows re-raising exceptions with the cls value and traceback on
        Python 2 and 3.
        """
        if value is not None and isinstance(tp, Exception):
            raise TypeError("instance exception may not have a separate value")
        if value is not None:
            exc = tp(value)
        else:
            exc = tp
        if exc.__traceback__ is not tb:
            raise exc.with_traceback(tb)
        raise exc

    def raise_with_traceback(exc, traceback=Ellipsis):
        # Ellipsis is the "not passed" sentinel; fall back to the traceback
        # of the exception currently being handled.
        if traceback == Ellipsis:
            _, _, traceback = sys.exc_info()
        raise exc.with_traceback(traceback)

else:
    def raise_from(exc, cause):
        """
        Equivalent to:

            raise EXCEPTION from CAUSE

        on Python 3. (See PEP 3134).
        """
        # Is either arg an exception class (e.g. IndexError) rather than
        # instance (e.g. IndexError('my message here')? If so, pass the
        # name of the class undisturbed through to "raise ... from ...".
        if isinstance(exc, type) and issubclass(exc, Exception):
            e = exc()
            # exc = exc.__name__
            # execstr = "e = " + _repr_strip(exc) + "()"
            # myglobals, mylocals = _get_caller_globals_and_locals()
            # exec(execstr, myglobals, mylocals)
        else:
            e = exc
        e.__suppress_context__ = False
        if isinstance(cause, type) and issubclass(cause, Exception):
            e.__cause__ = cause()
            e.__suppress_context__ = True
        elif cause is None:
            e.__cause__ = None
            e.__suppress_context__ = True
        elif isinstance(cause, BaseException):
            e.__cause__ = cause
            e.__suppress_context__ = True
        else:
            raise TypeError("exception causes must derive from BaseException")
        e.__context__ = sys.exc_info()[1]
        raise e

    exec('''
def raise_(tp, value=None, tb=None):
    raise tp, value, tb

def raise_with_traceback(exc, traceback=Ellipsis):
    if traceback == Ellipsis:
        _, _, traceback = sys.exc_info()
    raise exc, None, traceback
'''.strip())

raise_with_traceback.__doc__ = (
"""Raise exception with existing traceback.
If traceback is not passed, uses sys.exc_info() to get traceback."""
)

# Deprecated alias for backward compatibility with ``future`` versions < 0.11:
reraise = raise_
def implements_iterator(cls):
    '''
    From jinja2/_compat.py. License: BSD.

    Class decorator giving a class that defines ``__next__`` a
    Python-2-style ``next`` method.  A no-op under Python 3.

    Use as a decorator like this::

        @implements_iterator
        class UppercasingIterator(object):
            def __init__(self, iterable):
                self._iter = iter(iterable)
            def __iter__(self):
                return self
            def __next__(self):
                return next(self._iter).upper()
    '''
    if PY3:
        return cls
    # Python 2: rename __next__ to next.
    cls.next = cls.__next__
    del cls.__next__
    return cls
# Accessor for an iterator's "next" method.
# NOTE(review): these branches look inverted — Python 3 iterators expose
# ``__next__`` (not ``next``), while classes processed by
# @implements_iterator on Python 2 end up exposing ``next``.  Left as-is
# to preserve upstream behaviour; confirm the intent before relying on it.
if PY3:
    get_next = lambda x: x.next
else:
    get_next = lambda x: x.__next__
def encode_filename(filename):
    """On Py2, UTF-8-encode unicode filenames; everything else (and all of
    Py3) is returned unchanged."""
    if PY3:
        return filename
    if isinstance(filename, unicode):
        return filename.encode('utf-8')
    return filename
def is_new_style(cls):
"""
Python 2.7 has both new-style and old-style classes. Old-style classes can
be pesky in some circumstances, such as when using inheritance. Use this
function to test for whether a class is new-style. (Python 3 only has
new-style classes.)
"""
return hasattr(cls, '__class__') and ('__dict__' in dir(cls)
or hasattr(cls, '__slots__'))
# The native platform string and bytes types. Useful because ``str`` and
# ``bytes`` are redefined on Py2 by ``from future.builtins import *``.
# These aliases always refer to the interpreter's own builtins.
native_str = str
native_bytes = bytes
def istext(obj):
    """
    Deprecated. Equivalent to ``isinstance(obj, str)`` after
    ``from future.builtins import str``.
    """
    text_cls = type(u'')
    return isinstance(obj, text_cls)
def isbytes(obj):
    """
    Deprecated. Equivalent to ``isinstance(obj, bytes)`` after
    ``from future.builtins import bytes``.
    """
    bytes_cls = type(b'')
    return isinstance(obj, bytes_cls)
def isnewbytes(obj):
    """
    True only when *obj* is REALLY a ``newbytes`` instance — i.e. the
    result ``isinstance(obj, newbytes)`` would give if
    ``__instancecheck__`` were not overridden on the newbytes subclass —
    and not a Py2 native str.
    """
    # Import here to avoid a circular import.
    from future.types.newbytes import newbytes
    # TODO: generalize this so that it works with subclasses of newbytes.
    actual_cls = type(obj)
    return actual_cls == newbytes
def isint(obj):
    """
    Deprecated. True for any integral number: a Py3 ``int`` or a Py2
    ``int``/``long``.

    Prefer ``isinstance(obj, numbers.Integral)`` directly, or
    ``isinstance(obj, int)`` after ``from future.builtins import int``.
    """
    integral_cls = numbers.Integral
    return isinstance(obj, integral_cls)
def native(obj):
    """
    Return the native-type equivalent of *obj*.

    On Py3 this is a no-op.  On Py2, backported objects from
    ``future.builtins`` (str/bytes/int) define ``__native__`` and are
    converted to the corresponding native Py2 superclass (unicode/str/long);
    objects without a ``__native__`` hook are returned unchanged.
    """
    _missing = object()
    hook = getattr(obj, '__native__', _missing)
    if hook is _missing:
        return obj
    return hook()
# Implementation of exec_ is from ``six``:
# A version-agnostic exec(): on Py3 it is simply the builtin; on Py2 the
# statement form is wrapped (the inner exec string is only compiled on Py2,
# where "exec code in globs, locs" is valid syntax).
if PY3:
    import builtins
    exec_ = getattr(builtins, "exec")
else:
    def exec_(code, globs=None, locs=None):
        """Execute code in a namespace."""
        if globs is None:
            # Default to the caller's namespaces via frame introspection.
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        exec("""exec code in globs, locs""")
# Defined here for backward compatibility:
def old_div(a, b):
"""
DEPRECATED: import ``old_div`` from ``past.utils`` instead.
Equivalent to ``a / b`` on Python 2 without ``from __future__ import
division``.
TODO: generalize this to other objects (like arrays etc.)
"""
if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral):
return a // b
else:
return a / b
def as_native_str(encoding='utf-8'):
    '''
    Decorator factory: turn a function or method that returns text
    (unicode) into one returning a native platform str.

    A no-op on Py3; on Py2 the return value is encoded with *encoding*.

    Use it like this::

        from __future__ import unicode_literals

        class MyClass(object):
            @as_native_str(encoding='ascii')
            def __repr__(self):
                return next(self._iter).upper()
    '''
    if PY3:
        return lambda f: f

    def make_encoder(func):
        @functools.wraps(func)
        def encoded(*args, **kwargs):
            return func(*args, **kwargs).encode(encoding=encoding)
        return encoded
    return make_encoder
# listvalues and listitems definitions from Nick Coghlan's (withdrawn)
# PEP 496:
try:
    # EAFP feature probe: only Py2 dicts have ``iteritems``.
    dict.iteritems
except AttributeError:
    # Python 3
    def listvalues(d):
        """Return d's values as a list (Py2 ``dict.values`` semantics)."""
        return list(d.values())
    def listitems(d):
        """Return d's items as a list (Py2 ``dict.items`` semantics)."""
        return list(d.items())
else:
    # Python 2
    def listvalues(d):
        """Return d's values as a list (``dict.values`` already is one)."""
        return d.values()
    def listitems(d):
        """Return d's items as a list (``dict.items`` already is one)."""
        return d.items()
if PY3:
    def ensure_new_type(obj):
        """On Py3 every type is already a "new" type; return obj unchanged."""
        return obj
else:
    def ensure_new_type(obj):
        """Upcast a native Py2 object to its ``future`` backported type.

        Native ``str``/``unicode``/``int``/``long``/``dict`` instances are
        wrapped in ``newbytes``/``newstr``/``newint``/``newdict``
        respectively; objects that are already backported types are
        returned as-is.
        """
        from future.types.newbytes import newbytes
        from future.types.newstr import newstr
        from future.types.newint import newint
        from future.types.newdict import newdict
        native_type = type(native(obj))
        # Upcast only if the type is already a native (non-future) type
        if issubclass(native_type, type(obj)):
            # Upcast
            if native_type == str:  # i.e. Py2 8-bit str
                return newbytes(obj)
            elif native_type == unicode:
                return newstr(obj)
            elif native_type == int:
                return newint(obj)
            elif native_type == long:
                return newint(obj)
            elif native_type == dict:
                return newdict(obj)
            else:
                return obj
        else:
            # Already a new type
            # NOTE(review): only newbytes/newstr are accepted here; a
            # newint or newdict instance would trip this assert — confirm
            # that is intentional.
            assert type(obj) in [newbytes, newstr]
            return obj
# Explicit public API of this compatibility module (names exported by
# ``from future.utils import *``).
__all__ = ['PY2', 'PY26', 'PY3', 'PYPY',
           'as_native_str', 'bind_method', 'bord', 'bstr',
           'bytes_to_native_str', 'encode_filename', 'ensure_new_type',
           'exec_', 'get_next', 'getexception', 'implements_iterator',
           'is_new_style', 'isbytes', 'isidentifier', 'isint',
           'isnewbytes', 'istext', 'iteritems', 'iterkeys', 'itervalues',
           'lfilter', 'listitems', 'listvalues', 'lmap', 'lrange',
           'lzip', 'native', 'native_bytes', 'native_str',
           'native_str_to_bytes', 'old_div',
           'python_2_unicode_compatible', 'raise_',
           'raise_with_traceback', 'reraise', 'text_to_native_str',
           'tobytes', 'viewitems', 'viewkeys', 'viewvalues',
           'with_metaclass'
          ]
|
|
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import shutil
import sys
import urllib
import webbrowser
import wx
import launcher
class AppController(object):
  """Main application controller (MVC).
  Provides logic for non-task related app action, such as adding a new
  project.
  """
  def __init__(self, app):
    """Create a new AppController.
    Args:
      app: the wx.App object
    """
    self._app = app
    self._frame = None  # main view for our projects
    self._table = None  # main model for our projects
    self._preferences = None  # main prefs object for this app
    # Re-verify projects whenever the app regains focus (see OnActivateApp).
    app.Bind(wx.EVT_ACTIVATE_APP, self.OnActivateApp)
  def SetModelsViews(self, frame=None, table=None, preferences=None):
    """Set models and views (MVC) for this controller.
    We need a pointer to the main frame and main table.  We can't do
    in __init__ since those objects wants a pointer to me as well, and
    one must come first.  Convention for launcher is for M/V to take
    controllers in their __init__, and have C take it later.
    Args:
      frame: The main frame (MainFrame) for the app
      table: The main table (MainTable) for the app
      preferences: the Preferences object for the app
    """
    # Only overwrite what the caller actually supplied.
    if frame:
      self._frame = frame
    if table:
      self._table = table
    if preferences:
      self._preferences = preferences
  def Add(self, event, path=None):
    """Add an existing project. Called directly from UI.

    Args:
      event: the wx.Event that initiated this callback (unused)
      path: optional file-system path used to pre-populate the dialog
    """
    project = self._AskForProject(launcher.AddExistingController(), path)
    if project:
      self._table.AddProject(project)
      self.RefreshMainView()
  def AddNew(self, event):
    """Add a new project. Called directly from UI.

    Args:
      event: the wx.Event that initiated this callback (unused)
    """
    project = self._AskForProject(launcher.AddNewController())
    if project:
      self._table.AddProject(project)
      self.RefreshMainView()
  def _AskForProject(self, add_controller, path=None):
    """Ask the user for a project using the specified controller.
    Args:
      add_controller: A class to use as the Add Project Controller for
        our dialog.
      path: A file sytem path to pre-poplate the controller's path with.
    Returns:
      A launcher.Project, or None.
    """
    add_controller.SetPath(path)
    # Suggest a port no other project in the table is using.
    add_controller.SetPort(self._table.UniquePort())
    if add_controller.ShowModal() == wx.ID_OK:
      return add_controller.Project()
    else:
      return None
  def _ConfirmRemove(self, message, caption):
    """Confirm the deletion of the projects by asking the user.
    Args:
      message: main text for the dialog
      caption: caption for this dialog
    Returns:
      Whether we should delete
    """
    wx_rtn = wx.MessageBox(message, caption, style=wx.YES_NO|wx.ICON_QUESTION)
    if wx_rtn == wx.YES:
      return True
    return False
  def Remove(self, event):
    """Remove the currently selected application from our list.
    Does NOT delete the files from disk.  Called directly from UI.
    """
    projects = self._frame.SelectedProjects()
    if not projects:
      return
    caption = 'Really delete these projects?'
    disk_not_touched = '(Files on disk will not be touched.)'
    project_descriptions = []
    for project in projects:
      project_descriptions.append('  %s\n' % project.name)
    message = '%s\n\n%s' % (' '.join(project_descriptions),
                            disk_not_touched)
    if self._ConfirmRemove(message, caption):
      for project in projects:
        self._table.RemoveProject(project)
      # Selection is out of sync with projects, so clear it out.
      self._frame.UnselectAll()
      self.RefreshMainView()
  def _FailureMessage(self, message, caption):
    """Display a warning to the user about a non-fatal failure.
    Split into a seperate method for easier unit testing.
    """
    wx.MessageBox(message, caption, style=wx.OK|wx.ICON_ERROR)
  def Settings(self, event, settings_controller=None):
    """Show and edit settings for the currently selected application.
    Called directly from UI.
    Args:
      event: the wx.Event that initiated this callback
      settings_controller: a class to use as the Settings Controller
        for our dialog.  Likely only used for unit testing.
    """
    projects = self._frame.SelectedProjects()
    if len(projects) != 1:
      self._FailureMessage('Exactly one application should be selected '
                           'when choosing to view or edit settings.',
                           'Settings Failed')
      return
    project = projects[0]
    sc = settings_controller or launcher.SettingsController(project)
    # If possibly modified (rtn is ID_OK) we need to refresh the UI.
    if sc.ShowModal() == wx.ID_OK:
      # In MVC, we are Controller.  First, update the View.
      # (The Mac launcher has KVO to do this automagically.)
      self.RefreshMainView()
      # Then update the Model.
      self._table.SaveProjects()
  def RefreshMainView(self):
    """Refresh the main view with data from our model."""
    # TODO(jrg): ARGH!  Access of private member
    self._frame.RefreshView(self._table._projects)
  def OnOpenSDK(self, evt):
    """Called (indirectly) from the Open SDK in Explorer menu item."""
    self._app.OnOpenSDK(evt)
  def OnExit(self, evt):
    """Called (indirectly) from the Exit menu item."""
    self._app.OnExit()
  def OnPreferences(self, evt, pref_controller=None):
    """Called from the Preferences menu item.
    Although wx.EVT_* handlers only take 2 args (including self), we add
    an extra arg (with a default value) to allow easier unit testing.
    """
    controller = (pref_controller or
                  launcher.PrefController(self._preferences))
    controller.ShowModal()
  def OnAbout(self, evt, about_controller=None):
    """Called from the About menu item.
    Although wx.EVT_* handlers only take 2 args (including self), we add
    an extra arg (with a default value) to allow easier unit testing.
    """
    controller = (about_controller or launcher.AboutBoxController())
    controller.ShowModal()
  def OnActivateApp(self, evt):
    """Called when the application active state changes.
    Verify all projects on disk (to see if they still exist, or
    have had their names changed).  When done, update the view.
    """
    # Note: this is not called for initial activation (on launch).
    if evt.GetActive():
      self._table.Verify()
      self.RefreshMainView()
  def Help(self, event):
    """Help on the launcher.  Called directly from the UI."""
    helpdir = os.path.join(os.path.dirname(sys.argv[0]), 'help/index.html')
    # NOTE(review): urllib.pathname2url is a Python 2 API; on Py3 it lives
    # in urllib.request — confirm the target runtime is Python 2.
    helpdir = urllib.pathname2url(os.path.realpath(helpdir))
    webbrowser.open('file:' + helpdir)
  def AppEngineHelp(self, event):
    """Help on App Engine.  Called directly from the UI."""
    webbrowser.open('http://code.google.com/appengine/docs/')
  def Demos(self, event):
    """Called by the stub demo menu item.
    If we have any demos, the stub will have been replaced.
    """
    logging.warning('No demos are available.')
  def InstallDemoByName(self, path, dest_dir=None, prompt=True):
    """Called by a demo sub-menu item.
    Copies the specified demo and adds to the current project list.
    Args:
      path: A full path to the demo we want to use.
      dest_dir: The destination directory for the demo to be copied, or
        None to use a default.
      prompt: Solicit the user before copying the demo files.
        Exists for unit tests to avoid a modal dialog.
    """
    # Find a destination path; ensure it is unique.
    dest_dir = dest_dir or wx.StandardPaths.Get().GetDocumentsDir()
    basename = os.path.basename(path)
    existing_files = os.listdir(dest_dir)
    count = 1
    newname = basename
    # Append "-1", "-2", ... until the name is free in dest_dir.
    while newname in existing_files:
      newname = '%s-%d' % (basename, count)
      count += 1
    newpath = os.path.join(dest_dir, newname)
    if prompt:
      # Ask the user before doing anything.
      caption = 'Install Google App Engine demo?'
      message = ('Google App Engine Launcher wishes to copy the demo "%s" '
                 'to %s' % (basename, newpath))
      allow_copy_dialog = wx.MessageDialog(None, message, caption,
                                           wx.OK | wx.CANCEL | wx.ICON_QUESTION)
      if allow_copy_dialog.ShowModal() != wx.ID_OK:
        return
    # Copy over, create a project, and add it to our table.
    shutil.copytree(path, newpath)
    project = launcher.Project(newpath, self._table.UniquePort())
    self._table.AddProject(project)
    self.RefreshMainView()
  def CheckForUpdates(self, event):
    """Explicit "check for updates" request; always shows a result dialog."""
    # NOTE(review): reaches into a private member of the app object.
    self._app._VersionCheck(always_dialog=True)
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
from . import ntp_keys
from . import servers
class ntp(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-system - based on the path /system/ntp. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: Top-level container for NTP configuration and state
    """

    # NOTE: generated code — change the YANG model and regenerate rather
    # than editing this class by hand.
    # __slots__ keeps instances small and catches typo'd attribute writes.
    __slots__ = (
        "_path_helper", "_extmethods", "__config", "__state", "__ntp_keys", "__servers"
    )
    _yang_name = "ntp"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        """Initialise the four child containers; optionally copy from a
        compatible object passed as the single positional argument."""
        self._path_helper = False
        self._extmethods = False
        # Each child container is wrapped in YANGDynClass so that changes
        # are tracked and paths are registered.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="container",
            is_config=True,
        )
        self.__ntp_keys = YANGDynClass(
            base=ntp_keys.ntp_keys,
            is_container="container",
            yang_name="ntp-keys",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="container",
            is_config=True,
        )
        self.__servers = YANGDynClass(
            base=servers.servers,
            is_container="container",
            yang_name="servers",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="container",
            is_config=True,
        )
        load = kwargs.pop("load", None)
        if args:
            # Copy-constructor path: mirror the elements of a compatible object.
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Only copy elements that were actually changed on the source.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        """Return the YANG path of this container as a list of segments."""
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return ["system", "ntp"]
    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /system/ntp/config (container)
        YANG Description: Configuration data for NTP client.
        """
        return self.__config
    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /system/ntp/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.
        YANG Description: Configuration data for NTP client.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=config.config,
                is_container="container",
                yang_name="config",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/system",
                defining_module="openconfig-system",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            # pyangbind convention: raise ValueError with a structured dict
            # describing the expected type.
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=True)""",
                }
            )
        self.__config = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_config(self):
        """Reset config to a fresh (default) container."""
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="container",
            is_config=True,
        )
    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /system/ntp/state (container)
        YANG Description: Operational state data for NTP services.
        """
        return self.__state
    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /system/ntp/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.
        YANG Description: Operational state data for NTP services.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/system",
                defining_module="openconfig-system",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=True)""",
                }
            )
        self.__state = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_state(self):
        """Reset state to a fresh (default) container."""
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="container",
            is_config=True,
        )
    def _get_ntp_keys(self):
        """
        Getter method for ntp_keys, mapped from YANG variable /system/ntp/ntp_keys (container)
        YANG Description: Enclosing container for list of NTP authentication keys
        """
        return self.__ntp_keys
    def _set_ntp_keys(self, v, load=False):
        """
        Setter method for ntp_keys, mapped from YANG variable /system/ntp/ntp_keys (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_ntp_keys is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_ntp_keys() directly.
        YANG Description: Enclosing container for list of NTP authentication keys
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=ntp_keys.ntp_keys,
                is_container="container",
                yang_name="ntp-keys",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/system",
                defining_module="openconfig-system",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """ntp_keys must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=ntp_keys.ntp_keys, is_container='container', yang_name="ntp-keys", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=True)""",
                }
            )
        self.__ntp_keys = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_ntp_keys(self):
        """Reset ntp_keys to a fresh (default) container."""
        self.__ntp_keys = YANGDynClass(
            base=ntp_keys.ntp_keys,
            is_container="container",
            yang_name="ntp-keys",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="container",
            is_config=True,
        )
    def _get_servers(self):
        """
        Getter method for servers, mapped from YANG variable /system/ntp/servers (container)
        YANG Description: Enclosing container for the list of NTP servers
        """
        return self.__servers
    def _set_servers(self, v, load=False):
        """
        Setter method for servers, mapped from YANG variable /system/ntp/servers (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_servers is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_servers() directly.
        YANG Description: Enclosing container for the list of NTP servers
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=servers.servers,
                is_container="container",
                yang_name="servers",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/system",
                defining_module="openconfig-system",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """servers must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=servers.servers, is_container='container', yang_name="servers", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=True)""",
                }
            )
        self.__servers = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_servers(self):
        """Reset servers to a fresh (default) container."""
        self.__servers = YANGDynClass(
            base=servers.servers,
            is_container="container",
            yang_name="servers",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="container",
            is_config=True,
        )
    # Expose containers as properties; ``property`` is taken from the
    # builtins module because the attribute names below shadow it in this
    # class body's namespace.
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)
    ntp_keys = __builtin__.property(_get_ntp_keys, _set_ntp_keys)
    servers = __builtin__.property(_get_servers, _set_servers)

    # Ordered registry of the container's child elements, used by the
    # copy-constructor path in __init__ and by PybindBase serialisation.
    _pyangbind_elements = OrderedDict(
        [
            ("config", config),
            ("state", state),
            ("ntp_keys", ntp_keys),
            ("servers", servers),
        ]
    )
|
|
"""
Differential operators, for all sympy expressions
For multivector-customized differential operators, see :class:`galgebra.mv.Dop`.
"""
import copy
import numbers
import warnings
from sympy import Symbol, S, Add, simplify, diff, Expr
from . import printer
from . import metric
from . import mv
from .printer import ZERO_STR
def _consolidate_terms(terms):
    """
    Remove zero coefs and consolidate coefs with repeated pdiffs.
    """
    coefs = []
    pdiffs = []
    for coef, pd in terms:
        if coef == S(0):
            continue
        try:
            # Same pdiff seen before: accumulate onto its coefficient.
            coefs[pdiffs.index(pd)] += coef
        except ValueError:
            coefs.append(coef)
            pdiffs.append(pd)
    return tuple(zip(coefs, pdiffs))
def _merge_terms(terms1, terms2):
    """ Concatenate and consolidate two sets of already-consolidated terms """
    merged = []  # [coef, pdiff] pairs, in first-seen order
    for coef, pdiff in list(terms1) + list(terms2):
        for entry in merged:
            if entry[1] == pdiff:
                entry[0] += coef
                break
        else:
            merged.append([coef, pdiff])
    # remove zeros
    return [(coef, pdiff) for coef, pdiff in merged if coef != S(0)]
################ Scalar Partial Differential Operator Class ############
class Sdop(object):
    """
    Scalar differential operator is of the form (Einstein summation)
    .. math:: D = c_{i}*D_{i}
    where the :math:`c_{i}`'s are scalar coefficient (they could be functions)
    and the :math:`D_{i}`'s are partial differential operators (:class:`Pdop`).
    Attributes
    ----------
    terms : tuple of tuple
        the structure :math:`((c_{1},D_{1}),(c_{2},D_{2}), ...)`
    """

    # When True, multi-term string forms are wrapped in parentheses
    # (see Sdop_str).
    str_mode = False
    def TSimplify(self):
        """Return a new Sdop with metric.Simp applied to each coefficient."""
        return Sdop([
            (metric.Simp.apply(coef), pdiff) for (coef, pdiff) in self.terms
        ])
    @staticmethod
    def consolidate_coefs(sdop):
        """
        Remove zero coefs and consolidate coefs with repeated pdiffs.
        """
        # Accepts either an Sdop (returns an Sdop) or a raw term list
        # (returns a term list).
        if isinstance(sdop, Sdop):
            return Sdop(_consolidate_terms(sdop.terms))
        else:
            return _consolidate_terms(sdop)
    def simplify(self, modes=simplify):
        """Return a new Sdop with *modes* applied to each coefficient.

        ``modes`` may be a single sympy function or a list of them.
        """
        return Sdop([
            (metric.apply_function_list(modes, coef), pdiff)
            for coef, pdiff in self.terms
        ])
    def _with_sorted_terms(self):
        # Sort terms by derivative order/symbols for stable printing.
        new_terms = sorted(self.terms, key=lambda term: Pdop.sort_key(term[1]))
        return Sdop(new_terms)
    def Sdop_str(self):
        """Plain-text representation used by the galgebra printer."""
        if len(self.terms) == 0:
            return ZERO_STR
        self = self._with_sorted_terms()
        s = ''
        for (coef, pdop) in self.terms:
            coef_str = printer.latex(coef)
            pd_str = printer.latex(pdop)
            if coef == S(1):
                s += pd_str
            elif coef == S(-1):
                s += '-' + pd_str
            else:
                # Parenthesize sums so operator precedence reads correctly.
                if isinstance(coef, Add):
                    s += '(' + coef_str + ')*' + pd_str
                else:
                    s += coef_str + '*' + pd_str
            s += ' + '
        s = s.replace('+ -','- ')
        s = s[:-3]  # strip the trailing ' + '
        if Sdop.str_mode:
            if len(self.terms) > 1 or isinstance(self.terms[0][0], Add):
                s = '(' + s + ')'
        return s
    def Sdop_latex_str(self):
        """LaTeX representation used by the galgebra printer."""
        if len(self.terms) == 0:
            return ZERO_STR
        self = self._with_sorted_terms()
        s = ''
        for (coef, pdop) in self.terms:
            coef_str = printer.latex(coef)
            pd_str = printer.latex(pdop)
            if coef == S(1):
                if pd_str == '':
                    s += '1'
                else:
                    s += pd_str
            elif coef == S(-1):
                if pd_str == '':
                    s += '-1'
                else:
                    s += '-' + pd_str
            else:
                if isinstance(coef, Add):
                    s += r'\left ( ' + coef_str + r'\right ) ' + pd_str
                else:
                    s += coef_str + ' ' + pd_str
            s += ' + '
        s = s.replace('+ -','- ')
        return s[:-3]  # strip the trailing ' + '
    def _repr_latex_(self):
        # IPython/Jupyter rich-display hook.
        latex_str = printer.GaLatexPrinter.latex(self)
        return ' ' + latex_str + ' '
    def __str__(self):
        if printer.GaLatexPrinter.latex_flg:
            Printer = printer.GaLatexPrinter
        else:
            Printer = printer.GaPrinter
        return Printer().doprint(self)
    def __repr__(self):
        return str(self)
    def __init__(self, *args):
        """Build from a Symbol, a (coef list, Pdop list) pair, or a term list.

        Raises:
            ValueError: if the two lists differ in length or the argument
                shapes are not one of the supported forms.
        """
        if len(args) == 1 and isinstance(args[0],Symbol):  # Simple Pdop of order 1
            self.terms = ((S(1), Pdop(args[0])),)
        else:
            if len(args) == 2 and isinstance(args[0],list) and isinstance(args[1],list):
                if len(args[0]) != len(args[1]):
                    raise ValueError('In Sdop.__init__ coefficent list and Pdop list must be same length.')
                self.terms = tuple(zip(args[0], args[1]))
            elif len(args) == 1 and isinstance(args[0], (list, tuple)):
                self.terms = tuple(args[0])
            else:
                raise ValueError('In Sdop.__init__ length of args must be 1 or 2 args = '+str(args))
    def __call__(self, arg):
        """Apply the operator to *arg* (another Sdop, or anything a Pdop accepts)."""
        if isinstance(arg, Sdop):
            # Operator composition: distribute each of our terms over arg's.
            terms = []
            for (coef, pdiff) in self.terms:
                new_terms = pdiff(arg.terms)
                new_terms = [(coef * c, p) for c, p in new_terms]
                terms += new_terms
            product = Sdop(terms)
            return Sdop.consolidate_coefs(product)
        else:
            # Apply each partial derivative and sum the weighted results.
            return sum([coef * pdiff(arg) for coef, pdiff in self.terms], S(0))
    def __neg__(self):
        return Sdop([(-coef, pdiff) for coef, pdiff in self.terms])
    @staticmethod
    def Add(sdop1, sdop2):
        """Add two Sdops; a plain scalar is promoted to a multiplicative term."""
        if isinstance(sdop1, Sdop) and isinstance(sdop2, Sdop):
            return Sdop(_merge_terms(sdop1.terms, sdop2.terms))
        else:
            # convert values to multiplicative operators
            if isinstance(sdop1, Sdop):
                sdop2 = Sdop([(sdop2, Pdop({}))])
            elif isinstance(sdop2, Sdop):
                sdop1 = Sdop([(sdop1, Pdop({}))])
            else:
                raise TypeError("Neither argument is a Dop instance")
            return Sdop.Add(sdop1, sdop2)
    def __eq__(self, other):
        # Two Sdops are equal when their difference consolidates to no terms.
        # NOTE(review): __eq__ without __hash__ makes Sdop unhashable.
        if isinstance(other, Sdop):
            diff = self - other  # NOTE: shadows sympy.diff locally; harmless here
            return len(diff.terms) == 0
        else:
            return NotImplemented
    def __add__(self, sdop):
        return Sdop.Add(self, sdop)
    def __radd__(self, sdop):
        return Sdop.Add(sdop, self)
    def __sub__(self, sdop):
        return Sdop.Add(self, -sdop)
    def __rsub__(self, sdop):
        return Sdop.Add(-self, sdop)
    def __mul__(self, sdopr):
        # alias for applying the operator
        return self.__call__(sdopr)
    def __rmul__(self, sdop):
        # Left-multiplication by a scalar just scales every coefficient.
        terms = [(sdop * coef, pdiff) for coef, pdiff in self.terms]
        return Sdop(terms)
#################### Partial Derivative Operator Class #################
class Pdop(object):
    r"""
    Partial derivative operatorp.
    The partial derivatives are of the form
    .. math::
        \partial_{i_{1}...i_{n}} =
        \frac{\partial^{i_{1}+...+i_{n}}}{\partial{x_{1}^{i_{1}}}...\partial{x_{n}^{i_{n}}}}.
    If :math:`i_{j} = 0` then the partial derivative does not contain the
    :math:`x^{i_{j}}` coordinate.
    Attributes
    ----------
    pdiffs : dict
        A dictionary where coordinates are keys and key value are the number of
        times one differentiates with respect to the key.
    order : int
        Total number of differentiations.
        When this is zero (i.e. when :attr:`pdiffs` is ``{}``) then this object
        is the identity operator, and returns its operand unchanged.
    """
    def sort_key(self, order=None):
        """Key for ordering Pdops: by total order, then by symbol sort keys."""
        return (
            # lower order derivatives first
            self.order,
            # sorted by symbol after that, after expansion
            sorted([
                x.sort_key(order)
                for x, k in self.pdiffs.items()
                for i in range(k)
            ])
        )
    def __eq__(self,A):
        """Equal to another Pdop with the same pdiffs, or to S(1) when order 0.

        NOTE(review): defining __eq__ without __hash__ makes Pdop
        unhashable, so instances cannot be dict keys or set members.
        """
        if isinstance(A, Pdop) and self.pdiffs == A.pdiffs:
            return True
        else:
            if len(self.pdiffs) == 0 and A == S(1):
                return True
            return False
    def __init__(self, __arg):
        """
        The partial differential operator is a partial derivative with
        respect to a set of real symbols (variables).

        Accepts a dict mapping symbols to differentiation counts, or a
        single Symbol (first-order derivative).
        """
        # galgebra 0.4.5
        if __arg is None:
            warnings.warn(
                "`Pdop(None)` is deprecated, use `Pdop({})` instead",
                DeprecationWarning, stacklevel=2)
            __arg = {}
        if isinstance(__arg, dict):  # Pdop defined by dictionary
            self.pdiffs = __arg
        elif isinstance(__arg, Symbol):  # First order derivative with respect to symbol
            self.pdiffs = {__arg: 1}
        else:
            raise TypeError('A dictionary or symbol is required, got {!r}'.format(__arg))
        self.order = sum(self.pdiffs.values())
    def factor(self):
        """
        If partial derivative operator self.order > 1 factor out first
        order differential operator. Needed for application of partial
        derivative operator to product of sympy expression and partial
        differential operator. For example if ``D = Pdop({x:3})`` then::
            (Pdop({x:2}), Pdop({x:1})) = D.factor()

        When ``self.order == 1`` returns ``(S(0), self)``; the ``S(0)``
        sentinel is what __call__'s list branch tests with ``D == 0`` to
        terminate its loop.
        """
        if self.order == 1:
            return S(0), self
        else:
            new_pdiffs = self.pdiffs.copy()
            x, n = next(iter(new_pdiffs.items()))
            if n == 1:
                del new_pdiffs[x]
            else:
                new_pdiffs[x] -= 1
            return Pdop(new_pdiffs), Pdop(x)
    def __call__(self, arg):
        """
        Calculate nth order partial derivative (order defined by
        self) of :class:`~galgebra.mv.Mv`, :class:`Dop`, :class:`Sdop` or sympy expression
        """
        if self.pdiffs == {}:
            return arg  # result is Pdop identity (1)
        if isinstance(arg, Pdop):  # arg is Pdop
            if arg.pdiffs == {}:  # arg is one
                return self
            #return S(0)  # derivative is zero
            else:  # arg is partial derivative
                # Compose: add the differentiation counts per symbol.
                pdiffs = copy.copy(arg.pdiffs)
                for key in self.pdiffs:
                    if key in pdiffs:
                        pdiffs[key] += self.pdiffs[key]
                    else:
                        pdiffs[key] = self.pdiffs[key]
                return Pdop(pdiffs)  # result is Pdop
        elif isinstance(arg, mv.Mv):  # arg is multivector
            ga = arg.Ga
            for x in self.pdiffs:
                for i in range(self.pdiffs[x]):
                    arg = ga.pDiff(arg, x)
            return arg  # result is multivector
        elif isinstance(arg, (Expr, Symbol, numbers.Number)):  # arg is sympy expression
            for x in self.pdiffs:
                arg = diff(arg,x,self.pdiffs[x])
            return arg  # derivative is sympy expression
        elif isinstance(arg, (list, tuple)):  # arg is list of tuples (coef, partial derivative)
            # Apply one first-order factor at a time (product rule),
            # splitting each term into (d coef, pd) and (coef, d pd).
            terms = list(arg)
            D = self
            while True:
                D, D0 = D.factor()
                for k, term in enumerate(terms):
                    dc = D0(term[0])
                    pd = D0(term[1])
                    tmp = []
                    if dc != 0:
                        tmp.append((dc,term[1]))
                    if pd != 0 :
                        tmp.append((term[0],pd))
                    terms[k] = tmp
                terms = [i for o in terms for i in o]  # flatten list one level
                if D == 0:
                    # factor() returned its S(0) sentinel: all orders applied.
                    break
            terms = Sdop.consolidate_coefs(terms)
            return terms  # result is list of tuples (coef, partial derivative)
        elif isinstance(arg, Sdop):  # arg is scalar differential operator
            return self(arg.terms)  # result is list of tuples (coef, partial derivative)
        else:
            raise ValueError('In Pdop.__call__ type(arg) = ' + str(type(arg)) + ' not allowed.')
    def __mul__(self, pdop):  # functional product of self and arg (self*arg)
        return self(pdop)
    def __rmul__(self, pdop):  # functional product of arg and self (arg*self)
        if isinstance(pdop, Pdop):
            return pdop(self)
        # A non-Pdop left factor becomes the coefficient of an Sdop term.
        return Sdop([(pdop, self)])
    def Pdop_str(self):
        """Plain-text representation, e.g. ``D{x}^2{y}``."""
        if self.order == 0:
            return 'D{}'
        s = 'D'
        for x in self.pdiffs:
            s += '{' + str(x) + '}'
            n = self.pdiffs[x]
            if n > 1:
                s += '^' + str(n)
        return s
    def Pdop_latex_str(self):
        """LaTeX representation as a \\frac of partials; '' for the identity."""
        if self.order == 0:
            return ''
        s = r'\frac{\partial'
        if self.order > 1:
            s += '^{' + printer.latex(self.order) + '}'
        s += '}{'
        keys = list(self.pdiffs.keys())
        keys.sort(key=lambda x: x.sort_key())
        for key in keys:
            i = self.pdiffs[key]
            s += r'\partial ' + printer.latex(key)
            if i > 1:
                s += '^{' + printer.latex(i) + '}'
        s += '}'
        return s
    def _repr_latex_(self):
        # IPython/Jupyter rich-display hook.
        latex_str = printer.GaLatexPrinter.latex(self)
        return ' ' + latex_str + ' '
    def __str__(self):
        if printer.GaLatexPrinter.latex_flg:
            Printer = printer.GaLatexPrinter
        else:
            Printer = printer.GaPrinter
        return Printer().doprint(self)
    def __repr__(self):
        return str(self)
|
|
from extendable_cards.visual_main import card_interaction
from extendable_cards.lib.cards import Card, CardOrganizer, CardController
#=============== TEST DRAW CARDS TO HAND =====================#
def test_draw_none_one():
    """'d' with no count draws nothing; 'd 1' moves one card deck -> hand."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #draw # or d # -- to draw that many cards
    line = "d "
    result = card_interaction(control, line)
    assert result
    # Missing count: the command is accepted but nothing moves.
    assert control.hand.get_len() == 0
    assert control.deck.get_len() == 4
    line = "d 1"
    result = card_interaction(control, line)
    assert result
    assert control.hand.get_len() == 1
    assert control.deck.get_len() == 3
    assert control.hand.get_card("test_card_1")
def test_draw_many():
    """'draw 3' moves three cards deck -> hand, leaving only the last card."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #draw # or d # -- to draw that many cards
    line = "draw 3"
    result = card_interaction(control, line)
    assert result
    assert control.hand.get_len() == 3
    assert control.deck.get_len() == 1
    # The first three cards were drawn, so only "test" remains in the deck.
    assert control.deck.get_card("test")
    assert not control.deck.get_card("test_card_2")
def test_draw_more_than_deck():
    """Drawing more cards than the deck holds ('d 8' with 4 cards) drains the deck.

    Renamed from a duplicate ``test_draw_none_one`` definition: the duplicate
    name shadowed the first test of that name, so pytest silently collected
    only one of the two.
    """
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #draw # or d # -- to draw that many cards
    line = "d 8"
    result = card_interaction(control, line)
    assert result
    # Over-draw is clamped: all four cards end up in the hand.
    assert control.hand.get_len() == 4
    assert control.deck.get_len() == 0
#=============== TEST SHUFFLE CARDS =====================#
def test_shuffle():
    """'s' shuffles the deck; across 10 shuffles the top card should change at least once."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #shuffle or s -- to shuffle deck
    line = "s"
    shuffled = False
    for i in range(10):
        result = card_interaction(control, line)
        assert result
        # Probabilistic check: a single shuffle may leave the order unchanged,
        # but ten shuffles all keeping "test_card_1" on top is vanishingly unlikely.
        shuffled = (control.deck.get_top_cards(1)[0].name != "test_card_1") or shuffled
    assert shuffled
#=============== TEST PLAY CARDS =====================#
def test_play_hand_empty():
    """'ph' with an empty hand is a no-op: the named card stays in the deck."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #ph <card name> -- to play card from hand
    line = "ph test_card_1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 4
    assert control.hand.get_len() == 0
    assert control.in_play.get_len() == 0
def test_play_hand_substring():
    """'ph' matches card names by substring.

    "tes" plays nothing (presumably because it matches several cards — confirm
    against card_interaction), while "crazy" uniquely matches one card and plays it.
    """
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'hand'})
    control = CardController(hand=standard_deck)
    #ph <card name> -- to play card from hand
    line = "ph tes"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.hand.get_len() == 4
    assert control.in_play.get_len() == 0
    line = "ph crazy"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.hand.get_len() == 3
    assert control.in_play.get_len() == 1
    assert control.in_play.get_card("crazy_card_name_boogie")
def test_play_hand_fullname():
    """An exact card name plays that card even when it is a prefix of other names."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'hand'})
    control = CardController(hand=standard_deck)
    #ph <card name> -- to play card from hand
    line = "ph test"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.hand.get_len() == 3
    assert control.in_play.get_len() == 1
    # "test" is exact despite also being a substring of test_card_1/test_card_2.
    assert control.in_play.get_top_cards(1)[0].name == "test"
    #ph <card name> -- to play card from hand
    line = "ph test_card_2"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.hand.get_len() == 2
    assert control.in_play.get_len() == 2
    assert control.in_play.get_card("test_card_2")
def test_play_deck_invalid_entry():
    """'pd' with a missing or unknown card name leaves all zones unchanged."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #pd <card name> -- to play a card from deck
    line = "pd "
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 4
    assert control.hand.get_len() == 0
    assert control.in_play.get_len() == 0
    #pd <card name> -- to play a card from deck
    line = "pd crazy_test"
    result = card_interaction(control, line)
    assert result
    # "crazy_test" is not a substring of any card name: nothing moves.
    assert control.deck.get_len() == 4
    assert control.hand.get_len() == 0
    assert control.in_play.get_len() == 0
def test_play_deck_invalid_substring():
    """A substring shared by several cards plays nothing from the deck."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #pd <card name> -- to play a card from deck
    line = "pd test_card"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 4
    assert control.hand.get_len() == 0
    assert control.in_play.get_len() == 0
    #pd <card name> -- to play a card from deck
    line = "pd te"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 4
    assert control.hand.get_len() == 0
    assert control.in_play.get_len() == 0
def test_play_deck_empty():
    """'pd' with an empty deck is a no-op even though the hand holds the cards."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'hand'})
    control = CardController(hand=standard_deck)
    #pd <card name> -- to play a card from deck
    line = "pd "
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.hand.get_len() == 4
    assert control.in_play.get_len() == 0
def test_play_deck_substring():
    """A uniquely-matching substring plays that card from the deck."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #pd <card name> -- to play a card from deck
    line = "pd crazy"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 3
    assert control.hand.get_len() == 0
    assert control.in_play.get_len() == 1
def test_play_deck_fullname():
    """An exact name plays the right card from the deck, even a prefix of others."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #pd <card name> -- to play a card from deck
    line = "pd test"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 3
    assert control.hand.get_len() == 0
    assert control.in_play.get_len() == 1
    assert control.in_play.get_top_cards(1)[0].name == "test"
    #pd <card name> -- to play a card from deck
    line = "pd crazy_card_name_boogie"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 2
    assert control.hand.get_len() == 0
    assert control.in_play.get_len() == 2
    # "test" is still on top of in_play; the new card went underneath it.
    assert control.in_play.get_top_cards(1)[0].name == "test"
    assert control.in_play.get_card("crazy_card_name_boogie")
def test_play_deck_top():
    """'pt' with no count is a no-op; 'pt 1' plays the top deck card."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #pt # -- to play that many cards from top of deck
    line = "pt "
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 4
    assert control.hand.get_len() == 0
    assert control.in_play.get_len() == 0
    #pt # -- to play that many cards from top of deck
    line = "pt 1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 3
    assert control.hand.get_len() == 0
    assert control.in_play.get_len() == 1
    # The first card in the constructed deck is the one played.
    assert control.in_play.get_top_cards(1)[0].name == "test_card_1"
def test_play_deck_top_many():
    """'pt 6' with only four cards plays the whole deck into play."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #pt # -- to play that many cards from top of deck
    line = "pt 6"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.hand.get_len() == 0
    assert control.in_play.get_len() == 4
def test_play_deck_top_empty():
    """'pt 1' with an empty deck moves nothing."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'hand'})
    control = CardController(hand=standard_deck)
    #pt # -- to play that many cards from top of deck
    line = "pt 1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.hand.get_len() == 4
    assert control.in_play.get_len() == 0
#=============== TEST TOSS CARD INTO DISCARD =====================#
def test_toss_hand():
    """'th' with no name is a no-op; 'th <name>' discards that card from the hand."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'hand'})
    control = CardController(hand=standard_deck)
    #th <card name> -- to discard that card from hand
    line = "th "
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.hand.get_len() == 4
    assert control.discard.get_len() == 0
    #th <card name> -- to discard that card from hand
    line = "th test_card_1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.hand.get_len() == 3
    assert control.discard.get_len() == 1
    assert control.discard.get_card("test_card_1")
def test_toss_hand_empty():
    """'th' with an empty hand discards nothing; the card stays in the deck."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #th <card name> -- to discard that card from hand
    line = "th test_card_1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 4
    assert control.hand.get_len() == 0
    assert control.discard.get_len() == 0
def test_toss_hand_substring():
    """A uniquely-matching substring discards that card from the hand."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'hand'})
    control = CardController(hand=standard_deck)
    #th <card name> -- to discard that card from hand
    line = "th crazy"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.hand.get_len() == 3
    assert control.discard.get_len() == 1
def test_toss_deck_empty():
    """'td 1' with an empty deck discards nothing."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'hand'})
    control = CardController(hand=standard_deck)
    line = "td 1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.hand.get_len() == 4
    assert control.discard.get_len() == 0
def test_toss_deck():
    """'td' with no count is a no-op; 'td 1' discards the top deck card."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #td # -- to discard that many cards from top of deck
    line = "td "
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 4
    assert control.hand.get_len() == 0
    assert control.discard.get_len() == 0
    #td # -- to discard that many cards from top of deck
    line = "td 1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 3
    assert control.hand.get_len() == 0
    assert control.discard.get_len() == 1
    assert control.discard.get_card("test_card_1")
def test_toss_deck_many():
    """'td 6' with only four cards discards the whole deck."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #td # -- to discard that many cards from top of deck
    line = "td 6"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.hand.get_len() == 0
    assert control.discard.get_len() == 4
def test_toss_play():
    """'tp' with no name is a no-op; 'tp <name>' discards that card from play."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'in play'})
    control = CardController(in_play=standard_deck)
    #tp <card name> -- to discard a card from play
    line = "tp "
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.in_play.get_len() == 4
    assert control.discard.get_len() == 0
    #tp <card name> -- to discard a card from play
    line = "tp test_card_1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.in_play.get_len() == 3
    assert control.discard.get_len() == 1
    assert control.discard.get_card("test_card_1")
def test_toss_play_empty():
    """'tp' with nothing in play discards nothing; the card stays in the deck."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #tp <card name> -- to discard a card from play
    line = "tp test_card_1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 4
    assert control.in_play.get_len() == 0
    assert control.discard.get_len() == 0
def test_toss_play_substring():
    """A uniquely-matching substring discards that card from play."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'in play'})
    control = CardController(in_play=standard_deck)
    #tp <card name> -- to discard a card from play
    line = "tp crazy"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.in_play.get_len() == 3
    assert control.discard.get_len() == 1
#=============== TEST REMOVE CARD FROM GAME =====================#
def test_kill_card():
    """'kill <name>' removes the card from the game: it leaves every zone.

    Renamed from ``test_toss_play_empty``: this block actually exercises the
    'kill' command (see the section header above it), and its old name
    duplicated — and therefore shadowed — the toss-play test defined earlier,
    so pytest silently dropped one of them.
    """
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #kill <card name> or k <card name> -- to remove card from deck
    line = "kill test_card_2"
    result = card_interaction(control, line)
    assert result
    # Killed card is gone entirely — not in deck, play, discard, or hand.
    assert control.deck.get_len() == 3
    assert control.in_play.get_len() == 0
    assert control.discard.get_len() == 0
    assert control.hand.get_len() == 0
    assert not control.deck.get_card("test_card_2")
#=============== TEST BRING CARDS BACK FROM DISCARD =====================#
#bh <card name> -- to bring card from discard to hand
def test_bring_back_to_hand():
    """'bh' with no name is a no-op; 'bh <name>' moves a card discard -> hand."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'discard'})
    control = CardController(discard=standard_deck)
    #bh <card name> -- to bring card from discard to hand
    line = "bh "
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.discard.get_len() == 4
    assert control.hand.get_len() == 0
    #bh <card name> -- to bring card from discard to hand
    line = "bh test_card_1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.discard.get_len() == 3
    assert control.hand.get_len() == 1
    assert control.hand.get_card("test_card_1")
def test_bring_back_to_hand_empty():
    """'bh' with an empty discard pile moves nothing."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #bh <card name> -- to bring card from discard to hand
    line = "bh test_card_1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 4
    assert control.hand.get_len() == 0
    assert control.discard.get_len() == 0
def test_bring_back_to_hand_substring():
    """A uniquely-matching substring brings that card discard -> hand."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'discard'})
    control = CardController(discard=standard_deck)
    #bh <card name> -- to bring card from discard to hand
    line = "bh crazy"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.discard.get_len() == 3
    assert control.hand.get_len() == 1
def test_bring_back_to_deck():
    """'bd' with no name is a no-op; 'bd <name>' moves a card discard -> deck."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'discard'})
    control = CardController(discard=standard_deck)
    #bd <card name> -- to bring card from discard to deck
    line = "bd "
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.discard.get_len() == 4
    #bd <card name> -- to bring card from discard to deck
    line = "bd test_card_1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 1
    assert control.discard.get_len() == 3
    assert control.deck.get_card("test_card_1")
def test_bring_back_to_deck_empty():
    """'bd' with an empty discard pile moves nothing."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #bd <card name> -- to bring card from discard to deck
    line = "bd test_card_1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 4
    assert control.discard.get_len() == 0
def test_bring_back_to_deck_substring():
    """A uniquely-matching substring brings that card discard -> deck."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'discard'})
    control = CardController(discard=standard_deck)
    #bd <card name> -- to bring card from discard to deck
    line = "bd crazy"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 1
    assert control.discard.get_len() == 3
#####
def test_bring_back_to_deck_top():
    """'bdt' with no name is a no-op; 'bdt <name>' puts the card on top of the deck."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'discard'})
    control = CardController(discard=standard_deck)
    #bdt <card name> -- to bring card from discard to top of deck
    line = "bdt "
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.discard.get_len() == 4
    #bdt <card name> -- to bring card from discard to top of deck
    line = "bdt test_card_1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 1
    assert control.discard.get_len() == 3
    # Unlike 'bd', the card must arrive on TOP of the deck.
    assert control.deck.get_top_cards(1)[0].name == "test_card_1"
def test_bring_back_to_deck_top_empty():
    """'bdt' with an empty discard pile moves nothing."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #bdt <card name> -- to bring card from discard to top of deck
    line = "bdt test_card_1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 4
    assert control.discard.get_len() == 0
def test_bring_back_to_deck_top_substring():
    """A uniquely-matching substring brings that card to the top of the deck."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'discard'})
    control = CardController(discard=standard_deck)
    #bdt <card name> -- to bring card from discard to top of deck
    line = "bdt crazy"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 1
    assert control.discard.get_len() == 3
#=============== TEST RETURN CARD FROM IN PLAY =====================#
def test_return_to_deck():
    """'rd' with no name is a no-op; 'rd <name>' returns a card play -> deck."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'in play'})
    control = CardController(in_play=standard_deck)
    #rd <card name> -- to return card from play to top of deck
    line = "rd "
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.in_play.get_len() == 4
    assert control.discard.get_len() == 0
    #rd <card name> -- to return card from play to top of deck
    line = "rd test_card_1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 1
    assert control.in_play.get_len() == 3
    assert control.discard.get_len() == 0
    assert control.deck.get_card("test_card_1")
def test_return_to_deck_empty():
    """'rd' with nothing in play moves nothing."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #rd <card name> -- to return card from play to top of deck
    line = "rd test_card_1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 4
    assert control.in_play.get_len() == 0
    assert control.discard.get_len() == 0
def test_return_to_deck_substring():
    """A uniquely-matching substring returns that card play -> deck."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'in play'})
    control = CardController(in_play=standard_deck)
    #rd <card name> -- to return card from play to top of deck
    line = "rd crazy"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 1
    assert control.in_play.get_len() == 3
    assert control.discard.get_len() == 0
def test_return_to_hand():
    """'rh' with no name is a no-op; 'rh <name>' returns a card play -> hand."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'in play'})
    control = CardController(in_play=standard_deck)
    #rh <card name> -- to return card from play to hand
    line = "rh "
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.in_play.get_len() == 4
    assert control.discard.get_len() == 0
    #rh <card name> -- to return card from play to hand
    line = "rh test_card_1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.in_play.get_len() == 3
    assert control.hand.get_len() == 1
    assert control.hand.get_card("test_card_1")
def test_return_to_hand_empty():
    """'rh' with nothing in play moves nothing."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #rh <card name> -- to return card from play to hand
    line = "rh test_card_1"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 4
    assert control.in_play.get_len() == 0
    assert control.discard.get_len() == 0
def test_return_to_hand_substring():
    """A uniquely-matching substring returns that card play -> hand."""
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'in play'})
    control = CardController(in_play=standard_deck)
    #rh <card name> -- to return card from play to hand
    line = "rh crazy"
    result = card_interaction(control, line)
    assert result
    assert control.deck.get_len() == 0
    assert control.in_play.get_len() == 3
    assert control.hand.get_len() == 1
#=============== TEST QUIT GAME =====================#
def test_quit_game():
    """Placeholder for the quit command ('q').

    Renamed from a third ``test_toss_play_empty`` definition (name collision
    with two earlier tests; pytest only collects the last definition of a name).
    Capturing the sys.exit raised by quitting is still TODO — until then this
    only sets up the command line without dispatching it.
    """
    obj_cards = [Card("test_card_1"), Card("test_card_2"), Card("crazy_card_name_boogie"), Card("test")]
    standard_deck=CardOrganizer(obj_cards, context={'label': 'deck'})
    control = CardController(deck=standard_deck)
    #quit or q -- to Quit
    line = "q"
    #TODO figure out how to capture sys.exit
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from keystoneclient import exceptions as keystone_exceptions
import mock
from rally.cli.commands import deployment
from rally.cli import envutils
from rally.common import objects
from rally import consts
from rally import exceptions
from tests.unit import fakes
from tests.unit import test
class DeploymentCommandsTestCase(test.TestCase):
    def setUp(self):
        """Create a fresh DeploymentCommands instance for every test."""
        super(DeploymentCommandsTestCase, self).setUp()
        self.deployment = deployment.DeploymentCommands()
    @mock.patch.dict(os.environ, {"RALLY_DEPLOYMENT": "my_deployment_id"})
    @mock.patch("rally.cli.commands.deployment.DeploymentCommands.list")
    @mock.patch("rally.cli.commands.deployment.api.Deployment.create")
    @mock.patch("rally.cli.commands.deployment.open",
                side_effect=mock.mock_open(read_data="{\"some\": \"json\"}"),
                create=True)
    def test_create(self, mock_open, mock_deployment_create,
                    mock_deployment_commands_list):
        """``create`` parses the JSON config file and forwards the dict and
        deployment name to api.Deployment.create."""
        self.deployment.create("fake_deploy", False, "path_to_config.json")
        mock_deployment_create.assert_called_once_with(
            {"some": "json"}, "fake_deploy")
    @mock.patch.dict(os.environ, {"OS_AUTH_URL": "fake_auth_url",
                                  "OS_USERNAME": "fake_username",
                                  "OS_PASSWORD": "fake_password",
                                  "OS_TENANT_NAME": "fake_tenant_name",
                                  "OS_REGION_NAME": "fake_region_name",
                                  "OS_ENDPOINT": "fake_endpoint",
                                  "RALLY_DEPLOYMENT": "fake_deployment_id"})
    @mock.patch("rally.cli.commands.deployment.api.Deployment.create")
    @mock.patch("rally.cli.commands.deployment.DeploymentCommands.list")
    def test_createfromenv(self, mock_list, mock_deployment_create):
        """``create --fromenv`` builds an ExistingCloud config from OS_* env vars."""
        self.deployment.create("from_env", True)
        mock_deployment_create.assert_called_once_with(
            {
                "type": "ExistingCloud",
                "auth_url": "fake_auth_url",
                "region_name": "fake_region_name",
                "endpoint": "fake_endpoint",
                "admin": {
                    "username": "fake_username",
                    "password": "fake_password",
                    "tenant_name": "fake_tenant_name"
                }
            },
            "from_env"
        )
    @mock.patch("rally.cli.commands.deployment.DeploymentCommands.list")
    @mock.patch("rally.cli.commands.deployment.DeploymentCommands.use")
    @mock.patch("rally.cli.commands.deployment.api.Deployment.create",
                return_value=dict(uuid="uuid"))
    @mock.patch("rally.cli.commands.deployment.open",
                side_effect=mock.mock_open(read_data="{\"uuid\": \"uuid\"}"),
                create=True)
    def test_create_and_use(self, mock_open, mock_deployment_create,
                            mock_deployment_commands_use,
                            mock_deployment_commands_list):
        """``create --use`` additionally switches to the new deployment's uuid."""
        self.deployment.create("fake_deploy", False, "path_to_config.json",
                               True)
        mock_deployment_create.assert_called_once_with(
            {"uuid": "uuid"}, "fake_deploy")
        mock_deployment_commands_use.assert_called_once_with("uuid")
    @mock.patch("rally.cli.commands.deployment.api.Deployment.recreate")
    def test_recreate(self, mock_deployment_recreate):
        """``recreate`` passes the given deployment id straight to the API."""
        deployment_id = "43924f8b-9371-4152-af9f-4cf02b4eced4"
        self.deployment.recreate(deployment_id)
        mock_deployment_recreate.assert_called_once_with(deployment_id)
    @mock.patch("rally.cli.commands.deployment.envutils.get_global")
    def test_recreate_no_deployment_id(self, mock_get_global):
        """Without an id, the env-var fallback failure propagates to the caller."""
        mock_get_global.side_effect = exceptions.InvalidArgumentsException
        self.assertRaises(exceptions.InvalidArgumentsException,
                          self.deployment.recreate, None)
    @mock.patch("rally.cli.commands.deployment.api.Deployment.destroy")
    def test_destroy(self, mock_deployment_destroy):
        """``destroy`` passes the given deployment id straight to the API."""
        deployment_id = "53fd0273-60ce-42e5-a759-36f1a683103e"
        self.deployment.destroy(deployment_id)
        mock_deployment_destroy.assert_called_once_with(deployment_id)
    @mock.patch("rally.cli.commands.deployment.envutils.get_global")
    def test_destroy_no_deployment_id(self, mock_get_global):
        """Without an id, the env-var fallback failure propagates to the caller."""
        mock_get_global.side_effect = exceptions.InvalidArgumentsException
        self.assertRaises(exceptions.InvalidArgumentsException,
                          self.deployment.destroy, None)
    @mock.patch("rally.cli.commands.deployment.cliutils.print_list")
    @mock.patch("rally.cli.commands.deployment.utils.Struct")
    @mock.patch("rally.cli.commands.deployment.envutils.get_global")
    @mock.patch("rally.cli.commands.deployment.db.deployment_list")
    def test_list_different_deployment_id(self, mock_deployment_list,
                                          mock_get_global, mock_struct,
                                          mock_print_list):
        """A deployment that is NOT the current one gets an empty 'active' marker."""
        current_deployment_id = "26a3ce76-0efa-40e4-86e5-514574bd1ff6"
        mock_get_global.return_value = current_deployment_id
        fake_deployment_list = [
            {"uuid": "fa34aea2-ae2e-4cf7-a072-b08d67466e3e",
             "created_at": "03-12-2014",
             "name": "dep1",
             "status": "deploy->started",
             "active": "False"}]
        mock_deployment_list.return_value = fake_deployment_list
        self.deployment.list()
        fake_deployment = fake_deployment_list[0]
        # uuid differs from the current deployment id -> no "*" marker.
        fake_deployment["active"] = ""
        mock_struct.assert_called_once_with(**fake_deployment)
        headers = ["uuid", "created_at", "name", "status", "active"]
        mock_print_list.assert_called_once_with([mock_struct()], headers,
                                                sortby_index=headers.index(
                                                    "created_at"))
    @mock.patch("rally.cli.commands.deployment.cliutils.print_list")
    @mock.patch("rally.cli.commands.deployment.utils.Struct")
    @mock.patch("rally.cli.commands.deployment.envutils.get_global")
    @mock.patch("rally.cli.commands.deployment.db.deployment_list")
    def test_list_current_deployment_id(self, mock_deployment_list,
                                        mock_get_global, mock_struct,
                                        mock_print_list):
        """The currently selected deployment is flagged with '*' in the listing."""
        current_deployment_id = "64258e84-ffa1-4011-9e4c-aba07bdbcc6b"
        mock_get_global.return_value = current_deployment_id
        fake_deployment_list = [{"uuid": current_deployment_id,
                                 "created_at": "13-12-2014",
                                 "name": "dep2",
                                 "status": "deploy->finished",
                                 "active": "True"}]
        mock_deployment_list.return_value = fake_deployment_list
        self.deployment.list()
        fake_deployment = fake_deployment_list[0]
        # uuid matches the current deployment id -> "*" marker expected.
        fake_deployment["active"] = "*"
        mock_struct.assert_called_once_with(**fake_deployment)
        headers = ["uuid", "created_at", "name", "status", "active"]
        mock_print_list.assert_called_once_with([mock_struct()], headers,
                                                sortby_index=headers.index(
                                                    "created_at"))
    @mock.patch("rally.cli.commands.deployment.db.deployment_get")
    @mock.patch("json.dumps")
    def test_config(self, mock_json_dumps, mock_deployment_get):
        """``config`` fetches the deployment and pretty-prints its config as JSON."""
        deployment_id = "fa4a423e-f15d-4d83-971a-89574f892999"
        value = {"config": "config"}
        mock_deployment_get.return_value = value
        self.deployment.config(deployment_id)
        mock_json_dumps.assert_called_once_with(value["config"],
                                                sort_keys=True, indent=4)
        mock_deployment_get.assert_called_once_with(deployment_id)
    @mock.patch("rally.cli.commands.deployment.envutils.get_global")
    def test_config_no_deployment_id(self, mock_get_global):
        """Without an id, the env-var fallback failure propagates to the caller."""
        mock_get_global.side_effect = exceptions.InvalidArgumentsException
        self.assertRaises(exceptions.InvalidArgumentsException,
                          self.deployment.config, None)
    @mock.patch("rally.cli.commands.deployment.cliutils.print_list")
    @mock.patch("rally.cli.commands.deployment.utils.Struct")
    @mock.patch("rally.cli.commands.deployment.db.deployment_get")
    def test_show(self, mock_deployment_get, mock_struct, mock_print_list):
        """``show`` prints admin credentials with the password masked as '***'."""
        deployment_id = "b1a6153e-a314-4cb3-b63b-cf08c1a416c3"
        value = {
            "admin": {
                "auth_url": "url",
                "username": "u",
                "password": "p",
                "tenant_name": "t",
                "region_name": "r",
                "endpoint_type": consts.EndpointType.INTERNAL
            },
            "users": []
        }
        mock_deployment_get.return_value = value
        self.deployment.show(deployment_id)
        mock_deployment_get.assert_called_once_with(deployment_id)
        headers = ["auth_url", "username", "password", "tenant_name",
                   "region_name", "endpoint_type"]
        # Password "p" must be replaced by "***" in the printed row.
        fake_data = ["url", "u", "***", "t", "r", consts.EndpointType.INTERNAL]
        mock_struct.assert_called_once_with(**dict(zip(headers, fake_data)))
        mock_print_list.assert_called_once_with([mock_struct()], headers)
    @mock.patch("rally.cli.commands.deployment.envutils.get_global")
    def test_deploy_no_deployment_id(self, mock_get_global):
        """Without an id, ``show`` propagates the env-var fallback failure."""
        mock_get_global.side_effect = exceptions.InvalidArgumentsException
        self.assertRaises(exceptions.InvalidArgumentsException,
                          self.deployment.show, None)
    @mock.patch("os.remove")
    @mock.patch("os.symlink")
    @mock.patch("rally.cli.commands.deployment.db.deployment_get",
                return_value=fakes.FakeDeployment(
                    uuid="593b683c-4b16-4b2b-a56b-e162bd60f10b"))
    @mock.patch("os.path.exists", return_value=True)
    @mock.patch("rally.common.fileutils.update_env_file")
    def test_use(self, mock_update_env_file, mock_path_exists,
                 mock_deployment_get, mock_symlink, mock_remove):
        """``use`` must record the deployment and (re)write an openrc file.

        Checks three side effects: the uuid is written to ~/.rally/globals,
        the admin credentials are exported into an openrc file, and the
        ~/.rally/openrc symlink is removed and re-pointed at it.
        """
        deployment_id = mock_deployment_get.return_value["uuid"]
        mock_deployment_get.return_value["admin"] = {
            "auth_url": "fake_auth_url",
            "username": "fake_username",
            "password": "fake_password",
            "tenant_name": "fake_tenant_name",
            "endpoint": "fake_endpoint",
            "region_name": None}
        # Patch the module-level ``open`` so the openrc writes are captured
        # instead of touching the filesystem.
        with mock.patch("rally.cli.commands.deployment.open", mock.mock_open(),
                        create=True) as mock_file:
            self.deployment.use(deployment_id)
            self.assertEqual(2, mock_path_exists.call_count)
            mock_update_env_file.assert_called_once_with(os.path.expanduser(
                "~/.rally/globals"),
                "RALLY_DEPLOYMENT", "%s\n" % deployment_id)
            # The endpoint export is written separately from the
            # auth_url/username/password/tenant block.
            mock_file.return_value.write.assert_any_call(
                "export OS_ENDPOINT=fake_endpoint\n")
            mock_file.return_value.write.assert_any_call(
                "export OS_AUTH_URL=fake_auth_url\n"
                "export OS_USERNAME=fake_username\n"
                "export OS_PASSWORD=fake_password\n"
                "export OS_TENANT_NAME=fake_tenant_name\n")
            mock_symlink.assert_called_once_with(
                os.path.expanduser("~/.rally/openrc-%s" % deployment_id),
                os.path.expanduser("~/.rally/openrc"))
            # The pre-existing symlink is removed before re-linking.
            mock_remove.assert_called_once_with(os.path.expanduser(
                "~/.rally/openrc"))
    @mock.patch("os.remove")
    @mock.patch("os.symlink")
    @mock.patch("rally.cli.commands.deployment.db.deployment_get",
                return_value=fakes.FakeDeployment(
                    uuid="593b683c-4b16-4b2b-a56b-e162bd60f10b"))
    @mock.patch("os.path.exists", return_value=True)
    @mock.patch("rally.common.fileutils.update_env_file")
    def test_use_with_v3_auth(self, mock_update_env_file, mock_path_exists,
                              mock_deployment_get, mock_symlink, mock_remove):
        """Like test_use, but with a keystone v3 auth_url.

        When the deployment carries v3 domain information, the generated
        openrc must additionally export OS_USER_DOMAIN_NAME and
        OS_PROJECT_DOMAIN_NAME.
        """
        deployment_id = mock_deployment_get.return_value["uuid"]
        mock_deployment_get.return_value["admin"] = {
            "auth_url": "http://localhost:5000/v3",
            "username": "fake_username",
            "password": "fake_password",
            "tenant_name": "fake_tenant_name",
            "endpoint": "fake_endpoint",
            "region_name": None,
            "user_domain_name": "fake_user_domain",
            "project_domain_name": "fake_project_domain"}
        # Capture openrc writes without touching the filesystem.
        with mock.patch("rally.cli.commands.deployment.open", mock.mock_open(),
                        create=True) as mock_file:
            self.deployment.use(deployment_id)
            self.assertEqual(2, mock_path_exists.call_count)
            mock_update_env_file.assert_called_once_with(os.path.expanduser(
                "~/.rally/globals"),
                "RALLY_DEPLOYMENT", "%s\n" % deployment_id)
            mock_file.return_value.write.assert_any_call(
                "export OS_ENDPOINT=fake_endpoint\n")
            mock_file.return_value.write.assert_any_call(
                "export OS_AUTH_URL=http://localhost:5000/v3\n"
                "export OS_USERNAME=fake_username\n"
                "export OS_PASSWORD=fake_password\n"
                "export OS_TENANT_NAME=fake_tenant_name\n")
            # v3-only exports: domain names.
            mock_file.return_value.write.assert_any_call(
                "export OS_USER_DOMAIN_NAME=fake_user_domain\n"
                "export OS_PROJECT_DOMAIN_NAME=fake_project_domain\n")
            mock_symlink.assert_called_once_with(
                os.path.expanduser("~/.rally/openrc-%s" % deployment_id),
                os.path.expanduser("~/.rally/openrc"))
            mock_remove.assert_called_once_with(os.path.expanduser(
                "~/.rally/openrc"))
@mock.patch("rally.cli.commands.deployment.DeploymentCommands."
"_update_openrc_deployment_file")
@mock.patch("rally.common.fileutils.update_globals_file")
@mock.patch("rally.cli.commands.deployment.db")
def test_use_by_name(self, mock_db, mock_update_globals_file,
mock__update_openrc_deployment_file):
fake_deployment = fakes.FakeDeployment(
uuid="fake_uuid",
admin="fake_endpoints")
mock_db.deployment_list.return_value = [fake_deployment]
mock_db.deployment_get.return_value = fake_deployment
status = self.deployment.use(deployment="fake_name")
self.assertIsNone(status)
mock_db.deployment_get.assert_called_once_with("fake_name")
mock_update_globals_file.assert_called_once_with(
envutils.ENV_DEPLOYMENT, "fake_uuid")
mock__update_openrc_deployment_file.assert_called_once_with(
"fake_uuid", "fake_endpoints")
@mock.patch("rally.cli.commands.deployment.db.deployment_get")
def test_deployment_not_found(self, mock_deployment_get):
deployment_id = "e87e4dca-b515-4477-888d-5f6103f13b42"
mock_deployment_get.side_effect = exceptions.DeploymentNotFound(
uuid=deployment_id)
self.assertEqual(1, self.deployment.use(deployment_id))
@mock.patch("rally.osclients.Clients.verified_keystone")
@mock.patch("rally.osclients.Clients.keystone")
@mock.patch("rally.cli.commands.deployment.db.deployment_get")
def test_deployment_check(self, mock_deployment_get, mock_clients_keystone,
mock_clients_verified_keystone):
deployment_id = "e87e4dca-b515-4477-888d-5f6103f13b42"
sample_endpoint = objects.Endpoint("http://192.168.1.1:5000/v2.0/",
"admin",
"adminpass").to_dict()
mock_deployment_get.return_value = {"admin": sample_endpoint,
"users": [sample_endpoint]}
self.deployment.check(deployment_id)
mock_deployment_get.assert_called_once_with(deployment_id)
@mock.patch("rally.osclients.Clients.verified_keystone")
@mock.patch("rally.cli.commands.deployment.db.deployment_get")
def test_deployment_check_raise(self, mock_deployment_get,
mock_clients_verified_keystone):
deployment_id = "e87e4dca-b515-4477-888d-5f6103f13b42"
sample_endpoint = objects.Endpoint("http://192.168.1.1:5000/v2.0/",
"admin",
"adminpass").to_dict()
sample_endpoint["not-exist-key"] = "error"
mock_deployment_get.return_value = {"admin": sample_endpoint}
mock_clients_verified_keystone.services.list.return_value = []
self.assertRaises(TypeError, self.deployment.check, deployment_id)
@mock.patch("rally.osclients.Clients.verified_keystone")
@mock.patch("rally.cli.commands.deployment.db.deployment_get")
def test_deployment_check_not_exist(self, mock_deployment_get,
mock_clients_verified_keystone):
deployment_id = "e87e4dca-b515-4477-888d-5f6103f13b42"
mock_deployment_get.side_effect = exceptions.DeploymentNotFound()
mock_clients_verified_keystone.services.list.return_value = []
self.assertEqual(self.deployment.check(deployment_id), 1)
@mock.patch("rally.osclients.Clients.services")
@mock.patch("rally.cli.commands.deployment.db.deployment_get")
def test_deployment_check_connect_failed(self, mock_deployment_get,
mock_clients_services):
deployment_id = "e87e4dca-b515-4477-888d-5f6103f13b42"
sample_endpoint = objects.Endpoint("http://192.168.1.1:5000/v2.0/",
"admin",
"adminpass").to_dict()
mock_deployment_get.return_value = {"admin": sample_endpoint}
refused = keystone_exceptions.ConnectionRefused()
mock_clients_services.side_effect = refused
self.assertEqual(self.deployment.check(deployment_id), 1)
|
|
from __future__ import unicode_literals
import re
from django.db import models
class Accreds(models.Model):
    """Accreditation linking a person (sciper) to an EPFL unit.

    Unmanaged model over the legacy ``Accreds`` table: Django neither
    creates nor migrates it.
    """
    sciper = models.CharField(max_length=8)  # person identifier
    unite = models.CharField(max_length=8)   # unit identifier
    fonction = models.CharField(max_length=128, blank=True, null=True)
    ordre = models.DecimalField(max_digits=3, decimal_places=0, blank=True, null=True)
    statut = models.IntegerField(blank=True, null=True)
    classe = models.IntegerField(blank=True, null=True)
    datedeb = models.DateTimeField(blank=True, null=True)  # accreditation start
    telephone1 = models.CharField(max_length=16, blank=True, null=True)
    telephone2 = models.CharField(max_length=16, blank=True, null=True)
    local = models.CharField(max_length=25, blank=True, null=True)  # office/room
    comptead = models.CharField(max_length=1, blank=True, null=True)
    stockindiv = models.CharField(max_length=1, blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'Accreds'
class Personnes(models.Model):
    """A person record keyed by sciper.

    Unmanaged model over the legacy ``Personnes`` table.
    """
    sciper = models.CharField(max_length=8, primary_key=True)
    nom = models.CharField(max_length=64, blank=True, null=True)     # last name
    prenom = models.CharField(max_length=64, blank=True, null=True)  # first name
    username = models.CharField(max_length=64, blank=True, null=True)
    home = models.CharField(max_length=64, blank=True, null=True)
    shell = models.CharField(max_length=32, blank=True, null=True)
    email = models.CharField(max_length=64, blank=True, null=True)
    physemail = models.CharField(max_length=64, blank=True, null=True)
    uid = models.IntegerField(blank=True, null=True)
    type = models.CharField(max_length=1, blank=True, null=True)
    adrpost = models.CharField(max_length=128, blank=True, null=True)
    cardid = models.CharField(max_length=32, blank=True, null=True)
    cardstatus = models.CharField(max_length=1, blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'Personnes'

    def get_full_name(self):
        """Return "<last name> <first name>" (in that order)."""
        return '{} {}'.format(self.nom, self.prenom)
class TStudMultipleAccredHistory(models.Model):
    """History row for students who held multiple accreditations.

    Unmanaged model over ``T_STUD_MULTIPLE_ACCRED_HISTORY``; up to three
    (unit, order) pairs are stored per row.
    """
    sciper = models.CharField(db_column='SCIPER', max_length=8)
    new_sciper = models.CharField(db_column='NEW_SCIPER', max_length=12, blank=True, null=True)
    username = models.CharField(db_column='USERNAME', max_length=64, blank=True, null=True)
    unite_id1 = models.CharField(db_column='UNITE_ID1', max_length=50, blank=True, null=True)
    ordre1 = models.CharField(db_column='ORDRE1', max_length=64, blank=True, null=True)
    unite_id2 = models.CharField(db_column='UNITE_ID2', max_length=8, blank=True, null=True)
    ordre2 = models.CharField(db_column='ORDRE2', max_length=64, blank=True, null=True)
    unite_id3 = models.CharField(db_column='UNITE_ID3', max_length=8, blank=True, null=True)
    ordre3 = models.CharField(db_column='ORDRE3', max_length=64, blank=True, null=True)
    date_supp = models.DateTimeField(db_column='DATE_SUPP', blank=True, null=True)
    orientation = models.CharField(db_column='ORIENTATION', max_length=4, blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'T_STUD_MULTIPLE_ACCRED_HISTORY'
class Unites(models.Model):
    """An organizational unit in the EPFL hierarchy.

    Unmanaged model over the legacy ``Unites`` table. ``hierarchie`` holds
    the unit's full hierarchy path (used e.g. to detect student units).
    """
    id = models.IntegerField(primary_key=True)
    parent = models.IntegerField(blank=True, null=True)
    sigle = models.CharField(max_length=24)  # unit acronym
    libelle = models.CharField(max_length=128, blank=True, null=True)
    gid = models.IntegerField()
    hierarchie = models.CharField(max_length=64)
    cf = models.CharField(max_length=6, blank=True, null=True)
    du = models.DateTimeField(blank=True, null=True)  # valid from
    au = models.DateTimeField(blank=True, null=True)  # valid until
    type = models.CharField(max_length=1, blank=True, null=True)
    hierarchie_parent = models.CharField(db_column='HIERARCHIE_PARENT', max_length=100, blank=True, null=True)
    parent_id = models.CharField(db_column='PARENT_ID', max_length=5, blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'Unites'
class VPersonDeltaHistory(models.Model):
    """A change-history snapshot of a person record.

    Unmanaged model over the ``V_PERSON_DELTA_HISTORY`` view. Each row is
    the state of a person at ``delta_time``; the classmethods answer
    "what was true about this person at time T" by taking the most recent
    row at or before T.
    """
    person_sciper = models.CharField(db_column='PERSON_SCIPER', max_length=9, primary_key=True)
    person_lastname = models.CharField(db_column='PERSON_LASTNAME', max_length=2067, blank=True, null=True)
    person_firstname = models.CharField(db_column='PERSON_FIRSTNAME', max_length=64, blank=True, null=True)
    person_displayname = models.CharField(db_column='PERSON_DISPLAYNAME', max_length=2132, blank=True, null=True)
    person_username = models.CharField(db_column='PERSON_USERNAME', max_length=2009, blank=True, null=True)
    person_email = models.CharField(db_column='PERSON_EMAIL', max_length=64, blank=True, null=True)
    person_uid = models.IntegerField(db_column='PERSON_UID', blank=True, null=True)
    person_phone1 = models.CharField(db_column='PERSON_PHONE1', max_length=16, blank=True, null=True)
    person_phone2 = models.CharField(db_column='PERSON_PHONE2', max_length=16, blank=True, null=True)
    person_main_unit = models.CharField(db_column='PERSON_MAIN_UNIT', max_length=64, blank=True, null=True)
    person_main_unit_id = models.CharField(db_column='PERSON_MAIN_UNIT_ID', max_length=16, blank=True, null=True)
    person_position = models.CharField(db_column='PERSON_POSITION', max_length=2000, blank=True, null=True)
    person_office = models.CharField(db_column='PERSON_OFFICE', max_length=25, blank=True, null=True)
    person_gid = models.IntegerField(db_column='PERSON_GID', blank=True, null=True)
    person_dn_suffix = models.CharField(db_column='PERSON_DN_SUFFIX', max_length=2301, blank=True, null=True)
    person_cn = models.CharField(db_column='PERSON_CN', max_length=2131, blank=True, null=True)
    person_upn = models.CharField(db_column='PERSON_UPN', max_length=2200, blank=True, null=True)
    person_gone = models.CharField(db_column='PERSON_GONE', max_length=1)
    mail_enabled_address = models.CharField(db_column='MAIL_ENABLED_ADDRESS', max_length=64, blank=True, null=True)
    person_uac = models.IntegerField(db_column='PERSON_UAC')
    person_loginshell = models.CharField(db_column='PERSON_LOGINSHELL', max_length=32, blank=True, null=True)
    person_homedirectory = models.CharField(db_column='PERSON_HOMEDIRECTORY', max_length=64, blank=True, null=True)
    person_streetaddress = models.CharField(db_column='PERSON_STREETADDRESS', max_length=8000, blank=True, null=True)
    person_rfid = models.CharField(db_column='PERSON_RFID', max_length=32, blank=True, null=True)
    person_profilepath = models.CharField(db_column='PERSON_PROFILEPATH', max_length=200, blank=True, null=True)
    person_appdatapath = models.CharField(db_column='PERSON_APPDATAPATH', max_length=200, blank=True, null=True)
    delta_op = models.CharField(db_column='DELTA_OP', max_length=16, blank=True, null=True)
    delta_time = models.DateTimeField(db_column='DELTA_TIME', blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'V_PERSON_DELTA_HISTORY'

    @classmethod
    def _latest_at(cls, time, **filters):
        """Return the most recent snapshot at or before ``time``, or None.

        Uses ``.first()`` (one LIMIT-1 query) instead of the previous
        ``count()`` + index pair, which issued two queries.
        """
        return cls.objects.filter(
            delta_time__lte=time, **filters).order_by('-delta_time').first()

    @classmethod
    def get_sciper_at_time(cls, username, time):
        """Return the sciper mapped to ``username`` at ``time``, or None."""
        obj = cls._latest_at(time, person_username=username)
        return obj.person_sciper if obj is not None else None

    @classmethod
    def get_section_acronym_at_time(cls, sciper, time):
        """Return the section acronym at ``time``, or '' when unknown.

        The acronym is the part of ``person_main_unit`` before its last
        '-' (e.g. 'IN' from 'IN-BA1').
        """
        obj = cls._latest_at(time, person_sciper=sciper)
        # Guard: person_main_unit is nullable; re.search(None) would raise.
        if obj is None or not obj.person_main_unit:
            return ''
        match = re.search('(.*)-.*', obj.person_main_unit)
        return match.group(1) if match else ''

    @classmethod
    def get_username_at_time(cls, sciper, time):
        """Return the username of ``sciper`` at ``time``, or ''."""
        obj = cls._latest_at(time, person_sciper=sciper)
        return obj.person_username if obj is not None else ''

    @classmethod
    def get_name_at_time(cls, sciper, time):
        """Return "<lastname> <firstname>" of ``sciper`` at ``time``, or ''."""
        obj = cls._latest_at(time, person_sciper=sciper)
        if obj is None:
            return ''
        return '{} {}'.format(obj.person_lastname, obj.person_firstname)

    @classmethod
    def is_student_at_time(cls, sciper, time):
        """Return True when the person's main unit was a student unit.

        A unit counts as a student unit when its hierarchy path contains
        'EPFL ETU'. Unknown persons or units yield False.
        """
        person = cls._latest_at(time, person_sciper=sciper)
        if person is None:
            return False
        try:
            unit = Unites.objects.get(id=person.person_main_unit_id)
        except Unites.DoesNotExist:
            return False
        return 'EPFL ETU' in unit.hierarchie
class VPersonHistory(models.Model):
    """Historic person record (full snapshot, no delta metadata).

    Unmanaged model over the ``V_PERSON_HISTORY`` view. Same columns as
    ``VPersonDeltaHistory`` minus ``delta_op``/``delta_time``.
    """
    person_sciper = models.CharField(db_column='PERSON_SCIPER', max_length=9, primary_key=True)
    person_lastname = models.CharField(db_column='PERSON_LASTNAME', max_length=2067, blank=True, null=True)
    person_firstname = models.CharField(db_column='PERSON_FIRSTNAME', max_length=64, blank=True, null=True)
    person_displayname = models.CharField(db_column='PERSON_DISPLAYNAME', max_length=2132, blank=True, null=True)
    person_username = models.CharField(db_column='PERSON_USERNAME', max_length=2009, blank=True, null=True)
    person_email = models.CharField(db_column='PERSON_EMAIL', max_length=64, blank=True, null=True)
    person_uid = models.IntegerField(db_column='PERSON_UID', blank=True, null=True)
    person_phone1 = models.CharField(db_column='PERSON_PHONE1', max_length=8000, blank=True, null=True)
    person_phone2 = models.CharField(db_column='PERSON_PHONE2', max_length=16, blank=True, null=True)
    person_main_unit = models.CharField(db_column='PERSON_MAIN_UNIT', max_length=24, blank=True, null=True)
    person_main_unit_id = models.CharField(db_column='PERSON_MAIN_UNIT_ID', max_length=16, blank=True, null=True)
    person_position = models.CharField(db_column='PERSON_POSITION', max_length=2000, blank=True, null=True)
    person_office = models.CharField(db_column='PERSON_OFFICE', max_length=25, blank=True, null=True)
    person_gid = models.IntegerField(db_column='PERSON_GID', blank=True, null=True)
    person_dn_suffix = models.CharField(db_column='PERSON_DN_SUFFIX', max_length=2301, blank=True, null=True)
    person_cn = models.CharField(db_column='PERSON_CN', max_length=2131, blank=True, null=True)
    person_upn = models.CharField(db_column='PERSON_UPN', max_length=2200, blank=True, null=True)
    person_gone = models.CharField(db_column='PERSON_GONE', max_length=300, blank=True, null=True)
    mail_enabled_address = models.CharField(db_column='MAIL_ENABLED_ADDRESS', max_length=64, blank=True, null=True)
    person_uac = models.IntegerField(db_column='PERSON_UAC')
    person_loginshell = models.CharField(db_column='PERSON_LOGINSHELL', max_length=32, blank=True, null=True)
    person_homedirectory = models.CharField(db_column='PERSON_HOMEDIRECTORY', max_length=64, blank=True, null=True)
    person_streetaddress = models.CharField(db_column='PERSON_STREETADDRESS', max_length=8000, blank=True, null=True)
    person_rfid = models.CharField(db_column='PERSON_RFID', max_length=32, blank=True, null=True)
    person_profilepath = models.CharField(db_column='PERSON_PROFILEPATH', max_length=200, blank=True, null=True)
    person_appdatapath = models.CharField(db_column='PERSON_APPDATAPATH', max_length=200, blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'V_PERSON_HISTORY'

    def get_full_name(self):
        """Return "<last name> <first name>" (in that order)."""
        return '{} {}'.format(self.person_lastname, self.person_firstname)
|
|
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import httplib
import logging
import os
import socket
import urlparse
try:
import ssl
except ImportError:
#TODO(bcwaldon): Handle this failure more gracefully
pass
try:
import json
except ImportError:
import simplejson as json
# Python 2.5 compat fix
if not hasattr(urlparse, 'parse_qsl'):
import cgi
urlparse.parse_qsl = cgi.parse_qsl
from heatclient import exc
LOG = logging.getLogger(__name__)
USER_AGENT = 'python-heatclient'
CHUNKSIZE = 1024 * 64 # 64kB
class HTTPClient(object):
    """Minimal httplib-based client for the heat REST API.

    Supports http and https endpoints (with optional certificate
    verification via VerifiedHTTPSConnection), token or
    username/password auth headers, curl-style request logging and
    transparent handling of 301/302/305 redirects.
    """

    def __init__(self, endpoint, **kwargs):
        self.endpoint = endpoint
        self.auth_url = kwargs.get('auth_url')
        self.auth_token = kwargs.get('token')
        self.username = kwargs.get('username')
        self.password = kwargs.get('password')
        self.connection_params = self.get_connection_params(endpoint, **kwargs)

    @staticmethod
    def get_connection_params(endpoint, **kwargs):
        """Return ``(connection_class, args, kwargs)`` for ``endpoint``.

        ``args`` is ``(hostname, port, base_path)``; SSL-related kwargs
        are only populated for https endpoints.

        :raises exc.InvalidEndpoint: for schemes other than http/https.
        """
        parts = urlparse.urlparse(endpoint)
        _args = (parts.hostname, parts.port, parts.path)
        _kwargs = {'timeout': float(kwargs.get('timeout', 600))}
        if parts.scheme == 'https':
            _class = VerifiedHTTPSConnection
            _kwargs['ca_file'] = kwargs.get('ca_file', None)
            _kwargs['cert_file'] = kwargs.get('cert_file', None)
            _kwargs['key_file'] = kwargs.get('key_file', None)
            _kwargs['insecure'] = kwargs.get('insecure', False)
        elif parts.scheme == 'http':
            _class = httplib.HTTPConnection
        else:
            msg = 'Unsupported scheme: %s' % parts.scheme
            raise exc.InvalidEndpoint(msg)
        return (_class, _args, _kwargs)

    def get_connection(self):
        """Create a fresh connection from ``connection_params``.

        Only (host, port) are passed to the connection class; the base
        path is prepended per-request in :meth:`_http_request`.
        """
        _class = self.connection_params[0]
        try:
            return _class(*self.connection_params[1][0:2],
                          **self.connection_params[2])
        except httplib.InvalidURL:
            raise exc.InvalidEndpoint()

    def log_curl_request(self, method, url, kwargs):
        """Log the outgoing request as an equivalent curl command line."""
        curl = ['curl -i -X %s' % method]

        for (key, value) in kwargs['headers'].items():
            header = '-H \'%s: %s\'' % (key, value)
            curl.append(header)

        conn_params_fmt = [
            ('key_file', '--key %s'),
            ('cert_file', '--cert %s'),
            ('ca_file', '--cacert %s'),
        ]
        for (key, fmt) in conn_params_fmt:
            value = self.connection_params[2].get(key)
            if value:
                curl.append(fmt % value)

        if self.connection_params[2].get('insecure'):
            curl.append('-k')

        if 'body' in kwargs:
            curl.append('-d \'%s\'' % kwargs['body'])

        curl.append('%s%s' % (self.endpoint, url))
        LOG.debug(' '.join(curl))

    @staticmethod
    def log_http_response(resp, body=None):
        """Log the response status line, headers and optional body."""
        status = (resp.version / 10.0, resp.status, resp.reason)
        dump = ['\nHTTP/%.1f %s %s' % status]
        dump.extend(['%s: %s' % (k, v) for k, v in resp.getheaders()])
        dump.append('')
        if body:
            dump.extend([body, ''])
        LOG.debug('\n'.join(dump))

    def _http_request(self, url, method, **kwargs):
        """Send an http request with the specified characteristics.

        Wrapper around httplib.HTTP(S)Connection.request to handle tasks
        such as setting headers, error handling and redirects.

        :returns: ``(response, body_string)``
        :raises exc.InvalidEndpoint: on DNS failure or bad redirects.
        :raises exc.CommunicationError: on socket errors/timeouts.
        """
        # Copy the kwargs so we can reuse the original in case of redirects
        kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {}))
        kwargs['headers'].setdefault('User-Agent', USER_AGENT)
        # Prefer an existing token; otherwise send raw credentials.
        if self.auth_token:
            kwargs['headers'].setdefault('X-Auth-Token', self.auth_token)
        else:
            kwargs['headers'].update(self.credentials_headers())
        if self.auth_url:
            kwargs['headers'].setdefault('X-Auth-Url', self.auth_url)

        self.log_curl_request(method, url, kwargs)
        conn = self.get_connection()

        try:
            # connection_params[1][2] is the endpoint's base path; the
            # request path is relative to it.
            base_path = self.connection_params[1][2]
            conn_url = os.path.normpath('%s/%s' % (base_path, url))
            conn.request(method, conn_url, **kwargs)
            resp = conn.getresponse()
        except socket.gaierror as e:
            message = "Error finding address for %(url)s: %(e)s" % locals()
            raise exc.InvalidEndpoint(message=message)
        except (socket.error, socket.timeout) as e:
            endpoint = self.endpoint
            message = "Error communicating with %(endpoint)s %(e)s" % locals()
            raise exc.CommunicationError(message=message)

        body_iter = ResponseBodyIterator(resp)
        body_str = ''.join([chunk for chunk in body_iter])
        self.log_http_response(resp, body_str)

        if 400 <= resp.status < 600:
            raise exc.from_response(resp, body_str)
        elif resp.status in (301, 302, 305):
            # Redirected. Reissue the request to the new location.
            location = resp.getheader('location', None)
            if location is None:
                message = "Location not returned with 302"
                raise exc.InvalidEndpoint(message=message)
            elif location.startswith(self.endpoint):
                # shave off the endpoint, it will be prepended when we recurse
                location = location[len(self.endpoint):]
            else:
                message = "Prohibited endpoint redirect %s" % location
                raise exc.InvalidEndpoint(message=message)
            return self._http_request(location, method, **kwargs)
        elif resp.status == 300:
            raise exc.from_response(resp, body_str)

        return resp, body_str

    def credentials_headers(self):
        """Return X-Auth-User / X-Auth-Key headers for the stored creds."""
        creds = {}
        if self.username:
            creds['X-Auth-User'] = self.username
        if self.password:
            creds['X-Auth-Key'] = self.password
        return creds

    def json_request(self, method, url, **kwargs):
        """Issue a JSON request; decode the response body when it is JSON.

        :returns: ``(response, decoded_body_or_None)``
        """
        kwargs.setdefault('headers', {})
        kwargs['headers'].setdefault('Content-Type', 'application/json')
        kwargs['headers'].setdefault('Accept', 'application/json')

        if 'body' in kwargs:
            kwargs['body'] = json.dumps(kwargs['body'])

        resp, body_str = self._http_request(url, method, **kwargs)

        # BUG FIX: default to '' rather than None -- ``'x' in None`` raises
        # TypeError when the server omits the Content-Type header.
        if 'application/json' in resp.getheader('content-type', ''):
            body = body_str
            try:
                body = json.loads(body)
            except ValueError:
                LOG.error('Could not decode response body as JSON')
        else:
            body = None

        return resp, body

    def raw_request(self, method, url, **kwargs):
        """Issue a request with an octet-stream content type."""
        kwargs.setdefault('headers', {})
        kwargs['headers'].setdefault('Content-Type',
                                     'application/octet-stream')
        return self._http_request(url, method, **kwargs)
class VerifiedHTTPSConnection(httplib.HTTPSConnection):
    """httplib-compatible connection using client-side SSL authentication

    :see http://code.activestate.com/recipes/
            577548-https-httplib-client-connection-with-certificate-v/
    """
    def __init__(self, host, port, key_file=None, cert_file=None,
                 ca_file=None, timeout=None, insecure=False):
        """
        :param key_file: client private key path (optional)
        :param cert_file: client certificate path (optional)
        :param ca_file: CA bundle; when None, falls back to a well-known
                        system location (see get_system_ca_file)
        :param insecure: when True, skip server-certificate verification
        """
        httplib.HTTPSConnection.__init__(self, host, port, key_file=key_file,
                                         cert_file=cert_file)
        self.key_file = key_file
        self.cert_file = cert_file
        if ca_file is not None:
            self.ca_file = ca_file
        else:
            self.ca_file = self.get_system_ca_file()
        self.timeout = timeout
        self.insecure = insecure

    def connect(self):
        """Connect to a host on a given (SSL) port.
        If ca_file is pointing somewhere, use it to check Server Certificate.

        Redefined/copied and extended from httplib.py:1105 (Python 2.6.x).
        This is needed to pass cert_reqs=ssl.CERT_REQUIRED as parameter to
        ssl.wrap_socket(), which forces SSL to check server certificate
        against our client certificate.
        """
        sock = socket.create_connection((self.host, self.port), self.timeout)
        # Tunnel through a proxy first if one was configured.
        if self._tunnel_host:
            self.sock = sock
            self._tunnel()

        if self.insecure is True:
            kwargs = {'cert_reqs': ssl.CERT_NONE}
        else:
            kwargs = {'cert_reqs': ssl.CERT_REQUIRED, 'ca_certs': self.ca_file}

        if self.cert_file:
            kwargs['certfile'] = self.cert_file
            if self.key_file:
                kwargs['keyfile'] = self.key_file

        self.sock = ssl.wrap_socket(sock, **kwargs)

    @staticmethod
    def get_system_ca_file():
        """Return path to system default CA file, or None if none exist."""
        # Standard CA file locations for Debian/Ubuntu, RedHat/Fedora,
        # Suse, FreeBSD/OpenBSD
        ca_path = ['/etc/ssl/certs/ca-certificates.crt',
                   '/etc/pki/tls/certs/ca-bundle.crt',
                   '/etc/ssl/ca-bundle.pem',
                   '/etc/ssl/cert.pem']
        for ca in ca_path:
            if os.path.exists(ca):
                return ca
        return None
class ResponseBodyIterator(object):
    """Iterate over an HTTP response body in CHUNKSIZE-byte pieces."""

    def __init__(self, resp):
        self.resp = resp

    def __iter__(self):
        while True:
            yield self.next()

    def next(self):
        """Return the next chunk; raise StopIteration at end of body."""
        data = self.resp.read(CHUNKSIZE)
        if not data:
            raise StopIteration()
        return data
|
|
from ctypes import windll, pointer
from ctypes.wintypes import DWORD
from prompt_toolkit.key_binding.input_processor import KeyPress
from prompt_toolkit.keys import Keys
from prompt_toolkit.win32_types import EventTypes, KEY_EVENT_RECORD, INPUT_RECORD, STD_INPUT_HANDLE
__all__ = (
'ConsoleInputReader',
'raw_mode',
'cooked_mode'
)
class ConsoleInputReader(object):
    """
    Non-blocking reader that converts Win32 console ``INPUT_RECORD`` key
    events into prompt_toolkit `KeyPress` instances.
    """
    # Keys with character data.
    mappings = {
        b'\x1b': Keys.Escape,

        b'\x00': Keys.ControlSpace,  # Control-Space (Also for Ctrl-@)
        b'\x01': Keys.ControlA,  # Control-A (home)
        b'\x02': Keys.ControlB,  # Control-B (emacs cursor left)
        b'\x03': Keys.ControlC,  # Control-C (interrupt)
        b'\x04': Keys.ControlD,  # Control-D (exit)
        b'\x05': Keys.ControlE,  # Control-E (end)
        b'\x06': Keys.ControlF,  # Control-F (cursor forward)
        b'\x07': Keys.ControlG,  # Control-G
        b'\x08': Keys.ControlH,  # Control-H (8) (Identical to '\b')
        b'\x09': Keys.ControlI,  # Control-I (9) (Identical to '\t')
        b'\x0a': Keys.ControlJ,  # Control-J (10) (Identical to '\n')
        b'\x0b': Keys.ControlK,  # Control-K (delete until end of line; vertical tab)
        b'\x0c': Keys.ControlL,  # Control-L (clear; form feed)
        b'\x0d': Keys.ControlJ,  # Control-J NOTE: Windows sends \r instead of
                                 #   \n when pressing enter. We turn it into \n
                                 #   to be compatible with other platforms.
        b'\x0e': Keys.ControlN,  # Control-N (14) (history forward)
        b'\x0f': Keys.ControlO,  # Control-O (15)
        b'\x10': Keys.ControlP,  # Control-P (16) (history back)
        b'\x11': Keys.ControlQ,  # Control-Q
        b'\x12': Keys.ControlR,  # Control-R (18) (reverse search)
        b'\x13': Keys.ControlS,  # Control-S (19) (forward search)
        b'\x14': Keys.ControlT,  # Control-T
        b'\x15': Keys.ControlU,  # Control-U
        b'\x16': Keys.ControlV,  # Control-V
        b'\x17': Keys.ControlW,  # Control-W
        b'\x18': Keys.ControlX,  # Control-X
        b'\x19': Keys.ControlY,  # Control-Y (25)
        b'\x1a': Keys.ControlZ,  # Control-Z

        b'\x1c': Keys.ControlBackslash,  # Both Control-\ and Ctrl-|
        b'\x1d': Keys.ControlSquareClose,  # Control-]
        b'\x1e': Keys.ControlCircumflex,  # Control-^
        b'\x1f': Keys.ControlUnderscore,  # Control-underscore (Also for Ctrl-hypen.)
        b'\x7f': Keys.Backspace,  # (127) Backspace
    }

    # Keys that don't carry character data.
    keycodes = {
        # Home/End
        33: Keys.PageUp,
        34: Keys.PageDown,
        35: Keys.End,
        36: Keys.Home,

        # Arrows
        37: Keys.Left,
        38: Keys.Up,
        39: Keys.Right,
        40: Keys.Down,

        46: Keys.Delete,

        # F-keys.
        112: Keys.F1,
        113: Keys.F2,
        114: Keys.F3,
        115: Keys.F4,
        116: Keys.F5,
        117: Keys.F6,
        118: Keys.F7,
        119: Keys.F8,
        120: Keys.F9,
        121: Keys.F10,
        122: Keys.F11,
        123: Keys.F12,
    }

    # ControlKeyState bit flags (see the KEY_EVENT_RECORD documentation).
    LEFT_ALT_PRESSED = 0x0002
    RIGHT_ALT_PRESSED = 0x0001
    SHIFT_PRESSED = 0x0010
    LEFT_CTRL_PRESSED = 0x0008
    RIGHT_CTRL_PRESSED = 0x0004

    def __init__(self):
        # Win32 handle for the process's standard input.
        self.handle = windll.kernel32.GetStdHandle(STD_INPUT_HANDLE)

    def read(self):
        """
        Read from the Windows console and return a list of `KeyPress` instances.
        It can return an empty list when there was nothing to read. (This
        function doesn't block.)

        http://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx
        """
        max_count = 1024  # Read max 1024 events at the same time.

        result = []
        read = DWORD(0)

        # Allocate an array of INPUT_RECORD for the API to fill in.
        arrtype = INPUT_RECORD * max_count
        input_records = arrtype()

        # Get next batch of input event.
        windll.kernel32.ReadConsoleInputW(self.handle, pointer(input_records), max_count, pointer(read))

        for i in range(read.value):
            ir = input_records[i]

            # Get the right EventType from the EVENT_RECORD.
            # (For some reason the Windows console application 'cmder'
            # [http://gooseberrycreative.com/cmder/] can return '0' for
            # ir.EventType. -- Just ignore that.)
            if ir.EventType in EventTypes:
                ev = getattr(ir.Event, EventTypes[ir.EventType])

                # Process if this is a key event. (We also have mouse, menu and
                # focus events.)
                if type(ev) == KEY_EVENT_RECORD and ev.KeyDown:
                    key_presses = self._event_to_key_presses(ev)

                    if key_presses:
                        result.extend(key_presses)

        return result

    def _event_to_key_presses(self, ev):
        """
        For this `KEY_EVENT_RECORD`, return a list of `KeyPress` instances.

        Applies modifier handling: Ctrl-arrows, Shift-Tab, Ctrl-Space,
        Ctrl-Enter, and an Escape prefix when Alt/Meta was held.
        """
        assert type(ev) == KEY_EVENT_RECORD and ev.KeyDown

        result = None

        u_char = ev.uChar.UnicodeChar
        ascii_char = ev.uChar.AsciiChar

        # '\x00' means no character data: look the virtual key code up instead.
        if u_char == '\x00':
            if ev.VirtualKeyCode in self.keycodes:
                result = KeyPress(self.keycodes[ev.VirtualKeyCode], '')
        else:
            if ascii_char in self.mappings:
                result = KeyPress(self.mappings[ascii_char], u_char)
            else:
                result = KeyPress(u_char, u_char)

        # Correctly handle Control-Arrow keys.
        if (ev.ControlKeyState & self.LEFT_CTRL_PRESSED or
                ev.ControlKeyState & self.RIGHT_CTRL_PRESSED) and result:
            if result.key == Keys.Left:
                result.key = Keys.ControlLeft
            if result.key == Keys.Right:
                result.key = Keys.ControlRight
            if result.key == Keys.Up:
                result.key = Keys.ControlUp
            if result.key == Keys.Down:
                result.key = Keys.ControlDown

        # Turn 'Tab' into 'BackTab' when shift was pressed.
        if ev.ControlKeyState & self.SHIFT_PRESSED and result:
            if result.key == Keys.Tab:
                result.key = Keys.BackTab

        # Turn 'Space' into 'ControlSpace' when control was pressed.
        if (ev.ControlKeyState & self.LEFT_CTRL_PRESSED or
                ev.ControlKeyState & self.RIGHT_CTRL_PRESSED) and result and result.data == ' ':
            result = KeyPress(Keys.ControlSpace, ' ')

        # Turn Control-Enter into META-Enter. (On a vt100 terminal, we cannot
        # detect this combination. But it's really practical on Windows.)
        if (ev.ControlKeyState & self.LEFT_CTRL_PRESSED or
                ev.ControlKeyState & self.RIGHT_CTRL_PRESSED) and result and \
                result.key == Keys.ControlJ:
            return [KeyPress(Keys.Escape, ''), result]

        # Return result. If alt was pressed, prefix the result with an
        # 'Escape' key, just like unix VT100 terminals do.
        if result:
            meta_pressed = ev.ControlKeyState & self.LEFT_ALT_PRESSED or \
                ev.ControlKeyState & self.RIGHT_ALT_PRESSED

            if meta_pressed:
                return [KeyPress(Keys.Escape, ''), result]
            else:
                return [result]

        else:
            return []
class raw_mode(object):
    """
    ::

        with raw_mode(stdin):
            ''' the windows terminal is now in 'raw' mode. '''

    The ``fileno`` attribute is ignored. This is to be compatible with the
    `raw_input` method of `.vt100_input`.
    """
    def __init__(self, fileno=None):
        # ``fileno`` is accepted only for API compatibility; the process's
        # standard input handle is always used.
        self.handle = windll.kernel32.GetStdHandle(-10)  # STD_INPUT_HANDLE

    def __enter__(self):
        # Remember original mode.
        original_mode = DWORD()
        windll.kernel32.GetConsoleMode(self.handle, pointer(original_mode))
        self.original_mode = original_mode

        self._patch()

    def _patch(self):
        # Set raw: clear the echo, line-buffering and input-processing bits.
        ENABLE_ECHO_INPUT = 0x0004
        ENABLE_LINE_INPUT = 0x0002
        ENABLE_PROCESSED_INPUT = 0x0001

        windll.kernel32.SetConsoleMode(
            self.handle, self.original_mode.value &
            ~(ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT))

    def __exit__(self, *a, **kw):
        # Restore original mode
        windll.kernel32.SetConsoleMode(self.handle, self.original_mode)
class cooked_mode(raw_mode):
    """
    Context manager that re-enables echo, line buffering and processed input
    ('cooked' mode) — the inverse of `raw_mode`.

    ::
        with cooked_mode(stdin):
            ''' the pseudo-terminal stdin is now used in cooked mode. '''
    """
    def _patch(self):
        # Set the exact same three bits that raw_mode clears.
        ENABLE_ECHO_INPUT = 0x0004
        ENABLE_LINE_INPUT = 0x0002
        ENABLE_PROCESSED_INPUT = 0x0001
        enabled = ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT
        windll.kernel32.SetConsoleMode(self.handle,
                                       self.original_mode.value | enabled)
|
|
from sqlalchemy.dialects.postgresql import JSONB, BYTEA
from app import db
import datetime
class Base(db.Model):
    # Abstract declarative base: never mapped to a table itself, but gives
    # every concrete model below a surrogate integer primary key ``id``.
    __abstract__ = True
    id = db.Column(db.Integer, primary_key = True)
class Study(Base):
    """ORM model for a study: descriptive metadata plus funding details.

    ``submission_date`` is stamped automatically on INSERT; callers supply
    the remaining fields through ``__init__``.
    """
    __tablename__ = "study"
    title = db.Column(db.String(450), nullable=False)
    description = db.Column(db.String(10000), nullable=False)
    grant_number = db.Column(db.String(80))
    funding_agency = db.Column(db.String(150))
    public_release_date = db.Column(db.DateTime, nullable=False)
    # Bug fix: pass the *callable* (no parentheses) so SQLAlchemy evaluates
    # it per-INSERT. The original called utcnow() once at import time, which
    # stamped every row with the process start time.
    submission_date = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)
    design_descriptors = db.relationship("StudyDesignDescriptor", backref="study", lazy="dynamic")
    inv_id = db.Column(db.Integer, db.ForeignKey('investigation.id'))

    def __init__(self,
                 title,
                 description,
                 grant_number,
                 funding_agency,
                 public_release_date,
                 inv_id):
        self.title = title
        self.description = description
        self.grant_number = grant_number
        self.funding_agency = funding_agency
        self.public_release_date = public_release_date
        self.inv_id = inv_id

    def __dir__(self):
        # NOTE(review): __dir__ is supposed to return an iterable of attribute
        # names; returning the instance breaks dir(obj). Kept unchanged since
        # every model in this file shares the idiom — confirm intent before
        # changing it.
        return self
class StudyPublications(Base):
    """ORM model for a publication attached to a study (PMID, DOI, authors)."""
    __tablename__ = "study_publications"
    pmid = db.Column(db.Integer)
    doi = db.Column(db.String)
    author_list = db.Column(db.String)
    title = db.Column(db.String)
    status = db.Column(db.String)
    study_id = db.Column(db.Integer, db.ForeignKey("study.id"))

    def __init__(self, pmid, doi, author_list, title, status, study_id):
        # Copy every constructor argument onto the instance verbatim.
        (self.pmid, self.doi, self.author_list,
         self.title, self.status, self.study_id) = (pmid, doi, author_list,
                                                    title, status, study_id)

    def __dir__(self):
        return self
class StudyDesignDescriptor(Base):
    """ORM model for a design descriptor (ontology term) of a study.

    ``ref`` is not settable through the constructor; it defaults to "OBI".
    """
    __tablename__ = "study_design_descriptors"
    type = db.Column(db.String)
    accession_number = db.Column(db.String)
    ref = db.Column(db.String, default="OBI")
    study_id = db.Column(db.Integer, db.ForeignKey("study.id"))

    def __init__(self, type, accession_number, study_id):
        (self.type, self.accession_number, self.study_id) = (
            type, accession_number, study_id)

    def __dir__(self):
        return self
class StudyContacts(Base):
    """ORM model for a contact person associated with a study."""
    __tablename__ = "study_contacts"
    last_name = db.Column(db.String)
    mid_initials = db.Column(db.String)
    first_name = db.Column(db.String)
    email = db.Column(db.String)
    phone = db.Column(db.String)
    fax = db.Column(db.String)
    address = db.Column(db.String)
    affiliation = db.Column(db.String)
    role = db.Column(db.String)
    study_id = db.Column(db.Integer, db.ForeignKey("study.id"))

    def __init__(self, last_name, mid_initials, first_name, email, phone, fax, address, affiliation, role, study_id):
        # Copy every constructor argument onto the instance verbatim.
        (self.last_name, self.mid_initials, self.first_name, self.email,
         self.phone, self.fax, self.address, self.affiliation,
         self.role, self.study_id) = (last_name, mid_initials, first_name,
                                      email, phone, fax, address,
                                      affiliation, role, study_id)

    def __dir__(self):
        return self
class StudyProtocols(Base):
    """ORM model for an experimental protocol used by a study."""
    __tablename__ = "study_protocols"
    name = db.Column(db.String)
    type = db.Column(db.String)
    description = db.Column(db.String)
    uri = db.Column(db.String)
    version = db.Column(db.String)
    study_id = db.Column(db.Integer, db.ForeignKey("study.id"))

    def __init__(self, name, type, description, uri, version, study_id):
        # Copy every constructor argument onto the instance verbatim.
        (self.name, self.type, self.description,
         self.uri, self.version, self.study_id) = (name, type, description,
                                                   uri, version, study_id)

    def __dir__(self):
        return self
class StudyProtocolsParameters(Base):
    """ORM model for a parameter of a study protocol."""
    __tablename__ = "study_protocols_parameters"
    name = db.Column(db.String)
    accession_number = db.Column(db.String)
    ref = db.Column(db.String)
    protocol_id = db.Column(db.Integer, db.ForeignKey("study_protocols.id"))

    def __init__(self, name, accession_number, ref, protocol_id):
        (self.name, self.accession_number,
         self.ref, self.protocol_id) = (name, accession_number,
                                        ref, protocol_id)

    def __dir__(self):
        return self
class StudyProtocolsComponents(Base):
    """ORM model for a component (instrument/software/etc.) of a protocol."""
    __tablename__ = "study_protocols_components"
    name = db.Column(db.String)
    type = db.Column(db.String)
    accession_number = db.Column(db.String)
    ref = db.Column(db.String)
    protocol_id = db.Column(db.Integer, db.ForeignKey("study_protocols.id"))

    def __init__(self, name, type, accession_number, ref, protocol_id):
        (self.name, self.type, self.accession_number,
         self.ref, self.protocol_id) = (name, type, accession_number,
                                        ref, protocol_id)

    def __dir__(self):
        return self
class StudyFactors(Base):
    """ORM model for an experimental factor of a study.

    Note the constructor's parameter order (``study_id`` before ``type``)
    differs from the column declaration order; callers rely on it.
    """
    __tablename__ = "study_factors"
    name = db.Column(db.String)
    accession_number = db.Column(db.String)
    ref = db.Column(db.String)
    type = db.Column(db.String)
    study_id = db.Column(db.Integer, db.ForeignKey("study.id"))

    def __init__(self, name, accession_number, ref, study_id, type):
        (self.name, self.accession_number, self.ref,
         self.type, self.study_id) = (name, accession_number, ref,
                                      type, study_id)

    def __dir__(self):
        return self
class StudyFactorsTable(Base):
    """ORM model tying an ontology term to a ``StudyFactors`` row."""
    __tablename__ = "study_factors_table"
    name = db.Column(db.String)
    accession_number = db.Column(db.String)
    ref = db.Column(db.String)
    study_factor_id = db.Column(db.Integer, db.ForeignKey("study_factors.id"))

    def __init__(self, name, accession_number, ref, study_factor_id):
        self.name = name
        self.accession_number = accession_number
        self.ref = ref
        # Bug fix: the original had ``self.type = type`` here, but there is no
        # ``type`` parameter or column on this model — it stored the *builtin*
        # ``type`` on the instance. Removed.
        self.study_factor_id = study_factor_id

    def __dir__(self):
        return self
class StudySamples(Base):
    """ORM model for a sample: source, protocol used, organism and JSONB
    free-form fields."""
    __tablename__ = "study_samples"
    source_name = db.Column(db.String)
    protocol_ref = db.Column(db.Integer, db.ForeignKey("study_protocols.id"))
    sample_name = db.Column(db.String)
    organism = db.Column(db.String)
    organism_part = db.Column(db.String)
    sample_fields = db.Column(JSONB)
    study_id = db.Column(db.Integer, db.ForeignKey("study.id"))

    def __init__(self, source_name, protocol_ref, sample_name, organism, organism_part, sample_fields, study_id):
        # Bug fix: the original ended each assignment with a trailing comma,
        # silently wrapping every value in a 1-tuple (e.g. source_name became
        # ('foo',)). Store the bare values instead.
        self.source_name = source_name
        self.protocol_ref = protocol_ref
        self.sample_name = sample_name
        self.organism = organism
        self.organism_part = organism_part
        self.sample_fields = sample_fields
        self.study_id = study_id

    def __dir__(self):
        return self
class StudySpecies(Base):
    """ORM model for an organism (ontology term) attached to a study."""
    __tablename__ = "study_species"
    name = db.Column(db.String)
    accession_number = db.Column(db.String)
    ref = db.Column(db.String)
    type = db.Column(db.String)
    study_id = db.Column(db.Integer, db.ForeignKey("study.id"))

    def __init__(self, name, accession_number, ref, type, study_id):
        (self.name, self.accession_number, self.ref,
         self.type, self.study_id) = (name, accession_number, ref,
                                      type, study_id)

    def __dir__(self):
        return self
class StudySpeciesPart(Base):
    """ORM model for a part (e.g. tissue) of a ``StudySpecies`` organism."""
    __tablename__ = "study_species_part"
    name = db.Column(db.String)
    accession_number = db.Column(db.String)
    ref = db.Column(db.String)
    type = db.Column(db.String)
    species_id = db.Column(db.Integer, db.ForeignKey("study_species.id"))

    def __init__(self, name, accession_number, ref, type, species_id):
        (self.name, self.accession_number, self.ref,
         self.type, self.species_id) = (name, accession_number, ref,
                                        type, species_id)

    def __dir__(self):
        return self
|
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video feature."""
import os
import tempfile
from typing import Sequence, Optional, Union
from etils import epath
import numpy as np
import tensorflow as tf
from tensorflow_datasets.core import utils
from tensorflow_datasets.core.features import feature as feature_lib
from tensorflow_datasets.core.features import image_feature
from tensorflow_datasets.core.features import sequence_feature
from tensorflow_datasets.core.proto import feature_pb2
from tensorflow_datasets.core.utils import type_utils
# Alias for the JSON-like dict type accepted by Video.from_json_content.
Json = type_utils.Json
class Video(sequence_feature.Sequence):
  """`FeatureConnector` for videos, encoding frames individually on disk.

  Video: The image connector accepts as input a 4 dimensional `tf.uint8` array
  representing a video, a sequence of paths to encoded frames, or a path or a
  file object that can be decoded with ffmpeg. Note that not all formats in
  ffmpeg support reading from pipes, so providing a file object might fail.
  Furthermore, if a path is given that is not on the local file system, we first
  copy it to a temporary local file before passing it to ffmpeg.

  Output:
    video: tf.Tensor of type `tf.uint8` and shape
      [num_frames, height, width, channels], where channels must be 1 or 3

  Example:
    * In the DatasetInfo object:

    ```
    features=features.FeatureDict({
        'video': features.Video(shape=(None, 64, 64, 3)),
    })
    ```

    * During generation, you can use any of:

    ```
    yield {
        'video': np.ones(shape=(128, 64, 64, 3), dtype=np.uint8),
    }
    ```

    or list of frames:

    ```
    yield {
        'video': ['path/to/frame001.png', 'path/to/frame002.png'],
    }
    ```

    or path to video (including `os.PathLike` objects):

    ```
    yield {
        'video': '/path/to/video.avi',
    }
    ```

    or file object (or `bytes`):

    ```
    yield {
        'video': tf.io.gfile.GFile('/complex/path/video.avi'),
    }
    ```
  """

  def __init__(
      self,
      shape: Sequence[Optional[int]],
      encoding_format: str = 'png',
      ffmpeg_extra_args: Sequence[str] = (),
      use_colormap: bool = False,
      dtype=tf.uint8,
      doc: feature_lib.DocArg = None,
  ):
    """Initializes the connector.

    Args:
      shape: tuple of ints, the shape of the video (num_frames, height, width,
        channels), where channels is 1 or 3.
      encoding_format: The video is stored as a sequence of encoded images. You
        can use any encoding format supported by image_feature.Feature.
      ffmpeg_extra_args: A sequence of additional args to be passed to the
        ffmpeg binary. Specifically, ffmpeg will be called as: `` ffmpeg -i
        <input_file> <ffmpeg_extra_args> %010d.<encoding_format> ``
      use_colormap: Forwarded to `tfds.features.Image`. If `True`,
        `tfds.as_dataframe` will display each value in the image with a
        different color.
      dtype: tf.uint16 or tf.uint8 (default). tf.uint16 can be used only with
        png encoding_format
      doc: Documentation of this feature (e.g. description).

    Raises:
      ValueError: If the shape is invalid
    """
    shape = tuple(shape)
    if len(shape) != 4:
      raise ValueError('Video shape should be of rank 4')
    self._encoding_format = encoding_format
    self._extra_ffmpeg_args = list(ffmpeg_extra_args or [])
    super(Video, self).__init__(
        image_feature.Image(
            shape=shape[1:],
            dtype=dtype,
            encoding_format=encoding_format,
            use_colormap=use_colormap,
        ),
        length=shape[0],
        # Bug fix: the original accepted `doc` but never used it; forward it
        # to the base feature so the documentation is not silently dropped.
        doc=doc,
    )

  def _ffmpeg_decode(self, path_or_fobj):
    """Decodes a video via ffmpeg into a list of encoded frame bytes."""
    if isinstance(path_or_fobj, epath.PathLikeCls):
      ffmpeg_args = ['-i', os.fspath(path_or_fobj)]
      ffmpeg_stdin = None
    else:
      # File object: stream its content to ffmpeg through stdin.
      ffmpeg_args = ['-i', 'pipe:0']
      ffmpeg_stdin = path_or_fobj.read()
    ffmpeg_args += self._extra_ffmpeg_args

    with tempfile.TemporaryDirectory() as ffmpeg_dir:
      out_pattern = os.path.join(ffmpeg_dir, f'%010d.{self._encoding_format}')
      ffmpeg_args.append(out_pattern)
      utils.ffmpeg_run(ffmpeg_args, ffmpeg_stdin)
      # Frame file names are zero-padded, so lexicographic order matches
      # frame order.
      frames = [  # Load all encoded images
          p.read_bytes() for p in sorted(epath.Path(ffmpeg_dir).iterdir())
      ]
    return frames

  def encode_example(self, video_or_path_or_fobj):
    """Converts the given image into a dict convertible to tf example."""
    if isinstance(video_or_path_or_fobj, epath.PathLikeCls):
      video_or_path_or_fobj = os.fspath(video_or_path_or_fobj)
      if not os.path.isfile(video_or_path_or_fobj):
        # Not a local file: copy to a local temp path first, since ffmpeg
        # only reads the local file system (or stdin).
        _, video_temp_path = tempfile.mkstemp()
        try:
          tf.io.gfile.copy(
              video_or_path_or_fobj, video_temp_path, overwrite=True)
          encoded_video = self._ffmpeg_decode(video_temp_path)
        finally:
          os.unlink(video_temp_path)
      else:
        encoded_video = self._ffmpeg_decode(video_or_path_or_fobj)
    elif isinstance(video_or_path_or_fobj, bytes):
      with tempfile.TemporaryDirectory() as tmpdirname:
        video_temp_path = os.path.join(tmpdirname, 'video')
        with tf.io.gfile.GFile(video_temp_path, 'wb') as f:
          f.write(video_or_path_or_fobj)
        encoded_video = self._ffmpeg_decode(video_temp_path)
    elif hasattr(video_or_path_or_fobj, 'read'):
      encoded_video = self._ffmpeg_decode(video_or_path_or_fobj)
    else:  # List of images, np.array,...
      encoded_video = video_or_path_or_fobj
    return super(Video, self).encode_example(encoded_video)

  @classmethod
  def from_json_content(
      cls, value: Union[Json, feature_pb2.VideoFeature]) -> 'Video':
    if isinstance(value, dict):
      # For backwards compatibility
      shape = tuple(value['shape'])
      encoding_format = value['encoding_format']
      ffmpeg_extra_args = value['ffmpeg_extra_args']
      return cls(
          shape=shape,
          encoding_format=encoding_format,
          ffmpeg_extra_args=ffmpeg_extra_args,
      )
    return cls(
        shape=feature_lib.from_shape_proto(value.shape),
        dtype=feature_lib.parse_dtype(value.dtype),
        encoding_format=value.encoding_format or None,
        use_colormap=value.use_colormap,
        ffmpeg_extra_args=value.ffmpeg_extra_args,
    )

  def to_json_content(self) -> feature_pb2.VideoFeature:
    # NOTE(review): `_use_colormap` is not set by Video.__init__; presumably
    # the attribute access is forwarded to the wrapped Image feature by the
    # Sequence base class (repr_html below reads it explicitly from
    # self.feature) — confirm against sequence_feature.Sequence.
    return feature_pb2.VideoFeature(
        shape=feature_lib.to_shape_proto(self.shape),
        dtype=feature_lib.encode_dtype(self.dtype),
        encoding_format=self._encoding_format,
        use_colormap=self._use_colormap,
        ffmpeg_extra_args=self._extra_ffmpeg_args,
    )

  def repr_html(self, ex: np.ndarray) -> str:
    """Video are displayed as `<video>`."""
    return image_feature.make_video_repr_html(
        ex,
        use_colormap=self.feature._use_colormap  # pylint: disable=protected-access # pytype: disable=attribute-error
    )
|
|
# -*- coding: utf-8 -*-
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""Boto translation layer for resumable uploads.
See https://cloud.google.com/storage/docs/resumable-uploads-xml
for details.
Resumable uploads will retry interrupted uploads, resuming at the byte
count completed by the last upload attempt. If too many retries happen with
no progress (per configurable num_retries param), the upload will be
aborted in the current process.
Unlike the boto implementation of resumable upload handler, this class does
not directly interact with tracker files.
Originally Google wrote and contributed this code to the boto project,
then copied that code back into gsutil on the release of gsutil 4.0 which
supports both boto and non-boto codepaths for resumable uploads. Any bug
fixes made to this file should also be integrated to resumable_upload_handler.py
in boto, where applicable.
TODO: gsutil-beta: Add a similar comment to the boto code.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import errno
import random
import re
import socket
import time
import six
from six.moves import urllib
from six.moves import http_client
from boto import config
from boto import UserAgent
from boto.connection import AWSAuthConnection
from boto.exception import ResumableTransferDisposition
from boto.exception import ResumableUploadException
from gslib.exception import InvalidUrlError
from gslib.utils.boto_util import GetMaxRetryDelay
from gslib.utils.boto_util import GetNumRetries
from gslib.utils.constants import XML_PROGRESS_CALLBACKS
from gslib.utils.constants import UTF8
if six.PY3:
  # Python 3 removed the builtin ``long``; alias it to ``int`` so the
  # byte-offset parsing below works on both interpreter lines.
  long = int
class BotoResumableUpload(object):
"""Upload helper class for resumable uploads via boto."""
BUFFER_SIZE = 8192
RETRYABLE_EXCEPTIONS = (http_client.HTTPException, IOError, socket.error,
socket.gaierror)
# (start, end) response indicating service has nothing (upload protocol uses
# inclusive numbering).
SERVICE_HAS_NOTHING = (0, -1)
def __init__(self,
tracker_callback,
logger,
resume_url=None,
num_retries=None):
"""Constructor. Instantiate once for each uploaded file.
Args:
tracker_callback: Callback function that takes a string argument. Used
by caller to track this upload across upload
interruption.
logger: logging.logger instance to use for debug messages.
resume_url: If present, attempt to resume the upload at this URL.
num_retries: Number of times to retry the upload making no progress.
This count resets every time we make progress, so the upload
can span many more than this number of retries.
"""
if resume_url:
self._SetUploadUrl(resume_url)
else:
self.upload_url = None
self.num_retries = num_retries
self.service_has_bytes = 0 # Byte count at last service check.
# Save upload_start_point in instance state so caller can find how
# much was transferred by this ResumableUploadHandler (across retries).
self.upload_start_point = None
self.tracker_callback = tracker_callback
self.logger = logger
def _SetUploadUrl(self, url):
"""Saves URL and resets upload state.
Called when we start a new resumable upload or get a new tracker
URL for the upload.
Args:
url: URL string for the upload.
Raises InvalidUrlError if URL is syntactically invalid.
"""
parse_result = urllib.parse.urlparse(url)
if (parse_result.scheme.lower() not in ['http', 'https'] or
not parse_result.netloc):
raise InvalidUrlError('Invalid upload URL (%s)' % url)
self.upload_url = url
self.upload_url_host = (config.get('Credentials', 'gs_host', None) or
parse_result.netloc)
self.upload_url_path = '%s?%s' % (parse_result.path, parse_result.query)
self.service_has_bytes = 0
def _BuildContentRangeHeader(self, range_spec='*', length_spec='*'):
return 'bytes %s/%s' % (range_spec, length_spec)
def _QueryServiceState(self, conn, file_length):
"""Queries service to find out state of given upload.
Note that this method really just makes special case use of the
fact that the upload service always returns the current start/end
state whenever a PUT doesn't complete.
Args:
conn: HTTPConnection to use for the query.
file_length: Total length of the file.
Returns:
HTTP response from sending request.
Raises:
ResumableUploadException if problem querying service.
"""
# Send an empty PUT so that service replies with this resumable
# transfer's state.
put_headers = {
'Content-Range': (self._BuildContentRangeHeader('*', file_length)),
'Content-Length': '0'
}
return AWSAuthConnection.make_request(conn,
'PUT',
path=self.upload_url_path,
auth_path=self.upload_url_path,
headers=put_headers,
host=self.upload_url_host)
  def _QueryServicePos(self, conn, file_length):
    """Queries service to find out what bytes it currently has.

    Args:
      conn: HTTPConnection to use for the query.
      file_length: Total length of the file.

    Returns:
      (service_start, service_end), where the values are inclusive.
      For example, (0, 2) would mean that the service has bytes 0, 1, *and* 2.

    Raises:
      ResumableUploadException if problem querying service.
    """
    resp = self._QueryServiceState(conn, file_length)
    if resp.status == 200:
      # 200 (rather than 308) means the upload already completed in full.
      # To handle the boundary condition where the service has the complete
      # file, we return (service_start, file_length-1). That way the
      # calling code can always simply read up through service_end. (If we
      # didn't handle this boundary condition here, the caller would have
      # to check whether service_end == file_length and read one fewer byte
      # in that case.)
      return (0, file_length - 1) # Completed upload.
    if resp.status != 308:
      # This means the service didn't have any state for the given
      # upload ID, which can happen (for example) if the caller saved
      # the upload URL to a file and then tried to restart the transfer
      # after that upload ID has gone stale. In that case we need to
      # start a new transfer (and the caller will then save the new
      # upload URL to the tracker file).
      raise ResumableUploadException(
          'Got non-308 response (%s) from service state query' % resp.status,
          ResumableTransferDisposition.START_OVER)
    got_valid_response = False
    range_spec = resp.getheader('range')
    if range_spec:
      # Parse 'bytes=<from>-<to>' range_spec.
      # ``long`` is aliased to int on Python 3 at module level.
      m = re.search(r'bytes=(\d+)-(\d+)', range_spec)
      if m:
        service_start = long(m.group(1))
        service_end = long(m.group(2))
        got_valid_response = True
    else:
      # No Range header, which means the service does not yet have
      # any bytes. Note that the Range header uses inclusive 'from'
      # and 'to' values. Since Range 0-0 would mean that the service
      # has byte 0, omitting the Range header is used to indicate that
      # the service doesn't have any bytes.
      return self.SERVICE_HAS_NOTHING
    if not got_valid_response:
      # A Range header was present but unparsable; treat like stale state.
      raise ResumableUploadException(
          'Couldn\'t parse upload service state query response (%s)' %
          str(resp.getheaders()), ResumableTransferDisposition.START_OVER)
    if conn.debug >= 1:
      self.logger.debug('Service has: Range: %d - %d.', service_start,
                        service_end)
    return (service_start, service_end)
def _StartNewResumableUpload(self, key, headers=None):
"""Starts a new resumable upload.
Args:
key: Boto Key representing the object to upload.
headers: Headers to use in the upload requests.
Raises:
ResumableUploadException if any errors occur.
"""
conn = key.bucket.connection
if conn.debug >= 1:
self.logger.debug('Starting new resumable upload.')
self.service_has_bytes = 0
# Start a new resumable upload by sending a POST request with an
# empty body and the "X-Goog-Resumable: start" header. Include any
# caller-provided headers (e.g., Content-Type) EXCEPT Content-Length
# (and raise an exception if they tried to pass one, since it's
# a semantic error to specify it at this point, and if we were to
# include one now it would cause the service to expect that many
# bytes; the POST doesn't include the actual file bytes We set
# the Content-Length in the subsequent PUT, based on the uploaded
# file size.
post_headers = {}
for k in headers:
if k.lower() == 'content-length':
raise ResumableUploadException(
'Attempt to specify Content-Length header (disallowed)',
ResumableTransferDisposition.ABORT)
post_headers[k] = headers[k]
post_headers[conn.provider.resumable_upload_header] = 'start'
resp = conn.make_request('POST', key.bucket.name, key.name, post_headers)
# Get upload URL from response 'Location' header.
body = resp.read()
# Check for various status conditions.
if resp.status in [429, 500, 503]:
# Retry after a delay.
raise ResumableUploadException(
'Got status %d from attempt to start resumable upload. '
'Will wait/retry' % resp.status,
ResumableTransferDisposition.WAIT_BEFORE_RETRY)
elif resp.status != 200 and resp.status != 201:
raise ResumableUploadException(
'Got status %d from attempt to start resumable upload. '
'Aborting' % resp.status, ResumableTransferDisposition.ABORT)
# Else we got 200 or 201 response code, indicating the resumable
# upload was created.
upload_url = resp.getheader('Location')
if not upload_url:
raise ResumableUploadException(
'No resumable upload URL found in resumable initiation '
'POST response (%s)' % body,
ResumableTransferDisposition.WAIT_BEFORE_RETRY)
self._SetUploadUrl(upload_url)
self.tracker_callback(upload_url)
def _UploadFileBytes(self, conn, http_conn, fp, file_length,
total_bytes_uploaded, cb, num_cb, headers):
"""Attempts to upload file bytes.
Makes a single attempt using an existing resumable upload connection.
Args:
conn: HTTPConnection from the boto Key.
http_conn: Separate HTTPConnection for the transfer.
fp: File pointer containing bytes to upload.
file_length: Total length of the file.
total_bytes_uploaded: The total number of bytes uploaded.
cb: Progress callback function that takes (progress, total_size).
num_cb: Granularity of the callback (maximum number of times the
callback will be called during the file transfer). If negative,
perform callback with each buffer read.
headers: Headers to be used in the upload requests.
Returns:
(etag, generation, metageneration) from service upon success.
Raises:
ResumableUploadException if any problems occur.
"""
buf = fp.read(self.BUFFER_SIZE)
if cb:
# The cb_count represents the number of full buffers to send between
# cb executions.
if num_cb > 2:
cb_count = file_length / self.BUFFER_SIZE / (num_cb - 2)
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(total_bytes_uploaded, file_length)
# Build resumable upload headers for the transfer. Don't send a
# Content-Range header if the file is 0 bytes long, because the
# resumable upload protocol uses an *inclusive* end-range (so, sending
# 'bytes 0-0/1' would actually mean you're sending a 1-byte file).
put_headers = headers.copy() if headers else {}
if file_length:
if total_bytes_uploaded == file_length:
range_header = self._BuildContentRangeHeader('*', file_length)
else:
range_header = self._BuildContentRangeHeader(
'%d-%d' % (total_bytes_uploaded, file_length - 1), file_length)
put_headers['Content-Range'] = range_header
# Set Content-Length to the total bytes we'll send with this PUT.
put_headers['Content-Length'] = str(file_length - total_bytes_uploaded)
http_request = AWSAuthConnection.build_base_http_request(
conn,
'PUT',
path=self.upload_url_path,
auth_path=None,
headers=put_headers,
host=self.upload_url_host)
http_conn.putrequest('PUT', http_request.path)
for k in put_headers:
http_conn.putheader(k, put_headers[k])
http_conn.endheaders()
# Turn off debug on http connection so upload content isn't included
# in debug stream.
http_conn.set_debuglevel(0)
while buf:
# Some code is duplicated here, but separating the PY2 and PY3 paths makes
# this easier to remove PY2 blocks when we move to PY3 only.
if six.PY2:
http_conn.send(buf)
total_bytes_uploaded += len(buf)
else:
if isinstance(buf, bytes):
http_conn.send(buf)
total_bytes_uploaded += len(buf)
else:
# Probably a unicode/str object, try encoding.
buf_bytes = buf.encode(UTF8)
http_conn.send(buf_bytes)
total_bytes_uploaded += len(buf_bytes)
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(total_bytes_uploaded, file_length)
i = 0
buf = fp.read(self.BUFFER_SIZE)
# Restore http connection debug level.
http_conn.set_debuglevel(conn.debug)
if cb:
cb(total_bytes_uploaded, file_length)
if total_bytes_uploaded != file_length:
# Abort (and delete the tracker file) so if the user retries
# they'll start a new resumable upload rather than potentially
# attempting to pick back up later where we left off.
raise ResumableUploadException(
'File changed during upload: EOF at %d bytes of %d byte file.' %
(total_bytes_uploaded, file_length),
ResumableTransferDisposition.ABORT)
resp = http_conn.getresponse()
if resp.status == 200:
# Success.
return (resp.getheader('etag'), resp.getheader('x-goog-generation'),
resp.getheader('x-goog-metageneration'))
# Retry timeout (408) and status 429, 500 and 503 errors after a delay.
elif resp.status in [408, 429, 500, 503]:
disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY
else:
# Catch all for any other error codes.
disposition = ResumableTransferDisposition.ABORT
raise ResumableUploadException(
'Got response code %d while attempting '
'upload (%s)' % (resp.status, resp.reason), disposition)
  def _AttemptResumableUpload(self, key, fp, file_length, headers, cb, num_cb):
    """Attempts a resumable upload.

    Args:
      key: Boto key representing object to upload.
      fp: File pointer containing upload bytes.
      file_length: Total length of the upload.
      headers: Headers to be used in upload requests.
      cb: Progress callback function that takes (progress, total_size).
      num_cb: Granularity of the callback (maximum number of times the
              callback will be called during the file transfer). If negative,
              perform callback with each buffer read.

    Returns:
      (etag, generation, metageneration) from service upon success.

    Raises:
      ResumableUploadException if any problems occur.
    """
    (service_start, service_end) = self.SERVICE_HAS_NOTHING
    conn = key.bucket.connection
    if self.upload_url:
      # Try to resume existing resumable upload.
      try:
        (service_start,
         service_end) = (self._QueryServicePos(conn, file_length))
        self.service_has_bytes = service_start
        if conn.debug >= 1:
          self.logger.debug('Resuming transfer.')
      except ResumableUploadException as e:
        if conn.debug >= 1:
          self.logger.debug('Unable to resume transfer (%s).', e.message)
        # Stale or invalid resume URL: fall back to a fresh upload.
        self._StartNewResumableUpload(key, headers)
    else:
      self._StartNewResumableUpload(key, headers)
    # upload_start_point allows the code that instantiated the
    # ResumableUploadHandler to find out the point from which it started
    # uploading (e.g., so it can correctly compute throughput).
    if self.upload_start_point is None:
      self.upload_start_point = service_end
    # service_end is inclusive, so the next byte to send is service_end + 1.
    total_bytes_uploaded = service_end + 1
    # Start reading from the file based upon the number of bytes that the
    # server has so far.
    if total_bytes_uploaded < file_length:
      fp.seek(total_bytes_uploaded)
    conn = key.bucket.connection
    # Get a new HTTP connection (vs conn.get_http_connection(), which reuses
    # pool connections) because httplib requires a new HTTP connection per
    # transaction. (Without this, calling http_conn.getresponse() would get
    # "ResponseNotReady".)
    http_conn = conn.new_http_connection(self.upload_url_host, conn.port,
                                         conn.is_secure)
    http_conn.set_debuglevel(conn.debug)
    # Make sure to close http_conn at end so if a local file read
    # failure occurs partway through service will terminate current upload
    # and can report that progress on next attempt.
    try:
      return self._UploadFileBytes(conn, http_conn, fp, file_length,
                                   total_bytes_uploaded, cb, num_cb, headers)
    except (ResumableUploadException, socket.error):
      resp = self._QueryServiceState(conn, file_length)
      if resp.status == 400:
        raise ResumableUploadException(
            'Got 400 response from service state query after failed resumable '
            'upload attempt. This can happen for various reasons, including '
            'specifying an invalid request (e.g., an invalid canned ACL) or '
            'if the file size changed between upload attempts',
            ResumableTransferDisposition.ABORT)
      else:
        raise
    finally:
      http_conn.close()
def HandleResumableUploadException(self, e, debug):
if e.disposition == ResumableTransferDisposition.ABORT_CUR_PROCESS:
if debug >= 1:
self.logger.debug(
'Caught non-retryable ResumableUploadException (%s); '
'aborting but retaining tracker file', e.message)
raise
elif e.disposition == ResumableTransferDisposition.ABORT:
if debug >= 1:
self.logger.debug(
'Caught non-retryable ResumableUploadException (%s); '
'aborting and removing tracker file', e.message)
raise
elif e.disposition == ResumableTransferDisposition.START_OVER:
raise
else:
if debug >= 1:
self.logger.debug('Caught ResumableUploadException (%s) - will retry',
e.message)
def TrackProgressLessIterations(self,
service_had_bytes_before_attempt,
debug=0):
"""Tracks the number of iterations without progress.
Performs randomized exponential backoff.
Args:
service_had_bytes_before_attempt: Number of bytes the service had prior
to this upload attempt.
debug: debug level 0..3
"""
# At this point we had a re-tryable failure; see if made progress.
if self.service_has_bytes > service_had_bytes_before_attempt:
self.progress_less_iterations = 0 # If progress, reset counter.
else:
self.progress_less_iterations += 1
if self.progress_less_iterations > self.num_retries:
# Don't retry any longer in the current process.
raise ResumableUploadException(
'Too many resumable upload attempts failed without '
'progress. You might try this upload again later',
ResumableTransferDisposition.ABORT_CUR_PROCESS)
# Use binary exponential backoff to desynchronize client requests.
sleep_time_secs = min(random.random() * (2**self.progress_less_iterations),
GetMaxRetryDelay())
if debug >= 1:
self.logger.debug(
'Got retryable failure (%d progress-less in a row).\n'
'Sleeping %3.1f seconds before re-trying',
self.progress_less_iterations, sleep_time_secs)
time.sleep(sleep_time_secs)
  def SendFile(self,
               key,
               fp,
               size,
               headers,
               canned_acl=None,
               cb=None,
               num_cb=XML_PROGRESS_CALLBACKS):
    """Upload a file to a key into a bucket on GS, resumable upload protocol.
    Args:
      key: `boto.s3.key.Key` or subclass representing the upload destination.
      fp: File pointer to upload
      size: Size of the file to upload.
      headers: The headers to pass along with the PUT request
      canned_acl: Optional canned ACL to apply to object.
      cb: Callback function that will be called to report progress on
        the upload. The callback should accept two integer parameters, the
        first representing the number of bytes that have been successfully
        transmitted to GS, and the second representing the total number of
        bytes that need to be transmitted.
      num_cb: (optional) If a callback is specified with the cb parameter, this
        parameter determines the granularity of the callback by defining
        the maximum number of times the callback will be called during the
        file transfer. Providing a negative integer will cause your
        callback to be called with each buffer read.
    Raises:
      ResumableUploadException if a problem occurs during the transfer.
    """
    if not headers:
      headers = {}
    # If Content-Type header is present and set to None, remove it.
    # This is gsutil's way of asking boto to refrain from auto-generating
    # that header.
    content_type = 'Content-Type'
    if content_type in headers and headers[content_type] is None:
      del headers[content_type]
    if canned_acl:
      headers[key.provider.acl_header] = canned_acl
    headers['User-Agent'] = UserAgent
    file_length = size
    debug = key.bucket.connection.debug
    # Use num-retries from constructor if one was provided; else check
    # for a value specified in the boto config file; else default to 5.
    if self.num_retries is None:
      self.num_retries = GetNumRetries()
    self.progress_less_iterations = 0
    # The loop exits either via the `return` on success, or via a raise from
    # HandleResumableUploadException / TrackProgressLessIterations below.
    while True: # Retry as long as we're making progress.
      service_had_bytes_before_attempt = self.service_has_bytes
      try:
        # Save generation and metageneration in class state so caller
        # can find these values, for use in preconditions of future
        # operations on the uploaded object.
        _, self.generation, self.metageneration = self._AttemptResumableUpload(
            key, fp, file_length, headers, cb, num_cb)
        key.generation = self.generation
        if debug >= 1:
          self.logger.debug('Resumable upload complete.')
        return
      except self.RETRYABLE_EXCEPTIONS as e:
        if debug >= 1:
          self.logger.debug('Caught exception (%s)', e.__repr__())
        if isinstance(e, IOError) and e.errno == errno.EPIPE:
          # Broken pipe error causes httplib to immediately
          # close the socket (http://bugs.python.org/issue5542),
          # so we need to close the connection before we resume
          # the upload (which will cause a new connection to be
          # opened the next time an HTTP request is sent).
          key.bucket.connection.connection.close()
      except ResumableUploadException as e:
        # May re-raise (non-retryable dispositions) or fall through to the
        # backoff below (retryable ones).
        self.HandleResumableUploadException(e, debug)
      self.TrackProgressLessIterations(service_had_bytes_before_attempt,
                                       debug=debug)
|
|
import math
import pytest
from pycket.interpreter import *
from pycket.values import *
from pycket.prims import *
from pycket.test.testhelper import run_fix, run, run_top, run_std, run_flo
from pycket.error import SchemeException
def test_flonum_tostring():
    # Checks, under the RPython llinterp, that W_Flonum renders full double
    # precision (the digits racket prints for pi).
    # NOTE(review): the py2 `print` statement below means this file targets
    # Python 2 / RPython; do not modernize mechanically.
    from rpython.rtyper.test.test_llinterp import interpret
    import math
    s = '3.141592653589793' # racket -e "pi"
    def float_tostring(x):
        print W_Flonum(x).tostring()
        return s in W_Flonum(x).tostring()
    res = interpret(float_tostring, [math.pi])
    assert res
def test_mul_zero():
    """Multiplying exact 0 by an inexact number yields exact 0, in both orders."""
    for expr in ("(* 0 1.2)", "(* 1.2 0)"):
        run_fix(expr, 0)
def test_quotient():
    """Exercise (quotient a b) over fixnums, bignums and flonums.

    Covers sign combinations, zero dividends, bignum/bignum division and
    flonum quotients (which stay flonums).
    """
    run_fix("(quotient 0 1)", 0)
    run_fix("(quotient 0 -1)", 0)
    run_fix("(quotient 0 2)", 0)
    run_fix("(quotient 0 -2)", 0)
    run_fix("(quotient 0 3)", 0)
    run_fix("(quotient 1 1)", 1)
    run_fix("(quotient -1 1)", -1)
    run_fix("(quotient 1 -1)", -1)
    run_fix("(quotient -1 -1)", 1)
    run_fix("(quotient 1 2)", 0)
    run_fix("(quotient -1 2)", 0)
    run_fix("(quotient 1 -2)", 0)
    run_fix("(quotient -1 -2)", 0)
    run_fix("(quotient -1234 -10)", 123)
    run_fix("(quotient 1234 1234)", 1)
    big = 2 ** 70
    run_fix("(quotient %s %s)" % (big, big), 1)
    run_fix("(quotient %s %s)" % (-big, big), -1)
    run_fix("(quotient %s %s)" % (big, -big), -1)
    run_fix("(quotient %s %s)" % (-big, -big), 1)
    run_fix("(quotient %s %s)" % (big+1, big), 1)
    run_fix("(quotient %s %s)" % (-(big+1), big), -1)
    # Use explicit floor division: `big / 2` only produced an exact integer
    # under Python 2 semantics; `//` is identical there and stays an int
    # under Python 3 (plain / would yield a float and break str()).
    res = run(str(big // 2))
    run("(quotient %s 2)" % (big, ), res)
    res = run("(quotient 8.0 2.0)")
    assert isinstance(res, W_Flonum) and res.value == 4.0
    res = run("(quotient 1.0 2.0)")
    assert isinstance(res, W_Flonum) and res.value == 0.0
# Doctest fixture: (remainder a b) takes the sign of the dividend, across
# fixnum, flonum and bignum operands.
def test_remainder(doctest):
    """
    > (remainder 0 1)
    0
    > (remainder 0 -1)
    0
    > (remainder 0 2)
    0
    > (remainder 0 -2)
    0
    > (remainder 1 1)
    0
    > (remainder -1 1)
    0
    > (remainder 1 -1)
    0
    > (remainder 2 1)
    0
    > (remainder 2 -1)
    0
    > (remainder 4 3)
    1
    > (remainder 4 -3)
    1
    > (remainder -4 3)
    -1
    > (remainder 10 3)
    1
    > (remainder -10.0 3)
    -1.0
    > (remainder 10.0 -3)
    1.0
    > (remainder -10 -3)
    -1
    > (remainder 11111111111111111111111111111111111111 3333333333333333333333333333333)
    1111111111111111111111112222222
    > (remainder 11111111111111111111111111111111111111 -3333333333333333333333333333333)
    1111111111111111111111112222222
    > (remainder -11111111111111111111111111111111111111 3333333333333333333333333333333)
    -1111111111111111111111112222222
    > (remainder -11111111111111111111111111111111111111 -3333333333333333333333333333333)
    -1111111111111111111111112222222
    """
# Doctest fixture: (modulo a b) takes the sign of the divisor, including
# bignum operands.
def test_modulo(doctest):
    """
    > (modulo 10 3)
    1
    > (modulo -10.0 3)
    2.0
    > (modulo 10.0 -3)
    -2.0
    > (modulo -10 -3)
    -1
    > (modulo 1111111111111111111111111111111111111111111111111111111111111111111111111111 -2222222222222222222222222222222222222222222)
    -2222222222111111111111111111111111111111111
    > (modulo -1111111111111111111111111111111111111111111111111111111111111111111111111111 2222222222222222222222222222222222222222222)
    2222222222111111111111111111111111111111111
    > (modulo -1111111111111111111111111111111111111111111111111111111111111111111111111111 -2222222222222222222222222222222222222222222)
    -111111111111111111111111111111111
    """
def test_div_fix():
    """Exact division: integral results are fixnums, others become rationals."""
    run_fix("(/ 6 3)", 2)
    result = run("(/ 1 2)")
    assert result.tostring() == "1/2"
# Doctest fixture: division of exact complex numbers yields exact
# rational components.
def test_div_complex(doctest):
    """
    > (/ 2+3i 2)
    1+3/2i
    > (/ 2+3i 3-4i)
    -6/25+17/25i
    """
# (<) and (>) across fixnums, bignums and flonums; the huge literal checks
# bignum-vs-flonum comparison.
def test_lt():
    run("(< 0 1)", w_true)
    run("(< 0 1000000000000000000000000000)", w_true)
    run("(< 10000000000000000000000000001000000000000000000000000000 0 )", w_false)
    run("(> 35074662110434038747627587960280857993524015880330828824075798024790963850563322203657080886584969261653150406795437517399294548941469959754171038918004700847889956485329097264486802711583462946536682184340138629451355458264946342525383619389314960644665052551751442335509249173361130355796109709885580674313954210217657847432626760733004753275317192133674703563372783297041993227052663333668509952000175053355529058880434182538386715523683713208549376 0.0)", w_true)
# Mixed fixnum/flonum comparisons in both argument orders.
def test_lt_fixnum_flonum():
    run("(< 0 1.0)", w_true)
    run("(< 0 1000000000000000000000000000.0)", w_true)
    run("(< 10000000000000000000000000001000000000000000000000000000 0.0 )", w_false)
    run("(< 0.0 1)", w_true)
    run("(< 0.0 1000000000000000000000000000)", w_true)
    run("(< 10000000000000000000000000001000000000000000000000000000.0 0 )", w_false)
# Mixed fixnum/bignum comparisons.
def test_lt_fixnum_bignum():
    run("(< (expt 10 100) 1)", w_false)
    run("(< 1 (expt 10 100))", w_true)
# Mixed flonum/bignum comparisons.
def test_lt_flonum_bignum():
    run("(< (expt 10 100) 1.0)", w_false)
    run("(< 1.0 (expt 10 100))", w_true)
# Doctest fixture: (=), (<) over mixed exactness/complex; E lines expect
# errors for too-few or non-numeric arguments.
def test_comparison_doctest(doctest):
    """
    > (= 5+5i 5+5.0i)
    #t
    > (= 5+5i 5+5.0i 5.0+5i 5.0+5.0i)
    #t
    > (= 2/3 2/3)
    #t
    > (= 5+5i 5+3i)
    #f
    > (= 2/3 2/7)
    #f
    > (< -1/2 -1/3 2/3 11/10)
    #t
    > (< -2/3 1)
    #t
    > (< 2/3 1/3)
    #f
    E (< 1)
    E (< )
    E (< #f #t)
    """
# negative?/positive? over fixnums, flonums, bignums and rationals;
# zero is neither negative nor positive.
def test_neg_pos():
    run("(negative? -1)", w_true)
    run("(negative? 0)", w_false)
    run("(negative? 1)", w_false)
    run("(negative? -1.0)", w_true)
    run("(negative? 0.0)", w_false)
    run("(negative? 1.0)", w_false)
    run("(negative? -10000000000000000000000000001000000000000000000000000000)", w_true)
    run("(negative? 10000000000000000000000000001000000000000000000000000000)", w_false)
    run("(negative? -1/2)", w_true)
    run("(negative? 1/2)", w_false)
    run("(positive? -1)", w_false)
    run("(positive? 0)", w_false)
    run("(positive? 1)", w_true)
    run("(positive? -1.0)", w_false)
    run("(positive? 0.0)", w_false)
    run("(positive? 1.0)", w_true)
    run("(positive? -10000000000000000000000000001000000000000000000000000000)", w_false)
    run("(positive? 10000000000000000000000000001000000000000000000000000000)", w_true)
    run("(positive? -1/2)", w_false)
    run("(positive? 1/2)", w_true)
# even?/odd? over fixnums, integral flonums and bignums.
def test_even_odd():
    run("(even? -1)", w_false)
    run("(even? 0)", w_true)
    run("(even? 1)", w_false)
    run("(even? -1.0)", w_false)
    run("(even? 0.0)", w_true)
    run("(even? 1.0)", w_false)
    run("(even? -10000000000000000000000000001000000000000000000000000000)", w_true)
    run("(even? 10000000000000000000000000001000000000000000000000000000)", w_true)
    run("(even? -10000000000000000000000000001000000000000000000000000001)", w_false)
    run("(even? 10000000000000000000000000001000000000000000000000000001)", w_false)
    run("(odd? -1)", w_true)
    run("(odd? 0)", w_false)
    run("(odd? 1)", w_true)
    run("(odd? -1.0)", w_true)
    run("(odd? 0.0)", w_false)
    run("(odd? 1.0)", w_true)
    run("(odd? -10000000000000000000000000001000000000000000000000000000)", w_false)
    run("(odd? 10000000000000000000000000001000000000000000000000000000)", w_false)
    run("(odd? -10000000000000000000000000001000000000000000000000000001)", w_true)
    run("(odd? 10000000000000000000000000001000000000000000000000000001)", w_true)
    run("(even? 1.0)", w_false)
    run("(even? 2.0)", w_true)
    run("(odd? 1.0)", w_true)
    run("(odd? 2.0)", w_false)
# Doctest fixture: zero? over all numeric tower types, including complex
# (both components must be zero).
def test_zero(doctest):
    """
    > (zero? -1)
    #f
    > (zero? 0)
    #t
    > (zero? 1)
    #f
    > (zero? -1.0)
    #f
    > (zero? 0.0)
    #t
    > (zero? 1.0)
    #f
    > (zero? 7/3)
    #f
    > (zero? 0.0+0.0i)
    #t
    > (zero? 0.0+0.1i)
    #f
    """
# Doctest fixture: string->number on ints, flonums, bignums and garbage;
# ;-prefixed cases (extflonums, complex, radix argument) are not yet supported.
def test_string_to_number(doctest):
    """
    ;! (require racket/extflonum)
    ;> (extflonum? (string->number "3.0t0"))
    ;#t
    ; not yet supported
    ;> (string->number "3.0+2.5i")
    ;3.0+2.5i
    > (string->number "hello")
    #f
    ;> (string->number "111" 7)
    ;57
    ;> (string->number "#b111" 7)
    ;7
    > (string->number "13")
    13
    > (string->number "-13")
    -13
    > (string->number "-1.3")
    -1.3
    > (string->number "1.3")
    1.3
    > (string->number "-10000000000000000000000000001000000000000000000000000000")
    -10000000000000000000000000001000000000000000000000000000
    > (string->number "10000000000000000000000000001000000000000000000000000000")
    10000000000000000000000000001000000000000000000000000000
    """
    assert doctest
# Doctest fixture: number->string round-trips; skipped until tostring no
# longer uses 'write' semantics.
@pytest.mark.skip(reason="will be resolved then tostring is not 'write'")
def test_number_to_string(doctest):
    """
    > (number->string 1)
    "1"
    > (number->string 1.0)
    "1.0"
    > (number->string 1.0+3i)
    "1.0+3.0i"
    > (number->string 4172093847129036571265901283764790162495071902346790126349016234)
    "4172093847129036571265901283764790162495071902346790126349016234"
    """
# Doctest fixture: one- and two-argument atan, including complex and
# infinite arguments; currently expected to fail.
@pytest.mark.xfail
def test_atan(doctest):
    """
    > (atan 0.5)
    0.4636476090008061
    > (atan 2 1)
    1.1071487177940904
    > (atan -2 -1)
    -2.0344439357957027
    > (atan 1.0+5.0i)
    1.530881333938778+0.19442614214700213i
    > (atan +inf.0 -inf.0)
    2.356194490192345
    """
# doesn't run yet and takes awfully long
# Doctest fixture: trig and hyperbolic functions over flonums and complex
# numbers; ;-comment lines record alternative printed values.
@pytest.mark.skipif("True")
def test_trigonometry(doctest):
    """
    ! (require racket/math)
    > (sin 3.14159)
    2.65358979335e-06
    > (sin 1.0+5.0i)
    62.44551846769654+40.0921657779984i
    ; 62.4455184677+40.092165778i
    ; 62.44551846769653+40.0921657779984i
    > (cos 3.14159)
    -0.9999999999964793
    > (cos 1.0+5.0i)
    40.09580630629883-62.43984868079963i
    ;40.095806306298826-62.43984868079963i
    > (tan 0.7854)
    1.0000036732118494
    ;1.0000036732118496
    > (tan 1.0+5.0i)
    8.256719834243142e-05+1.0000377833796008i
    ;8.256719834227411e-05+1.0000377833796008i
    > (sinh 3.14159)
    11.548708597009512
    > (sinh 1.0+5.0i)
    0.3333601389479929-1.4796974784869428i
    > (cosh 3.14159)
    11.591922629945447
    > (cosh 1.0+5.0i)
    0.43771362521767465-1.1269289521981367i
    > (tanh 0.7854)
    0.6557952493735839
    > (tanh 1.0+5.0i)
    1.2407479829240695-0.18610947764730412i
    ;1.2407479829240697-0.18610947764730418i
    > (asin 0.25)
    0.25268025514207865
    > (asin 1.0+5.0i)
    0.1937931365549321+2.3309746530493123i
    > (acos 0.25)
    1.318116071652818
    > (acos 1.0+5.0i)
    1.3770031902399644-2.3309746530493123i
    """
# Doctest fixture: flonum-specific ops (fl+, fl-, flmin, fl<, ...); the E
# case checks that fl= rejects a fixnum argument.
def test_flonum_special(doctest):
    """
    ! (require '#%flfxnum)
    > (fl+ 1.0 2.0)
    3.0
    > (fl- 2.0 1.0)
    1.0
    > (fl* 2.0 0.5)
    1.0
    > (fl/ 2.0 0.5)
    4.0
    > (flmin 1.0 2.0)
    1.0
    > (flmin 2.0 1.0)
    1.0
    > (flmax 1.0 2.0)
    2.0
    > (flmax 2.0 1.0)
    2.0
    > (fl> 2.5 1.5)
    #t
    > (fl>= 2.5 1.5)
    #t
    > (fl>= 1.5 1.5)
    #t
    > (fl>= -1.5 1.5)
    #f
    > (fl<= -1.5 1.5)
    #t
    > (fl<= -10.5 -10.5)
    #t
    > (fl< -10.0 -10.0)
    #f
    > (fl= -10.0 -10.0)
    #t
    E (fl= -10 -10.0)
    """
# Doctest fixture: fixnum-specific ops (fx+, fxlshift, fxquotient, ...);
# E cases check rejection of flonum arguments and oversized shifts.
def test_fixnum_special(doctest):
    """
    ! (require '#%flfxnum)
    > (fx+ 1 2)
    3
    E (fx+ 1 1.2)
    > (fx- 2 1)
    1
    > (fx* 2 5)
    10
    > (fxmin 1 2)
    1
    > (fxmin 2 1)
    1
    > (fxmax 1 2)
    2
    > (fxmax 2 1)
    2
    > (fx> 2 1)
    #t
    > (fx>= 2 1)
    #t
    > (fx>= 1 1)
    #t
    > (fx>= -1 1)
    #f
    > (fx<= -1 1)
    #t
    > (fx<= 1 2 3)
    #t
    > (fx<= -10 -10)
    #t
    > (fx< -10 -10)
    #f
    > (fx= -10 -10)
    #t
    E (fx= -10 -10.0)
    > (fxand 2 3)
    2
    > (fxlshift 10 10)
    10240
    E (fxlshift 10 63)
    E (fxlshift 10 100)
    > (fxrshift 1 20)
    0
    > (fxrshift 20 1)
    10
    > (fxrshift -20 1)
    -10
    > (fxmodulo 10 3)
    1
    > (fxmodulo -10 -3)
    -1
    > (fxremainder 10 3)
    1
    > (fxremainder -10 -3)
    -1
    > (fxquotient 10 3)
    3
    """
# Doctest fixture: chained generic comparators and their arities,
# following the Racket reference examples.
def test_all_comparators(doctest):
    """
    ; http://docs.racket-lang.org/reference/generic-numbers.html
    > (= 1 1.0)
    #t
    > (= 1 2)
    #f
    > (= 2+3i 2+3i 2+3i)
    #t
    > (< 1 1)
    #f
    > (< 1 2 3)
    #t
    > (< 1 +inf.0)
    #t
    > (< 1 +nan.0)
    #f
    > (<= 1 1)
    #t
    > (<= 1 2 1)
    #f
    > (> 1 1)
    #f
    > (> 3 2 1)
    #t
    > (> +inf.0 1)
    #t
    > (> +nan.0 1)
    #f
    > (>= 1 1)
    #t
    > (>= 1 2 1)
    #f
    > (procedure-arity-includes? = 0)
    #f
    > (procedure-arity-includes? = 1)
    #f
    > (procedure-arity-includes? = 2)
    #t
    > (procedure-arity-includes? = 3)
    #t
    > (procedure-arity-includes? = 4)
    #t
    """
# Doctest fixture: exactness contagion edge cases (exact 0 times inexact,
# complex results collapsing to reals); currently expected to fail.
@pytest.mark.xfail
def test_edge_cases(doctest):
    """
    > (* 0.0 1)
    0.0
    > (* 0 0.1)
    0
    > (* 0.0 0)
    0
    > (+ -0.1 0.1)
    0.0
    > (complex? (+ 1+1i 1-1i))
    #t
    > (complex? 2)
    #t
    > (+ 1+0.5i 1-0.5i)
    2.0+0.0i
    > (real? (+ 1+0.5i 1-0.5i))
    #f
    > (integer? (+ 1+1i 1-1i))
    #t
    > (+ 1+1i 1-1i)
    2
    > (real? 3/7)
    #t
    """
# Doctest fixture: rational arithmetic, mixed with fixnums/flonums and a
# bignum-to-rational reduction case.
def test_rational(doctest):
    """
    > (/ 1 2)
    1/2
    > (+ 1/2 1/3)
    5/6
    > (+ 1/2 1)
    3/2
    > (+ 1/2 0.5)
    1.0
    > (- 4/5 -7/9)
    71/45
    > (- 1/2 2)
    -3/2
    > (- 1/2 0.0)
    0.5
    > (/ 2/3 3/2)
    4/9
    > (/ -2/3 -5)
    2/15
    > (/ 0.5 -1/4)
    -2.0
    > (* 2/3 3/2)
    1
    > (* 2 3/2)
    3
    > (* 3/2 5)
    15/2
    > (* 1/2 2.0)
    1.0
    > (+ 1/4 1/4)
    1/2
    > (sub1 5/3)
    2/3
    ; bignum to rational
    > (/ 12323111111111111111111111111111111111111112222222222222 232321122)
    2053851851851851851851851851851851851851852037037037037/38720187
    """
def random_bigint(max_size):
    """Return a random rbigint with 1..max_size-1 decimal digits and random sign.

    The RNG is consumed in the same order as before (length, sign, digits),
    so behavior under a fixed seed is unchanged.
    """
    from rpython.rlib.rbigint import rbigint
    import random
    num_digits = random.randrange(1, max_size)
    sign = random.choice([-1, 1])
    decimal_digits = "".join(random.choice("0123456789") for _ in range(num_digits))
    magnitude = rbigint.fromstr(decimal_digits, base=10)
    return magnitude.int_mul(sign)
# Directed gcd cases over rbigints, including sign combinations.
# NOTE(review): expectations like gcd(-a, -b) == -r encode pycket's
# sign-handling convention for gcd (result can be negative when both
# arguments are) — confirm against pycket.arithmetic.gcd before changing.
def test_gcd():
    from pycket.arithmetic import gcd
    from rpython.rlib.rbigint import rbigint
    def gcd_long(a, b):
        # Convenience wrapper: plain Python longs in, plain long out.
        return gcd(rbigint.fromlong(a), rbigint.fromlong(b)).tolong()
    for a, b, r in [(5, 0, 5),
                    (2**1000, 0, 2**1000),
                    (2**1000 + 1, 3, 1),
                    (4, 2, 2),
                    (3*3*5*7*11*2**10, 2**7*3*7*11*13, 2**7*3*7*11)]:
        assert gcd_long(a, b) == r
        assert gcd_long(b, a) == r
        if b:
            assert gcd_long(a, -b) == r
            assert gcd_long(-a, -b) == (-r if a else r)
            assert gcd_long(a, b) == r
        else:
            assert gcd_long(-a, b) == r
        if a:
            assert gcd_long(b, -a) == r
            assert gcd_long(-b, -a) == (-r if b else r)
            assert gcd_long(-b, a) == r
        else:
            assert gcd_long(-b, a) == r
# Property-based gcd checks: commutativity, idempotence, associativity and
# the subtraction identity gcd(a, b) == gcd(a-b, b) for a >= b >= 0.
def test_gcd_random():
    from pycket.arithmetic import gcd
    for _ in range(100):
        a = random_bigint(100)
        b = random_bigint(100)
        c = random_bigint(100)
        # Commutative
        assert gcd(a, b) == gcd(b, a)
        # Idempotent
        assert gcd(a, a) == a
        assert gcd(b, b) == b
        # Associative
        assert gcd(a, gcd(b, c)) == gcd(gcd(a, b), c)
        a = a.abs()
        b = b.abs()
        if a.ge(b):
            assert gcd(a, b) == gcd(a.sub(b), b)
        else:
            assert gcd(a, b) == gcd(a, b.sub(a))
# count_trailing_zeros(1 << i) must be exactly i, beyond one digit's width.
def test_count_trailing_zeros():
    from rpython.rlib.rbigint import ONERBIGINT
    from pycket.arithmetic import count_trailing_zeros
    for i in range(1025):
        assert i == count_trailing_zeros(ONERBIGINT.lshift(i))
# Doctest fixture for (sub1 x) plus a direct check that subtracting 1 from
# the most negative fixnum overflows into a bignum.
# NOTE(review): `sys.maxint` is Python 2 only; `sys` is presumably brought
# in by one of the star imports above — verify.
def test_sub1(doctest):
    """
    > (sub1 1)
    0
    > (sub1 -11111111111111111111111111111111112)
    -11111111111111111111111111111111113
    > (sub1 1.4)
    0.3999999999999999
    > (sub1 1.5)
    0.5
    > (sub1 1+1i)
    0+1i
    > (sub1 1/2)
    -1/2
    """
    w_x = W_Fixnum(-sys.maxint-1).arith_sub1()
    assert isinstance(w_x, W_Bignum)
# Doctest fixture: (round x) uses banker's rounding (ties to even) over
# flonums, fixnums, bignums and rationals.
def test_round(doctest):
    """
    > (round 0.1)
    0.0
    > (round 0.0)
    0.0
    > (round 0.5)
    0.0
    > (round 0.51)
    1.0
    > (round -0.5)
    -0.0
    > (round -0.5001)
    -1.0
    > (round 1)
    1
    > (round 111111111111111111111111111111111111111111111111)
    111111111111111111111111111111111111111111111111
    > (round 1/2)
    0
    > (round 3/2)
    2
    > (round 11/20)
    1
    > (round -11/20)
    -1
    > (round -1/2)
    0
    > (round -5/4)
    -1
    > (round 5/4)
    1
    > (round 111111111111111111111111111111111111111/2)
    55555555555555555555555555555555555556
    """
# Doctest fixture: flround mirrors round for flonums, including -0.0.
def test_flround(doctest):
    """
    ! (require '#%flfxnum)
    > (flround 0.1)
    0.0
    > (flround 0.0)
    0.0
    > (flround 0.5)
    0.0
    > (flround 0.51)
    1.0
    > (flround -0.5)
    -0.0
    > (flround -0.5001)
    -1.0
    """
# Doctest fixture: max/min with exactness contagion (any inexact argument
# makes the result inexact) and NaN propagation.
def test_max(doctest):
    """
    ! (require racket/math)
    > (max 1 1.1)
    1.1
    > (max 1 0.2)
    1.0
    > (max 111111111111111111111111111111111111111 5)
    111111111111111111111111111111111111111
    > (max 111111111111111111111111111111111111111111111111111111111111 0.2)
    1.1111111111111112e+59
    > (max 1 3 2)
    3
    > (max 1 3 2.0)
    3.0
    > (max 1 1.1 0)
    1.1
    > (max 1 0.2 -5)
    1.0
    > (max 111111111111111111111111111111111111111 5 6)
    111111111111111111111111111111111111111
    > (max 111111111111111111111111111111111111111111111111111111111111 0.2 1023)
    1.1111111111111112e+59
    > (max 1 3 2 -6)
    3
    > (max 1 3 -17 2.0)
    3.0
    > (max 1 3/2 1/2)
    3/2
    > (min 1 3/2 1/2)
    1/2
    > (nan? (min +inf.0 +nan.0 -inf.0))
    #t
    > (nan? (max +inf.0 +nan.0 -inf.0))
    #t
    """
# Doctest fixture: bitwise ops including identity elements of the 0-ary
# forms, bignum operands and bit positions beyond fixnum range.
def test_bitwise(doctest):
    """
    > (bitwise-ior 1 2)
    3
    > (bitwise-ior -32 1)
    -31
    > (bitwise-ior)
    0
    > (bitwise-and 1 2)
    0
    > (bitwise-and -32 -1)
    -32
    > (bitwise-and)
    -1
    > (bitwise-xor 1 5)
    4
    > (bitwise-xor -32 -1)
    31
    > (bitwise-xor)
    0
    > (bitwise-not 1)
    -2
    > (bitwise-not -1111111111111111111111111111111111111111111114243232)
    1111111111111111111111111111111111111111111114243231
    > (bitwise-bit-set? 5 0)
    #t
    > (bitwise-bit-set? 5 2)
    #t
    > (bitwise-bit-set? -5 (expt 2 700))
    #t
    > (bitwise-bit-set? 5 (expt 2 700))
    #f
    > (bitwise-bit-set? (expt 2 100) 100)
    #t
    > (bitwise-bit-set? (expt 2 100) 101)
    #f
    E (bitwise-bit-set? 2 -5)
    """
# Doctest fixture: exact->inexact over the numeric tower.
def test_exact_to_inexact(doctest):
    """
    > (exact->inexact 1)
    1.0
    > (exact->inexact 1/2)
    0.5
    > (exact->inexact 0.5)
    0.5
    > (exact->inexact 1+2i)
    1.0+2.0i
    > (exact->inexact 102222222222222222222222222222222222222222222222123123)
    1.0222222222222222e+53
    """
# Doctest fixture: inexact->exact, including the exact value of a large
# flonum (not the decimal it was printed from).
def test_inexact_to_exact(doctest):
    """
    > (inexact->exact 1.0)
    1
    > (inexact->exact 0.5)
    1/2
    > (inexact->exact 1/2)
    1/2
    > (inexact->exact 1.0+2.0i)
    1+2i
    > (inexact->exact 1.0222222222222222e+53)
    102222222222222223892324523663483522756187192341561344
    """
# Doctest fixture: unsafe flonum primitives (no argument type checks).
def test_flonum_unsafe(doctest):
    """
    ! (require '#%flfxnum '#%unsafe)
    > (unsafe-fl+ 1.0 2.0)
    3.0
    > (unsafe-fl- 2.0 1.0)
    1.0
    > (unsafe-fl* 2.0 0.5)
    1.0
    > (unsafe-fl/ 2.0 0.5)
    4.0
    > (unsafe-flmin 3.0 5.4)
    3.0
    > (unsafe-flmax 3.0 5.4)
    5.4
    """
# Doctest fixture: unsafe fixnum primitives; oversized shift results are
# implementation defined but must terminate quickly.
def test_fixnum_unsafe(doctest):
    """
    ! (require '#%flfxnum '#%unsafe)
    > (unsafe-fx+ 10 20)
    30
    > (unsafe-fx- 20 10)
    10
    > (unsafe-fx* 20 5)
    100
    > (unsafe-fxmin 10 20)
    10
    > (unsafe-fxmin 20 10)
    10
    > (unsafe-fxmax 10 20)
    20
    > (unsafe-fxmax 20 10)
    20
    > (unsafe-fxmodulo -100 30)
    20
    > (unsafe-fxmodulo 100 -30)
    -20
    > (unsafe-fx- 2 1)
    1
    > (unsafe-fx* 2 5)
    10
    > (unsafe-fxmin 1 2)
    1
    > (unsafe-fxmin 2 1)
    1
    > (unsafe-fxmax 1 2)
    2
    > (unsafe-fxmax 2 1)
    2
    > (unsafe-fx> 2 1)
    #t
    > (unsafe-fx>= 2 1)
    #t
    > (unsafe-fx>= 1 1)
    #t
    > (unsafe-fx>= -1 1)
    #f
    > (unsafe-fx<= -1 1)
    #t
    > (unsafe-fx<= -10 -10)
    #t
    > (unsafe-fx< -10 -10)
    #f
    > (unsafe-fx= -10 -10)
    #t
    > (unsafe-fxand 2 3)
    2
    > (unsafe-fxior 2 3)
    3
    > (unsafe-fxlshift 10 10)
    10240
    > (unsafe-fxrshift 1 20)
    0
    > (unsafe-fxrshift 20 1)
    10
    > (unsafe-fxrshift -20 1)
    -10
    > (unsafe-fxmodulo 10 3)
    1
    > (unsafe-fxmodulo -10 -3)
    -1
    > (unsafe-fxremainder 10 3)
    1
    > (unsafe-fxremainder -10 -3)
    -1
    > (unsafe-fxquotient 10 3)
    3
    > (unsafe-fxlshift 1 10)
    1024
    ; implementation defined, but should terminate quickly
    > (unsafe-fxlshift 1 10000000)
    ;1
    """
# Doctest fixture: exp over exact/inexact and complex arguments; (exp 0)
# stays exact 1.
def test_exp(doctest):
    """
    > (exp 1)
    2.718281828459045
    > (exp 0)
    1
    > (exp 2+3i)
    -7.315110094901103+1.0427436562359045i
    > (exp 2.0+3i)
    -7.315110094901103+1.0427436562359045i
    """
# Doctest fixture: arithmetic-shift in both directions, including shifts
# into and back out of bignum range.
def test_shift(doctest):
    """
    > (arithmetic-shift 1 10)
    1024
    > (arithmetic-shift 255 -3)
    31
    > (arithmetic-shift 10 1000)
    107150860718626732094842504906000181056140481170553360744375038837035105112493612249319837881569585812759467291755314682518714528569231404359845775746985748039345677748242309854210746050623711418779541821530464749835819412673987675591655439460770629145711964776865421676604298316526243868372056680693760
    > (arithmetic-shift 107150860718626732094842504906000181056140481170553360744375038837035105112493612249319837881569585812759467291755314682518714528569231404359845775746985748039345677748242309854210746050623711418779541821530464749835819412673987675591655439460770629145711964776865421676604298316526243868372056680693760 -1000)
    10
    """
# Doctest fixture: ceiling on rationals (exact result) and flonums.
def test_ceiling(doctest):
    """
    > (ceiling 17/4)
    5
    > (ceiling -17/4)
    -4
    > (ceiling 2.5)
    3.0
    > (ceiling -2.5)
    -2.0
    """
# Doctest fixture: floor on rationals and flonums.
def test_floor(doctest):
    """
    > (floor 17/4)
    4
    > (floor -17/4)
    -5
    > (floor 2.5)
    2.0
    > (floor -2.5)
    -3.0
    """
# Doctest fixture: truncate rounds toward zero; infinities pass through.
def test_truncate(doctest):
    """
    > (truncate 17/4)
    4
    > (truncate -17/4)
    -4
    > (truncate 2.5)
    2.0
    > (truncate -2.5)
    -2.0
    > (truncate +inf.0)
    +inf.0
    """
# Doctest fixture: flonum-only ceiling.
def test_flceiling(doctest):
    """
    ! (require racket/flonum)
    > (flceiling 2.5)
    3.0
    > (flceiling -2.5)
    -2.0
    """
# Doctest fixture: flonum-only floor.
def test_flfloor(doctest):
    """
    ! (require racket/flonum)
    > (flfloor 2.5)
    2.0
    > (flfloor -2.5)
    -3.0
    """
# Doctest fixture: flonum-only truncate; infinities pass through.
def test_fltruncate(doctest):
    """
    ! (require racket/flonum)
    > (fltruncate 2.5)
    2.0
    > (fltruncate -2.5)
    -2.0
    > (fltruncate +inf.0)
    +inf.0
    """
# Doctest fixture: abs over rationals, fixnums, bignums and flonums.
def test_abs(doctest):
    """
    > (abs 1/2)
    1/2
    > (abs -1/2)
    1/2
    > (abs 1)
    1
    > (abs 1000000000000000000000002120000000000000000000000000000000)
    1000000000000000000000002120000000000000000000000000000000
    > (abs -1000000000000000000000002120000000000000000000000000000000)
    1000000000000000000000002120000000000000000000000000000000
    > (abs -1.232)
    1.232
    """
# Doctest fixture: expt including negative exponents (exact rational
# results) and flonum bases; currently expected to fail.
@pytest.mark.xfail
def test_expt(doctest):
    """
    > (expt 2 3)
    8
    > (expt 4 0.5)
    2.0
    > (expt 2 -5)
    1/32
    > (expt 2. -5)
    0.03125
    """
# Doctest fixture: arithmetic on a non-number must raise.
def test_error(doctest):
    """
    E (+ 'a 1)
    """
# Doctest fixture: rational? is true for all exact reals and finite
# flonums, false for infinities and non-numbers.
def test_rational_predicate(doctest):
    """
    > (rational? 1)
    #t
    > (rational? +inf.0)
    #f
    > (rational? "hello")
    #f
    > (rational? 7/3)
    #t
    > (rational? 13647861237849612903845789012745781623478613289571907344901263)
    #t
    """
# Doctest fixture: exact? over integers, rationals and complex numbers;
# any flonum component makes the number inexact.
def test_exact_predicate(doctest):
    """
    > (exact? -17)
    #t
    > (exact? 999999999999999999999999)
    #t
    > (exact? 5)
    #t
    > (exact? 1/2)
    #t
    > (exact? 9999999999999999999999999999/2)
    #t
    > (exact? -3/4)
    #t
    > (exact? 1+2i)
    #t
    > (exact? 1/2+3/4i)
    #t
    > (exact? 1.0)
    #f
    > (exact? 1.0+3i)
    #f
    > (exact? 3+1.0i)
    #f
    > (exact? "3")
    #f
    """
# Doctest fixture: inexact? is the complement of exact? for numbers.
# NOTE(review): function name has a typo ("prediace" -> "predicate");
# left unchanged here to keep pytest test ids stable.
def test_inexact_prediace(doctest):
    """
    > (inexact? 1)
    #f
    > (inexact? 1.0)
    #t
    > (inexact? 1+2i)
    #f
    > (inexact? 1.0+2.0i)
    #t
    > (inexact? 1.1+3i)
    #t
    """
# Doctest fixture: make-rectangular; (0, 0) collapses to exact 0.
def test_make_rectangular(doctest):
    """
    > (make-rectangular 0 0)
    0
    > (make-rectangular 3 4)
    3+4i
    > (make-rectangular 3.0 4.0)
    3.0+4.0i
    ;> (make-rectangular 0 0.4)
    ;0+0.4f0i
    ;FIXME: result is correct, smthng wrong with read
    """
# Doctest fixture: sqrt keeps perfect squares exact, others become
# flonums; complex results are disabled pending a reader fix.
def test_sqrt(doctest):
    """
    > (sqrt 1)
    1
    > (sqrt 2)
    1.4142135623730951
    > (sqrt 3)
    1.7320508075688772
    > (sqrt 4)
    2
    > (sqrt 5)
    2.23606797749979
    > (sqrt 6)
    2.449489742783178
    > (sqrt 7)
    2.6457513110645907
    > (sqrt 8)
    2.8284271247461903
    > (sqrt 9)
    3
    > (sqrt 10)
    3.1622776601683795
    > (sqrt 11)
    3.3166247903554
    > (sqrt 12)
    3.4641016151377544
    > (sqrt 13)
    3.605551275463989
    > (sqrt 14)
    3.7416573867739413
    > (sqrt 15)
    3.872983346207417
    > (sqrt 16)
    4
    ;> (sqrt -7)
    ;0+2.6457513110645907i
    ;> (sqrt -3)
    ;0+1.7320508075688772i
    ;> (sqrt -3.14)
    ;0+1.772004514666935i
    ;FIXME: results are correct, smthng wrong with read
    """
def test_sqrt2():
    """sqrt of -0.0 must preserve the negative sign of the zero."""
    result = W_Flonum(-0.0).arith_sqrt().value
    assert math.copysign(1, result) == -1
# Doctest fixture: integer-length (bits needed excluding sign), for
# positive and negative fixnums and bignums.
def test_integer_length(doctest):
    """
    > (integer-length 8)
    4
    > (integer-length -8)
    3
    > (integer-length 0)
    0
    > (integer-length (expt 2 10))
    11
    > (integer-length (expt 2 20))
    21
    > (integer-length (expt 2 100))
    101
    > (integer-length (- (expt 2 10)))
    10
    > (integer-length (- (expt 2 20)))
    20
    > (integer-length (- (expt 2 100)))
    100
    > (integer-length 3713820117856140828992454656)
    92
    """
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
import hamcrest as hc
import pandas
from parameterized import param
from parameterized import parameterized
from apache_beam import Create
from apache_beam import Map
from apache_beam.io import filebasedsource
from apache_beam.io import source_test_utils
from apache_beam.io.iobase import RangeTracker
from apache_beam.io.parquetio import ReadAllFromParquet
from apache_beam.io.parquetio import ReadAllFromParquetBatched
from apache_beam.io.parquetio import ReadFromParquet
from apache_beam.io.parquetio import ReadFromParquetBatched
from apache_beam.io.parquetio import WriteToParquet
from apache_beam.io.parquetio import _create_parquet_sink
from apache_beam.io.parquetio import _create_parquet_source
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.display_test import DisplayDataItemMatcher
try:
import pyarrow as pa
import pyarrow.lib as pl
import pyarrow.parquet as pq
except ImportError:
pa = None
pl = None
pq = None
@unittest.skipIf(pa is None, "PyArrow is not installed.")
class TestParquet(unittest.TestCase):
  @classmethod
  def setUpClass(cls):
    """Alias assertCountEqual to its Python 2 name so tests run on both."""
    # Method has been renamed in Python 3
    if sys.version_info[0] < 3:
      cls.assertCountEqual = cls.assertItemsEqual
  def setUp(self):
    """Builds shared fixtures: a temp dir, sample records and two schemas."""
    # Reducing the size of thread pools. Without this test execution may fail in
    # environments with limited amount of resources.
    filebasedsource.MAX_NUM_THREADS_FOR_SIZE_ESTIMATION = 2
    self.temp_dir = tempfile.mkdtemp()
    # Six sample rows shared by every test in this class.
    self.RECORDS = [{
        'name': 'Thomas', 'favorite_number': 1, 'favorite_color': 'blue'
    },
                    {
                        'name': 'Henry',
                        'favorite_number': 3,
                        'favorite_color': 'green'
                    },
                    {
                        'name': 'Toby',
                        'favorite_number': 7,
                        'favorite_color': 'brown'
                    },
                    {
                        'name': 'Gordon',
                        'favorite_number': 4,
                        'favorite_color': 'blue'
                    },
                    {
                        'name': 'Emily',
                        'favorite_number': -1,
                        'favorite_color': 'Red'
                    },
                    {
                        'name': 'Percy',
                        'favorite_number': 6,
                        'favorite_color': 'Green'
                    }]
    self.SCHEMA = pa.schema([('name', pa.string()),
                             ('favorite_number', pa.int64()),
                             ('favorite_color', pa.string())])
    # Variant schema with a nanosecond timestamp, used by the int96 test.
    self.SCHEMA96 = pa.schema([('name', pa.string()),
                               ('favorite_number', pa.timestamp('ns')),
                               ('favorite_color', pa.string())])
  def tearDown(self):
    """Removes the per-test temp directory created in setUp."""
    shutil.rmtree(self.temp_dir)
def _record_to_columns(self, records, schema):
col_list = []
for n in schema.names:
column = []
for r in records:
column.append(r[n])
col_list.append(column)
return col_list
def _records_as_arrow(self, schema=None, count=None):
if schema is None:
schema = self.SCHEMA
if count is None:
count = len(self.RECORDS)
len_records = len(self.RECORDS)
data = []
for i in range(count):
data.append(self.RECORDS[i % len_records])
col_data = self._record_to_columns(data, schema)
col_array = [pa.array(c, schema.types[cn]) for cn, c in enumerate(col_data)]
return pa.Table.from_arrays(col_array, schema.names)
  def _write_data(
      self,
      directory=None,
      schema=None,
      prefix=tempfile.template,
      row_group_size=1000,
      codec='none',
      count=None):
    """Writes `count` fixture records to a new parquet file; returns its path.

    The file is created with delete=False so it survives the context exit
    and can be read back by the test.
    """
    if directory is None:
      directory = self.temp_dir
    with tempfile.NamedTemporaryFile(delete=False, dir=directory,
                                     prefix=prefix) as f:
      table = self._records_as_arrow(schema, count)
      pq.write_table(
          table,
          f,
          row_group_size=row_group_size,
          compression=codec,
          use_deprecated_int96_timestamps=True)
      return f.name
def _write_pattern(self, num_files):
assert num_files > 0
temp_dir = tempfile.mkdtemp(dir=self.temp_dir)
for _ in range(num_files):
self._write_data(directory=temp_dir, prefix='mytemp')
return temp_dir + os.path.sep + 'mytemp*'
  def _run_parquet_test(
      self,
      pattern,
      columns,
      desired_bundle_size,
      perform_splitting,
      expected_result):
    """Reads `pattern` via a parquet source, either split or whole.

    When perform_splitting is set, verifies the split sub-sources together
    read the same data as the unsplit source (expected_result is unused in
    that branch); otherwise reads directly and compares to expected_result.
    """
    source = _create_parquet_source(pattern, columns=columns)
    if perform_splitting:
      assert desired_bundle_size
      sources_info = [
          (split.source, split.start_position, split.stop_position)
          for split in source.split(desired_bundle_size=desired_bundle_size)
      ]
      # A single split would make the reference comparison vacuous.
      if len(sources_info) < 2:
        raise ValueError(
            'Test is trivial. Please adjust it so that at least '
            'two splits get generated')
      source_test_utils.assert_sources_equal_reference_source(
          (source, None, None), sources_info)
    else:
      read_records = source_test_utils.read_from_source(source, None, None)
      self.assertCountEqual(expected_result, read_records)
  def test_read_without_splitting(self):
    """Reads a whole file back as a single arrow table batch."""
    file_name = self._write_data()
    expected_result = [self._records_as_arrow()]
    self._run_parquet_test(file_name, None, None, False, expected_result)
  def test_read_with_splitting(self):
    """Splitting the source must read the same data as the whole source."""
    file_name = self._write_data()
    expected_result = [self._records_as_arrow()]
    self._run_parquet_test(file_name, None, 100, True, expected_result)
  def test_source_display_data(self):
    """The parquet source reports compression and file pattern display data."""
    file_name = 'some_parquet_source'
    source = \
      _create_parquet_source(
          file_name,
          validate=False
      )
    dd = DisplayData.create_from(source)
    expected_items = [
        DisplayDataItemMatcher('compression', 'auto'),
        DisplayDataItemMatcher('file_pattern', file_name)
    ]
    hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
  def test_read_display_data(self):
    """Both read transforms report the same source display data."""
    file_name = 'some_parquet_source'
    read = \
      ReadFromParquet(
          file_name,
          validate=False)
    read_batched = \
      ReadFromParquetBatched(
          file_name,
          validate=False)
    expected_items = [
        DisplayDataItemMatcher('compression', 'auto'),
        DisplayDataItemMatcher('file_pattern', file_name)
    ]
    hc.assert_that(
        DisplayData.create_from(read).items,
        hc.contains_inanyorder(*expected_items))
    hc.assert_that(
        DisplayData.create_from(read_batched).items,
        hc.contains_inanyorder(*expected_items))
  def test_sink_display_data(self):
    """The sink reports schema, sharded file pattern, codec and buffering."""
    file_name = 'some_parquet_sink'
    sink = _create_parquet_sink(
        file_name,
        self.SCHEMA,
        'none',
        1024 * 1024,
        1000,
        False,
        '.end',
        0,
        None,
        'application/x-parquet')
    dd = DisplayData.create_from(sink)
    expected_items = [
        DisplayDataItemMatcher('schema', str(self.SCHEMA)),
        DisplayDataItemMatcher(
            'file_pattern',
            'some_parquet_sink-%(shard_num)05d-of-%(num_shards)05d.end'),
        DisplayDataItemMatcher('codec', 'none'),
        DisplayDataItemMatcher('row_group_buffer_size', str(1024 * 1024)),
        DisplayDataItemMatcher('compression', 'uncompressed')
    ]
    hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
  def test_write_display_data(self):
    """WriteToParquet defaults surface in its display data."""
    file_name = 'some_parquet_sink'
    write = WriteToParquet(file_name, self.SCHEMA)
    dd = DisplayData.create_from(write)
    expected_items = [
        DisplayDataItemMatcher('codec', 'none'),
        DisplayDataItemMatcher('schema', str(self.SCHEMA)),
        DisplayDataItemMatcher('row_group_buffer_size', str(64 * 1024 * 1024)),
        DisplayDataItemMatcher(
            'file_pattern',
            'some_parquet_sink-%(shard_num)05d-of-%(num_shards)05d'),
        DisplayDataItemMatcher('compression', 'uncompressed')
    ]
    hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_sink_transform_int96(self):
with tempfile.NamedTemporaryFile() as dst:
path = dst.name
# pylint: disable=c-extension-no-member
with self.assertRaises(pl.ArrowInvalid):
# Should throw an error "ArrowInvalid: Casting from timestamp[ns] to
# timestamp[us] would lose data"
with TestPipeline() as p:
_ = p \
| Create(self.RECORDS) \
| WriteToParquet(
path, self.SCHEMA96, num_shards=1, shard_name_template='')
def test_sink_transform(self):
with tempfile.NamedTemporaryFile() as dst:
path = dst.name
with TestPipeline() as p:
_ = p \
| Create(self.RECORDS) \
| WriteToParquet(
path, self.SCHEMA, num_shards=1, shard_name_template='')
with TestPipeline() as p:
# json used for stable sortability
readback = \
p \
| ReadFromParquet(path) \
| Map(json.dumps)
assert_that(readback, equal_to([json.dumps(r) for r in self.RECORDS]))
def test_batched_read(self):
with tempfile.NamedTemporaryFile() as dst:
path = dst.name
with TestPipeline() as p:
_ = p \
| Create(self.RECORDS, reshuffle=False) \
| WriteToParquet(
path, self.SCHEMA, num_shards=1, shard_name_template='')
with TestPipeline() as p:
# json used for stable sortability
readback = \
p \
| ReadFromParquetBatched(path)
assert_that(readback, equal_to([self._records_as_arrow()]))
@parameterized.expand([
param(compression_type='snappy'),
param(compression_type='gzip'),
param(compression_type='brotli'),
param(compression_type='lz4'),
param(compression_type='zstd')
])
def test_sink_transform_compressed(self, compression_type):
with tempfile.NamedTemporaryFile() as dst:
path = dst.name
with TestPipeline() as p:
_ = p \
| Create(self.RECORDS) \
| WriteToParquet(
path, self.SCHEMA, codec=compression_type,
num_shards=1, shard_name_template='')
with TestPipeline() as p:
# json used for stable sortability
readback = \
p \
| ReadFromParquet(path + '*') \
| Map(json.dumps)
assert_that(readback, equal_to([json.dumps(r) for r in self.RECORDS]))
def test_read_reentrant(self):
file_name = self._write_data(count=6, row_group_size=3)
source = _create_parquet_source(file_name)
source_test_utils.assert_reentrant_reads_succeed((source, None, None))
def test_read_without_splitting_multiple_row_group(self):
file_name = self._write_data(count=12000, row_group_size=1000)
# We expect 12000 elements, split into batches of 1000 elements. Create
# a list of pa.Table instances to model this expecation
expected_result = [
pa.Table.from_batches([batch]) for batch in self._records_as_arrow(
count=12000).to_batches(chunksize=1000)
]
self._run_parquet_test(file_name, None, None, False, expected_result)
def test_read_with_splitting_multiple_row_group(self):
file_name = self._write_data(count=12000, row_group_size=1000)
# We expect 12000 elements, split into batches of 1000 elements. Create
# a list of pa.Table instances to model this expecation
expected_result = [
pa.Table.from_batches([batch]) for batch in self._records_as_arrow(
count=12000).to_batches(chunksize=1000)
]
self._run_parquet_test(file_name, None, 10000, True, expected_result)
def test_dynamic_work_rebalancing(self):
file_name = self._write_data(count=120, row_group_size=20)
source = _create_parquet_source(file_name)
splits = [split for split in source.split(desired_bundle_size=float('inf'))]
assert len(splits) == 1
source_test_utils.assert_split_at_fraction_exhaustive(
splits[0].source, splits[0].start_position, splits[0].stop_position)
def test_min_bundle_size(self):
file_name = self._write_data(count=120, row_group_size=20)
source = _create_parquet_source(
file_name, min_bundle_size=100 * 1024 * 1024)
splits = [split for split in source.split(desired_bundle_size=1)]
self.assertEqual(len(splits), 1)
source = _create_parquet_source(file_name, min_bundle_size=0)
splits = [split for split in source.split(desired_bundle_size=1)]
self.assertNotEqual(len(splits), 1)
def _convert_to_timestamped_record(self, record):
timestamped_record = record.copy()
timestamped_record['favorite_number'] =\
pandas.Timestamp(timestamped_record['favorite_number'])
return timestamped_record
def test_int96_type_conversion(self):
file_name = self._write_data(
count=120, row_group_size=20, schema=self.SCHEMA96)
orig = self._records_as_arrow(count=120, schema=self.SCHEMA96)
expected_result = [
pa.Table.from_batches([batch])
for batch in orig.to_batches(chunksize=20)
]
self._run_parquet_test(file_name, None, None, False, expected_result)
def test_split_points(self):
file_name = self._write_data(count=12000, row_group_size=3000)
source = _create_parquet_source(file_name)
splits = [split for split in source.split(desired_bundle_size=float('inf'))]
assert len(splits) == 1
range_tracker = splits[0].source.get_range_tracker(
splits[0].start_position, splits[0].stop_position)
split_points_report = []
for _ in splits[0].source.read(range_tracker):
split_points_report.append(range_tracker.split_points())
# There are a total of four row groups. Each row group has 3000 records.
# When reading records of the first group, range_tracker.split_points()
# should return (0, iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)
self.assertEqual(
split_points_report,
[
(0, RangeTracker.SPLIT_POINTS_UNKNOWN),
(1, RangeTracker.SPLIT_POINTS_UNKNOWN),
(2, RangeTracker.SPLIT_POINTS_UNKNOWN),
(3, 1),
])
def test_selective_columns(self):
file_name = self._write_data()
orig = self._records_as_arrow()
expected_result = [
pa.Table.from_arrays([orig.column('name')], names=['name'])
]
self._run_parquet_test(file_name, ['name'], None, False, expected_result)
def test_sink_transform_multiple_row_group(self):
with tempfile.NamedTemporaryFile() as dst:
path = dst.name
with TestPipeline() as p:
# writing 623200 bytes of data
_ = p \
| Create(self.RECORDS * 4000) \
| WriteToParquet(
path, self.SCHEMA, num_shards=1, codec='none',
shard_name_template='', row_group_buffer_size=250000)
self.assertEqual(pq.read_metadata(path).num_row_groups, 3)
def test_read_all_from_parquet_single_file(self):
path = self._write_data()
with TestPipeline() as p:
assert_that(
p \
| Create([path]) \
| ReadAllFromParquet(),
equal_to(self.RECORDS))
with TestPipeline() as p:
assert_that(
p \
| Create([path]) \
| ReadAllFromParquetBatched(),
equal_to([self._records_as_arrow()]))
def test_read_all_from_parquet_many_single_files(self):
path1 = self._write_data()
path2 = self._write_data()
path3 = self._write_data()
with TestPipeline() as p:
assert_that(
p \
| Create([path1, path2, path3]) \
| ReadAllFromParquet(),
equal_to(self.RECORDS * 3))
with TestPipeline() as p:
assert_that(
p \
| Create([path1, path2, path3]) \
| ReadAllFromParquetBatched(),
equal_to([self._records_as_arrow()] * 3))
def test_read_all_from_parquet_file_pattern(self):
file_pattern = self._write_pattern(5)
with TestPipeline() as p:
assert_that(
p \
| Create([file_pattern]) \
| ReadAllFromParquet(),
equal_to(self.RECORDS * 5))
with TestPipeline() as p:
assert_that(
p \
| Create([file_pattern]) \
| ReadAllFromParquetBatched(),
equal_to([self._records_as_arrow()] * 5))
def test_read_all_from_parquet_many_file_patterns(self):
file_pattern1 = self._write_pattern(5)
file_pattern2 = self._write_pattern(2)
file_pattern3 = self._write_pattern(3)
with TestPipeline() as p:
assert_that(
p \
| Create([file_pattern1, file_pattern2, file_pattern3]) \
| ReadAllFromParquet(),
equal_to(self.RECORDS * 10))
with TestPipeline() as p:
assert_that(
p \
| Create([file_pattern1, file_pattern2, file_pattern3]) \
| ReadAllFromParquetBatched(),
equal_to([self._records_as_arrow()] * 10))
if __name__ == '__main__':
  # Run this module's tests directly, with INFO-level logging enabled.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.