# -*- encoding: utf-8 -*-
from __future__ import print_function
import json
import time
import codecs
from .errorObjs import *
from collections import namedtuple, defaultdict
__all__ = ["LogEnvManager", "LogEnvironment", "LogAgent"]
Position = namedtuple('Position', ['x', 'y'])
class DictAttr(dict):
"""A special dictionary that can be used with attributes."""
def __init__(self, *args, **kwargs):
super(DictAttr, self).__init__(*args, **kwargs)
self.__dict__ = self
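# A minimal sketch of DictAttr behaviour (the key name is hypothetical):
#
#     >>> d = DictAttr(x=1)
#     >>> d.x == d["x"]
#     True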
class Box(object):
"""Class for boxes objects."""
id_num = 0
def __init__(self, name=None):
self._name = self.__get_name() if name is None else name
@staticmethod
def __get_name():
"""Get a prefixed name for the object."""
Box.id_num += 1
return "Box_{0}".format(Box.id_num)
def __getattr__(self, attr):
if attr == "name":
return self._name
def __repr__(self):
return "{0}".format(self._name)
class Airplane(object):
"""Class for airplanes objects."""
id_num = 0
def __init__(self, name=None, maxbox=10):
self._name = self.__get_name() if name is None else name
self._boxes = DictAttr()
self._maxbox = maxbox
@staticmethod
def __get_name():
"""Get a prefixed name for the object."""
Airplane.id_num += 1
return "Airplane_{0}".format(Airplane.id_num)
def __contains__(self, key):
return key in self._boxes
def __getattr__(self, attr):
if attr == "name":
return self._name
elif attr == "maxbox":
return self._maxbox
elif attr == "boxes":
return self._boxes
def __repr__(self):
string = "{0} with {1}/{2} boxes: [".format(
self._name, len(self._boxes), self._maxbox)
for box in sorted(self._boxes.values(),
key=lambda obj: obj.name):
string += "{0}, ".format(box)
string = string[:-2] + "]" if len(self._boxes) > 0 else string + "]"
return string
def set_max_box(self, max_):
"""Set the maximum number of boxes that the airplane can transport."""
self._maxbox = max_
def add_box(self, box):
"""Add a box to the airplane (only for environment initialization)."""
if len(self._boxes) < self._maxbox:
self._boxes[box.name] = box
else:
raise AirplaneMaxBoxExided(self)
class Airport(object):
"""Class for airports objects."""
id_num = 0
def __init__(self, name=None, position=None):
self._position = Position(
0, 0) if position is None else Position(*position)
self._name = self.__get_name() if name is None else name
self._boxes = DictAttr()
self._airplanes = DictAttr()
self._neighbors = DictAttr()
@staticmethod
def __get_name():
"""Get a prefixed name for the object."""
Airport.id_num += 1
return "Airport_{0}".format(Airport.id_num)
def __contains__(self, key):
return key in self._boxes or key in self._airplanes
def __getattr__(self, attr):
if attr == "name":
return self._name
elif attr == "position":
return self._position
elif attr == "neighbors":
return self._neighbors
elif attr == "boxes":
return self._boxes
elif attr == "airplanes":
return self._airplanes
elif attr == "edges":
return self._neighbors
    def __repr__(self):
        string = "{0} at {1} {{\n".format(self._name, self._position)
        string += "\tboxes ({0}):\n".format(len(self._boxes))
        for box in sorted(self._boxes.values(),
                          key=lambda obj: obj.name):
            string += "\t\t- {0}\n".format(box)
        string += "\tairplanes ({0}):\n".format(len(self._airplanes))
        for airplane in sorted(self._airplanes.values(),
                               key=lambda obj: obj.name):
            string += "\t\t- {0}\n".format(airplane)
        string += "\tneighbors ({0}):\n".format(len(self._neighbors))
        for airport_name, weight in sorted(self._neighbors.items()):
            string += "\t\t- {0} -> {1}\n".format(airport_name, weight)
string += "}"
return string
def set_position(self, position):
"""Set position of the airport (only for environment initialization)."""
self._position = Position(*position)
def add_link(self, airport_name, weight=1):
"""Add a link between airports (only for environment initialization)."""
self._neighbors[airport_name] = weight
def add_airplane(self, airplane):
"""Add an airplane to the airport (only for environment initialization)."""
self._airplanes[airplane.name] = airplane
def add_box(self, box):
"""Add a box to the airport (only for environment initialization)."""
self._boxes[box.name] = box
class LogAgent(object):
"""Agent for LogEnvironment"""
def __init__(self):
self.score = 0
self.moves = 0
self.goals = 0
    def solve(self, status, goal):
        """Virtual method called by the environment.
        It must return a list of tuples of the form:
            [(method_name, *args)]
        where the arguments depend on the called method.
        """
        pass
    def get_formatted_score(self, num_goals):
        return "Score of {0} in {1} moves. {2}/{3} goals reached.".format(
            self.score, self.moves, self.goals, num_goals)
class LogEnvironment(object):
"""Class that represent the world for the simulation."""
def __init__(self, json_file=None, obj=None):
self.__allowed_methods = ["load", "unload", "move"]
self._airports = DictAttr()
self._airplanes = DictAttr()
self._boxes = DictAttr()
self._goal = list()
self.num_goals = 0
self._agent = None
temp_airplanes = dict()
temp_boxes = dict()
if obj is not None and isinstance(obj, LogEnvironment):
for box in obj.boxes:
self._boxes[box] = Box(box)
for airplane, air_obj in obj.airplanes.items():
self._airplanes[airplane] = Airplane(airplane, air_obj.maxbox)
for box in air_obj.boxes:
self._airplanes[airplane].add_box(self._boxes[box])
for airport, airport_obj in obj.airports.items():
self._airports[airport] = Airport(airport)
self._airports[airport].set_position(airport_obj.position)
for box in airport_obj.boxes:
self._airports[airport].add_box(self._boxes[box])
for airplane in airport_obj.airplanes:
self._airports[airport].add_airplane(
self._airplanes[airplane])
for neighbor, weight in airport_obj.neighbors.items():
self._airports[airport].add_link(neighbor, weight)
self._goal = [elem for elem in obj.goal_list]
self.num_goals = obj.num_goals
del self._agent
if json_file is not None:
with codecs.open(json_file, 'r', 'utf-8') as fsj:
config = json.load(fsj)
initial_status = config['initial_status']
            # explicit name lists are not handled here; only counts are supported
            if not isinstance(initial_status['airports'], list):
                for num in range(initial_status['airports']):
                    new_airport = Airport()
                    self._airports[new_airport.name] = new_airport
            if not isinstance(initial_status['airplanes'], list):
                for num in range(initial_status['airplanes']):
                    new_airplane = Airplane()
                    self._airplanes[new_airplane.name] = new_airplane
                    temp_airplanes[new_airplane.name] = new_airplane
            if not isinstance(initial_status['boxes'], list):
                for num in range(initial_status['boxes']):
                    new_box = Box()
                    self._boxes[new_box.name] = new_box
                    temp_boxes[new_box.name] = new_box
self.__add_edges(
initial_status['edges'])
self.__add_vertices(
initial_status['vertices'], temp_airplanes, temp_boxes)
            # Setup goal: each sentence looks like "Box_1, Box_2 in Airport_1";
            # split on the last " in " so object names containing "in" survive
            for sentence in config['goal']:
                objs, in_ = [
                    string.strip() for string in sentence.rsplit(" in ", 1)]
                objs = [string.strip()
                        for string in objs.split(",")]
                self.num_goals += len(objs)
                self._goal.append((objs, in_))
# Reset id creation for multi environments
Airport.id_num = 0
Airplane.id_num = 0
Box.id_num = 0
self.__verify_goal()
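    # A configuration sketch inferred from the parsing above (the field names
    # come from the code; the concrete values are hypothetical):
    #
    #     {
    #         "initial_status": {
    #             "airports": 2, "airplanes": 1, "boxes": 2,
    #             "edges": {"Airport_1": {"Airport_2": 3}},
    #             "vertices": {
    #                 "Airport_1": {
    #                     "position": [0, 0],
    #                     "boxes": ["Box_1"],
    #                     "airplanes": {"Airplane_1": {"maxbox": 10,
    #                                                  "boxes": ["Box_2"]}}
    #                 },
    #                 "Airport_2": {"position": [4, 2]}
    #             }
    #         },
    #         "goal": ["Box_1, Box_2 in Airport_2"]
    #     }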
def __getattr__(self, attr):
if attr == "airports":
return self._airports
elif attr == "airplanes":
return self._airplanes
elif attr == "boxes":
return self._boxes
elif attr == "goal_list":
return self._goal
elif attr == "goal":
return self.get_goal()
elif attr == "clone":
return self.get_status()
elif attr == "moves":
return self.__moves()
def __eq__(self, obj):
return self.__repr__() == obj.__repr__()
def __ne__(self, obj):
return not self == obj
def __repr__(self):
string = "----- Environment -------\n"
for airport_name in sorted(self._airports):
string += "{0}".format(self._airports[airport_name])
string += "\n"
string += "-------------------------"
return string
    def __moves(self):
        """Return a list of all possible moves from the current status."""
list_ = list()
for airport_name, airport_obj in self.airports.items():
if len(airport_obj.boxes) > 0 and len(airport_obj.airplanes) > 0:
for box in airport_obj.boxes:
for airplane_name, airplane in airport_obj.airplanes.items():
if len(airplane.boxes) < airplane.maxbox:
list_.append(("load", box, airplane_name))
if len(airport_obj.airplanes) > 0:
for airplane_name, airplane in airport_obj.airplanes.items():
if len(airplane.boxes) > 0:
for box in airplane.boxes:
list_.append(("unload", box, airplane_name))
for airport_target in self.airports:
if airport_target != airport_name and\
airport_target in airport_obj.edges:
list_.append(
("move", airplane_name, airport_name, airport_target))
return list_
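    # __moves() yields action tuples in the same shape that execute() consumes,
    # e.g. ("load", "Box_1", "Airplane_1"), ("unload", "Box_1", "Airplane_1")
    # or ("move", "Airplane_1", "Airport_1", "Airport_2") (names hypothetical).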
def __add_edges(self, edges):
"""Add edges."""
for airport_name in edges:
if airport_name not in self._airports:
raise AirplaneNotExist(airport_name)
for edge, weight in edges[airport_name].items():
if edge not in self._airports:
raise AirplaneNotExist(edge)
self._airports[airport_name].add_link(edge, weight)
self._airports[edge].add_link(airport_name, weight)
def __add_vertices(self, vertices, temp_airplanes, temp_boxes):
"""Add vertices."""
for airport_name in vertices:
if airport_name not in self._airports:
raise AirplaneNotExist(airport_name)
airport = vertices[airport_name]
if 'position' in airport:
self._airports[airport_name].set_position(
airport['position'])
if 'boxes' in airport:
for box_name in airport['boxes']:
if box_name in temp_boxes:
self._airports[airport_name].add_box(
temp_boxes.pop(box_name))
else:
raise BoxAlreadyAssigned(box_name)
if 'airplanes' in airport:
for airplane_name in airport['airplanes']:
if airplane_name not in temp_airplanes:
raise AirplaneAlreadyAssigned(airplane_name)
temp_airplanes[airplane_name].set_max_box(
airport['airplanes'][airplane_name]['maxbox'])
if 'boxes' in airport['airplanes'][airplane_name]:
for box_name in airport['airplanes'][airplane_name]['boxes']:
if box_name in temp_boxes:
temp_airplanes[airplane_name].add_box(
temp_boxes.pop(box_name))
else:
raise BoxAlreadyAssigned(box_name)
self._airports[airport_name].add_airplane(
temp_airplanes.pop(airplane_name))
def get_status(self):
"""Return the current status to the agent."""
return LogEnvironment(obj=self)
    def get_goal(self):
        """Return the goal status."""
goal = DictAttr()
for objs, dir_ in self._goal:
if dir_ in goal:
goal[dir_] += [obj for obj in objs]
else:
goal[dir_] = [obj for obj in objs]
return goal
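    # get_goal() collapses the goal sentences into a destination -> objects
    # mapping, e.g. a goal list [(["Box_1", "Box_2"], "Airport_2")] becomes
    # {"Airport_2": ["Box_1", "Box_2"]} (names hypothetical).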
    def __verify_goal(self):
        """Verify that the goal is plausible (no object required in two places)."""
        temp = dict()
        already_visited = list()
        for objs, dir_ in self._goal:
            # merge, instead of overwriting, when a destination appears twice
            temp.setdefault(dir_, set()).update(objs)
        for key in temp:
            already_visited.append(key)
            for compare in [key_c for key_c in temp if key_c not in already_visited]:
                if len(temp[key] & temp[compare]) != 0:
                    raise GoalNotPlausible()
def check_goal(self, target=None):
"""Check if the goal is reached."""
results = True
if target is None:
for objs, dir_ in self._goal:
if dir_ in self._airports:
for obj in objs:
results = results and obj in self._airports[dir_]
if getattr(self, "_agent", False) and obj in self._airports[dir_]:
self._agent.goals += 1
elif dir_ in self._airplanes:
for obj in objs:
results = results and obj in self._airplanes[dir_]
if getattr(self, "_agent", False)and obj in self._airplanes[dir_]:
self._agent.goals += 1
elif target is not None:
for objs, dir_ in self._goal:
for obj in objs:
if obj == target:
if dir_ in self._airports:
results = results and obj in self._airports[dir_]
elif dir_ in self._airplanes:
results = results and obj in self._airplanes[dir_]
else:
results = None
return results
    def load(self, box, airplane_name):
        """Load a box into an airplane."""
        if box not in self._boxes or\
           airplane_name not in self._airplanes:
            if getattr(self, "_agent", False):
                self._agent.score -= 100
        else:
            temp_name = None
            for airport_name, airport in self._airports.items():
                if airplane_name in airport:
                    temp_name = airport_name
                    break
            if temp_name is not None and box in self._airports[temp_name].boxes:
                self._airports[temp_name].airplanes[airplane_name].add_box(
                    self._airports[temp_name].boxes.pop(box))
                if getattr(self, "_agent", False):
                    self._agent.score += 15
                    self._agent.moves += 1
    def unload(self, box, airplane_name):
        """Unload a box from an airplane to the airport."""
        if box not in self._boxes or\
           airplane_name not in self._airplanes:
            if getattr(self, "_agent", False):
                self._agent.score -= 100
        else:
            temp_name = None
            for airport_name, airport in self._airports.items():
                if airplane_name in airport:
                    temp_name = airport_name
                    break
            if temp_name is not None and\
               box in self._airports[temp_name].airplanes[airplane_name].boxes:
                self._airports[temp_name].add_box(
                    self._airports[temp_name].airplanes[airplane_name].boxes.pop(box))
                if getattr(self, "_agent", False):
                    self._agent.score += 10
                    self._agent.moves += 1
def move(self, airplane_name, from_, to_):
"""Move an airplane from an airport to another airport."""
if from_ not in self._airports or\
to_ not in self._airports or\
airplane_name not in self._airplanes:
if getattr(self, "_agent", False):
self._agent.score -= 100
else:
if from_ in self._airports[to_].edges:
self._airports[to_].airplanes[airplane_name] = self._airports[
from_].airplanes.pop(airplane_name)
if getattr(self, "_agent", False):
self._agent.score += 10 * self._airports[to_].edges[from_]
self._agent.moves += 1
else:
raise LinkNotExist(from_, to_)
def add_agent(self, agent):
"""Add an agent to the environment."""
if isinstance(agent, LogAgent):
self._agent = agent
    def execute(self, list_=None):
        """Execute the agent's solution, or the given action list/tuple."""
        def run(action):
            method, args = action[0], action[1:]
            if method in self.__allowed_methods:
                func = getattr(self, method, None)
                if callable(func):
                    func(*args)
        if getattr(self, "_agent", None) is not None:
            actions = self._agent.solve(self.get_status(), self.get_goal())
            if isinstance(actions, list):
                for action in actions:
                    run(action)
        elif isinstance(list_, list):
            for action in list_:
                run(action)
        elif isinstance(list_, tuple):
            run(list_)
        else:
            raise ActionNotAList()
        return self
return self
    def formatted_score(self):
        """Return the agent's formatted score, move count and goals reached."""
        self.check_goal()
        return self._agent.get_formatted_score(self.num_goals)
    def score(self):
        """Return the agent's score."""
        return self._agent.score
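# A minimal end-to-end sketch, assuming an environment description in a
# hypothetical "environment.json" and a LogAgent subclass such as ReplayAgent:
#
#     env = LogEnvironment("environment.json")
#     env.add_agent(ReplayAgent())
#     env.execute()
#     print(env.formatted_score())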
class LogEnvManager(object):
"""Class to manage multiple environments and agents."""
def __init__(self, agent_classes, list_of_agents, list_of_environments):
self.__data = defaultdict(list)
for agent_name in list_of_agents:
for environment in list_of_environments:
new_env = LogEnvironment(environment)
new_env.add_agent(agent_classes[agent_name]())
self.__data[agent_name].append((environment, new_env))
self.__times = dict()
def execute(self):
"""Execute for every agent every environment."""
for agent_name, env_l in self.__data.items():
for env_t in env_l:
start_time = time.time()
env_t[1].execute()
final_time = time.time() - start_time
self.__times[(agent_name, env_t[0])] = final_time
def get_score(self):
"""Show all results."""
print("!!! Score !!!")
for agent_name, env_l in self.__data.items():
print("Agent: {0} -----".format(agent_name))
for env_name, env in env_l:
print("\t- {0} -> {1} Time elapsed : {2:.3f} seconds".format(env_name, env.formatted_score(), self.__times[(agent_name, env_name)]))
print("EndAgent -----")
# coding=utf-8
from collections import defaultdict
import datetime
import decimal
import logging
from json import dumps, loads, JSONEncoder
import django
from django.conf import settings
from django.core import serializers
from django.http import HttpResponse, Http404
from django.utils.crypto import constant_time_compare
from django.utils.decorators import method_decorator, classonlymethod
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import FormView
from django.db.models.query import QuerySet
from django.utils.functional import curry, Promise
from .compat import force_u, import_module
from .models import APIKey
from .utils import get_pairs_sign, prepare_uuid_string
LOG = logging.getLogger('formapi')
def autodiscover():
for app in settings.INSTALLED_APPS:
try:
import_module('%s.calls' % app)
except ImportError:
continue
class AddHeaderAdapter(logging.LoggerAdapter):
def process(self, msg, kwargs):
msg = ' '.join((self.extra.get('header'), msg))
return msg, kwargs
class DjangoJSONEncoder(JSONEncoder):
def default(self, obj):
date_obj = self.default_date(obj)
if django.VERSION < (1, 9):
from django.db.models.query import ValuesQuerySet
else:
class ValuesQuerySet(object):
pass
if date_obj is not None:
return date_obj
elif isinstance(obj, decimal.Decimal):
return str(obj)
elif isinstance(obj, ValuesQuerySet):
return list(obj)
elif isinstance(obj, QuerySet):
return loads(serializers.serialize('json', obj))
elif isinstance(obj, Promise):
return force_u(obj)
return JSONEncoder.default(self, obj)
def default_date(self, obj):
if isinstance(obj, datetime.datetime):
r = obj.isoformat()
if obj.microsecond:
r = r[:23] + r[26:]
if r.endswith('+00:00'):
r = r[:-6] + 'Z'
return r
elif isinstance(obj, datetime.date):
return obj.isoformat()
elif isinstance(obj, datetime.time):
if obj.tzinfo is not None and obj.tzinfo.utcoffset(obj) is not None:
raise ValueError("JSON can't represent timezone-aware times.")
r = obj.isoformat()
if obj.microsecond:
r = r[:12]
return r
elif isinstance(obj, datetime.timedelta):
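            # NOTE: .seconds covers only the time-of-day part; the days and
            # microseconds components of the timedelta are discarded here.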
return obj.seconds
dumps = curry(dumps, cls=DjangoJSONEncoder)
class API(FormView):
template_name = 'formapi/api/form.html'
signed_requests = True
call_mapping = defaultdict(lambda: defaultdict(dict))
@classmethod
def register(cls, call_cls, namespace, name=None, version='beta'):
call_name = name or call_cls.__name__
API.call_mapping[version][namespace][call_name] = call_cls
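    # A registration sketch (MyCall is a hypothetical call class defined in an
    # app's calls.py module, which autodiscover() imports):
    #
    #     API.register(MyCall, namespace='math', name='add', version='v1')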
@classonlymethod
def as_view(cls, **initkwargs):
autodiscover()
return super(API, cls).as_view(**initkwargs)
def get_form_class(self):
try:
return API.call_mapping[self.version][self.namespace][self.call]
except KeyError:
raise Http404
def get_form_kwargs(self):
kwargs = super(API, self).get_form_kwargs()
if self.api_key:
kwargs['api_key'] = self.api_key
return kwargs
def get_request_params(self):
if self.request.method == 'POST':
return self.request.POST
else:
return self.request.GET
def get_access_params(self):
params = self.get_request_params()
key = params.get('key')
sign = params.get('sign')
return key, sign
def sign_ok(self, sign):
digest = get_pairs_sign(secret=prepare_uuid_string(self.api_key.secret),
sorted_pairs=self.normalized_parameters())
digest = prepare_uuid_string(digest)
sign = prepare_uuid_string(sign)
return constant_time_compare(sign, digest)
    def normalized_parameters(self):
        """
        Normalize the Django request to (field, value) pairs, sorted by field
        name first and then by value.
        """
for field in sorted(self.get_form(self.get_form_class()).fields.keys()):
for item in sorted(self.get_request_params().getlist(field) or []):
if item is not None:
yield field, item
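    # Signing sketch: a client reproduces normalized_parameters() (the form's
    # field/value pairs sorted by field name, then by value) and digests them
    # with the shared API secret via get_pairs_sign() from .utils; sign_ok()
    # then compares that digest against the request's 'sign' parameter in
    # constant time.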
def render_to_json_response(self, context, **response_kwargs):
data = dumps(context)
response_kwargs['content_type'] = 'application/json'
return HttpResponse(data, **response_kwargs)
def form_valid(self, form):
self.log.info('Valid form received')
test_call = False
if self.api_key:
test_call = self.api_key.test
data = form.action(test_call)
response_data = {
'success': not bool(len(form.errors)),
'errors': form.errors,
'data': data
}
return self.render_to_json_response(response_data)
def form_invalid(self, form):
self.log.info('Invalid form received')
response_data = {
'success': False,
'errors': form.errors,
'data': False
}
return self.render_to_json_response(response_data, status=400)
def get_log_header(self):
if not hasattr(self, 'log_header'):
key = getattr(self, 'api_key', None)
self.log_header = '[%s][%s][%s]' % (
self.request.META['REMOTE_ADDR'],
self.request.META['REQUEST_METHOD'],
key.key if key else 'unknown')
return self.log_header
def setup_log(self, log):
self.log = AddHeaderAdapter(log, {'header': self.get_log_header()})
def authorize(self):
if getattr(self.get_form_class(), 'signed_requests', API.signed_requests):
key, sign = self.get_access_params()
            # Check for a non-revoked API key
try:
self.api_key = APIKey.objects.get(key=key, revoked=False)
except APIKey.DoesNotExist:
return False
# Check request signature
return self.sign_ok(sign)
return True
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
# Set up request
self.request = request
# Set up form class
self.version = kwargs['version']
self.namespace = kwargs['namespace']
self.call = kwargs['call']
# Check access params
self.api_key = None
access_granted = self.authorize()
# Setup logging to add header
self.setup_log(LOG)
# Authorize request
if access_granted:
self.log.info('Access Granted %s', self.get_request_params())
return super(API, self).dispatch(request, *args, **kwargs)
# Access denied
self.log.warning('Access Denied %s', self.get_request_params())
return HttpResponse(status=401)
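# A URLconf sketch, assuming Django 1.x-style url() patterns; dispatch() reads
# 'version', 'namespace' and 'call' from the URL keyword arguments:
#
#     url(r'^api/(?P<version>[^/]+)/(?P<namespace>[^/]+)/(?P<call>[^/]+)/$',
#         API.as_view(), name='formapi-call'),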
# Copyright 2001-2013 Python Software Foundation; All Rights Reserved
# https://github.com/aliles/funcsigs - Licenced under the Apache 2.0
# Licence - see: licences/FUNCSIGS.rst
"""Function signature objects for callables
Back port of Python 3.3's function signature tools from the inspect module,
modified to be compatible with Python 2.6, 2.7 and 3.2+.
"""
from __future__ import absolute_import, division, print_function
import itertools
import functools
import re
import types
from collections import OrderedDict
__all__ = ['BoundArguments', 'Parameter', 'Signature', 'signature']
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_NonUserDefinedCallables = (_WrapperDescriptor,
_MethodWrapper,
types.BuiltinFunctionType)
def formatannotation(annotation, base_module=None):
if isinstance(annotation, type):
if annotation.__module__ in ('builtins', '__builtin__', base_module):
return annotation.__name__
return annotation.__module__+'.'+annotation.__name__
return repr(annotation)
def _get_user_defined_method(cls, method_name, *nested):
try:
if cls is type:
return
meth = getattr(cls, method_name)
for name in nested:
meth = getattr(meth, name, meth)
except AttributeError:
return
else:
if not isinstance(meth, _NonUserDefinedCallables):
            # Once '__signature__' is added to 'C'-level callables,
            # this check won't be necessary
return meth
def signature(obj):
'''Get a signature object for the passed callable.'''
if not callable(obj):
raise TypeError('{0!r} is not a callable object'.format(obj))
if isinstance(obj, types.MethodType):
sig = signature(obj.__func__)
if obj.__self__ is None:
# Unbound method: the first parameter becomes positional-only
if sig.parameters:
first = list(sig.parameters.values())[0].replace(
kind=_POSITIONAL_ONLY)
return sig.replace(
parameters=(first,) + tuple(sig.parameters.values())[1:])
else:
return sig
else:
# In this case we skip the first parameter of the underlying
# function (usually `self` or `cls`).
return sig.replace(parameters=tuple(sig.parameters.values())[1:])
try:
sig = obj.__signature__
except AttributeError:
pass
else:
if sig is not None:
return sig
try:
# Was this function wrapped by a decorator?
wrapped = obj.__wrapped__
except AttributeError:
pass
else:
return signature(wrapped)
if isinstance(obj, types.FunctionType):
return Signature.from_function(obj)
if isinstance(obj, functools.partial):
sig = signature(obj.func)
new_params = OrderedDict(sig.parameters.items())
partial_args = obj.args or ()
partial_keywords = obj.keywords or {}
try:
ba = sig.bind_partial(*partial_args, **partial_keywords)
except TypeError:
msg = 'partial object {0!r} has incorrect arguments'.format(obj)
raise ValueError(msg)
for arg_name, arg_value in ba.arguments.items():
param = new_params[arg_name]
if arg_name in partial_keywords:
# We set a new default value, because the following code
# is correct:
#
# >>> def foo(a): print(a)
# >>> print(partial(partial(foo, a=10), a=20)())
# 20
# >>> print(partial(partial(foo, a=10), a=20)(a=30))
# 30
#
# So, with 'partial' objects, passing a keyword argument is
# like setting a new default value for the corresponding
# parameter
#
# We also mark this parameter with '_partial_kwarg'
# flag. Later, in '_bind', the 'default' value of this
# parameter will be added to 'kwargs', to simulate
# the 'functools.partial' real call.
new_params[arg_name] = param.replace(default=arg_value,
_partial_kwarg=True)
elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and
not param._partial_kwarg):
new_params.pop(arg_name)
return sig.replace(parameters=list(new_params.values()))
sig = None
if isinstance(obj, type):
# obj is a class or a metaclass
# First, let's see if it has an overloaded __call__ defined
# in its metaclass
call = _get_user_defined_method(type(obj), '__call__')
if call is not None:
sig = signature(call)
else:
# Now we check if the 'obj' class has a '__new__' method
new = _get_user_defined_method(obj, '__new__')
if new is not None:
sig = signature(new)
else:
# Finally, we should have at least __init__ implemented
init = _get_user_defined_method(obj, '__init__')
if init is not None:
sig = signature(init)
elif not isinstance(obj, _NonUserDefinedCallables):
# An object with __call__
# We also check that the 'obj' is not an instance of
# _WrapperDescriptor or _MethodWrapper to avoid
# infinite recursion (and even potential segfault)
call = _get_user_defined_method(type(obj), '__call__', 'im_func')
if call is not None:
sig = signature(call)
if sig is not None:
# For classes and objects we skip the first parameter of their
# __call__, __new__, or __init__ methods
return sig.replace(parameters=tuple(sig.parameters.values())[1:])
if isinstance(obj, types.BuiltinFunctionType):
# Raise a nicer error message for builtins
msg = 'no signature found for builtin function {0!r}'.format(obj)
raise ValueError(msg)
raise ValueError('callable {0!r} is not supported by signature'.format(obj))
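# A minimal sketch of the public entry point:
#
#     >>> def f(a, b=10, *args, **kwargs):
#     ...     pass
#     >>> str(signature(f))
#     '(a, b=10, *args, **kwargs)'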
class _void(object):
'''A private marker - used in Parameter & Signature'''
class _empty(object):
pass
class _ParameterKind(int):
    def __new__(cls, *args, **kwargs):
        obj = int.__new__(cls, *args)
        obj._name = kwargs['name']
        return obj
def __str__(self):
return self._name
def __repr__(self):
return '<_ParameterKind: {0!r}>'.format(self._name)
_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
class Parameter(object):
'''Represents a parameter in a function signature.
Has the following public attributes:
* name : str
The name of the parameter as a string.
* default : object
The default value for the parameter if specified. If the
parameter has no default value, this attribute is not set.
* annotation
The annotation for the parameter if specified. If the
parameter has no annotation, this attribute is not set.
* kind : str
Describes how argument values are bound to the parameter.
Possible values: `Parameter.POSITIONAL_ONLY`,
`Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
`Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
'''
__slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg')
POSITIONAL_ONLY = _POSITIONAL_ONLY
POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
VAR_POSITIONAL = _VAR_POSITIONAL
KEYWORD_ONLY = _KEYWORD_ONLY
VAR_KEYWORD = _VAR_KEYWORD
empty = _empty
def __init__(self, name, kind, default=_empty, annotation=_empty,
_partial_kwarg=False):
if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
_VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
raise ValueError("invalid value for 'Parameter.kind' attribute")
self._kind = kind
if default is not _empty:
if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
msg = '{0} parameters cannot have default values'.format(kind)
raise ValueError(msg)
self._default = default
self._annotation = annotation
if name is None:
if kind != _POSITIONAL_ONLY:
raise ValueError("None is not a valid name for a "
"non-positional-only parameter")
self._name = name
else:
name = str(name)
if kind != _POSITIONAL_ONLY and not re.match(r'[a-z_]\w*$', name, re.I):
msg = '{0!r} is not a valid parameter name'.format(name)
raise ValueError(msg)
self._name = name
self._partial_kwarg = _partial_kwarg
@property
def name(self):
return self._name
@property
def default(self):
return self._default
@property
def annotation(self):
return self._annotation
@property
def kind(self):
return self._kind
def replace(self, name=_void, kind=_void, annotation=_void,
default=_void, _partial_kwarg=_void):
'''Creates a customized copy of the Parameter.'''
if name is _void:
name = self._name
if kind is _void:
kind = self._kind
if annotation is _void:
annotation = self._annotation
if default is _void:
default = self._default
if _partial_kwarg is _void:
_partial_kwarg = self._partial_kwarg
return type(self)(name, kind, default=default, annotation=annotation,
_partial_kwarg=_partial_kwarg)
def __str__(self):
kind = self.kind
formatted = self._name
if kind == _POSITIONAL_ONLY:
if formatted is None:
formatted = ''
formatted = '<{0}>'.format(formatted)
# Add annotation and default value
if self._annotation is not _empty:
formatted = '{0}:{1}'.format(formatted,
formatannotation(self._annotation))
if self._default is not _empty:
formatted = '{0}={1}'.format(formatted, repr(self._default))
if kind == _VAR_POSITIONAL:
formatted = '*' + formatted
elif kind == _VAR_KEYWORD:
formatted = '**' + formatted
return formatted
def __repr__(self):
return '<{0} at {1:#x} {2!r}>'.format(self.__class__.__name__,
id(self), self.name)
def __hash__(self):
msg = "unhashable type: '{0}'".format(self.__class__.__name__)
raise TypeError(msg)
def __eq__(self, other):
return (issubclass(other.__class__, Parameter) and
self._name == other._name and
self._kind == other._kind and
self._default == other._default and
self._annotation == other._annotation)
def __ne__(self, other):
return not self.__eq__(other)
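# A minimal sketch of Parameter in isolation (the parameter name is
# hypothetical):
#
#     >>> p = Parameter('x', Parameter.POSITIONAL_OR_KEYWORD, default=3)
#     >>> str(p)
#     'x=3'
#     >>> str(p.replace(kind=Parameter.VAR_POSITIONAL, default=Parameter.empty))
#     '*x'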
class BoundArguments(object):
'''Result of `Signature.bind` call. Holds the mapping of arguments
to the function's parameters.
Has the following public attributes:
* arguments : OrderedDict
An ordered mutable mapping of parameters' names to arguments' values.
Does not contain arguments' default values.
* signature : Signature
The Signature object that created this instance.
* args : tuple
Tuple of positional arguments values.
* kwargs : dict
Dict of keyword arguments values.
'''
def __init__(self, signature, arguments):
self.arguments = arguments
self._signature = signature
@property
def signature(self):
return self._signature
@property
def args(self):
args = []
for param_name, param in self._signature.parameters.items():
if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
param._partial_kwarg):
# Keyword arguments mapped by 'functools.partial'
# (Parameter._partial_kwarg is True) are mapped
# in 'BoundArguments.kwargs', along with VAR_KEYWORD &
# KEYWORD_ONLY
break
try:
arg = self.arguments[param_name]
except KeyError:
# We're done here. Other arguments
# will be mapped in 'BoundArguments.kwargs'
break
else:
if param.kind == _VAR_POSITIONAL:
# *args
args.extend(arg)
else:
# plain argument
args.append(arg)
return tuple(args)
@property
def kwargs(self):
kwargs = {}
kwargs_started = False
for param_name, param in self._signature.parameters.items():
if not kwargs_started:
if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
param._partial_kwarg):
kwargs_started = True
else:
if param_name not in self.arguments:
kwargs_started = True
continue
if not kwargs_started:
continue
try:
arg = self.arguments[param_name]
except KeyError:
pass
else:
if param.kind == _VAR_KEYWORD:
# **kwargs
kwargs.update(arg)
else:
# plain keyword argument
kwargs[param_name] = arg
return kwargs
def __hash__(self):
msg = "unhashable type: '{0}'".format(self.__class__.__name__)
raise TypeError(msg)
def __eq__(self, other):
return (issubclass(other.__class__, BoundArguments) and
self.signature == other.signature and
self.arguments == other.arguments)
def __ne__(self, other):
return not self.__eq__(other)
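# A minimal sketch of Signature.bind() and the BoundArguments properties:
#
#     >>> sig = signature(lambda a, b=2, **kw: None)
#     >>> ba = sig.bind(1, c=3)
#     >>> ba.args, ba.kwargs
#     ((1,), {'c': 3})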
class Signature(object):
'''A Signature object represents the overall signature of a function.
It stores a Parameter object for each parameter accepted by the
function, as well as information specific to the function itself.
A Signature object has the following public attributes and methods:
* parameters : OrderedDict
An ordered mapping of parameters' names to the corresponding
Parameter objects (keyword-only arguments are in the same order
as listed in `code.co_varnames`).
* return_annotation : object
The annotation for the return type of the function if specified.
If the function has no annotation for its return type, this
attribute is not set.
* bind(*args, **kwargs) -> BoundArguments
Creates a mapping from positional and keyword arguments to
parameters.
* bind_partial(*args, **kwargs) -> BoundArguments
Creates a partial mapping from positional and keyword arguments
to parameters (simulating 'functools.partial' behavior.)
'''
__slots__ = ('_return_annotation', '_parameters')
_parameter_cls = Parameter
_bound_arguments_cls = BoundArguments
empty = _empty
def __init__(self, parameters=None, return_annotation=_empty,
__validate_parameters__=True):
'''Constructs Signature from the given list of Parameter
objects and 'return_annotation'. All arguments are optional.
'''
if parameters is None:
params = OrderedDict()
else:
if __validate_parameters__:
params = OrderedDict()
top_kind = _POSITIONAL_ONLY
for idx, param in enumerate(parameters):
kind = param.kind
if kind < top_kind:
msg = 'wrong parameter order: {0} before {1}'
msg = msg.format(top_kind, param.kind)
raise ValueError(msg)
else:
top_kind = kind
name = param.name
if name is None:
name = str(idx)
param = param.replace(name=name)
if name in params:
msg = 'duplicate parameter name: {0!r}'.format(name)
raise ValueError(msg)
params[name] = param
else:
params = OrderedDict(((param.name, param)
for param in parameters))
self._parameters = params
self._return_annotation = return_annotation
@classmethod
def from_function(cls, func):
'''Constructs Signature for the given python function'''
if not isinstance(func, types.FunctionType):
raise TypeError('{0!r} is not a Python function'.format(func))
Parameter = cls._parameter_cls
# Parameter information.
func_code = func.__code__
pos_count = func_code.co_argcount
arg_names = func_code.co_varnames
positional = tuple(arg_names[:pos_count])
keyword_only_count = getattr(func_code, 'co_kwonlyargcount', 0)
keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
annotations = getattr(func, '__annotations__', {})
defaults = func.__defaults__
kwdefaults = getattr(func, '__kwdefaults__', None)
if defaults:
pos_default_count = len(defaults)
else:
pos_default_count = 0
parameters = []
# Non-keyword-only parameters w/o defaults.
non_default_count = pos_count - pos_default_count
for name in positional[:non_default_count]:
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD))
# ... w/ defaults.
for offset, name in enumerate(positional[non_default_count:]):
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD,
default=defaults[offset]))
# *args
if func_code.co_flags & 0x04:
name = arg_names[pos_count + keyword_only_count]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_VAR_POSITIONAL))
# Keyword-only parameters.
for name in keyword_only:
default = _empty
if kwdefaults is not None:
default = kwdefaults.get(name, _empty)
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_KEYWORD_ONLY,
default=default))
# **kwargs
if func_code.co_flags & 0x08:
index = pos_count + keyword_only_count
if func_code.co_flags & 0x04:
index += 1
name = arg_names[index]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_VAR_KEYWORD))
return cls(parameters,
return_annotation=annotations.get('return', _empty),
__validate_parameters__=False)
@property
def parameters(self):
try:
return types.MappingProxyType(self._parameters)
except AttributeError:
return OrderedDict(self._parameters.items())
@property
def return_annotation(self):
return self._return_annotation
def replace(self, parameters=_void, return_annotation=_void):
'''Creates a customized copy of the Signature.
Pass 'parameters' and/or 'return_annotation' arguments
to override them in the new copy.
'''
if parameters is _void:
parameters = list(self.parameters.values())
if return_annotation is _void:
return_annotation = self._return_annotation
return type(self)(parameters,
return_annotation=return_annotation)
def __hash__(self):
msg = "unhashable type: '{0}'".format(self.__class__.__name__)
raise TypeError(msg)
def __eq__(self, other):
if (not issubclass(type(other), Signature) or
self.return_annotation != other.return_annotation or
len(self.parameters) != len(other.parameters)):
return False
other_positions = dict((param, idx)
for idx, param in enumerate(other.parameters.keys()))
for idx, (param_name, param) in enumerate(self.parameters.items()):
if param.kind == _KEYWORD_ONLY:
try:
other_param = other.parameters[param_name]
except KeyError:
return False
else:
if param != other_param:
return False
else:
try:
other_idx = other_positions[param_name]
except KeyError:
return False
else:
if (idx != other_idx or
param != other.parameters[param_name]):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def _bind(self, args, kwargs, partial=False):
'''Private method. Don't use directly.'''
arguments = OrderedDict()
parameters = iter(self.parameters.values())
parameters_ex = ()
arg_vals = iter(args)
if partial:
# Support for binding arguments to 'functools.partial' objects.
# See 'functools.partial' case in 'signature()' implementation
# for details.
for param_name, param in self.parameters.items():
if (param._partial_kwarg and param_name not in kwargs):
# Simulating 'functools.partial' behavior
kwargs[param_name] = param.default
while True:
# Let's iterate through the positional arguments and corresponding
# parameters
try:
arg_val = next(arg_vals)
except StopIteration:
# No more positional arguments
try:
param = next(parameters)
except StopIteration:
# No more parameters. That's it. Just need to check that
# we have no `kwargs` after this while loop
break
else:
if param.kind == _VAR_POSITIONAL:
# That's OK, just empty *args. Let's start parsing
# kwargs
break
elif param.name in kwargs:
if param.kind == _POSITIONAL_ONLY:
msg = '{arg!r} parameter is positional only, ' \
'but was passed as a keyword'
msg = msg.format(arg=param.name)
raise TypeError(msg)
parameters_ex = (param,)
break
elif (param.kind == _VAR_KEYWORD or
param.default is not _empty):
# That's fine too - we have a default value for this
# parameter. So, lets start parsing `kwargs`, starting
# with the current parameter
parameters_ex = (param,)
break
else:
if partial:
parameters_ex = (param,)
break
else:
msg = '{arg!r} parameter lacking default value'
msg = msg.format(arg=param.name)
raise TypeError(msg)
else:
# We have a positional argument to process
try:
param = next(parameters)
except StopIteration:
raise TypeError('too many positional arguments')
else:
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
# Looks like we have no parameter for this positional
# argument
raise TypeError('too many positional arguments')
if param.kind == _VAR_POSITIONAL:
# We have an '*args'-like argument, let's fill it with
# all positional arguments we have left and move on to
# the next phase
values = [arg_val]
values.extend(arg_vals)
arguments[param.name] = tuple(values)
break
if param.name in kwargs:
raise TypeError('multiple values for argument '
'{arg!r}'.format(arg=param.name))
arguments[param.name] = arg_val
# Now, we iterate through the remaining parameters to process
# keyword arguments
kwargs_param = None
for param in itertools.chain(parameters_ex, parameters):
if param.kind == _POSITIONAL_ONLY:
# This should never happen in case of a properly built
# Signature object (but let's have this check here
# to ensure correct behaviour just in case)
raise TypeError('{arg!r} parameter is positional only, '
'but was passed as a keyword'. \
format(arg=param.name))
if param.kind == _VAR_KEYWORD:
# Memorize that we have a '**kwargs'-like parameter
kwargs_param = param
continue
param_name = param.name
try:
arg_val = kwargs.pop(param_name)
except KeyError:
# We have no value for this parameter. It's fine though,
# if it has a default value, or it is an '*args'-like
# parameter, left alone by the processing of positional
# arguments.
if (not partial and param.kind != _VAR_POSITIONAL and
param.default is _empty):
raise TypeError('{arg!r} parameter lacking default value'. \
format(arg=param_name))
else:
arguments[param_name] = arg_val
if kwargs:
if kwargs_param is not None:
# Process our '**kwargs'-like parameter
arguments[kwargs_param.name] = kwargs
else:
raise TypeError('too many keyword arguments')
return self._bound_arguments_cls(self, arguments)
def bind(self, *args, **kwargs):
'''Get a BoundArguments object, that maps the passed `args`
and `kwargs` to the function's signature. Raises `TypeError`
if the passed arguments can not be bound.
'''
return self._bind(args, kwargs)
def bind_partial(self, *args, **kwargs):
'''Get a BoundArguments object, that partially maps the
passed `args` and `kwargs` to the function's signature.
Raises `TypeError` if the passed arguments can not be bound.
'''
return self._bind(args, kwargs, partial=True)
def __str__(self):
result = []
render_kw_only_separator = True
for idx, param in enumerate(self.parameters.values()):
formatted = str(param)
kind = param.kind
if kind == _VAR_POSITIONAL:
# OK, we have an '*args'-like parameter, so we won't need
# a '*' to separate keyword-only arguments
render_kw_only_separator = False
elif kind == _KEYWORD_ONLY and render_kw_only_separator:
# We have a keyword-only parameter to render and we haven't
# rendered an '*args'-like parameter before, so add a '*'
# separator to the parameters list ("foo(arg1, *, arg2)" case)
result.append('*')
# This condition should be only triggered once, so
# reset the flag
render_kw_only_separator = False
result.append(formatted)
rendered = '({0})'.format(', '.join(result))
if self.return_annotation is not _empty:
anno = formatannotation(self.return_annotation)
rendered += ' -> {0}'.format(anno)
return rendered
import time
import datetime
import uuid
import os
import sys
import os.path
import threading
import zipfile
import hashlib
import string
from future.utils import iteritems
from globus_sdk import TransferClient
from globus_sdk import TransferData
from globus_sdk import NativeAppAuthClient
from globus_sdk import RefreshTokenAuthorizer
# TO BE REMOVED once python 2.7 support is dropped
import requests.packages.urllib3
try:
    requests.packages.urllib3.disable_warnings()
except Exception:
    pass
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvestercore.plugin_base import PluginBase
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestermover import mover_utils
from pandaharvester.harvestercore.queue_config_mapper import QueueConfigMapper
# Define dummy transfer identifier
dummy_transfer_id_base = 'dummy_id_for_in'
# lock to get a unique ID
uLock = threading.Lock()
# number to get a unique ID
uID = 0
# logger
_logger = core_utils.setup_logger('go_bulk_preparator')
from pandaharvester.harvestermisc import globus_utils
def validate_transferid(transferid):
tmptransferid = transferid.replace('-','')
return all(c in string.hexdigits for c in tmptransferid)
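# A minimal sketch of validate_transferid(): real Globus task IDs are hex
# UUIDs, while the dummy IDs built from dummy_transfer_id_base are rejected:
#
#     >>> validate_transferid('b7a8c9d0-1234-5678-9abc-def012345678')
#     True
#     >>> validate_transferid('dummy_id_for_in_1234')
#     False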
def dump(obj):
for attr in dir(obj):
if hasattr( obj, attr ):
print( "obj.%s = %s" % (attr, getattr(obj, attr)))
# Globus plugin for stager with bulk transfers. For JobSpec and DBInterface methods, see
# https://github.com/PanDAWMS/panda-harvester/wiki/Utilities#file-grouping-for-file-transfers
class GlobusBulkPreparator(PluginBase):
next_id = 0
# constructor
def __init__(self, **kwarg):
PluginBase.__init__(self, **kwarg)
# make logger
        tmpLog = self.make_logger(_logger, 'ThreadID={0}'.format(threading.current_thread().ident),
                                  method_name='GlobusBulkPreparator __init__')
tmpLog.debug('__init__ start')
self.thread_id = threading.current_thread().ident
self.id = GlobusBulkPreparator.next_id
GlobusBulkPreparator.next_id += 1
with uLock:
global uID
self.dummy_transfer_id = '{0}_{1}'.format(dummy_transfer_id_base, 'XXXX')
uID += 1
uID %= harvester_config.preparator.nThreads
# create Globus Transfer Client
try:
self.tc = None
# need to get client_id and refresh_token from PanDA server via harvester cache mechanism
tmpLog.debug('about to call dbInterface.get_cache(globus_secret)')
c_data = self.dbInterface.get_cache('globus_secret')
            if c_data is not None and c_data.data['StatusCode'] == 0:
tmpLog.debug('Got the globus_secrets from PanDA')
self.client_id = c_data.data['publicKey'] # client_id
self.refresh_token = c_data.data['privateKey'] # refresh_token
tmpStat, self.tc = globus_utils.create_globus_transfer_client(tmpLog,self.client_id,self.refresh_token)
if not tmpStat:
self.tc = None
errStr = 'failed to create Globus Transfer Client'
tmpLog.error(errStr)
else :
self.client_id = None
self.refresh_token = None
self.tc = None
errStr = 'failed to get Globus Client ID and Refresh Token'
tmpLog.error(errStr)
except:
core_utils.dump_error_message(tmpLog)
# tmp debugging
tmpLog.debug('self.id = {0}'.format(self.id))
tmpLog.debug('self.dummy_transfer_id = {0}'.format(self.dummy_transfer_id))
# tmp debugging
tmpLog.debug('__init__ finish')
# get dummy_transfer_id
def get_dummy_transfer_id(self):
return self.dummy_transfer_id
# set dummy_transfer_id for testing
def set_dummy_transfer_id_testing(self,dummy_transfer_id):
self.dummy_transfer_id = dummy_transfer_id
# set FileSpec.status
def set_FileSpec_status(self,jobspec,status):
# loop over all input files
for fileSpec in jobspec.inFiles:
fileSpec.status = status
    # check status
    # Returns (True, '') on success, (False, msg) on a fatal error, and
    # (None, msg) when the check should be retried later.
def check_stage_in_status(self, jobspec):
# make logger
tmpLog = self.make_logger(_logger, 'PandaID={0} ThreadID={1}'.format(jobspec.PandaID,threading.current_thread().ident),
method_name='check_stage_in_status')
tmpLog.debug('start')
        # show the dummy transfer ID and reset it to a PandaID-based value if needed
tmpLog.debug('self.dummy_transfer_id = {}'.format(self.dummy_transfer_id))
if self.dummy_transfer_id == '{0}_{1}'.format(dummy_transfer_id_base,'XXXX') :
old_dummy_transfer_id = self.dummy_transfer_id
self.dummy_transfer_id = '{0}_{1}'.format(dummy_transfer_id_base,jobspec.PandaID)
tmpLog.debug('Change self.dummy_transfer_id from {0} to {1}'.format(old_dummy_transfer_id,self.dummy_transfer_id))
# default return
tmpRetVal = (True, '')
# set flag if have db lock
have_db_lock = False
# check that jobspec.computingSite is defined
if jobspec.computingSite is None:
# not found
tmpLog.error('jobspec.computingSite is not defined')
return False, 'jobspec.computingSite is not defined'
else:
tmpLog.debug('jobspec.computingSite : {0}'.format(jobspec.computingSite))
queueConfigMapper = QueueConfigMapper()
queueConfig = queueConfigMapper.get_queue(jobspec.computingSite)
# test we have a Globus Transfer Client
if not self.tc :
errStr = 'failed to get Globus Transfer Client'
tmpLog.error(errStr)
return False, errStr
# set transferID to None
transferID = None
# get transfer groups
groups = jobspec.get_groups_of_input_files(skip_ready=True)
tmpLog.debug('jobspec.get_groups_of_input_files() = : {0}'.format(groups))
# lock if the dummy transfer ID is used to avoid submitting duplicated transfer requests
for dummy_transferID in groups:
            # skip valid (real) transfer IDs; only dummy ones are handled here
if validate_transferid(dummy_transferID) :
continue
# lock for 120 sec
tmpLog.debug('attempt to set DB lock for self.id - {0} self.dummy_transfer_id - {1}, dummy_transferID - {2}'.format(self.id,self.dummy_transfer_id,dummy_transferID))
have_db_lock = self.dbInterface.get_object_lock(dummy_transferID, lock_interval=120)
tmpLog.debug(' DB lock result - {0}'.format(have_db_lock))
if not have_db_lock:
# escape since locked by another thread
msgStr = 'escape since locked by another thread'
tmpLog.debug(msgStr)
return None, msgStr
# refresh group information since that could have been updated by another thread before getting the lock
tmpLog.debug('self.dbInterface.refresh_file_group_info(jobspec)')
self.dbInterface.refresh_file_group_info(jobspec)
tmpLog.debug('after self.dbInterface.refresh_file_group_info(jobspec)')
# get transfer groups again with refreshed info
tmpLog.debug('groups = jobspec.get_groups_of_input_files(skip_ready=True)')
groups = jobspec.get_groups_of_input_files(skip_ready=True)
tmpLog.debug('after db lock and refresh - jobspec.get_groups_of_input_files(skip_ready=True) = : {0}'.format(groups))
# the dummy transfer ID is still there
if dummy_transferID in groups:
groupUpdateTime = groups[dummy_transferID]['groupUpdateTime']
# get files with the dummy transfer ID across jobs
fileSpecs_allgroups = self.dbInterface.get_files_with_group_id(dummy_transferID)
msgStr = 'dummy_transferID = {0} self.dbInterface.get_files_with_group_id(dummy_transferID) number of files = {1}'.format(dummy_transferID,len(fileSpecs_allgroups))
tmpLog.debug(msgStr)
fileSpecs = jobspec.get_input_file_specs(dummy_transferID, skip_ready=True)
msgStr = 'dummy_transferID = {0} jobspec.get_input_file_specs(dummy_transferID,skip_ready=True) number of files = {1}'.format(dummy_transferID,len(fileSpecs))
tmpLog.debug(msgStr)
                # submit the transfer if there are at least 10 files or the
                # group was created more than 10 minutes ago
if len(fileSpecs) >= 10 or \
groupUpdateTime < datetime.datetime.utcnow() - datetime.timedelta(minutes=10):
tmpLog.debug('prepare to transfer files')
# submit transfer and get a real transfer ID
                    # set the Globus source/destination endpoint IDs and paths
                    # (these should eventually come from AGIS)
self.Globus_srcPath = queueConfig.preparator['Globus_srcPath']
self.srcEndpoint = queueConfig.preparator['srcEndpoint']
self.Globus_dstPath = self.basePath
#self.Globus_dstPath = queueConfig.preparator['Globus_dstPath']
self.dstEndpoint = queueConfig.preparator['dstEndpoint']
# Test the endpoints and create the transfer data class
errMsg = None
try:
# Test endpoints for activation
tmpStatsrc, srcStr = globus_utils.check_endpoint_activation(tmpLog,self.tc,self.srcEndpoint)
tmpStatdst, dstStr = globus_utils.check_endpoint_activation(tmpLog,self.tc,self.dstEndpoint)
if tmpStatsrc and tmpStatdst:
errStr = 'source Endpoint and destination Endpoint activated'
tmpLog.debug(errStr)
else:
errMsg = ''
if not tmpStatsrc :
errMsg += ' source Endpoint not activated '
if not tmpStatdst :
errMsg += ' destination Endpoint not activated '
# release process lock
tmpLog.debug('attempt to release DB lock for self.id - {0} self.dummy_transfer_id - {1}, dummy_transferID - {2}'.format(self.id,self.dummy_transfer_id,dummy_transferID))
have_db_lock = self.dbInterface.release_object_lock(dummy_transferID)
if not have_db_lock:
errMsg += ' - Could not release DB lock for {}'.format(dummy_transferID)
tmpLog.error(errMsg)
tmpRetVal = (None,errMsg)
return tmpRetVal
# both endpoints activated now prepare to transfer data
tdata = None
tdata = TransferData(self.tc,
self.srcEndpoint,
self.dstEndpoint,
sync_level="exists")
# sync_level="checksum")
tmpLog.debug('size of tdata[DATA] - {}'.format(len(tdata['DATA'])))
except:
errStat, errMsg = globus_utils.handle_globus_exception(tmpLog)
# release process lock
tmpLog.debug('attempt to release DB lock for self.id - {0} self.dummy_transfer_id - {1}, dummy_transferID - {2}'.format(self.id,self.dummy_transfer_id,dummy_transferID))
release_db_lock = self.dbInterface.release_object_lock(dummy_transferID)
if not release_db_lock:
                            errMsg += ' - Could not release DB lock for {}'.format(dummy_transferID)
tmpLog.error(errMsg)
tmpRetVal = (errStat, errMsg)
return tmpRetVal
# loop over all files
ifile = 0
for fileSpec in fileSpecs:
                        # only log the first 25 files
                        if ifile < 25:
                            msgStr = "fileSpec.lfn - {0} fileSpec.scope - {1}".format(fileSpec.lfn, fileSpec.scope)
                            tmpLog.debug(msgStr)
                        if ifile == 25:
                            msgStr = "printed first 25 files, skipping the rest"
                            tmpLog.debug(msgStr)
# end debug log file test
scope = 'panda'
if fileSpec.scope is not None :
scope = fileSpec.scope
                        hasher = hashlib.md5()
                        hasher.update(('%s:%s' % (scope, fileSpec.lfn)).encode('utf-8'))
                        hash_hex = hasher.hexdigest()
correctedscope = "/".join(scope.split('.'))
#srcURL = fileSpec.path
srcURL = "{endPoint}/{scope}/{hash1}/{hash2}/{lfn}".format(endPoint=self.Globus_srcPath,
scope=correctedscope,
hash1=hash_hex[0:2],
hash2=hash_hex[2:4],
lfn=fileSpec.lfn)
dstURL = "{endPoint}/{scope}/{hash1}/{hash2}/{lfn}".format(endPoint=self.Globus_dstPath,
scope=correctedscope,
hash1=hash_hex[0:2],
hash2=hash_hex[2:4],
lfn=fileSpec.lfn)
# add files to transfer object - tdata
if ifile < 25 :
tmpLog.debug("tdata.add_item({},{})".format(srcURL,dstURL))
tdata.add_item(srcURL,dstURL)
ifile += 1
# submit transfer
tmpLog.debug('Number of files to transfer - {}'.format(len(tdata['DATA'])))
try:
transfer_result = self.tc.submit_transfer(tdata)
# check status code and message
tmpLog.debug(str(transfer_result))
if transfer_result['code'] == "Accepted":
# succeeded
                            # set the transfer ID, which is used for later lookup
transferID = transfer_result['task_id']
tmpLog.debug('successfully submitted id={0}'.format(transferID))
# set status for files
self.dbInterface.set_file_group(fileSpecs, transferID, 'running')
msgStr = 'submitted transfer with ID={0}'.format(transferID)
tmpLog.debug(msgStr)
else:
# release process lock
tmpLog.debug('attempt to release DB lock for self.id - {0} dummy_transferID - {1}'.format(self.id,dummy_transferID))
release_db_lock = self.dbInterface.release_object_lock(dummy_transferID)
if release_db_lock:
tmpLog.debug('Released DB lock for self.id - {0} dummy_transferID - {1}'.format(self.id,dummy_transferID))
have_db_lock = False
else:
errMsg = 'Could not release DB lock for {}'.format(dummy_transferID)
tmpLog.error(errMsg)
tmpRetVal = (None, transfer_result['message'])
return tmpRetVal
except Exception as e:
errStat,errMsg = globus_utils.handle_globus_exception(tmpLog)
# release process lock
tmpLog.debug('attempt to release DB lock for self.id - {0} dummy_transferID - {1}'.format(self.id,dummy_transferID))
release_db_lock = self.dbInterface.release_object_lock(dummy_transferID)
if release_db_lock:
tmpLog.debug('Released DB lock for self.id - {0} dummy_transferID - {1}'.format(self.id,dummy_transferID))
have_db_lock = False
else :
errMsg += ' - Could not release DB lock for {}'.format(dummy_transferID)
tmpLog.error(errMsg)
return errStat, errMsg
else:
msgStr = 'wait until enough files are pooled'
tmpLog.debug(msgStr)
# release the lock
tmpLog.debug('attempt to release DB lock for self.id - {0} dummy_transferID - {1}'.format(self.id,dummy_transferID))
release_db_lock = self.dbInterface.release_object_lock(dummy_transferID)
if release_db_lock:
tmpLog.debug('released DB lock for self.id - {0} dummy_transferID - {1}'.format(self.id,dummy_transferID))
have_db_lock = False
else:
msgStr += ' - Could not release DB lock for {}'.format(dummy_transferID)
tmpLog.error(msgStr)
# return None to retry later
return None, msgStr
# release the db lock if needed
if have_db_lock:
tmpLog.debug('attempt to release DB lock for self.id - {0} dummy_transferID - {1}'.format(self.id,dummy_transferID))
release_db_lock = self.dbInterface.release_object_lock(dummy_transferID)
if release_db_lock:
tmpLog.debug('released DB lock for self.id - {0} dummy_transferID - {1}'.format(self.id,dummy_transferID))
have_db_lock = False
else:
msgStr += ' - Could not release DB lock for {}'.format(dummy_transferID)
tmpLog.error(msgStr)
return None, msgStr
# check transfer with real transfer IDs
# get transfer groups
tmpLog.debug("groups = jobspec.get_groups_of_input_files(skip_ready=True)")
groups = jobspec.get_groups_of_input_files(skip_ready=True)
tmpLog.debug('Number of transfer groups (skip_ready)- {0}'.format(len(groups)))
tmpLog.debug('transfer groups any state (skip_ready)- {0}'.format(groups))
tmpLog.debug("groups = jobspec.get_groups_of_input_files()")
groups = jobspec.get_groups_of_input_files()
tmpLog.debug('Number of transfer groups - {0}'.format(len(groups)))
tmpLog.debug('transfer groups any state - {0}'.format(groups))
tmpLog.debug("groups = jobspec.get_groups_of_input_files(skip_ready=True)")
groups = jobspec.get_groups_of_input_files(skip_ready=True)
if len(groups) == 0:
tmpLog.debug("jobspec.get_groups_of_input_files(skip_ready=True) returned no files ")
tmpLog.debug("check_stage_in_status return status - True ")
return True,''
for transferID in groups:
# allow only valid UUID
if validate_transferid(transferID) :
# get transfer task
tmpStat, transferTasks = globus_utils.get_transfer_task_by_id(tmpLog,self.tc,transferID)
# return a temporary error when failed to get task
if not tmpStat:
errStr = 'failed to get transfer task; tc = %s; transferID = %s' % (str(self.tc),str(transferID))
tmpLog.error(errStr)
return None, errStr
# return a temporary error when task is missing
if transferID not in transferTasks:
errStr = 'transfer task ID - {} is missing'.format(transferID)
tmpLog.error(errStr)
return None, errStr
                # succeeded in finding a transfer task by transferID
if transferTasks[transferID]['status'] == 'SUCCEEDED':
tmpLog.debug('transfer task {} succeeded'.format(transferID))
self.set_FileSpec_status(jobspec,'finished')
return True, ''
# failed
if transferTasks[transferID]['status'] == 'FAILED':
errStr = 'transfer task {} failed'.format(transferID)
tmpLog.error(errStr)
self.set_FileSpec_status(jobspec,'failed')
return False, errStr
# another status
tmpStr = 'transfer task {0} status: {1}'.format(transferID,transferTasks[transferID]['status'])
tmpLog.debug(tmpStr)
return None, tmpStr
# end of loop over transfer groups
tmpLog.debug('End of loop over transfers groups - ending check_stage_in_status function')
return None,'no valid transfer id found'
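    # Return convention used in check_stage_in_status above: (True, '') once a
    # transfer group has finished, (False, errStr) on a fatal transfer failure,
    # and (None, msg) for temporary conditions that the caller should retry later.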
# trigger preparation
def trigger_preparation(self, jobspec):
# make logger
tmpLog = self.make_logger(_logger, 'PandaID={0} ThreadID={1}'.format(jobspec.PandaID,threading.current_thread().ident),
method_name='trigger_preparation')
tmpLog.debug('start')
# default return
tmpRetVal = (True, '')
# check that jobspec.computingSite is defined
if jobspec.computingSite is None:
# not found
tmpLog.error('jobspec.computingSite is not defined')
return False, 'jobspec.computingSite is not defined'
else:
tmpLog.debug('jobspec.computingSite : {0}'.format(jobspec.computingSite))
# test we have a Globus Transfer Client
if not self.tc :
errStr = 'failed to get Globus Transfer Client'
tmpLog.error(errStr)
return False, errStr
# show the dummy transfer id and set to a value with the PandaID if needed.
tmpLog.debug('self.dummy_transfer_id = {}'.format(self.dummy_transfer_id))
if self.dummy_transfer_id == '{0}_{1}'.format(dummy_transfer_id_base,'XXXX') :
old_dummy_transfer_id = self.dummy_transfer_id
self.dummy_transfer_id = '{0}_{1}'.format(dummy_transfer_id_base,jobspec.PandaID)
tmpLog.debug('Change self.dummy_transfer_id from {0} to {1}'.format(old_dummy_transfer_id,self.dummy_transfer_id))
# set the dummy transfer ID which will be replaced with a real ID in check_stage_in_status()
inFiles = jobspec.get_input_file_attributes(skip_ready=True)
        lfns = list(inFiles.keys())  # make a real list so it can be sliced below
#for inLFN in inFiles.keys():
# lfns.append(inLFN)
tmpLog.debug('number of lfns - {0} type(lfns) - {1}'.format(len(lfns),type(lfns)))
jobspec.set_groups_to_files({self.dummy_transfer_id: {'lfns': lfns,'groupStatus': 'pending'}})
if len(lfns) < 10:
            msgStr = 'jobspec.set_groups_to_files - self.dummy_transfer_id - {0}, lfns - {1}, groupStatus - pending'.format(self.dummy_transfer_id,lfns)
else:
tmp_lfns = lfns[:10]
            msgStr = 'jobspec.set_groups_to_files - self.dummy_transfer_id - {0}, lfns (first 10) - {1}, groupStatus - pending'.format(self.dummy_transfer_id,tmp_lfns)
tmpLog.debug(msgStr)
fileSpec_list = jobspec.get_input_file_specs(self.dummy_transfer_id, skip_ready=True)
tmpLog.debug('call jobspec.get_input_file_specs({0}, skip_ready=True) num files returned = {1}'.format(self.dummy_transfer_id,len(fileSpec_list)))
tmpLog.debug('call self.dbInterface.set_file_group(jobspec.get_input_file_specs(self.dummy_transfer_id,skip_ready=True),self.dummy_transfer_id,pending)')
tmpStat = self.dbInterface.set_file_group(fileSpec_list,self.dummy_transfer_id,'pending')
msgStr = 'called self.dbInterface.set_file_group(jobspec.get_input_file_specs(self.dummy_transfer_id,skip_ready=True),self.dummy_transfer_id,pending) return Status {}'.format(tmpStat)
tmpLog.debug(msgStr)
return True, ''
# make label for transfer task
def make_label(self, jobspec):
return "IN-{computingSite}-{PandaID}".format(computingSite=jobspec.computingSite,
PandaID=jobspec.PandaID)
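    # Illustrative example (made-up values): for computingSite "SITE_A" and
    # PandaID 1234567, make_label() returns "IN-SITE_A-1234567".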
# resolve input file paths
def resolve_input_paths(self, jobspec):
# get input files
inFiles = jobspec.get_input_file_attributes()
# set path to each file
for inLFN, inFile in iteritems(inFiles):
inFile['path'] = mover_utils.construct_file_path(self.basePath, inFile['scope'], inLFN)
# set
jobspec.set_input_file_paths(inFiles)
return True, ''
# Globus specific commands
|
|
"""
SQLite3 backend for Hachi, used during analysis.
"""
import re
import sys
import datetime
from django.analysis.tracer import mark_sql_call, is_analysis_running, \
is_analysis_paused, taint, in_view, is_tainted
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.sqlite3.client import DatabaseClient
from django.db.backends.sqlite3.creation import DatabaseCreation
from django.db.backends.sqlite3.introspection import DatabaseIntrospection
from django.db.backends.sqlite3.utils import parameterize_limit
from django.utils.safestring import SafeString
import django.htoken as htoken
try:
try:
from pysqlite2 import dbapi2 as Database
except ImportError, e1:
from sqlite3 import dbapi2 as Database
except ImportError, exc:
import sys
from django.core.exceptions import ImproperlyConfigured
if sys.version_info < (2, 5, 0):
module = 'pysqlite2 module'
exc = e1
else:
module = 'either pysqlite2 or sqlite3 modules (tried in that order)'
raise ImproperlyConfigured("Error loading %s: %s" % (module, exc))
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
Database.register_converter("bool", lambda s: str(s) == '1')
Database.register_converter("time", util.typecast_time)
Database.register_converter("date", util.typecast_date)
Database.register_converter("datetime", util.typecast_timestamp)
Database.register_converter("timestamp", util.typecast_timestamp)
Database.register_converter("TIMESTAMP", util.typecast_timestamp)
Database.register_converter("decimal", util.typecast_decimal)
Database.register_adapter(decimal.Decimal, util.rev_typecast_decimal)
if Database.version_info >= (2,4,1):
    # Starting in 2.4.1, the str type is not accepted anymore, so we convert
    # all str objects to Unicode. As registering an adapter for a primitive
    # type causes a small slow-down, this adapter is only registered for
    # sqlite3 versions needing it.
Database.register_adapter(str, lambda s:s.decode('utf-8'))
Database.register_adapter(SafeString, lambda s:s.decode('utf-8'))
class DatabaseFeatures(BaseDatabaseFeatures):
# SQLite cannot handle us only partially reading from a cursor's result set
# and then writing the same rows to the database in another cursor. This
# setting ensures we always read result sets fully into memory all in one
# go.
can_use_chunked_reads = False
test_db_allows_multiple_connections = False
supports_unspecified_pk = True
supports_1000_query_parameters = False
supports_mixed_date_datetime_comparisons = False
def _supports_stddev(self):
"""Confirm support for STDDEV and related stats functions
SQLite supports STDDEV as an extension package; so
connection.ops.check_aggregate_support() can't unilaterally
rule out support for STDDEV. We need to manually check
whether the call works.
"""
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
try:
cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
has_support = True
except utils.DatabaseError:
has_support = False
cursor.execute('DROP TABLE STDDEV_TEST')
return has_support
class DatabaseOperations(BaseDatabaseOperations):
def date_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_extract('%s', %s)" % (lookup_type.lower(), field_name)
def date_interval_sql(self, sql, connector, timedelta):
# It would be more straightforward if we could use the sqlite strftime
# function, but it does not allow for keeping six digits of fractional
# second information, nor does it allow for formatting date and datetime
# values differently. So instead we register our own function that
# formats the datetime combined with the delta in a manner suitable
# for comparisons.
return u'django_format_dtdelta(%s, "%s", "%d", "%d", "%d")' % (sql,
connector, timedelta.days, timedelta.seconds, timedelta.microseconds)
def date_trunc_sql(self, lookup_type, field_name):
# sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
# function django_date_trunc that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def drop_foreignkey_sql(self):
return ""
def pk_default_value(self):
return 'NULL'
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def compiler(self, compiler_name):
return super(DatabaseOperations, self).compiler(compiler_name)
def sql_flush(self, style, tables, sequences):
        # NB: The generated SQL below is specific to SQLite. The DELETE FROM
        # statements work for SQLite because constraints don't exist.
sql = ['%s %s %s;' % \
(style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Note: No requirement for reset of auto-incremented indices (cf. other
# sql_flush() implementations). Just return SQL at this point
return sql
def year_lookup_bounds(self, value):
first = '%s-01-01'
second = '%s-12-31 23:59:59.999999'
return [first % value, second % value]
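        # Illustrative example: year_lookup_bounds(2010) returns
        # ['2010-01-01', '2010-12-31 23:59:59.999999'].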
def convert_values(self, value, field):
"""SQLite returns floats when it should be returning decimals,
and gets dates and datetimes wrong.
For consistency with other backends, coerce when required.
"""
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return util.typecast_decimal(field.format_number(value))
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type == 'DateField':
return util.typecast_date(value)
elif internal_type == 'DateTimeField':
return util.typecast_timestamp(value)
elif internal_type == 'TimeField':
return util.typecast_time(value)
# No field, or the field isn't known to be a decimal or integer
return value
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'sqlite'
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See http://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations()
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _cursor(self):
if self.connection is None:
settings_dict = self.settings_dict
if not settings_dict['NAME']:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Please fill out the database NAME in the settings module before using the database.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
}
kwargs.update(settings_dict['OPTIONS'])
self.connection = Database.connect(**kwargs)
# Register extract, date_trunc, and regexp functions.
self.connection.create_function("django_extract", 2, _sqlite_extract)
self.connection.create_function("django_date_trunc", 2, _sqlite_date_trunc)
self.connection.create_function("regexp", 2, _sqlite_regexp)
self.connection.create_function("django_format_dtdelta", 5, _sqlite_format_dtdelta)
connection_created.send(sender=self.__class__, connection=self)
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if self.settings_dict['NAME'] != ":memory:":
BaseDatabaseWrapper.close(self)
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
"""
Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
"""
def execute(self, query, params=()):
query = self.convert_query(query)
if is_analysis_running():
ana_call,ana_para = parameterize_limit(query, params)
ana_call = str(ana_call).strip()
self.cur_query = ana_call
mark_sql_call(ana_call, ana_para)
try:
return Database.Cursor.execute(self, query, params)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def executemany(self, query, param_list):
query = self.convert_query(query)
if is_analysis_running():
for params in param_list:
ana_call,ana_para = parameterize_limit(query, params)
ana_call = str(ana_call).strip()
self.cur_query = ana_call
mark_sql_call(ana_call, ana_para)
try:
return Database.Cursor.executemany(self, query, param_list)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def fetchmany(self, size=False):
self.in_call = True
val = Database.Cursor.fetchmany(self, size)
if is_analysis_running() and in_view() and (not is_analysis_paused()):# and len(val) == 1:
tval = [taint(row) for row in val]
for row in tval:
for val in row:
if not is_tainted(val) and val is not None:
print "untainted sql value: %s" % val
# assert is_tainted(val)
htoken.add_sql_value(self.cur_query, tval)
self.in_call = False
return tval
self.in_call = False
return val
def fetchall(self):
self.in_call = True
val = Database.Cursor.fetchall(self)
if is_analysis_running() and (not is_analysis_paused()) and \
in_view(): # and len(val) == 1:
tval = [taint(row) for row in val]
for row in tval:
for val in row:
if not is_tainted(val):
print "untainted sql value: %s" % val
htoken.add_sql_value(self.cur_query, tval)
self.in_call = False
return tval
self.in_call = False
return val
def fetchone(self):
if not hasattr(self, "in_call"):
self.in_call = False
val = Database.Cursor.fetchone(self)
if is_analysis_running() and in_view() and not is_analysis_paused()\
and not self.in_call:
tval = taint(val)
# assert is_tainted(tval)
htoken.add_sql_value(self.cur_query, [tval])
return tval
return val
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub('?', query).replace('%%','%')
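        # Illustrative example:
        #   convert_query("SELECT * FROM t WHERE a = %s AND b LIKE '10%%'")
        #   -> "SELECT * FROM t WHERE a = ? AND b LIKE '10%'"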
def _sqlite_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = util.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
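# Illustrative example for the 'week_day' lookup above: Sunday has isoweekday() == 7,
# so it maps to 7 % 7 + 1 == 1, and Monday (isoweekday() == 1) maps to 2, matching
# Django's 1=Sunday .. 7=Saturday convention.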
def _sqlite_date_trunc(lookup_type, dt):
try:
dt = util.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
def _sqlite_format_dtdelta(dt, conn, days, secs, usecs):
try:
dt = util.typecast_timestamp(dt)
delta = datetime.timedelta(int(days), int(secs), int(usecs))
if conn.strip() == '+':
dt = dt + delta
else:
dt = dt - delta
except (ValueError, TypeError):
return None
if isinstance(dt, datetime.datetime):
rv = dt.strftime("%Y-%m-%d %H:%M:%S")
if dt.microsecond:
rv = "%s.%0.6d" % (rv, dt.microsecond)
else:
rv = dt.strftime("%Y-%m-%d")
return rv
def _sqlite_regexp(re_pattern, re_string):
import re
try:
return bool(re.search(re_pattern, re_string))
except:
return False
|
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------
# Name: SerializeKiller
# Purpose: Finding vulnerable java servers
#
# Author: (c) John de Kroon, 2015
# Version: 1.0.2
# ------------------------------------------------------------------------------
import subprocess
import threading
import time
import socket
import sys
import argparse
import urllib2
import ssl
from socket import error as socket_error
from datetime import datetime
import thread
import time
mutex = thread.allocate_lock()
parser = argparse.ArgumentParser(
prog='serializekiller.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Scan for Java Deserialization vulnerability.")
parser.add_argument('--url', nargs='?', help="Scan a single URL")
parser.add_argument('file', nargs='?', help='File with targets')
args = parser.parse_args()
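# Typical invocations (illustrative):
#   python serializekiller.py targets.txt
#   python serializekiller.py --url example.com
# Entries in the target file may optionally carry a port, e.g. "example.com:8443",
# which worker() splits on ':' below.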
def saveToFile(result):
with open('result.txt', 'a') as f:
f.write(result)
f.close()
def nmap(host, *args):
global shellCounter
global threads
global target_list
# All ports to enumerate over for jboss, jenkins, weblogic, websphere
port_list = ['80', '81', '443', '444', '1099', '5005',
'7001', '7002', '8080', '8081', '8083', '8443',
'8880', '8888', '9000', '9080', '9443', '16200']
# Are there any ports defined for this host?
if not target_list[host]:
found = False
cmd = 'nmap --host-timeout 5 --open -p %s %s' % (','.join(port_list), host)
try:
p = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
out, err = p.communicate()
for this_port in port_list:
if out.find(this_port) >= 0:
if websphere(host, this_port) or weblogic(host, this_port) or jboss(host, this_port) or jenkins(host, this_port):
found = True
if found:
shellCounter += 1
except ValueError, v:
print " ! Something went wrong on host: %s: %s" % (host, v)
return
else:
for port in target_list[host]:
if websphere(
host,
port) or weblogic(
host,
port) or jenkins(
host,
port) or jboss(
host,
port):
shellCounter += 1
return
def websphere(url, port, retry=False):
try:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
output = urllib2.urlopen(
'https://' + url + ":" + port,
context=ctx,
timeout=8).read()
if "rO0AB" in output:
mutex.acquire()
print " - (possibly) Vulnerable Websphere: " + url + " (" + port + ")"
saveToFile('[+] Websphere: ' + url + ':' + port + '\n')
mutex.release()
return True
except urllib2.HTTPError as e:
if e.getcode() == 500:
if "rO0AB" in e.read():
mutex.acquire()
print " - (possibly) Vulnerable Websphere: " + url + " (" + port + ")"
saveToFile('[+] Websphere: ' + url + ':' + port + '\n')
mutex.release()
return True
except:
pass
try:
output = urllib2.urlopen(
'http://' + url + ":" + port,
timeout=3).read()
if "rO0AB" in output:
mutex.acquire()
print " - (possibly) Vulnerable Websphere: " + url + " (" + port + ")"
saveToFile('[+] Websphere: ' + url + ':' + port + '\n')
mutex.release()
return True
except urllib2.HTTPError as e:
if e.getcode() == 500:
if "rO0AB" in e.read():
mutex.acquire()
print " - (possibly) Vulnerable Websphere: " + url + " (" + port + ")"
saveToFile('[+] Websphere: ' + url + ':' + port + '\n')
mutex.release()
return True
except:
pass
# Used this part from https://github.com/foxglovesec/JavaUnserializeExploits
def weblogic(url, port):
try:
server_address = (url, int(port))
sock = socket.create_connection(server_address, 4)
sock.settimeout(2)
# Send headers
headers = 't3 12.2.1\nAS:255\nHL:19\nMS:10000000\nPU:t3://us-l-breens:7001\n\n'
sock.sendall(headers)
try:
data = sock.recv(1024)
except socket.timeout:
return False
sock.close()
if "HELO" in data:
mutex.acquire()
print " - (possibly) Vulnerable Weblogic: " + url + " (" + str(port) + ")"
saveToFile('[+] Weblogic: ' + url + ':' + str(port) + '\n')
mutex.release()
return True
return False
except socket_error:
return False
# Used something from https://github.com/foxglovesec/JavaUnserializeExploits
def jenkins(url, port):
try:
cli_port = False
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
try:
output = urllib2.urlopen('https://'+url+':'+port+"/jenkins/", context=ctx, timeout=8).info()
cli_port = int(output['X-Jenkins-CLI-Port'])
except urllib2.HTTPError, e:
if e.getcode() == 404:
try:
output = urllib2.urlopen('https://'+url+':'+port, context=ctx, timeout=8).info()
cli_port = int(output['X-Jenkins-CLI-Port'])
except:
pass
except:
pass
except:
mutex.acquire()
print " ! Could not check Jenkins on https. Maybe your SSL lib is broken."
mutex.release()
pass
    if not cli_port:  # only fall back to plain http if https did not yield a CLI port
try:
output = urllib2.urlopen('http://'+url+':'+port+"/jenkins/", timeout=8).info()
cli_port = int(output['X-Jenkins-CLI-Port'])
except urllib2.HTTPError, e:
if e.getcode() == 404:
try:
output = urllib2.urlopen('http://'+url+':'+port, timeout=8).info()
cli_port = int(output['X-Jenkins-CLI-Port'])
except:
return False
except:
return False
# Open a socket to the CLI port
try:
server_address = (url, cli_port)
sock = socket.create_connection(server_address, 5)
# Send headers
headers = '\x00\x14\x50\x72\x6f\x74\x6f\x63\x6f\x6c\x3a\x43\x4c\x49\x2d\x63\x6f\x6e\x6e\x65\x63\x74'
sock.send(headers)
data1 = sock.recv(1024)
if "rO0AB" in data1:
mutex.acquire()
print " - (possibly) Vulnerable Jenkins: " + url + " (" + str(port) + ")"
            saveToFile('[+] Jenkins: ' + url + ':' + str(port) + '\n')
mutex.release()
return True
else:
data2 = sock.recv(1024)
if "rO0AB" in data2:
mutex.acquire()
print " - (possibly) Vulnerable Jenkins: " + url + " (" + str(port) + ")"
                saveToFile('[+] Jenkins: ' + url + ':' + str(port) + '\n')
mutex.release()
return True
except:
pass
return False
def jboss(url, port, retry=False):
try:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
output = urllib2.urlopen(
'https://' +
url +
':' +
port +
"/invoker/JMXInvokerServlet",
context=ctx,
timeout=8).read()
except:
try:
output = urllib2.urlopen(
'http://' +
url +
':' +
port +
"/invoker/JMXInvokerServlet",
timeout=8).read()
except:
# OK. I give up.
return False
if "\xac\xed\x00\x05" in output:
mutex.acquire()
print " - (possibly) Vulnerable JBOSS: " + url + " (" + port + ")"
        saveToFile('[+] JBoss: ' + url + ':' + port + '\n')
mutex.release()
return True
return False
def urlStripper(url):
url = str(url.replace("https:", ''))
url = str(url.replace("http:", ''))
url = str(url.replace("\r", ''))
url = str(url.replace("\n", ''))
url = str(url.replace("/", ''))
return url
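# Illustrative example: urlStripper("https://example.com/\r\n") returns "example.com";
# only the scheme, slashes and line endings are stripped, not a port suffix.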
def read_file(filename):
f = open(filename)
content = f.readlines()
f.close()
return content
def worker():
global threads
content = read_file(args.file)
for line in content:
if ":" in line:
item = line.strip().split(':')
if item[0] not in target_list:
target_list[item[0]] = [item[1]]
else:
target_list[item[0]].append(item[1])
else:
if line.strip() not in target_list:
target_list[line.strip()] = []
print str(len(target_list)) + " targets found."
total_jobs = len(target_list)
current = 0
for host in target_list:
current += 1
while threading.active_count() > threads:
mutex.acquire()
print " ! We have more threads running than allowed. Current: {} Max: {}.".format(threading.active_count(), threads)
mutex.release()
if threads < 100:
threads += 1
sys.stdout.flush()
time.sleep(2)
mutex.acquire()
print " # Starting test {} of {} on {}.".format(current, total_jobs, host)
sys.stdout.flush()
mutex.release()
threading.Thread(target=nmap, args=(host, False, 1)).start()
# We're done!
while threading.active_count() > 2:
mutex.acquire()
print " # Waiting for everybody to come back. Still {} active.".format(threading.active_count() - 1)
sys.stdout.flush()
mutex.release()
time.sleep(4)
mutex.acquire()
print
print " => scan done. " + str(shellCounter) + " vulnerable hosts found."
print "Execution time: " + str(datetime.now() - startTime)
mutex.release()
exit()
if __name__ == '__main__':
startTime = datetime.now()
mutex.acquire()
print "Start SerializeKiller..."
print "This could take a while. Be patient."
print
mutex.release()
try:
ssl.create_default_context()
except:
print " ! WARNING: Your SSL lib isn't supported. Results might be incomplete."
pass
target_list = {}
shellCounter = 0
if args.url:
target_list[urlStripper(args.url)] = []
nmap(urlStripper(args.url))
elif args.file:
threads = 30
worker()
else:
mutex.acquire()
print "ERROR: Specify a file or a url!"
mutex.release()
|
|
import numpy as np
import time
import socket
import math
import random
import traceback
import serialWrapper
import packetBuilder
import packetParser
from collections import deque
from serial.tools import list_ports
##############################
## Hokuyo socket parameters ##
## Should go into main prog ##
##############################
TCP_IP = '192.168.0.10'
TCP_PORT = 10940
BUFFER_SIZE = 8192 #4096
VID = 1155
PID = 22336
SNR = '336234893534'
# Initialize socket connection
# HAS TO GO INTO MAIN PROG
#s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#s.connect((TCP_IP, TCP_PORT))
#time.sleep(0.1)
#s.send('BM\r')
#data = s.recv(BUFFER_SIZE)
#time.sleep(0.1)
#for i in xrange(3):
# s.send('GE0000108000\r')
# data = s.recv(BUFFER_SIZE)
# time.sleep(0.1)
# Initialize robot and particles
# HAS TO GO INTO MAIN PROG
#myrobot = Robot(True)
#myrobot.x, myrobot.y, myrobot.orientation = 1587.0, 349.0, 0
#p = [Robot(True) for i in xrange(N)]
N = 100
# Dimensions of the playing field
WORLD_X = 3000
WORLD_Y = 2000
# Beacon location: 1(left middle), 2(right lower), 3(right upper)
BEACONS = [(-56,1000),(3062,-56),(3055,2014)]
class Robot(object):
def __init__(self, first):
"""Initialize robot/particle with random position"""
if first:
self.x = random.gauss(155.3, 5)
self.y = random.gauss(755.5, 5)
self.orientation = random.gauss(0.0, 0.1)
def set(self, x_new, y_new, orientation_new):
"""Set particle position on the field"""
if 0 <= x_new <= WORLD_X:
self.x = x_new
else:
self.x = random.gauss(1587, 5)
if 0 <= y_new <= WORLD_Y:
self.y = y_new
else:
self.y = random.gauss(349, 5)
self.orientation = orientation_new % (2 * math.pi)
def move(self, delta):
"""Move particle by creating new one and setting position"""
        # The robot reports its pose relative to the start, so subtracting the
        # previous reading from the new one gives the displacement dx, dy, dtheta.
#if delta[2] == 0:
x_new = self.x + delta[0] + random.gauss(0, 2)
y_new = self.y + delta[1] + random.gauss(0, 2)
orientation_new = self.orientation + delta[2] + random.gauss(0, 0.05)
new_robot = Robot(False)
new_robot.set(x_new, y_new, orientation_new)
return new_robot
def pose(self):
return self.x, self.y, self.orientation
def weight(self, x_rob, y_rob, BEACONS):
temp_beac = [(beacon[0] - self.x, beacon[1] - self.y) for beacon in BEACONS]
beacons = [(math.cos(self.orientation)*beac[0] + math.sin(self.orientation)*beac[1],
-math.sin(self.orientation)*beac[0] + math.cos(self.orientation)*beac[1])
for beac in temp_beac]
beacon = [0, 0, 0]
num_point = [0, 0, 0]
for j in xrange(len(x_rob)):
l1 = abs(math.sqrt((beacons[0][0] - x_rob[j])**2 +
(beacons[0][1] - y_rob[j])**2) - 40)
l2 = abs(math.sqrt((beacons[1][0] - x_rob[j])**2 +
(beacons[1][1] - y_rob[j])**2) - 40)
l3 = abs(math.sqrt((beacons[2][0] - x_rob[j])**2 +
(beacons[2][1] - y_rob[j])**2) - 40)
lmin = l1
num = 0
if l2 < lmin:
lmin = l2
num = 1
if l3 < lmin:
lmin = l3
num = 2
beacon[num] += lmin
num_point[num] += 1
median =[(beacon[i]/num_point[i]) for i in xrange(3) if num_point[i] != 0]
try:
return 1.0/sum(median)
except ZeroDivisionError:
return 0
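    # Note on weight(): each scan point is assigned to its nearest beacon, the
    # per-beacon mean residual (measured distance minus the fixed 40, presumably
    # the beacon radius in mm) is accumulated, and the particle weight is the
    # inverse of the summed means, so particles that explain the scan better get
    # larger weights.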
def __str__(self):
return 'Particle pose: x = %.2f mm, y = %.2f mm, theta = %.2f deg' \
%(self.x, self.y, np.degrees(self.orientation))
###############################################################################
# Calculate robot orientation
def mean_angl(p, w):
x = 0
y = 0
for i in xrange(100):
x += w[i]*math.cos(p[i].orientation)
y += w[i]*math.sin(p[i].orientation)
angle = math.atan2(y,x)
if angle < 0:
return angle + (2*math.pi)
return angle
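# Note: mean_angl() computes a weighted circular mean by summing weighted unit
# vectors and taking atan2, so headings of e.g. 350 and 10 degrees average to 0
# degrees rather than the arithmetic 180; the result is normalised to [0, 2*pi).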
# Extract data from lidar scan
def lidar_scan(answer, pose):
answer = answer.split('\n')
dist = answer[2:-2]
dist2 = [item[:-1] for item in dist]
dist4 = ''.join(dist2)
step = 0
idxh = 3
polar_graph = deque()
append_pg = polar_graph.append
angle = deque()
append_a = angle.append
lend = len(dist4)
while idxh <= lend:
point = dist_val(dist4[idxh-3:idxh])
if dist_val(dist4[idxh:idxh+3]) > 1100 and point < 4000:
append_pg(point)
append_a(step)
idxh += 6
step += 0.004363323129985824
return angle, polar_graph
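# Note: the step increment in lidar_scan above, 0.004363323129985824 rad, is
# pi/720, i.e. 0.25 degrees per loop iteration; idxh advances by 6 characters
# (two 3-character encoded values) per iteration.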
# Transforms lidar measurement to xy in robot coord sys
def p_trans(agl, pit):
x_rob = [pit[i] * math.cos(angle5(agl[i])) for i in xrange(len(agl))]
y_rob = [pit[i] * math.sin(angle5(agl[i])) for i in xrange(len(agl))]
return x_rob, y_rob
# Transforms lidar point angle in robot coord sys
def angle5(angle):
if angle >= math.pi/4:
return angle - math.pi/4
else:
return angle + 7*math.pi/4
# Calculate odometry relative motion
def relative_motion(old, computerPort, commands, lock):
"""Return robot current coordinates"""
#print 'old', old
time.sleep(0.05)
packet = packetBuilder.BuildPacket(commands.getCurentCoordinates)
with lock:
recievedPacket = computerPort.sendRequest(packet.bytearray)
new = recievedPacket.reply
#print 'new', new
return [(new[0]-old[0])*1000, (new[1]-old[1])*1000, new[2]-old[2]], new
# Calculate dist and angle from raw lidar data
def dist_val(value):
try:
return ((ord(value[0])-48)<<12)|((ord(value[1])-48)<<6)|(ord(value[2])-48)
except IndexError:
return 0
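# Worked example: dist_val('1Dh') decodes the three characters as
# ((49-48)<<12) | ((68-48)<<6) | (104-48) == 4096 + 1280 + 56 == 5432,
# presumably millimetres, consistent with the field dimensions above.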
def connect_stm():
for port in list_ports.comports():
if (port.serial_number == SNR) and (port.pid == PID) and (port.vid == VID):
port = '/dev/' + port.name
else:
print 'No STM32 found. Aborting'
computerPort = serialWrapper.SerialWrapper(port)
commands = packetBuilder.CommandsList()
return computerPort, commands
# Localisation
def localisation(lock, shared, computerPort, commands):
lock = lock
shared = shared
computerPort = computerPort
commands = commands
#computerPort, commands = connect_stm()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
#print '2'
time.sleep(0.1)
s.send('BM\r')
data = s.recv(BUFFER_SIZE)
time.sleep(0.1)
for i in xrange(3):
s.send('GE0000108000\r')
data = s.recv(BUFFER_SIZE)
time.sleep(0.1)
myrobot = Robot(True)
myrobot.x, myrobot.y, myrobot.orientation = 155.3, 755.5, 0.0
p = [Robot(True) for i in xrange(N)]
old = [0.0, 0.0, 0.0]
try:
while 1:
rel_motion, old2 = relative_motion(old, computerPort, commands, lock)
#if abs(rel_motion[0]) < 0.001 and abs(rel_motion[1]) < 0.001 and abs(rel_motion[2]) < 0.000001:
# print 'Stopped moving'
# return myrobot.x, myrobot.y, myrobot.orientation
#start = time.time()
p2 = [p[i].move(rel_motion) for i in xrange(N)]
p = p2
old = old2
s.send('GE0000108000\r')
data_lidar = s.recv(BUFFER_SIZE)
angle, distance = lidar_scan(data_lidar, myrobot.pose())
x_rob, y_rob = p_trans(angle, distance)
w =[p[i].weight(x_rob, y_rob, BEACONS) for i in xrange(N)]
w = np.asarray(w)
w /= w.sum()
mean_orientation = mean_angl(p, w)
try:
mean_val = [(p[i].x*w[i], p[i].y*w[i]) for i in xrange(N)]
p3 = np.random.choice(p, N, p = w)
p = list(p3)
center = np.sum(mean_val, axis = 0)
myrobot.set(center[0], center[1], mean_orientation)
except:
pass
with shared.get_lock():
shared[0] = myrobot.x
shared[1] = myrobot.y
shared[2] = myrobot.orientation
#end = time.time()
#print start - end
except:
traceback.print_exc()
s.shutdown(2)
s.close()
#try:
# myrobot = Robot(True)
# myrobot.x, myrobot.y, myrobot.orientation = 524.0, 225.0, 0
# p = [Robot(True) for i in xrange(N)]
# localisation(myrobot, p, s)
#except:
# traceback.print_exc()
# s.shutdown(2)
# s.close()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import multiprocessing
import os
import signal
import subprocess
import time
import unittest
from contextlib import suppress
from subprocess import CalledProcessError
from tempfile import NamedTemporaryFile
from time import sleep
from unittest import mock
import psutil
import pytest
from airflow.exceptions import AirflowException
from airflow.utils import process_utils
from airflow.utils.process_utils import check_if_pidfile_process_is_running, execute_in_subprocess
class TestReapProcessGroup(unittest.TestCase):
@staticmethod
def _ignores_sigterm(child_pid, child_setup_done):
def signal_handler(unused_signum, unused_frame):
pass
signal.signal(signal.SIGTERM, signal_handler)
child_pid.value = os.getpid()
child_setup_done.release()
while True:
time.sleep(1)
@staticmethod
def _parent_of_ignores_sigterm(parent_pid, child_pid, setup_done):
def signal_handler(unused_signum, unused_frame):
pass
os.setsid()
signal.signal(signal.SIGTERM, signal_handler)
child_setup_done = multiprocessing.Semaphore(0)
child = multiprocessing.Process(
target=TestReapProcessGroup._ignores_sigterm, args=[child_pid, child_setup_done]
)
child.start()
child_setup_done.acquire(timeout=5.0)
parent_pid.value = os.getpid()
setup_done.release()
while True:
time.sleep(1)
def test_reap_process_group(self):
"""
Spin up a process that can't be killed by SIGTERM and make sure
it gets killed anyway.
"""
parent_setup_done = multiprocessing.Semaphore(0)
parent_pid = multiprocessing.Value('i', 0)
child_pid = multiprocessing.Value('i', 0)
args = [parent_pid, child_pid, parent_setup_done]
parent = multiprocessing.Process(target=TestReapProcessGroup._parent_of_ignores_sigterm, args=args)
try:
parent.start()
assert parent_setup_done.acquire(timeout=5.0)
assert psutil.pid_exists(parent_pid.value)
assert psutil.pid_exists(child_pid.value)
process_utils.reap_process_group(parent_pid.value, logging.getLogger(), timeout=1)
assert not psutil.pid_exists(parent_pid.value)
assert not psutil.pid_exists(child_pid.value)
finally:
try:
os.kill(parent_pid.value, signal.SIGKILL) # terminate doesn't work here
os.kill(child_pid.value, signal.SIGKILL) # terminate doesn't work here
except OSError:
pass
class TestExecuteInSubProcess:
def test_should_print_all_messages1(self, caplog):
execute_in_subprocess(["bash", "-c", "echo CAT; echo KITTY;"])
msgs = [record.getMessage() for record in caplog.records]
assert ["Executing cmd: bash -c 'echo CAT; echo KITTY;'", 'Output:', 'CAT', 'KITTY'] == msgs
def test_should_print_all_messages_from_cwd(self, caplog, tmp_path):
execute_in_subprocess(["bash", "-c", "echo CAT; pwd; echo KITTY;"], cwd=str(tmp_path))
msgs = [record.getMessage() for record in caplog.records]
assert [
"Executing cmd: bash -c 'echo CAT; pwd; echo KITTY;'",
'Output:',
'CAT',
str(tmp_path),
'KITTY',
] == msgs
def test_should_raise_exception(self):
with pytest.raises(CalledProcessError):
process_utils.execute_in_subprocess(["bash", "-c", "exit 1"])
def my_sleep_subprocess():
sleep(100)
def my_sleep_subprocess_with_signals():
signal.signal(signal.SIGINT, lambda signum, frame: None)
signal.signal(signal.SIGTERM, lambda signum, frame: None)
sleep(100)
class TestKillChildProcessesByPids(unittest.TestCase):
def test_should_kill_process(self):
before_num_process = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().count("\n")
process = multiprocessing.Process(target=my_sleep_subprocess, args=())
process.start()
sleep(0)
num_process = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().count("\n")
assert before_num_process + 1 == num_process
process_utils.kill_child_processes_by_pids([process.pid])
num_process = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().count("\n")
assert before_num_process == num_process
def test_should_force_kill_process(self):
process = multiprocessing.Process(target=my_sleep_subprocess_with_signals, args=())
process.start()
sleep(0)
all_processes = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().splitlines()
assert str(process.pid) in map(lambda x: x.strip(), all_processes)
with self.assertLogs(process_utils.log) as cm:
process_utils.kill_child_processes_by_pids([process.pid], timeout=0)
assert any("Killing child PID" in line for line in cm.output)
sleep(0)
all_processes = subprocess.check_output(["ps", "-ax", "-o", "pid="]).decode().splitlines()
assert str(process.pid) not in map(lambda x: x.strip(), all_processes)
class TestPatchEnviron(unittest.TestCase):
def test_should_update_variable_and_restore_state_when_exit(self):
with mock.patch.dict("os.environ", {"TEST_NOT_EXISTS": "BEFORE", "TEST_EXISTS": "BEFORE"}):
del os.environ["TEST_NOT_EXISTS"]
assert "BEFORE" == os.environ["TEST_EXISTS"]
assert "TEST_NOT_EXISTS" not in os.environ
with process_utils.patch_environ({"TEST_NOT_EXISTS": "AFTER", "TEST_EXISTS": "AFTER"}):
assert "AFTER" == os.environ["TEST_NOT_EXISTS"]
assert "AFTER" == os.environ["TEST_EXISTS"]
assert "BEFORE" == os.environ["TEST_EXISTS"]
assert "TEST_NOT_EXISTS" not in os.environ
def test_should_restore_state_when_exception(self):
with mock.patch.dict("os.environ", {"TEST_NOT_EXISTS": "BEFORE", "TEST_EXISTS": "BEFORE"}):
del os.environ["TEST_NOT_EXISTS"]
assert "BEFORE" == os.environ["TEST_EXISTS"]
assert "TEST_NOT_EXISTS" not in os.environ
with suppress(AirflowException):
with process_utils.patch_environ({"TEST_NOT_EXISTS": "AFTER", "TEST_EXISTS": "AFTER"}):
assert "AFTER" == os.environ["TEST_NOT_EXISTS"]
assert "AFTER" == os.environ["TEST_EXISTS"]
raise AirflowException("Unknown exception")
assert "BEFORE" == os.environ["TEST_EXISTS"]
assert "TEST_NOT_EXISTS" not in os.environ
class TestCheckIfPidfileProcessIsRunning(unittest.TestCase):
def test_ok_if_no_file(self):
check_if_pidfile_process_is_running('some/pid/file', process_name="test")
def test_remove_if_no_process(self):
# Assert file is deleted
with pytest.raises(FileNotFoundError):
with NamedTemporaryFile('+w') as f:
f.write('19191919191919191991')
f.flush()
check_if_pidfile_process_is_running(f.name, process_name="test")
def test_raise_error_if_process_is_running(self):
pid = os.getpid()
with NamedTemporaryFile('+w') as f:
f.write(str(pid))
f.flush()
with pytest.raises(AirflowException, match="is already running under PID"):
check_if_pidfile_process_is_running(f.name, process_name="test")
|
|
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains APIs to facilitate Imc backup and import
"""
import time
from ..imcexception import ImcValidationException
def backup_create(handle, remote_host, remote_file, protocol, username, password,
passphrase, timeout_in_sec=600, entity="CMC", **kwargs):
"""
backup_create helps create and download Imc backups.
Args:
handle (ImcHandle): Imc Connection handle
remote_host (str): IP or Hostname for the remote host.
remote_file (str): Absolute path and name for the backup file
protocol (str) : "ftp", "http", "scp", "sftp", "tftp"
username (str) : Remote Host user name
password (str) : Remote Host user credentials/password
passphrase (str) : Password for the backup file.
        timeout_in_sec (number) : time in seconds for which the method waits
                                for the backup file to be generated before it exits.
entity (str): For C3260 platforms:
"CMC" for backup of chassis related configuration and state
"CIMC1" for backup of server-1 related configuration and state
"CIMC2" for backup of server-2 related configuration and state
kwargs : key=value paired arguments
Example:
remote_file = "/root/config_backup.xml"
backup_create(h,remote_file=remote_file,
protocol="ftp",username="user",password="pass",
remote_host="10.10.10.10",passphrase="xxxxxx")
backup_create(handle, remote_file="/users/xyz/backup",
remote_host="1.1.1.1", protocol="scp",
username="admin", password="password",
passphrase="passphrase", timeout_in_sec=600, entity="CMC")
"""
from ..mometa.mgmt.MgmtBackup import MgmtBackup, MgmtBackupConsts
from ..mometa.top.TopSystem import TopSystem
from ..mometa.equipment.EquipmentChassis import EquipmentChassis
from ..imccoreutils import IMC_PLATFORM
if password == "" or passphrase == "":
raise ImcValidationException("Invalid password or passphrase")
top_system = TopSystem()
parent_mo = None
mgmt_backup = None
if handle.platform == IMC_PLATFORM.TYPE_CLASSIC:
parent_mo = top_system
elif handle.platform == IMC_PLATFORM.TYPE_MODULAR:
parent_mo = EquipmentChassis(parent_mo_or_dn=top_system)
mgmt_backup = MgmtBackup(parent_mo_or_dn=parent_mo)
mgmt_backup.hostname = remote_host
mgmt_backup.remote_file = remote_file
mgmt_backup.user = username
mgmt_backup.pwd = password
mgmt_backup.passphrase = passphrase
mgmt_backup.proto = protocol
mgmt_backup.admin_state = MgmtBackupConsts.ADMIN_STATE_ENABLED
mgmt_backup.set_prop_multiple(**kwargs)
if handle.platform == IMC_PLATFORM.TYPE_MODULAR:
mgmt_backup.entity = entity
handle.add_mo(mgmt_backup, modify_present=True)
# Checking for the backup to complete.
time.sleep(10)
duration = timeout_in_sec
poll_interval = 2
download_status = False
while not download_status:
mgmt_backup = handle.query_dn(dn=mgmt_backup.dn)
admin_state_temp = mgmt_backup.admin_state
        # Break condition: if the admin state is disabled then break
if admin_state_temp == MgmtBackupConsts.ADMIN_STATE_DISABLED:
if mgmt_backup.fsm_stage_descr == "Completed successfully":
download_status = True
if mgmt_backup.fsm_stage_descr == "Error":
raise ImcValidationException("Failed to export the CIMC "
"configuration file." +
"Error Code: " +
mgmt_backup.fsm_rmt_inv_err_code +
" Error Description: " +
mgmt_backup.fsm_rmt_inv_err_descr)
if download_status:
break
time.sleep(min(duration, poll_interval))
duration = max(0, (duration - poll_interval))
if duration == 0:
handle.remove_mo(mgmt_backup)
raise ImcValidationException('backup_create timed out')
def backup_import(handle, remote_host, remote_file, protocol, username,
password, passphrase, entity="CMC", **kwargs):
"""
    This operation uploads an Imc backup taken earlier via GUI
    or backup_create operation for all configuration, system configuration,
    and logical configuration files. The user can perform an import while the
system is up and running.
Args:
handle (ImcHandle): connection handle
remote_host (str): IP or Hostname for the remote host.
remote_file (str): Absolute path and name for the backup file
protocol (str) : "ftp", "http", "scp", "sftp", "tftp"
username (str) : Remote Host user name
password (str) : Remote Host user credentials/password
passphrase (str) : Password for the backup file.
entity (str): For C3260 platforms:
"CMC" for importing chassis related configuration and state
"CIMC1" for importing server-1 related configuration and state
"CIMC2" for importing server-2 related configuration and state
kwargs : key=value paired arguments
Example:
remote_file = "/root/config_backup.xml"
backup_import(h,remote_file=remote_file,
protocol="ftp",username="user",password="pass",
remote_host="10.10.10.10",passphrase="xxxxxx")
backup_import(handle, remote_file="/users/xyz/backup",
remote_host="1.1.1.1", protocol="scp",
username="admin", password="password",
passphrase="passphrase", timeout_in_sec=600, entity="CMC")
"""
from ..mometa.top.TopSystem import TopSystem
from ..mometa.mgmt.MgmtImporter import MgmtImporter, MgmtImporterConsts
from ..mometa.equipment.EquipmentChassis import EquipmentChassis
from ..imccoreutils import IMC_PLATFORM
if password == "" or passphrase == "":
raise ImcValidationException("Invalid password or passphrase")
# create MgmtImporter
top_system = TopSystem()
parent_mo = None
if handle.platform == IMC_PLATFORM.TYPE_CLASSIC:
parent_mo = top_system
elif handle.platform == IMC_PLATFORM.TYPE_MODULAR:
parent_mo = EquipmentChassis(parent_mo_or_dn=top_system)
mgmt_importer = MgmtImporter(parent_mo_or_dn=parent_mo)
mgmt_importer.hostname = remote_host
mgmt_importer.remote_file = remote_file
mgmt_importer.proto = protocol
mgmt_importer.user = username
mgmt_importer.pwd = password
mgmt_importer.passphrase = passphrase
mgmt_importer.admin_state = MgmtImporterConsts.ADMIN_STATE_ENABLED
mgmt_importer.set_prop_multiple(**kwargs)
if handle.platform == IMC_PLATFORM.TYPE_MODULAR:
mgmt_importer.entity = entity
handle.add_mo(mgmt_importer, modify_present=True)
time.sleep(10)
download_status = False
while not download_status:
mgmt_importer = handle.query_dn(dn=mgmt_importer.dn)
admin_state_temp = mgmt_importer.admin_state
        # Break condition: if the admin state is disabled then break
if admin_state_temp == MgmtImporterConsts.ADMIN_STATE_DISABLED:
if mgmt_importer.fsm_stage_descr == "Completed successfully":
download_status = True
if mgmt_importer.fsm_stage_descr == "Error":
raise ImcValidationException(
"Failed to import the CIMC "
"configuration file." +
"Error Code: " +
mgmt_importer.fsm_rmt_inv_err_code +
" Error Description: " +
mgmt_importer.fsm_rmt_inv_err_descr)
if download_status:
break
return mgmt_importer
|
|
from collections import OrderedDict
import numpy
from numpy.testing import assert_raises, assert_equal
from picklable_itertools import repeat
from six.moves import zip, range, cPickle
from fuel.datasets import Dataset, IterableDataset, IndexableDataset
from fuel.streams import DataStream
from fuel.schemes import ShuffledScheme, BatchSizeScheme, ConstantScheme
from fuel.transformers import Mapping
class TestDataset(object):
def setUp(self):
self.data = [1, 2, 3]
self.stream = DataStream(IterableDataset(self.data))
def test_one_example_at_a_time(self):
assert_equal(
list(self.stream.get_epoch_iterator()), list(zip(self.data)))
def test_multiple_epochs(self):
for i, epoch in zip(range(2), self.stream.iterate_epochs()):
assert list(epoch) == list(zip(self.data))
def test_as_dict(self):
assert_equal(
next(self.stream.get_epoch_iterator(as_dict=True)), {"data": 1})
def test_value_error_on_no_provided_sources(self):
class FaultyDataset(Dataset):
def get_data(self, state=None, request=None):
pass
assert_raises(ValueError, FaultyDataset, self.data)
def test_value_error_on_nonexistent_sources(self):
def instantiate_dataset():
return IterableDataset(self.data, sources=('dummy',))
assert_raises(ValueError, instantiate_dataset)
def test_default_transformer(self):
class DoublingDataset(IterableDataset):
def apply_default_transformer(self, stream):
return Mapping(
stream, lambda sources: tuple(2 * s for s in sources))
dataset = DoublingDataset(self.data)
stream = dataset.apply_default_transformer(DataStream(dataset))
assert_equal(list(stream.get_epoch_iterator()), [(2,), (4,), (6,)])
def test_no_axis_labels(self):
assert IterableDataset(self.data).axis_labels is None
def test_axis_labels(self):
axis_labels = {'data': ('batch',)}
dataset = IterableDataset(self.data, axis_labels=axis_labels)
assert dataset.axis_labels == axis_labels
def test_attribute_error_on_no_example_iteration_scheme(self):
class FaultyDataset(Dataset):
provides_sources = ('data',)
def get_data(self, state=None, request=None):
pass
def get_example_iteration_scheme():
return FaultyDataset().example_iteration_scheme
assert_raises(AttributeError, get_example_iteration_scheme)
def test_example_iteration_scheme(self):
scheme = ConstantScheme(2)
class MinimalDataset(Dataset):
provides_sources = ('data',)
_example_iteration_scheme = scheme
def get_data(self, state=None, request=None):
pass
assert MinimalDataset().example_iteration_scheme is scheme
def test_filter_sources(self):
dataset = IterableDataset(
OrderedDict([('1', [1, 2]), ('2', [3, 4])]), sources=('1',))
assert_equal(dataset.filter_sources(([1, 2], [3, 4])), ([1, 2],))
class TestIterableDataset(object):
def test_value_error_on_non_iterable_dict(self):
assert_raises(ValueError, IterableDataset, {'x': None, 'y': None})
def test_value_error_on_non_iterable(self):
assert_raises(ValueError, IterableDataset, None)
def test_value_error_get_data_none_state(self):
assert_raises(
ValueError, IterableDataset([1, 2, 3]).get_data, None, None)
def test_value_error_get_data_request(self):
assert_raises(
ValueError, IterableDataset([1, 2, 3]).get_data, [1, 2, 3], True)
class TestIndexableDataset(object):
def test_getattr(self):
assert_equal(getattr(IndexableDataset({'a': (1, 2)}), 'a'), (1, 2))
def test_value_error_on_non_iterable(self):
assert_raises(ValueError, IterableDataset, None)
def test_value_error_get_data_state(self):
assert_raises(
ValueError, IndexableDataset([1, 2, 3]).get_data, True, [1, 2])
def test_value_error_get_data_none_request(self):
assert_raises(
ValueError, IndexableDataset([1, 2, 3]).get_data, None, None)
def test_pickling(self):
cPickle.loads(cPickle.dumps(IndexableDataset({'a': (1, 2)})))
def test_batch_iteration_scheme_with_lists(self):
"""Batch schemes should work with more than ndarrays."""
data = IndexableDataset(OrderedDict([('foo', list(range(50))),
('bar', list(range(1, 51)))]))
stream = DataStream(data,
iteration_scheme=ShuffledScheme(data.num_examples,
5))
returned = [sum(batches, []) for batches in
zip(*list(stream.get_epoch_iterator()))]
assert set(returned[0]) == set(range(50))
assert set(returned[1]) == set(range(1, 51))
def test_sources_selection():
features = [5, 6, 7, 1]
targets = [1, 0, 1, 1]
stream = DataStream(IterableDataset(OrderedDict(
[('features', features), ('targets', targets)])))
assert list(stream.get_epoch_iterator()) == list(zip(features, targets))
stream = DataStream(IterableDataset(
{'features': features, 'targets': targets},
sources=('targets',)))
assert list(stream.get_epoch_iterator()) == list(zip(targets))
def test_data_driven_epochs():
class TestDataset(IterableDataset):
sources = ('data',)
def __init__(self):
self.axis_labels = None
self.data = [[1, 2, 3, 4],
[5, 6, 7, 8]]
def open(self):
epoch_iter = iter(self.data)
data_iter = iter(next(epoch_iter))
return (epoch_iter, data_iter)
def next_epoch(self, state):
try:
data_iter = iter(next(state[0]))
return (state[0], data_iter)
except StopIteration:
return self.open()
def get_data(self, state, request):
data = []
for i in range(request):
data.append(next(state[1]))
return (data,)
epochs = []
epochs.append([([1],), ([2],), ([3],), ([4],)])
epochs.append([([5],), ([6],), ([7],), ([8],)])
stream = DataStream(TestDataset(), iteration_scheme=ConstantScheme(1))
assert list(stream.get_epoch_iterator()) == epochs[0]
assert list(stream.get_epoch_iterator()) == epochs[1]
assert list(stream.get_epoch_iterator()) == epochs[0]
stream.reset()
for i, epoch in zip(range(2), stream.iterate_epochs()):
assert list(epoch) == epochs[i]
# test scheme resetting between epochs
class TestScheme(BatchSizeScheme):
def get_request_iterator(self):
return iter([1, 2, 1, 3])
epochs = []
epochs.append([([1],), ([2, 3],), ([4],)])
epochs.append([([5],), ([6, 7],), ([8],)])
stream = DataStream(TestDataset(), iteration_scheme=TestScheme())
for i, epoch in zip(range(2), stream.iterate_epochs()):
assert list(epoch) == epochs[i]
def test_num_examples():
assert_raises(ValueError, IterableDataset,
{'features': range(10), 'targets': range(7)})
dataset = IterableDataset({'features': range(7),
'targets': range(7)})
assert dataset.num_examples == 7
dataset = IterableDataset(repeat(1))
assert numpy.isnan(dataset.num_examples)
x = numpy.random.rand(5, 3)
y = numpy.random.rand(5, 4)
dataset = IndexableDataset({'features': x, 'targets': y})
assert dataset.num_examples == 5
assert_raises(ValueError, IndexableDataset,
{'features': x, 'targets': y[:4]})
|
|
import collections
import datetime
import random
import re
import StringIO
import sys
import bio
import features
import vcf
DELETE_BASE = '-'
class ProbabilisticFasta(object):
'''
generate fasta with probabilities attached from mapped fragments
'''
def __init__( self, log=None ):
self.genome = {} # counts over position { 'A': [ 1, 0, 2, ... ], 'G': [ 2, 2, 0, ... ] } roughly means [AGG][GG][AA]...
self.insertions = {} # { 0: { 'AAA': 1, 'BB': 2 } } means at position 0, insertion of AAA with 1 confidence; BB with 2 confidence
self.inserted = {}
self.deleted = set()
self.prior = {} # overall counts
self.total = 0 # total # of nucleotides
self.length = 0 # length of genome
self.log = log
def add( self, fragment, start, confidence=1.0, debug=None ):
'''
process a directly mapped fragment of dna
@fragment: the piece of dna
@start: where in the reference genome it starts
@confidence: how confident in this mapping
'''
for i in xrange(0, len(fragment) ):
value = fragment[i]
position = start + i
if value not in self.genome:
self.genome[value] = []
self.prior[value] = 0
if len(self.genome[value]) < position + 1:
self.genome[value].extend( [0] * ( position - len(self.genome[value]) + 1 ) )
if len(self.genome[value]) > self.length:
self.length = len(self.genome[value])
#self.log( "adding confidence %f to %s at %i; start %i" % ( confidence, value, position, start ) )
#self.log( "capacity %i" % ( len(self.genome[value] ) ) )
if position >= 0: # mappers can clip off the end
self.genome[value][position] += confidence
self.prior[value] += 1
self.total += 1
#if position == 39415: # debugging
# self.log( 'start %i: added %f to %s, total %f, line %s' % ( start, confidence, value, self.genome[value][position], debug ) )
def insert( self, fragment, start, confidence=1.0 ):
'''
@fragment: the piece of dna to insert
@start: where the insertion starts on the reference
@confidence: how confident in this insertion
'''
if start not in self.insertions:
self.insertions[start] = {}
if fragment not in self.insertions[start]:
            self.insertions[start][fragment] = 0  # initialise this fragment without clobbering others at this position
self.insertions[start][fragment] += confidence
def delete( self, start, count, confidence=1.0 ):
fragment = DELETE_BASE * count
self.add( fragment, start, confidence )
def count( self, position ):
'''
counts of nucleotides seen at a position
e.g. { 'A': 10, 'G': 4 }
'''
result = collections.defaultdict(int)
for value in self.genome:
if len(self.genome[value]) > position:
result[value] = self.genome[value][position]
return result
def confidence( self, position ):
'''
returns probabilities of nucleotides
TODO priors
'''
counts = self.count( position ) # e.g. { 'A': 4, 'C': 1 }
denominator = sum( counts[k] for k in counts ) + len(self.prior) # laplacian smoothing
probabilities = {}
for count in counts:
probabilities[count] = ( counts[count] + 1.0 ) / denominator
return probabilities
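        # Worked example (illustrative numbers): with counts {'A': 4, 'C': 1} at a
        # position and all four bases present in self.prior, the denominator is
        # (4 + 1) + 4 = 9, so P(A) = (4 + 1) / 9 ~ 0.56 and P(C) = (1 + 1) / 9 ~ 0.22.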
def consensus_at( self, i=0, call_strategy='consensus' ):
'''
        returns the majority base at given position i, or N if no coverage
        @return (move, result, best, coverage)
- move: how many bases to move candidate (1 is normal)
- result: best variation
- best: confidence of this variation
- coverage: total # reads covering this base
'''
best = 'N' # default if no coverage
best_value = 0
coverage = 0
result = ''
move = 0
# consensus at this base
for value in self.genome: # i.e. A, C, G, T
if i < len(self.genome[value]): # check in range
coverage += self.genome[value][i]
if self.genome[value][i] > best_value:
best_value = self.genome[value][i] # best count
best = value # best base
#self.log( 'consensus at %i: best %s: %i of %i' % ( i, best, best_value, coverage ) )
# find consensus insertion
if i in self.insertions:
# pick highest
best_insertion = None
best_insertion_value = 0
insertion_coverage = 0
for insertion in self.insertions[i]:
insertion_coverage += self.insertions[i][insertion]
if best_insertion is None or self.insertions[i][insertion] > best_insertion_value:
best_insertion = insertion
best_insertion_value = self.insertions[i][insertion]
if best_insertion is not None and ( call_strategy == 'aggressive' or best_insertion_value > ( coverage - insertion_coverage ) ):
result = best_insertion
self.inserted[i] = best_insertion
#if self.log:
# self.log( 'included insertion %s at ref %i' % (best_insertion, i) )
move = len(best_insertion)
else:
pass
if self.log:
pass #self.log( 'skipped insertion at %i with val %f with noinsert %f' % ( i, best_insertion_value, coverage - insertion_coverage ) )
# was the consensus to delete?
if best == DELETE_BASE:
self.deleted.add(i)
#if self.log:
# self.log( 'deletion at %i' % (i) )
else:
result += best
move += 1
return (move, result, best_value, coverage) # TODO only returning confidence for variation, not indel
def consensus_count( self, start=0, count=1 ):
'''
get the next count characters starting at start
can return more than count characters if there are insertions at the end of the segment
can return fewer than count characters at the end of the sequence
'''
result = ''
total_move = 0
reference_move = 0
while len(result) <= count and start + reference_move < self.length:
move, s, confidence, coverage = self.consensus_at( start + reference_move )
result += s
reference_move += 1
#self.log( 'consensus for %i count %i is %s (%i)' % (start, count, result, len(result)) )
return (reference_move, result)
def consensus( self, start=0, end=-1 ):
'''
return the corresponding string for the segment specified.
returned segment can be longer or shorter than end-start - segment applies to the reference.
inclusive of start, exclusive of end
'''
if end == -1 or end > self.length:
end = self.length
result = ''
self.insertion_count = 0
for i in xrange(start, end):
move, add, confidence, coverage = self.consensus_at( i )
result += add
#self.log( 'consensus for %i to %i is %s (%i)' % (start, end, result, len(result)) )
return result
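# Illustrative sketch (not part of the original module): the Laplace (add-one)
# smoothing applied by ProbabilisticFasta.confidence(), shown as a standalone helper.
# The counts and num_symbols arguments are hypothetical stand-ins for the values of
# self.count(position) and len(self.prior).
def _example_laplace_confidence( counts, num_symbols ):
    '''e.g. counts={'A': 4, 'C': 1}, num_symbols=2 -> {'A': 5/7., 'C': 2/7.}'''
    denominator = sum( counts.values() ) + num_symbols
    return dict( ( base, ( counts[base] + 1.0 ) / denominator ) for base in counts )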
class MultiFastaMutate(object):
def __init__( self, multi_reader, log=bio.log_stderr, vcf_file=None, snp_prob=0.01, insert_prob=0.01, delete_prob=0.01, min_insert_len=1, max_insert_len=1, min_delete_len=1, max_delete_len=1, min_variation_dist=0, min_variation_start=0, probabilistic=True, insert_source='random', allow_end_mutate=False, tandem_count=1 ):
self.mutators = []
for reader in multi_reader.items():
self.mutators.append( FastaMutate( reader, log, vcf_file, snp_prob, insert_prob, delete_prob, min_insert_len, max_insert_len, min_delete_len, max_delete_len, min_variation_dist, min_variation_start, probabilistic, insert_source, allow_end_mutate, tandem_count=tandem_count ) )
class FastaMutate(object):
'''
apply random SNP, insertion, and deletion mutations to a reference fasta
'''
def __init__( self, reader, log=bio.log_stderr, vcf_file=None, snp_prob=0.01, insert_prob=0.01, delete_prob=0.01, min_insert_len=1, max_insert_len=1, min_delete_len=1, max_delete_len=1, min_variation_dist=0, min_variation_start=0, probabilistic=True, insert_source='random', allow_end_mutate=False, probabilities='AACCCTTGGG', tandem_count=1, max_variation_count=1e6 ):
'''
@reader: FastaReader
@vcf_file: write mutations to vcf
'''
self.reader = reader
self.snp_prob = snp_prob
self.insert_prob = insert_prob
self.delete_prob = delete_prob
self.min_insert_len = min_insert_len
self.max_insert_len = max_insert_len
self.min_delete_len = min_delete_len
self.max_delete_len = max_delete_len
self.min_variation_dist = min_variation_dist
self.min_variation_start = min_variation_start
self.deletion_remain = 0
self.mutations = 0
self.vcf_file = vcf_file
self.probabilistic = probabilistic
self.probabilities = probabilities
self.insert_source = insert_source
self.tandem_count = tandem_count
self.max_variation_count = max_variation_count
self.variation_count = 0
self.insert_source_data = ''
if vcf_file is not None:
self.vcf = vcf.VCF( writer=vcf.VCFWriter(vcf_file) )
else:
self.vcf = vcf.VCF()
self.pos = 0
self.last_variation_pos = None # this is the end position of the last variation
self.allow_end_mutate = allow_end_mutate
seed = random.randint(0, sys.maxint)
random.seed(seed)
if log is not None:
log( 'seed: %i; insert_source %s' % (seed, insert_source) )
self.log = log
def items(self):
while True:
fragment = self.reader.next_item()
if fragment is None: # no more fragments
if self.deletion_remain > 0:
self.end_deletion()
break
if self.insert_source == 'repeat':
self.insert_source_data += str(fragment)
elif self.insert_source == 'tandem':
self.insert_source_data += str(fragment)
if len(self.insert_source_data) > len(str(fragment)) + self.max_insert_len: # keep this fragment + length
self.insert_source_data = self.insert_source_data[-len(str(fragment)) - self.max_insert_len:]
#self.log( 'insert_source_data: %s' % self.insert_source_data )
# apply mutations
if self.allow_end_mutate or self.reader.has_next_item(): # don't mutate last fragment
fragment = self.mutate( fragment )
yield fragment
def add_snp(self, c):
'''
generates a single snp, adds to vcf, returns new base
'''
new_c = self.probabilities[random.randint(0, len(self.probabilities)-1)]
while new_c == c:
new_c = self.probabilities[random.randint(0, len(self.probabilities)-1)]
self.mutations += 1
if self.vcf is not None:
self.vcf.snp( self.pos, c, new_c )
return new_c
def add_insertion(self, c, fragment_pos, fragment_len ):
'''
generates a new insertion and returns it
insertion gets placed before current base
'''
insert_len = random.randint(self.min_insert_len, self.max_insert_len) # decide insertion len
# generate actual insertion
if self.insert_source == 'repeat' and len(self.insert_source_data) >= insert_len:
#self.log( 'choosing repeated fragment up to %i' % len(self.insert_source_data) )
fragment_start = random.randint(0, len(self.insert_source_data) - insert_len)
new_c = self.insert_source_data[fragment_start:fragment_start + insert_len]
#self.log( "repeat %s from %i:%i" % ( new_c, fragment, position ) )
elif self.insert_source == 'tandem' and len(self.insert_source_data) >= insert_len + fragment_len:
#self.log( 'choosing tandem fragment up to %i from %s' % ( len(self.insert_source_data), self.insert_source_data ) )
# want the insert_len chars directly before fragment_pos, where data is len fragment_len + insert_len
dist_from_end = fragment_len - fragment_pos
new_c = self.insert_source_data[ -insert_len -dist_from_end: -dist_from_end ]
elif self.insert_source == 'random':
new_c = ''
while insert_len > 0:
insert_len -= 1
new_c += self.probabilities[random.randint(0, len(self.probabilities)-1)]
#self.log( "random %s" % ( new_c ) )
else: #if self.insert_source == 'simple':
possibles = 'ACGT'
new_c = possibles[random.randint(0, len(possibles)-1)] * insert_len
#elif self.insert_source == 'novel':
# pass
self.mutations += 1
# add to vcf
if self.vcf is not None:
self.vcf.indel( self.pos, self.previous + c, self.previous + new_c + c )
return new_c
def add_deletion(self, c):
self.mutations += 1
self.deletion_remain = random.randint(self.min_delete_len, self.max_delete_len)
self.deleted = ''
self.deletion_start = self.pos
self.deletion_previous = self.previous
self.continue_deletion( c )
def continue_deletion(self, c):
self.deletion_remain -= 1
self.deleted += c
if self.deletion_remain == 0:
self.end_deletion()
def end_deletion(self):
if self.vcf is not None:
self.vcf.indel( self.deletion_start - 1, self.deletion_previous + self.deleted, self.deletion_previous )
def mutate(self, fragment):
result = ''
for fragment_pos, c in enumerate(fragment): # iterate over each base in fragment
#self.log( 'evaluating %s' % c )
if self.deletion_remain > 0:
self.continue_deletion( c )
elif self.probabilistic:
# snp
if self.variation_count < self.max_variation_count and random.uniform(0, 1) < self.snp_prob and self.pos >= self.min_variation_start and ( self.last_variation_pos is None or self.last_variation_pos + self.min_variation_dist <= self.pos ):
new_c = self.add_snp( c )
#self.log( 'added snp at %i' % self.pos )
result += new_c
self.last_variation_pos = self.pos
self.variation_count += 1
# insert
elif self.variation_count < self.max_variation_count and random.uniform(0, 1) < self.insert_prob and self.pos >= self.min_variation_start and self.pos > self.max_insert_len and ( self.last_variation_pos is None or self.last_variation_pos + self.min_variation_dist <= self.pos ): # TODO reads can get -ve reference
new_c = self.add_insertion( c, fragment_pos, len(fragment) )
#self.log( 'added insertion at %i' % self.pos )
result += new_c + c # insertion gets placed before current base
self.last_variation_pos = self.pos - 1 # -1 because insertion is placed before current
self.variation_count += 1
# delete
elif self.variation_count < self.max_variation_count and self.pos > 0 and random.uniform(0, 1) < self.delete_prob and self.pos >= self.min_variation_start and ( self.last_variation_pos is None or self.last_variation_pos + self.min_variation_dist <= self.pos ):
self.add_deletion( c )
#self.log( 'added deletion at %i' % self.pos )
self.last_variation_pos = self.pos + self.deletion_remain
self.variation_count += 1
# no mutation
else:
result += c
else: # deterministic
pass
self.pos += 1
self.previous = c
return result
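# Illustrative usage sketch (assumes this module's imports, e.g. StringIO and vcf,
# are available): mutate a small in-memory reference with SNPs only and collect
# the mutated fragments. The sequence shown is made up for the example.
def _example_fasta_mutate():
    reader = FastaReader( StringIO.StringIO( '>chr1\nACGTACGTAC\nGTACGTACGT' ) )
    mutator = FastaMutate( reader, log=None, snp_prob=0.1, insert_prob=0, delete_prob=0 )
    return ''.join( fragment for fragment in mutator.items() )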
class FastaDiff(object):
'''
compare a known donor fasta against an imputed candidate fasta and record differences
'''
def __init__( self, reader, candidate, log, vcf=None ):
'''
@reader: FastaReader of known donor
@candidate: ProbabilisticFasta of imputed genome
@vcf: use to keep fastas properly aligned
'''
self.candidate_pos = 1
self.last_log_pos = 0
self.last_error_total_logged = 0
self.errors = {}
self.error_positions = set() # set of incorrect positions
self.error_total = 0
self.candidate = candidate
self.log = log
self.vcf = vcf
candidate_remainder = ''
for item in reader.items():
# gets the corresponding segment from the candidate
move, candidate_item = candidate.consensus_count( self.candidate_pos, len(item) )
candidate_item = candidate_remainder + candidate_item
candidate_remainder = candidate_item[len(item):]
#self.log( 'truth: %s pred: %s' % ( item, candidate_item ) ) # debug
if candidate_item != item:
# find differences
self.find_differences( item, candidate_item, self.candidate_pos )
self.candidate_pos += move #len(item)
if (self.candidate_pos < 10000 and self.candidate_pos - self.last_log_pos >= 1000) or self.candidate_pos - self.last_log_pos >= 10000:
self.log_error()
def log_error( self, i=0 ):
self.log( 'processed cand %i items - %i errors' % ( self.candidate_pos + i, self.error_total ) )
self.last_log_pos = self.candidate_pos
self.last_error_total_logged = self.error_total
def find_differences( self, item, candidate, start ):
for i in xrange(0, min(len(item), len(candidate))):
if item[i] != candidate[i]:
self.error_total += 1
if self.error_total < 100 and self.error_total != self.last_error_total_logged:
self.log_error(i)
key = '%s->%s' % ( item[i], candidate[i] )
if self.error_total < 100 or self.error_total % 1000 == 0:
self.log( 'error: actual->predicted %s at cand %i + %i (%i): counts %s\ntruth: %s\npred: %s' % ( key, start, i, start + i, self.candidate.count( start + i ), item, candidate ) )
if key not in self.errors:
self.errors[key] = 0
self.errors[key] += 1
self.error_positions.add( start + i )
if len(item) != len(candidate):
key = 'length'
if key not in self.errors:
self.errors[key] = 0
self.errors[key] += 1
class FastaLength(object):
def __init__(self, reader):
self.reader = reader
self.length = 0
for item in self.reader.items():
self.length += len(item)
class Fasta(object):
'''
keeps the whole fasta in memory for random access
'''
def __init__(self, reader):
'''
@reader: FastaReader
'''
self.reader = reader
self._length = None
self.fasta = ''
@property
def length(self):
if self._length is None:
# read the whole thing
for item in self.reader.items():
self.fasta += item
self._length = len(self.fasta)
return self._length
def base_at(self, i):
if i >= len(self.fasta):
for item in self.reader.items():
self.fasta += item
if i < len(self.fasta): # success
break
if i < len(self.fasta):
return self.fasta[i]
else:
return None
def fragment(self, start=0, end=-1 ):
if end == -1:
end = self.length
result = ''
for i in xrange(start, end):
result += self.base_at(i)
return result
def read_to( self, pos ):
'''
read up to pos; return false if unable
'''
if pos >= len(self.fasta):
for item in self.reader.items():
self.fasta += item
if pos < len(self.fasta): # success
return True
return False # failed
else:
return True
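# Illustrative usage sketch (assumes StringIO from this module's imports): random
# access into a fasta held in memory, reading lazily from the underlying reader.
def _example_fasta_random_access():
    fasta = Fasta( FastaReader( StringIO.StringIO( '>x\nACGT\nTTTT' ) ) )
    return fasta.base_at( 5 ), fasta.fragment( 0, 4 ) # expected ('T', 'ACGT')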
class FastaReaderFromVCF(object):
def __init__(self, fasta, vcf):
self.reader = fasta
self.vcf = vcf
self.pos = 0
def items(self):
while True:
next_fragment = self.next_item()
if next_fragment is None:
break
else:
yield next_fragment
def next_item(self):
fragment = self.reader.next_item()
if fragment is None:
return None
else:
fragment = list(fragment)
new_pos = self.pos + len(fragment)
for i in xrange(self.pos, new_pos):
if i in self.vcf.snp_map:
fragment[i - self.pos] = self.vcf.snp_list[self.vcf.snp_map[i]]['alt']
self.pos = new_pos
return ''.join(fragment)
class FastaReader(object):
'''
yields fragments from a fasta file object
>>> import StringIO
>>> g = StringIO.StringIO( '>\nabc\ndef' )
>>> p = bio.FastaReader( g )
>>> [ f for f in p.items() ]
['abc', 'def']
'''
def __init__(self, genome, include_headers=False):
self.genome = genome
self.name = None
self.has_next = True
self.include_headers = include_headers
self.future_fragment = self._next_item()
def items(self):
while True:
current_fragment = self.future_fragment
if current_fragment is None:
break
self.future_fragment = self._next_item()
self.has_next = self.future_fragment is not None
yield current_fragment
#def items(self):
# while True:
# current_fragment = self.next_item()
# if current_fragment is None:
# break
# yield current_fragment
def next_item(self):
current_fragment = self.future_fragment
self.future_fragment = self._next_item()
self.has_next = self.future_fragment is not None
return current_fragment
def _next_item(self):
for line in self.genome:
if line.startswith( '>' ):
self.name = line[1:].strip()
if line.startswith( '>' ) and not self.include_headers:
pass
else:
return line.strip()
return None
def has_next_item(self):
return self.has_next
def __repr__(self):
return "name: %s" % self.name
class FastaStats(object):
'''calculate some overall stats for a fasta file'''
def __init__( self, fasta, read_length=100, log=bio.log_stderr ):
'''
@fasta: file handle
'''
self.stats = { 'count': 0, 'gc': [], 'entropy': [] }
self.base_counter = collections.Counter()
current = ''
lines = 0
for line in fasta:
line = line.strip()
if not line.startswith( '>' ):
self.stats['count'] += len(line) # total fasta length
current += line
self.base_counter.update(line)
if len(current) >= read_length:
add = len(current) - read_length
for idx in xrange(0, add):
feature = features.ReadFeature( current[idx:idx+read_length] )
self.stats['gc'].append( feature.gc() )
self.stats['entropy'].append( feature.entropy() )
current = current[add:]
lines += 1
if lines % 1000 == 0:
log( '%i lines processed' % lines )
#log( self.stats )
log( '%i lines processed' % lines )
class MultiFastaReaderContainer(object):
def __init__(self, genome):
reader = MultiFastaReader( genome )
self.fastas = {}
for item in reader.items():
self.fastas[item.name] = item
def find_chromosome( self, name ):
if name in self.fastas:
return self.fastas[name]
for key in self.fastas:
if key.split( ' ' )[0] == name:
return self.fastas[key]
return None
class MultiFastaReader(object):
'''
use items() to iterate over the FastaReader objects contained in a multi-record fasta
'''
def __init__(self, genome):
'''
genome is a file like object
'''
self.genome = genome
def items( self ):
'''
iterate over fasta sequences
'''
current = []
found_header = False
for line in self.genome:
if line.startswith( '>' ):
if found_header:
yield FastaReader( genome=StringIO.StringIO( ''.join(current) ) ) # TODO inefficient
current = [ line ]
else:
found_header = True
current.append( line )
else:
current.append( line )
yield FastaReader( genome = StringIO.StringIO( ''.join(current) ) )
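# Illustrative usage sketch (assumes StringIO from this module's imports): iterate a
# two-record fasta and collect each record's name and concatenated sequence.
def _example_multi_fasta_reader():
    genome = StringIO.StringIO( '>seq1\nACGT\n>seq2\nGGCC\n' )
    result = {}
    for reader in MultiFastaReader( genome ).items():
        result[reader.name] = ''.join( fragment for fragment in reader.items() )
    return result # expected {'seq1': 'ACGT', 'seq2': 'GGCC'}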
class ErrorGenerator(object):
def __init__( self, error_profile ):
self.error_profile = error_profile
def apply_errors( self, dna ):
result = []
for x in dna:
result.append( self.error_profile( x ) )
return ''.join( result )
@staticmethod
def create_uniform_error_profile( error_prob ):
'''snv errors with probability error_prob'''
transitions = { 'A': 'TGC', 'T': 'GCA', 'G': 'ACT', 'C': 'AGT', 'N': 'N' }
def uniform_error_profile( bp ):
if random.random() < error_prob:
return transitions[bp][random.randint(0, len(transitions[bp]) - 1)]
else:
return bp
return uniform_error_profile
@staticmethod
def create_homopolymer_error_profile( error_prob, error_length ):
possibles = 'ACGT'
def homopolymer_error_profile( bp ):
if random.random() < error_prob:
return ''.join( ( bp, possibles[random.randint(0, len(possibles)-1)] * error_length ) )
else:
return bp
return homopolymer_error_profile
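# Illustrative usage sketch: apply uniform single-base substitution errors to a read
# with a 1% per-base error probability. The read shown is made up for the example.
def _example_error_generator():
    generator = ErrorGenerator( ErrorGenerator.create_uniform_error_profile( 0.01 ) )
    return generator.apply_errors( 'ACGTACGTAC' )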
class RepeatedMultiFastaGenerator( object ):
def __init__( self, multi_reader, out_fh, multiplier, cfg ):
for reader in multi_reader.items():
RepeatedFastaGenerator( reader, out_fh, multiplier, cfg )
class RepeatedFastaGenerator( object ):
def __init__( self, reader, out_fh, multiplier, cfg, log=bio.log_quiet ):
'''
this just makes repeats of an entire fasta with a specified snp mutation prob
@reader: FastaReader
@cfg: config dictionary
'''
# reader = bio.FastaReader( open( sys.argv[2], 'r' ) )
out_fh.write( '>generated fasta %ix from %s\n' % ( multiplier, reader.name ) )
sequence = StringIO.StringIO()
for item in reader.items():
if len(item) > 0:
out_fh.write( '%s\n' % item ) # write unmodified
sequence.write( '%s\n' % item ) # remember
for i in xrange(0, multiplier - 1):
sequence.seek(0)
reader = FastaReader( sequence )
mutator = FastaMutate( reader, snp_prob=float(cfg['mult_snp_prob']), insert_prob=0, delete_prob=0, log=log )
for item in mutator.items():
if len(item) > 0:
out_fh.write( '%s\n' % item ) # write unmodified
#sys.stdout.write( sequence )
BASES = 'ACGT'
class SequenceGenerator( object ):
def __init__( self, length, probs=(0.25, 0.25, 0.25, 0.25), repeat_length=None ):
result = []
if repeat_length is None:
repeat_length = length
for _ in xrange( repeat_length ):
x = random.random()
for prob_idx in xrange(len(probs)):
if x < sum(probs[:prob_idx+1]):
result.append(BASES[prob_idx])
break
if length > 0 and repeat_length > 0:
mult = length / repeat_length
if length % repeat_length != 0: # round up so the repeated unit covers the full length
mult += 1
self.sequence = ''.join(result * mult)
self.sequence = self.sequence[:length]
else:
self.sequence = ''
def mutate( self, position ):
current_base_index = BASES.index(self.sequence[position])
next_base_index = ( current_base_index + 1 ) % len(BASES)
new_base = BASES[ next_base_index ]
self.sequence = self.sequence[:position] + new_base + self.sequence[position+1:]
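# Illustrative usage sketch: build a 20bp sequence from a repeated 5bp unit drawn
# with uniform base probabilities, then mutate one position in place.
def _example_sequence_generator():
    generator = SequenceGenerator( 20, repeat_length=5 )
    generator.mutate( 3 )
    return generator.sequence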
if __name__ == "__main__":
pass
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Turn Python docstrings into Markdown for TensorFlow documentation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import functools
import inspect
import os
import re
import codegen
import six
# A regular expression capturing a Python identifier.
IDENTIFIER_RE = '[a-zA-Z_][a-zA-Z0-9_]*'
def documentation_path(full_name):
"""Returns the file path for the documentation for the given API symbol.
Given the fully qualified name of a library symbol, compute the path to which
to write the documentation for that symbol (relative to a base directory).
Documentation files are organized into directories that mirror the python
module/class structure.
Args:
full_name: Fully qualified name of a library symbol.
Returns:
The file path to which to write the documentation for `full_name`.
"""
dirs = full_name.split('.')
return os.path.join(*dirs) + '.md'
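# For example (illustrative): documentation_path('tf.train.Saver') returns
# os.path.join('tf', 'train', 'Saver') + '.md', i.e. 'tf/train/Saver.md' on a
# POSIX-style path.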
def _get_raw_docstring(py_object):
"""Get the docs for a given python object.
Args:
py_object: A python object to retrieve the docs for (class, function/method,
or module).
Returns:
The docstring, or the empty string if no docstring was found.
"""
# For object instances, inspect.getdoc does give us the docstring of their
# type, which is not what we want. Only return the docstring if it is useful.
if (inspect.isclass(py_object) or inspect.ismethod(py_object) or
inspect.isfunction(py_object) or inspect.ismodule(py_object) or
isinstance(py_object, property)):
return inspect.getdoc(py_object) or ''
else:
return ''
def _get_brief_docstring(py_object):
"""Gets the one line docstring of a python object."""
return _get_raw_docstring(py_object).split('\n')[0]
def _reference_to_link(ref_full_name, relative_path_to_root, duplicate_of):
"""Resolve a "@{symbol}" reference to a relative path, respecting duplicates.
The input to this function should already be stripped of the '@' and '{}', and
its output is only the link, not the full Markdown.
Args:
ref_full_name: The fully qualified name of the symbol to link to.
relative_path_to_root: The relative path from the location of the current
document to the root of the API documentation.
duplicate_of: A map from duplicate full names to master names.
Returns:
A relative path that links from the documentation page of `from_full_name`
to the documentation page of `ref_full_name`.
"""
master_name = duplicate_of.get(ref_full_name, ref_full_name)
ref_path = documentation_path(master_name)
return os.path.join(relative_path_to_root, ref_path)
def _markdown_link(link_text, ref_full_name, relative_path_to_root,
duplicate_of):
"""Resolve a "@{symbol}" reference to a Markdown link, respecting duplicates.
The input to this function should already be stripped of the '@' and '{}'.
This function returns a Markdown link. It is assumed that this is a code
reference, so the link text will always be rendered as code (using backticks).
`link_text` should refer to a library symbol, starting with 'tf.'.
Args:
link_text: The text of the Markdown link.
ref_full_name: The fully qualified name of the symbol to link to.
relative_path_to_root: The relative path from the location of the current
document to the root of the API documentation.
duplicate_of: A map from duplicate full names to master names.
Returns:
A markdown link from the documentation page of `from_full_name`
to the documentation page of `ref_full_name`.
"""
return '[`%s`](%s)' % (
link_text,
_reference_to_link(ref_full_name, relative_path_to_root, duplicate_of))
def replace_references(string, relative_path_to_root, duplicate_of):
"""Replace "@{symbol}" references with links to symbol's documentation page.
This functions finds all occurrences of "@{symbol}" in `string` and replaces
them with markdown links to the documentation page for "symbol".
`relative_path_to_root` is the relative path from the document that contains
the "@{symbol}" reference to the root of the API documentation that is linked
to. If the containing page is part of the same API docset,
`relative_path_to_root` can be set to
`os.path.dirname(documentation_path(name))`, where `name` is the python name
of the object whose documentation page the reference lives on.
Args:
string: A string in which "@{symbol}" references should be replaced.
relative_path_to_root: The relative path from the containing document to the
root of the API documentation that is being linked to.
duplicate_of: A map from duplicate names to preferred names of API symbols.
Returns:
`string`, with "@{symbol}" references replaced by Markdown links.
"""
full_name_re = '%s(.%s)*' % (IDENTIFIER_RE, IDENTIFIER_RE)
symbol_reference_re = re.compile(r'@\{(' + full_name_re + r')\}')
return re.sub(symbol_reference_re,
lambda match: _markdown_link(match.group(1), match.group(1), # pylint: disable=g-long-lambda
relative_path_to_root,
duplicate_of),
string)
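# For example (illustrative): with duplicate_of = {'tf.a.b': 'tf.x.b'} and
# relative_path_to_root = '..', the string 'See @{tf.a.b}.' is rewritten to
# 'See [`tf.a.b`](../tf/x/b.md).'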
def _md_docstring(py_object, relative_path_to_root, duplicate_of):
"""Get the docstring from an object and make it into nice Markdown.
For links within the same set of docs, the `relative_path_to_root` for a
docstring on the page for `full_name` can be set to
```python
relative_path_to_root = os.path.relpath(
os.path.dirname(documentation_path(full_name)) or '.', '.')
```
Args:
py_object: A python object to retrieve the docs for (class, function/method,
or module).
relative_path_to_root: The relative path from the location of the current
document to the root of the API documentation. This is used to compute
links for "@symbol" references.
duplicate_of: A map from duplicate symbol names to master names. Used to
resolve "@symbol" references.
Returns:
The docstring, or the empty string if no docstring was found.
"""
# TODO(wicke): If this is a partial, use the .func docstring and add a note.
raw_docstring = _get_raw_docstring(py_object)
raw_lines = raw_docstring.split('\n')
# Define regular expressions used during parsing below.
symbol_list_item_re = re.compile(r'^ (%s): ' % IDENTIFIER_RE)
section_re = re.compile(r'^(\w+):\s*$')
# Translate docstring line by line.
in_special_section = False
lines = []
def is_section_start(i):
# Previous line is empty, line i is "Word:", and next line is indented.
return (i > 0 and not raw_lines[i-1].strip() and
re.match(section_re, raw_lines[i]) and
len(raw_lines) > i+1 and raw_lines[i+1].startswith(' '))
for i, line in enumerate(raw_lines):
if not in_special_section and is_section_start(i):
in_special_section = True
lines.append('#### ' + section_re.sub(r'\1:', line))
lines.append('')
continue
# If the next line starts a new section, this one ends. Add an extra line.
if in_special_section and is_section_start(i+1):
in_special_section = False
lines.append('')
if in_special_section:
# Translate symbols in 'Args:', 'Parameters:', 'Raises:', etc. sections.
lines.append(symbol_list_item_re.sub(r'* <b>`\1`</b>: ', line))
else:
lines.append(line)
docstring = '\n'.join(lines)
# TODO(deannarubin): Improve formatting for devsite
# TODO(deannarubin): Interpret @compatibility and other formatting notes.
return replace_references(docstring, relative_path_to_root, duplicate_of)
def _get_arg_spec(func):
"""Extracts signature information from a function or functools.partial object.
For functions, uses `inspect.getargspec`. For `functools.partial` objects,
corrects the signature of the underlying function to take into account the
removed arguments.
Args:
func: A function whose signature to extract.
Returns:
An `ArgSpec` namedtuple `(args, varargs, keywords, defaults)`, as returned
by `inspect.getargspec`.
"""
# getargspec does not work for functools.partial objects directly.
if isinstance(func, functools.partial):
argspec = inspect.getargspec(func.func)
# Remove the args from the original function that have been used up.
first_default_arg = (
len(argspec.args or []) - len(argspec.defaults or []))
partial_args = len(func.args)
argspec_args = []
if argspec.args:
argspec_args = list(argspec.args[partial_args:])
argspec_defaults = list(argspec.defaults or ())
if argspec.defaults and partial_args > first_default_arg:
argspec_defaults = list(argspec.defaults[partial_args-first_default_arg:])
first_default_arg = max(0, first_default_arg - partial_args)
for kwarg in (func.keywords or []):
if kwarg in (argspec.args or []):
i = argspec_args.index(kwarg)
argspec_args.pop(i)
if i >= first_default_arg:
argspec_defaults.pop(i-first_default_arg)
else:
first_default_arg -= 1
return inspect.ArgSpec(args=argspec_args,
varargs=argspec.varargs,
keywords=argspec.keywords,
defaults=tuple(argspec_defaults))
else: # Regular function or method, getargspec will work fine.
return inspect.getargspec(func)
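# For example (illustrative): given def f(a, b, c=1, d=2) and
# p = functools.partial(f, 10, d=4), _get_arg_spec(p) returns
# ArgSpec(args=['b', 'c'], varargs=None, keywords=None, defaults=(1,)).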
def _remove_first_line_indent(string):
indent = len(re.match(r'^\s*', string).group(0))
return '\n'.join([line[indent:] for line in string.split('\n')])
def _generate_signature(func, reverse_index):
"""Given a function, returns a string representing its args.
This function produces a string representing the arguments to a python
function, including surrounding parentheses. It uses inspect.getargspec, which
does not generalize well to Python 3.x, which is more flexible in how *args
and **kwargs are handled. This is not a problem in TF, since we have to remain
compatible with Python 2.7 anyway.
This function uses `__name__` for callables if it is available. This can lead
to poor results for functools.partial and other callable objects.
The returned string is Python code, so if it is included in a Markdown
document, it should be typeset as code (using backticks), or escaped.
Args:
func: A function, method, or functools.partial to extract the signature for.
reverse_index: A map from object ids to canonical full names to use.
Returns:
A string representing the signature of `func` as python code.
"""
# This produces poor signatures for decorated functions.
# TODO(wicke): We need to use something like the decorator module to fix it.
args_list = []
argspec = _get_arg_spec(func)
first_arg_with_default = (
len(argspec.args or []) - len(argspec.defaults or []))
# Python documentation skips `self` when printing method signatures.
# Note we cannot test for ismethod here since unbound methods do not register
# as methods (in Python 3).
first_arg = 1 if 'self' in argspec.args[:1] else 0
# Add all args without defaults.
for arg in argspec.args[first_arg:first_arg_with_default]:
args_list.append(arg)
# Add all args with defaults.
if argspec.defaults:
source = _remove_first_line_indent(inspect.getsource(func))
func_ast = ast.parse(source)
ast_defaults = func_ast.body[0].args.defaults
for arg, default, ast_default in zip(
argspec.args[first_arg_with_default:], argspec.defaults, ast_defaults):
if id(default) in reverse_index:
default_text = reverse_index[id(default)]
else:
default_text = codegen.to_source(ast_default)
if default_text != repr(default):
# This may be an internal name. If so, handle the ones we know about.
# TODO(wicke): This should be replaced with a lookup in the index.
# TODO(wicke): (replace first ident with tf., check if in index)
internal_names = {
'ops.GraphKeys': 'tf.GraphKeys',
'_ops.GraphKeys': 'tf.GraphKeys',
'init_ops.zeros_initializer': 'tf.zeros_initializer',
'init_ops.ones_initializer': 'tf.ones_initializer',
'saver_pb2.SaverDef': 'tf.SaverDef',
}
full_name_re = '^%s(.%s)+' % (IDENTIFIER_RE, IDENTIFIER_RE)
match = re.match(full_name_re, default_text)
if match:
lookup_text = default_text
for internal_name, public_name in six.iteritems(internal_names):
if match.group(0).startswith(internal_name):
lookup_text = public_name + default_text[len(internal_name):]
break
if default_text is lookup_text:
print('Using default arg, failed lookup: %s, repr: %r' % (
default_text, default))
else:
default_text = lookup_text
args_list.append('%s=%s' % (arg, default_text))
# Add *args and *kwargs.
if argspec.varargs:
args_list.append('*' + argspec.varargs)
if argspec.keywords:
args_list.append('**' + argspec.keywords)
return '(%s)' % ', '.join(args_list)
def _generate_markdown_for_function(full_name, duplicate_names,
function, duplicate_of, reverse_index):
"""Generate Markdown docs for a function or method.
This function creates a documentation page for a function. It uses the
function name (incl. signature) as the title, followed by a list of duplicate
names (if there are any), and the Markdown formatted docstring of the
function.
Args:
full_name: The preferred name of the function. Used in the title. Must not
be present in `duplicate_of` (master names never are).
duplicate_names: A sorted list of alternative names (incl. `full_name`).
function: The python object referenced by `full_name`.
duplicate_of: A map of duplicate full names to master names. Used to resolve
@{symbol} references in the docstring.
reverse_index: A map from object ids in the index to full names.
Returns:
A string that can be written to a documentation file for this function.
"""
# TODO(wicke): Make sure this works for partials.
relative_path = os.path.relpath(
os.path.dirname(documentation_path(full_name)) or '.', '.')
docstring = _md_docstring(function, relative_path, duplicate_of)
signature = _generate_signature(function, reverse_index)
if duplicate_names:
aliases = '\n'.join(['### `%s`' % (name + signature)
for name in duplicate_names])
aliases += '\n\n'
else:
aliases = ''
return '# `%s%s`\n\n%s%s' % (full_name, signature, aliases, docstring)
def _generate_markdown_for_class(full_name, duplicate_names, py_class,
duplicate_of, index, tree, reverse_index):
"""Generate Markdown docs for a class.
This function creates a documentation page for a class. It uses the
class name as the title, followed by a list of duplicate
names (if there are any), the Markdown formatted docstring of the
class, a list of links to all child class docs, a list of all properties
including their docstrings, a list of all methods incl. their docstrings, and
a list of all class member names (public fields).
Args:
full_name: The preferred name of the class. Used in the title. Must not
be present in `duplicate_of` (master names never are).
duplicate_names: A sorted list of alternative names (incl. `full_name`).
py_class: The python object referenced by `full_name`.
duplicate_of: A map of duplicate full names to master names. Used to resolve
@{symbol} references in the docstrings.
index: A map from full names to python object references.
tree: A map from full names to the names of all documentable child objects.
reverse_index: A map from object ids in the index to full names.
Returns:
A string that can be written to a documentation file for this class.
"""
relative_path = os.path.relpath(
os.path.dirname(documentation_path(full_name)) or '.', '.')
docstring = _md_docstring(py_class, relative_path, duplicate_of)
if duplicate_names:
aliases = '\n'.join(['### `class %s`' % name for name in duplicate_names])
aliases += '\n\n'
else:
aliases = ''
docs = '# `%s`\n\n%s%s\n\n' % (full_name, aliases, docstring)
field_names = []
properties = []
methods = []
class_links = []
for member in tree[full_name]:
child_name = '.'.join([full_name, member])
child = index[child_name]
if isinstance(child, property):
properties.append((member, child))
elif inspect.isclass(child):
class_links.append(_markdown_link('class ' + member, child_name,
relative_path, duplicate_of))
elif inspect.ismethod(child) or inspect.isfunction(child):
methods.append((member, child))
else:
# TODO(wicke): We may want to also remember the object itself.
field_names.append(member)
if class_links:
docs += '## Child Classes\n'
docs += '\n\n'.join(sorted(class_links))
docs += '\n\n'
if properties:
docs += '## Properties\n\n'
for property_name, prop in sorted(properties, key=lambda x: x[0]):
docs += '### `%s`\n\n%s\n\n' % (
property_name, _md_docstring(prop, relative_path, duplicate_of))
docs += '\n\n'
if methods:
docs += '## Methods\n\n'
for method_name, method in sorted(methods, key=lambda x: x[0]):
method_signature = method_name + _generate_signature(method,
reverse_index)
docs += '### `%s`\n\n%s\n\n' % (method_signature,
_md_docstring(method, relative_path,
duplicate_of))
docs += '\n\n'
if field_names:
docs += '## Class Members\n\n'
# TODO(wicke): Document the value of the members, at least for basic types.
docs += '\n\n'.join(sorted(field_names))
docs += '\n\n'
return docs
def _generate_markdown_for_module(full_name, duplicate_names, module,
duplicate_of, index, tree):
"""Generate Markdown docs for a module.
This function creates a documentation page for a module. It uses the
module name as the title, followed by a list of duplicate
names (if there are any), the Markdown formatted docstring of the
class, and a list of links to all members of this module.
Args:
full_name: The preferred name of the module. Used in the title. Must not
be present in `duplicate_of` (master names never are).
duplicate_names: A sorted list of alternative names (incl. `full_name`).
module: The python object referenced by `full_name`.
duplicate_of: A map of duplicate full names to master names. Used to resolve
@{symbol} references in the docstrings.
index: A map from full names to python object references.
tree: A map from full names to the names of all documentable child objects.
Returns:
A string that can be written to a documentation file for this module.
"""
relative_path = os.path.relpath(
os.path.dirname(documentation_path(full_name)) or '.', '.')
docstring = _md_docstring(module, relative_path, duplicate_of)
if duplicate_names:
aliases = '\n'.join(['### Module `%s`' % name for name in duplicate_names])
aliases += '\n\n'
else:
aliases = ''
member_names = tree.get(full_name, [])
# Make links to all members.
member_links = []
for name in member_names:
member_full_name = full_name + '.' + name if full_name else name
member = index[member_full_name]
suffix = ''
if inspect.isclass(member):
link_text = 'class ' + name
elif inspect.isfunction(member):
link_text = name + '(...)'
elif inspect.ismodule(member):
link_text = name
suffix = ' module'
else:
member_links.append('Constant ' + name)
continue
brief_docstring = _get_brief_docstring(member)
if brief_docstring:
suffix = '%s: %s' % (suffix, brief_docstring)
member_links.append(_markdown_link(link_text, member_full_name,
relative_path, duplicate_of) + suffix)
# TODO(deannarubin): Make this list into a table and add the brief docstring.
# (use _get_brief_docstring)
return '# Module `%s`\n\n%s%s\n\n## Members\n\n%s' % (
full_name, aliases, docstring, '\n\n'.join(member_links))
_CODE_URL_PREFIX = (
'https://www.tensorflow.org/code/')
def generate_markdown(full_name, py_object,
duplicate_of, duplicates,
index, tree, reverse_index, base_dir):
"""Generate Markdown docs for a given object that's part of the TF API.
This function uses _md_docstring to obtain the docs pertaining to
`object`.
This function resolves '@symbol' references in the docstrings into links to
the appropriate location. It also adds a list of alternative names for the
symbol automatically.
It assumes that the docs for each object live in a file given by
`documentation_path`, and that relative links to files within the
documentation are resolvable.
The output is Markdown that can be written to file and published.
Args:
full_name: The fully qualified name of the symbol to be
documented.
py_object: The Python object to be documented. Its documentation is sourced
from `py_object`'s docstring.
duplicate_of: A `dict` mapping fully qualified names to "master" names. This
is used to resolve "@{symbol}" references to the "master" name.
duplicates: A `dict` mapping fully qualified names to a set of all
aliases of this name. This is used to automatically generate a list of all
aliases for each name.
index: A `dict` mapping fully qualified names to the corresponding Python
objects. Used to produce docs for child objects, and to check the validity
of "@{symbol}" references.
tree: A `dict` mapping a fully qualified name to the names of all its
members. Used to populate the members section of a class or module page.
reverse_index: A `dict` mapping objects in the index to full names.
base_dir: A base path that is stripped from file locations written to the
docs.
Returns:
A string containing the Markdown docs for `py_object`.
Raises:
RuntimeError: If an object is encountered for which we don't know how
to make docs.
"""
# Which other aliases exist for the object referenced by full_name?
master_name = duplicate_of.get(full_name, full_name)
duplicate_names = duplicates.get(master_name, [full_name])
# TODO(wicke): Once other pieces are ready, enable this also for partials.
if (inspect.ismethod(py_object) or inspect.isfunction(py_object) or
# Some methods in classes from extensions come in as routines.
inspect.isroutine(py_object)):
markdown = _generate_markdown_for_function(master_name, duplicate_names,
py_object, duplicate_of,
reverse_index)
elif inspect.isclass(py_object):
markdown = _generate_markdown_for_class(master_name, duplicate_names,
py_object, duplicate_of,
index, tree, reverse_index)
elif inspect.ismodule(py_object):
markdown = _generate_markdown_for_module(master_name, duplicate_names,
py_object, duplicate_of,
index, tree)
else:
raise RuntimeError('Cannot make docs for object %s: %r' % (full_name,
py_object))
# Every page gets a note on the bottom about where this object is defined
# TODO(wicke): If py_object is decorated, get the decorated object instead.
# TODO(wicke): Only use decorators that support this in TF.
try:
path = os.path.relpath(inspect.getfile(py_object), base_dir)
# TODO(wicke): If this is a generated file, point to the source instead.
# Never include links outside this code base.
if not path.startswith('..'):
markdown += '\n\nDefined in [`%s`](%s%s).\n\n' % (
path, _CODE_URL_PREFIX, path)
except TypeError: # getfile throws TypeError if py_object is a builtin.
markdown += '\n\nThis is an alias for a Python built-in.'
return markdown
def generate_global_index(library_name, index, duplicate_of):
"""Given a dict of full names to python objects, generate an index page.
The index page generated contains a list of links for all symbols in `index`
that have their own documentation page.
Args:
library_name: The name for the documented library to use in the title.
index: A dict mapping full names to python objects.
duplicate_of: A map of duplicate names to preferred names.
Returns:
A string containing an index page as Markdown.
"""
symbol_links = []
for full_name, py_object in six.iteritems(index):
if (inspect.ismodule(py_object) or inspect.isfunction(py_object) or
inspect.isclass(py_object)):
# In Python 3, unbound methods are functions, so eliminate those.
if inspect.isfunction(py_object):
if full_name.count('.') == 0:
parent_name = ''
else:
parent_name = full_name[:full_name.rfind('.')]
if parent_name in index and inspect.isclass(index[parent_name]):
# Skip methods (=functions with class parents).
continue
symbol_links.append((full_name,
_markdown_link(full_name, full_name,
'.', duplicate_of)))
lines = ['# All symbols in %s' % library_name, '']
for _, link in sorted(symbol_links, key=lambda x: x[0]):
lines.append('* %s' % link)
# TODO(deannarubin): Make this list into a table and add the brief docstring.
# (use _get_brief_docstring)
return '\n'.join(lines)
|
|
import io
import json
import autobahn.websocket.types as A
from twisted.trial import unittest
from twisted.internet import error
from twisted.internet.defer import Deferred, DeferredList
from twisted.internet.protocol import Protocol, Factory, connectionDone
from twisted.test.proto_helpers import StringTransport
from twisted.internet.task import Clock
from twisted.internet.address import IPv4Address
# TODO: don't use twisted's private test APIs
from twisted.web.test.requesthelper import DummyRequest
from zope.interface import Interface, implementer, implementedBy
from .. import protocol as P
class sockJSJSONTestCase(unittest.SynchronousTestCase):
def test_sockJSJSON(self):
self.assertEqual(P.sockJSJSON([3000, 'Go away!']),
b'[3000,"Go away!"]')
class HeartbeatClockTestCase(unittest.TestCase):
def setUp(self):
self.clock = Clock()
self.period = 25.0
self.heartbeats = 0
def fakeHeartbeat():
self.heartbeats += 1
self.heartbeater = P.HeartbeatClock(fakeHeartbeat,
period=self.period,
clock=self.clock)
def test_neverScheduled(self):
'''Heartbeats are not scheduled before their first schedule(), and are
not scheduled if we immediately stop() the HeartbeatClock. A
stopped HeartbeatClock can never schedule any other
heartbeats.
'''
self.assertFalse(self.clock.getDelayedCalls())
self.heartbeater.stop()
self.assertFalse(self.clock.getDelayedCalls())
with self.assertRaises(RuntimeError):
self.heartbeater.schedule()
self.assertFalse(self.clock.getDelayedCalls())
def test_schedule(self):
'''Heartbeats are scheduled and recur if not interrupted.'''
self.heartbeater.schedule()
pendingBeat = self.heartbeater.pendingHeartbeat
self.assertEqual(self.clock.getDelayedCalls(), [pendingBeat])
self.clock.advance(self.period * 2)
self.assertEqual(self.heartbeats, 1)
rescheduledPendingBeat = self.heartbeater.pendingHeartbeat
self.assertIsNot(pendingBeat, rescheduledPendingBeat)
self.assertEqual(self.clock.getDelayedCalls(),
[rescheduledPendingBeat])
def test_schedule_interrupts(self):
'''A schedule() call will remove the pending heartbeat and reschedule
it for later.
'''
self.heartbeater.schedule()
pendingBeat = self.heartbeater.pendingHeartbeat
self.assertEqual(self.clock.getDelayedCalls(), [pendingBeat])
self.heartbeater.schedule()
self.assertFalse(self.heartbeats)
rescheduledPendingBeat = self.heartbeater.pendingHeartbeat
self.assertEqual(self.clock.getDelayedCalls(),
[rescheduledPendingBeat])
def test_schedule_stop(self):
'''A stop() call removes any pending heartbeats.'''
self.heartbeater.schedule()
pendingBeat = self.heartbeater.pendingHeartbeat
self.assertEqual(self.clock.getDelayedCalls(), [pendingBeat])
self.heartbeater.stop()
self.assertFalse(self.heartbeats)
self.assertFalse(self.clock.getDelayedCalls())
# this does not raise an exception
self.heartbeater.stop()
class TestProtocol(Protocol):
connectionMadeCalls = 0
def connectionMade(self):
self.connectionMadeCalls += 1
if self.connectionMadeCalls > 1:
assert False, "connectionMade must only be called once"
class RecordingProtocol(TestProtocol):
def dataReceived(self, data):
self.factory.receivedData.append(data)
def connectionLost(self, reason):
self.factory.connectionsLost.append(reason)
class RecordingProtocolFactory(Factory):
protocol = RecordingProtocol
def __init__(self, receivedData, connectionsLost):
self.receivedData = receivedData
self.connectionsLost = connectionsLost
class EchoProtocol(TestProtocol):
DISCONNECT = 'DISCONNECT'
def dataReceived(self, data):
if isinstance(data, list):
self.transport.writeSequence(data)
else:
self.transport.write(data)
def connectionLost(self, reason):
self.factory.connectionLost.append(reason)
class EchoProtocolFactory(Factory):
protocol = EchoProtocol
def __init__(self, connectionLost):
self.connectionLost = connectionLost
class SockJSWireProtocolWrapperTestCase(unittest.TestCase):
'''Sanity tests for SockJS transport base class.'''
def setUp(self):
self.transport = StringTransport()
self.receivedData = []
self.connectionsLost = []
self.wrappedFactory = RecordingProtocolFactory(self.receivedData,
self.connectionsLost)
self.factory = self.makeFactory()
self.address = IPv4Address('TCP', '127.0.0.1', 80)
self.protocol = self.factory.buildProtocol(self.address)
self.protocol.makeConnection(self.transport)
def makeFactory(self):
'''Returns the WrappingFactory for this test case. Override me in
subclasses that test different session wrapper protocols.
'''
return P.SockJSWireProtocolWrappingFactory(self.wrappedFactory)
def test_writeOpen(self):
'''writeOpen writes a single open frame.'''
self.protocol.writeOpen()
self.assertEqual(self.transport.value(), b'o')
def test_writeHeartbeat(self):
'''writeHeartbeat writes a single heartbeat frame.'''
self.protocol.writeHeartbeat()
self.assertEqual(self.transport.value(), b'h')
def test_writeClose(self):
'''writeClose writes a close frame containing the provided reason.'''
self.protocol.writeClose(P.DISCONNECT.GO_AWAY)
self.assertEqual(self.transport.value(), b'c[3000,"Go away!"]')
def test_writeData(self):
'''writeData writes the provided data to the transport.'''
self.protocol.writeData(["letter", 2])
self.assertEqual(self.transport.value(), b'a["letter",2]')
def test_dataReceived(self):
'''The wrapped protocol receives deserialized JSON data.'''
self.protocol.dataReceived(b'["letter",2]')
self.protocol.dataReceived(b'["another",null]')
self.assertEqual(self.receivedData, [["letter", 2],
["another", None]])
def test_closeFrame(self):
'''closeFrame returns a serialized close frame for use by the
caller.
'''
frame = self.protocol.closeFrame(P.DISCONNECT.GO_AWAY)
self.assertEqual(frame, b'c[3000,"Go away!"]')
self.assertFalse(self.transport.value())
def test_emptyDataReceived(self):
'''The wrapped protocol does not receive empty strings and the sender
receives an error message.
'''
with self.assertRaises(P.InvalidData) as excContext:
self.protocol.dataReceived(b'')
self.assertEqual(excContext.exception.reason,
P.INVALID_DATA.NO_PAYLOAD.value)
self.assertFalse(self.receivedData)
def test_badJSONReceived(self):
'''The wrapped protocol does not receive malformed JSON and the sender
receives an error message.
'''
with self.assertRaises(P.InvalidData) as excContext:
self.protocol.dataReceived(b'!!!')
self.assertEqual(excContext.exception.reason,
P.INVALID_DATA.BAD_JSON.value)
self.assertFalse(self.receivedData)
def test_jsonEncoder(self):
'''SockJSWireProtocolWrapper can use a json.JSONEncoder subclass for
writes.
'''
class ComplexEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, complex):
return [obj.real, obj.imag]
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
factory = P.SockJSWireProtocolWrappingFactory(
self.wrappedFactory,
jsonEncoder=ComplexEncoder)
encodingProtocol = factory.buildProtocol(self.address)
encodingProtocol.makeConnection(self.transport)
encodingProtocol.writeData([2 + 1j])
self.assertEqual(self.transport.value(), b'a[[2.0,1.0]]')
def test_jsonDecoder(self):
'''SockJSWireProtocolWrapper can use a json.JSONDecoder subclass for
receives.
'''
class SetDecoder(json.JSONDecoder):
def __init__(self, *args, **kwargs):
kwargs['object_hook'] = self.set_object_hook
super(SetDecoder, self).__init__(*args, **kwargs)
def set_object_hook(self, obj):
if isinstance(obj, dict) and obj.get('!set'):
return set(obj['!set'])
return obj
factory = P.SockJSWireProtocolWrappingFactory(
self.wrappedFactory,
jsonDecoder=SetDecoder)
encodingProtocol = factory.buildProtocol(self.address)
encodingProtocol.makeConnection(self.transport)
encodingProtocol.dataReceived(b'{"!set": [1, 2, 3]}')
self.assertEqual(self.receivedData, [{1, 2, 3}])
class RecordsWireProtocolActions(object):
def __init__(self):
self.lostConnection = 0
self.wroteOpen = 0
self.wroteHeartbeat = 0
self.wroteData = []
self.wroteClose = []
def empty(self):
return not any([self.lostConnection,
self.wroteOpen,
self.wroteHeartbeat,
self.wroteData,
self.wroteClose])
class FakeSockJSWireProtocol(object):
def __init__(self, recorder):
self._recorder = recorder
def loseConnection(self):
self._recorder.lostConnection += 1
def writeOpen(self):
self._recorder.wroteOpen += 1
def writeHeartbeat(self):
self._recorder.wroteHeartbeat += 1
def writeData(self, data):
self._recorder.wroteData.append(data)
def writeClose(self, reason):
self._recorder.wroteClose.append(reason)
class RecordsHeartbeat(object):
def __init__(self):
self.scheduleCalls = 0
self.stopCalls = 0
def scheduleCalled(self):
self.scheduleCalls += 1
def stopCalled(self):
self.stopCalls += 1
class FakeHeartbeatClock(object):
def __init__(self, recorder):
self.writeHeartbeat = None
self._recorder = recorder
def schedule(self):
self._recorder.scheduleCalled()
def stop(self):
self._recorder.stopCalled()
class SockJSProtocolMachineTestCase(unittest.TestCase):
def setUp(self):
self.heartbeatRecorder = RecordsHeartbeat()
self.heartbeater = FakeHeartbeatClock(self.heartbeatRecorder)
self.sockJSMachine = P.SockJSProtocolMachine(self.heartbeater)
self.protocolRecorder = RecordsWireProtocolActions()
self.sockJSWireProtocol = FakeSockJSWireProtocol(self.protocolRecorder)
def test_disconnectBeforeConnect(self):
'''Disconnecting before connecting permanently disconnects
a SockJSProtocolMachine.
'''
self.sockJSMachine.disconnect()
self.assertTrue(self.protocolRecorder.empty())
with self.assertRaises(KeyError):
self.sockJSMachine.connect(self.sockJSWireProtocol)
def test_connect(self):
'''SockJSProtocolMachine.connect writes an opening frame and schedules
a heartbeat.
'''
self.sockJSMachine.connect(self.sockJSWireProtocol)
self.assertEqual(self.protocolRecorder.wroteOpen, 1)
self.assertEqual(self.heartbeatRecorder.scheduleCalls, 1)
def test_write(self):
'''SockJSProtocolMachine.write writes the requested data and
(re)schedules a heartbeat.
'''
self.sockJSMachine.connect(self.sockJSWireProtocol)
self.sockJSMachine.write([1, 'something'])
self.assertEqual(self.protocolRecorder.wroteOpen, 1)
self.assertEqual(self.protocolRecorder.wroteData, [[1, 'something']])
self.assertEqual(self.heartbeatRecorder.scheduleCalls, 2)
def test_heartbeat(self):
'''SockJSProtocolMachine.heartbeat writes a heartbeat!'''
self.sockJSMachine.connect(self.sockJSWireProtocol)
self.sockJSMachine.heartbeat()
self.assertEqual(self.protocolRecorder.wroteOpen, 1)
self.assertEqual(self.protocolRecorder.wroteHeartbeat, 1)
self.assertEqual(self.heartbeatRecorder.scheduleCalls, 1)
def test_withHeartBeater(self):
'''SockJSProtocolMachine.withHeartbeater should associate a new
instance's heartbeat method with the heartbeater.
'''
instance = P.SockJSProtocolMachine.withHeartbeater(
self.heartbeater)
instance.connect(self.sockJSWireProtocol)
self.heartbeater.writeHeartbeat()
self.assertEqual(self.protocolRecorder.wroteOpen, 1)
self.assertEqual(self.protocolRecorder.wroteHeartbeat, 1)
self.assertEqual(self.heartbeatRecorder.scheduleCalls, 1)
def test_receive(self):
'''SockJSProtocolMachine.receive passes decoded data through.'''
self.sockJSMachine.connect(self.sockJSWireProtocol)
data = [1, 'something']
self.assertEqual(self.sockJSMachine.receive(data), data)
def test_disconnect(self):
'''SockJSProtocolMachine.disconnect implements an active close: it
writes a close frame, disconnects the transport, and cancels
any pending heartbeats.
'''
self.sockJSMachine.connect(self.sockJSWireProtocol)
self.sockJSMachine.disconnect(reason=P.DISCONNECT.GO_AWAY)
self.assertIs(None, self.sockJSMachine.transport)
self.assertEqual(self.protocolRecorder.wroteOpen, 1)
self.assertEqual(self.protocolRecorder.wroteClose,
[P.DISCONNECT.GO_AWAY])
self.assertEqual(self.protocolRecorder.lostConnection, 1)
self.assertEqual(self.heartbeatRecorder.stopCalls, 1)
def test_close(self):
'''SockJSProtocolMachine.close implements a passive close: it drops
the transport and cancels any pending heartbeats.
'''
self.sockJSMachine.connect(self.sockJSWireProtocol)
self.sockJSMachine.close()
self.assertIs(None, self.sockJSMachine.transport)
self.assertEqual(self.protocolRecorder.wroteOpen, 1)
self.assertEqual(self.heartbeatRecorder.stopCalls, 1)
class RecordsProtocolMachineActions(object):
def __init__(self):
self.connect = []
self.received = []
self.written = []
self.disconnected = 0
self.closed = 0
class FakeSockJSProtocolMachine(object):
def __init__(self, recorder):
self._recorder = recorder
def connect(self, transport):
self._recorder.connect.append(transport)
def receive(self, data):
self._recorder.received.append(data)
return data
def write(self, data):
self._recorder.written.append(data)
def disconnect(self):
self._recorder.disconnected += 1
def close(self):
self._recorder.closed += 1
class SockJSProtocolTestCase(unittest.TestCase):
def setUp(self):
self.connectionLost = []
self.stateMachineRecorder = RecordsProtocolMachineActions()
wrappedFactory = EchoProtocolFactory(self.connectionLost)
self.factory = P.SockJSProtocolFactory(wrappedFactory)
def fakeStateMachineFactory():
return FakeSockJSProtocolMachine(self.stateMachineRecorder)
self.factory.stateMachineFactory = fakeStateMachineFactory
self.address = IPv4Address('TCP', '127.0.0.1', 80)
self.transport = StringTransport()
self.protocol = self.factory.buildProtocol(self.address)
self.protocol.makeConnection(self.transport)
def test_makeConnection(self):
'''makeConnection connects the state machine to the transport.'''
self.assertEqual(self.stateMachineRecorder.connect, [self.transport])
def test_dataReceived_write(self):
'''dataReceived passes the data to the state machine's receive method
and the wrapped protocol. With our echo protocol, we also
test that write() calls the machine's write method.
'''
self.protocol.dataReceived(b'"something"')
self.assertEqual(self.stateMachineRecorder.received,
[b'"something"'])
self.assertEqual(self.stateMachineRecorder.written,
[b'"something"'])
def test_dataReceived_writeSequence(self):
'''dataReceived passes the data to the state machine's receive method
and the wrapped protocol. With our echo protocol, we also
test that writeSequence() calls the machine's write method.
'''
self.protocol.dataReceived([b'"x"', b'"y"'])
self.assertEqual(self.stateMachineRecorder.received,
[[b'"x"', b'"y"']])
# multiple write calls
self.assertEqual(self.stateMachineRecorder.written,
[b'"x"', b'"y"'])
def test_loseConnection(self):
'''loseConnection calls the state machine's disconnect method.'''
self.protocol.loseConnection()
self.assertEqual(self.stateMachineRecorder.disconnected, 1)
def test_connectionLost(self):
'''connectionLost calls the state machine's close method and the
wrapped protocol's connectionLost method.
'''
reason = "This isn't a real reason"
self.protocol.connectionLost(reason)
self.assertEqual(self.stateMachineRecorder.closed, 1)
self.assertEqual(self.connectionLost, [reason])
class RecordsRequestSessionActions(object):
def __init__(self):
self.request = None
self.connectionsEstablished = []
self.connectionsCompleted = 0
self.requestsBegun = 0
# TODO - these next two are needlessly confusing -- rename one
# or both!
self.receivedData = []
self.dataReceived = []
self.completelyWritten = []
self.otherRequestsClosed = []
self.dataWritten = []
self.heartbeatsCompleted = 0
self.currentRequestsFinished = 0
self.connectionsLostCompletely = 0
self.connectionsCompletelyLost = []
self.connectionsMadeFromRequest = []
class FakeRequestSessionProtocolWrapper(object):
def __init__(self, recorder):
self.recorder = recorder
self.terminationDeferred = Deferred()
@property
def request(self):
return self.recorder.request
@request.setter
def request(self, request):
self.recorder.request = request
def makeConnectionFromRequest(self, request):
self.recorder.connectionsMadeFromRequest.append(request)
def establishConnection(self, request):
self.recorder.connectionsEstablished.append(request)
def completeConnection(self):
self.recorder.connectionsCompleted += 1
def beginRequest(self):
self.recorder.requestsBegun += 1
def completeDataReceived(self, data):
self.recorder.dataReceived.append(data)
def closeOtherRequest(self, request, reason):
self.recorder.otherRequestsClosed.append((request, reason))
def dataReceived(self, data):
self.recorder.receivedData.append(data)
def writeData(self, data):
self.recorder.dataWritten.append(data)
def completeWrite(self, data):
self.recorder.completelyWritten.append(data)
def completeHeartbeat(self):
self.recorder.heartbeatsCompleted += 1
def finishCurrentRequest(self):
self.recorder.currentRequestsFinished += 1
self.request = None
def closeFrame(self, reason):
return reason
def completeLoseConnection(self):
self.recorder.connectionsLostCompletely += 1
def completeConnectionLost(self, reason):
self.recorder.connectionsCompletelyLost.append(reason)
class DummyRequestAllowsNonBytes(DummyRequest):
'''A DummyRequest subclass that does not assert write has been called
with bytes. Use me when you want to inspect something that an
intermediary would have serialized before writing it to the
request.
'''
def write(self, data):
self.written.append(data)
class RequestSessionMachineTestCase(unittest.TestCase):
def setUp(self):
self.recorder = RecordsRequestSessionActions()
self.fakeRequestSession = FakeRequestSessionProtocolWrapper(
self.recorder)
self.requestSessionMachine = P.RequestSessionMachine(
self.fakeRequestSession)
self.request = DummyRequest([b'ignored'])
def test_firstAttach(self):
'''Attaching the first request to a RequestSessionMachine sets up the
protocol wrapper, begins the request, then attaches the
protocol wrapper to the wrapped protocol as its transport.
'''
self.requestSessionMachine.attach(self.request)
self.assertIs(self.recorder.request, self.request)
self.assertEqual(self.recorder.connectionsEstablished, [self.request])
self.assertEqual(self.recorder.requestsBegun, 1)
self.assertEqual(self.recorder.connectionsCompleted, 1)
def test_connectedHaveTransportWrite(self):
'''With an attached request, write calls completeWrite and does not
buffer.
'''
self.test_firstAttach()
self.requestSessionMachine.write(b"abc")
self.assertEqual(self.recorder.completelyWritten, [b"abc"])
def test_connectedHaveTransportReceive(self):
'''With an attached request, received data passes on to the wrapped
protocol.
'''
self.test_firstAttach()
self.requestSessionMachine.receive(b"abc")
self.assertEqual(self.recorder.dataReceived, [b"abc"])
def test_connectedHaveTransportHeartbeat(self):
'''With an attached request, heartbeats are immediately sent.'''
self.test_firstAttach()
self.requestSessionMachine.heartbeat()
self.assertEqual(self.recorder.heartbeatsCompleted, 1)
def test_connectedHaveTransportDetach(self):
'''Detaching from a request finishes that request.'''
self.test_firstAttach()
self.requestSessionMachine.detach()
self.assertEqual(self.recorder.currentRequestsFinished, 1)
def assertDuplicateRequestClosedWith(self, reason):
'''Assert that a second request is finished with the given reason.'''
duplicateRequest = 'not really a request'
self.requestSessionMachine.attach(duplicateRequest)
self.assertEqual(self.recorder.otherRequestsClosed,
[(duplicateRequest, reason)])
def test_connectedHaveTransportDuplicateAttach(self):
'''Attempting to attach a request to a RequestSessionMachine that's
already attached to a request closes the duplicate request.
'''
self.test_firstAttach()
self.assertDuplicateRequestClosedWith(P.DISCONNECT.STILL_OPEN)
duplicateRequest = 'not really a request'
self.requestSessionMachine.attach(duplicateRequest)
def test_connectedHaveTransportWriteCloseAndLoseConnection(self):
'''Writing a close frame to a RequestSessionMachine stores it on the
machine so it will be written upon loseConnection.
'''
self.test_firstAttach()
self.requestSessionMachine.writeClose(P.DISCONNECT.GO_AWAY)
self.requestSessionMachine.loseConnection()
self.assertDuplicateRequestClosedWith(P.DISCONNECT.GO_AWAY)
self.assertEqual(self.recorder.connectionsLostCompletely, 1)
def test_connectedHaveTransportLoseConnection(self):
'''Losing the connection closes the connection and closes the
wrapped protocol.
'''
self.test_firstAttach()
self.requestSessionMachine.loseConnection()
self.assertEqual(self.recorder.connectionsLostCompletely, 1)
def test_connectedHaveTransportConnectionLost(self):
'''connectionLost unsets the RequestSession's request (but does *not*
call its finish() a second time) and calls the wrapped
protocol's connectionLost.
'''
self.test_firstAttach()
self.requestSessionMachine.connectionLost(reason="Some Reason")
self.assertIsNone(self.recorder.request)
self.assertEqual(self.recorder.connectionsCompletelyLost,
["Some Reason"])
self.assertFalse(self.request.finished)
def test_connectedNoTransportEmptyBufferReceive(self):
'''The wrapped protocol receives data even when there's no attached
outgoing request.
'''
self.test_firstAttach()
self.requestSessionMachine.detach()
deserializedMessage = ["I wasn't serialized"]
self.requestSessionMachine.receive(deserializedMessage)
def test_connectedNoTransportEmptyBufferHeartbeat(self):
'''Heartbeats are not sent when there's no attached request and the
write buffer is empty.
'''
self.test_firstAttach()
self.requestSessionMachine.detach()
self.requestSessionMachine.heartbeat()
self.assertEqual(self.recorder.heartbeatsCompleted, 0)
def test_connectedNoTransportEmptyBufferDetach(self):
'''Detaching a RequestSessionMachine that's already detached is a safe
noop, so wrappers can always call detach() safely.
'''
self.test_firstAttach()
self.requestSessionMachine.detach()
self.requestSessionMachine.detach()
def test_connectedNoTransportEmptyBufferWriteCloseAndLoseConnection(self):
'''Writing a close frame to a RequestSessionMachine stores it on the
machine so it will be written upon loseConnection.
'''
self.test_firstAttach()
self.requestSessionMachine.detach()
self.requestSessionMachine.writeClose(P.DISCONNECT.GO_AWAY)
self.requestSessionMachine.loseConnection()
self.assertDuplicateRequestClosedWith(P.DISCONNECT.GO_AWAY)
self.assertEqual(self.recorder.connectionsLostCompletely, 1)
def test_connectedNoTransportEmptyBufferConnectionLost(self):
'''A RequestSessionMachine with no attached request and an empty
buffer simply closes the protocol upon connectionLost.
'''
self.test_firstAttach()
self.requestSessionMachine.detach()
someReason = 'not a real reason'
self.requestSessionMachine.connectionLost(someReason)
self.assertEqual(self.recorder.connectionsCompletelyLost, [someReason])
def test_noTransportWriteThenAttach(self):
'''Writes are buffered when there's no attached request. Attaching a
request flushes the buffer.
'''
self.test_firstAttach()
self.requestSessionMachine.detach()
unserializedMessage = ["I wasn't serialized"]
self.requestSessionMachine.write(unserializedMessage)
self.requestSessionMachine.write(unserializedMessage)
self.assertEqual(self.requestSessionMachine.buffer,
unserializedMessage * 2)
newRequest = DummyRequestAllowsNonBytes([b'newRequest'])
self.requestSessionMachine.attach(newRequest)
self.assertEqual(self.requestSessionMachine.buffer, [])
# the two lists have been concatenated into one, and were
# flushed with a single call to requestSession.writeData
self.assertEqual(self.recorder.dataWritten,
[unserializedMessage * 2])
def test_connectedNoTransportPendingReceive(self):
'''Received data passes immediately to the wrapped protocol, even when
there's pending data.
'''
self.test_firstAttach()
self.requestSessionMachine.detach()
self.requestSessionMachine.write(b'abc')
self.requestSessionMachine.receive(b'xyz')
self.assertEqual(self.recorder.dataReceived, [b'xyz'])
def test_connectedNoTransportPendingHeartbeat(self):
'''Heartbeats are not sent when there's no attached request and
pending data.
'''
self.test_firstAttach()
self.requestSessionMachine.detach()
self.requestSessionMachine.write(b'abc')
self.requestSessionMachine.heartbeat()
self.assertEqual(self.recorder.heartbeatsCompleted, 0)
def test_connectedNoTransportPendingWriteCloseAndLoseConnection(self):
'''Writing a close frame to a RequestSessionMachine stores it on the
machine so it will be written upon loseConnection.
'''
self.test_firstAttach()
self.requestSessionMachine.detach()
self.requestSessionMachine.write(b'abc')
self.requestSessionMachine.writeClose(P.DISCONNECT.GO_AWAY)
self.requestSessionMachine.loseConnection()
self.assertDuplicateRequestClosedWith(P.DISCONNECT.GO_AWAY)
self.assertEqual(self.recorder.connectionsLostCompletely, 1)
def test_connectedNoTransportPendingConnectionLost(self):
'''If the session times out before all data can be written,
connectionLost calls the wrapped protocol's connectionLost
with a SessionTimeout failure.
'''
self.test_firstAttach()
self.requestSessionMachine.detach()
self.requestSessionMachine.write('xyz')
self.requestSessionMachine.connectionLost()
self.assertEqual(len(self.recorder.connectionsCompletelyLost), 1)
failure = self.recorder.connectionsCompletelyLost[0]
failure.trap(P.SessionTimeout)
class TimeoutClockTestCase(unittest.TestCase):
def setUp(self):
self.timeoutDeferred = Deferred()
self.clock = Clock()
self.length = 5.0
self.timeoutClock = P.TimeoutClock(self.timeoutDeferred,
length=self.length,
clock=self.clock)
def test_start(self):
'''A timeout expires a connection if not interrupted and then
stops.
'''
self.timeoutClock.start()
pendingExpiration = self.timeoutClock.timeoutCall
self.assertEqual(self.clock.getDelayedCalls(), [pendingExpiration])
self.clock.advance(self.length * 2)
def assertTimedOutAndCannotRestartOrStop(_):
self.assertFalse(self.clock.getDelayedCalls())
self.assertIsNone(self.timeoutClock.timeoutCall)
with self.assertRaises(RuntimeError):
self.timeoutClock.start()
with self.assertRaises(RuntimeError):
self.timeoutClock.reset()
# no effect
self.timeoutClock.stop()
self.assertFalse(self.clock.getDelayedCalls())
self.assertIsNone(self.timeoutClock.timeoutCall)
self.timeoutDeferred.addCallback(assertTimedOutAndCannotRestartOrStop)
return self.timeoutDeferred
def test_reset_interrupts(self):
'''A reset() call removes the pending timeout, so that a
subsequent start() call can reschedule it.
'''
self.timeoutClock.start()
pendingExpiration = self.timeoutClock.timeoutCall
self.assertEqual(self.clock.getDelayedCalls(), [pendingExpiration])
self.timeoutClock.reset()
resetPendingExecution = self.timeoutClock.timeoutCall
self.assertIsNot(pendingExpiration, resetPendingExecution)
self.assertEqual(self.clock.getDelayedCalls(), [])
def test_stop(self):
'''A stop() call stops the pending timeout and is idempotent.
'''
self.timeoutClock.start()
pendingExpiration = self.timeoutClock.timeoutCall
self.assertEqual(self.clock.getDelayedCalls(), [pendingExpiration])
self.timeoutClock.stop()
self.assertEqual(self.clock.getDelayedCalls(), [])
self.assertIsNone(self.timeoutClock.timeoutCall)
self.timeoutClock.stop()
self.assertEqual(self.clock.getDelayedCalls(), [])
self.assertIsNone(self.timeoutClock.timeoutCall)
class RecordsTimeoutClockActions(object):
startCalls = 0
stopCalls = 0
resetCalls = 0
class FakeTimeoutClock(object):
def __init__(self, recorder):
self.recorder = recorder
def start(self):
self.recorder.startCalls += 1
def stop(self):
self.recorder.stopCalls += 1
def reset(self):
self.recorder.resetCalls += 1
class RecordsSessionMachineActions(object):
def __init__(self):
self.attachedRequests = []
self.detachCalls = 0
self.dataWritten = []
self.receivedData = []
self.closeReasonsWritten = []
self.heartbeatCalls = 0
self.loseConnectionCalls = 0
self.connectionsLostReasons = []
class FakeRequestSessionMachine(object):
def __init__(self, recorder):
self.recorder = recorder
def attach(self, request):
self.recorder.attachedRequests.append(request)
def detach(self):
self.recorder.detachCalls += 1
def write(self, data):
self.recorder.dataWritten.append(data)
def receive(self, data):
self.recorder.receivedData.append(data)
def writeClose(self, reason):
self.recorder.closeReasonsWritten.append(reason)
def heartbeat(self):
self.recorder.heartbeatCalls += 1
def loseConnection(self):
self.recorder.loseConnectionCalls += 1
def connectionLost(self, reason):
self.recorder.connectionsLostReasons.append(reason)
class RequestSessionProtocolWrapperTestCase(unittest.TestCase):
'''Tests for the ProtocolWrapper that adapts a
twisted.web.server.Request to a SockJS polling transport.
'''
def setUp(self):
self.receivedData = []
self.connectionsLost = []
self.timeoutClockRecorder = RecordsTimeoutClockActions()
self.timeoutClock = FakeTimeoutClock(self.timeoutClockRecorder)
self.sessionMachineRecorder = RecordsSessionMachineActions()
self.sessionMachine = FakeRequestSessionMachine(
self.sessionMachineRecorder)
self.wrappedFactory = RecordingProtocolFactory(self.receivedData,
self.connectionsLost)
# TODO: is it better to test this with SockJSProtocol? Right
# now it seems the answer is no, because it's better to test
# the protocol wrapping functionality against the generic
# interface.
self.factory = self.makeFactory()
self.factory.timeoutClockFactory = self.fakeTimeoutClockFactory
self.factory.sessionMachineFactory = self.fakeSessionMachineFactory
self.address = IPv4Address('TCP', '127.0.0.1', 80)
self.protocol = self.factory.buildProtocol(self.address)
self.request = DummyRequest([b'ignored'])
def makeFactory(self):
'''Returns the WrappingFactory for this test case. Override me in
subclasses that test different session wrapper protocols.
'''
return P.RequestSessionWrappingFactory(self.wrappedFactory)
def fakeTimeoutClockFactory(self, terminationDeferred):
return self.timeoutClock
def fakeSessionMachineFactory(self, protocol):
return self.sessionMachine
def test_makeConnection_fails(self):
'''You can't call makeConnection on a
RequestSessionProtocolWrapper.
'''
with self.assertRaises(RuntimeError):
self.protocol.makeConnection('ignored')
def test_attached(self):
'''The attached property returns True iff a request is attached.'''
self.assertFalse(self.protocol.request)
self.assertFalse(self.protocol.attached)
self.protocol.request = self.request
self.assertTrue(self.protocol.attached)
def test_makeConnectionFromRequest(self):
'''makeConnectionFromRequest has the session state machine attach the
request.
'''
self.protocol.makeConnectionFromRequest(self.request)
self.assertEqual(self.sessionMachineRecorder.attachedRequests,
[self.request])
def test_detachFromRequest(self):
'''detachFromRequest has the session state machine perform the detach.
'''
self.protocol.detachFromRequest()
self.assertEqual(self.sessionMachineRecorder.detachCalls, 1)
def test_write(self):
'''write adds a newline before writing the data to the current
request.
'''
self.protocol.request = self.request
self.protocol.write(b'something')
self.assertEqual(self.request.written, [b'something\n'])
def test_closeOtherRequest(self):
'''closeOtherRequest writes a close frame consisting of a reason and a
newline to a request.
'''
self.protocol.closeOtherRequest(self.request, P.DISCONNECT.GO_AWAY)
self.assertEqual(self.request.written, [b'c[3000,"Go away!"]\n'])
def test_dataReceived(self):
'''dataReceived passes the data off to the session state machine.
'''
self.protocol.dataReceived('something')
self.assertEqual(self.sessionMachineRecorder.receivedData,
['something'])
def test_writeData(self):
'''writeData passes the data off to the session state machine.'''
self.protocol.writeData('something')
self.assertEqual(self.sessionMachineRecorder.dataWritten,
['something'])
def test_writeHeartbeat(self):
'''writeHeartbeat has the session state machine write a heartbeat.'''
self.protocol.writeHeartbeat()
self.assertEqual(self.sessionMachineRecorder.heartbeatCalls, 1)
def test_writeClose(self):
'''writeClose has the session state machine close the request.'''
self.protocol.writeClose("reason")
self.assertEqual(self.sessionMachineRecorder.closeReasonsWritten,
["reason"])
def test_loseConnection(self):
'''loseConnection tells the session state machine to lose the
connection and the timeout clock to start, but does both only once.
'''
self.protocol.loseConnection()
self.assertTrue(self.protocol.disconnecting)
self.assertEqual(self.sessionMachineRecorder.loseConnectionCalls, 1)
self.assertEqual(self.timeoutClockRecorder.startCalls, 1)
self.protocol.loseConnection()
self.assertTrue(self.protocol.disconnecting)
self.assertEqual(self.sessionMachineRecorder.loseConnectionCalls, 1)
self.assertEqual(self.timeoutClockRecorder.startCalls, 1)
def test_connectionLost_disconnecting(self):
'''If connectionLost has been called after loseConnection, then this
connection will linger in a disconnected state until the
timeout expires. The protocol's terminationDeferred does not
fire and the timeout clock is not stopped, but the session
machine learns about the lost connection.
'''
self.protocol.disconnecting = 1
self.protocol.connectionLost("reason")
unfiredDeferred = self.protocol.terminationDeferred
with self.assertRaises(AttributeError):
unfiredDeferred.result
self.assertFalse(self.timeoutClockRecorder.stopCalls)
self.assertEqual(self.sessionMachineRecorder.connectionsLostReasons,
['reason'])
self.assertIsNone(self.protocol.sessionMachine)
def test_connectionLost_clientClose(self):
'''If connectionLost is called because the client closed the
connection, then this connection has disappeared suddenly.
Consequently, the protocol's terminationDeferred errbacks with
the provided reason, the timeout clock is stopped, and the
session machine learns about the lost connection.
'''
erroredDeferred = self.protocol.terminationDeferred
def trapConnectionDone(failure):
failure.trap(error.ConnectionDone)
erroredDeferred.addErrback(trapConnectionDone)
self.protocol.connectionLost(connectionDone)
self.assertEqual(self.timeoutClockRecorder.stopCalls, 1)
self.assertEqual(self.sessionMachineRecorder.connectionsLostReasons,
[connectionDone])
self.assertIsNone(self.protocol.sessionMachine)
return erroredDeferred
def test_consumerProducer_notImplemented(self):
'''Registration of consumers and producers is not implemented.'''
with self.assertRaises(NotImplementedError):
self.protocol.registerProducer(None, None)
with self.assertRaises(NotImplementedError):
self.protocol.unregisterProducer()
def test_beginRequest_timeout_reset(self):
'''Beginning a request resets the timeout.
'''
self.protocol.request = self.request
self.protocol.beginRequest()
self.assertEqual(self.timeoutClockRecorder.resetCalls, 1)
def test_beginRequest_finishedNotifier_forwards_failures(self):
'''Beginning a request retrieves a Deferred from that request that
forwards failures to the protocol's connectionLost.
'''
self.protocol.request = self.request
self.protocol.beginRequest()
reason = connectionDone
def assertConnectionLostCalled(ignored):
recordedExceptions = [
reason.value for reason in
self.sessionMachineRecorder.connectionsLostReasons]
self.assertEqual(recordedExceptions, [reason.value])
finishedNotifier = self.protocol.finishedNotifier
finishedNotifier.addCallback(assertConnectionLostCalled)
def trapConnectionDone(failure):
failure.trap(error.ConnectionDone)
terminationDeferred = self.protocol.terminationDeferred
terminationDeferred.addErrback(trapConnectionDone)
self.request.processingFailed(reason)
return DeferredList([finishedNotifier, terminationDeferred])
def test_beginRequest_finishedNotifier_traps_cancellation(self):
'''Beginning a request retrieves a Deferred from the request that
traps cancellation errors, preventing them from reaching the
protocol's connectionLost.
'''
self.protocol.request = self.request
self.protocol.beginRequest()
finishedNotifier = self.protocol.finishedNotifier
def assertConnectionLostNotCalled(ignored):
self.assertEqual(
self.sessionMachineRecorder.connectionsLostReasons,
[])
finishedNotifier.addCallback(assertConnectionLostNotCalled)
finishedNotifier.cancel()
return finishedNotifier
def test_establishConnection(self):
'''Establishing a connection makes the RequestSessionProtocolWrapper
instance directly provide the same interface as the request's
transport, but does *not* call makeConnection, and thus
connectionMade, on the wrapped protocol. That's because we may
decide to immediately close the request as part of the polling
transport handshake. This lets us interpose state changes
that set up buffering between the handshake and the protocol's
connectionMade logic.
'''
class IStubTransport(Interface):
pass
@implementer(IStubTransport)
class StubTransport:
pass
# Looking up what RequestSessionProtocolWrapper implements
# also mutates the class. It adds __implemented__ and
# __providedBy__ attributes to it. These prevent __getattr__
# from causing the IStubTransport.providedBy call below from
# returning True. If, by accident, nothing else causes these
# attributes to be added to ProtocolWrapper, the test will
# pass, but the interface will only be provided until
# something does trigger their addition. So we just trigger
# it right now to be sure.
implementedBy(P.RequestSessionProtocolWrapper)
self.request.transport = StubTransport()
self.protocol.establishConnection(self.request)
self.assertTrue(IStubTransport.providedBy(self.protocol))
self.assertFalse(self.protocol.wrappedProtocol.connectionMadeCalls)
def test_completeConnection(self):
'''Completing a connection attaches the RequestSessionProtocolWrapper
instance to the wrapped protocol as the wrapped protocol's
transport and completes the Protocol's connection.
'''
self.protocol.completeConnection()
self.assertIs(self.protocol.wrappedProtocol.transport, self.protocol)
self.assertEqual(self.protocol.wrappedProtocol.connectionMadeCalls,
1)
def test_completeDataReceived(self):
'''Completing data reception passes that data on to the wrapped
protocol.
'''
self.protocol.completeDataReceived(b'["a"]')
self.assertEqual(self.receivedData, [["a"]])
def test_completeWrite(self):
'''Completing a write serializes the data to the request.'''
self.protocol.request = self.request
self.protocol.completeWrite(["a"])
self.assertEqual(self.request.written, [b'a["a"]\n'])
def test_completeHeartbeat(self):
'''Completing a heartbeat writes a heartbeat frame to the request.'''
self.protocol.request = self.request
self.protocol.completeHeartbeat()
self.assertEqual(self.request.written, [b'h\n'])
def test_completeConnectionLost(self):
'''Completing a lost connection calls the wrapped protocol's
connectionLost.
'''
self.request.transport = StringTransport()
self.protocol.establishConnection(self.request)
self.protocol.completeConnectionLost(connectionDone)
self.assertEqual(self.connectionsLost, [connectionDone])
def test_completeLoseConnection(self):
'''Completing losing a connection calls the wrapped protocol's
loseConnection.
'''
self.protocol.transport = transport = StringTransport()
self.protocol.completeLoseConnection()
self.assertTrue(transport.disconnecting)
def test_finishCurrentRequest(self):
'''Finishing the current request fires the finishedNotifier, calls
finish on the request, unsets the protocol's request and
finishedNotifier, and starts the timeout clock.
'''
self.protocol.request = self.request
self.protocol.beginRequest()
finishedNotifier = self.protocol.finishedNotifier
self.protocol.finishCurrentRequest()
self.assertGreater(self.request.finished, 0)
self.assertFalse(self.protocol.attached)
self.assertIsNone(self.protocol.finishedNotifier)
self.assertEqual(self.timeoutClockRecorder.startCalls, 1)
return finishedNotifier
def test_timedOutCallback(self):
'''The termination deferred's callback sets disconnecting and calls
connectionLost. Setting disconnecting avoids errbacking the
deferred that's just been fired!
'''
terminationDeferred = self.protocol.terminationDeferred
def assertConnectionLostCalled(ignored):
self.assertTrue(self.protocol.disconnecting)
self.assertEqual(self.sessionMachineRecorder.connectionsLostReasons,
[connectionDone])
terminationDeferred.addCallback(assertConnectionLostCalled)
terminationDeferred.callback(P.TimeoutClock.EXPIRED)
return terminationDeferred
class SessionHouseTestCase(unittest.TestCase):
def setUp(self):
self.sessions = P.SessionHouse()
self.sessionID = b'session'
self.request = DummyRequest([b'server', self.sessionID, b'ignored'])
self.request.transport = StringTransport()
self.recorder = RecordsRequestSessionActions()
self.protocol = FakeRequestSessionProtocolWrapper(self.recorder)
def buildProtocol(self, address):
return self.protocol
def test_validateAndExtractSessionID(self):
'''Invalid server or session IDs result in None, while valid ones
result in a sessionID.
'''
noIDs = DummyRequest([])
self.assertIsNone(self.sessions.validateAndExtractSessionID(noIDs))
emptyIDs = DummyRequest([b'', b'', b''])
self.assertIsNone(self.sessions.validateAndExtractSessionID(emptyIDs))
hasDot = DummyRequest([b'server', b'session', b'has.thatdot'])
self.assertIsNone(self.sessions.validateAndExtractSessionID(hasDot))
self.assertEqual(
self.sessions.validateAndExtractSessionID(self.request),
b'session')
def test_attachToSession_returns_False(self):
'''attachToSession returns False if a request with invalid IDs
attempts to attach to a session.
'''
self.assertFalse(self.sessions.attachToSession(self, DummyRequest([])))
def test_attachToSession_new_session(self):
'''attachToSession creates a new session when given a request with a
novel and valid session ID.
'''
self.assertTrue(self.sessions.attachToSession(self, self.request))
self.assertIs(self.sessions.sessions[self.sessionID], self.protocol)
self.assertEqual(self.recorder.connectionsMadeFromRequest,
[self.request])
def test_sessionClosed_on_callback(self):
'''Firing the protocol's terminationDeferred removes the session from
the house.
'''
self.test_attachToSession_new_session()
self.protocol.terminationDeferred.callback(None)
self.assertNotIn(self.sessionID, self.sessions.sessions)
return self.protocol.terminationDeferred
def test_sessionClosed_on_errback(self):
'''Errbacking the protocol's terminationDeferred removes the session
from the house.
'''
self.test_attachToSession_new_session()
self.protocol.terminationDeferred.errback(connectionDone)
self.assertNotIn(self.sessionID, self.sessions.sessions)
return self.protocol.terminationDeferred
def test_attachToSession_existing_session(self):
'''attachToSession returns the existing session when given a request
with a duplicate and valid session ID.
'''
self.test_attachToSession_new_session()
self.assertTrue(self.sessions.attachToSession(self, self.request))
self.assertIs(self.sessions.sessions[self.sessionID], self.protocol)
self.assertEqual(self.recorder.connectionsMadeFromRequest,
[self.request, self.request])
def test_writeToSession_returns_false(self):
'''writeToSession with an invalid session ID returns False.'''
self.assertFalse(self.sessions.writeToSession(DummyRequest([])))
def test_writeToSession_missing_session(self):
'''writeToSession with a valid but unknown session ID returns False.'''
unknownSession = self.sessionID * 2
self.assertFalse(self.sessions.writeToSession(
DummyRequest([b'server',
unknownSession,
b'ignored'])))
def test_writeToSession_existing_session(self):
'''writeToSession with a valid and known session ID returns True and
passes the request's content to the session's dataReceived.
'''
data = b'some data!'
self.request.content = io.BytesIO(data)
self.test_attachToSession_new_session()
self.assertTrue(self.sessions.writeToSession(self.request))
self.assertEqual(self.recorder.receivedData, [data])
class XHRSessionTestCase(RequestSessionProtocolWrapperTestCase):
def makeFactory(self):
return P.XHRSessionFactory(self.wrappedFactory)
def test_writeOpen(self):
'''XHRSession detaches the request immediately after writing an open
frame.
'''
self.protocol.request = self.request
self.protocol.writeOpen()
self.assertEqual(self.sessionMachineRecorder.detachCalls, 1)
def test_writeData(self):
'''XHRSession detaches the request immediately after writing any
data frame.
'''
self.protocol.request = self.request
self.protocol.writeData(['ignored'])
self.assertEqual(self.sessionMachineRecorder.detachCalls, 1)
class XHRStreamingSessionTestCase(RequestSessionProtocolWrapperTestCase):
maximumBytes = 128
def makeFactory(self):
return P.XHRStreamingSessionFactory(maximumBytes=self.maximumBytes,
wrappedFactory=self.wrappedFactory)
def test_writeOpen(self):
'''XHRStreamingSession writes a large prelude when establishing a
connection.
'''
self.protocol.request = self.request
self.protocol.writeOpen()
self.assertEqual(self.request.written, [b'h' * 2048 + b'\n',
b'o\n'])
self.assertEqual(self.sessionMachineRecorder.detachCalls, 0)
def test_completeWrite(self):
'''XHRStreamingSession detaches the request after writing at least
maximumBytes.
'''
self.protocol.request = self.request
self.protocol.completeWrite(['ignored'])
self.assertEqual(self.sessionMachineRecorder.detachCalls, 0)
self.protocol.completeWrite(['ignored' * self.maximumBytes])
self.assertEqual(self.sessionMachineRecorder.detachCalls, 1)
class WebSocketProtocolWrapperTestCase(SockJSWireProtocolWrapperTestCase):
def makeFactory(self):
return P.WebSocketWrappingFactory(self.wrappedFactory)
def test_emptyDataReceived(self):
'''dataReceived silently discards empty strings and does not call the
wrapped protocol's dataReceived.
'''
self.protocol.dataReceived(b'')
self.assertFalse(self.receivedData)
def test_badJSONReceived(self):
'''dataReceived silently closes the connection upon receipt of
malformed JSON and does not call the wrapped protocol's
dataReceived.
'''
self.protocol.dataReceived(b'!!!')
self.assertTrue(self.transport.disconnecting)
self.assertFalse(self.receivedData)
class WebSocketServerProtocolTestCase(unittest.TestCase):
def setUp(self):
self.receivedData = []
self.connectionsLost = []
self.wrappedFactory = RecordingProtocolFactory(self.receivedData,
self.connectionsLost)
buildProtocol = self.wrappedFactory.buildProtocol
def _buildProtocol(addr):
self.wrappedProtocol = buildProtocol(addr)
return self.wrappedProtocol
self.wrappedFactory.buildProtocol = _buildProtocol
self.factory = P.WebSocketSessionFactory(self.wrappedFactory)
self.address = IPv4Address('TCP', '127.0.0.1', 80)
self.protocol = self.factory.buildProtocol(self.address)
def makeFakeRequest(self):
'''This is laborious enough to warrant its own shortcut.'''
return A.ConnectionRequest(peer='ignored',
headers={},
host='ignored',
path='ignored',
params={},
version=-1,
origin=None,
protocols=[],
extensions=[])
def test_onConnect_text(self):
'''onConnect sets _binaryMode to True iff one of the protocols has
'binary' in it.
'''
notBinary = self.makeFakeRequest()
self.protocol.onConnect(notBinary)
self.assertFalse(self.protocol._binaryMode)
def test_onConnect_binary(self):
'''onConnect sets _binaryMode to True iff one of the protocols has
'binary' in it.
'''
binary = self.makeFakeRequest()
binary.protocols.append(b'binary')
self.protocol.onConnect(binary)
self.assertTrue(self.protocol._binaryMode)
def test_onOpen(self):
'''onOpen calls the underlying protocol's makeConnection method with
the _WebSocketServerProtocol instance as the transport.
'''
self.protocol.onOpen()
self.assertEqual(self.wrappedProtocol.connectionMadeCalls, 1)
def test_write_text(self):
'''write does not base64 encode text data; it is sent unmodified as a text frame.'''
# autobahn is very difficult to test -- fake out the
# sendMessage method
sentMessages = []
def recordSendMessage(data, isBinary):
sentMessages.append((data, isBinary))
self.test_onConnect_text()
self.protocol.sendMessage = recordSendMessage
self.protocol.write(b'some data')
self.assertEqual(sentMessages, [(b'some data', False)])
def test_write_binary(self):
'''write does not base64 encode binary data; it is sent unmodified as a binary frame.'''
sentMessages = []
def recordSendMessage(data, isBinary):
sentMessages.append((data, isBinary))
self.test_onConnect_binary()
self.protocol.sendMessage = recordSendMessage
self.protocol.write(b'some data')
self.assertEqual(sentMessages, [(b'some data', True)])
def test_onMessage_succeeds(self):
'''When the received message matches the binary mode of the
connection, the underlying protocol receives the message as
deserialized JSON.
'''
self.test_onConnect_text()
self.protocol.onMessage(b'["some data"]', isBinary=False)
self.assertEqual(self.receivedData, [['some data']])
def test_onMessage_is_binary_disagreement(self):
'''When the received message does not match the binary mode of the
connection, the connection fails and the underlying protocol
does not receive the message.
'''
failedConnectionReasons = []
def recordFailConnection(reason, message):
failedConnectionReasons.append(reason)
self.test_onConnect_binary()
self.protocol.failConnection = recordFailConnection
self.protocol.onMessage(b'["some data"]', isBinary=False)
self.assertEqual(self.receivedData, [])
self.assertEqual(failedConnectionReasons, [
self.protocol.CLOSE_STATUS_CODE_UNSUPPORTED_DATA])
|
|
# This file is part of the Indico plugins.
# Copyright (C) 2002 - 2022 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
from collections import defaultdict
from flask import g
from sqlalchemy import inspect
from indico.core import signals
from indico.core.db.sqlalchemy.links import LinkType
from indico.core.db.sqlalchemy.protection import ProtectionMode
from indico.modules.attachments.models.attachments import Attachment
from indico.modules.attachments.models.folders import AttachmentFolder
from indico.modules.categories.models.categories import Category
from indico.modules.events import Event
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.contributions.models.subcontributions import SubContribution
from indico.modules.events.notes.models.notes import EventNote
from indico.modules.events.sessions import Session
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.timetable.models.entries import TimetableEntryType
from indico_livesync.models.queue import ChangeType, LiveSyncQueueEntry
from indico_livesync.util import get_excluded_categories, obj_ref
def connect_signals(plugin):
# request
plugin.connect(signals.core.after_process, _apply_changes)
# moved
plugin.connect(signals.category.moved, _moved)
plugin.connect(signals.event.moved, _moved)
# created
plugin.connect(signals.event.created, _created)
plugin.connect(signals.event.restored, _restored)
plugin.connect(signals.event.contribution_created, _created)
plugin.connect(signals.event.subcontribution_created, _created)
# deleted
plugin.connect(signals.event.deleted, _deleted)
plugin.connect(signals.event.contribution_deleted, _deleted)
plugin.connect(signals.event.subcontribution_deleted, _deleted)
# updated
plugin.connect(signals.event.updated, _updated)
plugin.connect(signals.event.contribution_updated, _updated)
plugin.connect(signals.event.subcontribution_updated, _updated)
# event times
plugin.connect(signals.event.times_changed, _event_times_changed, sender=Event)
plugin.connect(signals.event.times_changed, _event_times_changed, sender=Contribution)
# location
plugin.connect(signals.event.location_changed, _location_changed, sender=Event)
plugin.connect(signals.event.location_changed, _location_changed, sender=Contribution)
plugin.connect(signals.event.location_changed, _location_changed, sender=Session)
plugin.connect(signals.event.location_changed, _session_block_location_changed, sender=SessionBlock)
# timetable
plugin.connect(signals.event.timetable_entry_created, _timetable_changed)
plugin.connect(signals.event.timetable_entry_updated, _timetable_changed)
plugin.connect(signals.event.timetable_entry_deleted, _timetable_changed)
# protection
plugin.connect(signals.acl.protection_changed, _category_protection_changed, sender=Category)
plugin.connect(signals.acl.protection_changed, _protection_changed, sender=Event)
plugin.connect(signals.acl.protection_changed, _protection_changed, sender=Session)
plugin.connect(signals.acl.protection_changed, _protection_changed, sender=Contribution)
# ACLs
plugin.connect(signals.acl.entry_changed, _acl_entry_changed, sender=Category)
plugin.connect(signals.acl.entry_changed, _acl_entry_changed, sender=Event)
plugin.connect(signals.acl.entry_changed, _acl_entry_changed, sender=Session)
plugin.connect(signals.acl.entry_changed, _acl_entry_changed, sender=Contribution)
# notes
plugin.connect(signals.event.notes.note_added, _created)
plugin.connect(signals.event.notes.note_restored, _restored)
plugin.connect(signals.event.notes.note_deleted, _deleted)
plugin.connect(signals.event.notes.note_modified, _updated)
# attachments
plugin.connect(signals.attachments.folder_deleted, _attachment_folder_deleted)
plugin.connect(signals.attachments.attachment_created, _created)
plugin.connect(signals.attachments.attachment_deleted, _attachment_deleted)
plugin.connect(signals.attachments.attachment_updated, _updated)
plugin.connect(signals.acl.protection_changed, _attachment_folder_protection_changed, sender=AttachmentFolder)
plugin.connect(signals.acl.protection_changed, _protection_changed, sender=Attachment)
plugin.connect(signals.acl.entry_changed, _attachment_folder_acl_entry_changed, sender=AttachmentFolder)
plugin.connect(signals.acl.entry_changed, _acl_entry_changed, sender=Attachment)
def _is_category_excluded(category):
excluded_categories = get_excluded_categories()
return any(c.id in excluded_categories for c in category.chain_query)
def _moved(obj, old_parent, **kwargs):
# if an unlisted event is moved, it triggers a creation instead
if isinstance(obj, Event) and old_parent is None:
_register_change(obj, ChangeType.created)
else:
_register_change(obj, ChangeType.moved)
new_category = obj if isinstance(obj, Category) else obj.category
old_excluded = _is_category_excluded(old_parent) if old_parent else False
new_excluded = _is_category_excluded(new_category)
if old_excluded != new_excluded:
_register_change(obj, ChangeType.unpublished if new_excluded else ChangeType.published)
if obj.is_inheriting:
# If protection is inherited, check whether it changed
category_protection = old_parent.effective_protection_mode if old_parent else None
new_category_protection = obj.protection_parent.effective_protection_mode
# Protection of new parent is different
if category_protection != new_category_protection:
_register_change(obj, ChangeType.protection_changed)
def _created(obj, **kwargs):
if not isinstance(obj, (Event, EventNote, Attachment, Contribution, SubContribution)):
raise TypeError(f'Unexpected object: {type(obj).__name__}')
_register_change(obj, ChangeType.created)
def _restored(obj, **kwargs):
_register_change(obj, ChangeType.undeleted)
def _deleted(obj, **kwargs):
_register_deletion(obj)
def _updated(obj, **kwargs):
_register_change(obj, ChangeType.data_changed)
def _event_times_changed(sender, obj, **kwargs):
_register_change(obj, ChangeType.data_changed)
def _session_block_location_changed(sender, obj, **kwargs):
for contrib in obj.contributions:
_register_change(contrib, ChangeType.location_changed)
def _location_changed(sender, obj, **kwargs):
_register_change(obj, ChangeType.location_changed)
def _timetable_changed(entry, **kwargs):
if entry.type == TimetableEntryType.CONTRIBUTION:
_register_change(entry.object, ChangeType.data_changed)
def _category_protection_changed(sender, obj, mode, old_mode, **kwargs):
parent_mode = obj.protection_parent.effective_protection_mode if obj.protection_parent else None
if ((old_mode == ProtectionMode.inheriting and parent_mode == mode) or
(old_mode == parent_mode and mode == ProtectionMode.inheriting)):
return
_protection_changed(sender, obj, mode=mode, old_mode=old_mode, **kwargs)
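# Illustrative note (not from the original plugin): the early return above
# skips transitions that do not change the effective protection, e.g. a
# category switching from ``inheriting`` to an explicit mode equal to its
# parent's effective mode, or back again; only real changes reach
# _protection_changed and end up in the queue.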
def _protection_changed(sender, obj, **kwargs):
if not inspect(obj).persistent:
return
_register_change(obj, ChangeType.protection_changed)
def _acl_entry_changed(sender, obj, entry, old_data, **kwargs):
if not inspect(obj).persistent:
return
register = False
# entry deleted
if entry is None and old_data is not None:
register = True
# entry added
elif entry is not None and old_data is None:
register = True
# entry updated
elif entry is not None and old_data is not None:
old_access = bool(old_data['read_access'] or old_data['full_access'] or old_data['permissions'])
new_access = bool(entry.full_access or entry.read_access or entry.permissions)
register = old_access != new_access
if register:
_register_change(obj, ChangeType.protection_changed)
def _attachment_folder_deleted(folder, **kwargs):
if folder.link_type not in (LinkType.event, LinkType.contribution, LinkType.subcontribution):
return
for attachment in folder.attachments:
_register_deletion(attachment)
def _attachment_deleted(attachment, **kwargs):
if attachment.folder.link_type not in (LinkType.event, LinkType.contribution, LinkType.subcontribution):
return
_register_deletion(attachment)
def _attachment_folder_protection_changed(sender, obj, **kwargs):
if not inspect(obj).persistent:
return
if obj.link_type not in (LinkType.event, LinkType.contribution, LinkType.subcontribution):
return
for attachment in obj.attachments:
_register_change(attachment, ChangeType.protection_changed)
def _attachment_folder_acl_entry_changed(sender, obj, entry, old_data, **kwargs):
if not inspect(obj).persistent:
return
if obj.link_type not in (LinkType.event, LinkType.contribution, LinkType.subcontribution):
return
for attachment in obj.attachments:
_acl_entry_changed(type(attachment), attachment, entry, old_data)
def _apply_changes(sender, **kwargs):
if not hasattr(g, 'livesync_changes'):
return
excluded_categories = get_excluded_categories()
for ref, changes in g.livesync_changes.items():
LiveSyncQueueEntry.create(changes, ref, excluded_categories=excluded_categories)
def _register_deletion(obj):
_init_livesync_g()
g.livesync_changes[obj_ref(obj)].add(ChangeType.deleted)
def _register_change(obj, action):
if not isinstance(obj, Category):
event = obj.folder.event if isinstance(obj, Attachment) else obj.event
if event is None or event.is_deleted:
# When deleting an event we get data change signals afterwards. We can simply ignore them.
# Also, ACL changes during user merges might involve deleted objects which we also don't care about
return
_init_livesync_g()
g.livesync_changes[obj_ref(obj)].add(action)
def _init_livesync_g():
g.setdefault('livesync_changes', defaultdict(set))
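# Illustrative note (not from the original plugin code): the signal handlers
# above only accumulate changes in ``g.livesync_changes``; nothing is queued
# until the request finishes. After _updated() runs for, say, a contribution,
# the mapping looks roughly like
#     {obj_ref(contribution): {ChangeType.data_changed}}
# and _apply_changes(), connected to signals.core.after_process, turns each
# entry into a LiveSyncQueueEntry.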
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Urwid unicode character processing tables
# Copyright (C) 2004-2011 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
import re
from urwid.compat import bytes, B, ord2
SAFE_ASCII_RE = re.compile("^[ -~]*$")
SAFE_ASCII_BYTES_RE = re.compile(B("^[ -~]*$"))
_byte_encoding = None
# GENERATED DATA
# generated from
# http://www.unicode.org/Public/4.0-Update/EastAsianWidth-4.0.0.txt
widths = [
(126, 1),
(159, 0),
(687, 1),
(710, 0),
(711, 1),
(727, 0),
(733, 1),
(879, 0),
(1154, 1),
(1161, 0),
(4347, 1),
(4447, 2),
(7467, 1),
(7521, 0),
(8369, 1),
(8426, 0),
(9000, 1),
(9002, 2),
(11021, 1),
(12350, 2),
(12351, 1),
(12438, 2),
(12442, 0),
(19893, 2),
(19967, 1),
(55203, 2),
(63743, 1),
(64106, 2),
(65039, 1),
(65059, 0),
(65131, 2),
(65279, 1),
(65376, 2),
(65500, 1),
(65510, 2),
(120831, 1),
(262141, 2),
(1114109, 1),
]
# ACCESSOR FUNCTIONS
def get_width( o ):
"""Return the screen column width for unicode ordinal o."""
global widths
if o == 0xe or o == 0xf:
return 0
for num, wid in widths:
if o <= num:
return wid
return 1
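# Illustrative examples (not part of the generated table): with the ranges
# above, get_width(ord('a')) == 1 (narrow ASCII), get_width(0x4e00) == 2 (a
# CJK ideograph, covered by the (55203, 2) range), and get_width(0x0300) == 0
# (a combining accent, covered by the (879, 0) range).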
def decode_one( text, pos ):
"""
Return (ordinal at pos, next position) for UTF-8 encoded text.
"""
assert isinstance(text, bytes), text
b1 = ord2(text[pos])
if not b1 & 0x80:
return b1, pos+1
error = ord("?"), pos+1
lt = len(text)
lt = lt-pos
if lt < 2:
return error
if b1 & 0xe0 == 0xc0:
b2 = ord2(text[pos+1])
if b2 & 0xc0 != 0x80:
return error
o = ((b1&0x1f)<<6)|(b2&0x3f)
if o < 0x80:
return error
return o, pos+2
if lt < 3:
return error
if b1 & 0xf0 == 0xe0:
b2 = ord2(text[pos+1])
if b2 & 0xc0 != 0x80:
return error
b3 = ord2(text[pos+2])
if b3 & 0xc0 != 0x80:
return error
o = ((b1&0x0f)<<12)|((b2&0x3f)<<6)|(b3&0x3f)
if o < 0x800:
return error
return o, pos+3
if lt < 4:
return error
if b1 & 0xf8 == 0xf0:
b2 = ord2(text[pos+1])
if b2 & 0xc0 != 0x80:
return error
b3 = ord2(text[pos+2])
if b3 & 0xc0 != 0x80:
return error
b4 = ord2(text[pos+3])
if b4 & 0xc0 != 0x80:
return error
o = ((b1&0x07)<<18)|((b2&0x3f)<<12)|((b3&0x3f)<<6)|(b4&0x3f)
if o < 0x10000:
return error
return o, pos+4
return error
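# Illustrative example (not part of the original module): decoding the
# two-byte UTF-8 sequence for U+00E9 (0xC3 0xA9) at position 0 returns the
# ordinal and the offset of the following character:
#     decode_one(b'\xc3\xa9', 0)  ->  (0xe9, 2)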
def decode_one_uni(text, i):
"""
decode_one implementation for unicode strings
"""
return ord(text[i]), i+1
def decode_one_right(text, pos):
"""
Return (ordinal at pos, next position) for UTF-8 encoded text.
pos is assumed to be on the trailing byte of a utf-8 sequence.
"""
assert isinstance(text, bytes), text
error = ord("?"), pos-1
p = pos
while p >= 0:
if ord2(text[p])&0xc0 != 0x80:
o, next = decode_one( text, p )
return o, p-1
p -=1
if p == pos-4:
return error
def set_byte_encoding(enc):
assert enc in ('utf8', 'narrow', 'wide')
global _byte_encoding
_byte_encoding = enc
def get_byte_encoding():
return _byte_encoding
def calc_text_pos(text, start_offs, end_offs, pref_col):
"""
Calculate the closest position to the screen column pref_col in text
where start_offs is the offset into text assumed to be screen column 0
and end_offs is the end of the range to search.
text may be unicode or a byte string in the target _byte_encoding
Returns (position, actual_col).
"""
assert start_offs <= end_offs, repr((start_offs, end_offs))
utfs = isinstance(text, bytes) and _byte_encoding == "utf8"
unis = not isinstance(text, bytes)
if unis or utfs:
decode = [decode_one, decode_one_uni][unis]
i = start_offs
sc = 0
n = 1 # number to advance by
while i < end_offs:
o, n = decode(text, i)
w = get_width(o)
if w+sc > pref_col:
return i, sc
i = n
sc += w
return i, sc
assert type(text) == bytes, repr(text)
# "wide" and "narrow"
i = start_offs+pref_col
if i >= end_offs:
return end_offs, end_offs-start_offs
if _byte_encoding == "wide":
if within_double_byte(text, start_offs, i) == 2:
i -= 1
return i, i-start_offs
def calc_width(text, start_offs, end_offs):
"""
Return the screen column width of text between start_offs and end_offs.
text may be unicode or a byte string in the target _byte_encoding
Some characters are wide (take two columns) and others affect the
previous character (take zero columns). Use the widths table above
to calculate the screen column width of text[start_offs:end_offs]
"""
assert start_offs <= end_offs, repr((start_offs, end_offs))
utfs = isinstance(text, bytes) and _byte_encoding == "utf8"
unis = not isinstance(text, bytes)
if (unis and not SAFE_ASCII_RE.match(text)
) or (utfs and not SAFE_ASCII_BYTES_RE.match(text)):
decode = [decode_one, decode_one_uni][unis]
i = start_offs
sc = 0
n = 1 # number to advance by
while i < end_offs:
o, n = decode(text, i)
w = get_width(o)
i = n
sc += w
return sc
# "wide", "narrow" or all printable ASCII, just return the character count
return end_offs - start_offs
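# Illustrative example (not part of the original module): for a unicode string
# mixing a narrow and a wide character, e.g. u'a\u4e00', the widths table above
# gives calc_width(u'a\u4e00', 0, 2) == 3 (one column plus two columns).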
def is_wide_char(text, offs):
"""
Test if the character at offs within text is wide.
text may be unicode or a byte string in the target _byte_encoding
"""
if isinstance(text, str):
o = ord(text[offs])
return get_width(o) == 2
assert isinstance(text, bytes)
if _byte_encoding == "utf8":
o, n = decode_one(text, offs)
return get_width(o) == 2
if _byte_encoding == "wide":
return within_double_byte(text, offs, offs) == 1
return False
def move_prev_char(text, start_offs, end_offs):
"""
Return the position of the character before end_offs.
"""
assert start_offs < end_offs
if isinstance(text, str):
return end_offs-1
assert isinstance(text, bytes)
if _byte_encoding == "utf8":
o = end_offs-1
while ord2(text[o])&0xc0 == 0x80:
o -= 1
return o
if _byte_encoding == "wide" and within_double_byte(text,
start_offs, end_offs-1) == 2:
return end_offs-2
return end_offs-1
def move_next_char(text, start_offs, end_offs):
"""
Return the position of the character after start_offs.
"""
assert start_offs < end_offs
if isinstance(text, str):
return start_offs+1
assert isinstance(text, bytes)
if _byte_encoding == "utf8":
o = start_offs+1
while o<end_offs and ord2(text[o])&0xc0 == 0x80:
o += 1
return o
if _byte_encoding == "wide" and within_double_byte(text,
start_offs, start_offs) == 1:
return start_offs +2
return start_offs+1
def within_double_byte(text, line_start, pos):
"""Return whether pos is within a double-byte encoded character.
text -- byte string in question
line_start -- offset of beginning of line (< pos)
pos -- offset in question
Return values:
0 -- not within dbe char, or double_byte_encoding == False
1 -- pos is on the 1st half of a dbe char
2 -- pos is on the 2nd half of a dbe char
"""
assert isinstance(text, bytes)
v = ord2(text[pos])
if v >= 0x40 and v < 0x7f:
# might be second half of big5, uhc or gbk encoding
if pos == line_start: return 0
if ord2(text[pos-1]) >= 0x81:
if within_double_byte(text, line_start, pos-1) == 1:
return 2
return 0
if v < 0x80: return 0
i = pos -1
while i >= line_start:
if ord2(text[i]) < 0x80:
break
i -= 1
if (pos - i) & 1:
return 1
return 2
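# Illustrative example (not part of the original module): with the "wide"
# byte encoding, a line such as b'a\xa4\xa4' (one ASCII byte followed by a
# double-byte character) gives within_double_byte(text, 0, 1) == 1 (first
# half) and within_double_byte(text, 0, 2) == 2 (second half).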
# TABLE GENERATION CODE
def process_east_asian_width():
import sys
out = []
last = None
for line in sys.stdin.readlines():
if line[:1] == "#": continue
line = line.strip()
hex,rest = line.split(";",1)
wid,rest = rest.split(" # ",1)
word1 = rest.split(" ",1)[0]
if "." in hex:
hex = hex.split("..")[1]
num = int(hex, 16)
if word1 in ("COMBINING","MODIFIER","<control>"):
l = 0
elif wid in ("W", "F"):
l = 2
else:
l = 1
if last is None:
out.append((0, l))
last = l
if last == l:
out[-1] = (num, l)
else:
out.append( (num, l) )
last = l
print("widths = [")
for o in out[1:]: # treat control characters same as ascii
print("\t%r," % (o,))
print("]")
if __name__ == "__main__":
process_east_asian_width()
|
|
#!/usr/bin/env python
from StringIO import StringIO
from unittest import TestCase, main
from collections import defaultdict
from americangut.results_utils import (
filter_mapping_file, clean_and_reformat_mapping,
count_unique_sequences_per_otu, write_bloom_fasta
)
class ResultsUtilsTests(TestCase):
def setUp(self):
reformat_mapping_testdata.seek(0)
def test_filter_mapping_file(self):
output = StringIO()
# filter to just fecal samples, keep the age and title_acronym columns
criteria = {'SIMPLE_BODY_SITE': lambda x: x == 'FECAL',
'AGE': lambda x: float(x) > 20,
'TITLE_ACRONYM': None}
filter_mapping_file(filter_mapping_testdata, output, criteria)
output.seek(0)
# parse output
test_mapping = [l.strip().split('\t') for l in output]
# fish out the header and verify its sanity
test_header = test_mapping[0]
self.assertEqual(len(test_header), 4)
self.assertEqual(test_header[0], '#SampleID')
self.assertEqual(sorted(test_header), sorted(['#SampleID',
'SIMPLE_BODY_SITE',
'AGE', 'TITLE_ACRONYM']))
# check each record
test_sbs = test_header.index('SIMPLE_BODY_SITE')
test_age = test_header.index('AGE')
for l in test_mapping[1:]:
self.assertEqual(len(l), 4)
self.assertEqual(l[test_sbs], 'FECAL')
self.assertTrue(float(l[test_age]) > 20)
def test_clean_and_reformat_mapping(self):
"""Exercise the reformat mapping code, verify expected results"""
out = StringIO()
is_pgp = ['A', 'C']
clean_and_reformat_mapping(reformat_mapping_testdata, out, 'body_site',
'test', pgp_ids=is_pgp)
out.seek(0)
# verify the resulting header structure
test_mapping = [l.strip().split('\t') for l in out]
test_header = test_mapping[0]
self.assertEqual(test_header[-7:], ['IS_PGP', 'SIMPLE_BODY_SITE',
'TITLE_ACRONYM', 'TITLE_BODY_SITE',
'HMP_SITE', 'AGE_CATEGORY',
'BMI_CATEGORY'])
self.assertEqual(test_mapping[1][:], ['A', 'w00t', '43.0',
'UBERON_mucosa_of_tongue', '5',
'Yes', 'ORAL', 'test', 'test-ORAL',
'ORAL', '40s', 'Underweight'])
self.assertEqual(test_mapping[2][:], ['B', 'left', '51.0',
'UBERON:FECES', '10',
'No', 'FECAL', 'test', 'test-FECAL',
'FECAL', '50s', 'Underweight'])
self.assertEqual(test_mapping[3][:], ['C', 'right', '12.0',
'UBERON_FECES', '15',
'Yes', 'FECAL', 'test', 'test-FECAL',
'FECAL', 'Child', 'Underweight'])
self.assertEqual(test_mapping[4][:], ['E', 'stuff', '56.0',
'UBERON:SKIN', '37',
'No', 'SKIN', 'test', 'test-SKIN',
'SKIN', '50s', 'Severely obese'])
def test_clean_and_reformat_mapping_nopgp(self):
"""Exercise the reformat mapping code, verify expected results"""
out = StringIO()
clean_and_reformat_mapping(reformat_mapping_testdata, out, 'body_site',
'test')
out.seek(0)
# verify the resulting header structure
test_mapping = [l.strip().split('\t') for l in out]
test_header = test_mapping[0]
self.assertEqual(test_header[-7:], ['IS_PGP', 'SIMPLE_BODY_SITE',
'TITLE_ACRONYM', 'TITLE_BODY_SITE',
'HMP_SITE', 'AGE_CATEGORY',
'BMI_CATEGORY'])
self.assertEqual(test_mapping[1][:], ['A', 'w00t', '43.0',
'UBERON_mucosa_of_tongue', '5',
'No', 'ORAL', 'test', 'test-ORAL',
'ORAL', '40s', 'Underweight'])
self.assertEqual(test_mapping[2][:], ['B', 'left', '51.0',
'UBERON:FECES', '10',
'No', 'FECAL', 'test', 'test-FECAL',
'FECAL', '50s', 'Underweight'])
self.assertEqual(test_mapping[3][:], ['C', 'right', '12.0',
'UBERON_FECES', '15',
'No', 'FECAL', 'test', 'test-FECAL',
'FECAL', 'Child', 'Underweight'])
self.assertEqual(test_mapping[4][:], ['E', 'stuff', '56.0',
'UBERON:SKIN', '37',
'No', 'SKIN', 'test', 'test-SKIN',
'SKIN', '50s', 'Severely obese'])
def test_clean_and_reformat_mapping_allpgp(self):
"""Exercise the reformat mapping code, verify expected results"""
out = StringIO()
clean_and_reformat_mapping(reformat_mapping_testdata, out, 'body_site',
'test', pgp_ids=True)
out.seek(0)
# verify the resulting header structure
test_mapping = [l.strip().split('\t') for l in out]
test_header = test_mapping[0]
self.assertEqual(test_header[-7:], ['IS_PGP', 'SIMPLE_BODY_SITE',
'TITLE_ACRONYM', 'TITLE_BODY_SITE',
'HMP_SITE', 'AGE_CATEGORY',
'BMI_CATEGORY'])
self.assertEqual(test_mapping[1][:], ['A', 'w00t', '43.0',
'UBERON_mucosa_of_tongue', '5',
'Yes', 'ORAL', 'test', 'test-ORAL',
'ORAL', '40s', 'Underweight'])
self.assertEqual(test_mapping[2][:], ['B', 'left', '51.0',
'UBERON:FECES', '10',
'Yes', 'FECAL', 'test', 'test-FECAL',
'FECAL', '50s', 'Underweight'])
self.assertEqual(test_mapping[3][:], ['C', 'right', '12.0',
'UBERON_FECES', '15',
'Yes', 'FECAL', 'test', 'test-FECAL',
'FECAL', 'Child', 'Underweight'])
self.assertEqual(test_mapping[4][:], ['E', 'stuff', '56.0',
'UBERON:SKIN', '37',
'Yes', 'SKIN', 'test', 'test-SKIN',
'SKIN', '50s', 'Severely obese'])
def test_count_unique_sequences_per_otu(self):
input_fasta = StringIO(test_fasta)
otu_map = StringIO(test_otu_map)
otu_ids = set(['otu1', 'otu2'])
result = count_unique_sequences_per_otu(otu_ids, otu_map, input_fasta)
expected = {x:defaultdict(int) for x in otu_ids}
expected['otu1']['ATCG'] = 3
expected['otu2']['AT'] = 2
expected['otu2']['A'] = 1
self.assertEqual(expected, result)
def test_write_bloom_fasta(self):
otu_ids = set(['otu1', 'otu2'])
unique_counts = {x:defaultdict(int) for x in otu_ids}
unique_counts['otu1']['ATCG'] = 3
unique_counts['otu2']['AT'] = 2
unique_counts['otu2']['A'] = 1
result = StringIO()
write_bloom_fasta(unique_counts, result, 0.67)
result.seek(0)
self.assertEqual(result.read(), '>otu1_1\nATCG\n')
filter_mapping_testdata = StringIO(
"""#SampleID COUNTRY TITLE_ACRONYM AGE SIMPLE_BODY_SITE
A United States of America AGP 43.0 ORAL
B United States of America foo 51.0 FECAL
C United States of America bar 12.0 FECAL
D United States of America AGP 32.0 SKIN
E United States of America AGP 56.0 FECAL
""")
reformat_mapping_testdata = StringIO(
"""#SampleID COUNTRY AGE BODY_SITE BMI
A GAZ:w00t 43.0 UBERON_mucosa_of_tongue 5
B GAZ:left 51.0 UBERON:FECES 10
C GAZ:right 12.0 UBERON_FECES 15
D GAZ:stuff 32.0 unknown 26
E GAZ:stuff 56.0 UBERON:SKIN 37
""")
# Inputs for count_unique_sequences_per_otu
test_fasta = """>sample1_1 yea
ATCG
>sample1_2 awyea
ATCG
>sample2_1 dumb
ATCG
>sample2_2 dummy
AT
>sample2_3 wow
AT
>sample2_4 wowagain
A
>sample9_1
ATGC
>sample9_2
A
"""
test_otu_map = """otu1 sample1_1 sample1_2 sample2_1
otu2 sample2_2 sample2_3 sample2_4
otu3 sample9_1 sample9_2
"""
if __name__ == '__main__':
main()
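# A minimal sketch of the behaviour these tests appear to exercise (the
# helper functions are imported from elsewhere; variable names here are
# illustrative only): count_unique_sequences_per_otu() tallies, per OTU of
# interest, how often each exact sequence occurs, and write_bloom_fasta()
# then emits only the sequences whose relative abundance within their OTU
# meets the threshold.
#
#   otu_ids = {'otu1', 'otu2'}
#   counts = count_unique_sequences_per_otu(otu_ids, otu_map_fh, fasta_fh)
#   # With the data above, 'ATCG' is 3/3 of otu1 while 'AT' is only 2/3 of
#   # otu2, so a 0.67 threshold keeps only otu1's 'ATCG':
#   write_bloom_fasta(counts, out_fh, 0.67)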
|
|
from __future__ import division, absolute_import, print_function
__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
'stack', 'vstack']
import functools
import operator
import warnings
from . import numeric as _nx
from . import overrides
from ._asarray import array, asanyarray
from .multiarray import normalize_axis_index
from . import fromnumeric as _from_nx
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
def _atleast_1d_dispatcher(*arys):
return arys
@array_function_dispatch(_atleast_1d_dispatcher)
def atleast_1d(*arys):
"""
Convert inputs to arrays with at least one dimension.
Scalar inputs are converted to 1-dimensional arrays, whilst
higher-dimensional inputs are preserved.
Parameters
----------
arys1, arys2, ... : array_like
One or more input arrays.
Returns
-------
ret : ndarray
An array, or list of arrays, each with ``a.ndim >= 1``.
Copies are made only if necessary.
See Also
--------
atleast_2d, atleast_3d
Examples
--------
>>> np.atleast_1d(1.0)
array([1.])
>>> x = np.arange(9.0).reshape(3,3)
>>> np.atleast_1d(x)
array([[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]])
>>> np.atleast_1d(x) is x
True
>>> np.atleast_1d(1, [3, 4])
[array([1]), array([3, 4])]
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if ary.ndim == 0:
result = ary.reshape(1)
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def _atleast_2d_dispatcher(*arys):
return arys
@array_function_dispatch(_atleast_2d_dispatcher)
def atleast_2d(*arys):
"""
View inputs as arrays with at least two dimensions.
Parameters
----------
arys1, arys2, ... : array_like
One or more array-like sequences. Non-array inputs are converted
to arrays. Arrays that already have two or more dimensions are
preserved.
Returns
-------
res, res2, ... : ndarray
An array, or list of arrays, each with ``a.ndim >= 2``.
Copies are avoided where possible, and views with two or more
dimensions are returned.
See Also
--------
atleast_1d, atleast_3d
Examples
--------
>>> np.atleast_2d(3.0)
array([[3.]])
>>> x = np.arange(3.0)
>>> np.atleast_2d(x)
array([[0., 1., 2.]])
>>> np.atleast_2d(x).base is x
True
>>> np.atleast_2d(1, [1, 2], [[1, 2]])
[array([[1]]), array([[1, 2]]), array([[1, 2]])]
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if ary.ndim == 0:
result = ary.reshape(1, 1)
elif ary.ndim == 1:
result = ary[_nx.newaxis, :]
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def _atleast_3d_dispatcher(*arys):
return arys
@array_function_dispatch(_atleast_3d_dispatcher)
def atleast_3d(*arys):
"""
View inputs as arrays with at least three dimensions.
Parameters
----------
arys1, arys2, ... : array_like
One or more array-like sequences. Non-array inputs are converted to
arrays. Arrays that already have three or more dimensions are
preserved.
Returns
-------
res1, res2, ... : ndarray
An array, or list of arrays, each with ``a.ndim >= 3``. Copies are
avoided where possible, and views with three or more dimensions are
returned. For example, a 1-D array of shape ``(N,)`` becomes a view
of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a
view of shape ``(M, N, 1)``.
See Also
--------
atleast_1d, atleast_2d
Examples
--------
>>> np.atleast_3d(3.0)
array([[[3.]]])
>>> x = np.arange(3.0)
>>> np.atleast_3d(x).shape
(1, 3, 1)
>>> x = np.arange(12.0).reshape(4,3)
>>> np.atleast_3d(x).shape
(4, 3, 1)
>>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself
True
>>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
... print(arr, arr.shape) # doctest: +SKIP
...
[[[1]
[2]]] (1, 2, 1)
[[[1]
[2]]] (1, 2, 1)
[[[1 2]]] (1, 1, 2)
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if ary.ndim == 0:
result = ary.reshape(1, 1, 1)
elif ary.ndim == 1:
result = ary[_nx.newaxis, :, _nx.newaxis]
elif ary.ndim == 2:
result = ary[:, :, _nx.newaxis]
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def _arrays_for_stack_dispatcher(arrays, stacklevel=4):
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
warnings.warn('arrays to stack must be passed as a "sequence" type '
'such as list or tuple. Support for non-sequence '
'iterables such as generators is deprecated as of '
'NumPy 1.16 and will raise an error in the future.',
FutureWarning, stacklevel=stacklevel)
return ()
return arrays
def _vhstack_dispatcher(tup):
return _arrays_for_stack_dispatcher(tup)
@array_function_dispatch(_vhstack_dispatcher)
def vstack(tup):
"""
Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after 1-D arrays
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
`vsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 2-D.
See Also
--------
stack : Join a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
concatenate : Join a sequence of arrays along an existing axis.
vsplit : Split array into a list of multiple sub-arrays vertically.
block : Assemble arrays from blocks.
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.vstack((a,b))
array([[1, 2, 3],
[2, 3, 4]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[2], [3], [4]])
>>> np.vstack((a,b))
array([[1],
[2],
[3],
[2],
[3],
[4]])
"""
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(tup, stacklevel=2)
arrs = atleast_2d(*tup)
if not isinstance(arrs, list):
arrs = [arrs]
return _nx.concatenate(arrs, 0)
@array_function_dispatch(_vhstack_dispatcher)
def hstack(tup):
"""
Stack arrays in sequence horizontally (column wise).
This is equivalent to concatenation along the second axis, except for 1-D
arrays where it concatenates along the first axis. Rebuilds arrays divided
by `hsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the second axis,
except 1-D arrays which can be any length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
stack : Join a sequence of arrays along a new axis.
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third axis).
concatenate : Join a sequence of arrays along an existing axis.
hsplit : Split array along second axis.
block : Assemble arrays from blocks.
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.hstack((a,b))
array([1, 2, 3, 2, 3, 4])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.hstack((a,b))
array([[1, 2],
[2, 3],
[3, 4]])
"""
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(tup, stacklevel=2)
arrs = atleast_1d(*tup)
if not isinstance(arrs, list):
arrs = [arrs]
# As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
if arrs and arrs[0].ndim == 1:
return _nx.concatenate(arrs, 0)
else:
return _nx.concatenate(arrs, 1)
def _stack_dispatcher(arrays, axis=None, out=None):
arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6)
if out is not None:
# optimize for the typical case where only arrays is provided
arrays = list(arrays)
arrays.append(out)
return arrays
@array_function_dispatch(_stack_dispatcher)
def stack(arrays, axis=0, out=None):
"""
Join a sequence of arrays along a new axis.
The ``axis`` parameter specifies the index of the new axis in the
dimensions of the result. For example, if ``axis=0`` it will be the first
dimension and if ``axis=-1`` it will be the last dimension.
.. versionadded:: 1.10.0
Parameters
----------
arrays : sequence of array_like
Each array must have the same shape.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what stack would have returned if no
out argument were specified.
Returns
-------
stacked : ndarray
The stacked array has one more dimension than the input arrays.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
split : Split array into a list of multiple sub-arrays of equal size.
block : Assemble arrays from blocks.
Examples
--------
>>> arrays = [np.random.randn(3, 4) for _ in range(10)]
>>> np.stack(arrays, axis=0).shape
(10, 3, 4)
>>> np.stack(arrays, axis=1).shape
(3, 10, 4)
>>> np.stack(arrays, axis=2).shape
(3, 4, 10)
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.stack((a, b))
array([[1, 2, 3],
[2, 3, 4]])
>>> np.stack((a, b), axis=-1)
array([[1, 2],
[2, 3],
[3, 4]])
"""
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(arrays, stacklevel=2)
arrays = [asanyarray(arr) for arr in arrays]
if not arrays:
raise ValueError('need at least one array to stack')
shapes = {arr.shape for arr in arrays}
if len(shapes) != 1:
raise ValueError('all input arrays must have the same shape')
result_ndim = arrays[0].ndim + 1
axis = normalize_axis_index(axis, result_ndim)
sl = (slice(None),) * axis + (_nx.newaxis,)
expanded_arrays = [arr[sl] for arr in arrays]
return _nx.concatenate(expanded_arrays, axis=axis, out=out)
# Internal functions to eliminate the overhead of repeated dispatch in one of
# the two possible paths inside np.block.
# Use getattr to protect against __array_function__ being disabled.
_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
_concatenate = getattr(_from_nx.concatenate, '__wrapped__', _from_nx.concatenate)
def _block_format_index(index):
"""
Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
"""
idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
return 'arrays' + idx_str
def _block_check_depths_match(arrays, parent_index=[]):
"""
Recursive function checking that the depths of nested lists in `arrays`
all match. Mismatch raises a ValueError as described in the block
docstring below.
The entire index (rather than just the depth) needs to be calculated
for each innermost list, in case an error needs to be raised, so that
the index of the offending list can be printed as part of the error.
Parameters
----------
arrays : nested list of arrays
The arrays to check
parent_index : list of int
The full index of `arrays` within the nested lists passed to
`_block_check_depths_match` at the top of the recursion.
Returns
-------
first_index : list of int
The full index of an element from the bottom of the nesting in
`arrays`. If any element at the bottom is an empty list, this will
refer to it, and the last index along the empty axis will be `None`.
max_arr_ndim : int
The maximum of the ndims of the arrays nested in `arrays`.
final_size: int
        The number of elements in the final array. This is used to motivate
        the choice of algorithm, based on benchmarking wisdom.
"""
if type(arrays) is tuple:
# not strictly necessary, but saves us from:
# - more than one way to do things - no point treating tuples like
# lists
# - horribly confusing behaviour that results when tuples are
# treated like ndarray
raise TypeError(
'{} is a tuple. '
'Only lists can be used to arrange blocks, and np.block does '
'not allow implicit conversion from tuple to ndarray.'.format(
_block_format_index(parent_index)
)
)
elif type(arrays) is list and len(arrays) > 0:
idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
for i, arr in enumerate(arrays))
first_index, max_arr_ndim, final_size = next(idxs_ndims)
for index, ndim, size in idxs_ndims:
final_size += size
if ndim > max_arr_ndim:
max_arr_ndim = ndim
if len(index) != len(first_index):
raise ValueError(
"List depths are mismatched. First element was at depth "
"{}, but there is an element at depth {} ({})".format(
len(first_index),
len(index),
_block_format_index(index)
)
)
# propagate our flag that indicates an empty list at the bottom
if index[-1] is None:
first_index = index
return first_index, max_arr_ndim, final_size
elif type(arrays) is list and len(arrays) == 0:
# We've 'bottomed out' on an empty list
return parent_index + [None], 0, 0
else:
# We've 'bottomed out' - arrays is either a scalar or an array
size = _size(arrays)
return parent_index, _ndim(arrays), size
def _atleast_nd(a, ndim):
# Ensures `a` has at least `ndim` dimensions by prepending
# ones to `a.shape` as necessary
return array(a, ndmin=ndim, copy=False, subok=True)
def _accumulate(values):
# Helper function because Python 2.7 doesn't have
# itertools.accumulate
value = 0
accumulated = []
for v in values:
value += v
accumulated.append(value)
return accumulated
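# Note: on Python 3 the helper above is equivalent to
# ``list(itertools.accumulate(values))``; the explicit loop is kept only for
# Python 2.7 compatibility. For example, _accumulate([2, 3, 4]) == [2, 5, 9],
# i.e. the running offsets consumed by _concatenate_shapes below.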
def _concatenate_shapes(shapes, axis):
"""Given array shapes, return the resulting shape and slices prefixes.
    These help in nested concatenation.
Returns
-------
shape: tuple of int
This tuple satisfies:
```
        shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)
shape == concatenate(arrs, axis).shape
```
slice_prefixes: tuple of (slice(start, end), )
For a list of arrays being concatenated, this returns the slice
in the larger array at axis that needs to be sliced into.
For example, the following holds:
```
ret = concatenate([a, b, c], axis)
        _, (sl_a, sl_b, sl_c) = _concatenate_shapes([a.shape, b.shape, c.shape], axis)
ret[(slice(None),) * axis + sl_a] == a
ret[(slice(None),) * axis + sl_b] == b
ret[(slice(None),) * axis + sl_c] == c
```
These are called slice prefixes since they are used in the recursive
blocking algorithm to compute the left-most slices during the
        recursion. Therefore, they must be prepended to the rest of the slice
        that was computed deeper in the recursion.
        These are returned as tuples to ensure that they can quickly be added
        to an existing slice tuple without creating a new tuple every time.
"""
# Cache a result that will be reused.
shape_at_axis = [shape[axis] for shape in shapes]
# Take a shape, any shape
first_shape = shapes[0]
first_shape_pre = first_shape[:axis]
first_shape_post = first_shape[axis+1:]
if any(shape[:axis] != first_shape_pre or
shape[axis+1:] != first_shape_post for shape in shapes):
raise ValueError(
'Mismatched array shapes in block along axis {}.'.format(axis))
shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:])
offsets_at_axis = _accumulate(shape_at_axis)
slice_prefixes = [(slice(start, end),)
for start, end in zip([0] + offsets_at_axis,
offsets_at_axis)]
return shape, slice_prefixes
def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
"""
Returns the shape of the final array, along with a list
of slices and a list of arrays that can be used for assignment inside the
new array
Parameters
----------
arrays : nested list of arrays
The arrays to check
max_depth : list of int
The number of nested lists
result_ndim: int
        The number of dimensions in the final array.
Returns
-------
shape : tuple of int
The shape that the final array will take on.
slices: list of tuple of slices
The slices into the full array required for assignment. These are
        required to be prepended with ``(Ellipsis, )`` to obtain the correct
final index.
arrays: list of ndarray
The data to assign to each slice of the full array
"""
if depth < max_depth:
shapes, slices, arrays = zip(
*[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
for arr in arrays])
axis = result_ndim - max_depth + depth
shape, slice_prefixes = _concatenate_shapes(shapes, axis)
# Prepend the slice prefix and flatten the slices
slices = [slice_prefix + the_slice
for slice_prefix, inner_slices in zip(slice_prefixes, slices)
for the_slice in inner_slices]
# Flatten the array list
arrays = functools.reduce(operator.add, arrays)
return shape, slices, arrays
else:
# We've 'bottomed out' - arrays is either a scalar or an array
# type(arrays) is not list
# Return the slice and the array inside a list to be consistent with
# the recursive case.
arr = _atleast_nd(arrays, result_ndim)
return arr.shape, [()], [arr]
def _block(arrays, max_depth, result_ndim, depth=0):
"""
Internal implementation of block based on repeated concatenation.
`arrays` is the argument passed to
block. `max_depth` is the depth of nested lists within `arrays` and
`result_ndim` is the greatest of the dimensions of the arrays in
`arrays` and the depth of the lists in `arrays` (see block docstring
for details).
"""
if depth < max_depth:
arrs = [_block(arr, max_depth, result_ndim, depth+1)
for arr in arrays]
return _concatenate(arrs, axis=-(max_depth-depth))
else:
# We've 'bottomed out' - arrays is either a scalar or an array
# type(arrays) is not list
return _atleast_nd(arrays, result_ndim)
def _block_dispatcher(arrays):
# Use type(...) is list to match the behavior of np.block(), which special
# cases list specifically rather than allowing for generic iterables or
# tuple. Also, we know that list.__array_function__ will never exist.
if type(arrays) is list:
for subarrays in arrays:
for subarray in _block_dispatcher(subarrays):
yield subarray
else:
yield arrays
@array_function_dispatch(_block_dispatcher)
def block(arrays):
"""
Assemble an nd-array from nested lists of blocks.
Blocks in the innermost lists are concatenated (see `concatenate`) along
the last dimension (-1), then these are concatenated along the
second-last dimension (-2), and so on until the outermost list is reached.
Blocks can be of any dimension, but will not be broadcasted using the normal
rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``
the same for all blocks. This is primarily useful for working with scalars,
and means that code like ``np.block([v, 1])`` is valid, where
``v.ndim == 1``.
When the nested list is two levels deep, this allows block matrices to be
constructed from their components.
.. versionadded:: 1.13.0
Parameters
----------
arrays : nested list of array_like or scalars (but not tuples)
If passed a single ndarray or scalar (a nested list of depth 0), this
is returned unmodified (and not copied).
Elements shapes must match along the appropriate axes (without
broadcasting), but leading 1s will be prepended to the shape as
necessary to make the dimensions match.
Returns
-------
block_array : ndarray
The array assembled from the given blocks.
The dimensionality of the output is equal to the greatest of:
* the dimensionality of all the inputs
* the depth to which the input list is nested
Raises
------
ValueError
* If list depths are mismatched - for instance, ``[[a, b], c]`` is
illegal, and should be spelt ``[[a, b], [c]]``
* If lists are empty - for instance, ``[[a, b], []]``
See Also
--------
concatenate : Join a sequence of arrays together.
stack : Stack arrays in sequence along a new dimension.
hstack : Stack arrays in sequence horizontally (column wise).
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
vsplit : Split array into a list of multiple sub-arrays vertically.
Notes
-----
When called with only scalars, ``np.block`` is equivalent to an ndarray
call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to
``np.array([[1, 2], [3, 4]])``.
This function does not enforce that the blocks lie on a fixed grid.
``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form::
AAAbb
AAAbb
cccDD
But is also allowed to produce, for some ``a, b, c, d``::
AAAbb
AAAbb
cDDDD
Since concatenation happens along the last axis first, `block` is _not_
capable of producing the following directly::
AAAbb
cccbb
cccDD
Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is
equivalent to ``np.block([[A, B, ...], [p, q, ...]])``.
Examples
--------
The most common use of this function is to build a block matrix
>>> A = np.eye(2) * 2
>>> B = np.eye(3) * 3
>>> np.block([
... [A, np.zeros((2, 3))],
... [np.ones((3, 2)), B ]
... ])
array([[2., 0., 0., 0., 0.],
[0., 2., 0., 0., 0.],
[1., 1., 3., 0., 0.],
[1., 1., 0., 3., 0.],
[1., 1., 0., 0., 3.]])
With a list of depth 1, `block` can be used as `hstack`
>>> np.block([1, 2, 3]) # hstack([1, 2, 3])
array([1, 2, 3])
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.block([a, b, 10]) # hstack([a, b, 10])
array([ 1, 2, 3, 2, 3, 4, 10])
>>> A = np.ones((2, 2), int)
>>> B = 2 * A
>>> np.block([A, B]) # hstack([A, B])
array([[1, 1, 2, 2],
[1, 1, 2, 2]])
With a list of depth 2, `block` can be used in place of `vstack`:
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.block([[a], [b]]) # vstack([a, b])
array([[1, 2, 3],
[2, 3, 4]])
>>> A = np.ones((2, 2), int)
>>> B = 2 * A
>>> np.block([[A], [B]]) # vstack([A, B])
array([[1, 1],
[1, 1],
[2, 2],
[2, 2]])
It can also be used in places of `atleast_1d` and `atleast_2d`
>>> a = np.array(0)
>>> b = np.array([1])
>>> np.block([a]) # atleast_1d(a)
array([0])
>>> np.block([b]) # atleast_1d(b)
array([1])
>>> np.block([[a]]) # atleast_2d(a)
array([[0]])
>>> np.block([[b]]) # atleast_2d(b)
array([[1]])
"""
arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)
# It was found through benchmarking that making an array of final size
# around 256x256 was faster by straight concatenation on a
# i7-7700HQ processor and dual channel ram 2400MHz.
# It didn't seem to matter heavily on the dtype used.
#
# A 2D array using repeated concatenation requires 2 copies of the array.
#
# The fastest algorithm will depend on the ratio of CPU power to memory
# speed.
# One can monitor the results of the benchmark
# https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
# to tune this parameter until a C version of the `_block_info_recursion`
# algorithm is implemented which would likely be faster than the python
# version.
if list_ndim * final_size > (2 * 512 * 512):
return _block_slicing(arrays, list_ndim, result_ndim)
else:
return _block_concatenate(arrays, list_ndim, result_ndim)
# These helper functions are mostly used for testing.
# They allow us to write tests that directly call `_block_slicing`
# or `_block_concatenate` without blocking large arrays to force the wisdom
# to trigger the desired path.
def _block_setup(arrays):
"""
Returns
(`arrays`, list_ndim, result_ndim, final_size)
"""
bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
list_ndim = len(bottom_index)
if bottom_index and bottom_index[-1] is None:
raise ValueError(
'List at {} cannot be empty'.format(
_block_format_index(bottom_index)
)
)
result_ndim = max(arr_ndim, list_ndim)
return arrays, list_ndim, result_ndim, final_size
def _block_slicing(arrays, list_ndim, result_ndim):
shape, slices, arrays = _block_info_recursion(
arrays, list_ndim, result_ndim)
dtype = _nx.result_type(*[arr.dtype for arr in arrays])
# Test preferring F only in the case that all input arrays are F
F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
order = 'F' if F_order and not C_order else 'C'
result = _nx.empty(shape=shape, dtype=dtype, order=order)
# Note: In a c implementation, the function
# PyArray_CreateMultiSortedStridePerm could be used for more advanced
# guessing of the desired order.
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result
def _block_concatenate(arrays, list_ndim, result_ndim):
result = _block(arrays, list_ndim, result_ndim)
if list_ndim == 0:
# Catch an edge case where _block returns a view because
# `arrays` is a single numpy array and not a list of numpy arrays.
# This might copy scalars or lists twice, but this isn't a likely
# usecase for those interested in performance
result = result.copy()
return result
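# A minimal usage sketch of the testing helpers above (illustrative only; in
# normal use np.block() picks between them automatically via the benchmarked
# size heuristic):
#
#   arrays = [[np.zeros((2, 2)), np.ones((2, 3))],
#             [np.ones((3, 2)),  np.zeros((3, 3))]]
#   prepared, list_ndim, result_ndim, final_size = _block_setup(arrays)
#   small = _block_concatenate(prepared, list_ndim, result_ndim)
#   large = _block_slicing(prepared, list_ndim, result_ndim)
#   # Both paths assemble the same (5, 5) block matrix.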
|
|
from sympy import (Symbol, zeta, nan, Rational, Float, pi, dirichlet_eta, log,
zoo, expand_func, polylog, lerchphi, S, exp, sqrt, I,
exp_polar, polar_lift, O, stieltjes, Abs, Sum, oo)
from sympy.core.function import ArgumentIndexError
from sympy.functions.combinatorial.numbers import bernoulli, factorial
from sympy.utilities.pytest import raises
from sympy.utilities.randtest import (test_derivative_numerically as td,
random_complex_number as randcplx, verify_numerically as tn)
x = Symbol('x')
a = Symbol('a')
b = Symbol('b', negative=True)
z = Symbol('z')
s = Symbol('s')
def test_zeta_eval():
assert zeta(nan) is nan
assert zeta(x, nan) is nan
assert zeta(0) == Rational(-1, 2)
assert zeta(0, x) == S.Half - x
assert zeta(0, b) == S.Half - b
assert zeta(1) is zoo
assert zeta(1, 2) is zoo
assert zeta(1, -7) is zoo
assert zeta(1, x) is zoo
assert zeta(2, 1) == pi**2/6
assert zeta(2) == pi**2/6
assert zeta(4) == pi**4/90
assert zeta(6) == pi**6/945
assert zeta(2, 2) == pi**2/6 - 1
assert zeta(4, 3) == pi**4/90 - Rational(17, 16)
assert zeta(6, 4) == pi**6/945 - Rational(47449, 46656)
assert zeta(2, -2) == pi**2/6 + Rational(5, 4)
assert zeta(4, -3) == pi**4/90 + Rational(1393, 1296)
assert zeta(6, -4) == pi**6/945 + Rational(3037465, 2985984)
assert zeta(oo) == 1
assert zeta(-1) == Rational(-1, 12)
assert zeta(-2) == 0
assert zeta(-3) == Rational(1, 120)
assert zeta(-4) == 0
assert zeta(-5) == Rational(-1, 252)
assert zeta(-1, 3) == Rational(-37, 12)
assert zeta(-1, 7) == Rational(-253, 12)
assert zeta(-1, -4) == Rational(119, 12)
assert zeta(-1, -9) == Rational(539, 12)
assert zeta(-4, 3) == -17
assert zeta(-4, -8) == 8772
assert zeta(0, 1) == Rational(-1, 2)
assert zeta(0, -1) == Rational(3, 2)
assert zeta(0, 2) == Rational(-3, 2)
assert zeta(0, -2) == Rational(5, 2)
assert zeta(
3).evalf(20).epsilon_eq(Float("1.2020569031595942854", 20), 1e-19)
def test_zeta_series():
assert zeta(x, a).series(a, 0, 2) == \
zeta(x, 0) - x*a*zeta(x + 1, 0) + O(a**2)
def test_dirichlet_eta_eval():
assert dirichlet_eta(0) == S.Half
assert dirichlet_eta(-1) == Rational(1, 4)
assert dirichlet_eta(1) == log(2)
assert dirichlet_eta(2) == pi**2/12
assert dirichlet_eta(4) == pi**4*Rational(7, 720)
def test_rewriting():
assert dirichlet_eta(x).rewrite(zeta) == (1 - 2**(1 - x))*zeta(x)
assert zeta(x).rewrite(dirichlet_eta) == dirichlet_eta(x)/(1 - 2**(1 - x))
assert zeta(x).rewrite(dirichlet_eta, a=2) == zeta(x)
assert tn(dirichlet_eta(x), dirichlet_eta(x).rewrite(zeta), x)
assert tn(zeta(x), zeta(x).rewrite(dirichlet_eta), x)
assert zeta(x, a).rewrite(lerchphi) == lerchphi(1, x, a)
assert polylog(s, z).rewrite(lerchphi) == lerchphi(z, s, 1)*z
assert lerchphi(1, x, a).rewrite(zeta) == zeta(x, a)
assert z*lerchphi(z, s, 1).rewrite(polylog) == polylog(s, z)
def test_derivatives():
from sympy import Derivative
assert zeta(x, a).diff(x) == Derivative(zeta(x, a), x)
assert zeta(x, a).diff(a) == -x*zeta(x + 1, a)
assert lerchphi(
z, s, a).diff(z) == (lerchphi(z, s - 1, a) - a*lerchphi(z, s, a))/z
assert lerchphi(z, s, a).diff(a) == -s*lerchphi(z, s + 1, a)
assert polylog(s, z).diff(z) == polylog(s - 1, z)/z
b = randcplx()
c = randcplx()
assert td(zeta(b, x), x)
assert td(polylog(b, z), z)
assert td(lerchphi(c, b, x), x)
assert td(lerchphi(x, b, c), x)
raises(ArgumentIndexError, lambda: lerchphi(c, b, x).fdiff(2))
raises(ArgumentIndexError, lambda: lerchphi(c, b, x).fdiff(4))
raises(ArgumentIndexError, lambda: polylog(b, z).fdiff(1))
raises(ArgumentIndexError, lambda: polylog(b, z).fdiff(3))
def myexpand(func, target):
expanded = expand_func(func)
if target is not None:
return expanded == target
if expanded == func: # it didn't expand
return False
# check to see that the expanded and original evaluate to the same value
subs = {}
for a in func.free_symbols:
subs[a] = randcplx()
return abs(func.subs(subs).n()
- expanded.replace(exp_polar, exp).subs(subs).n()) < 1e-10
def test_polylog_expansion():
from sympy import log
assert polylog(s, 0) == 0
assert polylog(s, 1) == zeta(s)
assert polylog(s, -1) == -dirichlet_eta(s)
assert polylog(s, exp_polar(I*pi*Rational(4, 3))) == polylog(s, exp(I*pi*Rational(4, 3)))
assert polylog(s, exp_polar(I*pi)/3) == polylog(s, exp(I*pi)/3)
assert myexpand(polylog(1, z), -log(1 - z))
assert myexpand(polylog(0, z), z/(1 - z))
assert myexpand(polylog(-1, z), z/(1 - z)**2)
assert ((1-z)**3 * expand_func(polylog(-2, z))).simplify() == z*(1 + z)
assert myexpand(polylog(-5, z), None)
def test_issue_8404():
i = Symbol('i', integer=True)
assert Abs(Sum(1/(3*i + 1)**2, (i, 0, S.Infinity)).doit().n(4)
- 1.122) < 0.001
def test_polylog_values():
from sympy.utilities.randtest import verify_numerically as tn
assert polylog(2, 2) == pi**2/4 - I*pi*log(2)
assert polylog(2, S.Half) == pi**2/12 - log(2)**2/2
for z in [S.Half, 2, (sqrt(5)-1)/2, -(sqrt(5)-1)/2, -(sqrt(5)+1)/2, (3-sqrt(5))/2]:
assert Abs(polylog(2, z).evalf() - polylog(2, z, evaluate=False).evalf()) < 1e-15
z = Symbol("z")
for s in [-1, 0]:
for _ in range(10):
assert tn(polylog(s, z), polylog(s, z, evaluate=False), z,
a=-3, b=-2, c=S.Half, d=2)
assert tn(polylog(s, z), polylog(s, z, evaluate=False), z,
a=2, b=-2, c=5, d=2)
from sympy import Integral
assert polylog(0, Integral(1, (x, 0, 1))) == -S.Half
def test_lerchphi_expansion():
assert myexpand(lerchphi(1, s, a), zeta(s, a))
assert myexpand(lerchphi(z, s, 1), polylog(s, z)/z)
# direct summation
assert myexpand(lerchphi(z, -1, a), a/(1 - z) + z/(1 - z)**2)
assert myexpand(lerchphi(z, -3, a), None)
# polylog reduction
assert myexpand(lerchphi(z, s, S.Half),
2**(s - 1)*(polylog(s, sqrt(z))/sqrt(z)
- polylog(s, polar_lift(-1)*sqrt(z))/sqrt(z)))
assert myexpand(lerchphi(z, s, 2), -1/z + polylog(s, z)/z**2)
assert myexpand(lerchphi(z, s, Rational(3, 2)), None)
assert myexpand(lerchphi(z, s, Rational(7, 3)), None)
assert myexpand(lerchphi(z, s, Rational(-1, 3)), None)
assert myexpand(lerchphi(z, s, Rational(-5, 2)), None)
# hurwitz zeta reduction
assert myexpand(lerchphi(-1, s, a),
2**(-s)*zeta(s, a/2) - 2**(-s)*zeta(s, (a + 1)/2))
assert myexpand(lerchphi(I, s, a), None)
assert myexpand(lerchphi(-I, s, a), None)
assert myexpand(lerchphi(exp(I*pi*Rational(2, 5)), s, a), None)
def test_stieltjes():
assert isinstance(stieltjes(x), stieltjes)
assert isinstance(stieltjes(x, a), stieltjes)
# Zero'th constant EulerGamma
assert stieltjes(0) == S.EulerGamma
assert stieltjes(0, 1) == S.EulerGamma
# Not defined
assert stieltjes(nan) is nan
assert stieltjes(0, nan) is nan
assert stieltjes(-1) is S.ComplexInfinity
assert stieltjes(1.5) is S.ComplexInfinity
assert stieltjes(z, 0) is S.ComplexInfinity
assert stieltjes(z, -1) is S.ComplexInfinity
def test_stieltjes_evalf():
assert abs(stieltjes(0).evalf() - 0.577215664) < 1E-9
assert abs(stieltjes(0, 0.5).evalf() - 1.963510026) < 1E-9
assert abs(stieltjes(1, 2).evalf() + 0.072815845 ) < 1E-9
def test_issue_10475():
a = Symbol('a', extended_real=True)
b = Symbol('b', extended_positive=True)
s = Symbol('s', zero=False)
assert zeta(2 + I).is_finite
assert zeta(1).is_finite is False
assert zeta(x).is_finite is None
assert zeta(x + I).is_finite is None
assert zeta(a).is_finite is None
assert zeta(b).is_finite is None
assert zeta(-b).is_finite is True
assert zeta(b**2 - 2*b + 1).is_finite is None
assert zeta(a + I).is_finite is True
assert zeta(b + 1).is_finite is True
assert zeta(s + 1).is_finite is True
def test_issue_14177():
n = Symbol('n', positive=True, integer=True)
assert zeta(2*n) == (-1)**(n + 1)*2**(2*n - 1)*pi**(2*n)*bernoulli(2*n)/factorial(2*n)
assert zeta(-n) == (-1)**(-n)*bernoulli(n + 1)/(n + 1)
n = Symbol('n')
assert zeta(2*n) == zeta(2*n) # As sign of z (= 2*n) is not determined
|
|
import copy
from ctypeslib.codegen import typedesc
from funcs import find_unqualified_type
def _signatures_types(funcs, items):
"""Given a sequence of typedesc.Function instances, generate a set of all
typedesc instances used in function declarations."""
arguments = set()
for f in funcs:
for t in f.iterArgTypes():
ut = find_unqualified_type(t)
if ut in items:
arguments.add(ut)
ut = find_unqualified_type(f.returns)
if ut in items:
arguments.add(ut)
return arguments
class TypePuller:
def __init__(self, all):
self._items = []
#self._all = sorted(all, cmpitems)
self._all = all
self._done = set()
        # List of struct names for which the puller will not pull the
        # members. This is a hack to avoid cycles in recursive struct
        # declarations that refer to each other.
self._STRUCTS_IGNORE = ['_IO_FILE', '_IO_marker', 'yoyo11', 'yoyo12']
def pull_fundamental(self, item):
pass
def pull_cv_qualified_type(self, item):
self.pull(item.typ)
#names.add(item.name)
self._items.append(item)
def pull_typedef(self, item):
        # XXX: Generate the typedef itself
if not item in self._done:
self._done.add(item)
self.pull(item.typ)
#names.add(item.name)
self._items.append(item)
def pull_function(self, item):
# XXX: fix signatures_type for single item
types = _signatures_types([item], self._all)
#names.append(item.name)
for t in types:
ut = find_unqualified_type(t)
if ut in self._all:
self.pull(ut)
self._items.append(item)
def pull_function_type(self, item):
# XXX: fix signatures_type for single item
types = _signatures_types([item], self._all)
#self._items.append(item)
for t in types:
ut = find_unqualified_type(t)
if ut in self._all:
self.pull(ut)
def pull_structure(self, item):
#names.append(item.name)
if not item in self._done:
self._done.add(item)
if item.name in self._STRUCTS_IGNORE:
# XXX: hack. We remove all members of the ignored structures,
                # to generate an opaque structure in the code generator.
print "Ignoring", item, item.name
item.members = []
else:
for m in item.members:
if isinstance(m, typedesc.Field):
f = m.typ
# XXX: ugly hack. Cython does not support structures
                        # with members referring to the structure itself through a typedef, so
# we "untypedef" the member if we detect such a case
if isinstance(f, typedesc.PointerType) \
and isinstance(f.typ, typedesc.Typedef) \
and isinstance(f.typ.typ, typedesc.Structure) \
and f.typ.typ == item:
newf = copy.deepcopy(f)
newf.typ = newf.typ.typ
m.typ = newf
self.pull(m)
self._items.append(item)
def pull_union(self, item):
#names.append(item.name)
for m in item.members:
            # pull() records dependencies on self._items itself and returns
            # None, so simply pull each member here.
            self.pull(m)
self._items.append(item)
def pull_array_type(self, item):
#names.append(item.name)
self.pull(item.typ)
self._items.append(item)
def pull_enumeration(self, item):
#names.append(item.name)
for v in item.values:
self.pull(v)
self._items.append(item)
def pull_enum_value(self, item):
#names.append(item.name)
self._items.append(item)
def pull(self, item):
if isinstance(item, typedesc.FundamentalType):
#print "Fund Pulling", item, item.name
self.pull_fundamental(item)
return
elif isinstance(item, typedesc.Enumeration):
#print "Enumeration Pulling", item, item.name
self.pull_enumeration(item)
return
elif isinstance(item, typedesc.EnumValue):
#print "Enumeration Pulling", item, item.name
self.pull_enum_value(item)
return
elif isinstance(item, typedesc.Typedef):
#print "Typedef Pulling", item, item.name
self.pull_typedef(item)
return
elif isinstance(item, typedesc.Structure):
#print "Struct Pulling", item, item.name
self.pull_structure(item)
return
elif isinstance(item, typedesc.Union):
#print "FunctionType Pulling", item
self.pull_union(item)
return
elif isinstance(item, typedesc.Function):
#print "Func Pulling", item
self.pull_function(item)
return
elif isinstance(item, typedesc.Field):
#print "Field Pulling", item
self.pull(item.typ)
return
elif isinstance(item, typedesc.PointerType):
#print "Pointer Pulling", item
self.pull(item.typ)
return
elif isinstance(item, typedesc.FunctionType):
#print "FunctionType Pulling", item
self.pull_function_type(item)
return
elif isinstance(item, typedesc.CvQualifiedType):
#print "FunctionType Pulling", item
self.pull_cv_qualified_type(item)
return
elif isinstance(item, typedesc.ArrayType):
#print "FunctionType Pulling", item
self.pull_array_type(item)
return
elif isinstance(item, typedesc.Ignored):
return
else:
raise ValueError, ("item not handled:", item)
def values(self):
return self._items
def instance_puller(tp, all):
p = TypePuller(all)
p.pull(tp)
return p.values()
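# A minimal usage sketch (names are illustrative; ``items`` is assumed to be
# the mapping of typedesc instances produced by the ctypeslib parser):
#
#   all_items = set(items.values())
#   for item in all_items:
#       if isinstance(item, typedesc.Function):
#           needed = instance_puller(item, all_items)
#           # ``needed`` lists the function plus every typedesc instance its
#           # signature depends on, in the order they were pulled.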
def cmpitems(a, b):
aloc = getattr(a, "location", None)
bloc = getattr(b, "location", None)
if aloc is None:
return -1
if bloc is None:
return 1
st = cmp(aloc[0],bloc[0]) or cmp(int(aloc[1]),int(bloc[1]))
if st == 0:
# Two items as the same location: if it is a typedef'd structure with
# different name and tag, we make sure the structure is defined before
# the typedef. If it is a different case, just do nothing for now
if isinstance(a, typedesc.Structure):
if isinstance(b, typedesc.Typedef):
return -1
else:
# XXX
print "Hm, not sure what to do here"
return 0
if isinstance(b, typedesc.Structure):
if isinstance(a, typedesc.Typedef):
return 1
else:
print "Hm, not sure what to do here"
return 0
else:
return st
|
|
"""
Routines for interacting with AutoDock Vina program
These routines were developed by:
Rodrigo Antonio Faccioli - rodrigo.faccioli@usp.br / rodrigo.faccioli@gmail.com
Leandro Oliveira Bortot - leandro.bortot@usp.br / leandro.obt@gmail.com
"""
import sys
import os
import ntpath
from subprocess import Popen, PIPE
import shutil
import mol2
import pdbqt
""" This function obtains all pdbqt files
in mypath
"""
def get_files_pdbqt(mypath):
only_pdbqt_file = []
for root, dirs, files in os.walk(mypath):
for file in files:
if file.endswith(".pdbqt"):
f_path = os.path.join(root,file)
only_pdbqt_file.append(f_path)
    #Prepare to return a list sorted by torsion angles present in pdbqt
    d_docking = {}
    d_path = {}
    for f in only_pdbqt_file:
        f_name = str(str(os.path.basename(f)).split(".")[0])
        d_docking[f_name] = pdbqt.get_number_torsion_angle(f)
        d_path[f_name] = f
    sorted_log_dict = pdbqt.sort_dictionary(d_docking)
    only_pdbqt_file = []
    for l_item in sorted_log_dict:
        only_pdbqt_file.append(d_path[str(l_item[0])])
    return only_pdbqt_file
""" This function obtains all pdb files
in mypath
"""
def get_files_pdb(mypath):
only_pdb_file = []
for root, dirs, files in os.walk(mypath):
for file in files:
if file.endswith(".pdb"):
f_path = os.path.join(root,file)
only_pdb_file.append(f_path)
return only_pdb_file
""" This function obtains all mol2 files
in mypath
"""
def get_files_mol2(mypath):
only_mol2_file = []
for root, dirs, files in os.walk(mypath):
for file in files:
if file.endswith(".mol2"):
f_path = os.path.join(root,file)
only_mol2_file.append(f_path)
return only_mol2_file
def check_for_preparing_ligand(mol2_path, pdbqt_ligand_path, pythonsh, script_ligand4):
if len(get_files_mol2(mol2_path)) == 0 and len(get_files_pdb(mol2_path)) == 0:
raise EnvironmentError("Either mol2 or pdb of ligands not found ")
if not os.path.exists(pdbqt_ligand_path):
os.makedirs(pdbqt_ligand_path)
if os.path.isfile(pythonsh) == False:
raise EnvironmentError("pythonsh for vina, not found")
if os.path.isfile(script_ligand4) == False:
raise EnvironmentError("script_ligand4 for vina, not found")
def check_for_preparing_receptor(pdb_path, pdbqt_receptor_path, pythonsh, script_receptor4):
if len(get_files_pdb(pdb_path)) == 0:
raise EnvironmentError("PDB of receptors not found ")
if not os.path.exists(pdbqt_receptor_path):
os.makedirs(pdbqt_receptor_path)
if os.path.isfile(pythonsh) == False:
raise EnvironmentError("pythonsh for vina, not found")
if os.path.isfile(script_receptor4) == False:
raise EnvironmentError("script_receptor4 for vina, not found")
def check_for_running_docking(config_file, vina_program):
"""
    Check that the config file and programs needed to run AutoDock Vina exist
Example:
>>> check_for_running_docking(config_file, vina_program)
"""
if os.path.isfile(config_file) == False:
raise EnvironmentError("config_file for vina, not found")
if os.path.isfile(vina_program) == False:
raise EnvironmentError("vina_program for vina, not found")
""" This function is created the pdbqt file get_name_pdbqt
based on fmol2 file name
"""
def get_name_pdbqt(reference):
path, filename = ntpath.split(reference)
name = str(filename.split(".")[0]) #remove either .mol2 or .pdb
fpdbqt = name+".pdbqt"
return fpdbqt
""" In this function is build the pdbqt file name for ligand
"""
def get_name_ligand_pdbqt(reference):
name = os.path.basename(reference)
if str(name).find(".pdb") > 0:
name = str(name).replace(".pdb", ".pdbqt")
if str(name).find(".mol2") > 0:
name = str(name).replace(".mol2", ".pdbqt")
return name
""" This function converts mol2 files
to pdbqt files
"""
def prepare_ligand(path_mol2, path_pdbqt, pythonsh, script_ligand4):
if not os.path.isdir(path_pdbqt):
os.mkdir(path_pdbqt)
mol2_files = get_files_mol2(path_mol2)
for fmol2 in mol2_files:
fpdbqt_filename = get_name_ligand_pdbqt(fmol2)
fpdbqt = os.path.join(path_pdbqt,fpdbqt_filename)
process = Popen([pythonsh, script_ligand4, '-l', fmol2, '-v', '-o', fpdbqt, '-U', 'nphs_lps', '-A', 'hydrogens'], stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
#command = pythonsh + " " + script_ligand4 + " "+ '-l' + " "+ fmol2 + " "+ '-v' + " "+ '-o'+ " "+ fpdbqt + "\n"
#os.system(command)
""" This function converts pdb ligand files
to pdbqt files
"""
def prepare_ligand_pdb(path_pdb, path_pdbqt, pythonsh, script_ligand4):
if not os.path.isdir(path_pdbqt):
os.mkdir(path_pdbqt)
pdb_files = get_files_pdb(path_pdb)
for fpdb in pdb_files:
fpdbqt_filename = get_name_ligand_pdbqt(fpdb)
fpdbqt = os.path.join(path_pdbqt,fpdbqt_filename)
process = Popen([pythonsh, script_ligand4, '-l', fpdb, '-v', '-o', fpdbqt, '-U', 'nphs_lps', '-A', 'hydrogens'], stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
#command = pythonsh + " " + script_ligand4 + " "+ '-l' + " "+ fmol2 + " "+ '-v' + " "+ '-o'+ " "+ fpdbqt + "\n"
#os.system(command)
""" This function converts pdb files
to pdbqt files
"""
def prepare_receptor(path_pdb, path_pdbqt, pythonsh, script_receptor4):
if not os.path.isdir(path_pdbqt):
os.mkdir(path_pdbqt)
pdb_files = get_files_pdb(path_pdb)
for fpdb in pdb_files:
fpdbqt_filename = get_name_pdbqt(fpdb)
fpdbqt = os.path.join(path_pdbqt,fpdbqt_filename)
process = Popen([pythonsh, script_receptor4, '-r', fpdb, '-o', fpdbqt, '-v', '-U', 'nphs_lps', '-A', 'none'], stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
""" This function obtains the name of receptor
based on file name
"""
def get_name_receptor(receptor):
path, filename = ntpath.split(receptor)
name = str(filename.split(".")[0])
return name
""" This function creates out file name
"""
def get_name_out(receptor, ligand):
return receptor+'_-_'+ligand+'.pdbqt'
""" This function creates log file name
"""
def get_name_log(receptor, ligand):
return receptor+'_-_'+ligand+'.log'
""" This function is executed the docking from
one receptor against all ligands.
It is used when is not working with MPI
"""
def run_docking(vina_program, vina_conf, receptor, path_ligand_pdbqt, path_struct, path_log):
name_receptor = get_name_receptor(receptor)
all_ligands = get_files_pdbqt(path_ligand_pdbqt)
for ligand in all_ligands:
name_ligand = pdbqt.get_name_ligand(ligand)
f_out = os.path.join(path_struct,get_name_out(name_receptor, name_ligand))
f_log = os.path.join(path_log, get_name_log(name_receptor, name_ligand))
process = Popen([vina_program, '--config', vina_conf, '--receptor', receptor, '--ligand', ligand, '--out', f_out, '--log', f_log], stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
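# A minimal end-to-end sketch of how these routines fit together (the paths
# and file names below are hypothetical, not part of this module):
#
#   check_for_preparing_receptor("pdb/", "pdbqt/receptor/", pythonsh, script_receptor4)
#   check_for_preparing_ligand("mol2/", "pdbqt/ligand/", pythonsh, script_ligand4)
#   prepare_receptor("pdb/", "pdbqt/receptor/", pythonsh, script_receptor4)
#   prepare_ligand("mol2/", "pdbqt/ligand/", pythonsh, script_ligand4)
#   check_for_running_docking("vina.conf", vina_program)
#   for receptor in get_files_pdbqt("pdbqt/receptor/"):
#       run_docking(vina_program, "vina.conf", receptor, "pdbqt/ligand/",
#                   "structs/", "logs/")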
|
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.logging_v2.proto import logging_config_pb2 as google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class ConfigServiceV2Stub(object):
"""Service for configuring sinks used to export log entries out of
Logging.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ListSinks = channel.unary_unary(
'/google.logging.v2.ConfigServiceV2/ListSinks',
request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.ListSinksRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.ListSinksResponse.FromString,
)
self.GetSink = channel.unary_unary(
'/google.logging.v2.ConfigServiceV2/GetSink',
request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.GetSinkRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.LogSink.FromString,
)
self.CreateSink = channel.unary_unary(
'/google.logging.v2.ConfigServiceV2/CreateSink',
request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.CreateSinkRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.LogSink.FromString,
)
self.UpdateSink = channel.unary_unary(
'/google.logging.v2.ConfigServiceV2/UpdateSink',
request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.UpdateSinkRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.LogSink.FromString,
)
self.DeleteSink = channel.unary_unary(
'/google.logging.v2.ConfigServiceV2/DeleteSink',
request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.DeleteSinkRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ListExclusions = channel.unary_unary(
'/google.logging.v2.ConfigServiceV2/ListExclusions',
request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.ListExclusionsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.ListExclusionsResponse.FromString,
)
self.GetExclusion = channel.unary_unary(
'/google.logging.v2.ConfigServiceV2/GetExclusion',
request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.GetExclusionRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.LogExclusion.FromString,
)
self.CreateExclusion = channel.unary_unary(
'/google.logging.v2.ConfigServiceV2/CreateExclusion',
request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.CreateExclusionRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.LogExclusion.FromString,
)
self.UpdateExclusion = channel.unary_unary(
'/google.logging.v2.ConfigServiceV2/UpdateExclusion',
request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.UpdateExclusionRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.LogExclusion.FromString,
)
self.DeleteExclusion = channel.unary_unary(
'/google.logging.v2.ConfigServiceV2/DeleteExclusion',
request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.DeleteExclusionRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class ConfigServiceV2Servicer(object):
"""Service for configuring sinks used to export log entries out of
Logging.
"""
def ListSinks(self, request, context):
"""Lists sinks.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSink(self, request, context):
"""Gets a sink.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateSink(self, request, context):
"""Creates a sink that exports specified log entries to a destination. The
export of newly-ingested log entries begins immediately, unless the sink's
`writer_identity` is not permitted to write to the destination. A sink can
export log entries only from the resource owning the sink.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateSink(self, request, context):
"""Updates a sink. This method replaces the following fields in the existing
sink with values from the new sink: `destination`, and `filter`.
The updated sink might also have a new `writer_identity`; see the
`unique_writer_identity` field.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteSink(self, request, context):
"""Deletes a sink. If the sink has a unique `writer_identity`, then that
service account is also deleted.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListExclusions(self, request, context):
"""Lists all the exclusions in a parent resource.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetExclusion(self, request, context):
"""Gets the description of an exclusion.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateExclusion(self, request, context):
"""Creates a new exclusion in a specified parent resource.
Only log entries belonging to that resource can be excluded.
You can have up to 10 exclusions in a resource.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateExclusion(self, request, context):
"""Changes one or more properties of an existing exclusion.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteExclusion(self, request, context):
"""Deletes an exclusion.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ConfigServiceV2Servicer_to_server(servicer, server):
rpc_method_handlers = {
'ListSinks': grpc.unary_unary_rpc_method_handler(
servicer.ListSinks,
request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.ListSinksRequest.FromString,
response_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.ListSinksResponse.SerializeToString,
),
'GetSink': grpc.unary_unary_rpc_method_handler(
servicer.GetSink,
request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.GetSinkRequest.FromString,
response_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.LogSink.SerializeToString,
),
'CreateSink': grpc.unary_unary_rpc_method_handler(
servicer.CreateSink,
request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.CreateSinkRequest.FromString,
response_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.LogSink.SerializeToString,
),
'UpdateSink': grpc.unary_unary_rpc_method_handler(
servicer.UpdateSink,
request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.UpdateSinkRequest.FromString,
response_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.LogSink.SerializeToString,
),
'DeleteSink': grpc.unary_unary_rpc_method_handler(
servicer.DeleteSink,
request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.DeleteSinkRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'ListExclusions': grpc.unary_unary_rpc_method_handler(
servicer.ListExclusions,
request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.ListExclusionsRequest.FromString,
response_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.ListExclusionsResponse.SerializeToString,
),
'GetExclusion': grpc.unary_unary_rpc_method_handler(
servicer.GetExclusion,
request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.GetExclusionRequest.FromString,
response_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.LogExclusion.SerializeToString,
),
'CreateExclusion': grpc.unary_unary_rpc_method_handler(
servicer.CreateExclusion,
request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.CreateExclusionRequest.FromString,
response_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.LogExclusion.SerializeToString,
),
'UpdateExclusion': grpc.unary_unary_rpc_method_handler(
servicer.UpdateExclusion,
request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.UpdateExclusionRequest.FromString,
response_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.LogExclusion.SerializeToString,
),
'DeleteExclusion': grpc.unary_unary_rpc_method_handler(
servicer.DeleteExclusion,
request_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.DeleteExclusionRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.logging.v2.ConfigServiceV2', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
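# A minimal client-side usage sketch (illustrative only; this module is
# generated and the snippet below is not part of the generated output):
#
#   channel = grpc.insecure_channel('localhost:8080')
#   stub = ConfigServiceV2Stub(channel)
#   request = google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__config__pb2.ListSinksRequest(
#       parent='projects/my-project')
#   response = stub.ListSinks(request)
#   for sink in response.sinks:
#       print(sink.name)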
|
|
# -*- test-case-name: calendarserver.push.test.test_applepush -*-
##
# Copyright (c) 2011-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twext.internet.ssl import ChainingOpenSSLContextFactory
from twext.python.log import Logger
from txweb2 import responsecode
from txdav.xml import element as davxml
from txweb2.dav.noneprops import NonePropertyStore
from txweb2.http import Response
from txweb2.http_headers import MimeType
from txweb2.server import parsePOSTData
from twisted.application import service
from twisted.internet.protocol import Protocol
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twisted.internet.protocol import ClientFactory, ReconnectingClientFactory
from twisted.internet.task import LoopingCall
from twistedcaldav.extensions import DAVResource, DAVResourceWithoutChildrenMixin
from twistedcaldav.resource import ReadOnlyNoCopyResourceMixIn
import json
import OpenSSL
import struct
import time
from txdav.common.icommondatastore import InvalidSubscriptionValues
from calendarserver.push.util import (
validToken, TokenHistory, PushScheduler, PushPriority
)
from twext.internet.adaptendpoint import connect
from twext.internet.gaiendpoint import GAIEndpoint
from twisted.python.constants import Values, ValueConstant
log = Logger()
class ApplePushPriority(Values):
"""
Maps calendarserver.push.util.PushPriority values to APNS-specific values
"""
low = ValueConstant(PushPriority.low.value)
medium = ValueConstant(PushPriority.medium.value)
high = ValueConstant(PushPriority.high.value)
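# Illustrative note: a generic PushPriority can be mapped onto its
# APNS-specific value with the twisted.python.constants lookup API, e.g.
#   ApplePushPriority.lookupByValue(PushPriority.high.value).value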
class ApplePushNotifierService(service.MultiService):
"""
ApplePushNotifierService is a MultiService responsible for
setting up the APN provider and feedback connections. Once
connected, calling its enqueue( ) method sends notifications
to any device token which is subscribed to the enqueued key.
The Apple Push Notification protocol is described here:
https://developer.apple.com/library/ios/documentation/NetworkingInternet/Conceptual/RemoteNotificationsPG/Chapters/CommunicatingWIthAPS.html
"""
log = Logger()
@classmethod
def makeService(
cls, settings, store, testConnectorClass=None,
reactor=None
):
"""
Creates the various "subservices" that work together to implement
APN, including "provider" and "feedback" services for CalDAV and
CardDAV.
@param settings: The portion of the configuration specific to APN
@type settings: C{dict}
@param store: The db store for storing/retrieving subscriptions
@type store: L{IDataStore}
@param testConnectorClass: Used for unit testing; implements
connect( ) and receiveData( )
@type testConnectorClass: C{class}
@param reactor: Used for unit testing; allows tests to advance the
clock in order to test the feedback polling service.
@type reactor: L{twisted.internet.task.Clock}
@return: instance of L{ApplePushNotifierService}
"""
service = cls()
service.store = store
service.providers = {}
service.feedbacks = {}
service.purgeCall = None
service.purgeIntervalSeconds = settings["SubscriptionPurgeIntervalSeconds"]
service.purgeSeconds = settings["SubscriptionPurgeSeconds"]
for protocol in ("CalDAV", "CardDAV"):
if settings[protocol]["CertificatePath"]:
providerTestConnector = None
feedbackTestConnector = None
if testConnectorClass is not None:
providerTestConnector = testConnectorClass()
feedbackTestConnector = testConnectorClass()
provider = APNProviderService(
service.store,
settings["ProviderHost"],
settings["ProviderPort"],
settings[protocol]["CertificatePath"],
settings[protocol]["PrivateKeyPath"],
chainPath=settings[protocol]["AuthorityChainPath"],
passphrase=settings[protocol]["Passphrase"],
staggerNotifications=settings["EnableStaggering"],
staggerSeconds=settings["StaggerSeconds"],
testConnector=providerTestConnector,
reactor=reactor,
)
provider.setServiceParent(service)
service.providers[protocol] = provider
service.log.info(
"APNS %s topic: %s" %
(protocol, settings[protocol]["Topic"]))
feedback = APNFeedbackService(
service.store,
settings["FeedbackUpdateSeconds"],
settings["FeedbackHost"],
settings["FeedbackPort"],
settings[protocol]["CertificatePath"],
settings[protocol]["PrivateKeyPath"],
chainPath=settings[protocol]["AuthorityChainPath"],
passphrase=settings[protocol]["Passphrase"],
testConnector=feedbackTestConnector,
reactor=reactor,
)
feedback.setServiceParent(service)
service.feedbacks[protocol] = feedback
return service
def startService(self):
"""
In addition to starting the provider and feedback sub-services, start a
LoopingCall whose job it is to purge old subscriptions
"""
service.MultiService.startService(self)
self.log.debug("ApplePushNotifierService startService")
self.purgeCall = LoopingCall(self.purgeOldSubscriptions, self.purgeSeconds)
self.purgeCall.start(self.purgeIntervalSeconds, now=False)
def stopService(self):
"""
In addition to stopping the provider and feedback sub-services, stop the
LoopingCall
"""
service.MultiService.stopService(self)
self.log.debug("ApplePushNotifierService stopService")
if self.purgeCall is not None:
self.purgeCall.stop()
self.purgeCall = None
@inlineCallbacks
def purgeOldSubscriptions(self, purgeSeconds):
"""
Remove any subscriptions that registered more than purgeSeconds ago
@param purgeSeconds: The cutoff given in seconds
@type purgeSeconds: C{int}
"""
self.log.debug("ApplePushNotifierService purgeOldSubscriptions")
txn = self.store.newTransaction(label="ApplePushNotifierService.purgeOldSubscriptions")
yield txn.purgeOldAPNSubscriptions(int(time.time()) - purgeSeconds)
yield txn.commit()
@inlineCallbacks
def enqueue(
self, transaction, pushKey, dataChangedTimestamp=None,
priority=PushPriority.high
):
"""
Sends an Apple Push Notification to any device token subscribed to
this pushKey.
@param pushKey: The identifier of the resource that was updated, including
a prefix indicating whether this is CalDAV or CardDAV related.
"/CalDAV/abc/def/"
@type pushKey: C{str}
@param dataChangedTimestamp: Timestamp (epoch seconds) for the data change
which triggered this notification (Only used for unit tests)
@type dataChangedTimestamp: C{int}
@param priority: the priority level
@type priority: L{PushPriority}
"""
try:
protocol = pushKey.split("/")[1]
except (IndexError, ValueError):
# pushKey has no protocol, so we can't do anything with it
self.log.error("Push key '%s' is missing protocol" % (pushKey,))
return
# Unit tests can pass this value in; otherwise it defaults to now
if dataChangedTimestamp is None:
dataChangedTimestamp = int(time.time())
provider = self.providers.get(protocol, None)
if provider is not None:
# Look up subscriptions for this key
subscriptions = (yield transaction.apnSubscriptionsByKey(pushKey))
numSubscriptions = len(subscriptions)
if numSubscriptions > 0:
self.log.debug(
"Sending %d APNS notifications for %s" %
(numSubscriptions, pushKey))
tokens = [record.token for record in subscriptions if record.token and record.subscriberGUID]
if tokens:
provider.scheduleNotifications(
tokens, pushKey,
dataChangedTimestamp, priority)
class APNProviderProtocol(Protocol):
"""
Implements the Provider portion of APNS
"""
log = Logger()
# Sent by provider
COMMAND_PROVIDER = 2
# Received by provider
COMMAND_ERROR = 8
# Returned only for an error. Successful notifications get no response.
STATUS_CODES = {
0 : "No errors encountered",
1 : "Processing error",
2 : "Missing device token",
3 : "Missing topic",
4 : "Missing payload",
5 : "Invalid token size",
6 : "Invalid topic size",
7 : "Invalid payload size",
8 : "Invalid token",
255 : "None (unknown)",
}
# If error code comes back as one of these, remove the associated device
# token
TOKEN_REMOVAL_CODES = (5, 8)
MESSAGE_LENGTH = 6
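# For reference (illustrative only): a single error frame from the APN server is
# the 6-byte big-endian struct "!BBI" of command, status and identifier, e.g.
#
#   struct.pack("!BBI", 8, 8, 42)   # COMMAND_ERROR, status 8 (invalid token), id 42
#
# dataReceived() below buffers the stream into such frames and processError()
# consumes them.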
def makeConnection(self, transport):
self.history = TokenHistory()
self.log.debug("ProviderProtocol makeConnection")
Protocol.makeConnection(self, transport)
def connectionMade(self):
self.log.debug("ProviderProtocol connectionMade")
self.buffer = ""
# Store a reference to ourself on the factory so the service can
# later call us
self.factory.connection = self
self.factory.clientConnectionMade()
def connectionLost(self, reason=None):
# self.log.debug("ProviderProtocol connectionLost: %s" % (reason,))
# Clear the reference to us from the factory
self.factory.connection = None
@inlineCallbacks
def dataReceived(self, data, fn=None):
"""
Buffer and divide up received data into error messages which are
always 6 bytes long
"""
if fn is None:
fn = self.processError
self.log.debug("ProviderProtocol dataReceived %d bytes" % (len(data),))
self.buffer += data
while len(self.buffer) >= self.MESSAGE_LENGTH:
message = self.buffer[:self.MESSAGE_LENGTH]
self.buffer = self.buffer[self.MESSAGE_LENGTH:]
try:
command, status, identifier = struct.unpack("!BBI", message)
if command == self.COMMAND_ERROR:
yield fn(status, identifier)
except Exception, e:
self.log.warn(
"ProviderProtocol could not process error: %s (%s)" %
(message.encode("hex"), e))
@inlineCallbacks
def processError(self, status, identifier):
"""
Handles an error message we've received on the provider channel.
If the error code is one that indicates a bad token, remove all
subscriptions corresponding to that token.
@param status: The status value returned from the APN server
@type status: C{int}
@param identifier: The identifier of the outbound push notification
message which had a problem.
@type identifier: C{int}
"""
msg = self.STATUS_CODES.get(status, "Unknown status code")
self.log.info("Received APN error %d on identifier %d: %s" % (status, identifier, msg))
if status in self.TOKEN_REMOVAL_CODES:
token = self.history.extractIdentifier(identifier)
if token is not None:
self.log.debug(
"Removing subscriptions for bad token: %s" %
(token,))
txn = self.factory.store.newTransaction(label="APNProviderProtocol.processError")
subscriptions = (yield txn.apnSubscriptionsByToken(token))
for record in subscriptions:
self.log.debug(
"Removing subscription: %s %s" %
(token, record.resourceKey))
yield txn.removeAPNSubscription(token, record.resourceKey)
yield txn.commit()
def sendNotification(self, token, key, dataChangedTimestamp, priority):
"""
Sends a push notification message for the key to the device associated
with the token.
@param token: The device token subscribed to the key
@type token: C{str}
@param key: The key we're sending a notification about
@type key: C{str}
@param dataChangedTimestamp: Timestamp (epoch seconds) for the data change
which triggered this notification
@type dataChangedTimestamp: C{int}
"""
if not (token and key and dataChangedTimestamp):
return
try:
binaryToken = token.replace(" ", "").decode("hex")
except:
self.log.error("Invalid APN token in database: %s" % (token,))
return
identifier = self.history.add(token)
apnsPriority = ApplePushPriority.lookupByValue(priority.value).value
payload = json.dumps(
{
"key" : key,
"dataChangedTimestamp" : dataChangedTimestamp,
"pushRequestSubmittedTimestamp" : int(time.time()),
}
)
payloadLength = len(payload)
self.log.debug(
"Sending APNS notification to {token}: id={id} payload={payload} priority={priority}",
token=token, id=identifier, payload=payload, priority=apnsPriority)
"""
Notification format
Top level: Command (1 byte), Frame length (4 bytes), Frame data (variable)
Within Frame data: Item ...
Item: Item number (1 byte), Item data length (2 bytes), Item data (variable)
Item 1: Device token (32 bytes)
Item 2: Payload (variable length) in JSON format, not null-terminated
Item 3: Notification ID (4 bytes) an opaque value used for reporting errors
Item 4: Expiration date (4 bytes) UNIX epoch in seconds UTC
Item 5: Priority (1 byte): 10 (push sent immediately) or 5 (push sent
at a time that conserves power on the device receiving it)
"""
# Frame struct.pack format ! Network byte order
command = self.COMMAND_PROVIDER # B
frameLength = (# I
# Item 1 (Device token)
1 + # Item number # B
2 + # Item length # H
32 + # device token # 32s
# Item 2 (Payload)
1 + # Item number # B
2 + # Item length # H
payloadLength + # the JSON payload # %d s
# Item 3 (Notification ID)
1 + # Item number # B
2 + # Item length # H
4 + # Notification ID # I
# Item 4 (Expiration)
1 + # Item number # B
2 + # Item length # H
4 + # Expiration seconds since epoch # I
# Item 5 (Priority)
1 + # Item number # B
2 + # Item length # H
1 # Priority # B
)
self.transport.write(
struct.pack(
"!BIBH32sBH%dsBHIBHIBHB" % (payloadLength,),
command, # Command
frameLength, # Frame length
1, # Item 1 (Device token)
32, # Token Length
binaryToken, # Token
2, # Item 2 (Payload)
payloadLength, # Payload length
payload, # Payload
3, # Item 3 (Notification ID)
4, # Notification ID Length
identifier, # Notification ID
4, # Item 4 (Expiration)
4, # Expiration length
int(time.time()) + 72 * 60 * 60, # Expires in 72 hours
5, # Item 5 (Priority)
1, # Priority length
apnsPriority, # Priority
)
)
class APNProviderFactory(ReconnectingClientFactory):
log = Logger()
protocol = APNProviderProtocol
def __init__(self, service, store):
self.service = service
self.store = store
self.noisy = True
self.maxDelay = 30 # max seconds between connection attempts
self.shuttingDown = False
def clientConnectionMade(self):
self.log.info("Connection to APN server made")
self.service.clientConnectionMade()
self.delay = 1.0
def clientConnectionLost(self, connector, reason):
if not self.shuttingDown:
self.log.info("Connection to APN server lost: %s" % (reason,))
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def clientConnectionFailed(self, connector, reason):
self.log.error("Unable to connect to APN server: %s" % (reason,))
self.connected = False
ReconnectingClientFactory.clientConnectionFailed(
self, connector,
reason)
def retry(self, connector=None):
self.log.info("Reconnecting to APN server")
ReconnectingClientFactory.retry(self, connector)
def stopTrying(self):
self.shuttingDown = True
ReconnectingClientFactory.stopTrying(self)
class APNConnectionService(service.Service):
log = Logger()
def __init__(
self, host, port, certPath, keyPath, chainPath="",
passphrase="", sslMethod="TLSv1_METHOD", testConnector=None,
reactor=None
):
self.host = host
self.port = port
self.certPath = certPath
self.keyPath = keyPath
self.chainPath = chainPath
self.passphrase = passphrase
self.sslMethod = sslMethod
self.testConnector = testConnector
if reactor is None:
from twisted.internet import reactor
self.reactor = reactor
def connect(self, factory):
if self.testConnector is not None:
# For testing purposes
self.testConnector.connect(self, factory)
else:
if self.passphrase:
passwdCallback = lambda *ignored : self.passphrase
else:
passwdCallback = None
context = ChainingOpenSSLContextFactory(
self.keyPath,
self.certPath,
certificateChainFile=self.chainPath,
passwdCallback=passwdCallback,
sslmethod=getattr(OpenSSL.SSL, self.sslMethod)
)
connect(GAIEndpoint(self.reactor, self.host, self.port, context),
factory)
class APNProviderService(APNConnectionService):
def __init__(
self, store, host, port, certPath, keyPath, chainPath="",
passphrase="", sslMethod="TLSv1_METHOD",
staggerNotifications=False, staggerSeconds=3,
testConnector=None, reactor=None
):
APNConnectionService.__init__(
self, host, port, certPath, keyPath,
chainPath=chainPath, passphrase=passphrase, sslMethod=sslMethod,
testConnector=testConnector, reactor=reactor)
self.store = store
self.factory = None
self.queue = []
if staggerNotifications:
self.scheduler = PushScheduler(
self.reactor, self.sendNotification,
staggerSeconds=staggerSeconds)
else:
self.scheduler = None
def startService(self):
self.log.debug("APNProviderService startService")
self.factory = APNProviderFactory(self, self.store)
self.connect(self.factory)
def stopService(self):
self.log.debug("APNProviderService stopService")
if self.factory is not None:
self.factory.stopTrying()
if self.scheduler is not None:
self.scheduler.stop()
def clientConnectionMade(self):
# Service the queue
if self.queue:
# Copy and clear the queue. Any notifications that don't get
# sent will be put back into the queue.
queued = list(self.queue)
self.queue = []
for (token, key), dataChangedTimestamp, priority in queued:
if token and key and dataChangedTimestamp and priority:
self.sendNotification(
token, key, dataChangedTimestamp,
priority)
def scheduleNotifications(self, tokens, key, dataChangedTimestamp, priority):
"""
The starting point for getting notifications to the APNS server. If there is
a connection to the APNS server, these notifications are scheduled (or directly
sent if there is no scheduler). If there is no connection, the notifications
are saved for later.
@param tokens: The device tokens to schedule notifications for
@type tokens: List of strings
@param key: The key to use for this batch of notifications
@type key: String
@param dataChangedTimestamp: Timestamp (epoch seconds) for the data change
which triggered this notification
@type dataChangedTimestamp: C{int}
"""
# Service has reference to factory has reference to protocol instance
connection = getattr(self.factory, "connection", None)
if connection is not None:
if self.scheduler is not None:
self.scheduler.schedule(tokens, key, dataChangedTimestamp, priority)
else:
for token in tokens:
self.sendNotification(token, key, dataChangedTimestamp, priority)
else:
self._saveForWhenConnected(tokens, key, dataChangedTimestamp, priority)
def _saveForWhenConnected(self, tokens, key, dataChangedTimestamp, priority):
"""
Called in order to save notifications that can't be sent now because there
is no connection to the APNS server. (token, key) tuples are appended to
the queue which is serviced during clientConnectionMade()
@param tokens: The device tokens to schedule notifications for
@type tokens: List of C{str}
@param key: The key to use for this batch of notifications
@type key: C{str}
@param dataChangedTimestamp: Timestamp (epoch seconds) for the data change
which triggered this notification
@type dataChangedTimestamp: C{int}
"""
for token in tokens:
tokenKeyPair = (token, key)
for existingPair, _ignore_timestamp, _ignore_priority in self.queue:
if tokenKeyPair == existingPair:
self.log.debug("APNProviderService has no connection; skipping duplicate: %s %s" % (token, key))
break # Already scheduled
else:
self.log.debug("APNProviderService has no connection; queuing: %s %s" % (token, key))
self.queue.append(((token, key), dataChangedTimestamp, priority))
def sendNotification(self, token, key, dataChangedTimestamp, priority):
"""
If there is a connection the notification is sent right away, otherwise
the notification is saved for later.
@param token: The device token to send a notifications to
@type token: C{str}
@param key: The key to use for this notification
@type key: C{str}
@param dataChangedTimestamp: Timestamp (epoch seconds) for the data change
which triggered this notification
@type dataChangedTimestamp: C{int}
"""
if not (token and key and dataChangedTimestamp and priority):
return
# Service has reference to factory has reference to protocol instance
connection = getattr(self.factory, "connection", None)
if connection is None:
self._saveForWhenConnected([token], key, dataChangedTimestamp, priority)
else:
connection.sendNotification(token, key, dataChangedTimestamp, priority)
class APNFeedbackProtocol(Protocol):
"""
Implements the Feedback portion of APNS
"""
log = Logger()
MESSAGE_LENGTH = 38
def connectionMade(self):
self.log.debug("FeedbackProtocol connectionMade")
self.buffer = ""
@inlineCallbacks
def dataReceived(self, data, fn=None):
"""
Buffer and divide up received data into feedback messages which are
always 38 bytes long
"""
if fn is None:
fn = self.processFeedback
self.log.debug("FeedbackProtocol dataReceived %d bytes" % (len(data),))
self.buffer += data
while len(self.buffer) >= self.MESSAGE_LENGTH:
message = self.buffer[:self.MESSAGE_LENGTH]
self.buffer = self.buffer[self.MESSAGE_LENGTH:]
try:
timestamp, _ignore_tokenLength, binaryToken = struct.unpack(
"!IH32s",
message)
token = binaryToken.encode("hex").lower()
yield fn(timestamp, token)
except Exception, e:
self.log.warn(
"FeedbackProtocol could not process message: %s (%s)" %
(message.encode("hex"), e))
@inlineCallbacks
def processFeedback(self, timestamp, token):
"""
Handles a feedback message indicating that the given token is no
longer active as of the timestamp, and its subscription should be
removed as long as that device has not re-subscribed since the
timestamp.
@param timestamp: Seconds since the epoch
@type timestamp: C{int}
@param token: The device token to unsubscribe
@type token: C{str}
"""
self.log.debug(
"FeedbackProtocol processFeedback time=%d token=%s" %
(timestamp, token))
txn = self.factory.store.newTransaction(label="APNFeedbackProtocol.processFeedback")
subscriptions = (yield txn.apnSubscriptionsByToken(token))
for record in subscriptions:
if timestamp > record.modified:
self.log.debug(
"FeedbackProtocol removing subscription: %s %s" %
(token, record.resourceKey))
yield txn.removeAPNSubscription(token, record.resourceKey)
yield txn.commit()
class APNFeedbackFactory(ClientFactory):
log = Logger()
protocol = APNFeedbackProtocol
def __init__(self, store):
self.store = store
def clientConnectionFailed(self, connector, reason):
self.log.error(
"Unable to connect to APN feedback server: %s" %
(reason,))
self.connected = False
ClientFactory.clientConnectionFailed(self, connector, reason)
class APNFeedbackService(APNConnectionService):
def __init__(
self, store, updateSeconds, host, port,
certPath, keyPath, chainPath="", passphrase="", sslMethod="TLSv1_METHOD",
testConnector=None, reactor=None
):
APNConnectionService.__init__(
self, host, port, certPath, keyPath,
chainPath=chainPath, passphrase=passphrase, sslMethod=sslMethod,
testConnector=testConnector, reactor=reactor)
self.store = store
self.updateSeconds = updateSeconds
def startService(self):
self.log.debug("APNFeedbackService startService")
self.factory = APNFeedbackFactory(self.store)
self.checkForFeedback()
def stopService(self):
self.log.debug("APNFeedbackService stopService")
if self.nextCheck is not None:
self.nextCheck.cancel()
def checkForFeedback(self):
self.nextCheck = None
self.log.debug("APNFeedbackService checkForFeedback")
self.connect(self.factory)
self.nextCheck = self.reactor.callLater(
self.updateSeconds,
self.checkForFeedback)
class APNSubscriptionResource(
ReadOnlyNoCopyResourceMixIn,
DAVResourceWithoutChildrenMixin, DAVResource
):
"""
The DAV resource allowing clients to subscribe to Apple push notifications.
To subscribe, a client should first determine the key they are interested
by examining the "pushkey" DAV property on the home or collection they
want to monitor. Next the client sends an authenticated HTTP GET or POST
request to this resource, passing their device token and the key in either
the URL params or in the POST body.
"""
log = Logger()
def __init__(self, parent, store):
DAVResource.__init__(
self, principalCollections=parent.principalCollections()
)
self.parent = parent
self.store = store
def deadProperties(self):
if not hasattr(self, "_dead_properties"):
self._dead_properties = NonePropertyStore(self)
return self._dead_properties
def etag(self):
return succeed(None)
def checkPreconditions(self, request):
return None
def defaultAccessControlList(self):
return succeed(
davxml.ACL(
# DAV:Read for authenticated principals
davxml.ACE(
davxml.Principal(davxml.Authenticated()),
davxml.Grant(
davxml.Privilege(davxml.Read()),
),
davxml.Protected(),
),
# DAV:Write for authenticated principals
davxml.ACE(
davxml.Principal(davxml.Authenticated()),
davxml.Grant(
davxml.Privilege(davxml.Write()),
),
davxml.Protected(),
),
)
)
def contentType(self):
return MimeType.fromString("text/html; charset=utf-8")
def resourceType(self):
return None
def isCollection(self):
return False
def isCalendarCollection(self):
return False
def isPseudoCalendarCollection(self):
return False
@inlineCallbacks
def http_POST(self, request):
yield self.authorize(request, (davxml.Write(),))
yield parsePOSTData(request)
code, msg = (yield self.processSubscription(request))
returnValue(self.renderResponse(code, body=msg))
http_GET = http_POST
@inlineCallbacks
def processSubscription(self, request):
"""
Given an authenticated request, use the token and key arguments
to add a subscription entry to the database.
@param request: The request to process
@type request: L{txweb2.server.Request}
"""
token = request.args.get("token", ("",))[0].replace(" ", "").lower()
key = request.args.get("key", ("",))[0]
userAgent = request.headers.getHeader("user-agent", "-")
host = request.remoteAddr.host
fwdHeaders = request.headers.getRawHeaders("x-forwarded-for", [])
if fwdHeaders:
host = fwdHeaders[0]
if not (key and token):
code = responsecode.BAD_REQUEST
msg = "Invalid request: both 'token' and 'key' must be provided"
elif not validToken(token):
code = responsecode.BAD_REQUEST
msg = "Invalid request: bad 'token' %s" % (token,)
else:
uid = request.authnUser.record.uid
try:
yield self.addSubscription(token, key, uid, userAgent, host)
code = responsecode.OK
msg = None
except InvalidSubscriptionValues:
code = responsecode.BAD_REQUEST
msg = "Invalid subscription values"
returnValue((code, msg))
@inlineCallbacks
def addSubscription(self, token, key, uid, userAgent, host):
"""
Add a subscription (or update its timestamp if already there).
@param token: The device token, must be lowercase
@type token: C{str}
@param key: The push key
@type key: C{str}
@param uid: The uid of the subscriber principal
@type uid: C{str}
@param userAgent: The user-agent requesting the subscription
@type userAgent: C{str}
@param host: The host requesting the subscription
@type host: C{str}
"""
now = int(time.time()) # epoch seconds
txn = self.store.newTransaction(label="APNSubscriptionResource.addSubscription")
yield txn.addAPNSubscription(token, key, now, uid, userAgent, host)
yield txn.commit()
def renderResponse(self, code, body=None):
response = Response(code, {}, body)
response.headers.setHeader("content-type", MimeType("text", "html"))
return response
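# Client-side sketch (comments only): a device subscribes by sending an
# authenticated GET or POST carrying its APNS device token and the push key it
# read from the "pushkey" DAV property. For example, using the third-party
# `requests` library (URL, credentials, token and key are placeholders, not
# values from this file):
#
#   import requests
#   requests.post("https://caldav.example.com/apns/",
#                 auth=("user01", "password"),
#                 data={"token": "<64 hex digits>",
#                       "key": "/CalDAV/example.com/user01/"})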
|
|
import struct
import sys
import math
from .encoding import encode as base_pack, decode as unpack
from .object_update import decode_obj_update_packet
from .enumerations import *
def pack(fmt, *args):
return base_pack(fmt, args)
class SoftDecodeFailure(RuntimeError):
pass
PACKETS = {}
def packet(n):
def wrapper(cls):
PACKETS[n] = cls
cls.packet_id = n
return cls
return wrapper
class UndecodedPacket:
def __init__(self, packet_id, data):
self.packet_id = packet_id
self.data = data
def encode(self):
return self.data
@classmethod
def decode(cls, data):
return cls(0, data)
def __str__(self):
return "<UndecodedPacket id=0x{0:08x} data={1!r}>".format(self.packet_id, self.data)
@packet(0x6d04b3da)
class WelcomePacket:
def __init__(self, message=''):
self.message = message
def encode(self):
encoded_message = self.message.encode('ascii')
return struct.pack('<I', len(encoded_message)) + encoded_message
@classmethod
def decode(cls, packet):
string_length, = struct.unpack('<I', packet[:4])
decoded_message = packet[4:].decode('ascii')
if string_length != len(decoded_message):
raise ValueError('String length inconsistent with decoded length (should be {}, actually {})'.format(len(decoded_message), string_length))
return cls(decoded_message)
def __str__(self):
return "<WelcomePacket {0!r}>".format(self.message)
@packet(0xe548e74a)
class VersionPacket:
def __init__(self, major, minor, patch):
self.major = major
self.minor = minor
self.patch = patch
def encode(self):
return pack('IfIII', 0,
float('{}.{}'.format(self.major, self.minor)),
self.major, self.minor, self.patch)
@classmethod
def decode(cls, packet):
unknown_1, legacy_version, major, minor, patch = unpack('IfIII', packet)
return cls(major, minor, patch)
def __str__(self):
return "<VersionPacket {}.{}.{}>".format(self.major, self.minor, self.patch)
@packet(0x3de66711)
class DifficultyPacket:
def __init__(self, difficulty, game_type):
self.difficulty = difficulty
self.game_type = game_type
def encode(self):
return pack('II', self.difficulty, self.game_type.value)
@classmethod
def decode(cls, packet):
difficulty, game_type_raw = unpack('II', packet)
return cls(difficulty, GameType(game_type_raw))
def __str__(self):
return "<DifficultyPacket difficulty={} game_type={}>".format(self.difficulty, self.game_type)
@packet(0x19c6e2d4)
class ConsoleStatusPacket:
def __init__(self, ship, consoles):
self.consoles = {key: consoles.get(key, ConsoleStatus.available) for key in Console}
self.ship = ship
def encode(self):
return pack('I[B]', self.ship,
[(self.consoles[console].value,) for console in Console])
@classmethod
def decode(cls, packet):
ship, body = unpack('I[B]', packet)
body = [x[0] for x in body]
if len(body) != len(Console):
raise ValueError("Incorrect console count ({}, should be {})".format(len(body), len(Console)))
consoles = {console: ConsoleStatus(body[console.value]) for console in Console}
return cls(ship, consoles)
def __str__(self):
return '<ConsoleStatusPacket ship={0} consoles={1!r}>'.format(self.ship,
{console: status
for console, status in self.consoles.items()
if status != ConsoleStatus.available})
@packet(0xf5821226)
class HeartbeatPacket:
def encode(self):
return b''
@classmethod
def decode(cls, packet):
if packet != b'':
raise ValueError('Payload in heartbeat')
return cls()
def __str__(self):
return "<HeartbeatPacket>"
@packet(0xee665279)
class IntelPacket:
def __init__(self, object, intel):
self.object = object
self.intel = intel
def encode(self):
return pack('Ibu', self.object, 3, self.intel)
@classmethod
def decode(cls, packet):
object, _unk, intel = unpack('Ibu', packet)
return cls(object, intel)
def __str__(self):
return '<IntelPacket object={0} intel={1!r}>'.format(self.object, self.intel)
@packet(0xd672c35f)
class CommsIncomingPacket:
def __init__(self, priority, sender, message):
self.priority = priority
self.sender = sender
self.message = message
def encode(self):
return pack('Iuu', self.priority, self.sender, self.message.replace('\n', '^'))
@classmethod
def decode(cls, packet):
prio, sender, message = unpack('Iuu', packet)
return cls(prio, sender, message.replace('^', '\n'))
def __str__(self):
return '<CommsIncomingPacket priority={0} sender={1!r} message={2!r}>'.format(self.priority, self.sender, self.message)
@packet(0x80803df9)
class ObjectUpdatePacket:
def __init__(self, raw_data):
self.raw_data = raw_data
@property
def _records(self):
return decode_obj_update_packet(self.raw_data)
@property
def records(self):
try:
return self._records
except Exception:
return []
@classmethod
def decode(cls, packet):
if packet == b'\x00\x00\x00\x00':
return NoisePacket()
return cls(packet)
def encode(self):
return self.raw_data
def __str__(self):
try:
records = repr(self._records)
return '<ObjectUpdatePacket records={}>'.format(records)
except Exception as e:
return '<ObjectUpdatePacket data={0!r} error={1!r}>'.format(self.raw_data, e)
class NoisePacket:
def __init__(self):
self.packet_id = 0x80803df9
def encode(self):
return b'\x00\x00\x00\x00'
def __str__(self):
return '<NoisePacket>'
@packet(0xcc5a3e30)
class DestroyObjectPacket:
def __init__(self, type, object):
self.type = type
self.object = object
def encode(self):
return pack('BI', self.type.value, self.object)
@classmethod
def decode(cls, packet):
type, object = unpack('BI', packet)
return cls(type=ObjectType(type), object=object)
def __str__(self):
return '<DestroyObjectPacket type={0!r} object={1!r}>'.format(self.type, self.object)
@packet(0xf754c8fe)
class GameMessagePacket:
@classmethod
def decode(cls, packet):
if not packet:
raise ValueError('No payload in game message')
subtype_index = packet[0]
if subtype_index == 0:
return GameStartPacket.decode(packet)
if subtype_index == 6:
return GameEndPacket.decode(packet)
if subtype_index == 9:
return SkyboxPacket.decode(packet)
if subtype_index == 10:
return PopupPacket.decode(packet)
if subtype_index == 11:
return AutonomousDamconPacket.decode(packet)
if subtype_index == 12:
return JumpStartPacket.decode(packet)
if subtype_index == 13:
return JumpEndPacket.decode(packet)
if subtype_index == 15:
return AllShipSettingsPacket.decode(packet)
if subtype_index == 16:
return DmxPacket.decode(packet)
raise SoftDecodeFailure()
class GameStartPacket(GameMessagePacket):
def encode(self):
return b'\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00'
@classmethod
def decode(cls, packet):
if len(packet) != 12:
raise ValueError('Wrong packet length')
return cls()
def __str__(self):
return '<GameStartPacket>'
class GameEndPacket(GameMessagePacket):
def encode(self):
return b'\x06\x00\x00\x00'
@classmethod
def decode(cls, packet):
if len(packet) != 4:
raise ValueError('Wrong packet length')
return cls()
def __str__(self):
return '<GameEndPacket>'
class AllShipSettingsPacket(GameMessagePacket):
def __init__(self, ships):
self.ships = list(ships)
if len(self.ships) != 8:
raise ValueError('Must be 8 ships, {} given'.format(len(self.ships)))
def encode(self):
return pack('I[IIIu]', 15,
[(ship.drive.value, ship.type.value, 1, ship.name)
for ship in self.ships])
@classmethod
def decode(cls, packet):
_id, records = unpack('I[IIIu]', packet)
return cls(ShipSettingsRecord(DriveType(drv), ShipType(typ), name)
for drv, typ, _what, name in records)
def __str__(self):
return '<AllShipSettingsPacket settings={0!r}>'.format(self.ships)
class JumpStartPacket(GameMessagePacket):
def encode(self):
return b'\x0c\x00\x00\x00'
@classmethod
def decode(cls, packet):
if len(packet) != 4:
raise ValueError('Wrong packet length')
return cls()
def __str__(self):
return '<JumpStartPacket>'
class JumpEndPacket(GameMessagePacket):
def encode(self):
return b'\x0d\x00\x00\x00'
@classmethod
def decode(cls, packet):
if len(packet) != 4:
raise ValueError('Wrong packet length')
return cls()
def __str__(self):
return '<JumpEndPacket>'
class DmxPacket(GameMessagePacket):
def __init__(self, flag, state):
self.flag = flag
self.state = state
def encode(self):
return pack('IuI', 0x10, self.flag, int(self.state))
@classmethod
def decode(cls, packet):
_id, flag, state = unpack('IuI', packet)
return cls(flag, state)
def __str__(self):
return '<DmxPacket flag={0!r} state={1!r}>'.format(self.flag, self.state)
class SkyboxPacket(GameMessagePacket):
def __init__(self, skybox):
self.skybox = skybox
def encode(self):
return pack('II', 9, self.skybox)
@classmethod
def decode(cls, packet):
_id, skybox = unpack('II', packet)
return cls(skybox)
def __str__(self):
return '<SkyboxPacket skybox={0!r}>'.format(self.skybox)
class PopupPacket(GameMessagePacket):
def __init__(self, message):
self.message = message
def encode(self):
return pack('Iu', 0x0a, self.message)
@classmethod
def decode(cls, packet):
_id, message = unpack('Iu', packet)
return cls(message)
def __str__(self):
return '<PopupPacket message={0!r}>'.format(self.message)
class AutonomousDamconPacket(GameMessagePacket):
def __init__(self, autonomy):
self.autonomy = autonomy
def encode(self):
return pack('II', 0x0b, int(self.autonomy))
@classmethod
def decode(cls, packet):
_id, autonomy = unpack('II', packet)
return cls(bool(autonomy))
def __str__(self):
return '<AutonomousDamconPacket autonomy={0!r}>'.format(self.autonomy)
@packet(0x4c821d3c)
class ShipAction1Packet:
@classmethod
def decode(cls, packet):
if not packet:
raise ValueError('No payload in game message')
subtype_index = packet[0]
if subtype_index == 0:
return HelmSetWarpPacket.decode(packet)
if subtype_index == 1:
return SetMainScreenPacket.decode(packet)
if subtype_index == 2:
return SetWeaponsTargetPacket.decode(packet)
if subtype_index == 3:
return ToggleAutoBeamsPacket.decode(packet)
if subtype_index == 4:
return ToggleShieldsPacket.decode(packet)
if subtype_index == 7:
return HelmRequestDockPacket.decode(packet)
if subtype_index == 10:
return ToggleRedAlertPacket.decode(packet)
if subtype_index == 11:
return SetBeamFreqPacket.decode(packet)
if subtype_index == 13:
return SetShipPacket.decode(packet)
if subtype_index == 14:
return SetConsolePacket.decode(packet)
if subtype_index == 15:
return ReadyPacket.decode(packet)
if subtype_index == 16:
return SciSelectPacket.decode(packet)
if subtype_index == 17:
return CaptainSelectPacket.decode(packet)
if subtype_index == 18:
return GameMasterSelectPacket.decode(packet)
if subtype_index == 19:
return SciScanPacket.decode(packet)
if subtype_index == 22:
return SetShipSettingsPacket.decode(packet)
if subtype_index == 24:
return HelmToggleReversePacket.decode(packet)
if subtype_index == 25:
return Ready2Packet.decode(packet)
if subtype_index == 26:
return TogglePerspectivePacket.decode(packet)
if subtype_index == 27:
return ClimbDivePacket.decode(packet)
raise SoftDecodeFailure()
class SciScanPacket(ShipAction1Packet):
def __init__(self, target):
self.target = target
def encode(self):
return pack('II', 19, self.target)
@classmethod
def decode(cls, packet):
_idx, tgt = unpack('II', packet)
return cls(tgt)
def __str__(self):
return "<SciScanPacket target={0!r}>".format(self.target)
class CaptainSelectPacket(ShipAction1Packet):
def __init__(self, object):
self.object = object
def encode(self):
if self.object is not None:
return pack('II', 17, self.object)
else:
return pack('II', 17, 1)
@classmethod
def decode(cls, packet):
_idx, tgt = unpack('II', packet)
if tgt != 1:
return cls(tgt)
else:
return cls(None)
def __str__(self):
return "<CaptainSelectPacket object={0!r}>".format(self.object)
class GameMasterSelectPacket(ShipAction1Packet):
def __init__(self, object):
self.object = object
def encode(self):
if self.object is not None:
return pack('II', 18, self.object)
else:
return pack('II', 18, 1)
@classmethod
def decode(cls, packet):
_idx, tgt = unpack('II', packet)
if tgt != 1:
return cls(tgt)
else:
return cls(None)
def __str__(self):
return "<GameMasterSelectPacket object={0!r}>".format(self.object)
class SciSelectPacket(ShipAction1Packet):
def __init__(self, object):
self.object = object
def encode(self):
if self.object is not None:
return pack('II', 16, self.object)
else:
return pack('II', 16, 1)
@classmethod
def decode(cls, packet):
_idx, tgt = unpack('II', packet)
if tgt != 1:
return cls(tgt)
else:
return cls(None)
def __str__(self):
return "<SciSelectPacket object={0!r}>".format(self.object)
class SetWeaponsTargetPacket(ShipAction1Packet):
def __init__(self, object):
self.object = object
def encode(self):
if self.object is not None:
return pack('II', 2, self.object)
else:
return pack('II', 2, 1)
@classmethod
def decode(cls, packet):
_idx, tgt = unpack('II', packet)
if tgt != 1:
return cls(tgt)
else:
return cls(None)
def __str__(self):
return "<SetWeaponsTargetPacket object={0!r}>".format(self.object)
class SetBeamFreqPacket(ShipAction1Packet):
def __init__(self, freq):
self.freq = freq
def encode(self):
return pack('II', 11, self.freq)
@classmethod
def decode(cls, packet):
_idx, freq = unpack('II', packet)
return cls(freq)
def __str__(self):
return "<SetBeamFreqPacket freq={}>".format(self.freq)
class HelmToggleReversePacket(ShipAction1Packet):
def encode(self):
return b'\x18\x00\x00\x00\x00\x00\x00\x00'
@classmethod
def decode(cls, packet):
if packet != b'\x18\x00\x00\x00\x00\x00\x00\x00':
raise ValueError('Unexpected payload in reverse packet')
return cls()
def __str__(self):
return '<HelmToggleReversePacket>'
class ReadyPacket(ShipAction1Packet):
def encode(self):
return b'\x0f\x00\x00\x00\x00\x00\x00\x00'
@classmethod
def decode(cls, packet):
if packet != b'\x0f\x00\x00\x00\x00\x00\x00\x00':
raise ValueError('Unexpected payload in ready packet')
return cls()
def __str__(self):
return '<ReadyPacket>'
class Ready2Packet(ShipAction1Packet):
def encode(self):
return b'\x19\x00\x00\x00\x00\x00\x00\x00'
@classmethod
def decode(cls, packet):
if packet != b'\x19\x00\x00\x00\x00\x00\x00\x00':
raise ValueError('Unexpected payload in ready2 packet')
return cls()
def __str__(self):
return '<Ready2Packet>'
class SetShipSettingsPacket(ShipAction1Packet):
def __init__(self, drive, type, name):
self.drive = drive
self.type = type
self.name = name
def encode(self):
return pack('IIIIu', 0x16, self.drive.value, self.type.value, 1, self.name)
@classmethod
def decode(cls, packet):
_id, drv, typ, _unk, name = unpack('IIIIu', packet)
return cls(drive=DriveType(drv),
type=ShipType(typ),
name=name)
def __str__(self):
return '<SetShipSettingsPacket drive={0!r} type={1!r} name={2!r}>'.format(self.drive, self.type, self.name)
class HelmRequestDockPacket(ShipAction1Packet):
def encode(self):
return b'\x07\x00\x00\x00\x00\x00\x00\x00'
@classmethod
def decode(cls, data):
if data != b'\x07\x00\x00\x00\x00\x00\x00\x00':
raise SoftDecodeFailure()
return cls()
def __str__(self):
return '<HelmRequestDockPacket>'
class ToggleShieldsPacket(ShipAction1Packet):
def encode(self):
return b'\x04\x00\x00\x00\x00\x00\x00\x00'
@classmethod
def decode(cls, data):
if data != b'\x04\x00\x00\x00\x00\x00\x00\x00':
raise SoftDecodeFailure()
return cls()
def __str__(self):
return '<ToggleShieldsPacket>'
class ToggleRedAlertPacket(ShipAction1Packet):
def encode(self):
return b'\x0a\x00\x00\x00\x00\x00\x00\x00'
@classmethod
def decode(cls, data):
if data != b'\x0a\x00\x00\x00\x00\x00\x00\x00':
raise SoftDecodeFailure()
return cls()
def __str__(self):
return '<ToggleRedAlertPacket>'
class ToggleAutoBeamsPacket(ShipAction1Packet):
def encode(self):
return b'\x03\x00\x00\x00\x00\x00\x00\x00'
@classmethod
def decode(cls, data):
if data != b'\x03\x00\x00\x00\x00\x00\x00\x00':
raise SoftDecodeFailure()
return cls()
def __str__(self):
return '<ToggleAutoBeamsPacket>'
class TogglePerspectivePacket(ShipAction1Packet):
def encode(self):
return b'\x1a\x00\x00\x00\x00\x00\x00\x00'
@classmethod
def decode(cls, data):
if data != b'\x1a\x00\x00\x00\x00\x00\x00\x00':
raise SoftDecodeFailure()
return cls()
def __str__(self):
return '<TogglePerspectivePacket>'
class ClimbDivePacket(ShipAction1Packet):
def __init__(self, direction):
self.direction = direction
def encode(self):
return pack('Ii', 27, self.direction)
@classmethod
def decode(cls, packet):
_id, direction = unpack('Ii', packet)
return cls(direction)
def __str__(self):
return "<ClimbDivePacket direction={0!r}>".format(self.direction)
class SetMainScreenPacket(ShipAction1Packet):
def __init__(self, screen):
self.screen = screen
def encode(self):
return pack('II', 1, self.screen.value)
@classmethod
def decode(cls, packet):
_idx, screen_id = unpack('II', packet)
return cls(MainView(screen_id))
def __str__(self):
return "<SetMainScreenPacket screen={0!r}>".format(self.screen)
class SetConsolePacket(ShipAction1Packet):
def __init__(self, console, selected):
self.console = console
self.selected = selected
def encode(self):
return pack('III', 0x0e, self.console.value, 1 if self.selected else 0)
@classmethod
def decode(cls, packet):
_idx, console_id, selected = unpack('III', packet)
return cls(Console(console_id), bool(selected))
def __str__(self):
return "<SetConsolePacket console={0!r} selected={1!r}>".format(self.console, self.selected)
class HelmSetWarpPacket(ShipAction1Packet):
def __init__(self, warp):
self.warp = warp
def encode(self):
return pack('II', 0, self.warp)
@classmethod
def decode(cls, packet):
_idx, warp = unpack('II', packet)
return cls(warp)
def __str__(self):
return "<HelmSetWarpPacket warp={}>".format(self.warp)
class SetShipPacket(ShipAction1Packet):
def __init__(self, ship):
self.ship = ship
def encode(self):
return pack('II', 0x0d, self.ship)
@classmethod
def decode(cls, packet):
_idx, ship = unpack('II', packet)
return cls(ship)
def __str__(self):
return "<SetShipPacket ship={}>".format(self.ship)
@packet(0x0351a5ac)
class ShipAction3Packet:
@classmethod
def decode(cls, packet):
if not packet:
raise ValueError('No payload in game message')
subtype_index = packet[0]
if subtype_index == 0:
return HelmSetImpulsePacket.decode(packet)
if subtype_index == 1:
return HelmSetSteeringPacket.decode(packet)
if subtype_index == 5:
return HelmJumpPacket.decode(packet)
raise SoftDecodeFailure()
class HelmSetSteeringPacket(ShipAction3Packet):
def __init__(self, rudder):
self.rudder = rudder
def encode(self):
return pack('If', 1, self.rudder)
@classmethod
def decode(cls, packet):
_idx, rudder = unpack('If', packet)
return cls(rudder)
def __str__(self):
return '<HelmSetSteeringPacket rudder={0!r}>'.format(self.rudder)
class HelmSetImpulsePacket(ShipAction3Packet):
def __init__(self, impulse):
self.impulse = impulse
def encode(self):
return pack('If', 0, self.impulse)
@classmethod
def decode(cls, packet):
_idx, impulse = unpack('If', packet)
return cls(impulse)
def __str__(self):
return '<HelmSetImpulsePacket impulse={0!r}>'.format(self.impulse)
class HelmJumpPacket(ShipAction3Packet):
def __init__(self, bearing, distance):
self.bearing = bearing
self.distance = distance
def encode(self):
return pack('Iff', 5, self.bearing / (math.pi * 2), self.distance / 50)
@classmethod
def decode(cls, packet):
_idx, bearing, distance = unpack('Iff', packet)
return cls(bearing * (math.pi * 2), distance * 50)
def __str__(self):
return '<HelmJumpPacket bearing={0!r} distance={1!r}>'.format(self.bearing, self.distance)
@packet(0xb83fd2c4)
class BeamFiredPacket:
def __init__(self, object, port, origin, target, x, y, z, auto):
self.object = object
self.port = port
self.origin = origin
self.target = target
self.x = x
self.y = y
self.z = z
self.auto = auto
def encode(self):
return pack('IIIIIIIIfffI',
self.object, 0, 1200,
self.port,
1, 1,
self.origin, self.target,
self.x, self.y, self.z,
0 if self.auto else 1)
@classmethod
def decode(cls, packet):
object, _unk1, _unk2, port, _unk3, _unk4, origin, target, x, y, z, auto = unpack('IIIIIIIIfffI', packet)
return cls(object, port, origin, target, x, y, z, [True, False][auto])
def __str__(self):
return '<BeamFiredPacket object={object} port={port} origin={origin} target={target} position=({x}, {y}, {z}) automatic={auto!r}>'.format(**self.__dict__)
def encode(packet, provenance=PacketProvenance.client):
encoded_block = packet.encode()
block_len = len(encoded_block)
return (struct.pack('<IIIIII',
0xdeadbeef,
24 + block_len,
provenance.value,
0x00,
4 + block_len,
packet.packet_id) + encoded_block)
def decode(packet, provenance=PacketProvenance.server): # returns packets, trail
if not packet:
return [], b''
de_index = packet.find(0xef)
if de_index > 0:
sys.stderr.write("WARNING: skipping {} bytes of stream to resync\n".format(de_index))
sys.stderr.flush()
packet = packet[de_index:]
elif de_index == -1:
# wtf?
return [], b''
buffer_len = len(packet)
if buffer_len < 24:
return [], packet
header, packet_len, origin, padding, remaining, ptype = struct.unpack('<IIIIII', packet[:24])
if header != 0xdeadbeef:
raise ValueError("Incorrect packet header")
if packet_len < 24:
raise ValueError("Packet too short")
if origin != provenance.value:
raise ValueError("Incorrect packet origin field")
if remaining != packet_len - 20:
raise ValueError("Inconsistent packet length fields")
if buffer_len < packet_len:
return [], packet
trailer = packet[packet_len:]
payload = packet[24:packet_len]
rest, trailer = decode(trailer)
try:
if ptype in PACKETS:
# we know how to decode this one
return [PACKETS[ptype].decode(payload)] + rest, trailer
else:
raise SoftDecodeFailure()
except SoftDecodeFailure: # meaning unhandled bits
return [UndecodedPacket(ptype, payload)] + rest, trailer
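# Illustrative usage sketch (added for clarity, not part of the original module):
# round-trip a packet through the wire framing above. HeartbeatPacket, encode()
# and decode() are defined in this file; PacketProvenance comes from the
# .enumerations star import.
if __name__ == '__main__':
    wire = encode(HeartbeatPacket())  # frame a heartbeat with the client provenance
    packets, trailing = decode(wire, provenance=PacketProvenance.client)
    print(packets[0], repr(trailing))  # expected: <HeartbeatPacket> b''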
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains a classification model and a label agreement model using co-training.
This class makes use of a Trainer for the classification model and a trainer
for the agreement model, and alternatively trains each of them. After each
iteration some unlabeled samples are labeled using the classification model,
such that in the next iteration both models are re-trained using more labeled
data.
Throughout this file, the suffix "_cls" refers to the classification model, and
"_agr" to the agreement model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
from ..data.dataset import CotrainDataset
from ..models.gcn import GCN
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from .trainer_agreement import TrainerAgreement
from .trainer_agreement import TrainerAgreementAlwaysAgree
from .trainer_agreement import TrainerPerfectAgreement
from .trainer_base import Trainer
from .trainer_classification import TrainerClassification
from .trainer_classification import TrainerPerfectClassification
from .trainer_classification_gcn import TrainerClassificationGCN
class TrainerCotraining(Trainer):
"""Trainer for a co-training model with agreement.
Attributes:
model_cls: An object whose type is a subclass of Model, representing the
model for the sample classifier.
model_agr: An object whose type is a subclass of Model, representing the
model for the agreement model.
max_num_iter_cotrain: An integer representing the maximum number of cotrain
iterations to perform.
min_num_iter_cls: An integer representing the minimum number of iterations
to train the classification model for.
max_num_iter_cls: An integer representing the maximum number of iterations
to train the classification model for.
num_iter_after_best_val_cls: An integer representing the number of extra
iterations to perform after improving the validation accuracy of the
classification model.
min_num_iter_agr: An integer representing the minimum number of iterations
to train the agreement model for.
max_num_iter_agr: An integer representing the maximum number of iterations
to train the agreement model.
num_iter_after_best_val_agr: An integer representing the number of extra
iterations to perform after improving the agreement validation accuracy.
num_samples_to_label: Maximum number of samples to self-label after each
cotrain iteration, provided that they have confidence higher than the
min_confidence_new_label threshold.
min_confidence_new_label: A float number between [0, 1] representing the
minimum confidence the prediction for an unlabeled sample needs to have in
order to allow it to be self-labeled. The confidence is the maximum
probability the classification model assigns to any of the classes.
keep_label_proportions: A boolean specifying whether to choose samples for
self-labeling such that we maintain the original label proportions.
num_warm_up_iter_agr: An integer representing the number of times we need to
train the agreement model (i.e. number of cotrain iterations that train
the agreement) before we start using it in the classification model's
loss. While the agreement is not warmed up, the agreement model will
always predict either disagreement, or agreement, by default, depending on
the argument `agree_by_default`.
optimizer: An optimizer.
gradient_clip: A float number representing the maximum gradient norm allowed
if we do gradient clipping. If None, no gradient clipping is performed.
batch_size_agr: An integer representing the batch size of the agreement
model.
batch_size_cls: An integer representing the batch size of the classification
model. This is used for the supervised component of the loss and for
evaluation.
learning_rate_cls: A float representing the learning rate used when training
the classification model.
learning_rate_agr: A float representing the learning rate used when training
the agreement model.
warm_start_cls: Boolean specifying if the classification model is trained
from scratch in every cotrain iteration (if False), or if it continues from
the parameter values in the previous cotrain iteration (if True).
warm_start_agr: Boolean specifying if the agreement model is trained from
scratch in every cotrain iteration (if False), or if it continues from the
parameter values in the previous cotrain iteration (if True).
enable_summaries: Boolean specifying whether to write TensorBoard summaries
for the cotrain progress.
enable_summaries_per_model: Boolean specifying whether to write TensorBoard
summaries for the classification and agreement model progress.
summary_dir: Directory path where to save the Tensorflow summaries.
summary_step_cls: Integer representing the number of iterations after which
to write TensorFlow summaries for the classification model.
summary_step_agr: Integer representing the number of iterations after which
to write TensorFlow summaries for the agreement model.
logging_step_cls: Integer representing the number of iterations after which
to log the loss and other training metrics for the classification model.
logging_step_agr: Integer representing the number of iterations after which
to log the loss and other training metrics for the agreement model.
eval_step_cls: Integer representing the number of iterations after which to
evaluate the classification model.
eval_step_agr: Integer representing the number of iterations after which to
evaluate the agreement model.
checkpoints_step: Integer representing the number of iterations after which
to save checkpoints.
checkpoints_dir: Directory where to save checkpoints.
data_dir: Directory where to write some files that contain self-labeled data
backup.
abs_loss_chg_tol: A float representing the absolute tolerance for checking
if the training loss has converged. If the difference between the current
loss and previous loss is less than `abs_loss_chg_tol`, we count this
iteration towards convergence (see `loss_chg_iter_below_tol`).
rel_loss_chg_tol: A float representing the relative tolerance for checking
if the training loss has converged. If the ratio between the current loss
and previous loss is less than `rel_loss_chg_tol`, we count this iteration
towards convergence (see `loss_chg_iter_below_tol`).
loss_chg_iter_below_tol: An integer representing the number of consecutive
iterations that pass the convergence criteria before stopping training.
use_perfect_agr: Boolean specifying whether to use a perfect agreement model
that peeks at the correct test labels (for debugging only).
use_perfect_cls: Boolean specifying whether to use a perfect classification
model that peeks at the correct test labels (for debugging only).
ratio_valid_agr: Ratio of the labeled sample pairs to use for validation
when training the agreement model.
max_samples_valid_agr: Maximum number of sample pairs to use for validation
when training the agreement model.
weight_decay_cls: Weight for the weight decay term in the classification
model loss.
weight_decay_schedule_cls: Schedule how to adjust the classification weight
decay weight after every cotrain iteration.
weight_decay_agr: Weight for the weight decay term in the agreement model
loss.
weight_decay_schedule_agr: Schedule how to adjust the agreement weight decay
weight after every cotrain iteration.
reg_weight_ll: A float representing the weight of the agreement loss term
component of the classification model loss function, between
labeled-labeled pairs of samples.
reg_weight_lu: A float representing the weight of the agreement loss term
component of the classification model loss function, between
labeled-unlabeled pairs of samples.
reg_weight_uu: A float representing the weight of the agreement loss term
component of the classification model loss function, between
unlabeled-unlabeled pairs of samples.
num_pairs_reg: An integer representing the number of sample pairs of each
type (LL, LU, UU) to include in each computation of the classification
model loss.
reg_weight_vat: A float representing the weight of the virtual adversarial
training (VAT) regularization loss in the classification model loss
function.
use_ent_min: A boolean specifying whether to use entropy regularization with
VAT.
penalize_neg_agr: Whether to not only encourage agreement between samples
that the agreement model believes should have the same label, but also
penalize it when two samples agree although the agreement model predicts
they should disagree.
use_l2_cls: Whether to use L2 loss for classification, as opposed to
whichever loss is specified in the provided model_cls.
first_iter_original: A boolean specifying whether the first cotrain
iteration trains the original classification model (with no agreement
term). We do this to evaluate how well a baseline model would do without
the agreement. If true, there is no self-labeling after the first
iteration, which trains the original model. Self-labeling will be used only in
the iterations that do include the agreement term.
inductive: Boolean specifying whether this is an inductive or transductive
setting. If inductive, then the validation and test labels are never seen
when training the classification model. If transductive, the inputs of the
test and validation samples are available at training time and can be used
in the agreement loss term of the classification model as unsupervised
regularization, and can also be labeled via self-labeling.
seed: An integer representing the seed for the random number generator used
when selecting batches of samples.
eval_acc_pred_by_agr: Boolean specifying whether to evaluate the accuracy of
a model that uses our trained agreement model to make predictions for the
test samples, in a way similar to k-nearest neighbors, where the distance
is given by the agreement model predictions.
num_neighbors_pred_by_agr: An integer representing the number of neighbors
to use when predicting by agreement. Note that this needs to be at least
as much as the number of classes.
load_from_checkpoint: A boolean specifying whether the trained models are
loaded from checkpoint, if one is available. If False, the models are
always trained from scratch.
use_graph: Boolean specifying whether to apply the agreement model on
the graph edges, or otherwise use random pairs of samples.
always_agree: Whether the agreement model should return 1.0 always (i.e. the
samples always agree), to simulate the Neural Graph Machines model.
add_negative_edges_agr:
"""
def __init__(self,
model_cls,
model_agr,
max_num_iter_cotrain,
min_num_iter_cls,
max_num_iter_cls,
num_iter_after_best_val_cls,
min_num_iter_agr,
max_num_iter_agr,
num_iter_after_best_val_agr,
num_samples_to_label,
min_confidence_new_label=0.0,
keep_label_proportions=False,
num_warm_up_iter_agr=1,
optimizer=tf.train.AdamOptimizer,
gradient_clip=None,
batch_size_agr=128,
batch_size_cls=128,
learning_rate_cls=1e-3,
learning_rate_agr=1e-3,
warm_start_cls=False,
warm_start_agr=False,
enable_summaries=True,
enable_summaries_per_model=False,
summary_dir=None,
summary_step_cls=1000,
summary_step_agr=1000,
logging_step_cls=1,
logging_step_agr=1,
eval_step_cls=1,
eval_step_agr=1,
checkpoints_step=None,
checkpoints_dir=None,
data_dir=None,
abs_loss_chg_tol=1e-10,
rel_loss_chg_tol=1e-7,
loss_chg_iter_below_tol=30,
use_perfect_agr=False,
use_perfect_cls=False,
ratio_valid_agr=0,
max_samples_valid_agr=None,
weight_decay_cls=None,
weight_decay_schedule_cls=None,
weight_decay_agr=None,
weight_decay_schedule_agr=None,
reg_weight_ll=0,
reg_weight_lu=0,
reg_weight_uu=0,
num_pairs_reg=100,
reg_weight_vat=0,
use_ent_min=False,
penalize_neg_agr=False,
use_l2_cls=True,
first_iter_original=True,
inductive=False,
seed=None,
eval_acc_pred_by_agr=False,
num_neighbors_pred_by_agr=20,
lr_decay_rate_cls=None,
lr_decay_steps_cls=None,
lr_decay_rate_agr=None,
lr_decay_steps_agr=None,
load_from_checkpoint=False,
use_graph=False,
always_agree=False,
add_negative_edges_agr=False):
assert not enable_summaries or (enable_summaries and
summary_dir is not None)
assert checkpoints_step is None or (checkpoints_step is not None and
checkpoints_dir is not None)
super(TrainerCotraining, self).__init__(
model=None,
abs_loss_chg_tol=abs_loss_chg_tol,
rel_loss_chg_tol=rel_loss_chg_tol,
loss_chg_iter_below_tol=loss_chg_iter_below_tol)
self.model_cls = model_cls
self.model_agr = model_agr
self.max_num_iter_cotrain = max_num_iter_cotrain
self.min_num_iter_cls = min_num_iter_cls
self.max_num_iter_cls = max_num_iter_cls
self.num_iter_after_best_val_cls = num_iter_after_best_val_cls
self.min_num_iter_agr = min_num_iter_agr
self.max_num_iter_agr = max_num_iter_agr
self.num_iter_after_best_val_agr = num_iter_after_best_val_agr
self.num_samples_to_label = num_samples_to_label
self.min_confidence_new_label = min_confidence_new_label
self.keep_label_proportions = keep_label_proportions
self.num_warm_up_iter_agr = num_warm_up_iter_agr
self.optimizer = optimizer
self.gradient_clip = gradient_clip
self.batch_size_agr = batch_size_agr
self.batch_size_cls = batch_size_cls
self.learning_rate_cls = learning_rate_cls
self.learning_rate_agr = learning_rate_agr
self.warm_start_cls = warm_start_cls
self.warm_start_agr = warm_start_agr
self.enable_summaries = enable_summaries
self.enable_summaries_per_model = enable_summaries_per_model
self.summary_step_cls = summary_step_cls
self.summary_step_agr = summary_step_agr
self.summary_dir = summary_dir
self.logging_step_cls = logging_step_cls
self.logging_step_agr = logging_step_agr
self.eval_step_cls = eval_step_cls
self.eval_step_agr = eval_step_agr
self.checkpoints_step = checkpoints_step
self.checkpoints_dir = checkpoints_dir
self.data_dir = data_dir
self.use_perfect_agr = use_perfect_agr
self.use_perfect_cls = use_perfect_cls
self.ratio_valid_agr = ratio_valid_agr
self.max_samples_valid_agr = max_samples_valid_agr
self.weight_decay_cls = weight_decay_cls
self.weight_decay_schedule_cls = weight_decay_schedule_cls
self.weight_decay_agr = weight_decay_agr
self.weight_decay_schedule_agr = weight_decay_schedule_agr
self.reg_weight_ll = reg_weight_ll
self.reg_weight_lu = reg_weight_lu
self.reg_weight_uu = reg_weight_uu
self.num_pairs_reg = num_pairs_reg
self.reg_weight_vat = reg_weight_vat
self.use_ent_min = use_ent_min
self.penalize_neg_agr = penalize_neg_agr
self.use_l2_classif = use_l2_cls
self.first_iter_original = first_iter_original
self.inductive = inductive
self.seed = seed
self.eval_acc_pred_by_agr = eval_acc_pred_by_agr
self.num_neighbors_pred_by_agr = num_neighbors_pred_by_agr
self.lr_decay_rate_cls = lr_decay_rate_cls
self.lr_decay_steps_cls = lr_decay_steps_cls
self.lr_decay_rate_agr = lr_decay_rate_agr
self.lr_decay_steps_agr = lr_decay_steps_agr
self.load_from_checkpoint = load_from_checkpoint
self.use_graph = use_graph
self.always_agree = always_agree
self.add_negative_edges_agr = add_negative_edges_agr
def _select_samples_to_label(self, data, trainer_cls, session):
"""Selects which samples to label next.
Args:
data: A CotrainData object.
trainer_cls: A TrainerClassification object.
session: A TensorFlow Session.
Returns:
selected_samples: numpy array containing the indices of the samples to be
labeled.
selected_labels: numpy array containing the indices of the labels to
assign to each of the selected nodes.
"""
# Select the candidate samples for self-labeling, and make predictions.
# We remove the validation and test samples from the unlabeled data,
# to avoid self-labeling them. We could potentially allow them to be
# self-labeled, but once a node is self-labeled its label is fixed for
# the remaining co-train iterations, so it would not take advantage
# of the improved versions of the model.
indices_unlabeled = data.get_indices_unlabeled()
eval_ind = set(data.get_indices_val()) | set(data.get_indices_test())
indices_unlabeled = np.asarray(
[ind for ind in indices_unlabeled if ind not in eval_ind])
predictions = trainer_cls.predict(
session, indices_unlabeled, is_train=False)
# Select most confident nodes. Compute confidence and most confident label,
# which will be used as the new label.
predicted_label = np.argmax(predictions, axis=-1)
confidence = predictions[np.arange(predicted_label.shape[0]),
predicted_label]
# Sort from most confident to least confident.
indices_sorted = np.argsort(confidence)[::-1]
indices_unlabeled = indices_unlabeled[indices_sorted]
confidence = confidence[indices_sorted]
predicted_label = predicted_label[indices_sorted]
# Keep only samples that have at least min_confidence_new_label confidence.
confident_indices = np.argwhere(
confidence > self.min_confidence_new_label)[:, 0]
if confident_indices.shape[0] == 0:
logging.info(
'No unlabeled nodes with confidence > %.2f. '
'Skipping self-labeling...', self.min_confidence_new_label)
selected_samples = np.zeros((0,), dtype=np.int64)
selected_labels = np.zeros((0,), dtype=np.int64)
return selected_samples, selected_labels
if data.keep_label_proportions:
# Pick the top num_samples_to_label most confident nodes, while making
# sure the ratio of the labels are kept.
# First keep only nodes which achieve the min required confidence.
num_confident = len(confident_indices)
nodes_with_min_conf = indices_unlabeled[:num_confident]
labels_with_min_conf = predicted_label[:num_confident]
# Out of these, select the desired number of samples per class,
# according to class proportions.
selected_samples = []
selected_labels = []
for label, prop in data.label_prop.items():
num_samples_to_select = int(prop * self.num_samples_to_label)
label_idxs = np.where(labels_with_min_conf == label)[0]
if len(label_idxs) <= num_samples_to_select:
# Select all available samples labeled with this label.
selected_samples.append(nodes_with_min_conf[label_idxs])
selected_labels.append(labels_with_min_conf[label_idxs])
elif num_samples_to_select > 0:
# Select the first ones, since they are sorted by confidence.
selected_samples.append(
nodes_with_min_conf[label_idxs][:num_samples_to_select])
selected_labels.append(
labels_with_min_conf[label_idxs][:num_samples_to_select])
selected_samples = np.concatenate(selected_samples)
selected_labels = np.concatenate(selected_labels)
else:
# Pick the top num_samples_to_label most confident nodes,
# irrespective of their labels.
idx = np.amax(confident_indices)
max_idx = min(self.num_samples_to_label - 1, idx)
selected_samples = indices_unlabeled[:max_idx + 1]
selected_labels = predicted_label[:max_idx + 1]
return selected_samples, selected_labels
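# Illustrative sketch (comment only, hypothetical values): the selection above boils
# down to ranking unlabeled samples by the confidence of their argmax prediction:
#   >>> import numpy as np
#   >>> predictions = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
#   >>> predicted_label = np.argmax(predictions, axis=-1)        # array([0, 1, 0])
#   >>> confidence = predictions[np.arange(3), predicted_label]  # array([0.9, 0.8, 0.6])
#   >>> np.argsort(confidence)[::-1]
#   array([0, 1, 2])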
def _extend_label_set(self, data, trainer_cls, session):
"""Extend labeled set by self-labeling with most confident predictions."""
# Select which nodes to label next, and predict their labels.
selected_samples, selected_labels = self._select_samples_to_label(
data, trainer_cls, session)
# Replace the labels of the new nodes with the predicted labels.
if selected_samples.shape[0] > 0:
data.label_samples(selected_samples, selected_labels)
return selected_samples
def train(self, data, **kwargs):
# Create a wrapper around the dataset, that also accounts for some
# cotrain specific attributes and functions.
data = CotrainDataset(
data,
keep_label_proportions=self.keep_label_proportions,
inductive=self.inductive)
if os.path.exists(self.data_dir) and self.load_from_checkpoint:
# If this session is restored from a previous run, then we load the
# self-labeled data from the last checkpoint.
logging.info('Number of labeled samples before restoring: %d',
data.num_train())
logging.info('Restoring self-labeled data from %s...', self.data_dir)
data.restore_state_from_file(self.data_dir)
logging.info('Number of labeled samples after restoring: %d',
data.num_train())
# Build graph.
logging.info('Building graph...')
# Create an iteration counter.
iter_cotrain, iter_cotrain_update = self._create_counter()
if self.use_perfect_agr:
# A perfect agreement model used for debugging purposes.
trainer_agr = TrainerPerfectAgreement(data=data)
else:
with tf.variable_scope('AgreementModel'):
if self.always_agree:
trainer_agr = TrainerAgreementAlwaysAgree(data=data)
else:
trainer_agr = TrainerAgreement(
model=self.model_agr,
data=data,
optimizer=self.optimizer,
gradient_clip=self.gradient_clip,
min_num_iter=self.min_num_iter_agr,
max_num_iter=self.max_num_iter_agr,
num_iter_after_best_val=self.num_iter_after_best_val_agr,
max_num_iter_cotrain=self.max_num_iter_cotrain,
num_warm_up_iter=self.num_warm_up_iter_agr,
warm_start=self.warm_start_agr,
batch_size=self.batch_size_agr,
enable_summaries=self.enable_summaries_per_model,
summary_step=self.summary_step_agr,
summary_dir=self.summary_dir,
logging_step=self.logging_step_agr,
eval_step=self.eval_step_agr,
abs_loss_chg_tol=self.abs_loss_chg_tol,
rel_loss_chg_tol=self.rel_loss_chg_tol,
loss_chg_iter_below_tol=self.loss_chg_iter_below_tol,
checkpoints_dir=self.checkpoints_dir,
weight_decay=self.weight_decay_agr,
weight_decay_schedule=self.weight_decay_schedule_agr,
agree_by_default=False,
percent_val=self.ratio_valid_agr,
max_num_samples_val=self.max_samples_valid_agr,
seed=self.seed,
lr_decay_rate=self.lr_decay_rate_agr,
lr_decay_steps=self.lr_decay_steps_agr,
lr_initial=self.learning_rate_agr,
use_graph=self.use_graph,
add_negative_edges=self.add_negative_edges_agr)
if self.use_perfect_cls:
# A perfect classification model used for debugging purposes.
trainer_cls = TrainerPerfectClassification(data=data)
else:
with tf.variable_scope('ClassificationModel'):
trainer_cls_class = (
TrainerClassificationGCN
if isinstance(self.model_cls, GCN) else TrainerClassification)
trainer_cls = trainer_cls_class(
model=self.model_cls,
data=data,
trainer_agr=trainer_agr,
optimizer=self.optimizer,
gradient_clip=self.gradient_clip,
batch_size=self.batch_size_cls,
min_num_iter=self.min_num_iter_cls,
max_num_iter=self.max_num_iter_cls,
num_iter_after_best_val=self.num_iter_after_best_val_cls,
max_num_iter_cotrain=self.max_num_iter_cotrain,
reg_weight_ll=self.reg_weight_ll,
reg_weight_lu=self.reg_weight_lu,
reg_weight_uu=self.reg_weight_uu,
num_pairs_reg=self.num_pairs_reg,
reg_weight_vat=self.reg_weight_vat,
use_ent_min=self.use_ent_min,
enable_summaries=self.enable_summaries_per_model,
summary_step=self.summary_step_cls,
summary_dir=self.summary_dir,
logging_step=self.logging_step_cls,
eval_step=self.eval_step_cls,
abs_loss_chg_tol=self.abs_loss_chg_tol,
rel_loss_chg_tol=self.rel_loss_chg_tol,
loss_chg_iter_below_tol=self.loss_chg_iter_below_tol,
warm_start=self.warm_start_cls,
checkpoints_dir=self.checkpoints_dir,
weight_decay=self.weight_decay_cls,
weight_decay_schedule=self.weight_decay_schedule_cls,
penalize_neg_agr=self.penalize_neg_agr,
use_l2_classif=self.use_l2_classif,
first_iter_original=self.first_iter_original,
seed=self.seed,
iter_cotrain=iter_cotrain,
lr_decay_rate=self.lr_decay_rate_cls,
lr_decay_steps=self.lr_decay_steps_cls,
lr_initial=self.learning_rate_cls,
use_graph=self.use_graph)
# Create a saver which saves only the variables that we would need to
# restore in case the training process is restarted.
vars_to_save = [iter_cotrain
] + trainer_agr.vars_to_save + trainer_cls.vars_to_save
saver = tf.train.Saver(vars_to_save)
# Create a TensorFlow session. We allow soft placement in order to place
# any supported ops on GPU. The allow_growth option lets our process
# progressively use more GPU memory on an as-needed basis, as opposed to
# allocating it all from the beginning.
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
# Create a Tensorflow summary writer, shared by all models.
summary_writer = tf.summary.FileWriter(self.summary_dir, session.graph)
# Initialize the values of all variables and the train dataset iterator.
session.run(tf.global_variables_initializer())
# If a checkpoint with the variables already exists, we restore them.
if self.checkpoints_dir:
checkpts_path_cotrain = os.path.join(self.checkpoints_dir, 'cotrain.ckpt')
if os.path.exists(checkpts_path_cotrain):
if self.load_from_checkpoint:
saver.restore(session, checkpts_path_cotrain)
else:
os.makedirs(checkpts_path_cotrain)
else:
checkpts_path_cotrain = None
# Create a progress bar showing how many samples are labeled.
pbar = tqdm(
total=data.num_samples - data.num_train(), desc='self-labeled nodes')
logging.info('Starting co-training...')
step = session.run(iter_cotrain)
stop = step >= self.max_num_iter_cotrain
best_val_acc = -1
test_acc_at_best = -1
iter_at_best = -1
while not stop:
logging.info('----------------- Cotrain step %6d -----------------', step)
# Train the agreement model.
if self.first_iter_original and step == 0:
logging.info('First iteration trains the original classifier. '
'No need to train the agreement model.')
val_acc_agree = None
acc_pred_by_agr = None
else:
val_acc_agree = trainer_agr.train(
data, session=session, summary_writer=summary_writer)
if self.eval_acc_pred_by_agr:
# Evaluate the prediction accuracy of a majority-vote model that uses
# the agreement model.
logging.info('Computing agreement majority vote predictions on '
'test data...')
acc_pred_by_agr = trainer_agr.predict_label_by_agreement(
session, data.get_indices_test(), self.num_neighbors_pred_by_agr)
else:
acc_pred_by_agr = None
# Train classification model.
test_acc, val_acc = trainer_cls.train(
data, session=session, summary_writer=summary_writer)
if val_acc > best_val_acc:
best_val_acc = val_acc
test_acc_at_best = test_acc
iter_at_best = step
if self.enable_summaries:
summary = tf.Summary()
summary.value.add(tag='cotrain/test_acc', simple_value=test_acc)
summary.value.add(tag='cotrain/val_acc', simple_value=val_acc)
if val_acc_agree is not None:
summary.value.add(
tag='cotrain/val_acc_agree', simple_value=val_acc_agree)
if acc_pred_by_agr is not None:
summary.value.add(
tag='cotrain/acc_predict_by_agreement',
simple_value=acc_pred_by_agr)
summary_writer.add_summary(summary, step)
summary_writer.flush()
logging.info(
'--------- Cotrain step %6d | Accuracy val: %10.4f | '
'Accuracy test: %10.4f ---------', step, val_acc, test_acc)
logging.info(
'Best validation acc: %.4f, corresponding test acc: %.4f at '
'iteration %d', best_val_acc, test_acc_at_best, iter_at_best)
if self.first_iter_original and step == 0:
logging.info('No self-labeling because the first iteration trains the '
'original classifier for evaluation purposes.')
step += 1
else:
# Extend labeled set by self-labeling.
logging.info('Self-labeling...')
selected_samples = self._extend_label_set(data, trainer_cls, session)
# If no new data points are added to the training set, stop.
num_new_labels = len(selected_samples)
pbar.update(num_new_labels)
if num_new_labels > 0:
data.compute_dataset_statistics(selected_samples, summary_writer,
step)
else:
logging.info('No new samples labeled. Stopping...')
stop = True
step += 1
stop |= step >= self.max_num_iter_cotrain
# Save model and dataset state in case of process preemption.
if self.checkpoints_step and step % self.checkpoints_step == 0:
self._save_state(saver, session, data, checkpts_path_cotrain)
session.run(iter_cotrain_update)
logging.info('________________________________________________________')
logging.info(
'Best validation acc: %.4f, corresponding test acc: %.4f at '
'iteration %d', best_val_acc, test_acc_at_best, iter_at_best)
pbar.close()
def _create_counter(self):
"""Creates a cotrain iteration counter."""
iter_cotrain = tf.get_variable(
name='iter_cotrain',
initializer=tf.constant(0, name='iter_cotrain'),
use_resource=True,
trainable=False)
iter_cotrain_update = iter_cotrain.assign_add(1)
return iter_cotrain, iter_cotrain_update
def _save_state(self, saver, session, data, checkpts_path):
"""Saves the model and dataset state to files."""
# Save variable state
if checkpts_path:
logging.info('Saving cotrain checkpoint at %s.', checkpts_path)
saver.save(session, checkpts_path, write_meta_graph=False)
# Save dataset state.
if self.data_dir:
logging.info('Saving self-labeled dataset backup.')
data.save_state_to_file(self.data_dir)
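# Standalone sketch (illustrative only, not used by the trainer above): per-class
# quota selection as in _select_samples_to_label when keep_label_proportions is True.
# Inputs are assumed to be sorted by decreasing confidence; label_prop maps each
# label to its desired proportion of newly labeled samples.
def _demo_select_with_label_proportions(node_ids, labels, label_prop, num_to_label):
  import numpy as np
  selected_ids, selected_labels = [], []
  for label, prop in label_prop.items():
    quota = int(prop * num_to_label)
    # Indices of the (at most `quota`) most confident nodes with this label.
    idxs = np.where(labels == label)[0][:quota]
    selected_ids.append(node_ids[idxs])
    selected_labels.append(labels[idxs])
  return np.concatenate(selected_ids), np.concatenate(selected_labels)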
|
|
from datetime import datetime, timedelta
import logging
from urllib.parse import urlencode
from django import forms
from django.contrib import messages
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.http import Http404, HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse, reverse_lazy
from django.views.generic import DeleteView, DetailView, FormView, ListView, TemplateView, UpdateView, View
from zentral.core.stores import frontend_store, stores
from zentral.core.stores.views import EventsView, FetchEventsView, EventsStoreRedirectView
from zentral.utils.charts import make_dataset
from .feeds import FeedError, sync_feed
from .forms import (CreateProbeForm, ProbeSearchForm,
InventoryFilterForm, MetadataFilterForm, PayloadFilterFormSet,
AddFeedForm, ImportFeedProbeForm,
CloneProbeForm, UpdateProbeForm)
from .models import Feed, FeedProbe, ProbeSource
logger = logging.getLogger("zentral.core.probes.views")
class IndexView(PermissionRequiredMixin, ListView):
permission_required = "probes.view_probesource"
model = ProbeSource
paginate_by = 50
template_name = "core/probes/index.html"
def get(self, request, *args, **kwargs):
qd = self.request.GET.copy()
if 'status' not in qd:
qd['status'] = 'ACTIVE'
self.form = ProbeSearchForm(qd)
self.form.is_valid()
return super().get(request, *args, **kwargs)
def get_queryset(self):
return self.form.get_queryset()
def get_context_data(self, **kwargs):
ctx = super(IndexView, self).get_context_data(**kwargs)
ctx['probes'] = True
ctx['form'] = self.form
page = ctx['page_obj']
if page.has_next():
qd = self.request.GET.copy()
qd['page'] = page.next_page_number()
ctx['next_url'] = "?{}".format(qd.urlencode())
if page.has_previous():
qd = self.request.GET.copy()
qd['page'] = page.previous_page_number()
ctx['previous_url'] = "?{}".format(qd.urlencode())
bc = []
if page.number > 1:
qd = self.request.GET.copy()
qd.pop("page", None)
reset_link = "?{}".format(qd.urlencode())
else:
reset_link = None
if not self.form.is_initial():
bc.append((reverse("probes:index"), "Probes"))
bc.append((reset_link, "Search"))
else:
bc.append((reset_link, "Probes"))
bc.append((None, "page {} of {}".format(page.number, page.paginator.num_pages)))
ctx["breadcrumbs"] = bc
return ctx
class CreateProbeView(PermissionRequiredMixin, FormView):
permission_required = "probes.add_probesource"
form_class = CreateProbeForm
template_name = "core/probes/form.html"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['probes'] = True
ctx['title'] = "Create event probe"
return ctx
def form_valid(self, form):
probe_source = form.save()
return HttpResponseRedirect(probe_source.get_absolute_url())
class ProbeView(PermissionRequiredMixin, DetailView):
permission_required = "probes.view_probesource"
model = ProbeSource
def get_context_data(self, **kwargs):
ctx = super(ProbeView, self).get_context_data(**kwargs)
ctx['probes'] = True
ctx['probe'] = self.probe = self.object.load()
if self.probe.loaded:
ctx['add_action_urls'] = [
(action.name, reverse("probes:edit_action", args=(self.object.id, action.name)))
for action in self.probe.not_configured_actions()
]
ctx['show_events_link'] = frontend_store.probe_events
store_links = []
for store in stores.iter_events_url_store_for_user("probe", self.request.user):
url = "{}?{}".format(
reverse("probes:probe_events_store_redirect", args=(self.probe.pk,)),
urlencode({"es": store.name,
"tr": ProbeEventsView.default_time_range})
)
store_links.append((url, store.name))
ctx["store_links"] = store_links
ctx["show_dashboard_link"] = frontend_store.probe_events_aggregations
return ctx
def get_template_names(self):
if self.probe.loaded:
return [self.probe.template_name]
else:
return ["core/probes/syntax_error.html"]
class ProbeDashboardView(PermissionRequiredMixin, DetailView):
permission_required = "probes.view_probesource"
model = ProbeSource
template_name = "core/probes/probe_dashboard.html"
def get_context_data(self, **kwargs):
ctx = super(ProbeDashboardView, self).get_context_data(**kwargs)
ctx['probes'] = True
ctx['probe'] = self.probe = self.object.load()
if self.probe.loaded:
ctx['aggregations'] = self.probe.get_aggregations()
return ctx
class ProbeDashboardDataView(PermissionRequiredMixin, View):
permission_required = "probes.view_probesource"
INTERVAL_DATE_FORMAT = {
"hour": "%H:%M",
"day": "%d/%m",
"week": "%d/%m",
"month": "%m/%y",
}
def get(self, request, *args, **kwargs):
probe_source = get_object_or_404(ProbeSource, pk=kwargs["pk"])
probe = probe_source.load()
charts = {}
from_dt = datetime.utcnow() - timedelta(days=30)
for field, results in frontend_store.get_probe_events_aggregations(probe, from_dt).items():
a_type = results["type"]
if a_type == "table":
aggregation = probe.get_aggregations()[field]
columns = aggregation["columns"]
data = []
for row in results["values"]:
for k, v in row.items():
if v is None:
row[k] = "-"
data.append(row)
top_results = aggregation.get("top", False)
if not top_results:
data.sort(key=lambda d: [d[fn].lower() for fn, _ in columns])
labels = [l for _, l in columns]
labels.append("Event count")
chart_config = {
"type": "table",
"data": {
"labels": labels,
"datasets": [
{"data": data}
]
}
}
elif a_type == "terms":
chart_config = {
"type": "doughnut",
"data": {
"labels": ["Other" if l is None else l for l, _ in results["values"]],
"datasets": [make_dataset([v for _, v in results["values"]])],
}
}
elif a_type == "date_histogram":
date_format = self.INTERVAL_DATE_FORMAT.get(results["interval"], "day")
chart_config = {
"type": "bar",
"data": {
"labels": [l.strftime(date_format) for l, _ in results["values"]],
"datasets": [make_dataset([v for _, v in results["values"]],
cycle_colors=False,
label="event number")]
}
}
else:
logger.error("Unknown aggregation type %s", a_type)
continue
charts[field] = chart_config
return JsonResponse(charts)
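# For reference (illustrative values only), a "terms" aggregation above yields a
# chart config of the form
#   {"type": "doughnut",
#    "data": {"labels": ["Other", "foo"],
#             "datasets": [make_dataset([12, 3])]}}
# where make_dataset() builds the dataset dict consumed by the frontend charts.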
class EventsMixin:
permission_required = "probes.view_probesource"
store_method_scope = "probe"
def get_object(self, **kwargs):
return get_object_or_404(ProbeSource, pk=kwargs['pk'])
def get_fetch_kwargs_extra(self):
return {"probe": self.object}
def get_fetch_url(self):
return reverse("probes:fetch_probe_events", args=(self.object.pk,))
def get_redirect_url(self):
return reverse("probes:probe_events", args=(self.object.pk,))
def get_store_redirect_url(self):
return reverse("probes:probe_events_store_redirect", args=(self.object.pk,))
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["probes"] = True
ctx["probe_source"] = self.object
ctx["probe"] = self.object.load()
return ctx
class ProbeEventsView(EventsMixin, EventsView):
template_name = "core/probes/probe_events.html"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["breadcrumbs"] = [
(reverse('probes:index'), 'Probes'),
(reverse('probes:probe', args=(self.object.pk,)), self.object.name),
(None, "events")
]
return ctx
class FetchProbeEventsView(EventsMixin, FetchEventsView):
pass
class ProbeEventsStoreRedirectView(EventsMixin, EventsStoreRedirectView):
pass
class UpdateProbeView(PermissionRequiredMixin, UpdateView):
permission_required = "probes.change_probesource"
model = ProbeSource
form_class = UpdateProbeForm
template_name = "core/probes/form.html"
def get_context_data(self, **kwargs):
ctx = super(UpdateProbeView, self).get_context_data(**kwargs)
ctx['probes'] = True
probe_source = ctx['object']
probe = probe_source.load()
ctx["probe"] = probe
return ctx
class DeleteProbeView(PermissionRequiredMixin, DeleteView):
permission_required = "probes.delete_probesource"
model = ProbeSource
template_name = "core/probes/delete.html"
success_url = reverse_lazy('probes:index')
def get_context_data(self, **kwargs):
ctx = super(DeleteProbeView, self).get_context_data(**kwargs)
ctx['probes'] = True
return ctx
class CloneProbeView(PermissionRequiredMixin, FormView):
permission_required = "probes.add_probesource"
template_name = "core/probes/clone.html"
form_class = CloneProbeForm
def dispatch(self, request, *args, **kwargs):
self.probe_source = get_object_or_404(ProbeSource, pk=kwargs["pk"])
return super().dispatch(request, *args, **kwargs)
def get_initial(self):
return {"name": "{} (clone)".format(self.probe_source.name)}
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['probe_source'] = self.probe_source
return ctx
def form_valid(self, form):
new_probe = form.save(self.probe_source)
return HttpResponseRedirect(new_probe.get_absolute_url())
class ReviewProbeUpdateView(PermissionRequiredMixin, TemplateView):
permission_required = "probes.change_probesource"
template_name = "core/probes/review_update.html"
def dispatch(self, request, *args, **kwargs):
self.probe_source = get_object_or_404(ProbeSource, pk=kwargs["pk"])
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
ctx["probes"] = True
ctx["probe_source"] = self.probe_source
ctx["update_diff"] = self.probe_source.update_diff()
return ctx
def post(self, request, *args, **kwargs):
action = request.POST["action"]
if action == "skip":
self.probe_source.skip_update()
messages.warning(request, "Probe update skipped")
elif action == "apply":
self.probe_source.apply_update()
messages.success(request, "Probe updated")
return HttpResponseRedirect(self.probe_source.get_absolute_url())
# Actions
class EditActionView(PermissionRequiredMixin, FormView):
permission_required = "probes.change_probesource"
template_name = "core/probes/action_form.html"
def dispatch(self, request, *args, **kwargs):
self.probe_source = get_object_or_404(ProbeSource, pk=kwargs["pk"])
self.probe = self.probe_source.load()
from zentral.core.actions import actions as available_actions
try:
self.action = available_actions[kwargs["action"]]
except KeyError:
raise Http404
return super().dispatch(request, *args, **kwargs)
def get_form_class(self):
return self.action.action_form_class
def get_initial(self):
for action, action_config_d in self.probe.actions:
if action.name == self.action.name:
self.add_action = False
return action_config_d or {}
self.add_action = True
return {}
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["config_d"] = self.action.config_d
return kwargs
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['action'] = self.action
ctx['add_action'] = self.add_action
ctx['probe_source'] = self.probe_source
ctx['probe'] = self.probe
return ctx
def form_valid(self, form):
self.probe_source.update_action(self.action.name,
form.get_action_config_d())
return super().form_valid(form)
def get_success_url(self):
return self.probe_source.get_actions_absolute_url()
class DeleteActionView(PermissionRequiredMixin, TemplateView):
permission_required = "probes.change_probesource"
template_name = "core/probes/delete_action.html"
def dispatch(self, request, *args, **kwargs):
self.probe_source = get_object_or_404(ProbeSource, pk=kwargs["pk"])
self.probe = self.probe_source.load()
from zentral.core.actions import actions as available_actions
try:
self.action = available_actions[kwargs["action"]]
except KeyError:
raise Http404
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['action'] = self.action
ctx['probe_source'] = self.probe_source
ctx['probe'] = self.probe
return ctx
def post(self, request, *args, **kwargs):
self.probe_source.delete_action(self.action.name)
return HttpResponseRedirect(self.probe_source.get_actions_absolute_url())
# Filters
class AddFilterView(PermissionRequiredMixin, FormView):
permission_required = "probes.change_probesource"
def dispatch(self, request, *args, **kwargs):
self.probe_source = get_object_or_404(ProbeSource, pk=kwargs["pk"])
self.probe = self.probe_source.load()
self.section = kwargs["section"]
return super().dispatch(request, *args, **kwargs)
def get_template_names(self):
return ["core/probes/{}_filter_form.html".format(self.section),
"core/probes/filter_form.html"]
def get_form_class(self):
if self.section == "inventory":
return InventoryFilterForm
elif self.section == "metadata":
return MetadataFilterForm
elif self.section == "payload":
return PayloadFilterFormSet
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['probe_source'] = self.probe_source
ctx['probe'] = self.probe
ctx['section'] = self.section
ctx['add_filter'] = True
return ctx
def form_valid(self, form):
self.probe_source.append_filter(self.section,
form.get_serialized_filter())
return super().form_valid(form)
def get_success_url(self):
return self.probe_source.get_filters_absolute_url()
class UpdateFilterView(PermissionRequiredMixin, FormView):
permission_required = "probes.change_probesource"
def dispatch(self, request, *args, **kwargs):
self.probe_source = get_object_or_404(ProbeSource, pk=kwargs["pk"])
self.probe = self.probe_source.load()
self.section = kwargs["section"]
self.filter_id = int(kwargs["filter_id"])
try:
self.filter = getattr(self.probe, "{}_filters".format(self.section), [])[self.filter_id]
except IndexError:
raise Http404
return super().dispatch(request, *args, **kwargs)
def get_template_names(self):
return ["core/probes/{}_filter_form.html".format(self.section),
"core/probes/filter_form.html"]
def get_form_class(self):
if self.section == "inventory":
return InventoryFilterForm
elif self.section == "metadata":
return MetadataFilterForm
elif self.section == "payload":
return PayloadFilterFormSet
def get_initial(self):
return self.get_form_class().get_initial(self.filter)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['probe_source'] = self.probe_source
ctx['probe'] = self.probe
ctx['section'] = self.section
ctx['add_filter'] = False
return ctx
def form_valid(self, form):
self.probe_source.update_filter(self.section, self.filter_id,
form.get_serialized_filter())
return super().form_valid(form)
def get_success_url(self):
return self.probe_source.get_filters_absolute_url()
class DeleteFilterView(PermissionRequiredMixin, TemplateView):
permission_required = "probes.change_probesource"
template_name = "core/probes/delete_filter.html"
def dispatch(self, request, *args, **kwargs):
self.probe_source = get_object_or_404(ProbeSource, pk=kwargs["pk"])
self.probe = self.probe_source.load()
self.filter_id = int(kwargs["filter_id"])
self.section = kwargs["section"]
try:
self.filter = getattr(self.probe, "{}_filters".format(self.section), [])[self.filter_id]
except IndexError:
raise Http404
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['probe_source'] = self.probe_source
ctx['probe'] = self.probe
ctx['section'] = self.section
return ctx
def post(self, request, *args, **kwargs):
self.probe_source.delete_filter(self.section, self.filter_id)
return HttpResponseRedirect(self.probe_source.get_filters_absolute_url())
# Item views, used by other probes
class BaseProbeItemView(PermissionRequiredMixin, FormView):
permission_required = "probes.change_probesource"
probe_item_attribute = None
success_anchor = None
permission = None
def do_setup(self, **kwargs):
self.probe_source = get_object_or_404(ProbeSource, pk=kwargs["probe_id"])
self.redirect_url = self.probe_source.get_absolute_url(self.success_anchor)
self.probe = self.probe_source.load()
if self.permission and not getattr(self.probe, self.permission):
return HttpResponseRedirect(self.redirect_url)
def dispatch(self, request, *args, **kwargs):
response = self.do_setup(**kwargs)
if response:
return response
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["probes"] = True
ctx['probe_source'] = self.probe_source
ctx['probe'] = self.probe
ctx['add_item'] = False
ctx['cancel_url'] = self.redirect_url
return ctx
def get_success_url(self):
return self.redirect_url
def form_valid(self, form):
item_d = form.get_item_d()
func = self.get_update_func(item_d)
self.probe_source.update_body(func)
return super().form_valid(form)
class AddProbeItemView(BaseProbeItemView):
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["add_item"] = True
return ctx
def get_update_func(self, item_d):
def func(probe_d):
items = probe_d.setdefault(self.probe_item_attribute, [])
items.append(item_d)
return func
class EditProbeItemView(BaseProbeItemView):
item_pk_kwarg = None
def do_setup(self, **kwargs):
response = super().do_setup(**kwargs)
if response:
return response
self.item_id = int(kwargs[self.item_pk_kwarg])
self.items = getattr(self.probe, self.probe_item_attribute, [])
try:
self.item = self.items[self.item_id]
except IndexError:
return HttpResponseRedirect(self.redirect_url)
class UpdateProbeItemView(EditProbeItemView):
def get_initial(self):
return self.form_class.get_initial(self.item)
def get_update_func(self, item_d):
def func(probe_d):
probe_d[self.probe_item_attribute][self.item_id] = item_d
return func
class DeleteForm(forms.Form):
def get_item_d(self):
return {}
class DeleteProbeItemView(EditProbeItemView):
form_class = DeleteForm
def get_update_func(self, item_d):
def func(probe_d):
probe_d[self.probe_item_attribute].pop(self.item_id)
if not probe_d[self.probe_item_attribute]:
probe_d.pop(self.probe_item_attribute)
return func
# feeds
class FeedsView(PermissionRequiredMixin, ListView):
permission_required = "probes.view_feed"
template_name = "core/probes/feeds.html"
model = Feed
paginate_by = 10
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['probes'] = True
# pagination
page = ctx['page_obj']
if page.has_next():
qd = self.request.GET.copy()
qd['page'] = page.next_page_number()
ctx['next_url'] = "?{}".format(qd.urlencode())
if page.has_previous():
qd = self.request.GET.copy()
qd['page'] = page.previous_page_number()
ctx['previous_url'] = "?{}".format(qd.urlencode())
bc = [(reverse('probes:index'), 'Probes')]
if page.number > 1:
qd = self.request.GET.copy()
qd.pop("page", None)
reset_link = "?{}".format(qd.urlencode())
else:
reset_link = None
paginator = page.paginator
if paginator.count:
count = paginator.count
pluralize = min(1, count - 1) * 's'  # 's' unless count == 1
bc.extend([(reset_link, '{} feed{}'.format(count, pluralize)),
(None, "page {} of {}".format(page.number, paginator.num_pages))])
else:
bc.append((None, "no feeds"))
ctx['breadcrumbs'] = bc
return ctx
class AddFeedView(PermissionRequiredMixin, FormView):
permission_required = "probes.add_feed"
form_class = AddFeedForm
template_name = "core/probes/add_feed.html"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['probes'] = True
ctx['title'] = "Add feed"
return ctx
def form_valid(self, form):
feed, created = form.save()
return HttpResponseRedirect(feed.get_absolute_url())
class FeedView(PermissionRequiredMixin, DetailView):
permission_required = "probes.view_feed"
template_name = "core/probes/feed.html"
model = Feed
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['probes'] = True
ctx['active_probes'] = list(self.object.feedprobe_set.filter(archived_at__isnull=True))
return ctx
class SyncFeedView(PermissionRequiredMixin, View):
permission_required = "probes.change_feed"
def post(self, request, *args, **kwargs):
feed = get_object_or_404(Feed, pk=int(kwargs["pk"]))
try:
operations = sync_feed(feed)
except FeedError as e:
messages.error(request, "Could not sync feed. {}".format(e.message))
else:
if operations:
msg = "Probes {}.".format(", ".join("{}: {}".format(l, v) for l, v in operations.items()))
else:
msg = "No changes."
messages.info(request, "Sync OK. {}".format(msg))
return HttpResponseRedirect(feed.get_absolute_url())
class DeleteFeedView(PermissionRequiredMixin, DeleteView):
permission_required = "probes.delete_feed"
model = Feed
template_name = "core/probes/delete_feed.html"
success_url = reverse_lazy('probes:feeds')
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['probes'] = True
ctx['title'] = 'Delete feed'
return ctx
class FeedProbeView(PermissionRequiredMixin, DetailView):
permission_required = "probes.view_feedprobe"
template_name = "core/probes/feed_probe.html"
model = FeedProbe
def get_object(self):
return get_object_or_404(self.model, pk=self.kwargs["probe_id"], feed__pk=self.kwargs["pk"])
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["probe_sources"] = list(self.object.probesource_set.all())
return ctx
class ImportFeedProbeView(PermissionRequiredMixin, FormView):
permission_required = ("probes.view_feedprobe", "probes.add_probesource")
form_class = ImportFeedProbeForm
template_name = "core/probes/import_feed_probe.html"
def dispatch(self, request, *args, **kwargs):
self.feed_probe = get_object_or_404(FeedProbe, pk=self.kwargs["probe_id"], feed__pk=self.kwargs["pk"])
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['probes'] = True
ctx['feed_probe'] = self.feed_probe
ctx['feed'] = self.feed_probe.feed
ctx['title'] = "Import feed probe"
return ctx
def form_valid(self, form):
probe_source = form.save(self.feed_probe)
return HttpResponseRedirect(probe_source.get_absolute_url())
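# Illustrative sketch (not part of this module): these views are expected to be wired
# up in a urls.py with app_name = "probes". The URL names come from the reverse()
# calls above; the path strings below are hypothetical.
#
#   from django.urls import path
#   from . import views
#
#   app_name = "probes"
#   urlpatterns = [
#       path("", views.IndexView.as_view(), name="index"),
#       path("<int:pk>/events/", views.ProbeEventsView.as_view(), name="probe_events"),
#       path("feeds/", views.FeedsView.as_view(), name="feeds"),
#   ]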
|
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2018-2019 NVIDIA CORPORATION. All rights reserved.
import torch
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class BoxList(object):
"""
This class represents a set of bounding boxes.
The bounding boxes are represented as a Nx4 Tensor.
In order to uniquely determine the bounding boxes with respect
to an image, we also store the corresponding image dimensions.
They can contain extra information that is specific to each bounding box, such as
labels.
"""
def __init__(self, bbox, image_size, mode="xyxy"):
device = bbox.device if isinstance(bbox, torch.Tensor) else torch.device("cpu")
bbox = torch.as_tensor(bbox, dtype=torch.float32, device=device)
if bbox.ndimension() != 2:
raise ValueError(
"bbox should have 2 dimensions, got {}".format(bbox.ndimension())
)
if bbox.size(-1) != 4:
raise ValueError(
"last dimenion of bbox should have a "
"size of 4, got {}".format(bbox.size(-1))
)
if mode not in ("xyxy", "xywh"):
raise ValueError("mode should be 'xyxy' or 'xywh'")
self.bbox = bbox
self.size = image_size # (image_width, image_height)
self.mode = mode
self.extra_fields = {}
def add_field(self, field, field_data):
self.extra_fields[field] = field_data
def get_field(self, field):
return self.extra_fields[field]
def has_field(self, field):
return field in self.extra_fields
def fields(self):
return list(self.extra_fields.keys())
def _copy_extra_fields(self, bbox):
for k, v in bbox.extra_fields.items():
self.extra_fields[k] = v
def convert(self, mode):
if mode not in ("xyxy", "xywh"):
raise ValueError("mode should be 'xyxy' or 'xywh'")
if mode == self.mode:
return self
# we only have two modes, so don't need to check
# self.mode
xmin, ymin, xmax, ymax = self._split_into_xyxy()
if mode == "xyxy":
bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
bbox = BoxList(bbox, self.size, mode=mode)
else:
TO_REMOVE = 1
bbox = torch.cat(
(xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE), dim=-1
)
bbox = BoxList(bbox, self.size, mode=mode)
bbox._copy_extra_fields(self)
return bbox
def _split_into_xyxy(self):
if self.mode == "xyxy":
xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1)
return xmin, ymin, xmax, ymax
elif self.mode == "xywh":
TO_REMOVE = 1
xmin, ymin, w, h = self.bbox.split(1, dim=-1)
return (
xmin,
ymin,
xmin + (w - TO_REMOVE).clamp(min=0),
ymin + (h - TO_REMOVE).clamp(min=0),
)
else:
raise RuntimeError("Should not be here")
def resize(self, size, *args, **kwargs):
"""
Returns a resized copy of this bounding box
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
"""
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
if ratios[0] == ratios[1]:
ratio = ratios[0]
scaled_box = self.bbox * ratio
bbox = BoxList(scaled_box, size, mode=self.mode)
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.resize(size, *args, **kwargs)
bbox.add_field(k, v)
return bbox
ratio_width, ratio_height = ratios
xmin, ymin, xmax, ymax = self._split_into_xyxy()
scaled_xmin = xmin * ratio_width
scaled_xmax = xmax * ratio_width
scaled_ymin = ymin * ratio_height
scaled_ymax = ymax * ratio_height
scaled_box = torch.cat(
(scaled_xmin, scaled_ymin, scaled_xmax, scaled_ymax), dim=-1
)
bbox = BoxList(scaled_box, size, mode="xyxy")
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.resize(size, *args, **kwargs)
bbox.add_field(k, v)
return bbox.convert(self.mode)
def transpose(self, method):
"""
Transpose bounding box (flip or rotate in 90 degree steps)
:param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
:py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
:py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
:py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`.
"""
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
image_width, image_height = self.size
xmin, ymin, xmax, ymax = self._split_into_xyxy()
if method == FLIP_LEFT_RIGHT:
TO_REMOVE = 1
transposed_xmin = image_width - xmax - TO_REMOVE
transposed_xmax = image_width - xmin - TO_REMOVE
transposed_ymin = ymin
transposed_ymax = ymax
elif method == FLIP_TOP_BOTTOM:
transposed_xmin = xmin
transposed_xmax = xmax
transposed_ymin = image_height - ymax
transposed_ymax = image_height - ymin
transposed_boxes = torch.cat(
(transposed_xmin, transposed_ymin, transposed_xmax, transposed_ymax), dim=-1
)
bbox = BoxList(transposed_boxes, self.size, mode="xyxy")
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.transpose(method)
bbox.add_field(k, v)
return bbox.convert(self.mode)
def crop(self, box):
"""
Crops a rectangular region from this bounding box. The box is a
4-tuple defining the left, upper, right, and lower pixel
coordinate.
"""
xmin, ymin, xmax, ymax = self._split_into_xyxy()
w, h = box[2] - box[0], box[3] - box[1]
cropped_xmin = (xmin - box[0]).clamp(min=0, max=w)
cropped_ymin = (ymin - box[1]).clamp(min=0, max=h)
cropped_xmax = (xmax - box[0]).clamp(min=0, max=w)
cropped_ymax = (ymax - box[1]).clamp(min=0, max=h)
# TODO should I filter empty boxes here?
if False:
is_empty = (cropped_xmin == cropped_xmax) | (cropped_ymin == cropped_ymax)
cropped_box = torch.cat(
(cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1
)
bbox = BoxList(cropped_box, (w, h), mode="xyxy")
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.crop(box)
bbox.add_field(k, v)
return bbox.convert(self.mode)
# Tensor-like methods
def to(self, device, **kwargs):
bbox = BoxList(self.bbox.to(device), self.size, self.mode)
for k, v in self.extra_fields.items():
if hasattr(v, "to"):
#v = v.to(device, **kwargs)
if torch.is_tensor(v):
v_tmp = torch.empty_like(v, device=device)
v_tmp.copy_(v, **kwargs)
v = v_tmp
else:
v = v.to(device, **kwargs)
bbox.add_field(k, v)
return bbox
def pin_memory(self):
bbox = BoxList(self.bbox.pin_memory(), self.size, self.mode)
for k, v in self.extra_fields.items():
if hasattr(v, "pin_memory"):
v = v.pin_memory()
bbox.add_field(k, v)
return bbox
def __getitem__(self, item):
item_type = item.type()
if item_type=='torch.ByteTensor' or item_type=='torch.cuda.ByteTensor':
item = torch.nonzero(item).squeeze(1)
item = item.to(device=self.bbox.device)
bbox = BoxList(self.bbox.index_select(0,item), self.size, self.mode)
for k, v in self.extra_fields.items():
if torch.is_tensor(v):
bbox.add_field(k, v.index_select(0, item))
else:
bbox.add_field(k, v[item])
return bbox
def __len__(self):
return self.bbox.shape[0]
def clip_to_image(self, remove_empty=True):
TO_REMOVE = 1
self.bbox[:, 0].clamp_(min=0, max=self.size[0] - TO_REMOVE)
self.bbox[:, 1].clamp_(min=0, max=self.size[1] - TO_REMOVE)
self.bbox[:, 2].clamp_(min=0, max=self.size[0] - TO_REMOVE)
self.bbox[:, 3].clamp_(min=0, max=self.size[1] - TO_REMOVE)
if remove_empty:
box = self.bbox
keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0])
keep = keep.type(torch.ByteTensor) # cast
return self[keep]
return self
def area(self):
box = self.bbox
if self.mode == "xyxy":
TO_REMOVE = 1
area = (box[:, 2] - box[:, 0] + TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE)
elif self.mode == "xywh":
area = box[:, 2] * box[:, 3]
else:
raise RuntimeError("Should not be here")
return area
def copy_with_fields(self, fields, skip_missing=False):
bbox = BoxList(self.bbox, self.size, self.mode)
if not isinstance(fields, (list, tuple)):
fields = [fields]
for field in fields:
if self.has_field(field):
bbox.add_field(field, self.get_field(field))
elif not skip_missing:
raise KeyError("Field '{}' not found in {}".format(field, self))
return bbox
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_boxes={}, ".format(len(self))
s += "image_width={}, ".format(self.size[0])
s += "image_height={}, ".format(self.size[1])
s += "mode={})".format(self.mode)
return s
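# Minimal sanity-check sketch (illustrative, not part of the original API): the
# "xyxy" <-> "xywh" conversion above uses the inclusive-pixel convention
# TO_REMOVE = 1, i.e. width = xmax - xmin + 1.
def _demo_mode_conversion():
    boxes = BoxList(torch.tensor([[0.0, 0.0, 9.0, 9.0]]), (10, 10), mode="xyxy")
    xywh = boxes.convert("xywh")
    # With TO_REMOVE = 1, the inclusive 10x10 box has width == height == 10.
    assert xywh.bbox.tolist() == [[0.0, 0.0, 10.0, 10.0]]
    return xywh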
if __name__ == "__main__":
bbox = BoxList([[0, 0, 10, 10], [0, 0, 5, 5]], (10, 10))
s_bbox = bbox.resize((5, 5))
print(s_bbox)
print(s_bbox.bbox)
t_bbox = bbox.transpose(0)
print(t_bbox)
print(t_bbox.bbox)
|
|
import os
import numpy as np
import argparse
import time
import librosa
import glob
from preprocess import *
from model import *
from sklearn.utils import shuffle
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from utility import *
def get_files_labels(pattern: str):
files = glob.glob(pattern)
names = []
for f in files:
t = os.path.normpath(f).rsplit(os.sep, maxsplit=1)[1] #'./data/processed/SF2-100008_11.npy'
name = t.rsplit('.', maxsplit=1)[0]
names.append(name)
return files, names
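# Usage sketch (illustrative, hypothetical paths): given files named like
# './data/processed/SF2-100008_11.npy', get_files_labels returns both the full paths
# and the bare names ('SF2-100008_11'), from which the speaker id is split off later:
#   files, names = get_files_labels('./data/processed/*.npy')
#   speaker = names[0].split('-')[0]   # e.g. 'SF2'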
def train(processed_dir: str, test_wav_dir: str):
timestr = time.strftime("%Y-%m-%d-%H-%M", time.localtime()) #like '2018-10-10-14-47'
all_speaker = get_speakers()
label_enc = LabelEncoder()
label_enc.fit(all_speaker)
lambda_cycle = 10
lambda_identity = 5
lambda_classifier = 3
generator_learning_rate = 0.0001
generator_learning_rate_decay = generator_learning_rate / 20000
discriminator_learning_rate = 0.0001
discriminator_learning_rate_decay = discriminator_learning_rate / 20000
domain_classifier_learning_rate = 0.0001
domain_classifier_learning_rate_decay = domain_classifier_learning_rate / 20000
#====================load data================#
print('Loading Data...')
files, names = get_files_labels(os.path.join(processed_dir, '*.npy'))
assert len(files) > 0
normlizer = Normalizer()
exclude_dict = {}  # maps each speaker to the files that do NOT belong to that speaker (e.g. SF1: [TM1**.npy, TM2**.npy, SF2**.npy, ...])
for s in all_speaker:
p = os.path.join(processed_dir, '*.npy') #'./data/processed/*.npy'
temp = [fn for fn in glob.glob(p) if fn.find(s) == -1]
exclude_dict[s] = temp
print('Loading Data Done.')
#====================create model=============#
BATCHSIZE = 1
model = StarGANVC(num_features=FEATURE_DIM, frames=FRAMES, batchsize=BATCHSIZE)
#====================start train==============#
EPOCH = 201
num_samples = len(files)
for epoch in range(1, EPOCH+1, 1):
start_time_epoch = time.time()
files_shuffled, names_shuffled = shuffle(files, names)
for i in range(num_samples // BATCHSIZE):
num_iterations = num_samples // BATCHSIZE * (epoch-1) + i
if num_iterations > 100000:
domain_classifier_learning_rate = max(0, domain_classifier_learning_rate - domain_classifier_learning_rate_decay)
generator_learning_rate = max(0, generator_learning_rate - generator_learning_rate_decay)
discriminator_learning_rate = max(0, discriminator_learning_rate - discriminator_learning_rate_decay)
if discriminator_learning_rate == 0 or generator_learning_rate == 0:
print('Early stop training.')
break
start = i * BATCHSIZE
end = (i + 1) * BATCHSIZE
if end > num_samples:
end = num_samples
X, X_t, y, y_t = [], [], [], []
#get target file paths
batchnames = names_shuffled[start:end]
pre_targets = []
for name in batchnames:
name = name.split(sep='-')[0] #SF1
t = np.random.choice(exclude_dict[name], 1)[0]
pre_targets.append(t)
# one batch of training data
for one_filename, one_name, one_target in zip(files_shuffled[start:end], names_shuffled[start:end], pre_targets):
#target name
t = os.path.normpath(one_target).rsplit(os.sep, maxsplit=1)[1] #'./data/processed/SF2-100008_11.npy'
target_speaker_name = t.rsplit('.', maxsplit=1)[0].split('-')[0]
#source name
speaker_name = one_name.split('-')[0] #SF1
#shape [36,512]
one_file = np.load(one_filename)
one_file = normlizer.forward_process(one_file, speaker_name)
#shape [36,512,1]
one_file = np.reshape(one_file, [one_file.shape[0], one_file.shape[1], 1])
X.append(one_file)
#source label
temp_index = label_enc.transform([speaker_name])[0]
temp_arr_s = np.zeros([
len(all_speaker),
])
temp_arr_s[temp_index] = 1
y.append(temp_arr_s)
#load target files and labels
one_file_t = np.load(one_target)
one_file_t = normlizer.forward_process(one_file_t, target_speaker_name)
#[36,512,1]
one_file_t = np.reshape(one_file_t, [one_file_t.shape[0], one_file_t.shape[1], 1])
X_t.append(one_file_t)
#target label
temp_index_t = label_enc.transform([target_speaker_name])[0]
temp_arr_t = np.zeros([
len(all_speaker),
])
temp_arr_t[temp_index_t] = 1
y_t.append(temp_arr_t)
generator_loss, discriminator_loss, domain_classifier_loss = model.train(\
input_source=X, input_target=X_t, source_label=y, \
target_label=y_t, generator_learning_rate=generator_learning_rate,\
discriminator_learning_rate=discriminator_learning_rate,\
classifier_learning_rate=domain_classifier_learning_rate, \
lambda_identity=lambda_identity, lambda_cycle=lambda_cycle,\
lambda_classifier=lambda_classifier
)
if num_iterations % 10 == 0:
print('Iteration: {:07d},Generator Loss : {:.3f}, Discriminator Loss : {:.3f}, domain_classifier_loss: {:.3f}'\
.format(num_iterations, generator_loss, discriminator_loss, domain_classifier_loss))
#=======================test model==========================
file_path = os.path.join('out/', f'{epoch}_{timestr}')
if epoch % 10 == 0:
print('============test model============')
# output path
os.makedirs(file_path, exist_ok=True)
tempfiles = []
for one_speaker in all_speaker:
p = os.path.join(test_wav_dir, f'{one_speaker}/*.wav')
wavs = glob.glob(p)
tempfiles.append(wavs[0])
tempfiles.append(wavs[1]) #'./data/fourspeakers_test/200006.wav'
for one_file in tempfiles:
_, speaker, name = os.path.normpath(one_file).rsplit(os.sep, maxsplit=2)
wav_, fs = librosa.load(one_file, sr=SAMPLE_RATE, mono=True, dtype=np.float64)
wav, pad_length = pad_wav_to_get_fixed_frames(wav_, frames=FRAMES)
f0, timeaxis = pyworld.harvest(wav, fs)
sp = pyworld.cheaptrick(wav, f0, timeaxis, fs, fft_size=FFTSIZE)
ap = pyworld.d4c(wav, f0, timeaxis, fs, fft_size=FFTSIZE)
coded_sp = pyworld.code_spectral_envelope(sp, fs, FEATURE_DIM)
# split one audio file into multiple fixed-length slices (one_test_sample); every slice is one model input
one_test_sample = []
csp_transpose = coded_sp.T  # shape [FEATURE_DIM, n_frames], e.g. 36x512
for i in range(0, csp_transpose.shape[1] - FRAMES + 1, FRAMES):
t = csp_transpose[:, i:i + FRAMES]
t = normlizer.forward_process(t, speaker)
t = np.reshape(t, [t.shape[0], t.shape[1], 1])
one_test_sample.append(t)
# target label: rotate the speaker index by 2 (e.g. with 4 speakers, 0->2, 1->3, 2->0, 3->1)
one_test_sample_label = np.zeros([len(one_test_sample), len(all_speaker)])
temp_index = label_enc.transform([speaker])[0]
temp_index = (temp_index + 2) % len(all_speaker)
for i in range(len(one_test_sample)):
one_test_sample_label[i][temp_index] = 1
# get conversion target name, like SF1
target_name = label_enc.inverse_transform([temp_index])[0]
generated_results = model.test(one_test_sample, one_test_sample_label)
reshpaped_res = []
for one in generated_results:
t = np.reshape(one, [one.shape[0], one.shape[1]])
t = normlizer.backward_process(t, target_name)
reshpaped_res.append(t)
# collect the generated slices and concatenate them into a representation of the whole audio
c = []
for one_slice in reshpaped_res:
one_slice = np.ascontiguousarray(one_slice.T, dtype=np.float64)
decoded_sp = pyworld.decode_spectral_envelope(one_slice, SAMPLE_RATE, fft_size=FFTSIZE)
c.append(decoded_sp)
concated = np.concatenate((c), axis=0)
#f0 convert
f0 = normlizer.pitch_conversion(f0, speaker, target_name)
synwav = pyworld.synthesize(f0, concated, ap, fs)
# remove the padded length from the synthesized wav
synwav = synwav[:-pad_length]
#save synthesized wav to file
wavname = f'{speaker}-{target_name}+{name}'
wavpath = os.path.join(file_path, 'wavs')
if not os.path.exists(wavpath):
os.makedirs(wavpath, exist_ok=True)
librosa.output.write_wav(f'{wavpath}/{wavname}', synwav, sr=fs)
print(f'[save]:{wavpath}/{wavname}')
print('============test finished!============')
if epoch % 10 == 0:
print('============save model============')
model_path = os.path.join(file_path, 'model')
os.makedirs(model_path, exist_ok=True)
print(f'[save]: {model_path}')
model.save(directory=model_path, filename=MODEL_NAME)
end_time_epoch = time.time()
time_elapsed_epoch = end_time_epoch - start_time_epoch
print('Time Elapsed for Epoch %d: %02d:%02d:%02d' % (epoch, time_elapsed_epoch // 3600, (time_elapsed_epoch % 3600 // 60),
(time_elapsed_epoch % 60 // 1)))
if __name__ == '__main__':
processed_dir = './data/processed'
test_wav_dir = './data/fourspeakers_test'
parser = argparse.ArgumentParser(description='Train StarGAN Voice conversion model.')
parser.add_argument('--processed_dir', type=str, help='train dataset directory that contains processed npy and npz files', default=processed_dir)
parser.add_argument('--test_wav_dir', type=str, help='test directory that contains raw audios', default=test_wav_dir)
argv = parser.parse_args()
processed_dir = argv.processed_dir
test_wav_dir = argv.test_wav_dir
start_time = time.time()
train(processed_dir, test_wav_dir)
end_time = time.time()
time_elapsed = end_time - start_time
print('Training Time: %02d:%02d:%02d' % \
(time_elapsed // 3600, (time_elapsed % 3600 // 60), (time_elapsed % 60 // 1)))
|
|
"""
Covariance estimators using shrinkage.
Shrinkage corresponds to regularising `cov` using a convex combination:
shrunk_cov = (1-shrinkage)*cov + shrinkage*structured_estimate.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
# avoid division truncation
from __future__ import division
import warnings
import numpy as np
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance
from ..externals.six.moves import xrange
from ..utils import check_array
# ShrunkCovariance estimator
def shrunk_covariance(emp_cov, shrinkage=0.1):
"""Calculates a covariance matrix shrunk on the diagonal
Parameters
----------
emp_cov : array-like, shape (n_features, n_features)
Covariance matrix to be shrunk
shrinkage : float, 0 <= shrinkage <= 1
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Returns
-------
shrunk_cov : array-like
Shrunk covariance.
Notes
-----
The regularized (shrunk) covariance is given by
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
"""
emp_cov = check_array(emp_cov)
n_features = emp_cov.shape[0]
mu = np.trace(emp_cov) / n_features
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov
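# Worked example (illustrative, not a doctest in the original source): shrinking a
# 2x2 empirical covariance toward mu * I, where mu = trace(cov) / n_features = 1.5.
# With shrinkage=0.5 the off-diagonal terms are halved and the diagonal is pulled
# toward the average variance:
#   >>> import numpy as np
#   >>> shrunk_covariance(np.array([[2.0, 0.8], [0.8, 1.0]]), shrinkage=0.5)
#   array([[1.75, 0.4 ],
#          [0.4 , 1.25]])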
class ShrunkCovariance(EmpiricalCovariance):
"""Covariance estimator with shrinkage
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored
shrinkage : float, 0 <= shrinkage <= 1
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Attributes
----------
`covariance_` : array-like, shape (n_features, n_features)
Estimated covariance matrix
`precision_` : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
`shrinkage` : float, 0 <= shrinkage <= 1
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized covariance is given by
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
"""
def __init__(self, store_precision=True, assume_centered=False,
shrinkage=0.1):
EmpiricalCovariance.__init__(self, store_precision=store_precision,
assume_centered=assume_centered)
self.shrinkage = shrinkage
def fit(self, X, y=None):
""" Fits the shrunk covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistency.
assume_centered : Boolean
If True, data are not centered before computation.
Useful to work with data whose mean is almost, but not exactly,
zero.
If False, data are centered before computation.
Returns
-------
self : object
Returns self.
"""
# Not calling the parent object to fit, to avoid a potential
# matrix inversion when setting the precision
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
covariance = shrunk_covariance(covariance, self.shrinkage)
self._set_covariance(covariance)
return self
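# Hedged usage sketch (illustrative only): fitting the ShrunkCovariance
# estimator on random data and reading back the regularized covariance.
def _example_shrunk_covariance_estimator():
    rng = np.random.RandomState(42)
    X = rng.randn(100, 5)
    estimator = ShrunkCovariance(shrinkage=0.2).fit(X)
    # covariance_ holds the shrunk estimate; precision_ its pseudo-inverse
    # (stored because store_precision defaults to True)
    return estimator.covariance_, estimator.precision_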
# Ledoit-Wolf estimator
def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000):
"""Estimates the shrunk Ledoit-Wolf covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage.
assume_centered : Boolean
If True, data are not centered before computation.
Useful to work with data whose mean is almost, but not exactly,
zero.
If False, data are centered before computation.
block_size : int
Size of the blocks into which the covariance matrix will be split.
Returns
-------
shrinkage: float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized (shrunk) covariance is:
(1 - shrinkage)*cov
+ shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
return 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples = 1
n_features = X.size
else:
n_samples, n_features = X.shape
# optionally center the data
if not assume_centered:
X = X - X.mean(0)
# number of blocks to split the covariance matrix into
n_splits = int(n_features / block_size)
X2 = X ** 2
emp_cov_trace = np.sum(X2, axis=0) / n_samples
mu = np.sum(emp_cov_trace) / n_features
beta_ = 0. # sum of the coefficients of <X2.T, X2>
delta_ = 0. # sum of the *squared* coefficients of <X.T, X>
# starting block computation
for i in xrange(n_splits):
for j in xrange(n_splits):
rows = slice(block_size * i, block_size * (i + 1))
cols = slice(block_size * j, block_size * (j + 1))
beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols]))
delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2)
rows = slice(block_size * i, block_size * (i + 1))
beta_ += np.sum(np.dot(X2.T[rows], X2[:, block_size * n_splits:]))
delta_ += np.sum(
np.dot(X.T[rows], X[:, block_size * n_splits:]) ** 2)
for j in xrange(n_splits):
cols = slice(block_size * j, block_size * (j + 1))
beta_ += np.sum(np.dot(X2.T[block_size * n_splits:], X2[:, cols]))
delta_ += np.sum(
np.dot(X.T[block_size * n_splits:], X[:, cols]) ** 2)
delta_ += np.sum(np.dot(X.T[block_size * n_splits:],
X[:, block_size * n_splits:]) ** 2)
delta_ /= n_samples ** 2
beta_ += np.sum(np.dot(X2.T[block_size * n_splits:],
X2[:, block_size * n_splits:]))
# use delta_ to compute beta
beta = 1. / (n_features * n_samples) * (beta_ / n_samples - delta_)
# delta is the sum of the squared coefficients of (<X.T,X> - mu*Id) / p
delta = delta_ - 2. * mu * emp_cov_trace.sum() + n_features * mu ** 2
delta /= n_features
# get final beta as the min between beta and delta
beta = min(beta, delta)
# finally get shrinkage
shrinkage = beta / delta
return shrinkage
def ledoit_wolf(X, assume_centered=False, block_size=1000):
"""Estimates the shrunk Ledoit-Wolf covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : Boolean
If True, data are not centered before computation.
Useful to work with data whose mean is almost, but not exactly,
zero.
If False, data are centered before computation.
block_size : int,
Size of the blocks into which the covariance matrix will be split.
If n_features > `block_size`, an error is raised because the
shrunk covariance matrix would be too large for the available
memory.
Returns
-------
shrunk_cov : array-like, shape (n_features, n_features)
Shrunk covariance.
shrinkage : float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized (shrunk) covariance is:
(1 - shrinkage)*cov
+ shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
if not assume_centered:
X = X - X.mean()
return np.atleast_2d((X ** 2).mean()), 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples = 1
n_features = X.size
else:
n_samples, n_features = X.shape
if n_features > block_size:
raise MemoryError("LW: n_features is too large, " +
"try increasing block_size")
# get Ledoit-Wolf shrinkage
shrinkage = ledoit_wolf_shrinkage(
X, assume_centered=assume_centered, block_size=block_size)
emp_cov = empirical_covariance(X, assume_centered=assume_centered)
mu = np.sum(np.trace(emp_cov)) / n_features
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov, shrinkage
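# Hedged usage sketch: `ledoit_wolf` returns both the shrunk covariance and
# the data-driven shrinkage coefficient. The random data below is illustrative.
def _example_ledoit_wolf():
    rng = np.random.RandomState(0)
    X = rng.randn(40, 10)
    shrunk_cov, shrinkage = ledoit_wolf(X)
    assert shrunk_cov.shape == (10, 10)
    assert 0. <= shrinkage <= 1.
    return shrunk_cov, shrinkage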
class LedoitWolf(EmpiricalCovariance):
"""LedoitWolf Estimator
Ledoit-Wolf is a particular form of shrinkage, where the shrinkage
coefficient is computed using O. Ledoit and M. Wolf's formula as
described in "A Well-Conditioned Estimator for Large-Dimensional
Covariance Matrices", Ledoit and Wolf, Journal of Multivariate
Analysis, Volume 88, Issue 2, February 2004, pages 365-411.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
block_size : int,
Size of the blocks into which the covariance matrix will be split
during its Ledoit-Wolf estimation.
If n_features > `block_size`, an error is raised because the
shrunk covariance matrix would be too large for the available
memory.
Attributes
----------
`covariance_` : array-like, shape (n_features, n_features)
Estimated covariance matrix
`precision_` : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
`shrinkage_` : float, 0 <= shrinkage <= 1
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised covariance is::
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
and shrinkage is given by the Ledoit and Wolf formula (see References)
References
----------
"A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices",
Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2,
February 2004, pages 365-411.
"""
def __init__(self, store_precision=True, assume_centered=False,
block_size=1000):
EmpiricalCovariance.__init__(self, store_precision=store_precision,
assume_centered=assume_centered)
self.block_size = block_size
def fit(self, X, y=None):
""" Fits the Ledoit-Wolf shrunk covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistency.
Returns
-------
self : object
Returns self.
"""
# Not calling the parent object to fit, to avoid computing the
# covariance matrix (and potentially the precision)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance, shrinkage = ledoit_wolf(X - self.location_,
assume_centered=True,
block_size=self.block_size)
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self
# OAS estimator
def oas(X, assume_centered=False):
"""Estimate covariance with the Oracle Approximating Shrinkage algorithm.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
assume_centered : boolean
If True, data are not centered before computation.
Useful to work with data whose mean is almost, but not exactly,
zero.
If False, data are centered before computation.
Returns
-------
shrunk_cov : array-like, shape (n_features, n_features)
Shrunk covariance.
shrinkage : float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised (shrunk) covariance is:
(1 - shrinkage)*cov
+ shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
The formula we used to implement the OAS
does not correspond to the one given in the article. It has been taken
from the MATLAB program available from the author's webpage
(https://tbayes.eecs.umich.edu/yilun/covestimation).
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
if not assume_centered:
X = X - X.mean()
return np.atleast_2d((X ** 2).mean()), 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples = 1
n_features = X.size
else:
n_samples, n_features = X.shape
emp_cov = empirical_covariance(X, assume_centered=assume_centered)
mu = np.trace(emp_cov) / n_features
# formula from Chen et al.'s **implementation**
alpha = np.mean(emp_cov ** 2)
num = alpha + mu ** 2
den = (n_samples + 1.) * (alpha - (mu ** 2) / n_features)
shrinkage = min(num / den, 1.)
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov, shrinkage
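# Hedged usage sketch comparing the OAS and Ledoit-Wolf shrinkage
# coefficients on the same made-up data; the two usually differ slightly,
# especially when n_samples is small relative to n_features.
def _example_oas_vs_ledoit_wolf():
    rng = np.random.RandomState(7)
    X = rng.randn(30, 8)
    _, lw_shrinkage = ledoit_wolf(X)
    _, oas_shrinkage = oas(X)
    return lw_shrinkage, oas_shrinkage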
class OAS(EmpiricalCovariance):
"""Oracle Approximating Shrinkage Estimator
OAS is a particular form of shrinkage described in
"Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
The formula used here does not correspond to the one given in the
article. It has been taken from the Matlab program available from the
authors' webpage (https://tbayes.eecs.umich.edu/yilun/covestimation).
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
`covariance_` : array-like, shape (n_features, n_features)
Estimated covariance matrix.
`precision_` : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
`shrinkage_` : float, 0 <= shrinkage <= 1
coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised covariance is::
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
and shrinkage is given by the OAS formula (see References)
References
----------
"Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
def fit(self, X, y=None):
""" Fits the Oracle Approximating Shrinkage covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistency.
Returns
-------
self: object
Returns self.
"""
# Not calling the parent object to fit, to avoid computing the
# covariance matrix (and potentially the precision)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance, shrinkage = oas(X - self.location_, assume_centered=True)
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self
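# Hedged usage sketch: the LedoitWolf and OAS estimators share the same
# fit / covariance_ / shrinkage_ interface; random data for illustration only.
def _example_shrinkage_estimators():
    rng = np.random.RandomState(1)
    X = rng.randn(60, 4)
    results = {}
    for estimator in (LedoitWolf(), OAS()):
        estimator.fit(X)
        assert estimator.covariance_.shape == (4, 4)
        results[type(estimator).__name__] = estimator.shrinkage_
    return results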
|
|
# Copyright 2013 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import webob.exc as webexc
import neutron
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import router
from neutron.common import config
from neutron import context as q_context
from neutron.db import db_base_plugin_v2
from neutron.db import l3_db
from neutron.db.loadbalancer import loadbalancer_db as lb_db
from neutron.db import routedserviceinsertion_db as rsi_db
from neutron.db import routerservicetype_db as rst_db
from neutron.db import servicetype_db as st_db
from neutron.extensions import routedserviceinsertion as rsi
from neutron.extensions import routerservicetype as rst
from neutron.plugins.common import constants
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import testlib_api
from neutron.tests.unit import testlib_plugin
from neutron import wsgi
_uuid = test_api_v2._uuid
_get_path = test_api_v2._get_path
extensions_path = ':'.join(neutron.extensions.__path__)
class RouterServiceInsertionTestPlugin(
rst_db.RouterServiceTypeDbMixin,
rsi_db.RoutedServiceInsertionDbMixin,
st_db.ServiceTypeManager,
lb_db.LoadBalancerPluginDb,
l3_db.L3_NAT_db_mixin,
db_base_plugin_v2.NeutronDbPluginV2):
supported_extension_aliases = [
"router", "router-service-type", "routed-service-insertion",
"service-type", "lbaas"
]
def create_router(self, context, router):
with context.session.begin(subtransactions=True):
r = super(RouterServiceInsertionTestPlugin, self).create_router(
context, router)
service_type_id = router['router'].get(rst.SERVICE_TYPE_ID)
if service_type_id is not None:
r[rst.SERVICE_TYPE_ID] = service_type_id
self._process_create_router_service_type_id(
context, r)
return r
def get_router(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
r = super(RouterServiceInsertionTestPlugin, self).get_router(
context, id, fields)
rsbind = self._get_router_service_type_id_binding(context, id)
if rsbind:
r[rst.SERVICE_TYPE_ID] = rsbind['service_type_id']
return r
def delete_router(self, context, id):
with context.session.begin(subtransactions=True):
super(RouterServiceInsertionTestPlugin, self).delete_router(
context, id)
rsbind = self._get_router_service_type_id_binding(context, id)
if rsbind:
raise Exception('Router service-type binding is not deleted')
def create_resource(self, res, context, resource, model):
with context.session.begin(subtransactions=True):
method_name = "create_{0}".format(res)
method = getattr(super(RouterServiceInsertionTestPlugin, self),
method_name)
o = method(context, resource)
router_id = resource[res].get(rsi.ROUTER_ID)
if router_id is not None:
o[rsi.ROUTER_ID] = router_id
self._process_create_resource_router_id(
context, o, model)
return o
def get_resource(self, res, context, id, fields, model):
method_name = "get_{0}".format(res)
method = getattr(super(RouterServiceInsertionTestPlugin, self),
method_name)
o = method(context, id, fields)
if fields is None or rsi.ROUTER_ID in fields:
rsbind = self._get_resource_router_id_binding(
context, model, id)
if rsbind:
o[rsi.ROUTER_ID] = rsbind['router_id']
return o
def delete_resource(self, res, context, id, model):
method_name = "delete_{0}".format(res)
with context.session.begin(subtransactions=True):
method = getattr(super(RouterServiceInsertionTestPlugin, self),
method_name)
method(context, id)
self._delete_resource_router_id_binding(context, id, model)
if self._get_resource_router_id_binding(context, model, id):
raise Exception("{0}-router binding is not deleted".format(res))
def create_pool(self, context, pool):
return self.create_resource('pool', context, pool, lb_db.Pool)
def get_pool(self, context, id, fields=None):
return self.get_resource('pool', context, id, fields, lb_db.Pool)
def delete_pool(self, context, id):
return self.delete_resource('pool', context, id, lb_db.Pool)
def create_health_monitor(self, context, health_monitor):
return self.create_resource('health_monitor', context, health_monitor,
lb_db.HealthMonitor)
def get_health_monitor(self, context, id, fields=None):
return self.get_resource('health_monitor', context, id, fields,
lb_db.HealthMonitor)
def delete_health_monitor(self, context, id):
return self.delete_resource('health_monitor', context, id,
lb_db.HealthMonitor)
def create_vip(self, context, vip):
return self.create_resource('vip', context, vip, lb_db.Vip)
def get_vip(self, context, id, fields=None):
return self.get_resource(
'vip', context, id, fields, lb_db.Vip)
def delete_vip(self, context, id):
return self.delete_resource('vip', context, id, lb_db.Vip)
def stats(self, context, pool_id):
pass
class RouterServiceInsertionTestCase(testlib_api.SqlTestCase,
testlib_plugin.PluginSetupHelper):
def setUp(self):
super(RouterServiceInsertionTestCase, self).setUp()
plugin = (
"neutron.tests.unit.test_routerserviceinsertion."
"RouterServiceInsertionTestPlugin"
)
# point config file to: neutron/tests/etc/neutron.conf.test
self.config_parse()
# just stubbing the core plugin with the LoadBalancer plugin
self.setup_coreplugin(plugin)
cfg.CONF.set_override('service_plugins', [])
cfg.CONF.set_override('quota_router', -1, group='QUOTAS')
# Ensure existing ExtensionManager is not used
ext_mgr = extensions.PluginAwareExtensionManager(
extensions_path,
{constants.LOADBALANCER: RouterServiceInsertionTestPlugin()}
)
extensions.PluginAwareExtensionManager._instance = ext_mgr
router.APIRouter()
app = config.load_paste_app('extensions_test_app')
self._api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
self._tenant_id = "8c70909f-b081-452d-872b-df48e6c355d1"
self._service_type_id = _uuid()
self._setup_core_resources()
# FIXME (markmcclain): The test setup makes it difficult to add core
# resources via the API. In the interim we'll create them directly using the
# plugin, with the side effect of polluting the fixture database until tearDown.
def tearDown(self):
self.api = None
super(RouterServiceInsertionTestCase, self).tearDown()
def _setup_core_resources(self):
core_plugin = neutron.manager.NeutronManager.get_plugin()
self._network = core_plugin.create_network(
q_context.get_admin_context(),
{
'network':
{
'tenant_id': self._tenant_id,
'name': 'test net',
'admin_state_up': True,
'shared': False,
}
}
)
self._subnet = core_plugin.create_subnet(
q_context.get_admin_context(),
{
'subnet':
{
'network_id': self._network['id'],
'name': 'test subnet',
'cidr': '192.168.1.0/24',
'ip_version': 4,
'gateway_ip': '192.168.1.1',
'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
'host_routes': attributes.ATTR_NOT_SPECIFIED,
'enable_dhcp': True,
}
}
)
self._subnet_id = self._subnet['id']
def _do_request(self, method, path, data=None, params=None, action=None):
content_type = 'application/json'
body = None
if data is not None: # empty dict is valid
body = wsgi.Serializer().serialize(data, content_type)
req = testlib_api.create_request(
path, body, content_type,
method, query_string=params)
res = req.get_response(self._api)
if res.status_code >= 400:
raise webexc.HTTPClientError(detail=res.body, code=res.status_code)
if res.status_code != webexc.HTTPNoContent.code:
return res.json
def _router_create(self, service_type_id=None):
data = {
"router": {
"tenant_id": self._tenant_id,
"name": "test",
"admin_state_up": True,
"service_type_id": service_type_id,
}
}
res = self._do_request('POST', _get_path('routers'), data)
return res['router']
def test_router_create_no_service_type_id(self):
router = self._router_create()
self.assertIsNone(router.get('service_type_id'))
def test_router_create_with_service_type_id(self):
router = self._router_create(self._service_type_id)
self.assertEqual(router['service_type_id'], self._service_type_id)
def test_router_get(self):
router = self._router_create(self._service_type_id)
res = self._do_request('GET',
_get_path('routers/{0}'.format(router['id'])))
self.assertEqual(res['router']['service_type_id'],
self._service_type_id)
def _test_router_update(self, update_service_type_id):
router = self._router_create(self._service_type_id)
router_id = router['id']
new_name = _uuid()
data = {
"router": {
"name": new_name,
"admin_state_up": router['admin_state_up'],
}
}
if update_service_type_id:
data["router"]["service_type_id"] = _uuid()
with testlib_api.ExpectedException(
webexc.HTTPClientError) as ctx_manager:
res = self._do_request(
'PUT', _get_path('routers/{0}'.format(router_id)), data)
self.assertEqual(ctx_manager.exception.code, 400)
else:
res = self._do_request(
'PUT', _get_path('routers/{0}'.format(router_id)), data)
res = self._do_request(
'GET', _get_path('routers/{0}'.format(router['id'])))
self.assertEqual(res['router']['name'], new_name)
def test_router_update_with_service_type_id(self):
self._test_router_update(True)
def test_router_update_without_service_type_id(self):
self._test_router_update(False)
def test_router_delete(self):
router = self._router_create(self._service_type_id)
self._do_request(
'DELETE', _get_path('routers/{0}'.format(router['id'])))
def _test_lb_setup(self):
router = self._router_create(self._service_type_id)
self._router_id = router['id']
def _test_pool_setup(self):
self._test_lb_setup()
def _test_health_monitor_setup(self):
self._test_lb_setup()
def _test_vip_setup(self):
self._test_pool_setup()
pool = self._pool_create(self._router_id)
self._pool_id = pool['id']
def _create_resource(self, res, data):
resp = self._do_request('POST', _get_path('lb/{0}s'.format(res)), data)
return resp[res]
def _pool_create(self, router_id=None):
data = {
"pool": {
"tenant_id": self._tenant_id,
"name": "test",
"protocol": "HTTP",
"subnet_id": self._subnet_id,
"lb_method": "ROUND_ROBIN",
"router_id": router_id
}
}
return self._create_resource('pool', data)
def _pool_update_attrs(self, pool):
uattr = {}
fields = [
'name', 'description', 'lb_method',
'health_monitors', 'admin_state_up'
]
for field in fields:
uattr[field] = pool[field]
return uattr
def _health_monitor_create(self, router_id=None):
data = {
"health_monitor": {
"tenant_id": self._tenant_id,
"type": "HTTP",
"delay": 1,
"timeout": 1,
"max_retries": 1,
"router_id": router_id
}
}
return self._create_resource('health_monitor', data)
def _health_monitor_update_attrs(self, hm):
uattr = {}
fields = ['delay', 'timeout', 'max_retries']
for field in fields:
uattr[field] = hm[field]
return uattr
def _vip_create(self, router_id=None):
data = {
"vip": {
"tenant_id": self._tenant_id,
"name": "test",
"protocol": "HTTP",
"protocol_port": 80,
"subnet_id": self._subnet_id,
"pool_id": self._pool_id,
"address": "192.168.1.102",
"connection_limit": 100,
"admin_state_up": True,
"router_id": router_id
}
}
return self._create_resource('vip', data)
def _vip_update_attrs(self, vip):
uattr = {}
fields = [
'name', 'description', 'pool_id', 'connection_limit',
'admin_state_up'
]
for field in fields:
uattr[field] = vip[field]
return uattr
def _test_resource_create(self, res):
getattr(self, "_test_{0}_setup".format(res))()
obj = getattr(self, "_{0}_create".format(res))(self._router_id)
self.assertEqual(obj['router_id'], self._router_id)
def _test_resource_update(self, res, update_router_id,
update_attr, update_value):
getattr(self, "_test_{0}_setup".format(res))()
obj = getattr(self, "_{0}_create".format(res))(self._router_id)
uattrs = getattr(self, "_{0}_update_attrs".format(res))(obj)
uattrs[update_attr] = update_value
data = {res: uattrs}
if update_router_id:
uattrs['router_id'] = self._router_id
with testlib_api.ExpectedException(
webexc.HTTPClientError) as ctx_manager:
self._do_request(
'PUT',
_get_path('lb/{0}s/{1}'.format(res, obj['id'])), data)
self.assertEqual(ctx_manager.exception.code, 400)
else:
self._do_request(
'PUT',
_get_path('lb/{0}s/{1}'.format(res, obj['id'])), data)
updated = self._do_request(
'GET',
_get_path('lb/{0}s/{1}'.format(res, obj['id'])))
self.assertEqual(updated[res][update_attr], update_value)
def _test_resource_delete(self, res, with_router_id):
getattr(self, "_test_{0}_setup".format(res))()
func = getattr(self, "_{0}_create".format(res))
if with_router_id:
obj = func(self._router_id)
else:
obj = func()
self._do_request(
'DELETE', _get_path('lb/{0}s/{1}'.format(res, obj['id'])))
def test_pool_create(self):
self._test_resource_create('pool')
def test_pool_update_with_router_id(self):
self._test_resource_update('pool', True, 'name', _uuid())
def test_pool_update_without_router_id(self):
self._test_resource_update('pool', False, 'name', _uuid())
def test_pool_delete_with_router_id(self):
self._test_resource_delete('pool', True)
def test_pool_delete_without_router_id(self):
self._test_resource_delete('pool', False)
def test_health_monitor_create(self):
self._test_resource_create('health_monitor')
def test_health_monitor_update_with_router_id(self):
self._test_resource_update('health_monitor', True, 'timeout', 2)
def test_health_monitor_update_without_router_id(self):
self._test_resource_update('health_monitor', False, 'timeout', 2)
def test_health_monitor_delete_with_router_id(self):
self._test_resource_delete('health_monitor', True)
def test_health_monitor_delete_without_router_id(self):
self._test_resource_delete('health_monitor', False)
def test_vip_create(self):
self._test_resource_create('vip')
def test_vip_update_with_router_id(self):
self._test_resource_update('vip', True, 'name', _uuid())
def test_vip_update_without_router_id(self):
self._test_resource_update('vip', False, 'name', _uuid())
def test_vip_delete_with_router_id(self):
self._test_resource_delete('vip', True)
def test_vip_delete_without_router_id(self):
self._test_resource_delete('vip', False)
|
|
# BEGIN_COPYRIGHT
#
# Copyright 2009-2013 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
import unittest, tempfile, os, stat, shutil, logging
import subprocess as sp
from xml.dom.minidom import getDOMImplementation
DOM_IMPL = getDOMImplementation()
import pydoop
from pydoop.jc import jc_wrapper
import pydoop.utils as pu
import pydoop.hadoop_utils as hu
pp = pydoop.import_version_specific_module('_pipes')
CONFIGURE_EXAMPLES = {
# jobconf_key/attr_name: [type, str_value]
'a' : ['str', 'this is a string'],
'b' : ['int', '22'],
'b1' : ['int', '23'],
'c' : ['float', '0.22'],
'c1' : ['float', '0.0202'],
'c2' : ['float', '.22'],
'c3' : ['float', '1.0e-22'],
'd' : ['bool' , 'false'],
'd1' : ['bool' , 'true'],
'e' : ['log_level' , 'DEBUG'],
}
class Obj(object):
pass
def serialize(t):
tt = type(t)
if tt == int:
return pp.serialize_int(t)
if tt == float:
return pp.serialize_float(t)
if tt == str:
return pp.serialize_string(t)
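# Hedged round-trip sketch (illustrative, not part of the original tests):
# values serialized with the helper above can be read back with the pydoop
# `_pipes` deserializers already imported as `pp`.
def _example_serialization_roundtrip():
    buf = serialize(42) + serialize(0.5) + serialize('spam')
    offset, i = pp.deserialize_int(buf, 0)
    offset, f = pp.deserialize_float(buf, offset)
    offset, s = pp.deserialize_string(buf, offset)
    return i, f, s  # expected: 42, ~0.5, 'spam'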
class TestUtils(unittest.TestCase):
def test_jc_configure_plain(self):
w = CONFIGURE_EXAMPLES
d = {}
for k in w.keys():
d[k] = w[k][1]
jc = pp.get_JobConf_object(d)
o = Obj()
for k in w.keys():
self.assertTrue(jc.hasKey(k))
if w[k][0] == 'str':
pu.jc_configure(o, jc, k, k)
self.assertEqual(getattr(o,k), w[k][1])
elif w[k][0] == 'int':
pu.jc_configure_int(o, jc, k, k)
self.assertEqual(getattr(o, k), int(w[k][1]))
elif w[k][0] == 'bool':
pu.jc_configure_bool(o, jc, k, k)
self.assertEqual(getattr(o, k), w[k][1] == 'true')
elif w[k][0] == 'float':
pu.jc_configure_float(o, jc, k, k)
self.assertAlmostEqual(getattr(o, k), float(w[k][1]))
elif w[k][0] == 'log_level':
pu.jc_configure_log_level(o, jc, k, k)
self.assertEqual(getattr(o, k), getattr(logging, w[k][1]))
def test_jc_configure_default(self):
w = CONFIGURE_EXAMPLES
d = {}
for k in w.keys():
d[k] = w[k][1]
jc = pp.get_JobConf_object(d)
o = Obj()
for k in w.keys():
nk = 'not-here-%s' % k
self.assertFalse(jc.hasKey(nk))
if w[k][0] == 'str':
pu.jc_configure(o, jc, nk, k, w[k][1])
self.assertEqual(getattr(o,k), w[k][1])
elif w[k][0] == 'int':
pu.jc_configure_int(o, jc, nk, k, int(w[k][1]))
self.assertEqual(getattr(o, k), int(w[k][1]))
elif w[k][0] == 'bool':
pu.jc_configure_bool(o, jc, nk, k, w[k][1]=='true')
self.assertEqual(getattr(o, k), w[k][1] == 'true')
elif w[k][0] == 'log_level':
pu.jc_configure_log_level(o, jc, nk, k, w[k][1])
self.assertEqual(getattr(o, k), getattr(logging, w[k][1]))
def test_jc_configure_no_default(self):
w = CONFIGURE_EXAMPLES
d = {}
for k in w.keys():
d[k] = w[k][1]
jc = pp.get_JobConf_object(d)
o = Obj()
for k in w.keys():
nk = 'not-here-%s' % k
self.assertFalse(jc.hasKey(nk))
self.assertRaises(UserWarning, pu.jc_configure, o, jc, nk, k)
def test_hadoop_serialization(self):
for k in range(-256,256, 4):
b = pp.serialize_int(k)
(o, v) = pp.deserialize_int(b, 0)
self.assertEqual(k, v)
for k in range(-32000,32000, 100):
b = pp.serialize_int(k)
(o, v) = pp.deserialize_int(b, 0)
self.assertEqual(k, v)
for k in [-0.233, 232.11, 1e-9, 1e+12]:
b = pp.serialize_float(k)
(o, v) = pp.deserialize_float(b, 0)
self.assertAlmostEqual((k-v)/(k+v), 0, 5)
for k in ['fpp', 'eee', 'ddd']:
b = pp.serialize_string(k)
(o, v) = pp.deserialize_string(b, 0)
self.assertEqual(k, v)
things = [1233, 0.333, 'hello_there', '22', -0.5]
b = ''
for t in things:
b += serialize(t)
o = 0
for t in things:
equal_test = self.assertEqual
if type(t) == int:
(o, v) = pp.deserialize_int(b, o)
elif type(t) == float:
(o, v) = pp.deserialize_float(b, o)
equal_test = self.assertAlmostEqual
elif type(t) == str:
(o, v) = pp.deserialize_string(b, o)
equal_test(v, t)
class TestHadoopUtils(unittest.TestCase):
def setUp(self):
self.hadoop_version = "0.20.2"
self.hadoop_version_tuple = (0, 20, 2)
self.hadoop_home = tempfile.mkdtemp(prefix="pydoop_test_")
self.hadoop_conf = os.path.join(self.hadoop_home, "conf")
os.mkdir(self.hadoop_conf)
self.bindir = os.path.join(self.hadoop_home, "bin")
os.mkdir(self.bindir)
self.hadoop_exe = os.path.join(self.bindir, "hadoop")
with open(self.hadoop_exe, "w") as fo:
fd = fo.fileno()
os.fchmod(fd, os.fstat(fd).st_mode | stat.S_IXUSR)
fo.write("#!/bin/bash\necho Hadoop %s\n" % self.hadoop_version)
self.orig_env = os.environ.copy()
self.pf = hu.PathFinder()
def tearDown(self):
os.environ.clear()
os.environ.update(self.orig_env)
shutil.rmtree(self.hadoop_home)
def test_HadoopVersion(self):
for vs, main, cdh, ext in [
("0.20.2", (0, 20, 2), (), ()),
("0.20.203.0", (0, 20, 203, 0), (), ()),
("0.20.2-cdh3u4", (0, 20, 2), (3, 4), ()),
("1.0.4-SNAPSHOT", (1, 0, 4), (), ("SNAPSHOT",)),
("2.0.0-mr1-cdh4.1.0", (2, 0, 0), (4, 1, 0), ("mr1",)),
]:
v = hu.HadoopVersion(vs)
for name, attr in ("main", main), ("cdh", cdh), ("ext", ext):
self.assertEqual(getattr(v, name), attr)
self.assertEqual(v.is_cloudera(), len(v.cdh) > 0)
self.assertEqual(v.tuple, main+cdh+ext)
for s in "bla", '0.20.str':
self.assertRaises(hu.HadoopVersionError, hu.HadoopVersion, s)
def test_get_hadoop_exec(self):
# hadoop home as argument
self.assertEqual(
self.pf.hadoop_exec(hadoop_home=self.hadoop_home), self.hadoop_exe
)
# hadoop home from environment
os.environ["HADOOP_HOME"] = self.hadoop_home
self.assertEqual(self.pf.hadoop_exec(), self.hadoop_exe)
# no hadoop home in environment
del os.environ["HADOOP_HOME"]
os.environ["PATH"] = self.bindir
hadoop_exec = self.pf.hadoop_exec()
cmd = sp.Popen([hadoop_exec, "version"], env=self.orig_env,
stdout=sp.PIPE, stderr=sp.PIPE)
out, _ = cmd.communicate()
self.assertTrue(out.splitlines()[0].strip().lower().startswith("hadoop"))
def test_get_hadoop_version(self):
# hadoop version from environment
vs = "0.21.0"
vt = (0, 21, 0)
os.environ["HADOOP_VERSION"] = vs
for hadoop_home in None, self.hadoop_home:
self.assertEqual(self.pf.hadoop_version(hadoop_home), vs)
vinfo = self.pf.hadoop_version_info(hadoop_home)
self.assertEqual(vinfo.main, vt)
self.assertEqual(vinfo.tuple, vt)
# hadoop version from executable
self.pf.reset()
del os.environ["HADOOP_VERSION"]
vinfo = self.pf.hadoop_version_info(self.hadoop_home)
self.assertEqual(vinfo.main, self.hadoop_version_tuple)
self.assertEqual(vinfo.tuple, self.hadoop_version_tuple)
def test_get_hadoop_params(self):
self.__check_params()
self.__check_params('', {})
self.__check_params('<?xml version="1.0"?>', {})
doc = DOM_IMPL.createDocument(None, "configuration", None)
self.__check_params(doc.toxml(), {})
root = doc.documentElement
prop = root.appendChild(doc.createElement("property"))
self.__check_params(doc.toxml(), {})
for s in "name", "value":
n = prop.appendChild(doc.createElement(s))
n.appendChild(doc.createTextNode(s.upper()))
self.__check_params(doc.toxml(), {"NAME": "VALUE"})
def __check_params(self, xml_content=None, expected=None):
if expected is None:
expected = {}
xml_fn = os.path.join(self.hadoop_conf, "core-site.xml")
if os.path.exists(xml_fn):
os.remove(xml_fn)
if xml_content is not None:
with open(xml_fn, "w") as fo:
fo.write(xml_content)
params = self.pf.hadoop_params(hadoop_conf=self.hadoop_conf)
self.assertEqual(params, expected)
class TestJcWrapper(unittest.TestCase):
def setUp(self):
self.data = {
'int': '2',
'float': '3.0',
'bool_t': 't',
'bool_T': 'T',
'bool_true': 'true',
'bool_True': 'TRUE',
'bool_TRUE': 'TRUE',
'bool_1': '1',
'bool_f': 'f',
'bool_F': 'F',
'bool_false': 'false',
'bool_False': 'False',
'bool_FALSE': 'FALSE',
'bool_0': '0',
'str': 'str',
}
self.jc = pp.get_JobConf_object(self.data)
self.wrapper = jc_wrapper(self.jc)
def test_has_key(self):
self.assertTrue(self.wrapper.has_key('int'))
self.assertFalse(self.wrapper.has_key('no_key'))
def test_simple_fetch(self):
self.assertEqual('str', self.wrapper['str'])
def test_fetch_missing(self):
self.assertRaises(KeyError, lambda x: self.wrapper[x], 'no_key')
def test_simple_get(self):
self.assertEqual("2", self.wrapper.get('int'))
self.assertTrue(self.wrapper.get('no_key') is None)
# ensure caching doesn't cause problems
self.assertEqual("2", self.wrapper.get('int'))
self.assertTrue(self.wrapper.get('no_key') is None)
def test_simple_get_default(self):
self.assertEqual("default", self.wrapper.get('no_key', "default"))
def test_get_boolean(self):
for k in (
'bool_t', 'bool_T', 'bool_true', 'bool_True', 'bool_TRUE', 'bool_1'
):
self.assertEqual(True, self.wrapper.get_boolean(k))
for k in (
'bool_f', 'bool_F', 'bool_false', 'bool_False', 'bool_FALSE', 'bool_0'
):
self.assertEqual(False, self.wrapper.get_boolean(k))
# repeat to test cache
for k in (
'bool_f', 'bool_F', 'bool_false', 'bool_False', 'bool_FALSE', 'bool_0'
):
self.assertEqual(False, self.wrapper.get_boolean(k))
def test_get_bad_boolean(self):
self.assertRaises(ValueError, self.wrapper.get_boolean, 'float')
def test_get_missing_boolean(self):
self.assertEqual(True, self.wrapper.get_boolean('no_key', True))
def test_get_int(self):
self.assertEqual(2, self.wrapper.get_int('int'))
# cache test
self.assertEqual(2, self.wrapper.get_int('int'))
def test_get_bad_int(self):
self.assertRaises(ValueError, self.wrapper.get_int, 'bool_f')
def test_get_float_as_int(self):
self.assertEqual(3, self.wrapper.get_int('float'))
def test_get_missing_int(self):
self.assertEqual(42, self.wrapper.get_int('no_key', 42))
def test_get_float(self):
self.assertEqual(3.0, self.wrapper.get_float('float'))
def test_get_bad_float(self):
self.assertRaises(ValueError, self.wrapper.get_float, 'bool_f')
def test_get_int_as_float(self):
self.assertEqual(2.0, self.wrapper.get_float('int'))
def test_get_missing_float(self):
self.assertEqual(42.0, self.wrapper.get_float('no_key', 42.0))
def suite():
suite = unittest.TestSuite()
suite.addTest(TestUtils('test_jc_configure_plain'))
suite.addTest(TestUtils('test_jc_configure_default'))
suite.addTest(TestUtils('test_jc_configure_no_default'))
suite.addTest(TestUtils('test_hadoop_serialization'))
suite.addTest(TestHadoopUtils('test_HadoopVersion'))
suite.addTest(TestHadoopUtils('test_get_hadoop_exec'))
suite.addTest(TestHadoopUtils('test_get_hadoop_version'))
suite.addTest(TestHadoopUtils('test_get_hadoop_params'))
suite.addTest(TestJcWrapper('test_has_key'))
suite.addTest(TestJcWrapper('test_simple_fetch'))
suite.addTest(TestJcWrapper('test_fetch_missing'))
suite.addTest(TestJcWrapper('test_simple_get'))
suite.addTest(TestJcWrapper('test_simple_get_default'))
suite.addTest(TestJcWrapper('test_get_boolean'))
suite.addTest(TestJcWrapper('test_get_bad_boolean'))
suite.addTest(TestJcWrapper('test_get_missing_boolean'))
suite.addTest(TestJcWrapper('test_get_int'))
suite.addTest(TestJcWrapper('test_get_bad_int'))
suite.addTest(TestJcWrapper('test_get_float_as_int'))
suite.addTest(TestJcWrapper('test_get_missing_int'))
suite.addTest(TestJcWrapper('test_get_float'))
suite.addTest(TestJcWrapper('test_get_bad_float'))
suite.addTest(TestJcWrapper('test_get_int_as_float'))
suite.addTest(TestJcWrapper('test_get_missing_float'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
runner.run((suite()))
|
|
import os
import sys
import pytest
import glob
from tests.lib.path import Path
def test_install_from_future_wheel_version(script, data):
"""
Test installing a future wheel
"""
from tests.lib import TestFailure
package = data.packages.join("futurewheel-3.0-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index', expect_error=True)
with pytest.raises(TestFailure):
result.assert_installed('futurewheel', without_egg_link=True,
editable=False)
package = data.packages.join("futurewheel-1.9-py2.py3-none-any.whl")
result = script.pip(
'install', package, '--no-index', expect_error=False,
expect_stderr=True,
)
result.assert_installed('futurewheel', without_egg_link=True,
editable=False)
def test_install_from_broken_wheel(script, data):
"""
Test that installing a broken wheel fails properly
"""
from tests.lib import TestFailure
package = data.packages.join("brokenwheel-1.0-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index', expect_error=True)
with pytest.raises(TestFailure):
result.assert_installed('futurewheel', without_egg_link=True,
editable=False)
def test_install_from_wheel(script, data):
"""
Test installing from a wheel (that has a script)
"""
result = script.pip(
'install', 'has.script==1.0', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
dist_info_folder = script.site_packages / 'has.script-1.0.dist-info'
assert dist_info_folder in result.files_created, (dist_info_folder,
result.files_created,
result.stdout)
script_file = script.bin / 'script.py'
assert script_file in result.files_created
def test_install_from_wheel_with_extras(script, data):
"""
Test installing from a wheel with extras.
"""
result = script.pip(
'install', 'complex-dist[simple]', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
dist_info_folder = script.site_packages / 'complex_dist-0.1.dist-info'
assert dist_info_folder in result.files_created, (dist_info_folder,
result.files_created,
result.stdout)
dist_info_folder = script.site_packages / 'simple.dist-0.1.dist-info'
assert dist_info_folder in result.files_created, (dist_info_folder,
result.files_created,
result.stdout)
def test_install_from_wheel_file(script, data):
"""
Test installing directly from a wheel file.
"""
package = data.packages.join("simple.dist-0.1-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index', expect_error=False)
dist_info_folder = script.site_packages / 'simple.dist-0.1.dist-info'
assert dist_info_folder in result.files_created, (dist_info_folder,
result.files_created,
result.stdout)
installer = dist_info_folder / 'INSTALLER'
assert installer in result.files_created, (dist_info_folder,
result.files_created,
result.stdout)
with open(script.base_path / installer, 'rb') as installer_file:
installer_details = installer_file.read()
assert installer_details == b'pip\n'
installer_temp = dist_info_folder / 'INSTALLER.pip'
assert installer_temp not in result.files_created, (dist_info_folder,
result.files_created,
result.stdout)
# header installs are broken in pypy virtualenvs
# https://github.com/pypa/virtualenv/issues/510
@pytest.mark.skipif("hasattr(sys, 'pypy_version_info')")
def test_install_from_wheel_with_headers(script, data):
"""
Test installing from a wheel file with headers
"""
package = data.packages.join("headers.dist-0.1-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index', expect_error=False)
dist_info_folder = script.site_packages / 'headers.dist-0.1.dist-info'
assert dist_info_folder in result.files_created, (dist_info_folder,
result.files_created,
result.stdout)
@pytest.mark.network
def test_install_wheel_with_target(script, data):
"""
Test installing a wheel using pip install --target
"""
script.pip('install', 'wheel')
target_dir = script.scratch_path / 'target'
result = script.pip(
'install', 'simple.dist==0.1', '-t', target_dir,
'--no-index', '--find-links=' + data.find_links,
)
assert Path('scratch') / 'target' / 'simpledist' in result.files_created, (
str(result)
)
@pytest.mark.network
def test_install_wheel_with_target_and_data_files(script, data):
"""
Test for issue #4092. Check that a data_files specification in
setup.py is handled correctly when a wheel is installed with the --target
option.
The setup() for the wheel 'prjwithdatafile-1.0-py2.py3-none-any.whl' is as
follows ::
setup(
name='prjwithdatafile',
version='1.0',
packages=['prjwithdatafile'],
data_files=[
(r'packages1', ['prjwithdatafile/README.txt']),
(r'packages2', ['prjwithdatafile/README.txt'])
]
)
"""
script.pip('install', 'wheel')
target_dir = script.scratch_path / 'prjwithdatafile'
package = data.packages.join("prjwithdatafile-1.0-py2.py3-none-any.whl")
result = script.pip('install', package,
'-t', target_dir,
'--no-index',
expect_error=False)
assert (Path('scratch') / 'prjwithdatafile' / 'packages1' / 'README.txt'
in result.files_created), str(result)
assert (Path('scratch') / 'prjwithdatafile' / 'packages2' / 'README.txt'
in result.files_created), str(result)
assert (Path('scratch') / 'prjwithdatafile' / 'lib' / 'python'
not in result.files_created), str(result)
def test_install_wheel_with_root(script, data):
"""
Test installing a wheel using pip install --root
"""
root_dir = script.scratch_path / 'root'
result = script.pip(
'install', 'simple.dist==0.1', '--root', root_dir,
'--no-index', '--find-links=' + data.find_links,
)
assert Path('scratch') / 'root' in result.files_created
def test_install_wheel_with_prefix(script, data):
"""
Test installing a wheel using pip install --prefix
"""
prefix_dir = script.scratch_path / 'prefix'
result = script.pip(
'install', 'simple.dist==0.1', '--prefix', prefix_dir,
'--no-index', '--find-links=' + data.find_links,
)
if hasattr(sys, "pypy_version_info"):
lib = Path('scratch') / 'prefix' / 'site-packages'
else:
lib = Path('scratch') / 'prefix' / 'lib'
assert lib in result.files_created
def test_install_from_wheel_installs_deps(script, data):
"""
Test can install dependencies of wheels
"""
# 'requires_source' depends on the 'source' project
package = data.packages.join("requires_source-1.0-py2.py3-none-any.whl")
result = script.pip(
'install', '--no-index', '--find-links', data.find_links, package,
)
result.assert_installed('source', editable=False)
def test_install_from_wheel_no_deps(script, data):
"""
Test --no-deps works with wheel installs
"""
# 'requires_source' depends on the 'source' project
package = data.packages.join("requires_source-1.0-py2.py3-none-any.whl")
result = script.pip(
'install', '--no-index', '--find-links', data.find_links, '--no-deps',
package,
)
pkg_folder = script.site_packages / 'source'
assert pkg_folder not in result.files_created
@pytest.mark.network
def test_install_user_wheel(script, virtualenv, data):
"""
Test user install from wheel (that has a script)
"""
virtualenv.system_site_packages = True
script.pip('install', 'wheel')
result = script.pip(
'install', 'has.script==1.0', '--user', '--no-index',
'--find-links=' + data.find_links,
)
egg_info_folder = script.user_site / 'has.script-1.0.dist-info'
assert egg_info_folder in result.files_created, str(result)
script_file = script.user_bin / 'script.py'
assert script_file in result.files_created
def test_install_from_wheel_gen_entrypoint(script, data):
"""
Test installing scripts (entry points are generated)
"""
result = script.pip(
'install', 'script.wheel1a==0.1', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
if os.name == 'nt':
wrapper_file = script.bin / 't1.exe'
else:
wrapper_file = script.bin / 't1'
assert wrapper_file in result.files_created
if os.name != "nt":
assert bool(os.access(script.base_path / wrapper_file, os.X_OK))
def test_install_from_wheel_gen_uppercase_entrypoint(script, data):
"""
Test installing scripts with uppercase letters in entry point names
"""
result = script.pip(
'install', 'console-scripts-uppercase==1.0', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
if os.name == 'nt':
# Case probably doesn't make any difference on NT
wrapper_file = script.bin / 'cmdName.exe'
else:
wrapper_file = script.bin / 'cmdName'
assert wrapper_file in result.files_created
if os.name != "nt":
assert bool(os.access(script.base_path / wrapper_file, os.X_OK))
def test_install_from_wheel_with_legacy(script, data):
"""
Test installing scripts (legacy scripts are preserved)
"""
result = script.pip(
'install', 'script.wheel2a==0.1', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
legacy_file1 = script.bin / 'testscript1.bat'
legacy_file2 = script.bin / 'testscript2'
assert legacy_file1 in result.files_created
assert legacy_file2 in result.files_created
def test_install_from_wheel_no_setuptools_entrypoint(script, data):
"""
Test that when we generate scripts, any existing setuptools wrappers in
the wheel are skipped.
"""
result = script.pip(
'install', 'script.wheel1==0.1', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
if os.name == 'nt':
wrapper_file = script.bin / 't1.exe'
else:
wrapper_file = script.bin / 't1'
wrapper_helper = script.bin / 't1-script.py'
# The wheel has t1.exe and t1-script.py. We will be generating t1 or
# t1.exe depending on the platform. So we check that the correct wrapper
# is present and that the -script.py helper has been skipped. We can't
# easily test that the wrapper from the wheel has been skipped /
# overwritten without getting very platform-dependent, so omit that.
assert wrapper_file in result.files_created
assert wrapper_helper not in result.files_created
def test_skipping_setuptools_doesnt_skip_legacy(script, data):
"""
Test installing scripts (legacy scripts are preserved even when we skip
setuptools wrappers)
"""
result = script.pip(
'install', 'script.wheel2==0.1', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
legacy_file1 = script.bin / 'testscript1.bat'
legacy_file2 = script.bin / 'testscript2'
wrapper_helper = script.bin / 't1-script.py'
assert legacy_file1 in result.files_created
assert legacy_file2 in result.files_created
assert wrapper_helper not in result.files_created
def test_install_from_wheel_gui_entrypoint(script, data):
"""
Test installing scripts (gui entry points are generated)
"""
result = script.pip(
'install', 'script.wheel3==0.1', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
if os.name == 'nt':
wrapper_file = script.bin / 't1.exe'
else:
wrapper_file = script.bin / 't1'
assert wrapper_file in result.files_created
def test_wheel_compiles_pyc(script, data):
"""
Test installing from wheel with --compile on
"""
script.pip(
"install", "--compile", "simple.dist==0.1", "--no-index",
"--find-links=" + data.find_links
)
# There are many locations for the __init__.pyc file so attempt to find
# any of them
exists = [
os.path.exists(script.site_packages_path / "simpledist/__init__.pyc"),
]
exists += glob.glob(
script.site_packages_path / "simpledist/__pycache__/__init__*.pyc"
)
assert any(exists)
def test_wheel_no_compiles_pyc(script, data):
"""
Test installing from wheel with --no-compile (no .pyc files generated)
"""
script.pip(
"install", "--no-compile", "simple.dist==0.1", "--no-index",
"--find-links=" + data.find_links
)
# There are many locations for the __init__.pyc file so attempt to find
# any of them
exists = [
os.path.exists(script.site_packages_path / "simpledist/__init__.pyc"),
]
exists += glob.glob(
script.site_packages_path / "simpledist/__pycache__/__init__*.pyc"
)
assert not any(exists)
def test_install_from_wheel_uninstalls_old_version(script, data):
# regression test for https://github.com/pypa/pip/issues/1825
package = data.packages.join("simplewheel-1.0-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index', expect_error=True)
package = data.packages.join("simplewheel-2.0-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index', expect_error=False)
dist_info_folder = script.site_packages / 'simplewheel-2.0.dist-info'
assert dist_info_folder in result.files_created
dist_info_folder = script.site_packages / 'simplewheel-1.0.dist-info'
assert dist_info_folder not in result.files_created
def test_wheel_compile_syntax_error(script, data):
package = data.packages.join("compilewheel-1.0-py2.py3-none-any.whl")
result = script.pip('install', '--compile', package, '--no-index')
assert 'yield from' not in result.stdout
assert 'SyntaxError: ' not in result.stdout
|
|
"""Support for UV data from openuv.io."""
import asyncio
import logging
from pyopenuv import Client
from pyopenuv.errors import OpenUvError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_API_KEY,
CONF_BINARY_SENSORS,
CONF_ELEVATION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_MONITORED_CONDITIONS,
CONF_SENSORS,
)
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.service import verify_domain_control
from .config_flow import configured_instances
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
DATA_OPENUV_CLIENT = "data_client"
DATA_OPENUV_LISTENER = "data_listener"
DATA_PROTECTION_WINDOW = "protection_window"
DATA_UV = "uv"
DEFAULT_ATTRIBUTION = "Data provided by OpenUV"
NOTIFICATION_ID = "openuv_notification"
NOTIFICATION_TITLE = "OpenUV Component Setup"
TOPIC_UPDATE = f"{DOMAIN}_data_update"
TYPE_CURRENT_OZONE_LEVEL = "current_ozone_level"
TYPE_CURRENT_UV_INDEX = "current_uv_index"
TYPE_CURRENT_UV_LEVEL = "current_uv_level"
TYPE_MAX_UV_INDEX = "max_uv_index"
TYPE_PROTECTION_WINDOW = "uv_protection_window"
TYPE_SAFE_EXPOSURE_TIME_1 = "safe_exposure_time_type_1"
TYPE_SAFE_EXPOSURE_TIME_2 = "safe_exposure_time_type_2"
TYPE_SAFE_EXPOSURE_TIME_3 = "safe_exposure_time_type_3"
TYPE_SAFE_EXPOSURE_TIME_4 = "safe_exposure_time_type_4"
TYPE_SAFE_EXPOSURE_TIME_5 = "safe_exposure_time_type_5"
TYPE_SAFE_EXPOSURE_TIME_6 = "safe_exposure_time_type_6"
BINARY_SENSORS = {TYPE_PROTECTION_WINDOW: ("Protection Window", "mdi:sunglasses")}
BINARY_SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(BINARY_SENSORS)): vol.All(
cv.ensure_list, [vol.In(BINARY_SENSORS)]
)
}
)
SENSORS = {
TYPE_CURRENT_OZONE_LEVEL: ("Current Ozone Level", "mdi:vector-triangle", "du"),
TYPE_CURRENT_UV_INDEX: ("Current UV Index", "mdi:weather-sunny", "index"),
TYPE_CURRENT_UV_LEVEL: ("Current UV Level", "mdi:weather-sunny", None),
TYPE_MAX_UV_INDEX: ("Max UV Index", "mdi:weather-sunny", "index"),
TYPE_SAFE_EXPOSURE_TIME_1: (
"Skin Type 1 Safe Exposure Time",
"mdi:timer",
"minutes",
),
TYPE_SAFE_EXPOSURE_TIME_2: (
"Skin Type 2 Safe Exposure Time",
"mdi:timer",
"minutes",
),
TYPE_SAFE_EXPOSURE_TIME_3: (
"Skin Type 3 Safe Exposure Time",
"mdi:timer",
"minutes",
),
TYPE_SAFE_EXPOSURE_TIME_4: (
"Skin Type 4 Safe Exposure Time",
"mdi:timer",
"minutes",
),
TYPE_SAFE_EXPOSURE_TIME_5: (
"Skin Type 5 Safe Exposure Time",
"mdi:timer",
"minutes",
),
TYPE_SAFE_EXPOSURE_TIME_6: (
"Skin Type 6 Safe Exposure Time",
"mdi:timer",
"minutes",
),
}
SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSORS)): vol.All(
cv.ensure_list, [vol.In(SENSORS)]
)
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_ELEVATION): float,
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_BINARY_SENSORS, default={}): BINARY_SENSOR_SCHEMA,
vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA,
}
)
},
extra=vol.ALLOW_EXTRA,
)
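# Hedged illustration (not part of the integration): a minimal dict, equivalent
# to a configuration.yaml entry, that CONFIG_SCHEMA would accept. All values
# below are placeholders chosen for the example.
def _example_openuv_config():
    return CONFIG_SCHEMA(
        {
            DOMAIN: {
                CONF_API_KEY: "YOUR_OPENUV_API_KEY",  # placeholder key
                CONF_LATITUDE: 52.52,
                CONF_LONGITUDE: 13.405,
                CONF_ELEVATION: 34.0,
            }
        }
    )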
async def async_setup(hass, config):
"""Set up the OpenUV component."""
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DATA_OPENUV_CLIENT] = {}
hass.data[DOMAIN][DATA_OPENUV_LISTENER] = {}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
identifier = "{0}, {1}".format(
conf.get(CONF_LATITUDE, hass.config.latitude),
conf.get(CONF_LONGITUDE, hass.config.longitude),
)
if identifier in configured_instances(hass):
return True
data = {
CONF_API_KEY: conf[CONF_API_KEY],
CONF_BINARY_SENSORS: conf[CONF_BINARY_SENSORS],
CONF_SENSORS: conf[CONF_SENSORS],
}
if CONF_LATITUDE in conf:
data[CONF_LATITUDE] = conf[CONF_LATITUDE]
if CONF_LONGITUDE in conf:
data[CONF_LONGITUDE] = conf[CONF_LONGITUDE]
if CONF_ELEVATION in conf:
data[CONF_ELEVATION] = conf[CONF_ELEVATION]
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=data
)
)
return True
async def async_setup_entry(hass, config_entry):
"""Set up OpenUV as config entry."""
_verify_domain_control = verify_domain_control(hass, DOMAIN)
try:
websession = aiohttp_client.async_get_clientsession(hass)
openuv = OpenUV(
Client(
config_entry.data[CONF_API_KEY],
config_entry.data.get(CONF_LATITUDE, hass.config.latitude),
config_entry.data.get(CONF_LONGITUDE, hass.config.longitude),
websession,
altitude=config_entry.data.get(CONF_ELEVATION, hass.config.elevation),
),
config_entry.data.get(CONF_BINARY_SENSORS, {}).get(
CONF_MONITORED_CONDITIONS, list(BINARY_SENSORS)
),
config_entry.data.get(CONF_SENSORS, {}).get(
CONF_MONITORED_CONDITIONS, list(SENSORS)
),
)
await openuv.async_update()
hass.data[DOMAIN][DATA_OPENUV_CLIENT][config_entry.entry_id] = openuv
except OpenUvError as err:
_LOGGER.error("Config entry failed: %s", err)
raise ConfigEntryNotReady
for component in ("binary_sensor", "sensor"):
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, component)
)
@_verify_domain_control
async def update_data(service):
"""Refresh all OpenUV data."""
_LOGGER.debug("Refreshing all OpenUV data")
await openuv.async_update()
async_dispatcher_send(hass, TOPIC_UPDATE)
hass.services.async_register(DOMAIN, "update_data", update_data)
@_verify_domain_control
async def update_uv_index_data(service):
"""Refresh OpenUV UV index data."""
_LOGGER.debug("Refreshing OpenUV UV index data")
await openuv.async_update_uv_index_data()
async_dispatcher_send(hass, TOPIC_UPDATE)
hass.services.async_register(DOMAIN, "update_uv_index_data", update_uv_index_data)
@_verify_domain_control
async def update_protection_data(service):
"""Refresh OpenUV protection window data."""
_LOGGER.debug("Refreshing OpenUV protection window data")
await openuv.async_update_protection_data()
async_dispatcher_send(hass, TOPIC_UPDATE)
hass.services.async_register(
DOMAIN, "update_protection_data", update_protection_data
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload an OpenUV config entry."""
hass.data[DOMAIN][DATA_OPENUV_CLIENT].pop(config_entry.entry_id)
tasks = [
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in ("binary_sensor", "sensor")
]
await asyncio.gather(*tasks)
return True
class OpenUV:
"""Define a generic OpenUV object."""
def __init__(self, client, binary_sensor_conditions, sensor_conditions):
"""Initialize."""
self.binary_sensor_conditions = binary_sensor_conditions
self.client = client
self.data = {}
self.sensor_conditions = sensor_conditions
async def async_update_protection_data(self):
"""Update binary sensor (protection window) data."""
if TYPE_PROTECTION_WINDOW in self.binary_sensor_conditions:
try:
resp = await self.client.uv_protection_window()
self.data[DATA_PROTECTION_WINDOW] = resp["result"]
except OpenUvError as err:
_LOGGER.error("Error during protection data update: %s", err)
self.data[DATA_PROTECTION_WINDOW] = {}
return
async def async_update_uv_index_data(self):
"""Update sensor (uv index, etc) data."""
if any(c in self.sensor_conditions for c in SENSORS):
try:
data = await self.client.uv_index()
self.data[DATA_UV] = data
except OpenUvError as err:
_LOGGER.error("Error during uv index data update: %s", err)
self.data[DATA_UV] = {}
return
async def async_update(self):
"""Update sensor/binary sensor data."""
tasks = [self.async_update_protection_data(), self.async_update_uv_index_data()]
await asyncio.gather(*tasks)
class OpenUvEntity(Entity):
"""Define a generic OpenUV entity."""
def __init__(self, openuv):
"""Initialize."""
self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._name = None
self.openuv = openuv
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attrs
@property
def name(self):
"""Return the name of the entity."""
return self._name
|
|
from shapely.geometry.multipolygon import MultiPolygon
from shapely.geometry.polygon import Polygon
from shapely.ops import cascaded_union, unary_union
from shapely.validation import explain_validity
import pyclipper
def _coords(shape):
"""
Return a list of lists of coordinates of the polygon. The list consists
firstly of the list of exterior coordinates followed by zero or more lists
of any interior coordinates.
"""
assert shape.geom_type == 'Polygon'
coords = [list(shape.exterior.coords)]
for interior in shape.interiors:
coords.append(list(interior.coords))
return coords
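# A brief hedged sketch (the helper below is illustrative, not part of the
# original module): for a square with a single hole, _coords returns the
# exterior ring first, followed by one list per interior ring.
def _example_coords_layout():
    square_with_hole = Polygon(
        [(0, 0), (10, 0), (10, 10), (0, 10)],
        [[(4, 4), (6, 4), (6, 6), (4, 6)]])
    rings = _coords(square_with_hole)
    assert len(rings) == 2  # one exterior list + one interior list
    return rings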
def _drop_degenerate_inners(shape):
"""
Drop degenerate (zero-size) inners from the polygon.
This is implemented as dropping anything with a size less than 0.5, as the
polygon is in integer coordinates and the smallest valid inner would be a
triangle with height and width 1.
"""
assert shape.geom_type == 'Polygon'
new_inners = []
for inner in shape.interiors:
# need to make a polygon of the linearring to get the _filled_ area of
# the closed ring.
if abs(Polygon(inner).area) >= 0.5:
new_inners.append(inner)
return Polygon(shape.exterior, new_inners)
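# Hedged sketch (illustrative only): an inner ring that has collapsed onto a
# line has zero filled area, so it falls below the 0.5 threshold and is
# dropped, while an ordinary hole survives.
def _example_drop_degenerate_inner():
    shape = Polygon(
        [(0, 0), (10, 0), (10, 10), (0, 10)],
        [[(2, 2), (4, 2), (4, 4), (2, 4)],   # real hole, filled area 4
         [(6, 6), (8, 6), (7, 6)]])          # collinear points, filled area 0
    cleaned = _drop_degenerate_inners(shape)
    assert len(cleaned.interiors) == 1
    return cleaned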
def _contour_to_poly(contour):
poly = Polygon(contour)
if not poly.is_valid:
poly = poly.buffer(0)
assert poly.is_valid, \
"Contour %r did not make valid polygon %s because %s" \
% (contour, poly.wkt, explain_validity(poly))
return poly
def _union_in_blocks(contours, block_size):
"""
Generator which yields a valid shape for each block_size multiple of
input contours. This merges together the contours for each block before
yielding them.
"""
n_contours = len(contours)
for i in range(0, n_contours, block_size):
j = min(i + block_size, n_contours)
inners = []
for c in contours[i:j]:
p = _contour_to_poly(c)
if p.type == 'Polygon':
inners.append(p)
elif p.type == 'MultiPolygon':
inners.extend(p.geoms)
holes = unary_union(inners)
assert holes.is_valid
yield holes
def _generate_polys(contours):
"""
Generator which yields a valid polygon for each contour input.
"""
for c in contours:
p = _contour_to_poly(c)
yield p
def _polytree_node_to_shapely(node):
"""
Recurses down a Clipper PolyTree, extracting the results as Shapely
objects.
Returns a tuple of (list of polygons, list of children)
"""
polygons = []
children = []
for ch in node.Childs:
p, c = _polytree_node_to_shapely(ch)
polygons.extend(p)
children.extend(c)
if node.IsHole:
# check expectations: a node should be a hole, _or_ return children.
# this is because children of holes must be outers, and should be on
# the polygons list.
assert len(children) == 0
if node.Contour:
children = [node.Contour]
else:
children = []
elif node.Contour:
poly = _contour_to_poly(node.Contour)
# we add each inner one-by-one so that we can reject them individually
# if they cause the polygon to become invalid. if the shape has lots
# of inners, then this can mean a proportional amount of work, and may
# take 1,000s of seconds. instead, we can group inners together, which
# reduces the number of times we call the expensive 'difference'
# method.
block_size = 200
if len(children) > block_size:
inners = _union_in_blocks(children, block_size)
else:
inners = _generate_polys(children)
for inner in inners:
# the difference of two valid polygons may fail, and in this
# situation we'd like to be able to display the polygon anyway.
# so we discard the bad inner and continue.
#
# see test_polygon_inners_crossing_outer for a test case.
try:
diff = poly.difference(inner)
            except Exception:
continue
if not diff.is_valid:
diff = diff.buffer(0)
# keep this for when https://trac.osgeo.org/geos/ticket/789 is
# resolved.
#
# assert diff.is_valid, \
# "Difference of %s and %s did not make valid polygon %s " \
# " because %s" \
# % (poly.wkt, inner.wkt, diff.wkt, explain_validity(diff))
#
# NOTE: this throws away the inner ring if we can't produce a
# valid difference. not ideal, but we'd rather produce something
# that's valid than nothing.
if diff.is_valid:
poly = diff
assert poly.is_valid
if poly.type == 'MultiPolygon':
polygons.extend(poly.geoms)
else:
polygons.append(poly)
children = []
else:
# check expectations: this branch gets executed if this node is not a
# hole, and has no contour. in that situation we'd expect that it has
# no children, as it would not be possible to subtract children from
# an empty outer contour.
assert len(children) == 0
return (polygons, children)
def _polytree_to_shapely(tree):
polygons, children = _polytree_node_to_shapely(tree)
# expect no left over children - should all be incorporated into polygons
# by the time recursion returns to the root.
assert len(children) == 0
union = cascaded_union(polygons)
assert union.is_valid
return union
def make_valid_pyclipper(shape):
"""
Use the pyclipper library to "union" a polygon on its own. This operation
uses the even-odd rule to determine which points are in the interior of
the polygon, and can reconstruct the orientation of the polygon from that.
The pyclipper library is robust, and uses integer coordinates, so should
not produce any additional degeneracies.
Before cleaning the polygon, we remove all degenerate inners. This is
useful to remove inners which have collapsed to points or lines, which can
interfere with the cleaning process.
"""
# drop all degenerate inners
clean_shape = _drop_degenerate_inners(shape)
pc = pyclipper.Pyclipper()
try:
pc.AddPaths(_coords(clean_shape), pyclipper.PT_SUBJECT, True)
# note: Execute2 returns the polygon tree, not the list of paths
result = pc.Execute2(pyclipper.CT_UNION, pyclipper.PFT_EVENODD)
except pyclipper.ClipperException:
return MultiPolygon([])
return _polytree_to_shapely(result)
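# Hedged usage sketch (illustrative only): a self-intersecting "bowtie"
# polygon is invalid under the OGC rules, but the even-odd union splits it
# into two valid triangles.
def _example_clean_bowtie():
    bowtie = Polygon([(0, 0), (10, 10), (10, 0), (0, 10)])
    assert not bowtie.is_valid
    cleaned = make_valid_pyclipper(bowtie)
    assert cleaned.is_valid
    return cleaned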
def make_valid_polygon(shape):
"""
Make a polygon valid. Polygons can be invalid in many ways, such as
self-intersection, self-touching and degeneracy. This process attempts to
make a polygon valid while retaining as much of its extent or area as
possible.
First, we call pyclipper to robustly union the polygon. Using this on its
own appears to be good for "cleaning" the polygon.
This might result in polygons which still have degeneracies according to
    the OGC standard of validity - as pyclipper does not consider these to be
invalid. Therefore we follow by using the `buffer(0)` technique to attempt
to remove any remaining degeneracies.
"""
assert shape.geom_type == 'Polygon'
shape = make_valid_pyclipper(shape)
assert shape.is_valid
return shape
def make_valid_multipolygon(shape):
new_g = []
for g in shape.geoms:
if g.is_empty:
continue
valid_g = make_valid_polygon(g)
if valid_g.type == 'MultiPolygon':
new_g.extend(valid_g.geoms)
else:
new_g.append(valid_g)
return MultiPolygon(new_g)
def make_it_valid(shape):
"""
Attempt to make any polygon or multipolygon valid.
"""
if shape.is_empty:
return shape
elif shape.type == 'MultiPolygon':
shape = make_valid_multipolygon(shape)
elif shape.type == 'Polygon':
shape = make_valid_polygon(shape)
return shape
|
|
# -*- coding: utf-8 -*-
import json
import os
import time
import unittest.mock
from .. import base
from girder.api import access
from girder.api.describe import describeRoute
from girder.api.rest import getApiUrl, loadmodel, Resource
from girder.constants import AccessType, registerAccessFlag, ROOT_DIR, VERSION
from girder.exceptions import AccessException, ValidationException
from girder.models.collection import Collection
from girder.models.file import File
from girder.models.folder import Folder
from girder.models.group import Group
from girder.models.item import Item
from girder.models.setting import Setting
from girder.models.user import User
from girder.settings import SettingDefault, SettingKey
from girder.utility import config
class TestEndpoints(Resource):
def __init__(self):
super().__init__()
self.resourceName = 'test_endpoints'
self.route('GET', ('loadmodel_with_flags', ':id'), self.loadModelFlags)
@access.public
@describeRoute(None)
@loadmodel(model='user', level=AccessType.READ, requiredFlags='my_key')
def loadModelFlags(self, user, params):
return 'success'
def setUpModule():
testServer = base.startServer()
testServer.root.api.v1.test_endpoints = TestEndpoints()
def tearDownModule():
base.stopServer()
class SystemTestCase(base.TestCase):
"""
Contains tests of the /system API endpoints.
"""
def setUp(self):
super().setUp()
self.users = [User().createUser(
'usr%s' % num, 'passwd', 'tst', 'usr', 'u%s@girder.test' % num)
for num in [0, 1]]
self.group = Group().createGroup('test group', creator=self.users[1])
def tearDown(self):
# Restore the state of the plugins configuration
conf = config.getConfig()
if 'plugins' in conf:
del conf['plugins']
def testGetVersion(self):
resp = self.request(path='/system/version', method='GET')
self.assertEqual(resp.json['release'], VERSION['release'])
def testSettings(self):
users = self.users
# Only admins should be able to get or set settings
for method in ('GET', 'PUT', 'DELETE'):
resp = self.request(path='/system/setting', method=method, params={
'key': 'foo',
'value': 'bar'
}, user=users[1])
self.assertStatus(resp, 403)
# Only valid setting keys should be allowed
resp = self.request(path='/system/setting', method='PUT', params={
'key': 'foo',
'value': 'bar'
}, user=users[0])
self.assertStatus(resp, 400)
self.assertEqual(resp.json['field'], 'key')
# Only a valid JSON list is permitted
resp = self.request(path='/system/setting', method='GET', params={
'list': json.dumps('not_a_list')
}, user=users[0])
self.assertStatus(resp, 400)
resp = self.request(path='/system/setting', method='PUT', params={
'list': json.dumps('not_a_list')
}, user=users[0])
self.assertStatus(resp, 400)
# Set an invalid setting value, should fail
resp = self.request(path='/system/setting', method='PUT', params={
'key': SettingKey.BANNER_COLOR,
'value': 'bar'
}, user=users[0])
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'],
'The banner color must be a hex color triplet')
# Set a valid value
resp = self.request(path='/system/setting', method='PUT', params={
'key': SettingKey.BANNER_COLOR,
'value': '#121212'
}, user=users[0])
self.assertStatusOk(resp)
# We should now be able to retrieve it
resp = self.request(path='/system/setting', method='GET', params={
'key': SettingKey.BANNER_COLOR
}, user=users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json, '#121212')
# We should now clear the setting
resp = self.request(path='/system/setting', method='DELETE', params={
'key': SettingKey.BANNER_COLOR
}, user=users[0])
self.assertStatusOk(resp)
# Setting should now be default
setting = Setting().get(SettingKey.BANNER_COLOR)
self.assertEqual(setting, SettingDefault.defaults[SettingKey.BANNER_COLOR])
        # We should also be able to put several settings using a JSON list
resp = self.request(path='/system/setting', method='PUT', params={
'list': json.dumps([
{'key': SettingKey.BANNER_COLOR, 'value': '#121212'},
{'key': SettingKey.COOKIE_LIFETIME, 'value': None},
])
}, user=users[0])
self.assertStatusOk(resp)
# We can get a list as well
resp = self.request(path='/system/setting', method='GET', params={
'list': json.dumps([
SettingKey.BANNER_COLOR,
SettingKey.COOKIE_LIFETIME,
])
}, user=users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json[SettingKey.BANNER_COLOR], '#121212')
# Try to set each key in turn to test the validation. First test with
        # an invalid value, then test with the default value. If the value
# 'bad' won't trigger a validation error, the key should be present in
# the badValues table.
badValues = {
SettingKey.BRAND_NAME: '',
SettingKey.BANNER_COLOR: '',
SettingKey.EMAIL_FROM_ADDRESS: '',
SettingKey.PRIVACY_NOTICE: '',
SettingKey.CONTACT_EMAIL_ADDRESS: '',
SettingKey.EMAIL_HOST: {},
SettingKey.SMTP_HOST: '',
SettingKey.SMTP_PASSWORD: {},
SettingKey.SMTP_USERNAME: {},
SettingKey.CORS_ALLOW_ORIGIN: {},
SettingKey.CORS_ALLOW_METHODS: {},
SettingKey.CORS_ALLOW_HEADERS: {},
SettingKey.CORS_EXPOSE_HEADERS: {},
}
allKeys = dict.fromkeys(SettingDefault.defaults.keys())
allKeys.update(badValues)
for key in allKeys:
resp = self.request(path='/system/setting', method='PUT', params={
'key': key,
'value': badValues.get(key, 'bad')
}, user=users[0])
self.assertStatus(resp, 400)
resp = self.request(path='/system/setting', method='PUT', params={
'key': key,
'value': json.dumps(SettingDefault.defaults.get(key, ''))
}, user=users[0])
self.assertStatusOk(resp)
resp = self.request(path='/system/setting', method='PUT', params={
'list': json.dumps([{'key': key, 'value': None}])
}, user=users[0])
self.assertStatusOk(resp)
def testCheck(self):
resp = self.request(path='/token/session', method='GET')
self.assertStatusOk(resp)
token = resp.json['token']
# 'basic' mode should work for a token or for anonymous
resp = self.request(path='/system/check', token=token)
self.assertStatusOk(resp)
check = resp.json
self.assertLess(check['bootTime'], time.time())
resp = self.request(path='/system/check')
self.assertStatusOk(resp)
check = resp.json
self.assertLess(check['bootTime'], time.time())
# but should fail for 'quick' mode
resp = self.request(path='/system/check', token=token, params={
'mode': 'quick'})
self.assertStatus(resp, 401)
# Admin can ask for any mode
resp = self.request(path='/system/check', user=self.users[0])
self.assertStatusOk(resp)
check = resp.json
self.assertLess(check['bootTime'], time.time())
self.assertNotIn('cherrypyThreadsInUse', check)
resp = self.request(path='/system/check', user=self.users[0], params={
'mode': 'quick'})
self.assertStatusOk(resp)
check = resp.json
self.assertLess(check['bootTime'], time.time())
self.assertGreaterEqual(check['cherrypyThreadsInUse'], 1)
self.assertIn('rss', check['processMemory'])
resp = self.request(path='/system/check', user=self.users[0], params={
'mode': 'slow'})
self.assertStatusOk(resp)
check = resp.json
self.assertGreater(check['girderDiskUsage']['free'], 0)
resp = self.request(path='/system/check', method='PUT',
user=self.users[0], params={'progress': True})
self.assertStatusOk(resp)
        # tests that check repair of different models are covered in the
# individual models' tests
def testConsistencyCheck(self):
user = self.users[0]
c1 = Collection().createCollection('c1', user)
f1 = Folder().createFolder(c1, 'f1', parentType='collection')
Folder().createFolder(c1, 'f2', parentType='collection')
f3 = Folder().createFolder(user, 'f3', parentType='user')
Folder().createFolder(user, 'f4', parentType='user')
i1 = Item().createItem('i1', user, f1)
i2 = Item().createItem('i2', user, f1)
Item().createItem('i3', user, f1)
i4 = Item().createItem('i4', user, f3)
Item().createItem('i5', user, f3)
Item().createItem('i6', user, f3)
assetstore = {'_id': 0}
File().createFile(user, i1, 'foo', 7, assetstore)
File().createFile(user, i1, 'foo', 13, assetstore)
File().createFile(user, i2, 'foo', 19, assetstore)
File().createFile(user, i4, 'foo', 23, assetstore)
self.assertEqual(39, Collection().load(c1['_id'], force=True)['size'])
self.assertEqual(39, Folder().load(f1['_id'], force=True)['size'])
self.assertEqual(23, Folder().load(f3['_id'], force=True)['size'])
self.assertEqual(20, Item().load(i1['_id'], force=True)['size'])
self.assertEqual(23, User().load(user['_id'], force=True)['size'])
resp = self.request(path='/system/check', user=user, method='PUT')
self.assertStatusOk(resp)
self.assertEqual(resp.json['baseParentsFixed'], 0)
self.assertEqual(resp.json['orphansRemoved'], 0)
self.assertEqual(resp.json['sizesChanged'], 0)
Item().update({'_id': i1['_id']}, update={'$set': {'baseParentId': None}})
resp = self.request(path='/system/check', user=user, method='PUT')
self.assertStatusOk(resp)
self.assertEqual(resp.json['baseParentsFixed'], 1)
self.assertEqual(resp.json['orphansRemoved'], 0)
self.assertEqual(resp.json['sizesChanged'], 0)
Collection().update({'_id': c1['_id']}, update={'$set': {'size': 0}})
Folder().update({'_id': f1['_id']}, update={'$set': {'size': 0}})
Item().update({'_id': i1['_id']}, update={'$set': {'size': 0}})
resp = self.request(path='/system/check', user=user, method='PUT')
self.assertStatusOk(resp)
self.assertEqual(resp.json['baseParentsFixed'], 0)
self.assertEqual(resp.json['orphansRemoved'], 0)
self.assertEqual(resp.json['sizesChanged'], 3)
self.assertEqual(39, Collection().load(c1['_id'], force=True)['size'])
self.assertEqual(39, Folder().load(f1['_id'], force=True)['size'])
self.assertEqual(23, Folder().load(f3['_id'], force=True)['size'])
self.assertEqual(20, Item().load(i1['_id'], force=True)['size'])
self.assertEqual(23, User().load(user['_id'], force=True)['size'])
Folder().collection.delete_one({'_id': f3['_id']})
resp = self.request(path='/system/check', user=user, method='PUT')
self.assertStatusOk(resp)
self.assertEqual(resp.json['baseParentsFixed'], 0)
self.assertEqual(resp.json['orphansRemoved'], 3)
self.assertEqual(resp.json['sizesChanged'], 0)
self.assertEqual(
0, User().load(user['_id'], force=True)['size'])
def testLogRoute(self):
logRoot = os.path.join(ROOT_DIR, 'tests', 'cases', 'dummylogs')
config.getConfig()['logging'] = {'log_root': logRoot}
resp = self.request(path='/system/log', user=self.users[1], params={
'log': 'error',
'bytes': 0
})
self.assertStatus(resp, 403)
resp = self.request(path='/system/log', user=self.users[0], params={
'log': 'error',
'bytes': 0
}, isJson=False)
self.assertStatusOk(resp)
self.assertEqual(
self.getBody(resp),
'=== Last 12 bytes of %s/error.log: ===\n\nHello world\n' % logRoot)
resp = self.request(path='/system/log', user=self.users[0], params={
'log': 'error',
'bytes': 6
}, isJson=False)
self.assertStatusOk(resp)
self.assertEqual(
self.getBody(resp),
'=== Last 6 bytes of %s/error.log: ===\n\nworld\n' % logRoot)
resp = self.request(path='/system/log', user=self.users[0], params={
'log': 'error',
'bytes': 18
}, isJson=False)
self.assertStatusOk(resp)
self.assertEqual(
self.getBody(resp),
'=== Last 18 bytes of %s/error.log: ===\n\nmonde\nHello world\n' % logRoot)
resp = self.request(path='/system/log', user=self.users[0], params={
'log': 'info',
'bytes': 6
}, isJson=False)
self.assertStatusOk(resp)
self.assertEqual(
self.getBody(resp),
'=== Last 0 bytes of %s/info.log: ===\n\n' % logRoot)
del config.getConfig()['logging']
def testLogLevel(self):
from girder import logger, _attachFileLogHandlers
_attachFileLogHandlers()
for handler in logger.handlers:
if handler._girderLogHandler == 'info':
handler.emit = unittest.mock.MagicMock()
infoEmit = handler.emit
elif handler._girderLogHandler == 'error':
handler.emit = unittest.mock.MagicMock()
errorEmit = handler.emit
        # The initial log level should be DEBUG
resp = self.request(path='/system/log/level', user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json, 'DEBUG')
levels = [{
'level': 'INFO',
'debug': (0, 0),
'info': (1, 0),
'error': (0, 1),
}, {
'level': 'ERROR',
'debug': (0, 0),
'info': (0, 0),
'error': (0, 1),
}, {
'level': 'CRITICAL',
'debug': (0, 0),
'info': (0, 0),
'error': (0, 0),
}, {
'level': 'DEBUG',
'debug': (1, 0),
'info': (1, 0),
'error': (0, 1),
}]
for levelTest in levels:
resp = self.request(
method='PUT', path='/system/log/level', user=self.users[0],
params={'level': levelTest['level']})
self.assertStatusOk(resp)
self.assertEqual(resp.json, levelTest['level'])
resp = self.request(path='/system/log/level', user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json, levelTest['level'])
for level in ('debug', 'info', 'error'):
infoCount, errorCount = infoEmit.call_count, errorEmit.call_count
getattr(logger, level)('log entry %s %s' % (
levelTest['level'], level))
self.assertEqual(infoEmit.call_count, infoCount + levelTest[level][0])
self.assertEqual(errorEmit.call_count, errorCount + levelTest[level][1])
# Try to set a bad log level
resp = self.request(
method='PUT', path='/system/log/level', user=self.users[0],
params={'level': 'NOSUCHLEVEL'})
self.assertStatus(resp, 400)
self.assertIn('Invalid value for level', resp.json['message'])
def testAccessFlags(self):
resp = self.request('/system/access_flag')
self.assertStatusOk(resp)
self.assertEqual(resp.json, {})
registerAccessFlag('my_key', name='hello', description='a custom flag')
resp = self.request('/system/access_flag')
self.assertStatusOk(resp)
self.assertEqual(resp.json, {
'my_key': {
'name': 'hello',
'description': 'a custom flag',
'admin': False
}
})
self.users[1] = User().load(self.users[1]['_id'], force=True)
user = self.users[1]
# Manage custom access flags on an access controlled resource
self.assertFalse(User().hasAccessFlags(user, user, flags=['my_key']))
# Admin should always have permission
self.assertTrue(User().hasAccessFlags(user, self.users[0], flags=['my_key']))
# Test the requireAccessFlags method
with self.assertRaises(AccessException):
User().requireAccessFlags(user, user=user, flags='my_key')
User().requireAccessFlags(user, user=self.users[0], flags='my_key')
acl = User().getFullAccessList(user)
self.assertEqual(acl['users'][0]['flags'], [])
# Test loadmodel requiredFlags argument via REST endpoint
resp = self.request(
'/test_endpoints/loadmodel_with_flags/%s' % user['_id'], user=self.users[1])
self.assertStatus(resp, 403)
user = User().setAccessList(self.users[0], access={
'users': [{
'id': self.users[1]['_id'],
'level': AccessType.ADMIN,
'flags': ['my_key', 'not a registered flag']
}],
'groups': [{
'id': self.group['_id'],
'level': AccessType.ADMIN,
'flags': ['my_key']
}]
}, save=True)
resp = self.request(
'/test_endpoints/loadmodel_with_flags/%s' % user['_id'], user=self.users[1])
self.assertStatusOk(resp)
self.assertEqual(resp.json, 'success')
# Only registered flags should be stored
acl = User().getFullAccessList(user)
self.assertEqual(acl['users'][0]['flags'], ['my_key'])
self.assertTrue(User().hasAccessFlags(user, user, flags=['my_key']))
# Create an admin-only access flag
registerAccessFlag('admin_flag', name='admin flag', admin=True)
# Non-admin shouldn't be able to set it
user = User().setAccessList(self.users[0], access={
'users': [{
'id': self.users[1]['_id'],
'level': AccessType.ADMIN,
'flags': ['admin_flag']
}],
'groups': []
}, save=True, user=self.users[1])
acl = User().getFullAccessList(user)
self.assertEqual(acl['users'][0]['flags'], [])
# Admin user should be able to set it
user = User().setAccessList(self.users[1], access={
'users': [{
'id': self.users[1]['_id'],
'level': AccessType.ADMIN,
'flags': ['admin_flag']
}],
'groups': [{
'id': self.group['_id'],
'level': AccessType.ADMIN,
'flags': ['admin_flag']
}]
}, save=True, user=self.users[0])
acl = User().getFullAccessList(user)
self.assertEqual(acl['users'][0]['flags'], ['admin_flag'])
        # An already-enabled admin-only flag should stay enabled for a non-admin user
user = User().setAccessList(self.users[1], access={
'users': [{
'id': self.users[1]['_id'],
'level': AccessType.ADMIN,
'flags': ['my_key', 'admin_flag']
}],
'groups': [{
'id': self.group['_id'],
'level': AccessType.ADMIN,
'flags': ['admin_flag']
}]
}, save=True, user=self.users[1])
acl = User().getFullAccessList(user)
self.assertEqual(set(acl['users'][0]['flags']), {'my_key', 'admin_flag'})
self.assertEqual(acl['groups'][0]['flags'], ['admin_flag'])
# Test setting public flags on a collection and folder
collectionModel = Collection()
folderModel = Folder()
itemModel = Item()
collection = collectionModel.createCollection('coll', creator=self.users[0], public=True)
folder = folderModel.createFolder(
collection, 'folder', parentType='collection', creator=self.users[0])
# Add an item to the folder so we can test AclMixin flag behavior
item = itemModel.createItem(folder=folder, name='test', creator=self.users[0])
folder = folderModel.setUserAccess(
folder, self.users[1], level=AccessType.ADMIN, save=True, currentUser=self.users[0])
with self.assertRaises(AccessException):
collectionModel.requireAccessFlags(collection, user=None, flags='my_key')
# Test AclMixin flag behavior
with self.assertRaises(AccessException):
itemModel.requireAccessFlags(item, user=None, flags='my_key')
self.assertFalse(itemModel.hasAccessFlags(item, user=None, flags='my_key'))
collection = collectionModel.setAccessList(
collection, access=collection['access'], save=True, recurse=True, user=self.users[0],
publicFlags=['my_key'])
collectionModel.requireAccessFlags(collection, user=None, flags='my_key')
# Make sure recursive setting of public flags worked
folder = folderModel.load(folder['_id'], force=True)
self.assertEqual(folder['publicFlags'], ['my_key'])
itemModel.requireAccessFlags(item, user=None, flags='my_key')
# Non-admin shouldn't be able to set admin-only public flags
folder = folderModel.setPublicFlags(
folder, flags=['admin_flag'], user=self.users[1], save=True)
self.assertEqual(folder['publicFlags'], [])
# Admin users should be able to set admin-only public flags
folder = folderModel.setPublicFlags(
folder, flags=['admin_flag'], user=self.users[0], save=True, append=True)
self.assertEqual(folder['publicFlags'], ['admin_flag'])
# Non-admin users can set admin-only public flags if they are already enabled
folder = folderModel.setPublicFlags(
folder, flags=['admin_flag', 'my_key'], user=self.users[1], save=True)
self.assertEqual(set(folder['publicFlags']), {'admin_flag', 'my_key'})
# Test "force" options
folder = folderModel.setPublicFlags(folder, flags='admin_flag', force=True, save=True)
self.assertEqual(folder['publicFlags'], ['admin_flag'])
folder = folderModel.setAccessList(folder, access={
'users': [{
'id': self.users[1]['_id'],
'level': AccessType.ADMIN,
'flags': ['my_key', 'admin_flag']
}],
'groups': []
}, save=True, force=True)
folderModel.requireAccessFlags(folder, user=self.users[1], flags='my_key')
folder = folderModel.setUserAccess(
folder, self.users[1], level=AccessType.READ, save=True, force=True, flags=[])
self.assertFalse(folderModel.hasAccessFlags(folder, self.users[1], flags='my_key'))
folder = folderModel.setGroupAccess(
folder, self.group, level=AccessType.READ, save=True, force=True, flags='my_key')
folderModel.requireAccessFlags(folder, user=self.users[1], flags='my_key')
# Testing with flags=None should give sensible behavior
folderModel.requireAccessFlags(folder, user=None, flags=None)
# Test filtering results by access flags (both ACModel and AclMixin)
for model, doc in ((folderModel, folder), (itemModel, item)):
cursor = model.find({})
self.assertGreater(len(list(cursor)), 0)
cursor = model.find({})
filtered = list(model.filterResultsByPermission(
cursor, user=None, level=AccessType.READ, flags='my_key'))
self.assertEqual(len(filtered), 0)
cursor = model.find({})
filtered = list(model.filterResultsByPermission(
cursor, user=self.users[1], level=AccessType.READ, flags=('my_key', 'admin_flag')))
self.assertEqual(len(filtered), 1)
self.assertEqual(filtered[0]['_id'], doc['_id'])
def testServerRootSetting(self):
settingModel = Setting()
with self.assertRaises(ValidationException):
settingModel.set(SettingKey.SERVER_ROOT, 'bad_value')
settingModel.set(SettingKey.SERVER_ROOT, 'https://somedomain.org/foo')
self.assertEqual(getApiUrl(), 'https://somedomain.org/foo/api/v1')
def testCollectionCreationPolicySettingAndItsAccessAPI(self):
resp = self.request(path='/system/setting', method='PUT', params={
'list': json.dumps([
{'key': SettingKey.COLLECTION_CREATE_POLICY, 'value': json.dumps({
'open': True,
'users': [str(self.users[1]['_id'])],
'groups': [str(self.group['_id'])]
})}
])
}, user=self.users[0])
self.assertStatusOk(resp)
resp = self.request(path='/system/setting/collection_creation_policy/access',
method='GET', user=self.users[0])
self.assertEqual(resp.json['users'][0]['id'], str(self.users[1]['_id']))
self.assertEqual(resp.json['users'][0]['login'], str(self.users[1]['login']))
self.assertEqual(resp.json['groups'][0]['id'], str(self.group['_id']))
# Delete underlying users and groups, should be OK
Group().remove(self.group)
User().remove(self.users[1])
resp = self.request(
path='/system/setting/collection_creation_policy/access', method='GET',
user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json['users'], [])
self.assertEqual(resp.json['groups'], [])
|
|
from datetime import date, time, timedelta
from decimal import Decimal
import random
from django.db.models import F, Func, Value
from django.db.models.functions import Concat
from django.test import TestCase
from django.utils import timezone
from django_bulk_update import helper
from .models import Person, Role, PersonUUID
from .fixtures import create_fixtures
class BulkUpdateTests(TestCase):
def setUp(self):
self.now = timezone.now().replace(microsecond=0) # mysql doesn't do microseconds. # NOQA
self.date = date(2015, 3, 28)
self.time = time(13, 0)
create_fixtures()
def _test_field(self, field, idx_to_value_function):
'''
        Helper to do repetitive simple tests on one field.
'''
# set
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
value = idx_to_value_function(idx)
setattr(person, field, value)
# update
Person.objects.bulk_update(people, update_fields=[field])
# check
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
saved_value = getattr(person, field)
expected_value = idx_to_value_function(idx)
self.assertEqual(saved_value, expected_value)
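    # For example, test_simple_fields below effectively runs
    # self._test_field('age', lambda idx: idx + 27): it writes a distinct age
    # per person, bulk-updates, and re-reads to verify the saved values.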
def test_simple_fields(self):
fn = lambda idx: idx + 27
for field in ('default', 'big_age', 'age', 'positive_age',
'positive_small_age', 'small_age'):
self._test_field(field, fn)
def test_comma_separated_integer_field(self):
fn = lambda idx: str(idx) + ',27'
self._test_field('comma_separated_age', fn)
def test_boolean_field(self):
fn = lambda idx: [True, False][idx % 2]
self._test_field('certified', fn)
def test_null_boolean_field(self):
fn = lambda idx: [True, False, None][idx % 3]
self._test_field('null_certified', fn)
def test_char_field(self):
NAMES = ['Walter', 'The Dude', 'Donny', 'Jesus', 'Buddha', 'Clark']
fn = lambda idx: NAMES[idx % 5]
self._test_field('name', fn)
def test_email_field(self):
EMAILS = ['walter@mailinator.com', 'thedude@mailinator.com',
'donny@mailinator.com', 'jesus@mailinator.com',
'buddha@mailinator.com', 'clark@mailinator.com']
fn = lambda idx: EMAILS[idx % 5]
self._test_field('email', fn)
def test_file_path_field(self):
PATHS = ['/home/dummy.txt', '/Downloads/kitten.jpg',
'/Users/user/fixtures.json', 'dummy.png',
'users.json', '/home/dummy.png']
fn = lambda idx: PATHS[idx % 5]
self._test_field('file_path', fn)
def test_slug_field(self):
SLUGS = ['jesus', 'buddha', 'clark', 'the-dude', 'donny', 'walter']
fn = lambda idx: SLUGS[idx % 5]
self._test_field('slug', fn)
def test_text_field(self):
TEXTS = ['this is a dummy text', 'dummy text', 'bla bla bla bla bla',
'here is a dummy text', 'dummy', 'bla bla bla']
fn = lambda idx: TEXTS[idx % 5]
self._test_field('text', fn)
def test_url_field(self):
URLS = ['docs.djangoproject.com', 'news.ycombinator.com',
'https://docs.djangoproject.com', 'https://google.com',
'google.com', 'news.ycombinator.com']
fn = lambda idx: URLS[idx % 5]
self._test_field('url', fn)
def test_date_time_field(self):
fn = lambda idx: self.now - timedelta(days=1 + idx, hours=1 + idx)
self._test_field('date_time', fn)
def test_date_field(self):
fn = lambda idx: self.date - timedelta(days=1 + idx)
self._test_field('date', fn)
def test_time_field(self):
fn = lambda idx: time(1 + idx, idx)
self._test_field('time', fn)
def test_decimal_field(self):
fn = lambda idx: Decimal('1.%s' % (50 + idx * 7))
self._test_field('height', fn)
def test_float_field(self):
fn = lambda idx: float(idx) * 2.0
self._test_field('float_height', fn)
def test_generic_ipaddress_field(self):
IPS = ['127.0.0.1', '192.0.2.30', '2a02:42fe::4', '10.0.0.1',
'8.8.8.8']
fn = lambda idx: IPS[idx % 5]
self._test_field('remote_addr', fn)
def test_image_field(self):
IMGS = ['kitten.jpg', 'dummy.png', 'user.json', 'dummy.png', 'foo.gif']
fn = lambda idx: IMGS[idx % 5]
self._test_field('image', fn)
self._test_field('my_file', fn)
def test_custom_fields(self):
values = {}
people = Person.objects.all()
people_dict = {p.name: p for p in people}
person = people_dict['Mike']
person.data = {'name': 'mikey', 'age': 99, 'ex': -99}
values[person.pk] = {'name': 'mikey', 'age': 99, 'ex': -99}
person = people_dict['Mary']
person.data = {'names': {'name': []}}
values[person.pk] = {'names': {'name': []}}
person = people_dict['Pete']
person.data = []
values[person.pk] = []
person = people_dict['Sandra']
person.data = [{'name': 'Pete'}, {'name': 'Mike'}]
values[person.pk] = [{'name': 'Pete'}, {'name': 'Mike'}]
person = people_dict['Ash']
person.data = {'text': 'bla'}
values[person.pk] = {'text': 'bla'}
person = people_dict['Crystal']
values[person.pk] = person.data
Person.objects.bulk_update(people)
people = Person.objects.all()
for person in people:
self.assertEqual(person.data, values[person.pk])
def test_update_fields(self):
"""
Only the fields in "update_fields" are updated
"""
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age += 1
person.height += Decimal('0.01')
Person.objects.bulk_update(people, update_fields=['age'])
people2 = Person.objects.order_by('pk').all()
for person1, person2 in zip(people, people2):
self.assertEqual(person1.age, person2.age)
self.assertNotEqual(person1.height, person2.height)
def test_update_foreign_key_fields(self):
roles = [Role.objects.create(code=1), Role.objects.create(code=2)]
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age += 1
person.height += Decimal('0.01')
person.role = roles[0] if idx % 2 == 0 else roles[1]
Person.objects.bulk_update(people)
people2 = Person.objects.order_by('pk').all()
for person1, person2 in zip(people, people2):
self.assertEqual(person1.role.code, person2.role.code)
self.assertEqual(person1.age, person2.age)
self.assertEqual(person1.height, person2.height)
def test_update_foreign_key_fields_explicit(self):
roles = [Role.objects.create(code=1), Role.objects.create(code=2)]
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age += 1
person.height += Decimal('0.01')
person.role = roles[0] if idx % 2 == 0 else roles[1]
person.big_age += 40
Person.objects.bulk_update(people,
update_fields=['age', 'height', 'role'])
people2 = Person.objects.order_by('pk').all()
for person1, person2 in zip(people, people2):
self.assertEqual(person1.role.code, person2.role.code)
self.assertEqual(person1.age, person2.age)
self.assertEqual(person1.height, person2.height)
self.assertNotEqual(person1.big_age, person2.big_age)
def test_update_foreign_key_fields_explicit_with_id_suffix(self):
roles = [Role.objects.create(code=1), Role.objects.create(code=2)]
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age += 1
person.height += Decimal('0.01')
person.role = roles[0] if idx % 2 == 0 else roles[1]
Person.objects.bulk_update(people,
update_fields=['age', 'height', 'role_id'])
people2 = Person.objects.order_by('pk').all()
for person1, person2 in zip(people, people2):
self.assertEqual(person1.role.code, person2.role.code)
self.assertEqual(person1.age, person2.age)
self.assertEqual(person1.height, person2.height)
def test_update_foreign_key_exclude_fields_explicit(self):
roles = [Role.objects.create(code=1), Role.objects.create(code=2)]
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age += 1
person.height += Decimal('0.01')
person.role = roles[0] if idx % 2 == 0 else roles[1]
person.big_age += 40
Person.objects.bulk_update(people,
update_fields=['age', 'height'],
exclude_fields=['role'])
people2 = Person.objects.order_by('pk').all()
for person1, person2 in zip(people, people2):
self.assertTrue(isinstance(person1.role, Role))
self.assertEqual(person2.role, None)
self.assertEqual(person1.age, person2.age)
self.assertEqual(person1.height, person2.height)
self.assertNotEqual(person1.big_age, person2.big_age)
def test_update_foreign_key_exclude_fields_explicit_with_id_suffix(self):
roles = [Role.objects.create(code=1), Role.objects.create(code=2)]
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age += 1
person.height += Decimal('0.01')
person.role = roles[0] if idx % 2 == 0 else roles[1]
Person.objects.bulk_update(people,
update_fields=['age', 'height'],
exclude_fields=['role_id'])
people2 = Person.objects.order_by('pk').all()
for person1, person2 in zip(people, people2):
self.assertTrue(isinstance(person1.role, Role))
self.assertEqual(person2.role, None)
self.assertEqual(person1.age, person2.age)
self.assertEqual(person1.height, person2.height)
def test_exclude_fields(self):
"""
Only the fields not in "exclude_fields" are updated
"""
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age += 1
person.height += Decimal('0.01')
Person.objects.bulk_update(people, exclude_fields=['age'])
people2 = Person.objects.order_by('pk').all()
for person1, person2 in zip(people, people2):
self.assertNotEqual(person1.age, person2.age)
self.assertEqual(person1.height, person2.height)
def test_exclude_fields_with_tuple_exclude_fields(self):
"""
Only the fields not in "exclude_fields" are updated
"""
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age += 1
person.height += Decimal('0.01')
Person.objects.bulk_update(people, exclude_fields=('age',))
people2 = Person.objects.order_by('pk').all()
for person1, person2 in zip(people, people2):
self.assertNotEqual(person1.age, person2.age)
self.assertEqual(person1.height, person2.height)
def test_object_list(self):
"""
Pass in a list instead of a queryset for bulk updating
"""
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.big_age = idx + 27
Person.objects.bulk_update(list(people))
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
self.assertEqual(person.big_age, idx + 27)
def test_empty_list(self):
"""
Update no elements, passed as a list
"""
Person.objects.bulk_update([])
def test_empty_queryset(self):
"""
Update no elements, passed as a queryset
"""
people = Person.objects.filter(name="Aceldotanrilsteucsebces ECSbd")
Person.objects.bulk_update(people)
def test_one_sized_list(self):
"""
        Update a one-element list and check that it does not raise a
        syntax error on some db backends.
"""
people = Person.objects.all()[:1]
Person.objects.bulk_update(list(people))
def test_one_sized_queryset(self):
"""
        Update a one-element queryset and check that it does not raise a
        syntax error on some db backends.
"""
people = Person.objects.filter(name='Mike')
Person.objects.bulk_update(people)
def test_wrong_field_names(self):
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.big_age = idx + 27
self.assertRaises(TypeError, Person.objects.bulk_update,
people, update_fields=['somecolumn', 'name'])
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.big_age = idx + 27
self.assertRaises(TypeError, Person.objects.bulk_update,
people, exclude_fields=['somecolumn'])
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.big_age = idx + 27
self.assertRaises(TypeError, Person.objects.bulk_update,
people, update_fields=['somecolumn'],
exclude_fields=['someothercolumn'])
def test_batch_size(self):
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age += 1
person.height += Decimal('0.01')
updated_obj_count = Person.objects.bulk_update(people, batch_size=1)
self.assertEqual(updated_obj_count, len(people))
people2 = Person.objects.order_by('pk').all()
for person1, person2 in zip(people, people2):
self.assertEqual(person1.age, person2.age)
self.assertEqual(person1.height, person2.height)
def test_uuid_pk(self):
"""
        Test 'bulk_update' with a model whose pk is a UUID.
"""
# create
PersonUUID.objects.bulk_create(
[PersonUUID(age=c) for c in range(20, 30)])
# set
people = PersonUUID.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age = idx * 11
# update
PersonUUID.objects.bulk_update(people, update_fields=['age'])
# check
people = PersonUUID.objects.order_by('pk').all()
for idx, person in enumerate(people):
saved_value = person.age
expected_value = idx * 11
self.assertEqual(saved_value, expected_value)
def test_F_expresion(self):
# initialize
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age = idx*10
person.save()
# set
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age = F('age') - idx
# update
Person.objects.bulk_update(people)
# check
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
saved_value = person.age
expected_value = idx*10 - idx
self.assertEqual(saved_value, expected_value)
def test_Func_expresion(self):
# initialize
ini_values = 'aA', 'BB', '', 'cc', '12'
people = Person.objects.order_by('pk').all()
for value, person in zip(ini_values, people):
person.name = value
person.text = value*2
person.save()
# set
people = Person.objects.order_by('pk').all()
for person in people:
person.name = Func(F('name'), function='UPPER')
person.text = Func(F('text'), function='LOWER')
# update
Person.objects.bulk_update(people)
# check
people = Person.objects.order_by('pk').all()
expected_values = 'AA', 'BB', '', 'CC', '12'
for expected_value, person in zip(expected_values, people):
saved_value = person.name
self.assertEqual(saved_value, expected_value)
expected_values = 'aaaa', 'bbbb', '', 'cccc', '1212'
for expected_value, person in zip(expected_values, people):
saved_value = person.text
self.assertEqual(saved_value, expected_value)
def test_Concat_expresion(self):
# initialize
ini_values_1 = 'a', 'b', 'c', 'd', 'e'
ini_values_2 = 'v', 'w', 'x', 'y', 'z'
people = Person.objects.order_by('pk').all()
for value1, value2, person in zip(ini_values_1, ini_values_2, people):
person.slug = value1
person.name = value2
person.save()
# set
people = Person.objects.order_by('pk').all()
for person in people:
person.text = Concat(F('slug'), Value('@'), F('name'))
# update
Person.objects.bulk_update(people)
# check
people = Person.objects.order_by('pk').all()
expected_values = 'a@v', 'b@w', 'c@x', 'd@y', 'e@z'
for expected_value, person in zip(expected_values, people):
saved_value = person.text
self.assertEqual(saved_value, expected_value)
def test_different_deferred_fields(self):
# initialize
people = Person.objects.order_by('pk').all()
for person in people:
person.name = 'original name'
person.text = 'original text'
person.save()
# set
people1 = list(Person.objects.filter(age__lt=10).only('name'))
people2 = list(Person.objects.filter(age__gte=10).only('text'))
people = people1 + people2
for person in people:
if person.age < 10:
person.name = 'changed name'
else:
person.text = 'changed text'
# update
count = Person.objects.bulk_update(people)
# check
people = Person.objects.order_by('pk').all()
self.assertEquals(count, people.count())
for person in people:
if person.age < 10:
self.assertEquals(person.name, 'changed name')
self.assertEquals(person.text, 'original text')
else:
self.assertEquals(person.name, 'original name')
self.assertEquals(person.text, 'changed text')
def test_different_deferred_fields_02(self):
# initialize
people = Person.objects.order_by('pk').all()
for person in people:
person.name = 'original name'
person.text = 'original text'
person.save()
# set
people1 = list(Person.objects.filter(age__lt=10).only('name'))
people2 = list(Person.objects.filter(age__gte=10).only('text'))
people = people1 + people2
for person in people:
if person.age < 10:
person.name = 'changed name'
else:
person.text = 'changed text'
# update
count = Person.objects.bulk_update(people, exclude_fields=['name'])
# check
people = Person.objects.order_by('pk').all()
self.assertEquals(count, people.count())
for person in people:
if person.age < 10:
self.assertEquals(person.name, 'original name')
self.assertEquals(person.text, 'original text')
else:
self.assertEquals(person.name, 'original name')
self.assertEquals(person.text, 'changed text')
class NumQueriesTest(TestCase):
def setUp(self):
create_fixtures(5)
def test_num_queries(self):
"""
Queries:
- retrieve objects
- update objects
"""
people = Person.objects.order_by('pk').all()
self.assertNumQueries(2, Person.objects.bulk_update, people)
def test_already_evaluated_queryset(self):
"""
Queries:
- update objects
(objects are already retrieved, because of the previous loop)
"""
people = Person.objects.all()
for person in people:
person.age += 2
person.name = Func(F('name'), function='UPPER')
person.text = 'doc'
person.height -= Decimal(0.5)
self.assertNumQueries(1, Person.objects.bulk_update, people)
def test_explicit_fields(self):
"""
Queries:
- retrieve objects
- update objects
"""
people = Person.objects.all()
self.assertNumQueries(
2, Person.objects.bulk_update, people,
update_fields=['date', 'time', 'image', 'slug', 'height'],
exclude_fields=['date', 'url']
)
def test_deferred_fields(self):
"""
Queries:
- retrieve objects
- update objects
"""
people = Person.objects.all().only('date', 'url', 'age', 'image')
self.assertNumQueries(2, Person.objects.bulk_update, people)
def test_different_deferred_fields(self):
"""
Queries:
- retrieve objects
- update objects
"""
all_people = Person.objects
people1 = all_people.filter(age__lt=10).defer('date', 'url', 'age')
people2 = all_people.filter(age__gte=10).defer('url', 'name', 'big_age')
people = people1 | people2
self.assertNumQueries(2, Person.objects.bulk_update, people)
def test_deferred_fields_and_excluded_fields(self):
"""
Queries:
- retrieve objects
- update objects
"""
people = Person.objects.all().only('date', 'age', 'time', 'image', 'slug')
self.assertNumQueries(2, Person.objects.bulk_update, people,
exclude_fields=['date', 'url'])
def test_list_of_objects(self):
"""
Queries:
- update objects
(objects are already retrieved, because of the cast to list)
"""
people = list(Person.objects.all())
self.assertNumQueries(1, Person.objects.bulk_update, people)
def test_fields_to_update_are_deferred(self):
"""
As all fields in 'update_fields' are deferred,
a query will be done for each obj and field to retrieve its value.
"""
people = Person.objects.all().only('pk')
update_fields = ['date', 'time', 'image']
expected_queries = len(update_fields) * Person.objects.count() + 2
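        # With the 5 fixture people and the 3 deferred fields above, this is
        # 3 * 5 + 2 = 17 queries: one per (object, field) lookup, plus the
        # initial fetch and the final update.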
self.assertNumQueries(expected_queries, Person.objects.bulk_update,
people, update_fields=update_fields)
def test_no_field_to_update(self):
"""
Queries:
- retrieve objects
(as update_fields is empty, no update query will be done)
"""
people = Person.objects.all()
self.assertNumQueries(1, Person.objects.bulk_update,
people, update_fields=[])
def test_no_objects(self):
"""
Queries:
- retrieve objects
        (as no objects are actually retrieved, no update query will be done)
"""
people = Person.objects.filter(name='xxx')
self.assertNumQueries(1, Person.objects.bulk_update,
people, update_fields=['age', 'height'])
def test_batch_size(self):
"""
Queries:
- retrieve objects
- update objects * 3
"""
self.assertEquals(Person.objects.count(), 5)
people = Person.objects.order_by('pk').all()
self.assertNumQueries(4, Person.objects.bulk_update,
people, batch_size=2)
class GetFieldsTests(TestCase):
total_fields = 25
def setUp(self):
create_fixtures()
def _assertEquals(self, fields, names):
self.assertEquals(
set(field.name for field in fields),
set(names),
)
def _assertIn(self, names, fields):
field_names = [field.name for field in fields]
for name in names:
self.assertIn(name, field_names)
def _assertNotIn(self, names, fields):
field_names = [field.name for field in fields]
for name in names:
self.assertNotIn(name, field_names)
def test_get_all_fields(self):
meta = Person.objects.first()._meta
update_fields = None
exclude_fields = None
fields = helper.get_fields(update_fields, exclude_fields, meta)
self.assertEquals(len(fields), self.total_fields)
def test_dont_get_primary_key(self):
meta = Person.objects.first()._meta
update_fields = None
exclude_fields = None
fields = helper.get_fields(update_fields, exclude_fields, meta)
self._assertIn(['id'], meta.get_fields()) # sanity check
self._assertNotIn(['id'], fields) # actual test
meta = PersonUUID.objects.create(age=3)._meta
update_fields = None
exclude_fields = None
fields = helper.get_fields(update_fields, exclude_fields, meta)
self._assertIn(['uuid'], meta.get_fields()) # sanity check
self._assertNotIn(['uuid'], fields) # actual test
def test_dont_get_reversed_relations(self):
meta = Person.objects.first()._meta
update_fields = None
exclude_fields = None
fields = helper.get_fields(update_fields, exclude_fields, meta)
self._assertIn(['companies'], meta.get_fields()) # sanity check
self._assertNotIn(['companies'], fields) # actual test
def test_dont_get_many_to_many_relations(self):
meta = Person.objects.first()._meta
update_fields = None
exclude_fields = None
fields = helper.get_fields(update_fields, exclude_fields, meta)
self._assertIn(['jobs'], meta.get_fields()) # sanity check
self._assertNotIn(['jobs'], fields) # actual test
def test_update_fields(self):
meta = Person.objects.first()._meta
update_fields = ['age', 'email', 'text']
exclude_fields = []
fields = helper.get_fields(update_fields, exclude_fields, meta)
self._assertEquals(fields, ['age', 'email', 'text'])
def test_update_fields_and_exclude_fields(self):
meta = Person.objects.first()._meta
update_fields = ['age', 'email', 'text']
exclude_fields = ['email', 'height']
fields = helper.get_fields(update_fields, exclude_fields, meta)
self._assertEquals(fields, ['age', 'text'])
def test_empty_update_fields(self):
meta = Person.objects.first()._meta
update_fields = []
exclude_fields = ['email', 'height']
fields = helper.get_fields(update_fields, exclude_fields, meta)
self._assertEquals(fields, [])
def test_exclude_a_foreignkey(self):
meta = Person.objects.first()._meta
update_fields = None
exclude_fields = ['email', 'role']
fields = helper.get_fields(update_fields, exclude_fields, meta)
self.assertEquals(len(fields), self.total_fields - 2)
self._assertNotIn(['email', 'role'], fields)
def test_exclude_foreignkey_with_id_suffix(self):
meta = Person.objects.first()._meta
update_fields = None
exclude_fields = ['email', 'role_id']
fields = helper.get_fields(update_fields, exclude_fields, meta)
self.assertEquals(len(fields), self.total_fields - 2)
self._assertNotIn(['email', 'role'], fields)
def test_get_a_foreignkey(self):
meta = Person.objects.first()._meta
update_fields = ['role', 'my_file']
exclude_fields = None
fields = helper.get_fields(update_fields, exclude_fields, meta)
self._assertEquals(fields, ['role', 'my_file'])
def test_get_foreignkey_with_id_suffix(self):
meta = Person.objects.first()._meta
update_fields = ['role_id', 'my_file']
exclude_fields = None
fields = helper.get_fields(update_fields, exclude_fields, meta)
self._assertEquals(fields, ['role', 'my_file'])
def test_obj_argument(self):
obj = Person.objects.first()
meta = obj._meta
update_fields = None
exclude_fields = None
fields = helper.get_fields(update_fields, exclude_fields, meta, obj)
self.assertEquals(len(fields), self.total_fields)
def test_only_get_not_deferred_fields(self):
obj = Person.objects.only('name', 'age', 'height').first()
meta = obj._meta
update_fields = None
exclude_fields = None
fields = helper.get_fields(update_fields, exclude_fields, meta, obj)
self._assertEquals(fields, ['name', 'age', 'height'])
def test_only_and_exclude_fields(self):
obj = Person.objects.only('name', 'age', 'height').first()
meta = obj._meta
update_fields = None
exclude_fields = ['age', 'date']
fields = helper.get_fields(update_fields, exclude_fields, meta, obj)
self._assertEquals(fields, ['name', 'height'])
def test_only_and_exclude_fields_02(self):
obj = Person.objects.defer('age', 'height').first()
meta = obj._meta
update_fields = None
exclude_fields = ['image', 'data']
fields = helper.get_fields(update_fields, exclude_fields, meta, obj)
self.assertEquals(len(fields), self.total_fields - 4)
self._assertNotIn(['age', 'height', 'image', 'data'], fields)
def test_update_fields_over_not_deferred_field(self):
obj = Person.objects.only('name', 'age', 'height').first()
meta = obj._meta
update_fields = ['date', 'time', 'age']
exclude_fields = None
fields = helper.get_fields(update_fields, exclude_fields, meta, obj)
self._assertEquals(fields, ['date', 'time', 'age'])
def test_update_fields_over_not_deferred_field_02(self):
obj = Person.objects.only('name', 'age', 'height').first()
meta = obj._meta
update_fields = []
exclude_fields = None
fields = helper.get_fields(update_fields, exclude_fields, meta, obj)
self._assertEquals(fields, [])
def test_arguments_as_tuples(self):
meta = Person.objects.first()._meta
update_fields = ('age', 'email', 'text')
exclude_fields = ('email', 'height')
fields = helper.get_fields(update_fields, exclude_fields, meta)
self._assertEquals(fields, ['age', 'text'])
def test_validate_fields(self):
meta = Person.objects.first()._meta
update_fields = ['age', 'wrong_name', 'text']
exclude_fields = ('email', 'height')
self.assertRaises(TypeError, helper.get_fields,
update_fields, exclude_fields, meta)
update_fields = ('age', 'email', 'text')
exclude_fields = ('email', 'bad_name')
self.assertRaises(TypeError, helper.get_fields,
update_fields, exclude_fields, meta)
update_fields = ('companies', )
exclude_fields = None
self.assertRaises(TypeError, helper.get_fields,
update_fields, exclude_fields, meta)
update_fields = None
exclude_fields = ['jobs']
self.assertRaises(TypeError, helper.get_fields,
update_fields, exclude_fields, meta)
|
|
"""Reward analysis functions."""
from __future__ import absolute_import
import numpy as np
import pandas as pd
import itertools as it
from collections import defaultdict
import warnings
from scipy.ndimage.morphology import binary_dilation
from . import behavior_analysis as ba
from . import new_intervals as ints
from ..misc.analysis_helpers import rewards_by_condition
from ..classes.classes import ExperimentGroup
from ..classes import exceptions as exc
def lick_to_reward_distance(expt_grp, rewardPositions=None):
"""Calculate the average lick to reward distance.
Parameters
----------
rewardPositions : {str, None, np.ndarray}
If a string, assumed to be a condition label, and will use the
reward positions used for each mouse during the condition.
If 'None', uses the actual reward positions during the experiment.
Otherwise pass in normalized reward positions.
Returns
-------
pd.DataFrame
"""
result = []
if rewardPositions is None:
rewards_by_expt = {
expt: expt.rewardPositions(units='normalized')
for expt in expt_grp}
elif isinstance(rewardPositions, basestring):
rewards_by_expt = rewards_by_condition(
expt_grp, rewardPositions, condition_column='condition')
else:
rewards_by_expt = defaultdict(lambda: np.array(rewardPositions))
for expt in expt_grp:
rewards = rewards_by_expt[expt]
for trial in expt.findall('trial'):
bd = trial.behaviorData(imageSync=False)
position = ba.absolutePosition(
trial, imageSync=False, sampling_interval='actual')
if np.any(rewards >= 1.0):
trial_rewards = rewards / bd['trackLength']
else:
trial_rewards = rewards
licking = bd['licking'][:, 0]
licking = licking[np.isfinite(licking)]
licking = licking / bd['samplingInterval']
licking = licking.astype('int')
licking_positions = position[licking] % 1
# meshgrid sets up the subtraction below
# basically tile expands the arrays
rewards_mesh, licking_mesh = np.meshgrid(
trial_rewards, licking_positions)
reward_distance = licking_mesh - rewards_mesh
# All distances should be on [-0.5, 0.5)
reward_distance[reward_distance >= 0.5] -= 1.0
reward_distance[reward_distance < -0.5] += 1.0
reward_distance = np.amin(np.abs(reward_distance), axis=1)
assert len(licking_positions) == len(reward_distance)
for lick, position in it.izip(
reward_distance, licking_positions):
result.append(
{'trial': trial, 'position': position, 'value': lick})
return pd.DataFrame(result, columns=['trial', 'position', 'value'])
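# A minimal, illustrative sketch (not part of the original analysis) of the
# circular-distance logic used above: licks and rewards live on a normalized
# [0, 1) track, so raw differences are wrapped onto [-0.5, 0.5) before taking
# the distance to the nearest reward. All numbers here are made up.
def _example_circular_reward_distance():
    licking_positions = np.array([0.05, 0.55, 0.95])
    trial_rewards = np.array([0.0, 0.5])
    rewards_mesh, licking_mesh = np.meshgrid(trial_rewards, licking_positions)
    reward_distance = licking_mesh - rewards_mesh
    reward_distance[reward_distance >= 0.5] -= 1.0
    reward_distance[reward_distance < -0.5] += 1.0
    # Distance from each lick to its nearest reward: array([0.05, 0.05, 0.05])
    return np.amin(np.abs(reward_distance), axis=1)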
def fraction_rewarded_lick_intervals(expt_grp, **lick_interval_kwargs):
"""Fraction of lick intervals that were rewarded.
Parameters
----------
**lick_interval_kwargs : dict
All additional keyword parameters are passed to
ba.calculateRewardedLickIntervals.
Returns
-------
pd.DataFrame
"""
result = []
for expt in expt_grp:
rewarded_intervals, unrewarded_intervals \
= ba.calculateRewardedLickIntervals(expt, **lick_interval_kwargs)
rewarded_count = sum(
[intervals.shape[0] for intervals in rewarded_intervals])
unrewarded_count = sum(
[intervals.shape[0] for intervals in unrewarded_intervals])
try:
fraction = rewarded_count \
/ float(rewarded_count + unrewarded_count)
except ZeroDivisionError:
fraction = np.nan
result.append({
'expt': expt, 'rewarded_count': rewarded_count,
            'unrewarded_count': unrewarded_count, 'value': fraction})
return pd.DataFrame(result, columns=[
'expt', 'rewarded_count', 'unrewarded_count', 'value'])
def fraction_licks_in_rewarded_intervals(expt_grp, **lick_interval_kwargs):
"""Fraction of licks that are within a rewarded lick interval.
Parameters
----------
**lick_interval_kwargs : dict
All additional keyword parameters are passed to
ba.calculateRewardedLickIntervals.
Returns
-------
pd.DataFrame
"""
result = []
for expt in expt_grp:
rewarded_intervals, _ = ba.calculateRewardedLickIntervals(
expt, **lick_interval_kwargs)
licks = []
for trial in expt.findall('trial'):
sampling_interval = trial.behavior_sampling_interval()
licks.append(
trial.behaviorData(imageSync=False)['licking'][:, 0] /
float(sampling_interval))
rewarded_licks = np.zeros(len(rewarded_intervals))
total_licks = 0
for trial_idx, intervals_trial, licks_trial in it.izip(
it.count(), rewarded_intervals, licks):
total_licks += licks_trial.shape[0]
for interval in intervals_trial:
rewarded_licks[trial_idx] += np.sum(
(interval[0] <= licks_trial) &
(interval[1] >= licks_trial))
total_rewarded_licks = np.sum(rewarded_licks)
try:
fraction = total_rewarded_licks / float(total_licks)
except ZeroDivisionError:
fraction = np.nan
result.append({
'expt': expt, 'rewarded_licks': total_rewarded_licks,
'total_licks': total_licks, 'value': fraction})
return pd.DataFrame(result, columns=[
'expt', 'rewarded_licks', 'total_licks', 'value'])
def fraction_licks_rewarded(expt_grp):
"""Fraction of possible licks rewarded.
    Takes into account the operant reward rate.
Returns
-------
pd.DataFrame
"""
result = []
for expt in expt_grp:
totalLicks = sum([
trial.behaviorData()['licking'].shape[0]
for trial in expt.findall('trial')])
totalWater = sum([
trial.behaviorData()['water'].shape[0]
for trial in expt.findall('trial')])
rewardRate = expt.reward_parameters().get('operant_rate', 1)
try:
fraction = float(totalWater) / (totalLicks / float(rewardRate))
except ZeroDivisionError:
fraction = np.nan
result.append({
'expt': expt, 'lick': totalLicks, 'water': totalWater,
'value': fraction})
return pd.DataFrame(result, columns=['expt', 'lick', 'water', 'value'])
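# Illustrative sketch (toy numbers, not from any experiment) of the
# operant-rate arithmetic above: with an operant rate r, at most one lick in
# every r licks can trigger a reward, so the achievable maximum is
# totalLicks / r and the reported fraction is totalWater / (totalLicks / r).
def _example_operant_rate_fraction():
    totalLicks, totalWater, rewardRate = 120, 30, 2
    return float(totalWater) / (totalLicks / float(rewardRate))  # 0.5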
def fractionLicksNearRewardsPerLap(
expGrp, anticipStartCM=-5, anticipEndCM=-0.1, compareStartCM=-15,
compareEndCM=-0.1, fractionColName="value", rewardPositions=None,
exclude_reward=False, exclude_reward_duration=10.0):
"""Fraction of licks in the anticipatory zone vs a compare zone, per lap.
Parameters
----------
    anticipStartCM, anticipEndCM : float
        Licks in this spatial window are counted as anticipatory.
        Units are in cm. The reward zone start is considered as 0.
        Pre-reward space is negative, and post-reward space is positive.
    compareStartCM, compareEndCM : float
        Licks in this window are counted toward total licks.
        Usually, this window should contain the anticipatory window.
fractionColName : str
the name to give to the lick fraction column of the returned
dataframe, default is "value"
rewardPositions : {str, None, np.ndarray}
If a string, assumed to be a condition label, and will use the
reward positions used for each mouse during the condition.
If 'None', uses the actual reward positions during the experiment.
Otherwise pass in normalized reward positions.
    exclude_reward : bool
        Exclude licks that occur after water rewards, in an attempt to
        ignore licks that are for drinking water. Default is False.
    exclude_reward_duration : float
        Number of seconds after the onset of a water reward for which
        licking should be ignored. Default is 10 seconds.
Returns
-------
pd.DataFrame
Each row is the licking calculation of a lap, with columns:
trial - trial instance
rewardPosition - reward location
lapNum - the lap number
anticipLicks - number of licks in the anticipatory zone
compareLicks - number of licks in the compare zone
value - anticipLicks/compareLicks.
            the name of this column is set by the kwarg fractionColName;
            when the compare zone contains the anticipatory zone, it is
            similar to the fraction of licks near rewards
"""
result = []
    if rewardPositions is None:
rewards_by_exp = {exp: exp.rewardPositions(units='normalized')
for exp in expGrp}
    elif isinstance(rewardPositions, basestring):
rewards_by_exp = rewards_by_condition(
expGrp, rewardPositions, condition_column="condition")
else:
rewards_by_exp = defaultdict(lambda: np.array(rewardPositions))
for exp in expGrp:
try:
belt_length = exp.belt().length()
except exc.NoBeltInfo:
warnings.warn('Missing belt length. All results may be wrong.')
belt_length = 220
anticipStart = anticipStartCM / float(belt_length)
anticipEnd = anticipEndCM / float(belt_length)
compareStart = compareStartCM / float(belt_length)
compareEnd = compareEndCM / float(belt_length)
rewards = rewards_by_exp[exp]
for trial in exp.findall("trial"):
position = ba.absolutePosition(
trial, imageSync=False, sampling_interval="actual")
bd = trial.behaviorData(
imageSync=False, sampling_interval="actual")
lapNum = position.astype("int32")
for reward in rewards:
for i in np.r_[0:np.max(lapNum)]:
absRewardPos = i + reward
anticipS = absRewardPos + anticipStart
anticipE = absRewardPos + anticipEnd
compareS = absRewardPos + compareStart
compareE = absRewardPos + compareEnd
anticipBA = (position >= anticipS) & (position < anticipE)
compareBA = (position >= compareS) & (position < compareE)
if exclude_reward:
                    numExcPoints = int(float(
                        exclude_reward_duration)/bd["samplingInterval"])
try:
firstWater = np.where(compareBA & bd["water"])[0]
                        if firstWater.size > 0:
firstWater = firstWater[0]
compareBA[firstWater:firstWater +
numExcPoints] = False
anticipBA[firstWater:firstWater +
numExcPoints] = False
except KeyError:
print("No water signal exist, " +
"exclude_reward not in effect")
numAnticipLicks = np.sum(bd["licking"][anticipBA])
numCompareLicks = np.sum(bd["licking"][compareBA])
fraction = numAnticipLicks / float(numCompareLicks)
result.append({"trial": trial,
"rewardPos": reward,
"lapNum": i,
"anticipLicks": numAnticipLicks,
"compareLicks": numCompareLicks,
fractionColName: fraction})
    return pd.DataFrame(result, columns=[
        'trial', 'rewardPos', 'lapNum', 'anticipLicks', 'compareLicks',
        fractionColName])
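# Small illustrative sketch (assumed belt length, not from any experiment) of
# the cm-to-normalized conversion used above: windows are specified in cm
# relative to the reward-zone start (negative = before the reward) and divided
# by the belt length so they are comparable to normalized track positions.
def _example_window_normalization():
    belt_length = 200.0  # cm, assumed for illustration
    anticipStartCM, anticipEndCM = -5.0, -0.1
    anticipStart = anticipStartCM / belt_length  # -0.025 of a lap
    anticipEnd = anticipEndCM / belt_length      # -0.0005 of a lap
    return anticipStart, anticipEnd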
def fractionLicksNearRewards(expt_grp, **kwargs):
"""Aggregate fractionLicksNearRewardsPerLap by Trial."""
fraction_per_lap = fractionLicksNearRewardsPerLap(expt_grp, **kwargs)
result = fraction_per_lap.groupby('trial', as_index=False).apply(
lambda x: pd.Series({
'trial': x.trial.iloc[0],
'value': x.anticipLicks.sum() / float(x.compareLicks.sum())}))
return result
def fraction_licks_near_rewards(
expt_grp, pre_window_cm=5, post_window_cm=10, rewardPositions=None,
exclude_reward=False):
"""Fraction of licks near the reward locations.
Parameters
----------
pre_window_cm, post_window_cm : float
Window to consider "near" the rewards
rewardPositions : {str, None, np.ndarray}
If a string, assumed to be a condition label, and will use the
reward positions used for each mouse during the condition.
If 'None', uses the actual reward positions during the experiment.
Otherwise pass in normalized reward positions.
    exclude_reward : bool
Do not consider licks in the reward zone after post_window_cm
Returns
-------
pd.DataFrame
"""
result = []
if rewardPositions is None:
rewards_by_expt = {
expt: expt.rewardPositions(units='normalized')
for expt in expt_grp}
elif isinstance(rewardPositions, basestring):
rewards_by_expt = rewards_by_condition(
expt_grp, rewardPositions, condition_column='condition')
else:
rewards_by_expt = defaultdict(lambda: np.array(rewardPositions))
for expt in expt_grp:
belt_length = expt.belt().length()
pre = float(pre_window_cm) / belt_length
post = float(post_window_cm) / belt_length
rewards = rewards_by_expt[expt]
# #check to ensure the spatial windows around each reward are non-overlapping
# rewardPositionCombos = it.combinations(rewards, 2)
# for (r1, r2) in rewardPositionCombos:
# diff = np.abs(r1 - r2)
# if diff > .5:
# diff = 1 - diff
# assert diff > pre + post, "Rewards at %f and %f are too close for pre_window_cm = %f and post_window_cm = %f" % (r1, r2, pre_window_cm, post_window_cm)
for trial in expt.findall('trial'):
bd = trial.behaviorData(imageSync=False)
position = ba.absolutePosition(
trial, imageSync=False, sampling_interval='actual')
if np.any(rewards >= 1.0):
trial_rewards = rewards / bd['trackLength']
else:
trial_rewards = rewards
licking = bd['licking'][:, 0]
licking = licking[np.isfinite(licking)]
licking = licking / bd['samplingInterval']
licking = licking.astype('int')
licking_positions = position[licking] % 1
# meshgrid sets up the subtraction below
# basically tile expands the arrays
rewards_mesh, licking_mesh = np.meshgrid(
trial_rewards, licking_positions)
reward_distance = licking_mesh - rewards_mesh
# All distances should be on [-0.5, 0.5)
reward_distance[reward_distance >= 0.5] -= 1.0
reward_distance[reward_distance < -0.5] += 1.0
lick_near_reward = np.bitwise_and(
-pre < reward_distance, reward_distance < post)
lick_near_reward = np.any(lick_near_reward, axis=1)
near_licks = np.sum(lick_near_reward)
if exclude_reward:
reward_zone_length = expt.reward_parameters(
distance_units='normalized')['window_length']
licks_to_exclude = np.bitwise_and(
post < reward_distance,
reward_distance < reward_zone_length)
licks_to_exclude = np.any(licks_to_exclude, axis=1)
total_licks = len(licking) - np.sum(licks_to_exclude)
else:
total_licks = len(licking)
try:
fraction = near_licks / float(total_licks)
except ZeroDivisionError:
fraction = np.nan
result.append({
'trial': trial, 'near_licks': int(near_licks),
'total_licks': total_licks, 'value': fraction})
return pd.DataFrame(result, columns=[
'trial', 'near_licks', 'total_licks', 'value'])
def number_licks_near_rewards(*args, **kwargs):
df = fraction_licks_near_rewards(*args, **kwargs)
df['fraction'] = df['value']
df['value'] = df['near_licks']
return df
def number_licks_away_rewards(*args, **kwargs):
df = fraction_licks_near_rewards(*args, **kwargs)
df['fraction'] = df['value']
df['value'] = df['total_licks'] - df['near_licks']
return df
def licking_spatial_information(expt_grp):
"""Calculate spatial information rate (bits/sec) of the licking signal
calculated across trials
"""
# TODO: make sure that this is calculating information correctly
    nBins = 100
result = []
for expt in expt_grp:
nLicks_by_position, _ = expt.licktogram(
nPositionBins=nBins, rewardPositions=None, normed=False)
time_by_position = expt.positionOccupancy(
nPositionBins=nBins, normed=False)
occupancy = expt.positionOccupancy(nPositionBins=nBins, normed=True)
lick_rate_by_position = nLicks_by_position.astype(float) \
/ time_by_position
overall_lick_rate = float(np.sum(nLicks_by_position)) \
/ np.sum(time_by_position)
integrand = lick_rate_by_position * np.log(
lick_rate_by_position / float(overall_lick_rate)) * occupancy
# take nansum because of positions at which there was zero licking (log undefined)
result.append({'expt': expt, 'value': np.nansum(integrand)})
return pd.DataFrame(result, columns=['expt', 'value'])
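# Hedged numeric sketch (toy numbers only) of the Skaggs-style information
# rate computed above: I = sum_i p_i * r_i * log(r_i / R), where p_i is the
# occupancy of bin i, r_i the lick rate in that bin, and R the overall rate.
def _example_licking_information():
    occupancy = np.array([0.5, 0.5])              # p_i, sums to 1
    lick_rate_by_position = np.array([2.0, 0.0])  # r_i, licks/sec per bin
    overall_rate = np.sum(lick_rate_by_position * occupancy)  # R = 1.0
    integrand = lick_rate_by_position * np.log(
        lick_rate_by_position / overall_rate) * occupancy
    # The 0-rate bin produces nan (log(0) warns as -inf); nansum skips it,
    # leaving 2 * ln(2) * 0.5 ~= 0.693 nats/sec.
    return np.nansum(integrand)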
def rate_of_water_obtained(expt_grp):
"""Calculates the rate of water obtained during the experiment.
    Returns the result in milliseconds of water per minute.
"""
result = []
for expt in expt_grp:
for trial in expt.findall('trial'):
bd = trial.behaviorData(imageSync=False)
ms_water = np.sum([x[1] - x[0] for x in bd['water']]) * 1000.
trial_duration_min = bd['recordingDuration'] / 60.
fraction = float(ms_water) / trial_duration_min
result.append({
'trial': trial, 'ms_water': ms_water,
'trial_duration': trial_duration_min, 'value': fraction})
return pd.DataFrame(result, columns=['trial', 'ms_water', 'trial_duration', 'value'])
# def fraction_licks_in_reward_zone_old(expt_grp):
# """Calculates the fraction of licks that were within the reward zone"""
# result = []
# for expt in expt_grp:
# for trial in expt.findall('trial'):
# bd = trial.behaviorData(
# imageSync=False, sampling_interval='actual')
# n_licks = np.sum(
# np.diff(np.hstack([0, bd['licking']]).astype('int')) > 0)
# licks_in_reward = bd['licking'] * bd['reward']
# n_licks_in_reward = np.sum(
# np.diff(np.hstack([0, licks_in_reward]).astype('int')) > 0)
# try:
# fraction = n_licks_in_reward / float(n_licks)
# except ZeroDivisionError:
# fraction = np.nan
# result.append({
# 'trial': trial, 'total_licks': n_licks,
# 'licks_in_reward': n_licks_in_reward, 'value': fraction})
# return pd.DataFrame(result, columns=[
# 'trial', 'total_licks', 'licks_in_reward', 'value'])
def fraction_licks_in_reward_zone(expt_grp):
"""Calculate the fraction of licks that were within the reward zone."""
rew_intervals = ints.behavior(expt_grp, 'reward')
licking_intervals = ints.behavior(expt_grp, 'licking')
n_licks = licking_intervals.groupby('trial', as_index=False).agg(len)
n_licks.rename(columns={'start': 'total_licks'}, inplace=True)
del n_licks['stop']
licks_in_reward = rew_intervals.filter_events(
licking_intervals, 'start').groupby('trial', as_index=False).agg(len)
licks_in_reward.rename(columns={'start': 'licks_in_reward'}, inplace=True)
del licks_in_reward['stop']
result = pd.merge(licks_in_reward, n_licks, on='trial', how='outer')
result['licks_in_reward'] = result['licks_in_reward'].fillna(0)
result['value'] = result['licks_in_reward'] / \
result['total_licks'].astype('float')
return result
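# Tiny pandas sketch (toy frames, not real trial data) of the outer-merge
# pattern used above: the outer join keeps trials that had zero in-reward
# licks, whose missing counts are then filled with 0 before dividing.
def _example_outer_merge_fill():
    n_licks = pd.DataFrame({'trial': ['a', 'b'], 'total_licks': [4, 3]})
    licks_in_reward = pd.DataFrame({'trial': ['a'], 'licks_in_reward': [2]})
    merged = pd.merge(licks_in_reward, n_licks, on='trial', how='outer')
    merged['licks_in_reward'] = merged['licks_in_reward'].fillna(0)
    merged['value'] = (merged['licks_in_reward'] /
                       merged['total_licks'].astype('float'))
    return merged  # trial 'b' gets value 0.0 instead of being dropped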
def fraction_of_laps_rewarded(expt_grp):
"""Fraction of laps with at least one reward."""
result = []
for expt in expt_grp:
for trial in expt.findall('trial'):
water = trial.behaviorData(
imageSync=False, sampling_interval='actual')['water']
pos = ba.absolutePosition(
trial, imageSync=False, sampling_interval='actual')
n_laps = int(pos.max())
# Need at least 1 full lap
n_laps -= 1
if n_laps <= 0:
continue
reward_laps = 0
for lap in range(1, n_laps):
lap_pos = np.logical_and(pos >= lap, pos < lap + 1)
if water[lap_pos].sum() > 0:
reward_laps += 1
try:
fraction = reward_laps / float(n_laps)
except ZeroDivisionError:
fraction = np.nan
result.append({'trial': trial, 'n_laps': n_laps,
'rewarded_laps': reward_laps,
'value': fraction})
return pd.DataFrame(result)
def licks_outside_reward_vicinity(expt_grp):
result = []
for expt in expt_grp:
for trial in expt.findall('trial'):
bd = trial.behaviorData(
imageSync=False, sampling_interval='actual')
n_licks = np.sum(
np.diff(np.hstack([0, bd['licking']]).astype('int')) > 0)
# "Reward vicinity" is 5s either side of reward zone (based on PSTH)
rewards = bd['reward']
five_secs = int(round((5. / expt.duration().seconds) * len(rewards)))
reward_vicinity = binary_dilation(rewards, iterations=five_secs)
licks_outside_reward_vicinity = bd['licking'] * ~reward_vicinity
lick_array = np.diff(np.hstack(
[0, licks_outside_reward_vicinity]).astype('int')) > 0
n_licks_outside_reward = np.sum(lick_array)
try:
fraction = n_licks_outside_reward / float(n_licks)
except ZeroDivisionError:
fraction = np.nan
result.append({
'trial': trial, 'total_licks': n_licks,
'licks_outside_reward': n_licks_outside_reward,
'value': fraction})
return pd.DataFrame(result, columns=[
'trial', 'total_licks', 'licks_outside_reward', 'value'])
def anticipatory_licking(expt_grp):
result = []
for expt in expt_grp:
for trial in expt.findall('trial'):
bd = trial.behaviorData(
imageSync=False, sampling_interval='actual')
n_licks = np.sum(
np.diff(np.hstack([0, bd['licking']]).astype('int')) > 0)
reward_zones = bd['reward']
five_secs = int(
round((5. / expt.duration().seconds) * len(reward_zones)))
expanded_reward = binary_dilation(
reward_zones, iterations=five_secs, structure=[1, 1, 0])
            # Avoid boolean subtraction (unsupported by modern numpy): the
            # anticipation zone is the expanded zone minus the reward zone.
            anticipation_zones = np.logical_and(
                expanded_reward, np.logical_not(reward_zones))
anticipatory_licks = bd['licking'] * anticipation_zones
lick_array = np.diff(
np.hstack([0, anticipatory_licks]).astype('int')) > 0
n_anticipatory_licks = sum(lick_array)
try:
fraction = n_anticipatory_licks / float(n_licks)
except ZeroDivisionError:
fraction = np.nan
result.append({
'trial': trial, 'total_licks': n_licks,
'anticipatory_licks': n_anticipatory_licks,
'value': fraction})
return pd.DataFrame(result, columns=[
'trial', 'total_licks', 'anticipatory_licks', 'value'])
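# Minimal sketch (toy array) of the asymmetric dilation trick used above:
# with structure=[1, 1, 0], binary_dilation only grows intervals backwards,
# so removing the original zone from the expanded one leaves a window that
# sits purely *before* each reward, i.e. the anticipation zone.
def _example_backward_dilation():
    reward = np.array([0, 0, 0, 1, 1, 0], dtype=bool)
    expanded = binary_dilation(reward, structure=[1, 1, 0], iterations=2)
    # expanded: [0, 1, 1, 1, 1, 0]; anticipation: [0, 1, 1, 0, 0, 0]
    return np.logical_and(expanded, np.logical_not(reward))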
def fraction_of_laps_with_licking_near_reward(
expt_grp, pre_window_cm=5, post_window_cm=10,
rewardPositions=None):
"""Fraction of laps that have licking near the reward positions.
Arguments:
pre_window_cm, post_window_cm -- Window to consider "near" the rewards
rewardPositions : {str, None, np.ndarray}
If a string, assumed to be a condition label, and will use the
reward positions used for each mouse during the condition.
If 'None', uses the actual reward positions during the experiment.
Otherwise pass in normalized reward positions.
"""
result = []
if rewardPositions is None:
rewards_by_expt = {
expt: expt.rewardPositions(units='normalized')
for expt in expt_grp}
elif isinstance(rewardPositions, basestring):
rewards_by_expt = rewards_by_condition(
expt_grp, rewardPositions, condition_column='condition')
else:
rewards_by_expt = defaultdict(lambda: np.array(rewardPositions))
licking_pos = ExperimentGroup.stim_position(
expt_grp, 'licking', abs_pos=True)
for expt in expt_grp:
belt_length = expt.belt().length()
pre = float(pre_window_cm) / belt_length
post = float(post_window_cm) / belt_length
rewards = rewards_by_expt[expt]
for trial in expt.findall('trial'):
n_laps = int(ba.absolutePosition(
trial, imageSync=False, sampling_interval='actual').max())
# Don't include the first incomplete lap
n_laps -= 1
if n_laps <= 0:
continue
trial_pos = licking_pos[
licking_pos['trial'] == trial]['value'].values
if np.any(rewards >= 1.0):
track_length = trial.behaviorData(
imageSync=False)['trackLength']
trial_rewards = rewards / track_length
else:
trial_rewards = rewards
reward_laps = 0
for lap in range(1, n_laps):
for reward in trial_rewards:
lap_rew_start = lap + reward - pre
lap_rew_stop = lap + reward + post
in_rew = np.logical_and(
trial_pos >= lap_rew_start,
trial_pos < lap_rew_stop)
if in_rew.sum():
reward_laps += 1
break
result.append({
'trial': trial, 'n_laps': n_laps,
'rewarded_laps': reward_laps,
'value': reward_laps / float(n_laps)})
return pd.DataFrame(result, columns=[
'trial', 'n_laps', 'rewarded_laps', 'value'])
|
|
# coding: utf-8
# # `travelmaps2`: An updated version of `travelmaps`
#
# I did not want to change `travelmaps`, as it is a blog entry.
#
# These functions are very basic, and include almost no input checking or error handling at all. Feel free to fork and improve them!
# In[1]:
import shapefile
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from matplotlib import rcParams, patheffects
from matplotlib.collections import LineCollection
# Disable DecompressionBombWarning
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
# In[2]:
def setup(dpi=300, sketch=(1, 100, 2), theme='light'):
"""Setup travelmaps."""
# Customized plt.xkcd()-settings
# http://jakevdp.github.io/blog/2013/07/10/XKCD-plots-in-matplotlib
rcParams['font.family'] = ['Humor Sans', 'Comic Sans MS']
rcParams['font.size'] = 8.0
rcParams['path.sketch'] = sketch
rcParams['axes.linewidth'] = 1.0
rcParams['lines.linewidth'] = 1.0
rcParams['grid.linewidth'] = 0.0
rcParams['axes.unicode_minus'] = False
if theme=='dark':
rcParams['path.effects'] = [patheffects.withStroke(linewidth=2, foreground="k")]
rcParams['figure.facecolor'] = 'black'
rcParams['figure.edgecolor'] = 'black'
rcParams['lines.color'] = 'white'
rcParams['patch.edgecolor'] = 'white'
rcParams['text.color'] = 'white'
rcParams['axes.facecolor'] = 'black'
rcParams['axes.edgecolor'] = 'white'
rcParams['axes.labelcolor'] = 'white'
rcParams['xtick.color'] = 'white'
rcParams['ytick.color'] = 'white'
rcParams['grid.color'] = 'white'
rcParams['savefig.facecolor'] = 'black'
rcParams['savefig.edgecolor'] = 'black'
else:
rcParams['path.effects'] = [patheffects.withStroke(linewidth=2, foreground="w")]
rcParams['figure.facecolor'] = 'white'
rcParams['figure.edgecolor'] = 'white'
rcParams['lines.color'] = 'black'
rcParams['patch.edgecolor'] = 'black'
rcParams['text.color'] = 'black'
rcParams['axes.facecolor'] = 'white'
rcParams['axes.edgecolor'] = 'black'
rcParams['axes.labelcolor'] = 'black'
rcParams['xtick.color'] = 'black'
rcParams['ytick.color'] = 'black'
rcParams['grid.color'] = 'black'
rcParams['savefig.facecolor'] = 'white'
rcParams['savefig.edgecolor'] = 'white'
    # *Bayesian Methods for Hackers* colour cycle
# (https://github.com/pkgpl/PythonProcessing/blob/master/results/matplotlibrc.bmh.txt)
rcParams['axes.prop_cycle'] = plt.cycler('color', ['#348ABD', '#A60628', '#7A68A6', '#467821', '#D55E00',
'#CC79A7', '#56B4E9', '#009E73', '#F0E442', '#0072B2'])
# Adjust dpi, so figure on screen and savefig looks the same
rcParams['figure.dpi'] = dpi
rcParams['savefig.dpi'] = dpi
# In[ ]:
def setup_noxkcd(dpi=300, theme='light'):
"""Setup Maps."""
if theme=='dark':
rcParams['figure.facecolor'] = 'black'
rcParams['figure.edgecolor'] = 'black'
rcParams['lines.color'] = 'white'
rcParams['patch.edgecolor'] = 'white'
rcParams['text.color'] = 'white'
rcParams['axes.facecolor'] = 'black'
rcParams['axes.edgecolor'] = 'white'
rcParams['axes.labelcolor'] = 'white'
rcParams['xtick.color'] = 'white'
rcParams['ytick.color'] = 'white'
rcParams['grid.color'] = 'white'
rcParams['savefig.facecolor'] = 'black'
rcParams['savefig.edgecolor'] = 'black'
else:
rcParams['figure.facecolor'] = 'white'
rcParams['figure.edgecolor'] = 'white'
rcParams['lines.color'] = 'black'
rcParams['patch.edgecolor'] = 'black'
rcParams['text.color'] = 'black'
rcParams['axes.facecolor'] = 'white'
rcParams['axes.edgecolor'] = 'black'
rcParams['axes.labelcolor'] = 'black'
rcParams['xtick.color'] = 'black'
rcParams['ytick.color'] = 'black'
rcParams['grid.color'] = 'black'
rcParams['savefig.facecolor'] = 'white'
rcParams['savefig.edgecolor'] = 'white'
    # *Bayesian Methods for Hackers* colour cycle
# (https://github.com/pkgpl/PythonProcessing/blob/master/results/matplotlibrc.bmh.txt)
rcParams['axes.prop_cycle'] = plt.cycler('color', ['#348ABD', '#A60628', '#7A68A6', '#467821', '#D55E00',
'#CC79A7', '#56B4E9', '#009E73', '#F0E442', '#0072B2'])
# Adjust dpi, so figure on screen and savefig looks the same
rcParams['figure.dpi'] = dpi
rcParams['savefig.dpi'] = dpi
# In[3]:
def cm2in(length, decimals=2):
"""Convert cm to inch.
Parameters
----------
length : scalar or vector
Numbers to be converted.
decimals : int, optional; <2>
As in np.round, used to round the result.
Returns
-------
cm2in : scalar or vector
Converted numbers.
Examples
--------
>>> from adashof import cm2in
>>> cm2in(5)
1.97
"""
# Test input
try:
length = np.array(length, dtype='float')
decimals = int(decimals)
except ValueError:
print("{length} must be a number, {decimals} an integer")
return np.round(length/2.54, decimals)
# In[4]:
def country(countries, bmap, fc=None, ec='none', lw=1, alpha=1, adm=0, gadmpath='/home/dtr/Documents/Webpages/blog-notebooks/data/TravelMap/'):
"""Colour <countries> with a <bmap> projection.
This script is adapted from:
http://www.geophysique.be/2013/02/12/
matplotlib-basemap-tutorial-10-shapefiles-unleached-continued
I downloaded the countries shapefile from the *Global Administrative Areas*
website, [gadm.org](http://gadm.org).
=> You have to use the same abbreviations for the countries as GADM does, or adjust the script.
=> You have to download the shapefiles from GADM, and extract them into the <gadmpath> directory.
    Of course, you can use any other shapefiles you have, and adjust the script accordingly.
Parameters
----------
countries : string or list of strings
Countries to be plotted.
bmap : handle
As you get from bmap = Basemap().
fc : None or colour, or list of colours; <None>
Face-colour for country; if <None>, it will cycle through colour-cycle.
ec : 'none' or colour (scalar or list); <'none'>
Edge-colour for country.
lw : scalar or list; <1>
Linewidth for country.
alpha: scalar or list; <1>
Transparency.
adm : {0, 1, 2, 3}; <0>
Administrative area to choose.
gadmpath : 'string'
        Absolute or relative path to the shapefiles.
"""
# Ensure countries is a list
if not isinstance(countries, list):
countries = [countries,]
# Get current axis
cax = plt.gca()
# Loop through the countries
for country in countries:
# Get shapefile for the country; extract shapes and records
r = shapefile.Reader(gadmpath+country+'_adm/'+country+'_adm'+str(adm),
encoding='windows-1252')
shapes = r.shapes()
records = r.records()
# Loop through the records; for adm0 this is only 1 run
n = 0
for record, shape in zip(records,shapes):
lons,lats = zip(*shape.points)
data = np.array(bmap(lons, lats)).T
if len(shape.parts) == 1:
segs = [data,]
else:
segs = []
                for i in range(1, len(shape.parts)):
                    index = shape.parts[i-1]
                    index2 = shape.parts[i]
                    segs.append(data[index:index2])
                # Append the remainder after the last part index once, outside
                # the loop; appending it per-iteration would duplicate it.
                segs.append(data[index2:])
lines = LineCollection(segs,antialiaseds=(1,))
# If facecolor is provided, use; else cycle through colours
if fc:
if not isinstance(fc, list):
lines.set_facecolors(fc)
else:
lines.set_facecolors(fc[n])
else:
cycle = cax._get_lines.prop_cycler
lines.set_facecolors(next(cycle)['color'])
# Edge colour
if not isinstance(ec, list):
lines.set_edgecolors(ec)
else:
lines.set_edgecolors(ec[n])
# Alpha
if not isinstance(alpha, list):
lines.set_alpha(alpha)
else:
lines.set_alpha(alpha[n])
# Line width
if not isinstance(lw, list):
lines.set_linewidth(lw)
else:
lines.set_linewidth(lw[n])
# Add to current plot
cax.add_collection(lines)
n += 1
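# A small synthetic sketch (no shapefile required) of how <shape.parts>
# indices split the flat point array into polygon segments, mirroring the
# loop in country() above. The points and part indices are made up.
def _example_split_parts():
    data = np.arange(10).reshape(5, 2)  # five projected (x, y) points
    parts = [0, 2]                      # two rings: points 0-1 and 2-4
    segs = []
    for i in range(1, len(parts)):
        segs.append(data[parts[i-1]:parts[i]])
    segs.append(data[parts[-1]:])
    return segs  # [2x2 array, 3x2 array]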
# In[5]:
def city(city, name, bmap, mfc=None, mec=None, color='b', offs=[.1, .1], halign='left'):
"""Plot a circle at <city> and annotate with <name>, with a <bmap> projection.
Parameters
----------
city : List of two scalars
[Northing, Easting].
name : string
name to be plotted with city.
bmap : handle
As you get from bmap = Basemap().
    mfc : None or colour; <None>
        Marker face-colour for city; if <None>, it will cycle through the colour-cycle.
    mec : None or colour; <None>
        Marker edge-colour for city; if <None>, defaults to rcParams['axes.edgecolor'].
    color : 'none' or colour; <'b'>
        Colour for <name>.
offs : List of two scalars; <[.1, .1]>
Offset for <name> from <city>.
halign : {'left', 'right', 'center'}; <'left'>
Alignment of <name> relative to <city>.
"""
# mec from rcParams, to respect theme (dark/light)
if not mec:
mec = rcParams['axes.edgecolor']
# Get current axis
cax = plt.gca()
# Plot dot
# If mfc is provided, use; else cycle through colours
if not mfc:
cycle = cax._get_patches_for_fill.prop_cycler
mfc = next(cycle)['color']
bmap.plot(city[1], city[0], 'o', mfc=mfc, mec=mec, ms=4, mew=1, latlon=True)
# Annotate name
cax.annotate(name, bmap(city[1]+offs[0], city[0]+offs[1]),
horizontalalignment=halign, color=color, fontsize=7, zorder=10)
# In[6]:
def arrow(start, end, bmap, ec=None, fc=None, rad=-.3):
"""Plot an arrow from <start> to <end>, with a <bmap> projection.
Parameters
----------
start : List of two scalars
Start of arrow [Northing, Easting].
end : List of two scalars
End of arrow [Northing, Easting].
bmap : handle
As you get from bmap = Basemap().
    ec : None or colour; <None>
        Edge-colour for arrow; if <None>, defaults to rcParams['axes.edgecolor'].
    fc : None or colour; <None>
        Face-colour for arrow; if <None>, defaults to rcParams['axes.facecolor'].
    rad : Scalar; <-.3>
        Curvature of arrow.
"""
# ec & fc from rcParams, to respect theme (dark/light)
if not ec:
ec = rcParams['axes.edgecolor']
if not fc:
fc = rcParams['axes.facecolor']
# Get current axis
cax = plt.gca()
# Plot arrow
arrowstyle='Fancy, head_length=.6, head_width=.6, tail_width=.4'
cax.annotate('', bmap(end[1], end[0]), bmap(start[1], start[0]),
arrowprops=dict(arrowstyle=arrowstyle,
alpha=.6,
patchA=None,
patchB=None,
shrinkA=3,
shrinkB=3,
fc=fc, ec=ec,
connectionstyle="arc3, rad="+str(rad),
))
|
|
"""
Some common utilities for Uptane, to be assigned to more sensible locations in
the future.
"""
from __future__ import print_function
from __future__ import unicode_literals
import uptane # Import before TUF modules; may change tuf.conf values.
import tuf
import tuf.formats
import json
import os
import shutil
import copy
import hashlib
# TODO: This import is not ideal at this level. Common should probably not
# import anything from other Uptane modules. Consider putting the
# signature-related functions into a new module (sig or something) that
# imports asn1_codec.
import uptane.encoding.asn1_codec as asn1_codec
import uptane.formats
# Both key types below are supported, but issues may be encountered with RSA
# if tuf.conf.METADATA_FORMAT is 'der' (rather than 'json').
# TODO: Ensure RSA support in ASN.1/DER conversion.
SUPPORTED_KEY_TYPES = ['ed25519', 'rsa']
def sign_signable(
signable, keys_to_sign_with, datatype,
metadata_format=tuf.conf.METADATA_FORMAT):
"""
<Purpose>
Signs the given signable (e.g. an ECU manifest) with all the given keys.
Wraps sign_over_metadata such that multiple signatures can be generated,
and places them all in the 'signatures' field of the given signable.
Also does some additional argument validation.
<Arguments>
signable:
An object with a 'signed' dictionary and a 'signatures' list:
conforms to tuf.formats.SIGNABLE_SCHEMA
keys_to_sign_with:
A list whose elements must conform to tuf.formats.ANYKEY_SCHEMA.
datatype:
The type of data signable['signed'] represents.
Must be in uptane.encoding.asn1_codec.SUPPORTED_ASN1_METADATA_MODULES.
Specifies the type of data provided in der_data, whether a Time
Attestation, ECU Manifest, or Vehicle Manifest.
'datatype' is used to determine the module to use for the conversion to
ASN.1/DER, if the metadata format is 'der'. When 'der' is the metadata
format, we need to convert to ASN.1/DER first, and conversion to
ASN.1/DER varies by type. 'datatype' doesn't matter if signing is
      occurring over JSON.
If the metadata contained a metadata type indicator (the way that
DER TUF metadata does), and if we could also capture this in an ASN.1
specification that flexibly supports each possible metadata type (the
way that the Metadata specification does in TUF ASN.1), then this would
not be necessary....
# TODO: Try to find some way to add the type to the metadata and cover
# these requirements above.
metadata_format: (optional; default tuf.conf.METADATA_FORMAT)
'json' or 'der'. Determines what the signature will be over.
Should generally be left to the default except when testing different
encodings or otherwise intentionally signing a different format.
<Exceptions>
tuf.FormatError if the provided key is not the correct format or lacks a
private element.
uptane.Error if the key type is not in the SUPPORTED_KEY_TYPES for Uptane.
<Side Effects>
Adds a signature to the provided signable.
<Returns>
None. Note that the provided object, 'signable', is modified in place.
"""
# The below was partially modeled after tuf.repository_lib.sign_metadata()
signatures = []
for signing_key in keys_to_sign_with:
tuf.formats.ANYKEY_SCHEMA.check_match(signing_key)
# If we already have a signature with this keyid, skip.
if signing_key['keyid'] in [key['keyid'] for key in signatures]:
print('Already signed with this key.')
continue
# If the given key was public, raise a FormatError.
if 'private' not in signing_key['keyval']:
raise tuf.FormatError('One of the given keys lacks a private key value, '
'and so cannot be used for signing: ' + repr(signing_key))
# We should already be guaranteed to have a supported key type due to
# the ANYKEY_SCHEMA.check_match call above. Defensive programming.
if signing_key['keytype'] not in SUPPORTED_KEY_TYPES:
raise uptane.Error(
'Unsupported key type: ' + repr(signing_key['keytype']))
# Else, all is well. Sign the signable with the given key, adding that
# signature to the signatures list in the signable.
signable['signatures'].append(sign_over_metadata(
signing_key, signable['signed'], datatype=datatype,
metadata_format=metadata_format))
uptane.formats.ANY_SIGNABLE_UPTANE_METADATA_SCHEMA.check_match(signable)
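# Hedged usage sketch for sign_signable(); illustrative only, not runnable
# as-is. It assumes tuf.keys.generate_ed25519_key() is available (as in
# classic TUF), that 'ecu_manifest' is a supported datatype in asn1_codec,
# and that ecu_manifest_dict satisfies the Uptane schema checked above.
#
#   import tuf.keys
#   key = tuf.keys.generate_ed25519_key()  # assumed API
#   signable = {'signed': ecu_manifest_dict, 'signatures': []}
#   sign_signable(signable, [key], datatype='ecu_manifest',
#                 metadata_format='json')
#   # signable['signatures'] now holds one tuf SIGNATURE_SCHEMA dict.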
def sign_over_metadata(
key_dict, data, datatype, metadata_format=tuf.conf.METADATA_FORMAT):
"""
<Purpose>
Given a key and data, returns a signature over that data.
Higher level function that wraps tuf.keys.create_signature, and works
    specifically with Time Attestations, ECU Manifests, and Vehicle Manifests
that will be in JSON or ASN.1/DER format.
Almost exactly identical to the function simultaneously added to TUF,
tuf.sig.sign_over_metadata(). Requires datatype, and operates on
Uptane-specific metadata (see 'datatype' argument below)
Must differ in Uptane simply because it is not possible to convert
Uptane-specific metadata (Time Attestations, ECU Manifests, and Vehicle
Manifests) to or from ASN.1/DER without knowing which of those three
    types of metadata you're dealing with, and this conversion is required for
signing and verifying signatures.
See tuf.keys.create_signature for lower level details.
<Arguments>
key_dict:
A dictionary containing the TUF keys. An example RSA key dict has the
form:
{'keytype': 'rsa',
'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...',
'private': '-----BEGIN RSA PRIVATE KEY----- ...'}}
The public and private keys are strings in PEM format.
data:
Data object used by create_signature() to generate the signature.
Acceptable format depends somewhat on tuf.conf.METADATA_FORMAT, or, if
the optional argument is provided, metadata_format.
This will be converted into a bytes object and passed down to
tuf.keys.create_signature().
In 'der' mode:
'data' is expected to be a dictionary compliant with
uptane.formats.ANY_SIGNABLE_UPTANE_METADATA_SCHEMA. ASN.1/DER
conversion requires strictly defined formats.
In 'json' mode:
'data' can be any data that can be processed by
        tuf.formats.encode_canonical(data). This function is
generally intended to sign metadata (tuf.formats.ANYROLE_SCHEMA), but
can be used more broadly.
datatype:
The type of data signable['signed'] represents.
Must be in uptane.encoding.asn1_codec.SUPPORTED_ASN1_METADATA_MODULES.
Specifies the type of data provided in der_data, whether a Time
Attestation, ECU Manifest, or Vehicle Manifest.
'datatype' is used to determine the module to use for the conversion to
ASN.1/DER, if the metadata format is 'der'. When 'der' is the metadata
format, we need to convert to ASN.1/DER first, and conversion to
ASN.1/DER varies by type. 'datatype' doesn't matter if signing is
      occurring over JSON.
If the metadata contained a metadata type indicator (the way that
DER TUF metadata does), and if we could also capture this in an ASN.1
specification that flexibly supports each possible metadata type (the
way that the Metadata specification does in TUF ASN.1), then this would
not be necessary....
# TODO: Try to find some way to add the type to the metadata and cover
# these requirements above.
metadata_format: (optional; default based on tuf.conf.METADATA_FORMAT)
If 'json', treats data as a JSON-friendly Python dictionary to be turned
into a canonical JSON string and then encoded as utf-8 before signing.
When operating TUF with DER metadata but checking the signature on some
piece of JSON for some reason, this should be manually set to 'json'. The
purpose of this canonicalization is to produce repeatable signatures
across different platforms and Python key dictionaries (avoiding things
like different signatures over the same dictionary).
      If 'der', the data will be converted into ASN.1, encoded as DER,
      and hashed. The signature is then created over that hash.
<Exceptions>
tuf.FormatError, if 'key_dict' is improperly formatted.
tuf.UnsupportedLibraryError, if an unsupported or unavailable library is
detected.
TypeError, if 'key_dict' contains an invalid keytype.
<Side Effects>
    The cryptography library specified in 'tuf.conf' is called to do the actual
    signing. When in 'der' mode, argument data is converted into ASN.1/DER
    in order to sign it. (Argument object is unchanged.)
<Returns>
    A signature dictionary conformant to 'tuf.formats.SIGNATURE_SCHEMA'. e.g.:
{'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'method': '...',
'sig': '...'}.
"""
tuf.formats.ANYKEY_SCHEMA.check_match(key_dict)
# TODO: Check format of data, based on metadata_format.
# TODO: Consider checking metadata_format redundantly. It's checked below.
if metadata_format == 'json':
data = tuf.formats.encode_canonical(data).encode('utf-8')
elif metadata_format == 'der':
# TODO: Have convert_signed_metadata_to_der take just the 'signed' element
# so we don't have to do this silly wrapping in an empty signable.
data = asn1_codec.convert_signed_metadata_to_der(
{'signed': data, 'signatures': []}, only_signed=True, datatype=datatype)
data = hashlib.sha256(data).digest()
else:
raise tuf.Error('Unsupported metadata format: ' + repr(metadata_format))
return tuf.keys.create_signature(key_dict, data)
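# Minimal illustration (assuming tuf.formats.encode_canonical, as used above)
# of why canonicalization gives repeatable signatures: key order in the
# source dictionary does not affect the canonical bytes that get signed.
def _example_canonical_encoding():
    first = tuf.formats.encode_canonical({'b': 1, 'a': 2}).encode('utf-8')
    second = tuf.formats.encode_canonical({'a': 2, 'b': 1}).encode('utf-8')
    return first == second  # True: identical bytes, identical signatures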
def verify_signature_over_metadata(
key_dict, signature, data, datatype,
metadata_format=tuf.conf.METADATA_FORMAT):
"""
<Purpose>
Determine whether the private key belonging to 'key_dict' produced
'signature'. tuf.keys.verify_signature() will use the public key found in
'key_dict', the 'method' and 'sig' objects contained in 'signature',
and 'data' to complete the verification.
Higher level function that wraps tuf.keys.verify_signature, and works
    specifically with Time Attestations, ECU Manifests, and Vehicle Manifests
that will be in JSON or ASN.1/DER format.
Almost exactly identical to the function simultaneously added to TUF,
tuf.sig.verify_signature_over_metadata(). Requires datatype.
Must differ in Uptane simply because it is not possible to convert
Uptane-specific metadata (Time Attestations, ECU Manifests, and Vehicle
Manifests) to or from ASN.1/DER without knowing which of those three
    types of metadata you're dealing with, and this conversion is required for
signing and verifying signatures.
See tuf.keys.verify_signature for lower level details.
<Arguments>
key_dict:
A dictionary containing the TUF keys and other identifying information.
If 'key_dict' is an RSA key, it has the form:
{'keytype': 'rsa',
'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...',
'private': '-----BEGIN RSA PRIVATE KEY----- ...'}}
The public and private keys are strings in PEM format.
signature:
The signature dictionary produced by one of the key generation functions.
'signature' has the form:
{'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'method': 'method',
'sig': sig}.
Conformant to 'tuf.formats.SIGNATURE_SCHEMA'.
data:
Data object over which the validity of the provided signature will be
checked by verify_signature().
Acceptable format depends somewhat on tuf.conf.METADATA_FORMAT, or, if
the optional argument is provided, metadata_format.
This will be converted into a bytes object and passed down to
tuf.keys.verify_signature().
In 'der' mode:
'data' is expected to be a dictionary compliant with
uptane.formats.ANY_SIGNABLE_UPTANE_METADATA_SCHEMA. ASN.1/DER
conversion requires strictly defined formats.
In 'json' mode:
'data' can be any data that can be processed by
tuf.formats.encode_canonical(data). This function is generally intended
to verify signatures over Uptane metadata
(uptane.formats.ANY_SIGNABLE_UPTANE_METADATA_SCHEMA), but can be used
more broadly when in 'json' mode.
metadata_format: (optional; default based on tuf.conf.METADATA_FORMAT)
If 'json', treats data as a JSON-friendly Python dictionary to be turned
into a canonical JSON string and then encoded as utf-8 before checking
against the signature. When operating TUF with DER metadata but checking
the signature on some piece of JSON for some reason, this should be
manually set to 'json'. The purpose of this canonicalization is to
produce repeatable signatures across different platforms and Python key
dictionaries (avoiding things like different signatures over the same
dictionary).
If 'der', the data will be converted into ASN.1, encoded as DER,
and hashed. The signature is then checked against that hash.
<Exceptions>
tuf.FormatError, raised if either 'key_dict' or 'signature' are improperly
formatted.
tuf.UnsupportedLibraryError, if an unsupported or unavailable library is
detected.
tuf.UnknownMethodError. Raised if the signing method used by
    'signature' is not supported.
<Side Effects>
The cryptography library specified in 'tuf.conf' is called to do the actual
verification. When in 'der' mode, argument data is converted into ASN.1/DER
in order to verify it. (Argument object is unchanged.)
<Returns>
Boolean. True if the signature is valid, False otherwise.
"""
tuf.formats.ANYKEY_SCHEMA.check_match(key_dict)
tuf.formats.SIGNATURE_SCHEMA.check_match(signature)
# TODO: Check format of data, based on metadata_format.
# TODO: Consider checking metadata_format redundantly. It's checked below.
if metadata_format == 'json':
data = tuf.formats.encode_canonical(data).encode('utf-8')
elif metadata_format == 'der':
# TODO: Have convert_signed_metadata_to_der take just the 'signed' element
# so we don't have to do this silly wrapping in an empty signable.
data = asn1_codec.convert_signed_metadata_to_der(
{'signed': data, 'signatures': []}, only_signed=True, datatype=datatype)
data = hashlib.sha256(data).digest()
else:
raise tuf.Error('Unsupported metadata format: ' + repr(metadata_format))
return tuf.keys.verify_signature(key_dict, signature, data)
def canonical_key_from_pub_and_pri(key_pub, key_pri):
"""
Turn this into a canonical key matching tuf.formats.ANYKEY_SCHEMA, with
the optional element keyid_hash_algorithms, which can be found in the
public key, and containing both public and private key values.
  It is assumed that the following elements of each of the two arguments are
  strings:
key['keytype']
key['keyid']
key['keyval']['public']
key['keyval']['private'] (for key_pri)
"""
key = {
'keytype': key_pub['keytype'],
'keyid': key_pub['keyid'],
'keyval': {
'public': key_pub['keyval']['public'],
'private': key_pri['keyval']['private']},
'keyid_hash_algorithms': copy.deepcopy(key_pub['keyid_hash_algorithms'])}
tuf.formats.ANYKEY_SCHEMA.check_match(key)
return key
def public_key_from_canonical(key_canonical):
"""
Given a key that includes all public and private key information, return a
public key (assumed to be the canonical key with the 'private' component
of the 'keyval' dictionary stripped).
"""
tuf.formats.ANYKEY_SCHEMA.check_match(key_canonical)
key_public = copy.deepcopy(key_canonical)
del key_public['keyval']['private']
return key_public
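# Illustrative round trip (schematic; real key dicts must satisfy
# tuf.formats.ANYKEY_SCHEMA, so toy values are not shown as runnable code):
#
#   canonical = canonical_key_from_pub_and_pri(key_pub, key_pri)
#   public = public_key_from_canonical(canonical)
#   assert 'private' not in public['keyval']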
# Not sure where to put this yet.
def create_directory_structure_for_client(
client_dir,
pinning_fname,
root_fnames_by_repository):
"""
Creates a directory structure for a client, including current and previous
metadata directories.
Arguments:
client_dir
the client directory, into which metadata and targets will be downloaded
from repositories
pinning_fname
the filename of a pinned.json file to copy and use to map targets to
repositories
root_fnames_by_repository
a dictionary mapping repository name to the filename of the root.json
file for that repository to start with as the root of trust for that
repository.
e.g.
{'ImageRepo': 'distributed_roots/imagerepo_root.json',
'Director': 'distributed_roots/director_root.json'}
Each repository listed in the pinning.json file should have a
corresponding entry in this dict.
"""
# Read the pinning file here and create a list of repositories and
# directories.
# Set up the TUF client directories for each repository.
if os.path.exists(client_dir):
shutil.rmtree(client_dir)
os.makedirs(os.path.join(client_dir, 'metadata'))
# Add a pinned.json to this client (softlink it from the indicated copy).
os.symlink(
pinning_fname, #os.path.join(WORKING_DIR, 'pinned.json'),
os.path.join(client_dir, 'metadata', 'pinned.json'))
pinnings = json.load(open(pinning_fname, 'r'))
for repo_name in pinnings['repositories']:
os.makedirs(os.path.join(client_dir, 'metadata', repo_name, 'current'))
os.makedirs(os.path.join(client_dir, 'metadata', repo_name, 'previous'))
# Set the root of trust we have for that repository.
shutil.copyfile(
root_fnames_by_repository[repo_name],
os.path.join(client_dir, 'metadata', repo_name, 'current',
'root.' + tuf.conf.METADATA_FORMAT))
# Configure tuf with the client's metadata directories (where it stores the
# metadata it has collected from each repository, in subdirectories).
tuf.conf.repository_directory = client_dir # TODO for TUF: This setting should probably be called client_directory instead, post-TAP4.
def scrub_filename(fname, expected_containing_dir):
"""
DO NOT ASSUME THAT THIS TEMPORARY FUNCTION IS SECURE.
Performs basic scrubbing to try to ensure that the filename provided is
actually just a plain filename (no pathing), so that it cannot specify a file
that is not in the provided directory.
May break (exception trigger-happy) if there's a softlink somewhere in the
working directory path.
Returns an absolute path that was confirmed to be inside
expected_containing_dir.
"""
# Assert no tricksy characters. (Improvised, not to be trusted)
assert '..' not in fname and '/' not in fname and '$' not in fname and \
'~' not in fname and b'\\' not in fname.encode('unicode-escape'), \
'Unacceptable string: ' + fname
# Make sure it's in the expected directory.
abs_fname = os.path.abspath(os.path.join(expected_containing_dir, fname))
if not abs_fname.startswith(os.path.abspath(expected_containing_dir)):
raise ValueError('Expected a plain filename. Was given one that had '
'pathing specified that put it in a different, unexpected directory. '
'Filename was: ' + fname)
return abs_fname
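# Hedged example of scrub_filename() behavior; the paths are illustrative and
# POSIX-style, and this sketch is not part of the original module.
def _example_scrub_filename():
    # A plain filename resolves inside the expected directory...
    ok = scrub_filename('manifest.der', '/tmp/metadata')
    # ...while anything containing pathing characters trips the assertion.
    try:
        scrub_filename('../etc/passwd', '/tmp/metadata')
    except AssertionError:
        pass
    return ok  # '/tmp/metadata/manifest.der'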
|
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import typing as ty
import six
import requests
from sqlitedict import SqliteDict
from rnacentral_pipeline.databases import data
import rnacentral_pipeline.databases.helpers.phylogeny as phy
import rnacentral_pipeline.databases.helpers.publications as pub
from rnacentral_pipeline.databases.ncbi.taxonomy import TaxonomyEntry
class InvalidDotBracket(Exception):
"""
This is raised when the given string cannot be turned into a valid
dot-bracket string.
"""
pass
def extract_download_urls(base_url, text):
"""
Given a chunk of text returned by the main GtRNAdb RNAcentral download url
(http://trna.ucsc.edu/download/RNAcentral/) this can pull out the URLs that
represent the filenames to download.
"""
data = []
for line in text.split("\n"):
# I am aware of how awful it is to parse HTML via regex, but I long for
# the return of the elder gods and I'm doing my part to bring them
# back.
if "/icons/compressed.gif" in line:
match = re.search('href="([^"]+)"', line)
assert match
filename = match.group(1)
data.append((filename, six.moves.urllib_parse.urljoin(base_url, filename)))
    assert data, "Given text contained no downloadable URLs"
return data
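def _example_extract_download_urls():
    """
    Illustrative only: a synthetic directory-listing fragment in the style
    GtRNAdb serves, showing how href targets are pulled out and joined onto
    the base URL. The filename here is made up.
    """
    text = ('<img src="/icons/compressed.gif" alt="[   ]">'
            ' <a href="eukaryota.json.gz">eukaryota.json.gz</a>')
    base = "http://trna.ucsc.edu/download/RNAcentral/"
    # -> [('eukaryota.json.gz',
    #      'http://trna.ucsc.edu/download/RNAcentral/eukaryota.json.gz')]
    return extract_download_urls(base, text)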
def downloadable_files(base_url):
"""
Determine all remote files to download.
"""
response = requests.get(base_url)
return extract_download_urls(base_url, response.text)
def url(data):
return data["url"]
def anticodon(data):
"""
Get the anticodon of this entry.
"""
return data["sequenceFeatures"]["anticodon"]["sequence"]
def note_data(data):
"""
Create the dict that will be stored as a note. This is basically whatever
GtRNAdb gives us, with a few duplicate or unneeded fields removed.
"""
note = {}
note["url"] = url(data)
return note
def chromosome(location):
"""
Get the chromosome this location is part of.
"""
chrom = location["exons"][0]["chromosome"]
if chrom == "Chromosome":
return "chr"
return chrom
def taxid(data: ty.Dict[str, str]) -> int:
_, taxid = data["taxonId"].split(":", 1)
return int(taxid)
def lineage(taxonomy: SqliteDict, data):
"""
Get a standardized lineage for the given taxon id.
"""
ncbi_taxid = taxid(data)
if str(ncbi_taxid) in taxonomy:
        # Index with the same string key that the membership test used.
        return taxonomy[str(ncbi_taxid)].lineage
return phy.lineage(ncbi_taxid)
def species(taxonomy, data):
"""
Get a standardized species name for the given taxon id.
"""
ncbi_taxid = taxid(data)
if ncbi_taxid in taxonomy:
return taxonomy[ncbi_taxid].name
return phy.species(ncbi_taxid)
def description(taxonomy, data):
"""
Generate a description for the entries specified by the data.
"""
return "{name} {details}".format(
name=species(taxonomy, data),
details=product(data),
)
def product(data):
"""
Generate the product for the entries specified by the data.
"""
return data["name"]
def primary_id(data, location):
"""
Generate a primary key for the given data and location.
"""
start = min(int(e["startPosition"]) for e in location["exons"])
stop = max(int(e["endPosition"]) for e in location["exons"])
return "GTRNADB:{gene}:{accession}:{start}-{stop}".format(
gene=data["gene"]["symbol"],
accession=location["exons"][0]["INSDC_accession"],
start=start,
stop=stop,
)
def dot_bracket(data):
"""
Generate a dot bracket string from the secondary structure string that
GtRNAdb uses. That is turn '>>..<<' to '((..))'.
"""
transformed = data["secondary_structure"].replace(">", "(").replace("<", ")")
if set(transformed) != set("(.)"):
raise InvalidDotBracket("Unexpected characters in %s" % transformed)
return transformed
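def _example_dot_bracket():
    # Illustrative toy record: GtRNAdb encodes paired bases as '>' / '<',
    # which dot_bracket() rewrites to the conventional '(' / ')' notation.
    return dot_bracket({"secondary_structure": ">>..<<"})  # '((..))'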
def parent_accession(location):
"""
    Get the parent accession for the given location.
"""
return location["exons"][0]["INSDC_accession"]
def accession(data, location):
"""
Generate an accession for the given location in data.
"""
return "{ac}:{gene}".format(
ac=parent_accession(location),
gene=data["gene"]["symbol"],
)
def seq_version(_):
"""
    Compute a seq_version for GtRNAdb data. Currently this will always
    return '1'.
"""
return "1"
def references(metadata):
"""
    Returns the references (publications) for GtRNAdb data.
"""
return [pub.reference(pmid) for pmid in metadata["publications"]]
def sequence(data):
if "matureSequence" in data:
return data["matureSequence"].upper()
return data["sequence"].upper()
def features(raw):
anti = raw["sequenceFeatures"].get("anticodon", None)
if not anti:
return []
return [
data.SequenceFeature(
name="anticodon",
feature_type="anticodon",
location=anti["indexes"],
sequence=anti["sequence"],
metadata={
"isotype": raw["sequenceFeatures"]["isotype"],
"sequence": anti["sequence"],
},
)
]
def regions(location):
if not location:
return []
strand = data.Strand.build(location["exons"][0]["strand"])
exons = []
for exon in location["exons"]:
start = exon["startPosition"]
stop = exon["endPosition"]
if start > stop:
start, stop = stop, start
exons.append(
data.Exon(
start=start,
stop=stop,
)
)
exons = tuple(exons)
return [
data.SequenceRegion(
assembly_id=location["assembly"],
chromosome=chromosome(location),
strand=strand,
exons=exons,
coordinate_system=data.CoordinateSystem.one_based(),
)
]
def gene_synonyms(raw: ty.Dict[str, ty.Any]) -> ty.List[str]:
return raw["gene"]["synonyms"]
def gene(raw: ty.Dict[str, ty.Any]) -> str:
return raw["gene"]["symbol"]
def optional_id(raw) -> str:
return raw["gene"]["symbol"]
|
|
# -*- coding: utf-8 -*-
from wakatime.main import execute
from wakatime.packages import requests
import logging
import os
import shutil
import sys
from testfixtures import log_capture
from wakatime.compat import u
from wakatime.constants import API_ERROR, SUCCESS
from wakatime.packages import certifi
from wakatime.packages.requests.exceptions import RequestException
from wakatime.packages.requests.models import Response
from . import utils
from .utils import CustomResponse
try:
from mock import ANY, call
except ImportError:
from unittest.mock import ANY, call
class ProxyTestCase(utils.TestCase):
patch_these = [
'wakatime.packages.requests.adapters.HTTPAdapter.send',
'wakatime.offlinequeue.Queue.push',
['wakatime.offlinequeue.Queue.pop', None],
['wakatime.offlinequeue.Queue.connect', None],
'wakatime.session_cache.SessionCache.save',
'wakatime.session_cache.SessionCache.delete',
['wakatime.session_cache.SessionCache.get', requests.session],
['wakatime.session_cache.SessionCache.connect', None],
]
def test_proxy_without_protocol(self):
response = CustomResponse()
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
with utils.TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
proxy = 'user:pass@localhost:12345'
config = 'tests/samples/configs/good_config.cfg'
args = ['--file', entity, '--config', config, '--proxy', proxy]
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertEquals(sys.stdout.getvalue(), '')
self.assertEquals(sys.stderr.getvalue(), '')
self.patched['wakatime.session_cache.SessionCache.get'].assert_called_once_with()
self.patched['wakatime.session_cache.SessionCache.delete'].assert_not_called()
self.patched['wakatime.session_cache.SessionCache.save'].assert_called_once_with(ANY)
self.patched['wakatime.offlinequeue.Queue.push'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.pop'].assert_called_once_with()
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].assert_called_once_with(ANY, cert=None, proxies={'https': proxy}, stream=False, timeout=60, verify=certifi.where())
def test_https_proxy(self):
response = CustomResponse()
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
with utils.TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
proxy = 'https://user:pass@localhost:12345'
config = 'tests/samples/configs/good_config.cfg'
args = ['--file', entity, '--config', config, '--proxy', proxy]
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertEquals(sys.stdout.getvalue(), '')
self.assertEquals(sys.stderr.getvalue(), '')
self.patched['wakatime.session_cache.SessionCache.get'].assert_called_once_with()
self.patched['wakatime.session_cache.SessionCache.delete'].assert_not_called()
self.patched['wakatime.session_cache.SessionCache.save'].assert_called_once_with(ANY)
self.patched['wakatime.offlinequeue.Queue.push'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.pop'].assert_called_once_with()
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].assert_called_once_with(ANY, cert=None, proxies={'https': proxy}, stream=False, timeout=60, verify=certifi.where())
def test_socks_proxy(self):
response = CustomResponse()
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
with utils.TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
proxy = 'socks5://user:pass@localhost:12345'
config = 'tests/samples/configs/good_config.cfg'
args = ['--file', entity, '--config', config, '--proxy', proxy]
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertEquals(sys.stdout.getvalue(), '')
self.assertEquals(sys.stderr.getvalue(), '')
self.patched['wakatime.session_cache.SessionCache.get'].assert_called_once_with()
self.patched['wakatime.session_cache.SessionCache.delete'].assert_not_called()
self.patched['wakatime.session_cache.SessionCache.save'].assert_called_once_with(ANY)
self.patched['wakatime.offlinequeue.Queue.push'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.pop'].assert_called_once_with()
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].assert_called_once_with(ANY, cert=None, proxies={'https': proxy}, stream=False, timeout=60, verify=certifi.where())
def test_ntlm_proxy_used_after_trying_normal_proxy(self):
response = Response()
response.status_code = 400
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
with utils.TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
proxy = 'domain\\user:pass'
config = 'tests/samples/configs/good_config.cfg'
args = ['--file', entity, '--config', config, '--proxy', proxy]
retval = execute(args)
self.assertEquals(retval, API_ERROR)
self.assertEquals(sys.stdout.getvalue(), '')
self.assertEquals(sys.stderr.getvalue(), '')
self.patched['wakatime.session_cache.SessionCache.get'].assert_has_calls([call(), call()])
self.patched['wakatime.session_cache.SessionCache.delete'].assert_called_once_with()
self.patched['wakatime.session_cache.SessionCache.save'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.push'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.pop'].assert_not_called()
expected_calls = [
call(ANY, cert=None, proxies={'https': proxy}, stream=False, timeout=60, verify=certifi.where()),
call(ANY, cert=None, proxies={}, stream=False, timeout=60, verify=certifi.where()),
]
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].assert_has_calls(expected_calls)
@log_capture()
def test_ntlm_proxy_used_after_normal_proxy_raises_exception(self, logs):
logging.disable(logging.NOTSET)
ex_msg = 'after exception, should still try ntlm proxy'
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].side_effect = RuntimeError(ex_msg)
with utils.TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
proxy = 'domain\\user:pass'
config = 'tests/samples/configs/good_config.cfg'
args = ['--file', entity, '--config', config, '--proxy', proxy]
retval = execute(args)
self.assertEquals(retval, API_ERROR)
self.assertEquals(sys.stdout.getvalue(), '')
self.assertEquals(sys.stderr.getvalue(), '')
log_output = u("\n").join([u(' ').join(x) for x in logs.actual()])
self.assertIn(ex_msg, log_output)
self.patched['wakatime.session_cache.SessionCache.get'].assert_has_calls([call(), call()])
self.patched['wakatime.session_cache.SessionCache.delete'].assert_called_once_with()
self.patched['wakatime.session_cache.SessionCache.save'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.push'].assert_called_once_with(ANY)
self.patched['wakatime.offlinequeue.Queue.pop'].assert_not_called()
expected_calls = [
call(ANY, cert=None, proxies={'https': proxy}, stream=False, timeout=60, verify=certifi.where()),
call(ANY, cert=None, proxies={}, stream=False, timeout=60, verify=certifi.where()),
]
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].assert_has_calls(expected_calls)
@log_capture()
def test_ntlm_proxy_used_after_normal_proxy_raises_requests_exception(self, logs):
logging.disable(logging.NOTSET)
ex_msg = 'after exception, should still try ntlm proxy'
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].side_effect = RequestException(ex_msg)
with utils.TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
proxy = 'domain\\user:pass'
config = 'tests/samples/configs/good_config.cfg'
args = ['--file', entity, '--config', config, '--proxy', proxy]
retval = execute(args)
self.assertEquals(retval, API_ERROR)
self.assertEquals(sys.stdout.getvalue(), '')
self.assertEquals(sys.stderr.getvalue(), '')
log_output = u("\n").join([u(' ').join(x) for x in logs.actual()])
self.assertEquals('', log_output)
self.patched['wakatime.session_cache.SessionCache.get'].assert_has_calls([call(), call()])
self.patched['wakatime.session_cache.SessionCache.delete'].assert_called_once_with()
self.patched['wakatime.session_cache.SessionCache.save'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.push'].assert_called_once_with(ANY)
self.patched['wakatime.offlinequeue.Queue.pop'].assert_not_called()
expected_calls = [
call(ANY, cert=None, proxies={'https': proxy}, stream=False, timeout=60, verify=certifi.where()),
call(ANY, cert=None, proxies={}, stream=False, timeout=60, verify=certifi.where()),
]
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].assert_has_calls(expected_calls)
@log_capture()
def test_invalid_proxy(self, logs):
logging.disable(logging.NOTSET)
response = CustomResponse()
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
with utils.TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
proxy = 'invaliddd:proxyarg'
config = 'tests/samples/configs/good_config.cfg'
args = ['--file', entity, '--config', config, '--proxy', proxy]
retval = execute(args)
self.assertEquals(retval, 2)
self.assertEquals(sys.stdout.getvalue(), '')
expected = 'error: Invalid proxy. Must be in format https://user:pass@host:port or socks5://user:pass@host:port or domain\\user:pass.'
self.assertIn(expected, sys.stderr.getvalue())
log_output = u("\n").join([u(' ').join(x) for x in logs.actual()])
expected = ''
self.assertEquals(log_output, expected)
self.patched['wakatime.session_cache.SessionCache.get'].assert_not_called()
self.patched['wakatime.session_cache.SessionCache.delete'].assert_not_called()
self.patched['wakatime.session_cache.SessionCache.save'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.push'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.pop'].assert_not_called()
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].assert_not_called()
|
|
import re
import urllib
import difflib
from grab.tools import rex
from grab.error import DataNotFound
from rdflib import URIRef, Literal, Graph
from rdflib.plugins.stores.sparqlstore import SPARQLStore
from rdflib.namespace import RDF, RDFS, XSD, FOAF
from base import Parser, ListParser, create_proceedings_uri, find_university_in_dbpedia, create_conference_uri
import config
import utils
from namespaces import BIBO, TIMELINE, SWC, SWRC, SKOS
XPATH_SUMMARY = '/html/body/table[position()>1]//tr[td]'
XPATH_SUMMARY_TITLE = './/td[last()]//a[@href]'
def extract_year(string):
return '20' + string.strip()[-2:]
def create_workshop_uri(volume_number):
return URIRef(config.id['workshop'] + volume_number)
def tonumber(string):
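"""Convert an ordinal word ('first' ... 'fifth', including the 'forth' misspelling) to an int; any other value is returned unchanged."""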
if isinstance(string, basestring):
if string.lower() == 'first':
return 1
elif string.lower() == 'second':
return 2
elif string.lower() == 'third':
return 3
elif string.lower() == 'forth' or string.lower() == 'fourth':
return 4
elif string.lower() == 'fifth':
return 5
return string
class WorkshopSummaryParser(ListParser):
def __init__(self, grab, task, graph, spider=None):
ListParser.__init__(self, grab, task, graph, failonerror=True, spider=spider)
def add_workshop(self, workshop):
if len(workshop) != 0:
if 'workshops' not in self.data:
self.data['workshops'] = [workshop]
else:
self.data['workshops'].append(workshop)
def extract_list(self):
tr = self.grab.tree.xpath(XPATH_SUMMARY)
for i in range(0, len(tr), 2):
element = list()
#<a> with the title
element.append(tr[i].find(XPATH_SUMMARY_TITLE))
#text with the summary information
element.append(tr[i + 1].find('.//td[last()]').text_content())
if element[0].get('href') in config.input_urls or len(config.input_urls) == 1:
self.list.append(element)
def parse_template_1(self, element):
"""
A template for joint proceedings of two workshops:
Examples:
- http://ceur-ws.org/Vol-943/
"""
workshop_1 = {'id': 1}
workshop_2 = {'id': 2}
summary = rex.rex(element[1], r'^\s*(proceedings\s+of\s+the\s+joint\s+workshop\s+on.*\((\w+)\+(\w+)\s+\d+\).*)'
r'Edited\s+by.*',
re.I | re.S)
if len(summary.groups()) != 3:
raise DataNotFound()
title = summary.group(1)
workshop_1['volume_number'] = workshop_2['volume_number'] = \
WorkshopSummaryParser.extract_volume_number(element[0].get('href'))
workshop_1['url'] = workshop_2['url'] = element[0].get('href')
workshop_1['time'] = workshop_2['time'] = utils.parse_date(title)
workshop_1['short_label'] = summary.group(2)
workshop_2['short_label'] = summary.group(3)
self.add_workshop(workshop_1)
self.add_workshop(workshop_2)
def parse_template_2(self, element):
"""
A template for joint proceedings of two workshops:
Examples:
- http://ceur-ws.org/Vol-776/
"""
workshop_1 = {'id': 1}
workshop_2 = {'id': 2}
summary = rex.rex(element[1], r'^\s*(proceedings\s+of\s+joint.*on.*\((\w+)\-(\w+)\s+\d+\).*)Edited by.*',
re.I | re.S)
if len(summary.groups()) != 3:
raise DataNotFound()
title = summary.group(1)
workshop_1['volume_number'] = workshop_2['volume_number'] = \
WorkshopSummaryParser.extract_volume_number(element[0].get('href'))
workshop_1['url'] = workshop_2['url'] = element[0].get('href')
workshop_1['time'] = workshop_2['time'] = utils.parse_date(title)
workshop_1['short_label'] = summary.group(2)
workshop_2['short_label'] = summary.group(3)
self.add_workshop(workshop_1)
self.add_workshop(workshop_2)
def parse_template_3(self, element):
"""
A template for joint proceedings of two workshops.
Examples:
- http://ceur-ws.org/Vol-1098/
- http://ceur-ws.org/Vol-989/
"""
workshop_1 = {'id': 1}
workshop_2 = {'id': 2}
summary = self.rex(element[1], [
r"(joint\s+proceedings\s+of\s+([\s\w,]+)\(([a-zA-Z]+)['\s]?\d+\)[and,\s]+"
r"([:\s\w-]+)\(([a-zA-Z]+)['\s]?\d+\)([\w\s\-.,^\(]*|[,\s]+workshops\s+of.*|[,\s]+co-located.*))Edited by.*",
r"(proceedings\s+of\s+joint([\s\w,]+)\(([a-zA-Z]+)['\s]?\d{0,4}\)[and,\s]+"
r"([:,\s\w-]+)\(([a-zA-Z]+)['\s]?\d{0,4}\)([\w\s\-.,^\(]*|[,\s]+workshops\s+of.*|[,\s]+co-located.*))Edited by.*"
], re.I | re.S)
if len(summary.groups()) != 6:
raise DataNotFound()
title = summary.group(1)
workshop_1['volume_number'] = workshop_2['volume_number'] = \
WorkshopSummaryParser.extract_volume_number(element[0].get('href'))
workshop_1['url'] = workshop_2['url'] = element[0].get('href')
workshop_1['time'] = workshop_2['time'] = utils.parse_date(title)
workshop_1['label'] = summary.group(2)
workshop_1['short_label'] = summary.group(3)
workshop_2['label'] = summary.group(4)
workshop_2['short_label'] = summary.group(5)
self.add_workshop(workshop_1)
self.add_workshop(workshop_2)
def parse_template_4(self, element):
"""
A template for joint proceedings of three workshops.
Examples:
- http://ceur-ws.org/Vol-981/
- http://ceur-ws.org/Vol-862/
- http://ceur-ws.org/Vol-853/
"""
workshop_1 = {'id': 1}
workshop_2 = {'id': 2}
workshop_3 = {'id': 3}
summary = self.rex(element[1], [
r'(joint\s+proceedings\s+of\s+[the]*.*workshops:\s*([\s\w]+)\(([a-zA-Z]+)\d+\)'
r'[and,\s]+([\s\w]+)\(([a-zA-Z]+)\d+\)[and,\s]+([\s\w]+)\(([a-zA-Z]+)\d+\)[,\s]+.*)Edited by.*',
r"(joint\s+proceedings\s+of\s+([\s\w,]+)\(([a-zA-Z]+)['\s]?\d+\)[and,\s]+([\s\w-]+)\(([a-zA-Z]+)['\s]?\d+\)"
r"[and,\s]+([\s\w]+)\(([a-zA-Z]+)['\s]?\d+\)[,\s]+.*)Edited by.*"
],
re.I | re.S)
if len(summary.groups()) != 7:
raise DataNotFound()
title = summary.group(1)
workshop_1['volume_number'] = workshop_2['volume_number'] = workshop_3['volume_number'] = \
WorkshopSummaryParser.extract_volume_number(element[0].get('href'))
workshop_1['url'] = workshop_2['url'] = workshop_3['url'] = element[0].get('href')
workshop_1['time'] = workshop_2['time'] = workshop_3['time'] = utils.parse_date(title)
workshop_1['label'] = summary.group(2)
workshop_1['short_label'] = summary.group(3)
workshop_2['label'] = summary.group(4)
workshop_2['short_label'] = summary.group(5)
workshop_3['label'] = summary.group(6)
workshop_3['short_label'] = summary.group(7)
self.add_workshop(workshop_1)
self.add_workshop(workshop_2)
self.add_workshop(workshop_3)
def parse_template_5(self, element):
"""
A template for a workshop with the conference acronym and year in the name
Examples:
- http://ceur-ws.org/Vol-958/
"""
workshop = {}
title = rex.rex(element[1], r'(.*)Edited\s*by.*', re.I | re.S).group(1)
workshop['volume_number'] = WorkshopSummaryParser.extract_volume_number(element[0].get('href'))
label_part = rex.rex(element[0].text, r'(.*)\sat\s(\w{2,})\s(\d{4})[\s\.]*', re.I | re.S)
workshop['label'] = label_part.group(1)
workshop['conf_acronym'] = label_part.group(2)
workshop['conf_year'] = label_part.group(3)
workshop['url'] = element[0].get('href')
workshop['time'] = utils.parse_date(title)
try:
workshop['edition'] = tonumber(
rex.rex(title,
r'.*Proceedings(\s*of)?(\s*the)?\s*(\d{1,}|first|second|third|forth|fourth|fifth)[thrd]*'
r'.*Workshop.*',
re.I, default=None).group(3))
except Exception:
#'edition' property is optional
pass
self.add_workshop(workshop)
def parse_template_6(self, element):
workshop = {}
title = rex.rex(element[1], r'(.*)Edited\s*by.*', re.I | re.S).group(1)
workshop['volume_number'] = WorkshopSummaryParser.extract_volume_number(element[0].get('href'))
workshop['label'] = element[0].text.replace('.', '')
workshop['url'] = element[0].get('href')
workshop['time'] = utils.parse_date(title)
try:
workshop['edition'] = tonumber(
rex.rex(title,
r'.*Proceedings(\s*of)?(\s*the)?\s*(\d{1,}|first|second|third|forth|fourth|fifth)[thrd]*'
r'.*Workshop.*',
re.I, default=None).group(3))
except Exception:
#'edition' property is optional
pass
self.add_workshop(workshop)
def write(self):
triples = []
for workshop in self.data['workshops']:
if 'id' in workshop:
resource = create_workshop_uri("%s#%s" % (workshop['volume_number'], workshop['id']))
else:
resource = create_workshop_uri(workshop['volume_number'])
proceedings = URIRef(workshop['url'])
triples.append((resource, RDF.type, BIBO.Workshop))
if 'label' in workshop:
triples.append((resource, SWRC.eventTitle, Literal(workshop['label'], datatype=XSD.string)))
elif 'short_label' in workshop:
triples.append((resource, SWRC.eventTitle, Literal(workshop['short_label'], datatype=XSD.string)))
else:
raise Exception('[WORKSHOP %s] Doesn\'t have a label!' % workshop['url'])
triples.append((proceedings, BIBO.presentedAt, resource))
if 'edition' in workshop:
triples.append((resource, SWRC.edition, Literal(workshop['edition'], datatype=XSD.integer)))
if 'short_label' in workshop:
triples.append((resource, BIBO.shortTitle, Literal(workshop['short_label'], datatype=XSD.string)))
if workshop['time'] and len(workshop['time']) > 1:
triples.append((
resource,
TIMELINE.beginsAtDateTime,
Literal(workshop['time'][0].strftime('%Y-%m-%d'), datatype=XSD.date)))
triples.append((
resource,
TIMELINE.endsAtDateTime,
Literal(workshop['time'][1].strftime('%Y-%m-%d'), datatype=XSD.date)))
elif workshop['time'] and len(workshop['time']) > 0:
triples.append((
resource,
TIMELINE.atDate,
Literal(workshop['time'][0].strftime('%Y-%m-%d'), datatype=XSD.date)))
#For parse_template_5
if 'conf_acronym' in workshop and 'conf_year' in workshop:
conference = create_conference_uri(workshop['conf_acronym'], workshop['conf_year'])
triples.append((conference, RDF.type, SWRC.Conference))
triples.append((conference, BIBO.shortTitle, Literal(workshop['conf_acronym'], datatype=XSD.string)))
triples.append((conference, TIMELINE.atDate, Literal(workshop['conf_year'], datatype=XSD.gYear)))
triples.append((resource, SWC.isSubEventOf, conference))
self.write_triples(triples)
class WorkshopPageParser(Parser):
def __init__(self, grab, task, graph, spider=None):
Parser.__init__(self, grab, task, graph, failonerror=False, spider=spider)
def begin_template(self):
self.data['volume_number'] = WorkshopPageParser.extract_volume_number(self.task.url)
def end_template(self):
pass
def parse_template_1(self):
"""
Examples:
- http://ceur-ws.org/Vol-1008/
- http://ceur-ws.org/Vol-1081/
- http://ceur-ws.org/Vol-1085/
"""
self.begin_template()
try:
colocated = rex.rex(self.grab.tree.xpath('//span[@class="CEURCOLOCATED"]/text()')[0],
r'([a-zA-Z\s*]+)[\s\']*(\d{4}|\d{2})', re.I)
except IndexError as ex:
raise DataNotFound(ex)
self.data['acronym'] = colocated.group(1).strip()
self.data['year'] = extract_year(colocated.group(2))
self.end_template()
def parse_template_2(self):
"""
Examples:
- http://ceur-ws.org/Vol-996/
- http://ceur-ws.org/Vol-937/
- http://ceur-ws.org/Vol-838/
- http://ceur-ws.org/Vol-840/
- http://ceur-ws.org/Vol-859/
"""
self.begin_template()
try:
colocated = self.rex(self.grab.tree.xpath('//span[@class="CEURFULLTITLE"]/text()')[0],
[
r".*proceedings of the\s*([a-zA-Z]{2,})[\s'-]*(\d{4}|\d{2})\s+"
r"(workshop|conference|posters).*",
r".*at\s+([a-zA-Z]{2,})[\s'-]*(\d{4}|\d{2})\)+",
r"^([a-zA-Z]{2,})[\s'-]*(\d{2}|\d{4})\s+workshop"
], re.I)
except IndexError as ex:
raise DataNotFound(ex)
self.data['acronym'] = colocated.group(1).strip()
self.data['year'] = extract_year(colocated.group(2))
self.end_template()
def parse_template_3(self):
"""
Examples:
- http://ceur-ws.org/Vol-951/
"""
self.begin_template()
header = ' '.join(self.grab.tree.xpath(r'/html/body//*[following-sibling::*[contains(., "Edited by")] '
r'and not(self::table)]/descendant-or-self::*/text()'))
colocated = self.rex(header, [
r".*(in\s+conjun?ction|co[l-]?located)\s+with.*conference.*\(\s*([a-zA-Z]{2,})[-'\s]*(\d{4}|\d{2})\s*\).*",
r".*(proceedings\s+of\s+the)\s+([a-zA-Z]{2,})[\s'-]*(\d{4}|\d{2})\s+workshop.*",
r".*(workshop\s+at\s+|a\s+workshop\s+of\s+).*\(\s*([a-zA-Z-]{2,})[\s'-]*(\d{4}|\d{2})\s*\).*",
r".*(proceedings\s+of).*\(.*at\s+([a-zA-Z]{2,})[\s'-]*(\d{4}|\d{2})\).*",
r".*(co-located\s+with|a\s+workshop\s+of).*conference[\s,]+([a-zA-Z]{3,})[\s'-]*(\d{4}|\d{2}).*"
], re.IGNORECASE | re.DOTALL)
self.data['acronym'] = colocated.group(2).strip()
self.data['year'] = extract_year(colocated.group(3))
self.end_template()
def write(self):
triples = []
proceedings = create_proceedings_uri(self.data['volume_number'])
conference = URIRef(config.id['conference'] + urllib.quote(self.data['acronym'] + "-" + self.data['year']))
triples.append((conference, RDF.type, SWRC.Conference))
triples.append((conference, BIBO.shortTitle, Literal(self.data['acronym'], datatype=XSD.string)))
triples.append((conference, TIMELINE.atDate, Literal(self.data['year'], datatype=XSD.gYear)))
for workshop in self.graph.objects(proceedings, BIBO.presentedAt):
triples.append((workshop, SWC.isSubEventOf, conference))
self.write_triples(triples)
class WorkshopRelationsParser(ListParser):
@staticmethod
def long_to_short(l):
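"""Build a short label (acronym) from a long label by keeping only its uppercase characters."""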
try:
return filter(unicode.isupper, l)
except TypeError:
return filter(str.isupper, l)
def find_labels(self, term):
w_a_labels = [
label.toPython() for label in self.graph.objects(term, SWRC.eventTitle | BIBO.shortTitle)
]
return set(w_a_labels + map(self.long_to_short, w_a_labels))
def is_related(self, w_a, w_b):
w_a_labels = self.find_labels(w_a)
w_b_labels = self.find_labels(w_b)
related = False
for l_a in w_a_labels:
close_matches = difflib.get_close_matches(l_a, w_b_labels)
# print "======\n%s\n%s" % (l_a, close_matches)
if len(close_matches) > 0:
related = True
break
return related
def extract_list(self):
tr = self.grab.tree.xpath(XPATH_SUMMARY)
for i in range(0, len(tr), 2):
element = list()
#<a> with the title
element.append(tr[i].find(XPATH_SUMMARY_TITLE))
if element[0].get('href') in config.input_urls or len(config.input_urls) == 1:
self.list.append(element)
def parse_template_main(self, element):
self.data['volume_number'] = WorkshopRelationsParser.extract_volume_number(element[0].get('href'))
def write(self):
triples = []
proceedings = create_proceedings_uri(self.data['volume_number'])
workshops = self.graph.objects(proceedings, BIBO.presentedAt)
proceedings_related = self.graph.objects(proceedings, RDFS.seeAlso)
workshops_related = []
for p_related in proceedings_related:
map(workshops_related.append, self.graph.objects(p_related, BIBO.presentedAt))
for workshop in workshops:
for workshop_related in workshops_related:
if self.is_related(workshop, workshop_related):
triples.append((workshop, RDFS.seeAlso, workshop_related))
self.write_triples(triples)
class WorkshopAcronymParser(ListParser):
"""
NOTE: The parser doesn't support joint proceedings/workshops, they're just ignored.
"""
def __init__(self, grab, task, graph, spider):
ListParser.__init__(self, grab, task, graph, failonerror=False, spider=spider)
def extract_list(self):
tr = self.grab.tree.xpath(XPATH_SUMMARY)
for i in range(0, len(tr), 2):
element = list()
#<a> with the title
element.append(tr[i].find(XPATH_SUMMARY_TITLE))
#text with the summary information
element.append(tr[i + 1].find('.//td[last()]').text_content())
url = element[0].get('href')
workshops = [w for w in self.graph.objects(URIRef(url), BIBO.presentedAt)]
#This parser doesn't support joint proceedings/workshops
if (url in config.input_urls or len(config.input_urls) == 1) and len(workshops) == 1:
self.list.append(element)
def parse_template_1(self, element):
title = rex.rex(element[1], r'(.*)Edited\s*by.*', re.I | re.S).group(1).replace('\n', '')
if re.match(r'^proceedings of the[joint ]*.*workshops.*|^joint proceedings.*', title, re.I | re.S):
raise DataNotFound()
labels = rex.rex(title, r".*\((([\da-zA-Z*@\-&:]+?)['\s-]*(\d{2}|\d{4})|"
r"([\da-zA-Z*@\-&:]+?)['\s-]*(\d{2}|\d{4})\s+at.*)\).*",
re.I | re.S)
short_label = labels.group(2)
self.data['volume_number'] = WorkshopSummaryParser.extract_volume_number(element[0].get('href'))
self.data['short_label'] = short_label
def write(self):
triples = []
workshop = create_workshop_uri(self.data['volume_number'])
triples.append((workshop, BIBO.shortTitle, Literal(self.data['short_label'], datatype=XSD.string)))
self.write_triples(triples)
class JointWorkshopsEditorsParser(Parser):
def __init__(self, grab, task, graph, spider=None):
Parser.__init__(self, grab, task, graph, failonerror=False, spider=spider)
def begin_template(self):
self.data['volume_number'] = WorkshopPageParser.extract_volume_number(self.task.url)
self.data['proceedings'] = create_proceedings_uri(self.data['volume_number'])
workshops = [w for w in self.graph.objects(self.data['proceedings'], BIBO.presentedAt)]
if len(workshops) > 1:
self.data['workshops'] = workshops
else:
raise DataNotFound('Skipping http://ceur-ws.org/Vol-%s/ proceedings, because it\'s not joint'
% self.data['volume_number'])
def parse_template_1(self):
"""
Examples:
- http://ceur-ws.org/Vol-981/
"""
self.begin_template()
editors_block = u' '.join(
self.grab.tree.xpath('/html/body//text()[preceding::*[contains(., "Edited by")] and '
'following::*[contains(.,"Table of Contents") or @class="CEURTOC"]]'))
editors = self.graph.objects(self.data['proceedings'], SWRC.editor)
self.data['chairs'] = dict()
for editor in editors:
name = self.graph.objects(editor, FOAF.name).next()
regexp = u'.*' + name + u'[\s~\xc2\xb0@#$%\^&*+-\xc2\xac]*\((\w+?)\d+\).*'
match = re.match(regexp, editors_block,
re.I | re.S)
if match:
self.data['chairs'][editor] = match.group(1)
if len(self.data['chairs']) == 0:
raise DataNotFound()
def write(self):
triples = []
workshops = [w for w in self.graph.objects(self.data['proceedings'], BIBO.presentedAt)]
for editor, v in self.data['chairs'].iteritems():
for workshop in workshops:
chair = URIRef(workshop.toPython() + '/chair')
closes = difflib.get_close_matches(v,
[label.toPython() for label in
self.graph.objects(workshop, RDFS.label | BIBO.shortTitle)])
if len(closes) > 0:
triples.append((chair, RDF.type, SWC.Chair))
triples.append((editor, SWC.holdsRole, chair))
triples.append((chair, SWC.heldBy, editor))
triples.append((workshop, SWC.hasRole, chair))
triples.append((chair, SWC.isRoleAt, workshop))
break
self.write_triples(triples)
class EditorAffiliationParser(Parser):
def __init__(self, grab, task, graph, spider=None):
Parser.__init__(self, grab, task, graph, spider=spider)
self.dbpedia = Graph(SPARQLStore(config.sparqlstore['dbpedia_url'],
context_aware=False), namespace_manager=self.graph.namespace_manager)
def begin_template(self):
self.data['volume_number'] = WorkshopPageParser.extract_volume_number(self.task.url)
self.data['proceedings'] = create_proceedings_uri(self.data['volume_number'])
def parse_template_1(self):
self.begin_template()
header = '\n'.join(self.grab.tree.xpath('/html/body//text()[preceding::*[contains(., "Edited by")] '
'and following::*[contains(.,"Table of Contents") or @class="CEURTOC"]]'))
tokens = [re.sub(r'[\n\r,]+', '', token.strip()) for token in re.split(r'[,\t\n\r\f\*\+]+', header, re.I | re.S)
if len(token.strip()) > 0]
self.data['universities'] = find_university_in_dbpedia(self.dbpedia, tokens)
def write(self):
triples = []
for u in self.data['universities']:
triples.append((self.data['proceedings'], SWRC.affiliation, u))
self.write_triples(triples)
class EditorNameExpandParser(Parser):
def __init__(self, grab, task, graph, spider=None):
Parser.__init__(self, grab, task, graph, failonerror=False, spider=spider)
def begin_template(self):
self.data['volume_number'] = WorkshopPageParser.extract_volume_number(self.task.url)
self.data['proceedings'] = create_proceedings_uri(self.data['volume_number'])
self.editors = []
for editor in self.graph.objects(self.data['proceedings'], FOAF.maker):
editor_name = self.graph.value(editor, FOAF.name)
# first name is abbreviated, e.g. "J. Smith"
if editor_name.split(' ', 1)[0].find('.') > 0:
self.editors.append((editor, editor_name))
if len(self.editors) == 0:
raise DataNotFound('Skipping http://ceur-ws.org/Vol-%s/, because the editor names are already complete'
% self.data['volume_number'])
def parse_template_1(self):
"""
Examples:
- http://ceur-ws.org/Vol-1/
"""
self.begin_template()
header = '\n'.join(self.grab.tree.xpath('/html/body//text()[preceding::*[contains(., "Edited by")] '
'and following::*[contains(.,"Table of Contents") or @class="CEURTOC"]]'))
self.data['editors'] = []
for turtle in self.editors:
regexp = u'.*(' + turtle[1].replace('.', '.*') + ').*'
match = re.match(regexp, header, re.I | re.S)
if match:
self.data['editors'].append((turtle[0], match.group(1)))
def write(self):
triples = []
for turtle in self.data['editors']:
triples.append((turtle[0], FOAF.name, Literal(turtle[1], datatype=XSD.string)))
self.write_triples(triples)
|
|
import argparse, json, os, random, subprocess, sys, time
from parcellearning.utilities import gnnio
from parcellearning.utilities.load import load_schema, load_model
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score
import torch
from niio import write
def main(args):
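"""Evaluate a trained model on the chosen subject set: write per-subject prediction and attention GIFTI files, consensus maps, an accuracy/F1 CSV, and a histogram figure."""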
schema = load_schema(args.schema_file)
if args.no_background:
print('Excluding background in accuracy calculations')
pred_dir = '%s/predictions/no_background/' % (schema['data']['out'])
else:
pred_dir = '%s/predictions/' % (schema['data']['out'])
Path(pred_dir).mkdir(parents=True, exist_ok=True)
features = schema['features']
features.sort()
assess_subjects = '%ssubjects/%s.txt' % (schema['data']['in'], args.data)
with open(assess_subjects, 'r') as f:
subjects = f.read().split()
label_table = '%sL.labeltable' % (schema['data']['in'])
graphs = gnnio.dataset(dType=args.data,
features=features,
dSet=schema['data'][args.data],
norm=True,
aggregate=True,
clean=True)
# get model file
model_parameters = '%s%s.earlystop.Loss.pt' % (schema['data']['out'], schema['model'])
model = load_model(schema, model_parameters)
accuracies = np.zeros((len(graphs),))
F = np.zeros((len(graphs),))
predictions = np.zeros((32492, len(graphs)))  # 32492 vertices per 32k fs_LR hemisphere
agg_alpha = []
for i, graph in enumerate(graphs):
model.eval()
with torch.no_grad():
test_X = graph.ndata['features']
test_Y = graph.ndata['label']
idx = graph.ndata['idx']
P = np.zeros((32492, 2))*np.nan
test_logits, alpha = model(**{'g': graph, 'inputs': test_X, 'label': test_Y, 'return_alpha': True})
A = np.zeros((32492, alpha.shape[1]))
A[idx, :] = alpha
agg_alpha.append(A)
_,indices = torch.max(test_logits, dim=1)
if args.no_background:
background = (test_Y == 0)
indices = indices[~background]
test_Y = test_Y[~background]
idx = idx[~background]
correct = torch.sum(indices == test_Y)
# compute accuracy and f1-score
acc = correct.item() * 1.0 / len(test_Y)
f = f1_score(test_Y, indices, labels=np.arange(1,181), average='micro')
# store accuracy and F1 metrics
accuracies[i] = acc
F[i] = f
# update buffer label to have value of -1
indices[indices == 0] = -1
test_Y[test_Y == 0] = -1
# save prediction and ground truth to single file
P[idx, 0] = indices
P[idx, 1] = test_Y
# save output predictions as metric file
out_func_file = '%s%s.L.%s.func.gii' % (
pred_dir, subjects[i], schema['model'])
write.save(P, out_func_file, 'L')
# convert metric file to label file
out_label_file = '%s%s.L.%s.label.gii' % (
pred_dir, subjects[i], schema['model'])
bashCommand = "wb_command -metric-label-import %s %s %s" % (
out_func_file, label_table, out_label_file)
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
# save learned layer-wise attentions as metric file
out_attn_file = '%s%s.L.%s.Attention.func.gii' % (
pred_dir, subjects[i], schema['model'])
write.save(A, out_attn_file, 'L')
# remove func file
os.remove(out_func_file)
# store predicted maps
predictions[idx, i] = indices
print('Mean accuracy: %.3f' % accuracies.mean())
# save all predicted maps into single file
out_func_file = '%s%s.predictions.%s.func.gii' % (pred_dir, schema['model'], args.data)
write.save(predictions, out_func_file, 'L')
out_label_file = '%s%s.predictions.%s.label.gii' % (pred_dir, schema['model'], args.data)
bashCommand = "wb_command -metric-label-import %s %s %s" % (
out_func_file, label_table, out_label_file)
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
os.remove(out_func_file)
# save consensus prediction map
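# Per-vertex majority vote across subjects: np.unique maps labels to integer
# codes, bincount tallies each code along the subject axis, and argmax picks
# the most frequent code, which is then mapped back to its original label.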
axis = 1
u, idx = np.unique(predictions, return_inverse=True)
consensus = u[np.argmax(np.apply_along_axis(np.bincount, axis, idx.reshape(predictions.shape),
None, np.max(idx) + 1), axis=axis)]
out_func_file = '%s%s.predictions.consensus.%s.func.gii' % (pred_dir, schema['model'], args.data)
write.save(consensus, out_func_file, 'L')
out_label_file = '%s%s.predictions.consensus.%s.label.gii' % (pred_dir, schema['model'], args.data)
bashCommand = "wb_command -metric-label-import %s %s %s" % (
out_func_file, label_table, out_label_file)
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
os.remove(out_func_file)
# save consensus attention map
agg_alpha = np.stack(agg_alpha, axis=2).mean(2)
out_func_file = '%s%s.attention.consensus.%s.func.gii' % (pred_dir, schema['model'], args.data)
write.save(agg_alpha, out_func_file, 'L')
# plot test performance metrics
df = pd.DataFrame(np.column_stack([accuracies, F, subjects]), columns=['acc', 'f1', 'subject'])
df_file = '%s%s.metrics.%s.csv' % (pred_dir, schema['model'], args.data)
df.to_csv(df_file)
fig, [ax1, ax2] = plt.subplots(1,2, figsize=(12,5))
# test accuracy distribution
ax1.hist(accuracies, density=True)
ax1.tick_params(labelsize=15)
ax1.set_xlabel('Accuracy', fontsize=15)
ax1.set_ylabel('Density', fontsize=15)
ax1.set_xlim([0,1])
ax1.set_title('Model %s Performance: Accuracy' % (args.data.capitalize()), fontsize=20)
# test F1 distribution
ax2.hist(F, density=True)
ax2.tick_params(labelsize=15)
ax2.set_xlabel('F1-Score', fontsize=15)
ax2.set_ylabel('Density', fontsize=15)
ax2.set_xlim([0,1])
ax2.set_title('Model %s Performance: F1-Score' % (args.data.capitalize()), fontsize=20)
plt.tight_layout()
fig_file = '%s%s.accuracies.%s.png' % (pred_dir, schema['model'], args.data)
plt.savefig(fig_file, bbox_inches='tight', transparent=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Perform model testing on JK-GAT models.')
parser.add_argument('--schema-file',
type=str,
help='JSON file with parameters for model, training, and output.')
parser.add_argument('--data',
type=str,
help='Assess model performance on test or validation set.',
default='testing',
choices=['testing', 'train', 'validation'],
required=False)
parser.add_argument('-no_background',
help='Exclude background voxels in accuracy calculation.',
action='store_true',
required=False)
args = parser.parse_args()
main(args)
|
|
import django
from django.core import mail
from django.test import TestCase
try:
try:
from django.utils import unittest
except PendingDeprecationWarning:
import unittest
except ImportError:
import unittest # NOQA
try:
from django.test.utils import override_settings
except ImportError:
def override_settings(*args, **kwargs):
return unittest.skipIf(django.get_version().startswith('1.3'), "Django < 1.4 doesn't have override_settings")
from django.core.exceptions import ImproperlyConfigured
from emailtools import BaseEmail, BasicEmail, HTMLEmail, MarkdownEmail
class TestBasicCBE(TestCase):
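"""Tests for the class-based email pattern: subclass BasicEmail with subject/to/from_email/body attributes, then build a sender with as_callable(**overrides) and call it."""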
EMAIL_ATTRS = {
'subject': 'test email',
'to': ['to@example.com'],
'from_email': 'from@example.com',
'body': 'This is a test email',
}
def setUp(self):
class TestEmail(BasicEmail):
subject = self.EMAIL_ATTRS['subject']
to = self.EMAIL_ATTRS['to']
from_email = self.EMAIL_ATTRS['from_email']
body = self.EMAIL_ATTRS['body']
self.TestEmail = TestEmail
def create_and_send_a_message(self, **kwargs):
email_callable = self.TestEmail.as_callable(**kwargs)
email_callable()
def test_mail_is_sent(self):
self.create_and_send_a_message()
self.assertEqual(len(mail.outbox), 1)
def test_create_message(self):
email_instance = self.TestEmail()
message = email_instance.get_email_message()
self.assertTrue(isinstance(message, self.TestEmail.email_message_class))
def test_to(self):
self.create_and_send_a_message()
message = mail.outbox[0]
for k, v in self.EMAIL_ATTRS.iteritems():
self.assertEqual(getattr(message, k), v)
self.assertEqual(message.bcc, [])
self.assertEqual(message.cc, [])
def test_settings_override(self):
self.create_and_send_a_message(
to=['overridden_to@example.com'],
cc=['overridden_cc@example.com'],
bcc=['overridden_bcc@example.com'],
subject='overridden_subject',
)
message = mail.outbox[0]
self.assertEqual(message.to, ['overridden_to@example.com'])
self.assertEqual(message.cc, ['overridden_cc@example.com'])
self.assertEqual(message.bcc, ['overridden_bcc@example.com'])
self.assertEqual(message.subject, 'overridden_subject')
def test_improper_settings_override(self):
with self.assertRaises(TypeError):
self.create_and_send_a_message(not_a_class_property=True)
def test_to_address_as_string(self):
self.create_and_send_a_message(to='string@example.com')
message = mail.outbox[0]
self.assertEqual(message.to, ['string@example.com'])
def test_missing_message_class(self):
class TestEmail(BaseEmail):
subject = self.EMAIL_ATTRS['subject']
to = self.EMAIL_ATTRS['to']
from_email = self.EMAIL_ATTRS['from_email']
body = self.EMAIL_ATTRS['body']
with self.assertRaises(ImproperlyConfigured):
TestEmail.as_callable()()
def test_missing_to(self):
with self.assertRaises(ImproperlyConfigured):
self.create_and_send_a_message(to=None)
def test_missing_subject(self):
with self.assertRaises(ImproperlyConfigured):
self.create_and_send_a_message(subject=None)
def test_missing_body(self):
with self.assertRaises(ImproperlyConfigured):
self.create_and_send_a_message(body=None)
@unittest.expectedFailure
def test_sending_kwargs(self):
class SendingKwargsEmail(self.TestEmail):
from_email = 'with\nnewline@gmail.com'
fail_silently = True
SendingKwargsEmail.as_callable(fail_silently=True)()
with self.assertRaises(mail.BadHeaderError):
SendingKwargsEmail.as_callable()()
def test_basic_init_overide(self):
class TestEmail(self.TestEmail):
def __init__(self, x):
super(TestEmail, self).__init__(x)
TestEmail('arst')
TestEmail(x='arst')
with self.assertRaises(TypeError):
TestEmail()
with self.assertRaises(TypeError):
TestEmail('arst', 'tsra')
def test_extra_headers(self):
class TestEmail(self.TestEmail):
headers = {
'Test-Header': 'foo',
}
message = TestEmail().get_email_message().message()
self.assertEqual(message['Test-Header'], 'foo')
class TestHTMLEmail(TestCase):
EMAIL_ATTRS = {
'subject': 'test email',
'to': ['to@example.com'],
'from_email': 'from@example.com',
'template_name': 'tests/test_HTMLEmail_template.html',
}
def setUp(self):
class TestHTMLEmail(HTMLEmail):
subject = self.EMAIL_ATTRS['subject']
to = self.EMAIL_ATTRS['to']
from_email = self.EMAIL_ATTRS['from_email']
template_name = self.EMAIL_ATTRS['template_name']
def get_context_data(self, **kwargs):
kwargs = super(TestHTMLEmail, self).get_context_data(**kwargs)
kwargs.update({
'title': 'test title',
'content': 'test content',
})
return kwargs
self.TestHTMLEmail = TestHTMLEmail
def create_and_send_a_message(self, **kwargs):
email_callable = self.TestHTMLEmail.as_callable(**kwargs)
email_callable()
def test_mail_is_sent(self):
self.create_and_send_a_message()
self.assertEqual(len(mail.outbox), 1)
def test_mail_has_html_body(self):
self.create_and_send_a_message()
message = mail.outbox[0]
self.assertTrue(message.alternatives)
self.assertEqual(message.alternatives[0][1], 'text/html')
@unittest.skipIf(django.get_version().startswith('1.3'), "Django < 1.4 doesn't allow assertTemplateUsed as a context manager")
def test_template_used(self):
with self.assertTemplateUsed(template_name=self.EMAIL_ATTRS['template_name']):
self.create_and_send_a_message()
def test_html_body(self):
self.create_and_send_a_message()
message = mail.outbox[0]
html_body = message.alternatives[0][0]
try:
self.assertInHTML('<h1>test title</h1>', html_body)
self.assertInHTML('<p>test content</p>', html_body)
except AttributeError: # support for < django 1.5
self.assertIn('<h1>test title</h1>', html_body)
self.assertIn('<p>test content</p>', html_body)
def test_plain_body(self):
self.create_and_send_a_message()
message = mail.outbox[0]
self.assertIn('test title', message.body)
self.assertIn('test content', message.body)
self.assertNotIn('<h1>', message.body)
self.assertNotIn('<p>', message.body)
class TestMarkdownEmail(TestHTMLEmail):
EMAIL_ATTRS = {
'subject': 'test email',
'to': ['to@example.com'],
'from_email': 'from@example.com',
'template_name': 'tests/test_MarkdownEmail_template.md',
}
def setUp(self):
class TestMarkdownEmail(MarkdownEmail):
layout_template = 'mail/base.html'
subject = self.EMAIL_ATTRS['subject']
to = self.EMAIL_ATTRS['to']
from_email = self.EMAIL_ATTRS['from_email']
template_name = self.EMAIL_ATTRS['template_name']
def get_context_data(self, **kwargs):
kwargs = super(TestMarkdownEmail, self).get_context_data(**kwargs)
kwargs.update({
'title': 'test title',
'content': 'test content',
})
return kwargs
self.TestMarkdownEmail = TestMarkdownEmail
def create_and_send_a_message(self, **kwargs):
email_callable = self.TestMarkdownEmail.as_callable(**kwargs)
email_callable()
@override_settings(EMAIL_LAYOUT=None)
def test_missing_base_layout(self):
self.create_and_send_a_message()
with self.assertRaises(ImproperlyConfigured):
self.create_and_send_a_message(layout_template=None)
|
|
from __future__ import division
import numpy as np
from itertools import combinations_with_replacement
from .core import Data, Summary, Propensity, PropensitySelect, Strata
from .estimators import OLS, Blocking, Weighting, Matching, Estimators
class CausalModel(object):
"""
Class that provides the main tools of Causal Inference.
"""
def __init__(self, Y, D, X):
self.old_data = Data(Y, D, X)
self.reset()
def reset(self):
"""
Reinitializes data to original inputs, and drops any estimated
results.
"""
Y, D, X = self.old_data['Y'], self.old_data['D'], self.old_data['X']
self.raw_data = Data(Y, D, X)
self.summary_stats = Summary(self.raw_data)
self.propensity = None
self.cutoff = None
self.blocks = None
self.strata = None
self.estimates = Estimators()
def est_propensity(self, lin='all', qua=None):
"""
Estimates the propensity scores given list of covariates to
include linearly or quadratically.
The propensity score is the conditional probability of
receiving the treatment given the observed covariates.
Estimation is done via a logistic regression.
Parameters
----------
lin: string or list, optional
Column numbers (zero-based) of variables of
the original covariate matrix X to include
linearly. Defaults to the string 'all', which
uses whole covariate matrix.
qua: list, optional
Tuples indicating which columns of the original
covariate matrix to multiply and include. E.g.,
[(1,1), (2,3)] indicates squaring the 2nd column
and including the product of the 3rd and 4th
columns. Default is to not include any
quadratic terms.
"""
lin_terms = parse_lin_terms(self.raw_data['K'], lin)
qua_terms = parse_qua_terms(self.raw_data['K'], qua)
self.propensity = Propensity(self.raw_data, lin_terms, qua_terms)
self.raw_data._dict['pscore'] = self.propensity['fitted']
self._post_pscore_init()
def est_propensity_s(self, lin_B=None, C_lin=1, C_qua=2.71):
"""
Estimates the propensity score with covariates selected using
the algorithm suggested by [1]_.
The propensity score is the conditional probability of
receiving the treatment given the observed covariates.
Estimation is done via a logistic regression.
The covariate selection algorithm is based on a sequence
of likelihood ratio tests.
Parameters
----------
lin_B: list, optional
Column numbers (zero-based) of variables of
the original covariate matrix X to include
linearly. Defaults to empty list, meaning
every column of X is subjected to the
selection algorithm.
C_lin: scalar, optional
Critical value used in likelihood ratio tests
to decide whether candidate linear terms should
be included. Defaults to 1 as in [1]_.
C_qua: scalar, optional
Critical value used in likelihood ratio tests
to decide whether candidate quadratic terms
should be included. Defaults to 2.71 as in
[1]_.
References
----------
.. [1] Imbens, G. & Rubin, D. (2015). Causal Inference in
Statistics, Social, and Biomedical Sciences: An
Introduction.
"""
lin_basic = parse_lin_terms(self.raw_data['K'], lin_B)
self.propensity = PropensitySelect(self.raw_data, lin_basic,
C_lin, C_qua)
self.raw_data._dict['pscore'] = self.propensity['fitted']
self._post_pscore_init()
def trim(self):
"""
Trims data based on propensity score to create a subsample with
better covariate balance.
The default cutoff value is set to 0.1. To set a custom cutoff
value, modify the object attribute named cutoff directly.
This method should only be executed after the propensity score
has been estimated.
"""
if 0 < self.cutoff <= 0.5:
pscore = self.raw_data['pscore']
keep = (pscore >= self.cutoff) & (pscore <= 1-self.cutoff)
Y_trimmed = self.raw_data['Y'][keep]
D_trimmed = self.raw_data['D'][keep]
X_trimmed = self.raw_data['X'][keep]
self.raw_data = Data(Y_trimmed, D_trimmed, X_trimmed)
self.raw_data._dict['pscore'] = pscore[keep]
self.summary_stats = Summary(self.raw_data)
self.strata = None
self.estimates = Estimators()
elif self.cutoff == 0:
pass
else:
raise ValueError('Invalid cutoff.')
def trim_s(self):
"""
Trims data based on propensity score using the cutoff
selection algorithm suggested by [1]_.
This method should only be executed after the propensity score
has been estimated.
References
----------
.. [1] Crump, R., Hotz, V., Imbens, G., & Mitnik, O. (2009).
Dealing with Limited Overlap in Estimation of
Average Treatment Effects. Biometrika, 96, 187-199.
"""
pscore = self.raw_data['pscore']
g = 1.0/(pscore*(1-pscore)) # 1 over Bernoulli variance
self.cutoff = select_cutoff(g)
self.trim()
def stratify(self):
"""
Stratifies the sample based on propensity score.
By default the sample is divided into five equal-sized bins.
The number of bins can be set by modifying the object
attribute named blocks. Alternatively, custom-sized bins can
be created by setting blocks equal to a sorted list of numbers
between 0 and 1 indicating the bin boundaries.
This method should only be executed after the propensity score
has been estimated.
"""
Y, D, X = self.raw_data['Y'], self.raw_data['D'], self.raw_data['X']
pscore = self.raw_data['pscore']
if isinstance(self.blocks, int):
blocks = split_equal_bins(pscore, self.blocks)
else:
blocks = self.blocks[:] # make a copy; should be sorted
blocks[0] = 0 # avoids always dropping 1st unit
def subset(p_low, p_high):
return (p_low < pscore) & (pscore <= p_high)
subsets = [subset(*ps) for ps in zip(blocks, blocks[1:])]
strata = [CausalModel(Y[s], D[s], X[s]) for s in subsets]
self.strata = Strata(strata, subsets, pscore)
def stratify_s(self):
"""
Stratifies the sample based on propensity score using the
bin selection procedure suggested by [1]_.
The bin selection algorithm is based on a sequence of
two-sample t tests performed on the log-odds ratio.
This method should only be executed after the propensity score
has been estimated.
References
----------
.. [1] Imbens, G. & Rubin, D. (2015). Causal Inference in
Statistics, Social, and Biomedical Sciences: An
Introduction.
"""
pscore_order = self.raw_data['pscore'].argsort()
pscore = self.raw_data['pscore'][pscore_order]
D = self.raw_data['D'][pscore_order]
logodds = np.log(pscore / (1-pscore))
K = self.raw_data['K']
blocks_uniq = set(select_blocks(pscore, logodds, D, K, 0, 1))
self.blocks = sorted(blocks_uniq)
self.stratify()
def est_via_ols(self, adj=2):
"""
Estimates average treatment effects using least squares.
Parameters
----------
adj: int (0, 1, or 2)
Indicates how covariate adjustments are to be
performed. Set adj = 0 to not include any
covariates. Set adj = 1 to include treatment
indicator D and covariates X separately. Set
adj = 2 to additionally include interaction
terms between D and X. Defaults to 2.
"""
self.estimates['ols'] = OLS(self.raw_data, adj)
def est_via_blocking(self, adj=1):
"""
Estimates average treatment effects using regression within
blocks.
This method should only be executed after the sample has been
stratified.
Parameters
----------
adj: int (0, 1, or 2)
Indicates how covariate adjustments are to be
performed for each within-bin regression.
Set adj = 0 to not include any covariates.
Set adj = 1 to include treatment indicator D
and covariates X separately. Set adj = 2 to
additionally include interaction terms between
D and X. Defaults to 1.
"""
self.estimates['blocking'] = Blocking(self.strata, adj)
def est_via_weighting(self):
"""
Estimates average treatment effects using doubly-robust
version of the Horvitz-Thompson weighting estimator.
"""
self.estimates['weighting'] = Weighting(self.raw_data)
def est_via_matching(self, weights='inv', matches=1, bias_adj=False):
"""
Estimates average treatment effects using nearest-
neighborhood matching.
Matching is done with replacement. Method supports multiple
matching. Correcting bias that arise due to imperfect matches
is also supported. For details on methodology, see [1]_.
Parameters
----------
weights: str or positive definite square matrix
Specifies weighting matrix used in computing
distance measures. Defaults to string 'inv',
which does inverse variance weighting. String
'maha' gives the weighting matrix used in the
Mahalanobis metric.
matches: int
Number of matches to use for each subject.
bias_adj: bool
Specifies whether bias adjustments should be
attempted.
References
----------
.. [1] Imbens, G. & Rubin, D. (2015). Causal Inference in
Statistics, Social, and Biomedical Sciences: An
Introduction.
"""
X, K = self.raw_data['X'], self.raw_data['K']
X_c, X_t = self.raw_data['X_c'], self.raw_data['X_t']
if weights == 'inv':
W = 1/X.var(0)
elif weights == 'maha':
V_c = np.cov(X_c, rowvar=False, ddof=0)
V_t = np.cov(X_t, rowvar=False, ddof=0)
if K == 1:
W = 1/np.array([[(V_c+V_t)/2]]) # matrix form
else:
W = np.linalg.inv((V_c+V_t)/2)
else:
W = weights
self.estimates['matching'] = Matching(self.raw_data, W,
matches, bias_adj)
def _post_pscore_init(self):
self.cutoff = 0.1
self.blocks = 5
def parse_lin_terms(K, lin):
if lin is None:
return []
elif lin == 'all':
return range(K)
else:
return lin
def parse_qua_terms(K, qua):
if qua is None:
return []
elif qua == 'all':
return list(combinations_with_replacement(range(K), 2))
else:
return qua
def sumlessthan(g, sorted_g, cumsum):
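"""For each element of g, look up the cumulative quantity (from cumsum over sorted_g) accumulated over all values less than or equal to that element."""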
deduped_values = dict(zip(sorted_g, cumsum))
return np.array([deduped_values[x] for x in g])
def select_cutoff(g):
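"""Compute the optimal trimming cutoff of Crump, Hotz, Imbens & Mitnik (2009) from g = 1/(pscore*(1-pscore)); returns 0 when no trimming is warranted."""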
if g.max() <= 2*g.mean():
cutoff = 0
else:
sorted_g = np.sort(g)
cumsum_1 = range(1, len(g)+1)
LHS = g * sumlessthan(g, sorted_g, cumsum_1)
cumsum_g = np.cumsum(sorted_g)
RHS = 2 * sumlessthan(g, sorted_g, cumsum_g)
gamma = np.max(g[LHS <= RHS])
cutoff = 0.5 - np.sqrt(0.25 - 1./gamma)
return cutoff
def split_equal_bins(pscore, blocks):
q = np.linspace(0, 100, blocks+1)[1:-1] # q as in qth centiles
centiles = [np.percentile(pscore, x) for x in q]
return [0] + centiles + [1]
def calc_tstat(sample_c, sample_t):
N_c = sample_c.shape[0]
N_t = sample_t.shape[0]
var_c = sample_c.var(ddof=1)
var_t = sample_t.var(ddof=1)
return (sample_t.mean()-sample_c.mean()) / np.sqrt(var_c/N_c+var_t/N_t)
def calc_sample_sizes(D):
N = D.shape[0]
mid_index = N // 2
Nleft = mid_index
Nleft_t = D[:mid_index].sum()
Nleft_c = Nleft - Nleft_t
Nright = N - Nleft
Nright_t = D[mid_index:].sum()
Nright_c = Nright - Nright_t
return (Nleft_c, Nleft_t, Nright_c, Nright_t)
def select_blocks(pscore, logodds, D, K, p_low, p_high):
scope = (pscore >= p_low) & (pscore <= p_high)
c, t = (scope & (D==0)), (scope & (D==1))
Nleft_c, Nleft_t, Nright_c, Nright_t = calc_sample_sizes(D[scope])
if min(Nleft_c, Nleft_t, Nright_c, Nright_t) < K+1:
return [p_low, p_high]
tstat = calc_tstat(logodds[c], logodds[t])
if tstat <= 1.96:
return [p_low, p_high]
low = pscore[scope][0]
mid = pscore[scope][scope.sum() // 2]
high = pscore[scope][-1]
return select_blocks(pscore, logodds, D, K, low, mid) + \
select_blocks(pscore, logodds, D, K, mid, high)
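# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the package API): exercises the
# propensity-score workflow documented in the CausalModel docstrings above on
# hypothetical synthetic data. All names prefixed with demo_ are made up here.
if __name__ == '__main__':
    np.random.seed(0)
    demo_N, demo_K = 500, 3
    demo_X = np.random.randn(demo_N, demo_K)                   # covariates
    demo_p = 1.0/(1.0 + np.exp(-demo_X[:, 0]))                 # true propensity score
    demo_D = (np.random.rand(demo_N) < demo_p).astype(int)     # treatment indicator
    demo_Y = demo_X.dot(np.array([1.0, 0.5, -0.5])) + 2.0*demo_D + np.random.randn(demo_N)
    demo_model = CausalModel(demo_Y, demo_D, demo_X)
    demo_model.est_propensity_s()          # covariate selection via likelihood ratio tests
    demo_model.trim_s()                    # Crump et al. cutoff, then trim
    demo_model.stratify_s()                # data-driven propensity bins
    demo_model.est_via_ols()
    demo_model.est_via_blocking()
    demo_model.est_via_matching(bias_adj=True)
    print(demo_model.estimates)            # collected treatment-effect estimates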
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kws_streaming.models.utils."""
from absl import flags
from absl.testing import parameterized
import numpy as np
from kws_streaming.layers import modes
from kws_streaming.layers import test_utils
from kws_streaming.layers.compat import tf
from kws_streaming.layers.compat import tf1
from kws_streaming.models import model_flags
from kws_streaming.models import model_params
from kws_streaming.models import models
from kws_streaming.models import utils
tf1.disable_eager_execution()
FLAGS = flags.FLAGS
class SequentialModel(tf.keras.Model):
"""Dummy sequential model to test conversion to functional model."""
def __init__(self, num_outputs):
"""Initialize dummy model.
Args:
num_outputs: Number of outputs.
"""
super().__init__()
self._model = {}
self._model['model1'] = tf.keras.Sequential(
layers=[tf.keras.layers.Dense(units=num_outputs, activation=None)],
name='model1')
self._model['model2'] = tf.keras.Sequential(
layers=[tf.keras.layers.GlobalMaxPooling2D()], name='model2')
def call(self, inputs):
net = inputs
net = self._model['model1'](net)
net = self._model['model2'](net)
return net
# Two models are tested with all combinations of speech frontend,
# and all models are tested with one frontend.
class UtilsTest(tf.test.TestCase, parameterized.TestCase):
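"""Tests for kws_streaming model conversion utilities: TFLite export, streaming inference modes with internal/external state, SavedModel export, and sequential-to-functional conversion."""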
def _testTFLite(self,
preprocess='raw',
feature_type='mfcc_tf',
use_tf_fft=False,
model_name='svdf'):
params = model_params.HOTWORD_MODEL_PARAMS[model_name]
params.clip_duration_ms = 100 # make it shorter for testing
# set parameters to test
params.preprocess = preprocess
params.feature_type = feature_type
params.use_tf_fft = use_tf_fft
params = model_flags.update_flags(params)
# create model
model = models.MODELS[params.model_name](params)
# convert TF non streaming model to TFLite non streaming inference
self.assertTrue(
utils.model_to_tflite(self.sess, model, params,
modes.Modes.NON_STREAM_INFERENCE))
def setUp(self):
super(UtilsTest, self).setUp()
tf1.reset_default_graph()
config = tf1.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf1.Session(config=config)
tf1.keras.backend.set_session(self.sess)
@parameterized.named_parameters([
{
'testcase_name': 'raw with mfcc_tf not use tf func',
'preprocess': 'raw',
'feature_type': 'mfcc_tf',
'use_tf_fft': False
},
{
'testcase_name': 'raw with mfcc_tf use tf func',
'preprocess': 'raw',
'feature_type': 'mfcc_tf',
'use_tf_fft': True,
},
{
'testcase_name': 'raw with mfcc_op',
'preprocess': 'raw',
'feature_type': 'mfcc_op',
'use_tf_fft': False, # will be ignored
},
{
'testcase_name': 'mfcc',
'preprocess': 'mfcc',
'feature_type': 'mfcc_op', # will be ignored
'use_tf_fft': False # will be ignored
},
{
'testcase_name': 'micro',
'preprocess': 'micro',
'feature_type': 'mfcc_op', # will be ignored
'use_tf_fft': False # will be ignored
}
])
def testPreprocessNonStreamInferenceTFandTFLite(self,
preprocess,
feature_type,
use_tf_fft,
model_name='svdf'):
# Validate that model with different preprocessing
# can be converted to non stream inference mode.
self._testTFLite(preprocess, feature_type, use_tf_fft, model_name)
@parameterized.named_parameters([
{
'testcase_name': 'raw with mfcc_tf',
'preprocess': 'raw',
'feature_type': 'mfcc_tf'
},
{
'testcase_name': 'raw with mfcc_op',
'preprocess': 'raw',
'feature_type': 'mfcc_op'
},
{
'testcase_name': 'mfcc',
'preprocess': 'mfcc',
'feature_type': 'mfcc_op'
}, # feature_type will be ignored
{
'testcase_name': 'micro',
'preprocess': 'micro',
'feature_type': 'mfcc_op'
}, # feature_type will be ignored
])
def testPreprocessStreamInferenceModeTFandTFLite(self,
preprocess,
feature_type,
model_name='gru'):
# Validate that model with different preprocessing
# can be converted to stream inference mode with TF and TFLite.
params = model_params.HOTWORD_MODEL_PARAMS[model_name]
# set parameters to test
params.preprocess = preprocess
params.feature_type = feature_type
params = model_flags.update_flags(params)
# create model
model = models.MODELS[params.model_name](params)
# convert TF non streaming model to TFLite streaming inference
# with external states
self.assertTrue(
utils.model_to_tflite(self.sess, model, params,
modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE))
# convert TF non streaming model to TF streaming with external states
self.assertTrue(
utils.to_streaming_inference(
model, params, modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE))
# convert TF non streaming model to TF streaming with internal states
self.assertTrue(
utils.to_streaming_inference(
model, params, modes.Modes.STREAM_INTERNAL_STATE_INFERENCE))
def test_model_to_saved(self, model_name='dnn'):
"""SavedModel supports both stateless and stateful graphs."""
params = model_params.HOTWORD_MODEL_PARAMS[model_name]
params = model_flags.update_flags(params)
# create model
model = models.MODELS[params.model_name](params)
utils.model_to_saved(model, params, FLAGS.test_tmpdir)
def testNextPowerOfTwo(self):
self.assertEqual(utils.next_power_of_two(11), 16)
@parameterized.parameters('att_mh_rnn', 'att_rnn', 'dnn', 'ds_cnn', 'cnn',
'tc_resnet', 'crnn', 'gru', 'lstm', 'svdf',
'mobilenet', 'mobilenet_v2', 'xception',
'inception', 'inception_resnet', 'ds_tc_resnet')
def testNonStreamInferenceTFandTFLite(self, model_name='ds_cnn'):
# Validate that all models with selected preprocessing
# can be converted to non stream inference mode.
self._testTFLite(model_name=model_name)
@parameterized.parameters(
'cnn_stride',
'cnn',
'crnn',
'dnn',
'ds_tc_resnet',
'gru',
'lstm',
'svdf',
'bc_resnet'
)
def test_external_streaming_shapes(self, model_name):
model = utils.get_model_with_default_params(
model_name, mode=modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE)
# The first 'n' inputs correspond to the 'n' inputs that the model takes
# in non-streaming mode. The rest of the input tensors represent the
# internal states for each layer in the model.
inputs = [np.zeros(shape, dtype=np.float32) for shape in model.input_shapes]
outputs = model.predict(inputs)
for output, expected_shape in zip(outputs, model.output_shapes):
self.assertEqual(output.shape, expected_shape)
def test_sequential_to_functional(self):
# prepare input data
test_utils.set_seed(1)
batch_input_shape = (1, 4, 2, 2)
input_data = np.random.rand(np.prod(batch_input_shape))
input_data = np.reshape(input_data, batch_input_shape)
# create sequential model
inputs = tf.keras.Input(batch_input_shape=batch_input_shape)
net = SequentialModel(2)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=net)
model.summary()
# convert keras sequential model to functional and compare them
func_model = utils.sequential_to_functional(model)
func_model.summary()
self.assertAllClose(
model.predict(input_data), func_model.predict(input_data))
if __name__ == '__main__':
tf.test.main()
|
|
import json
from django.utils.encoding import force_bytes
from email.utils import formatdate
from urllib.parse import parse_qsl
from time import time
from services.utils import (
get_cdn_url, log_configure, mypool, settings, PLATFORM_NAMES_TO_CONSTANTS)
# This has to be imported after the settings so statsd knows where to log to.
from django_statsd.clients import statsd
try:
from compare import version_int
except ImportError:
from olympia.versions.compare import version_int
from olympia.constants import applications, base
import olympia.core.logger
# Go configure the log.
log_configure()
log = olympia.core.logger.getLogger('z.services')
class Update(object):
def __init__(self, data, compat_mode='strict'):
self.conn, self.cursor = None, None
self.data = data.copy()
self.data['row'] = {}
self.version_int = 0
self.compat_mode = compat_mode
def is_valid(self):
        # If you are accessing this from unit tests, assign your own
        # cursor before calling is_valid (see the sketch after this
        # method).
if not self.cursor:
self.conn = mypool.connect()
self.cursor = self.conn.cursor()
data = self.data
# Version can be blank.
data['version'] = data.get('version', '')
for field in ['reqVersion', 'id', 'appID', 'appVersion']:
if field not in data:
return False
app = applications.APP_GUIDS.get(data['appID'])
if not app:
return False
data['app_id'] = app.id
sql = """SELECT id, status, addontype_id, guid FROM addons
WHERE guid = %(guid)s AND
inactive = 0 AND
status NOT IN (%(STATUS_DELETED)s, %(STATUS_DISABLED)s)
LIMIT 1;"""
self.cursor.execute(sql, {
'guid': self.data['id'],
'STATUS_DELETED': base.STATUS_DELETED,
'STATUS_DISABLED': base.STATUS_DISABLED,
})
result = self.cursor.fetchone()
if result is None:
return False
data['id'], data['addon_status'], data['type'], data['guid'] = result
data['version_int'] = version_int(data['appVersion'])
if 'appOS' in data:
for k, v in PLATFORM_NAMES_TO_CONSTANTS.items():
if k in data['appOS']:
data['appOS'] = v
break
else:
data['appOS'] = None
return True
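    # A minimal sketch of the unit-test pattern mentioned above; the query
    # parameters and cursor are illustrative assumptions:
    #
    #   update = Update({'reqVersion': '2', 'id': 'some@guid',
    #                    'appID': '{some-app-guid}', 'appVersion': '60.0'})
    #   update.cursor = my_fake_cursor  # injected before is_valid()
    #   assert update.is_valid()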
def get_update(self):
data = self.data
data['STATUS_APPROVED'] = base.STATUS_APPROVED
data['RELEASE_CHANNEL_LISTED'] = base.RELEASE_CHANNEL_LISTED
sql = ["""
SELECT
addons.guid as guid, addons.addontype_id as type,
addons.inactive as disabled_by_user, appmin.version as min,
appmax.version as max, files.id as file_id,
files.status as file_status, files.hash,
files.filename, versions.id as version_id,
files.datestatuschanged as datestatuschanged,
files.strict_compatibility as strict_compat,
versions.releasenotes, versions.version as version
FROM versions
INNER JOIN addons
ON addons.id = versions.addon_id AND addons.id = %(id)s
INNER JOIN applications_versions
ON applications_versions.version_id = versions.id
INNER JOIN appversions appmin
ON appmin.id = applications_versions.min
AND appmin.application_id = %(app_id)s
INNER JOIN appversions appmax
ON appmax.id = applications_versions.max
AND appmax.application_id = %(app_id)s
INNER JOIN files
ON files.version_id = versions.id AND (files.platform_id = 1
"""]
if data.get('appOS'):
sql.append(' OR files.platform_id = %(appOS)s')
sql.append("""
)
-- Find a reference to the user's current version, if it exists.
-- These should never be inner joins. We need results even if we
-- can't find the current version.
LEFT JOIN versions curver
ON curver.addon_id = addons.id AND curver.version = %(version)s
LEFT JOIN files curfile
ON curfile.version_id = curver.id
WHERE
versions.deleted = 0 AND
versions.channel = %(RELEASE_CHANNEL_LISTED)s AND
files.status = %(STATUS_APPROVED)s
""")
sql.append('AND appmin.version_int <= %(version_int)s ')
if self.compat_mode == 'ignore':
pass # no further SQL modification required.
elif self.compat_mode == 'normal':
            # When a file has strict_compatibility enabled, or has binary
            # components, default-to-compatible is disabled.
sql.append("""AND
CASE WHEN files.strict_compatibility = 1 OR
files.binary_components = 1
THEN appmax.version_int >= %(version_int)s ELSE 1 END
""")
# Filter out versions that don't have the minimum maxVersion
# requirement to qualify for default-to-compatible.
d2c_min = applications.D2C_MIN_VERSIONS.get(data['app_id'])
if d2c_min:
data['d2c_min_version'] = version_int(d2c_min)
sql.append("AND appmax.version_int >= %(d2c_min_version)s ")
else: # Not defined or 'strict'.
sql.append('AND appmax.version_int >= %(version_int)s ')
sql.append('ORDER BY versions.id DESC LIMIT 1;')
self.cursor.execute(''.join(sql), data)
result = self.cursor.fetchone()
if result:
row = dict(zip([
'guid', 'type', 'disabled_by_user', 'min', 'max',
'file_id', 'file_status', 'hash', 'filename', 'version_id',
'datestatuschanged', 'strict_compat', 'releasenotes',
'version'],
list(result)))
row['type'] = base.ADDON_SLUGS_UPDATE[row['type']]
row['url'] = get_cdn_url(data['id'], row)
row['appguid'] = applications.APPS_ALL[data['app_id']].guid
data['row'] = row
return True
return False
def get_output(self):
if self.is_valid():
if self.get_update():
contents = self.get_success_output()
else:
contents = self.get_no_updates_output()
else:
contents = self.get_error_output()
self.cursor.close()
if self.conn:
self.conn.close()
return json.dumps(contents)
def get_error_output(self):
return {}
def get_no_updates_output(self):
return {
'addons': {
self.data['guid']: {
'updates': []
}
}
}
def get_success_output(self):
data = self.data['row']
update = {
'version': data['version'],
'update_link': data['url'],
'applications': {
'gecko': {
'strict_min_version': data['min']
}
}
}
if data['strict_compat']:
update['applications']['gecko']['strict_max_version'] = data['max']
if data['hash']:
update['update_hash'] = data['hash']
if data['releasenotes']:
update['update_info_url'] = '%s%s%s/%%APP_LOCALE%%/' % (
settings.SITE_URL, '/versions/updateInfo/', data['version_id'])
return {
'addons': {
self.data['guid']: {
'updates': [update]
}
}
}
def format_date(self, secs):
return '%s GMT' % formatdate(time() + secs)[:25]
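    # For illustration, formatdate() returns a string such as
    # 'Sun, 06 Nov 1994 08:49:37 -0000'; slicing to 25 characters drops the
    # numeric zone offset, and appending ' GMT' yields the RFC 1123 form
    # 'Sun, 06 Nov 1994 08:49:37 GMT' expected in HTTP headers.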
def get_headers(self, length):
content_type = 'application/json'
return [('Content-Type', content_type),
('Cache-Control', 'public, max-age=3600'),
('Last-Modified', self.format_date(0)),
('Expires', self.format_date(3600)),
('Content-Length', str(length))]
def application(environ, start_response):
status = '200 OK'
with statsd.timer('services.update'):
data = dict(parse_qsl(environ['QUERY_STRING']))
compat_mode = data.pop('compatMode', 'strict')
try:
update = Update(data, compat_mode)
output = force_bytes(update.get_output())
start_response(status, update.get_headers(len(output)))
except Exception as e:
log.exception(e)
raise
return [output]
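# A minimal sketch of exercising the WSGI callable directly (the environ
# values and start_response stub are illustrative assumptions):
#
#   def start_response(status, headers):
#       print(status, headers)
#
#   environ = {'QUERY_STRING': 'reqVersion=2&id=some@guid'
#                              '&appID=%7Bsome-app-guid%7D&appVersion=60.0'}
#   body = application(environ, start_response)[0]  # JSON bytes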
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.apps import apps
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.forms import ChoiceField, widgets
from django.template import TemplateDoesNotExist
from django.template.loader import select_template
from django.utils.html import format_html
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
from django.utils.safestring import mark_safe
from django.utils.encoding import python_2_unicode_compatible
if 'cmsplugin_cascade' not in settings.INSTALLED_APPS:
raise ImproperlyConfigured("Please add 'cmsplugin_cascade' to your INSTALLED_APPS")
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.fields import GlossaryField
from cmsplugin_cascade.plugin_base import CascadePluginBase
from cmsplugin_cascade.link.forms import LinkForm
from cmsplugin_cascade.link.plugin_base import LinkPluginBase, LinkElementMixin
from django_select2.forms import HeavySelect2Widget
from shop.conf import app_settings
from shop.forms.base import DialogFormMixin
from shop.models.cart import CartModel
from shop.models.product import ProductModel
class ShopPluginBase(CascadePluginBase):
module = "Shop"
require_parent = False
allow_children = False
@python_2_unicode_compatible
class ShopLinkElementMixin(LinkElementMixin):
def __str__(self):
return self.plugin_class.get_identifier(self)
class ShopLinkPluginBase(ShopPluginBase):
"""
Base plugin for arbitrary buttons used during various checkout pages.
"""
allow_children = False
parent_classes = []
require_parent = False
ring_plugin = 'ShopLinkPlugin'
class Media:
js = ['cascade/js/admin/linkplugin.js', 'shop/js/admin/shoplinkplugin.js']
@classmethod
def get_link(cls, obj):
link = obj.glossary.get('link', {})
if link.get('type') == 'cmspage':
if 'model' in link and 'pk' in link:
if not hasattr(obj, '_link_model'):
Model = apps.get_model(*link['model'].split('.'))
try:
obj._link_model = Model.objects.get(pk=link['pk'])
except Model.DoesNotExist:
obj._link_model = None
if obj._link_model:
return obj._link_model.get_absolute_url()
else:
            # use the link type as a special action keyword
return link.get('type')
class ShopButtonPluginBase(ShopLinkPluginBase):
"""
Base plugin for arbitrary buttons used during various checkout pages.
"""
fields = ('link_content', ('link_type', 'cms_page', 'section',), 'glossary',)
class Media:
css = {'all': ('cascade/css/admin/bootstrap.min.css', 'cascade/css/admin/bootstrap-theme.min.css',)}
@classmethod
def get_identifier(cls, instance):
return mark_safe(instance.glossary.get('link_content', ''))
class ProductSelect2Widget(HeavySelect2Widget):
def render(self, name, value, attrs=None):
try:
result = app_settings.PRODUCT_SELECT_SERIALIZER(ProductModel.objects.get(pk=value))
except (ProductModel.DoesNotExist, ValueError):
pass
else:
self.choices.append((value, result.data['text']),)
html = super(ProductSelect2Widget, self).render(name, value, attrs=attrs)
return html
class ProductSelectField(ChoiceField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('widget', ProductSelect2Widget(data_view='shop:select-product'))
super(ProductSelectField, self).__init__(*args, **kwargs)
def clean(self, value):
"Since the ProductSelectField does not specify choices by itself, accept any returned value"
try:
return int(value)
except (TypeError, ValueError):
pass
class CatalogLinkForm(LinkForm):
"""
    Alternative implementation of `cmsplugin_cascade.TextLinkForm`, which allows linking
    to the Product model using its method ``get_absolute_url``.
Note: In this form class the field ``product`` is missing. It is added later, when the shop's
Product knows about its materialized model.
"""
LINK_TYPE_CHOICES = (('cmspage', _("CMS Page")), ('product', _("Product")),
('exturl', _("External URL")), ('email', _("Mail To")),)
product = ProductSelectField(required=False, label='',
help_text=_("An internal link onto a product from the shop"))
def clean_product(self):
if self.cleaned_data.get('link_type') == 'product':
app_label = ProductModel._meta.app_label
self.cleaned_data['link_data'] = {
'type': 'product',
'model': '{0}.{1}'.format(app_label, ProductModel.__name__),
'pk': self.cleaned_data['product'],
}
def set_initial_product(self, initial):
try:
# check if that product still exists, otherwise return nothing
Model = apps.get_model(*initial['link']['model'].split('.'))
initial['product'] = Model.objects.get(pk=initial['link']['pk']).pk
except (KeyError, ValueError, ObjectDoesNotExist):
pass
class CatalogLinkPluginBase(LinkPluginBase):
"""
    Alternative implementation of ``cmsplugin_cascade.link.DefaultLinkPluginBase`` which adds
    another link type, namely "Product", to set links to arbitrary products of this shop.
"""
fields = (('link_type', 'cms_page', 'section', 'product', 'ext_url', 'mail_to',), 'glossary',)
ring_plugin = 'ShopLinkPlugin'
class Media:
css = {'all': ['shop/css/admin/editplugin.css']}
js = ['shop/js/admin/shoplinkplugin.js']
class DialogFormPluginBase(ShopPluginBase):
"""
Base class for all plugins adding a dialog form to a placeholder field.
"""
require_parent = True
parent_classes = ('BootstrapColumnPlugin', 'ProcessStepPlugin', 'BootstrapPanelPlugin',
'SegmentPlugin', 'SimpleWrapperPlugin', 'ValidateSetOfFormsPlugin')
RENDER_CHOICES = [('form', _("Form dialog")), ('summary', _("Static summary"))]
render_type = GlossaryField(
widgets.RadioSelect(choices=RENDER_CHOICES),
label=_("Render as"),
initial='form',
help_text=_("A dialog can also be rendered as a box containing a read-only summary."),
)
headline_legend = GlossaryField(
widgets.CheckboxInput(),
label=_("Headline Legend"),
initial=True,
help_text=_("Render a legend inside the dialog's headline."),
)
@classmethod
def register_plugin(cls, plugin):
"""
Register plugins derived from this class with this function instead of
`plugin_pool.register_plugin`, so that dialog plugins without a corresponding
form class are not registered.
"""
if not issubclass(plugin, cls):
msg = "Can not register plugin class `{}`, since is does not inherit from `{}`."
raise ImproperlyConfigured(msg.format(plugin.__name__, cls.__name__))
plugin_pool.register_plugin(plugin)
def get_form_class(self, instance):
try:
return import_string(self.form_class)
except AttributeError:
msg = "Can not register plugin class '{}', since it neither defines 'form_class' " \
"nor overrides 'get_form_class()'."
raise ImproperlyConfigured(msg.format(self.__name__))
@classmethod
def get_identifier(cls, instance):
render_type = instance.glossary.get('render_type')
render_type = dict(cls.RENDER_CHOICES).get(render_type, '')
return format_html(pgettext_lazy('get_identifier', "as {}"), render_type)
def get_form_data(self, context, instance, placeholder):
"""
Returns data to initialize the corresponding dialog form.
This method must return a dictionary containing
* either `instance` - a Python object to initialize the form class for this plugin,
* or `initial` - a dictionary containing initial form data, or if both are set, values
from `initial` override those of `instance`.
"""
if issubclass(self.get_form_class(instance), DialogFormMixin):
try:
cart = CartModel.objects.get_from_request(context['request'])
cart.update(context['request'])
except CartModel.DoesNotExist:
cart = None
return {'cart': cart}
return {}
def get_render_template(self, context, instance, placeholder):
render_type = instance.glossary.get('render_type')
if render_type not in ('form', 'summary',):
render_type = 'form'
try:
template_names = [
'{0}/checkout/{1}'.format(app_settings.APP_LABEL, self.template_leaf_name).format(render_type),
'shop/checkout/{}'.format(self.template_leaf_name).format(render_type),
]
return select_template(template_names)
except (AttributeError, TemplateDoesNotExist):
return self.render_template
def render(self, context, instance, placeholder):
"""
Return the context to render a DialogFormPlugin
"""
request = context['request']
form_data = self.get_form_data(context, instance, placeholder)
request._plugin_order = getattr(request, '_plugin_order', 0) + 1
if not isinstance(form_data.get('initial'), dict):
form_data['initial'] = {}
form_data['initial'].update(plugin_id=instance.pk, plugin_order=request._plugin_order)
bound_form = self.get_form_class(instance)(**form_data)
context[bound_form.form_name] = bound_form
context['headline_legend'] = bool(instance.glossary.get('headline_legend', True))
return self.super(DialogFormPluginBase, self).render(context, instance, placeholder)
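# A minimal sketch of a concrete dialog plugin built on the base class above
# (the names and dotted path are illustrative assumptions):
#
#   class MyCheckoutFormPlugin(DialogFormPluginBase):
#       name = "My Checkout Form"
#       form_class = 'myshop.forms.MyCheckoutForm'  # resolved by
#                                                   # get_form_class()
#
#   DialogFormPluginBase.register_plugin(MyCheckoutFormPlugin)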
|
|
from sympy import (
Abs, Dummy, Eq, Gt,
LambertW, Piecewise, Poly, Rational, S, Symbol,
acos, atan, atanh, cos, erf, erfinv, erfc, erfcinv,
exp, log, pi, sin, sinh, sqrt, symbols,
tan, tanh, atan2, arg,
Lambda, imageset, cot, acot, I, EmptySet, Union, E, Interval, oo)
from sympy.core.function import nfloat
from sympy.functions.elementary.complexes import im, re
from sympy.functions.elementary.hyperbolic import HyperbolicFunction
from sympy.functions.elementary.trigonometric import TrigonometricFunction
from sympy.polys.rootoftools import RootOf
from sympy.sets import FiniteSet
from sympy.utilities.pytest import XFAIL, raises, skip
from sympy.utilities.randtest import verify_numerically as tn
from sympy.physics.units import cm
from sympy.solvers.solveset import (
solveset_real, domain_check, solveset_complex,
_is_function_class_equation, invert_real, invert_complex, solveset)
a = Symbol('a', real=True)
b = Symbol('b', real=True)
c = Symbol('c', real=True)
x = Symbol('x', real=True)
y = Symbol('y', real=True)
z = Symbol('z', real=True)
q = Symbol('q', real=True)
m = Symbol('m', real=True)
n = Symbol('n', real=True)
def test_invert_real():
x = Symbol('x', real=True)
x = Dummy(real=True)
n = Symbol('n')
d = Dummy()
assert solveset(abs(x) - n, x) == solveset(abs(x) - d, x) == EmptySet()
n = Symbol('n', real=True)
assert invert_real(x + 3, y, x) == (x, FiniteSet(y - 3))
assert invert_real(x*3, y, x) == (x, FiniteSet(y / 3))
assert invert_real(exp(x), y, x) == (x, FiniteSet(log(y)))
assert invert_real(exp(3*x), y, x) == (x, FiniteSet(log(y) / 3))
assert invert_real(exp(x + 3), y, x) == (x, FiniteSet(log(y) - 3))
assert invert_real(exp(x) + 3, y, x) == (x, FiniteSet(log(y - 3)))
assert invert_real(exp(x)*3, y, x) == (x, FiniteSet(log(y / 3)))
assert invert_real(log(x), y, x) == (x, FiniteSet(exp(y)))
assert invert_real(log(3*x), y, x) == (x, FiniteSet(exp(y) / 3))
assert invert_real(log(x + 3), y, x) == (x, FiniteSet(exp(y) - 3))
assert invert_real(Abs(x), y, x) == (x, FiniteSet(-y, y))
assert invert_real(2**x, y, x) == (x, FiniteSet(log(y)/log(2)))
assert invert_real(2**exp(x), y, x) == (x, FiniteSet(log(log(y)/log(2))))
assert invert_real(x**2, y, x) == (x, FiniteSet(sqrt(y), -sqrt(y)))
assert invert_real(x**Rational(1, 2), y, x) == (x, FiniteSet(y**2))
raises(ValueError, lambda: invert_real(x, x, x))
raises(ValueError, lambda: invert_real(x**pi, y, x))
raises(ValueError, lambda: invert_real(S.One, y, x))
assert invert_real(x**31 + x, y, x) == (x**31 + x, FiniteSet(y))
assert invert_real(Abs(x**31 + x + 1), y, x) == (x**31 + x,
FiniteSet(-y - 1, y - 1))
assert invert_real(tan(x), y, x) == \
(x, imageset(Lambda(n, n*pi + atan(y)), S.Integers))
assert invert_real(tan(exp(x)), y, x) == \
(x, imageset(Lambda(n, log(n*pi + atan(y))), S.Integers))
assert invert_real(cot(x), y, x) == \
(x, imageset(Lambda(n, n*pi + acot(y)), S.Integers))
assert invert_real(cot(exp(x)), y, x) == \
(x, imageset(Lambda(n, log(n*pi + acot(y))), S.Integers))
assert invert_real(tan(tan(x)), y, x) == \
(tan(x), imageset(Lambda(n, n*pi + atan(y)), S.Integers))
x = Symbol('x', positive=True)
assert invert_real(x**pi, y, x) == (x, FiniteSet(y**(1/pi)))
def test_invert_complex():
assert invert_complex(x + 3, y, x) == (x, FiniteSet(y - 3))
assert invert_complex(x*3, y, x) == (x, FiniteSet(y / 3))
assert invert_complex(exp(x), y, x) == \
(x, imageset(Lambda(n, I*(2*pi*n + arg(y)) + log(Abs(y))), S.Integers))
assert invert_complex(log(x), y, x) == (x, FiniteSet(exp(y)))
raises(ValueError, lambda: invert_real(S.One, y, x))
raises(ValueError, lambda: invert_complex(x, x, x))
def test_domain_check():
assert domain_check(1/(1 + (1/(x+1))**2), x, -1) is False
assert domain_check(x**2, x, 0) is True
assert domain_check(x, x, oo) is False
assert domain_check(0, x, oo) is False
def test_is_function_class_equation():
from sympy.abc import x, a
assert _is_function_class_equation(TrigonometricFunction,
tan(x), x) is True
assert _is_function_class_equation(TrigonometricFunction,
tan(x) - 1, x) is True
assert _is_function_class_equation(TrigonometricFunction,
tan(x) + sin(x), x) is True
assert _is_function_class_equation(TrigonometricFunction,
tan(x) + sin(x) - a, x) is True
assert _is_function_class_equation(TrigonometricFunction,
sin(x)*tan(x) + sin(x), x) is True
assert _is_function_class_equation(TrigonometricFunction,
sin(x)*tan(x + a) + sin(x), x) is True
assert _is_function_class_equation(TrigonometricFunction,
sin(x)*tan(x*a) + sin(x), x) is True
assert _is_function_class_equation(TrigonometricFunction,
a*tan(x) - 1, x) is True
assert _is_function_class_equation(TrigonometricFunction,
tan(x)**2 + sin(x) - 1, x) is True
assert _is_function_class_equation(TrigonometricFunction,
tan(x**2), x) is False
assert _is_function_class_equation(TrigonometricFunction,
tan(x**2) + sin(x), x) is False
assert _is_function_class_equation(TrigonometricFunction,
tan(x)**sin(x), x) is False
assert _is_function_class_equation(TrigonometricFunction,
tan(sin(x)) + sin(x), x) is False
assert _is_function_class_equation(HyperbolicFunction,
tanh(x), x) is True
assert _is_function_class_equation(HyperbolicFunction,
tanh(x) - 1, x) is True
assert _is_function_class_equation(HyperbolicFunction,
tanh(x) + sinh(x), x) is True
assert _is_function_class_equation(HyperbolicFunction,
tanh(x) + sinh(x) - a, x) is True
assert _is_function_class_equation(HyperbolicFunction,
sinh(x)*tanh(x) + sinh(x), x) is True
assert _is_function_class_equation(HyperbolicFunction,
sinh(x)*tanh(x + a) + sinh(x), x) is True
assert _is_function_class_equation(HyperbolicFunction,
sinh(x)*tanh(x*a) + sinh(x), x) is True
assert _is_function_class_equation(HyperbolicFunction,
a*tanh(x) - 1, x) is True
assert _is_function_class_equation(HyperbolicFunction,
tanh(x)**2 + sinh(x) - 1, x) is True
assert _is_function_class_equation(HyperbolicFunction,
tanh(x**2), x) is False
assert _is_function_class_equation(HyperbolicFunction,
tanh(x**2) + sinh(x), x) is False
assert _is_function_class_equation(HyperbolicFunction,
tanh(x)**sinh(x), x) is False
assert _is_function_class_equation(HyperbolicFunction,
tanh(sinh(x)) + sinh(x), x) is False
def test_garbage_input():
raises(ValueError, lambda: solveset_real([x], x))
raises(ValueError, lambda: solveset_real(x, pi))
raises(ValueError, lambda: solveset_real(x, x**2))
raises(ValueError, lambda: solveset_complex([x], x))
raises(ValueError, lambda: solveset_complex(x, pi))
def test_solve_mul():
assert solveset_real((a*x + b)*(exp(x) - 3), x) == \
FiniteSet(-b/a, log(3))
assert solveset_real((2*x + 8)*(8 + exp(x)), x) == FiniteSet(S(-4))
assert solveset_real(x/log(x), x) == EmptySet()
def test_solve_invert():
assert solveset_real(exp(x) - 3, x) == FiniteSet(log(3))
assert solveset_real(log(x) - 3, x) == FiniteSet(exp(3))
assert solveset_real(3**(x + 2), x) == FiniteSet()
assert solveset_real(3**(2 - x), x) == FiniteSet()
b = Symbol('b', positive=True)
y = Symbol('y', positive=True)
assert solveset_real(y - b*exp(a/x), x) == FiniteSet(a/log(y/b))
# issue 4504
assert solveset_real(2**x - 10, x) == FiniteSet(log(10)/log(2))
def test_errorinverses():
assert solveset_real(erf(x) - S.One/2, x) == \
FiniteSet(erfinv(S.One/2))
assert solveset_real(erfinv(x) - 2, x) == \
FiniteSet(erf(2))
assert solveset_real(erfc(x) - S.One, x) == \
FiniteSet(erfcinv(S.One))
assert solveset_real(erfcinv(x) - 2, x) == FiniteSet(erfc(2))
def test_solve_polynomial():
assert solveset_real(3*x - 2, x) == FiniteSet(Rational(2, 3))
assert solveset_real(x**2 - 1, x) == FiniteSet(-S(1), S(1))
assert solveset_real(x - y**3, x) == FiniteSet(y ** 3)
a11, a12, a21, a22, b1, b2 = symbols('a11, a12, a21, a22, b1, b2')
assert solveset_real(x**3 - 15*x - 4, x) == FiniteSet(
-2 + 3 ** Rational(1, 2),
S(4),
-2 - 3 ** Rational(1, 2))
assert solveset_real(sqrt(x) - 1, x) == FiniteSet(1)
assert solveset_real(sqrt(x) - 2, x) == FiniteSet(4)
assert solveset_real(x**Rational(1, 4) - 2, x) == FiniteSet(16)
assert solveset_real(x**Rational(1, 3) - 3, x) == FiniteSet(27)
assert len(solveset_real(x**5 + x**3 + 1, x)) == 1
assert len(solveset_real(-2*x**3 + 4*x**2 - 2*x + 6, x)) > 0
def test_return_root_of():
f = x**5 - 15*x**3 - 5*x**2 + 10*x + 20
s = list(solveset_complex(f, x))
for root in s:
assert root.func == RootOf
# if one uses solve to get the roots of a polynomial that has a RootOf
# solution, make sure that the use of nfloat during the solve process
# doesn't fail. Note: if you want numerical solutions to a polynomial
# it is *much* faster to use nroots to get them than to solve the
# equation only to get RootOf solutions which are then numerically
    # evaluated. So for eq = x**5 + 3*x + 7 do Poly(eq).nroots() rather
    # than [i.n() for i in solve(eq)] to get the numerical roots of eq
    # (a short sketch follows this test).
assert nfloat(list(solveset_complex(x**5 + 3*x**3 + 7, x))[0],
exponent=False) == RootOf(x**5 + 3*x**3 + 7, 0).n()
sol = list(solveset_complex(x**6 - 2*x + 2, x))
assert all(isinstance(i, RootOf) for i in sol) and len(sol) == 6
f = x**5 - 15*x**3 - 5*x**2 + 10*x + 20
s = list(solveset_complex(f, x))
for root in s:
assert root.func == RootOf
s = x**5 + 4*x**3 + 3*x**2 + S(7)/4
assert solveset_complex(s, x) == \
FiniteSet(*Poly(s*4, domain='ZZ').all_roots())
# XXX: this comparison should work without converting the FiniteSet to list
# See #7876
eq = x*(x - 1)**2*(x + 1)*(x**6 - x + 1)
assert list(solveset_complex(eq, x)) == \
list(FiniteSet(-1, 0, 1, RootOf(x**6 - x + 1, 0),
RootOf(x**6 - x + 1, 1),
RootOf(x**6 - x + 1, 2),
RootOf(x**6 - x + 1, 3),
RootOf(x**6 - x + 1, 4),
RootOf(x**6 - x + 1, 5)))
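# A short sketch of the nroots() advice from the comment above (illustrative):
#
#   eq = x**5 + 3*x + 7
#   Poly(eq, x).nroots()                       # fast numerical roots
#   [i.n() for i in solveset_complex(eq, x)]   # slower: RootOf, then evalf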
def test__has_rational_power():
from sympy.solvers.solveset import _has_rational_power
assert _has_rational_power(sqrt(2), x)[0] is False
assert _has_rational_power(x*sqrt(2), x)[0] is False
assert _has_rational_power(x**2*sqrt(x), x) == (True, 2)
assert _has_rational_power(sqrt(2)*x**(S(1)/3), x) == (True, 3)
assert _has_rational_power(sqrt(x)*x**(S(1)/3), x) == (True, 6)
def test_solveset_sqrt_1():
assert solveset_real(sqrt(5*x + 6) - 2 - x, x) == \
FiniteSet(-S(1), S(2))
assert solveset_real(sqrt(x - 1) - x + 7, x) == FiniteSet(10)
assert solveset_real(sqrt(x - 2) - 5, x) == FiniteSet(27)
assert solveset_real(sqrt(x) - 2 - 5, x) == FiniteSet(49)
assert solveset_real(sqrt(x**3), x) == FiniteSet(0)
assert solveset_real(sqrt(x - 1), x) == FiniteSet(1)
def test_solveset_sqrt_2():
# http://tutorial.math.lamar.edu/Classes/Alg/SolveRadicalEqns.aspx#Solve_Rad_Ex2_a
assert solveset_real(sqrt(2*x - 1) - sqrt(x - 4) - 2, x) == \
FiniteSet(S(5), S(13))
assert solveset_real(sqrt(x + 7) + 2 - sqrt(3 - x), x) == \
FiniteSet(-6)
# http://www.purplemath.com/modules/solverad.htm
assert solveset_real(sqrt(17*x - sqrt(x**2 - 5)) - 7, x) == \
FiniteSet(3)
eq = x + 1 - (x**4 + 4*x**3 - x)**Rational(1, 4)
assert solveset_real(eq, x) == FiniteSet(-S(1)/2, -S(1)/3)
eq = sqrt(2*x + 9) - sqrt(x + 1) - sqrt(x + 4)
assert solveset_real(eq, x) == FiniteSet(0)
eq = sqrt(x + 4) + sqrt(2*x - 1) - 3*sqrt(x - 1)
assert solveset_real(eq, x) == FiniteSet(5)
eq = sqrt(x)*sqrt(x - 7) - 12
assert solveset_real(eq, x) == FiniteSet(16)
eq = sqrt(x - 3) + sqrt(x) - 3
assert solveset_real(eq, x) == FiniteSet(4)
eq = sqrt(2*x**2 - 7) - (3 - x)
assert solveset_real(eq, x) == FiniteSet(-S(8), S(2))
# others
eq = sqrt(9*x**2 + 4) - (3*x + 2)
assert solveset_real(eq, x) == FiniteSet(0)
assert solveset_real(sqrt(x - 3) - sqrt(x) - 3, x) == FiniteSet()
eq = (2*x - 5)**Rational(1, 3) - 3
assert solveset_real(eq, x) == FiniteSet(16)
assert solveset_real(sqrt(x) + sqrt(sqrt(x)) - 4, x) == \
FiniteSet((-S.Half + sqrt(17)/2)**4)
eq = sqrt(x) - sqrt(x - 1) + sqrt(sqrt(x))
assert solveset_real(eq, x) == FiniteSet()
eq = (sqrt(x) + sqrt(x + 1) + sqrt(1 - x) - 6*sqrt(5)/5)
ans = solveset_real(eq, x)
ra = S('''-1484/375 - 4*(-1/2 + sqrt(3)*I/2)*(-12459439/52734375 +
114*sqrt(12657)/78125)**(1/3) - 172564/(140625*(-1/2 +
sqrt(3)*I/2)*(-12459439/52734375 + 114*sqrt(12657)/78125)**(1/3))''')
rb = S(4)/5
assert all(abs(eq.subs(x, i).n()) < 1e-10 for i in (ra, rb)) and \
len(ans) == 2 and \
set([i.n(chop=True) for i in ans]) == \
set([i.n(chop=True) for i in (ra, rb)])
assert solveset_real(sqrt(x) + x**Rational(1, 3) +
x**Rational(1, 4), x) == FiniteSet(0)
assert solveset_real(x/sqrt(x**2 + 1), x) == FiniteSet(0)
eq = (x - y**3)/((y**2)*sqrt(1 - y**2))
assert solveset_real(eq, x) == FiniteSet(y**3)
# issue 4497
assert solveset_real(1/(5 + x)**(S(1)/5) - 9, x) == \
FiniteSet(-295244/S(59049))
@XFAIL
def test_solve_sqrt_fail():
# this only works if we check real_root(eq.subs(x, S(1)/3))
# but checksol doesn't work like that
eq = (x**3 - 3*x**2)**Rational(1, 3) + 1 - x
assert solveset_real(eq, x) == FiniteSet(S(1)/3)
def test_solve_sqrt_3():
R = Symbol('R')
eq = sqrt(2)*R*sqrt(1/(R + 1)) + (R + 1)*(sqrt(2)*sqrt(1/(R + 1)) - 1)
sol = solveset_complex(eq, R)
assert sol == FiniteSet(*[S(5)/3 + 4*sqrt(10)*cos(atan(3*sqrt(111)/251)/3)/3,
-sqrt(10)*cos(atan(3*sqrt(111)/251)/3)/3 + 40*re(1/((-S(1)/2 -
sqrt(3)*I/2)*(S(251)/27 + sqrt(111)*I/9)**(S(1)/3)))/9 +
sqrt(30)*sin(atan(3*sqrt(111)/251)/3)/3 + S(5)/3 +
I*(-sqrt(30)*cos(atan(3*sqrt(111)/251)/3)/3 -
sqrt(10)*sin(atan(3*sqrt(111)/251)/3)/3 + 40*im(1/((-S(1)/2 -
sqrt(3)*I/2)*(S(251)/27 + sqrt(111)*I/9)**(S(1)/3)))/9)])
# the number of real roots will depend on the value of m: for m=1 there are 4
# and for m=-1 there are none.
eq = -sqrt((m - q)**2 + (-m/(2*q) + S(1)/2)**2) + sqrt((-m**2/2 - sqrt(
4*m**4 - 4*m**2 + 8*m + 1)/4 - S(1)/4)**2 + (m**2/2 - m - sqrt(
4*m**4 - 4*m**2 + 8*m + 1)/4 - S(1)/4)**2)
raises(NotImplementedError, lambda: solveset_real(eq, q))
def test_solve_polynomial_symbolic_param():
assert solveset_complex((x**2 - 1)**2 - a, x) == \
FiniteSet(sqrt(1 + sqrt(a)), -sqrt(1 + sqrt(a)),
sqrt(1 - sqrt(a)), -sqrt(1 - sqrt(a)))
    # As a side effect of the attempt to make Set.contains behave
    # symbolically, SetDifference on FiniteSet isn't working very well.
    # Simple operations like `FiniteSet(a) - FiniteSet(-b)` raise `TypeError`.
    # The likely course of action will be making such operations return a
    # SetDifference object, which will also change the expected output of
    # the given tests. Until SetDifference becomes well behaved again, the
    # following tests are kept as comments.
# # issue 4508
# assert solveset_complex(y - b*x/(a + x), x) == \
# FiniteSet(-a*y/(y - b))
#
# # issue 4507
# assert solveset_complex(y - b/(1 + a*x), x) == \
# FiniteSet((b - y)/(a*y))
def test_solve_rational():
assert solveset_real(1/x + 1, x) == FiniteSet(-S.One)
assert solveset_real(1/exp(x) - 1, x) == FiniteSet(0)
assert solveset_real(x*(1 - 5/x), x) == FiniteSet(5)
assert solveset_real(2*x/(x + 2) - 1, x) == FiniteSet(2)
assert solveset_real((x**2/(7 - x)).diff(x), x) == \
FiniteSet(S(0), S(14))
def test_solveset_real_gen_is_pow():
assert solveset_real(sqrt(1) + 1, x) == EmptySet()
def test_no_sol():
assert solveset_real(4, x) == EmptySet()
assert solveset_real(exp(x), x) == EmptySet()
assert solveset_real(x**2 + 1, x) == EmptySet()
assert solveset_real(-3*a/sqrt(x), x) == EmptySet()
assert solveset_real(1/x, x) == EmptySet()
assert solveset_real(-(1 + x)/(2 + x)**2 + 1/(2 + x), x) == \
EmptySet()
def test_sol_zero_real():
assert solveset_real(0, x) == S.Reals
assert solveset_real(-x**2 - 2*x + (x + 1)**2 - 1, x) == S.Reals
def test_no_sol_rational_extraneous():
assert solveset_real((x/(x + 1) + 3)**(-2), x) == EmptySet()
assert solveset_real((x - 1)/(1 + 1/(x - 1)), x) == EmptySet()
def test_solve_polynomial_cv_1a():
"""
Test for solving on equations that can be converted to
a polynomial equation using the change of variable y -> x**Rational(p, q)
"""
assert solveset_real(sqrt(x) - 1, x) == FiniteSet(1)
assert solveset_real(sqrt(x) - 2, x) == FiniteSet(4)
assert solveset_real(x**Rational(1, 4) - 2, x) == FiniteSet(16)
assert solveset_real(x**Rational(1, 3) - 3, x) == FiniteSet(27)
assert solveset_real(x*(x**(S(1) / 3) - 3), x) == \
FiniteSet(S(0), S(27))
def test_solveset_real_rational():
"""Test solveset_real for rational functions"""
assert solveset_real((x - y**3) / ((y**2)*sqrt(1 - y**2)), x) \
== FiniteSet(y**3)
# issue 4486
assert solveset_real(2*x/(x + 2) - 1, x) == FiniteSet(2)
def test_solveset_real_log():
assert solveset_real(log((x-1)*(x+1)), x) == \
FiniteSet(sqrt(2), -sqrt(2))
def test_poly_gens():
assert solveset_real(4**(2*(x**2) + 2*x) - 8, x) == \
FiniteSet(-Rational(3, 2), S.Half)
@XFAIL
def test_uselogcombine_1():
assert solveset_real(log(x - 3) + log(x + 3), x) == \
FiniteSet(sqrt(10))
assert solveset_real(log(x + 1) - log(2*x - 1), x) == FiniteSet(2)
assert solveset_real(log(x + 3) + log(1 + 3/x) - 3) == FiniteSet(
-3 + sqrt(-12 + exp(3))*exp(S(3)/2)/2 + exp(3)/2,
-sqrt(-12 + exp(3))*exp(S(3)/2)/2 - 3 + exp(3)/2)
@XFAIL
def test_uselogcombine_2():
eq = z - log(x) + log(y/(x*(-1 + y**2/x**2)))
assert solveset_real(eq, x) == \
FiniteSet(-sqrt(y*(y - exp(z))), sqrt(y*(y - exp(z))))
def test_solve_abs():
assert solveset_real(Abs(x) - 2, x) == FiniteSet(-2, 2)
assert solveset_real(Abs(x + 3) - 2*Abs(x - 3), x) == \
FiniteSet(1, 9)
assert solveset_real(2*Abs(x) - Abs(x - 1), x) == \
FiniteSet(-1, Rational(1, 3))
assert solveset_real(Abs(x - 7) - 8, x) == FiniteSet(-S(1), S(15))
@XFAIL
def test_rewrite_trigh():
# if this import passes then the test below should also pass
from sympy import sech
assert solveset_real(sinh(x) + sech(x)) == FiniteSet(
2*atanh(-S.Half + sqrt(5)/2 - sqrt(-2*sqrt(5) + 2)/2),
2*atanh(-S.Half + sqrt(5)/2 + sqrt(-2*sqrt(5) + 2)/2),
2*atanh(-sqrt(5)/2 - S.Half + sqrt(2 + 2*sqrt(5))/2),
2*atanh(-sqrt(2 + 2*sqrt(5))/2 - sqrt(5)/2 - S.Half))
@XFAIL
def test_real_imag_splitting1():
a, b = symbols('a b', real=True, finite=True)
s = solveset_real(sqrt(a**2 + b**2) - 3, a)
assert s != S.EmptySet
# FiniteSet(-sqrt(-b**2 + 9), sqrt(-b**2 + 9))
# fails now because whether it is real or not depends
# on the value of b, e.g. b = 4 gives an imaginary value
def test_real_imag_splitting():
a, b = symbols('a b', real=True, finite=True)
assert solveset_real(sqrt(a**2 - b**2) - 3, a) == \
FiniteSet(-sqrt(b**2 + 9), sqrt(b**2 + 9))
def test_units():
assert solveset_real(1/x - 1/(2*cm), x) == FiniteSet(2*cm)
def test_solve_only_exp_1():
y = Symbol('y', positive=True, finite=True)
assert solveset_real(exp(x) - y, x) == FiniteSet(log(y))
@XFAIL
def test_only_exp_2():
assert solveset_real(exp(x) + exp(-x) - 4, x) == \
FiniteSet(log(-sqrt(3) + 2), log(sqrt(3) + 2))
assert solveset_real(exp(x) + exp(-x) - y, x) != S.EmptySet
# FiniteSet(log(y/2 - sqrt((y - 2)*(y + 2))/2),
# log(y/2 + sqrt((y - 2)*(y + 2))/2))
# fails now because whether it is real or not depends
# on whether y >= 2
@XFAIL
def test_only_exp_3():
assert solveset_real(exp(x/y)*exp(-z/y) - 2, y) == \
FiniteSet((x - z)/log(2))
assert solveset_real(sqrt(exp(x)) + sqrt(exp(-x)) - 4, x) == \
FiniteSet(2*log(-sqrt(3) + 2), 2*log(sqrt(3) + 2))
def test_atan2():
# The .inverse() method on atan2 works only if x.is_real is True and the
# second argument is a real constant
assert solveset_real(atan2(x, 2) - pi/3, x) == FiniteSet(2*sqrt(3))
def test_piecewise():
eq = Piecewise((x - 2, Gt(x, 2)), (2 - x, True)) - 3
assert set(solveset_real(eq, x)) == set(FiniteSet(-1, 5))
absxm3 = Piecewise(
(x - 3, S(0) <= x - 3),
(3 - x, S(0) > x - 3)
)
y = Symbol('y', positive=True)
assert solveset_real(absxm3 - y, x) == FiniteSet(-y + 3, y + 3)
def test_solveset_complex_polynomial():
from sympy.abc import x, a, b, c
assert solveset_complex(a*x**2 + b*x + c, x) == \
FiniteSet(-b/(2*a) - sqrt(-4*a*c + b**2)/(2*a),
-b/(2*a) + sqrt(-4*a*c + b**2)/(2*a))
assert solveset_complex(x - y**3, y) == FiniteSet(
(-x**Rational(1, 3))/2 + I*sqrt(3)*x**Rational(1, 3)/2,
x**Rational(1, 3),
(-x**Rational(1, 3))/2 - I*sqrt(3)*x**Rational(1, 3)/2)
assert solveset_complex(x + 1/x - 1, x) == \
FiniteSet(Rational(1, 2) + I*sqrt(3)/2, Rational(1, 2) - I*sqrt(3)/2)
def test_sol_zero_complex():
# This should return the complex set after it is implemented
raises(NotImplementedError, lambda: solveset_complex(0, x))
def test_solveset_complex_rational():
assert solveset_complex((x - 1)*(x - I)/(x - 3), x) == \
FiniteSet(1, I)
assert solveset_complex((x - y**3)/((y**2)*sqrt(1 - y**2)), x) == \
FiniteSet(y**3)
assert solveset_complex(-x**2 - I, x) == \
FiniteSet(-sqrt(2)/2 + sqrt(2)*I/2, sqrt(2)/2 - sqrt(2)*I/2)
def test_solve_quintics():
skip("This test is too slow")
f = x**5 - 110*x**3 - 55*x**2 + 2310*x + 979
s = solveset_complex(f, x)
for root in s:
res = f.subs(x, root.n()).n()
assert tn(res, 0)
f = x**5 + 15*x + 12
s = solveset_complex(f, x)
for root in s:
res = f.subs(x, root.n()).n()
assert tn(res, 0)
def test_solveset_complex_exp():
from sympy.abc import x, n
assert solveset_complex(exp(x) - 1, x) == \
imageset(Lambda(n, I*2*n*pi), S.Integers)
assert solveset_complex(exp(x) - I, x) == \
imageset(Lambda(n, I*(2*n*pi + pi/2)), S.Integers)
def test_solve_complex_log():
assert solveset_complex(log(x), x) == FiniteSet(1)
assert solveset_complex(1 - log(a + 4*x**2), x) == \
FiniteSet(-sqrt(-a/4 + E/4), sqrt(-a/4 + E/4))
def test_solve_complex_sqrt():
assert solveset_complex(sqrt(5*x + 6) - 2 - x, x) == \
FiniteSet(-S(1), S(2))
assert solveset_complex(sqrt(5*x + 6) - (2 + 2*I) - x, x) == \
FiniteSet(-S(2), 3 - 4*I)
assert solveset_complex(4*x*(1 - a * sqrt(x)), x) == \
FiniteSet(S(0), 1 / a ** 2)
def test_solveset_complex_tan():
s = solveset_complex(tan(x).rewrite(exp), x)
assert s == imageset(Lambda(n, pi*n), S.Integers) - \
imageset(Lambda(n, pi*n + pi/2), S.Integers)
def test_solve_trig():
from sympy.abc import n
assert solveset_real(sin(x), x) == \
Union(imageset(Lambda(n, 2*pi*n), S.Integers),
imageset(Lambda(n, 2*pi*n + pi), S.Integers))
assert solveset_real(sin(x) - 1, x) == \
imageset(Lambda(n, 2*pi*n + pi/2), S.Integers)
assert solveset_real(cos(x), x) == \
Union(imageset(Lambda(n, 2*pi*n - pi/2), S.Integers),
imageset(Lambda(n, 2*pi*n + pi/2), S.Integers))
assert solveset_real(sin(x) + cos(x), x) == \
Union(imageset(Lambda(n, 2*n*pi - pi/4), S.Integers),
imageset(Lambda(n, 2*n*pi + 3*pi/4), S.Integers))
assert solveset_real(sin(x)**2 + cos(x)**2, x) == S.EmptySet
def test_solve_invalid_sol():
assert 0 not in solveset_real(sin(x)/x, x)
assert 0 not in solveset_complex((exp(x) - 1)/x, x)
def test_solve_complex_unsolvable():
raises(NotImplementedError, lambda: solveset_complex(cos(x) - S.Half, x))
@XFAIL
def test_solve_trig_simplified():
from sympy.abc import n
assert solveset_real(sin(x), x) == \
imageset(Lambda(n, n*pi), S.Integers)
assert solveset_real(cos(x), x) == \
imageset(Lambda(n, n*pi + pi/2), S.Integers)
assert solveset_real(cos(x) + sin(x), x) == \
imageset(Lambda(n, n*pi - pi/4), S.Integers)
@XFAIL
def test_solve_lambert():
assert solveset_real(x*exp(x) - 1, x) == FiniteSet(LambertW(1))
assert solveset_real(x + 2**x, x) == \
FiniteSet(-LambertW(log(2))/log(2))
# issue 4739
assert solveset_real(exp(log(5)*x) - 2**x, x) == FiniteSet(0)
ans = solveset_real(3*x + 5 + 2**(-5*x + 3), x)
assert ans == FiniteSet(-Rational(5, 3) +
LambertW(-10240*2**(S(1)/3)*log(2)/3)/(5*log(2)))
eq = 2*(3*x + 4)**5 - 6*7**(3*x + 9)
result = solveset_real(eq, x)
ans = FiniteSet((log(2401) +
5*LambertW(-log(7**(7*3**Rational(1, 5)/5))))/(3*log(7))/-1)
assert result == ans
assert solveset_real(eq.expand(), x) == result
assert solveset_real(5*x - 1 + 3*exp(2 - 7*x), x) == \
FiniteSet(Rational(1, 5) + LambertW(-21*exp(Rational(3, 5))/5)/7)
assert solveset_real(2*x + 5 + log(3*x - 2), x) == \
FiniteSet(Rational(2, 3) + LambertW(2*exp(-Rational(19, 3))/3)/2)
assert solveset_real(3*x + log(4*x), x) == \
FiniteSet(LambertW(Rational(3, 4))/3)
assert solveset_complex(x**z*y**z - 2, z) == \
FiniteSet(log(2)/(log(x) + log(y)))
assert solveset_real(x**x - 2) == FiniteSet(exp(LambertW(log(2))))
a = Symbol('a')
assert solveset_real(-a*x + 2*x*log(x), x) == FiniteSet(exp(a/2))
a = Symbol('a', real=True)
assert solveset_real(a/x + exp(x/2), x) == \
FiniteSet(2*LambertW(-a/2))
assert solveset_real((a/x + exp(x/2)).diff(x), x) == \
FiniteSet(4*LambertW(sqrt(2)*sqrt(a)/4))
assert solveset_real(1/(1/x - y + exp(y)), x) == EmptySet()
# coverage test
p = Symbol('p', positive=True)
w = Symbol('w')
assert solveset_real((1/p + 1)**(p + 1), p) == EmptySet()
assert solveset_real(tanh(x + 3)*tanh(x - 3) - 1, x) == EmptySet()
assert solveset_real(2*x**w - 4*y**w, w) == \
solveset_real((x/y)**w - 2, w)
assert solveset_real((x**2 - 2*x + 1).subs(x, log(x) + 3*x), x) == \
FiniteSet(LambertW(3*S.Exp1)/3)
assert solveset_real((x**2 - 2*x + 1).subs(x, (log(x) + 3*x)**2 - 1), x) == \
FiniteSet(LambertW(3*exp(-sqrt(2)))/3, LambertW(3*exp(sqrt(2)))/3)
assert solveset_real((x**2 - 2*x - 2).subs(x, log(x) + 3*x), x) == \
FiniteSet(LambertW(3*exp(1 + sqrt(3)))/3, LambertW(3*exp(-sqrt(3) + 1))/3)
assert solveset_real(x*log(x) + 3*x + 1, x) == \
FiniteSet(exp(-3 + LambertW(-exp(3))))
eq = (x*exp(x) - 3).subs(x, x*exp(x))
assert solveset_real(eq, x) == \
FiniteSet(LambertW(3*exp(-LambertW(3))))
assert solveset_real(3*log(a**(3*x + 5)) + a**(3*x + 5), x) == \
FiniteSet(-((log(a**5) + LambertW(S(1)/3))/(3*log(a))))
p = symbols('p', positive=True)
assert solveset_real(3*log(p**(3*x + 5)) + p**(3*x + 5), x) == \
FiniteSet(
log((-3**(S(1)/3) - 3**(S(5)/6)*I)*LambertW(S(1)/3)**(S(1)/3)/(2*p**(S(5)/3)))/log(p),
log((-3**(S(1)/3) + 3**(S(5)/6)*I)*LambertW(S(1)/3)**(S(1)/3)/(2*p**(S(5)/3)))/log(p),
log((3*LambertW(S(1)/3)/p**5)**(1/(3*log(p)))),) # checked numerically
# check collection
b = Symbol('b')
eq = 3*log(a**(3*x + 5)) + b*log(a**(3*x + 5)) + a**(3*x + 5)
assert solveset_real(eq, x) == FiniteSet(
-((log(a**5) + LambertW(1/(b + 3)))/(3*log(a))))
# issue 4271
assert solveset_real((a/x + exp(x/2)).diff(x, 2), x) == FiniteSet(
6*LambertW((-1)**(S(1)/3)*a**(S(1)/3)/3))
assert solveset_real(x**3 - 3**x, x) == \
FiniteSet(-3/log(3)*LambertW(-log(3)/3))
assert solveset_real(x**2 - 2**x, x) == FiniteSet(2)
assert solveset_real(-x**2 + 2**x, x) == FiniteSet(2)
assert solveset_real(3**cos(x) - cos(x)**3) == FiniteSet(
acos(-3*LambertW(-log(3)/3)/log(3)))
assert solveset_real(4**(x/2) - 2**(x/3), x) == FiniteSet(0)
assert solveset_real(5**(x/2) - 2**(x/3), x) == FiniteSet(0)
b = sqrt(6)*sqrt(log(2))/sqrt(log(5))
assert solveset_real(5**(x/2) - 2**(3/x), x) == FiniteSet(-b, b)
def test_solveset():
x = Symbol('x', real=True)
raises(ValueError, lambda: solveset(x + y))
assert solveset(exp(x) - 1) == FiniteSet(0)
assert solveset(exp(x) - 1, x) == FiniteSet(0)
assert solveset(Eq(exp(x), 1), x) == FiniteSet(0)
assert solveset(x - 1 >= 0, x) == Interval(1, oo)
assert solveset(exp(x) - 1 >= 0, x) == Interval(0, oo)
x = Symbol('x')
assert solveset(exp(x) - 1, x) == imageset(Lambda(n, 2*I*pi*n), S.Integers)
assert solveset(Eq(exp(x), 1), x) == imageset(Lambda(n, 2*I*pi*n),
S.Integers)
def test_improve_coverage():
from sympy.solvers.solveset import _has_rational_power
x = Symbol('x', real=True)
y = exp(x+1/x**2)
raises(NotImplementedError, lambda: solveset(y**2+y, x))
assert _has_rational_power(sin(x)*exp(x) + 1, x) == (False, S.One)
assert _has_rational_power((sin(x)**2)*(exp(x) + 1)**3, x) == (False, S.One)
|
|
"""The tests for the MQTT binary sensor platform."""
import json
import unittest
from unittest.mock import ANY, Mock, patch
from datetime import timedelta
import homeassistant.core as ha
from homeassistant.setup import setup_component, async_setup_component
from homeassistant.components import binary_sensor, mqtt
from homeassistant.components.mqtt.discovery import async_start
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.const import EVENT_STATE_CHANGED, STATE_UNAVAILABLE
import homeassistant.util.dt as dt_util
from tests.common import (
get_test_home_assistant, fire_mqtt_message, async_fire_mqtt_message,
fire_time_changed, mock_component, mock_mqtt_component, mock_registry,
async_mock_mqtt_component, MockConfigEntry)
class TestSensorMQTT(unittest.TestCase):
"""Test the MQTT sensor."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.config_entries._async_schedule_save = Mock()
mock_mqtt_component(self.hass)
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_setting_sensor_value_via_mqtt_message(self):
"""Test the setting of the value via MQTT."""
assert setup_component(self.hass, binary_sensor.DOMAIN, {
binary_sensor.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'test-topic',
'payload_on': 'ON',
'payload_off': 'OFF',
}
})
state = self.hass.states.get('binary_sensor.test')
assert STATE_OFF == state.state
fire_mqtt_message(self.hass, 'test-topic', 'ON')
self.hass.block_till_done()
state = self.hass.states.get('binary_sensor.test')
assert STATE_ON == state.state
fire_mqtt_message(self.hass, 'test-topic', 'OFF')
self.hass.block_till_done()
state = self.hass.states.get('binary_sensor.test')
assert STATE_OFF == state.state
def test_setting_sensor_value_via_mqtt_message_and_template(self):
"""Test the setting of the value via MQTT."""
assert setup_component(self.hass, binary_sensor.DOMAIN, {
binary_sensor.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'test-topic',
'payload_on': 'ON',
'payload_off': 'OFF',
'value_template': '{%if is_state(entity_id,\"on\")-%}OFF'
'{%-else-%}ON{%-endif%}'
}
})
state = self.hass.states.get('binary_sensor.test')
assert STATE_OFF == state.state
fire_mqtt_message(self.hass, 'test-topic', '')
self.hass.block_till_done()
state = self.hass.states.get('binary_sensor.test')
assert STATE_ON == state.state
fire_mqtt_message(self.hass, 'test-topic', '')
self.hass.block_till_done()
state = self.hass.states.get('binary_sensor.test')
assert STATE_OFF == state.state
def test_valid_device_class(self):
"""Test the setting of a valid sensor class."""
assert setup_component(self.hass, binary_sensor.DOMAIN, {
binary_sensor.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'device_class': 'motion',
'state_topic': 'test-topic',
}
})
state = self.hass.states.get('binary_sensor.test')
assert 'motion' == state.attributes.get('device_class')
def test_invalid_device_class(self):
"""Test the setting of an invalid sensor class."""
assert setup_component(self.hass, binary_sensor.DOMAIN, {
binary_sensor.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'device_class': 'abc123',
'state_topic': 'test-topic',
}
})
state = self.hass.states.get('binary_sensor.test')
assert state is None
def test_availability_without_topic(self):
"""Test availability without defined availability topic."""
assert setup_component(self.hass, binary_sensor.DOMAIN, {
binary_sensor.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
}
})
state = self.hass.states.get('binary_sensor.test')
assert STATE_UNAVAILABLE != state.state
def test_availability_by_defaults(self):
"""Test availability by defaults with defined topic."""
assert setup_component(self.hass, binary_sensor.DOMAIN, {
binary_sensor.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'availability_topic': 'availability-topic'
}
})
state = self.hass.states.get('binary_sensor.test')
assert STATE_UNAVAILABLE == state.state
fire_mqtt_message(self.hass, 'availability-topic', 'online')
self.hass.block_till_done()
state = self.hass.states.get('binary_sensor.test')
assert STATE_UNAVAILABLE != state.state
fire_mqtt_message(self.hass, 'availability-topic', 'offline')
self.hass.block_till_done()
state = self.hass.states.get('binary_sensor.test')
assert STATE_UNAVAILABLE == state.state
def test_availability_by_custom_payload(self):
"""Test availability by custom payload with defined topic."""
assert setup_component(self.hass, binary_sensor.DOMAIN, {
binary_sensor.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'availability_topic': 'availability-topic',
'payload_available': 'good',
'payload_not_available': 'nogood'
}
})
state = self.hass.states.get('binary_sensor.test')
assert STATE_UNAVAILABLE == state.state
fire_mqtt_message(self.hass, 'availability-topic', 'good')
self.hass.block_till_done()
state = self.hass.states.get('binary_sensor.test')
assert STATE_UNAVAILABLE != state.state
fire_mqtt_message(self.hass, 'availability-topic', 'nogood')
self.hass.block_till_done()
state = self.hass.states.get('binary_sensor.test')
assert STATE_UNAVAILABLE == state.state
def test_force_update_disabled(self):
"""Test force update option."""
mock_component(self.hass, 'mqtt')
assert setup_component(self.hass, binary_sensor.DOMAIN, {
binary_sensor.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'test-topic',
'payload_on': 'ON',
'payload_off': 'OFF'
}
})
events = []
@ha.callback
def callback(event):
"""Verify event got called."""
events.append(event)
self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
fire_mqtt_message(self.hass, 'test-topic', 'ON')
self.hass.block_till_done()
assert 1 == len(events)
fire_mqtt_message(self.hass, 'test-topic', 'ON')
self.hass.block_till_done()
assert 1 == len(events)
def test_force_update_enabled(self):
"""Test force update option."""
mock_component(self.hass, 'mqtt')
assert setup_component(self.hass, binary_sensor.DOMAIN, {
binary_sensor.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'test-topic',
'payload_on': 'ON',
'payload_off': 'OFF',
'force_update': True
}
})
events = []
@ha.callback
def callback(event):
"""Verify event got called."""
events.append(event)
self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
fire_mqtt_message(self.hass, 'test-topic', 'ON')
self.hass.block_till_done()
assert 1 == len(events)
fire_mqtt_message(self.hass, 'test-topic', 'ON')
self.hass.block_till_done()
assert 2 == len(events)
def test_off_delay(self):
"""Test off_delay option."""
mock_component(self.hass, 'mqtt')
assert setup_component(self.hass, binary_sensor.DOMAIN, {
binary_sensor.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'test-topic',
'payload_on': 'ON',
'payload_off': 'OFF',
'off_delay': 30,
'force_update': True
}
})
events = []
@ha.callback
def callback(event):
"""Verify event got called."""
events.append(event)
self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
fire_mqtt_message(self.hass, 'test-topic', 'ON')
self.hass.block_till_done()
state = self.hass.states.get('binary_sensor.test')
assert STATE_ON == state.state
assert 1 == len(events)
fire_mqtt_message(self.hass, 'test-topic', 'ON')
self.hass.block_till_done()
state = self.hass.states.get('binary_sensor.test')
assert STATE_ON == state.state
assert 2 == len(events)
fire_time_changed(self.hass, dt_util.utcnow() + timedelta(seconds=30))
self.hass.block_till_done()
state = self.hass.states.get('binary_sensor.test')
assert STATE_OFF == state.state
assert 3 == len(events)
def test_setting_sensor_attribute_via_mqtt_json_message(self):
"""Test the setting of attribute via MQTT with JSON payload."""
mock_component(self.hass, 'mqtt')
assert setup_component(self.hass, binary_sensor.DOMAIN, {
binary_sensor.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'test-topic',
'json_attributes_topic': 'attr-topic'
}
})
fire_mqtt_message(self.hass, 'attr-topic', '{ "val": "100" }')
self.hass.block_till_done()
state = self.hass.states.get('binary_sensor.test')
assert '100' == \
state.attributes.get('val')
@patch('homeassistant.components.mqtt._LOGGER')
def test_update_with_json_attrs_not_dict(self, mock_logger):
"""Test attributes get extracted from a JSON result."""
mock_component(self.hass, 'mqtt')
assert setup_component(self.hass, binary_sensor.DOMAIN, {
binary_sensor.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'test-topic',
'json_attributes_topic': 'attr-topic'
}
})
fire_mqtt_message(self.hass, 'attr-topic', '[ "list", "of", "things"]')
self.hass.block_till_done()
state = self.hass.states.get('binary_sensor.test')
assert state.attributes.get('val') is None
mock_logger.warning.assert_called_with(
'JSON result was not a dictionary')
@patch('homeassistant.components.mqtt._LOGGER')
def test_update_with_json_attrs_bad_JSON(self, mock_logger):
"""Test attributes get extracted from a JSON result."""
mock_component(self.hass, 'mqtt')
assert setup_component(self.hass, binary_sensor.DOMAIN, {
binary_sensor.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'test-topic',
'json_attributes_topic': 'attr-topic'
}
})
fire_mqtt_message(self.hass, 'attr-topic', 'This is not JSON')
self.hass.block_till_done()
state = self.hass.states.get('binary_sensor.test')
assert state.attributes.get('val') is None
mock_logger.warning.assert_called_with(
'Erroneous JSON: %s', 'This is not JSON')
async def test_unique_id(hass):
"""Test unique id option only creates one sensor per unique_id."""
await async_mock_mqtt_component(hass)
assert await async_setup_component(hass, binary_sensor.DOMAIN, {
binary_sensor.DOMAIN: [{
'platform': 'mqtt',
'name': 'Test 1',
'state_topic': 'test-topic',
'unique_id': 'TOTALLY_UNIQUE'
}, {
'platform': 'mqtt',
'name': 'Test 2',
'state_topic': 'test-topic',
'unique_id': 'TOTALLY_UNIQUE'
}]
})
async_fire_mqtt_message(hass, 'test-topic', 'payload')
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
async def test_discovery_removal_binary_sensor(hass, mqtt_mock, caplog):
"""Test removal of discovered binary_sensor."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, 'homeassistant', {}, entry)
data = (
'{ "name": "Beer",'
' "state_topic": "test_topic",'
' "availability_topic": "availability_topic" }'
)
async_fire_mqtt_message(hass, 'homeassistant/binary_sensor/bla/config',
data)
await hass.async_block_till_done()
state = hass.states.get('binary_sensor.beer')
assert state is not None
assert state.name == 'Beer'
async_fire_mqtt_message(hass, 'homeassistant/binary_sensor/bla/config',
'')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('binary_sensor.beer')
assert state is None
async def test_discovery_update_binary_sensor(hass, mqtt_mock, caplog):
"""Test removal of discovered binary_sensor."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, 'homeassistant', {}, entry)
data1 = (
'{ "name": "Beer",'
' "state_topic": "test_topic",'
' "availability_topic": "availability_topic1" }'
)
data2 = (
'{ "name": "Milk",'
' "state_topic": "test_topic2",'
' "availability_topic": "availability_topic2" }'
)
async_fire_mqtt_message(hass, 'homeassistant/binary_sensor/bla/config',
data1)
await hass.async_block_till_done()
state = hass.states.get('binary_sensor.beer')
assert state is not None
assert state.name == 'Beer'
async_fire_mqtt_message(hass, 'homeassistant/binary_sensor/bla/config',
data2)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('binary_sensor.beer')
assert state is not None
assert state.name == 'Milk'
state = hass.states.get('binary_sensor.milk')
assert state is None
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT binary sensor device registry integration."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
await async_start(hass, 'homeassistant', {}, entry)
registry = await hass.helpers.device_registry.async_get_registry()
data = json.dumps({
'platform': 'mqtt',
'name': 'Test 1',
'state_topic': 'test-topic',
'device': {
'identifiers': ['helloworld'],
'connections': [
["mac", "02:5b:26:a8:dc:12"],
],
'manufacturer': 'Whatever',
'name': 'Beer',
'model': 'Glass',
'sw_version': '0.1-beta',
},
'unique_id': 'veryunique'
})
async_fire_mqtt_message(hass, 'homeassistant/binary_sensor/bla/config',
data)
await hass.async_block_till_done()
await hass.async_block_till_done()
device = registry.async_get_device({('mqtt', 'helloworld')}, set())
assert device is not None
assert device.identifiers == {('mqtt', 'helloworld')}
assert device.connections == {('mac', "02:5b:26:a8:dc:12")}
assert device.manufacturer == 'Whatever'
assert device.name == 'Beer'
assert device.model == 'Glass'
assert device.sw_version == '0.1-beta'
async def test_entity_id_update(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
registry = mock_registry(hass, {})
mock_mqtt = await async_mock_mqtt_component(hass)
assert await async_setup_component(hass, binary_sensor.DOMAIN, {
binary_sensor.DOMAIN: [{
'platform': 'mqtt',
'name': 'beer',
'state_topic': 'test-topic',
'availability_topic': 'avty-topic',
'unique_id': 'TOTALLY_UNIQUE'
}]
})
state = hass.states.get('binary_sensor.beer')
assert state is not None
assert mock_mqtt.async_subscribe.call_count == 2
mock_mqtt.async_subscribe.assert_any_call('test-topic', ANY, 0, 'utf-8')
mock_mqtt.async_subscribe.assert_any_call('avty-topic', ANY, 0, 'utf-8')
mock_mqtt.async_subscribe.reset_mock()
registry.async_update_entity(
'binary_sensor.beer', new_entity_id='binary_sensor.milk')
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get('binary_sensor.beer')
assert state is None
state = hass.states.get('binary_sensor.milk')
assert state is not None
assert mock_mqtt.async_subscribe.call_count == 2
mock_mqtt.async_subscribe.assert_any_call('test-topic', ANY, 0, 'utf-8')
mock_mqtt.async_subscribe.assert_any_call('avty-topic', ANY, 0, 'utf-8')
|
|
# Copyright (c) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
from Crypto.PublicKey import RSA
from Crypto.Util import asn1
from oslo_config import cfg
import pki
import pki.cert
import pki.client
import pki.crypto as cryptoutil
import pki.key as key
import pki.kra
import pki.profile
from requests import exceptions as request_exceptions
from barbican.common import exception
from barbican.common import utils
from barbican import i18n as u
import barbican.plugin.interface.certificate_manager as cm
import barbican.plugin.interface.secret_store as sstore
CONF = cfg.CONF
LOG = utils.getLogger(__name__)
dogtag_plugin_group = cfg.OptGroup(name='dogtag_plugin',
title="Dogtag Plugin Options")
dogtag_plugin_opts = [
cfg.StrOpt('pem_path',
help=u._('Path to PEM file for authentication')),
cfg.StrOpt('dogtag_host',
default="localhost",
help=u._('Hostname for the Dogtag instance')),
cfg.StrOpt('dogtag_port',
default="8443",
help=u._('Port for the Dogtag instance')),
cfg.StrOpt('nss_db_path',
help=u._('Path to the NSS certificate database')),
cfg.StrOpt('nss_password',
help=u._('Password for NSS certificate database')),
cfg.StrOpt('simple_cmc_profile',
help=u._('Profile for simple CMC requests'))
]
CONF.register_group(dogtag_plugin_group)
CONF.register_opts(dogtag_plugin_opts, group=dogtag_plugin_group)
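# Illustrative barbican.conf snippet consumed by the options above
# (hypothetical values, shown only as a sketch of the expected layout):
#
#     [dogtag_plugin]
#     pem_path = /etc/barbican/kra_admin_cert.pem
#     dogtag_host = localhost
#     dogtag_port = 8443
#     nss_db_path = /etc/barbican/alias
#     nss_password = changeme
#     simple_cmc_profile = caOtherCert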
def setup_nss_db(conf):
crypto = None
create_nss_db = False
nss_db_path = conf.dogtag_plugin.nss_db_path
if nss_db_path is not None:
nss_password = conf.dogtag_plugin.nss_password
if nss_password is None:
raise ValueError(u._("nss_password is required"))
if not os.path.exists(nss_db_path):
create_nss_db = True
cryptoutil.NSSCryptoProvider.setup_database(
nss_db_path, nss_password, over_write=True)
crypto = cryptoutil.NSSCryptoProvider(nss_db_path, nss_password)
return crypto, create_nss_db
def create_connection(conf, subsystem_path):
pem_path = conf.dogtag_plugin.pem_path
if pem_path is None:
raise ValueError(u._("pem_path is required"))
connection = pki.client.PKIConnection(
'https',
conf.dogtag_plugin.dogtag_host,
conf.dogtag_plugin.dogtag_port,
subsystem_path)
connection.set_authentication_cert(pem_path)
return connection
class DogtagPluginAlgorithmException(exception.BarbicanException):
message = u._("Invalid algorithm passed in")
class DogtagPluginNotSupportedException(exception.NotSupported):
message = u._("Operation not supported by Dogtag Plugin")
def __init__(self, msg=None):
if not msg:
message = self.message
else:
message = msg
super(DogtagPluginNotSupportedException, self).__init__(message)
class DogtagKRAPlugin(sstore.SecretStoreBase):
"""Implementation of the secret store plugin with KRA as the backend."""
TRANSPORT_NICK = "KRA transport cert"
# metadata constants
ALG = "alg"
BIT_LENGTH = "bit_length"
KEY_ID = "key_id"
SECRET_TYPE = "secret_type"
SECRET_MODE = "secret_mode"
PASSPHRASE_KEY_ID = "passphrase_key_id"
CONVERT_TO_PEM = "convert_to_pem"
# string constants
DSA_PRIVATE_KEY_HEADER = '-----BEGIN DSA PRIVATE KEY-----'
DSA_PRIVATE_KEY_FOOTER = '-----END DSA PRIVATE KEY-----'
DSA_PUBLIC_KEY_HEADER = '-----BEGIN DSA PUBLIC KEY-----'
DSA_PUBLIC_KEY_FOOTER = '-----END DSA PUBLIC KEY-----'
def __init__(self, conf=CONF):
"""Constructor - create the keyclient."""
LOG.debug("starting DogtagKRAPlugin init")
crypto, create_nss_db = setup_nss_db(conf)
connection = create_connection(conf, 'kra')
# create kraclient
kraclient = pki.kra.KRAClient(connection, crypto)
self.keyclient = kraclient.keys
self.systemcert_client = kraclient.system_certs
if crypto is not None:
if create_nss_db:
self.import_transport_cert(crypto)
crypto.initialize()
self.keyclient.set_transport_cert(
DogtagKRAPlugin.TRANSPORT_NICK)
LOG.debug("completed DogtagKRAPlugin init")
def import_transport_cert(self, crypto):
# Get transport cert and insert in the certdb
transport_cert = self.systemcert_client.get_transport_cert()
crypto.import_cert(DogtagKRAPlugin.TRANSPORT_NICK,
transport_cert,
"u,u,u")
def store_secret(self, secret_dto):
"""Store a secret in the KRA
If secret_dto.transport_key is not None, then we expect
secret_dto.secret to include a base64 encoded PKIArchiveOptions
structure as defined in section 6.4 of RFC 2511. This package contains
a transport key wrapped session key, the session key wrapped secret
and parameters to specify the symmetric key wrapping.
Otherwise, the data is unencrypted and we use a call to archive_key()
to have the Dogtag KRA client generate the relevant session keys.
        The secret_dto contains additional information on the type of secret
        that is being stored. We will use that shortly. For now, let's just
        assume that it is all PASS_PHRASE_TYPE.
        Returns a dict with the relevant metadata (which in this case is just
        the key_id).
        """
data_type = key.KeyClient.PASS_PHRASE_TYPE
client_key_id = uuid.uuid4().hex
if secret_dto.transport_key is not None:
# TODO(alee-3) send the transport key with the archival request
# once the Dogtag Client API changes.
response = self.keyclient.archive_pki_options(
client_key_id,
data_type,
secret_dto.secret,
key_algorithm=None,
key_size=None)
else:
response = self.keyclient.archive_key(
client_key_id,
data_type,
secret_dto.secret,
key_algorithm=None,
key_size=None)
meta_dict = {DogtagKRAPlugin.KEY_ID: response.get_key_id()}
self._store_secret_attributes(meta_dict, secret_dto)
return meta_dict
def get_secret(self, secret_metadata):
"""Retrieve a secret from the KRA
The secret_metadata is simply the dict returned by a store_secret() or
get_secret() call. We will extract the key_id from this dict.
Note: There are two ways to retrieve secrets from the KRA.
The first method calls retrieve_key without a wrapping key. This
relies on the KRA client to generate a wrapping key (and wrap it with
the KRA transport cert), and is completely transparent to the
Barbican server. What is returned to the caller is the
unencrypted secret.
The second way is to provide a wrapping key that would be generated
on the barbican client. That way only the client will be
able to unwrap the secret. This wrapping key is provided in the
secret_metadata by Barbican core.
Format/Type of the secret returned in the SecretDTO object.
-----------------------------------------------------------
The type of the secret returned is always dependent on the way it is
stored using the store_secret method.
In case of strings - like passphrase/PEM strings, the return will be a
string.
In case of binary data - the return will be the actual binary data.
        When retrieving an asymmetric key that was generated using the
        Dogtag plugin, the binary representation of the asymmetric key in
        PEM format is returned.
"""
key_id = secret_metadata[DogtagKRAPlugin.KEY_ID]
secret_type = secret_metadata.get(DogtagKRAPlugin.SECRET_TYPE, None)
key_spec = sstore.KeySpec(
alg=secret_metadata.get(DogtagKRAPlugin.ALG, None),
bit_length=secret_metadata.get(DogtagKRAPlugin.BIT_LENGTH, None),
mode=secret_metadata.get(DogtagKRAPlugin.SECRET_MODE, None),
passphrase=None
)
passphrase = self._get_passphrase_for_a_private_key(
secret_metadata, key_spec)
recovered_key = None
twsk = DogtagKRAPlugin._get_trans_wrapped_session_key(secret_metadata)
if DogtagKRAPlugin.CONVERT_TO_PEM in secret_metadata:
# Case for returning the asymmetric keys generated in KRA.
# Asymmetric keys generated in KRA are not generated in PEM format.
# This marker DogtagKRAPlugin.CONVERT_TO_PEM is set in the
# secret_metadata for asymmetric keys generated in KRA to
# help convert the returned private/public keys to PEM format and
# eventually return the binary data of the keys in PEM format.
if secret_type == sstore.SecretType.PUBLIC:
# Public key should be retrieved using the get_key_info method
# as it is treated as an attribute of the asymmetric key pair
# stored in the KRA database.
if key_spec.alg is None:
raise sstore.SecretAlgorithmNotSupportedException('None')
key_info = self.keyclient.get_key_info(key_id)
if key_spec.alg.upper() == key.KeyClient.RSA_ALGORITHM:
recovered_key = (RSA.importKey(key_info.public_key)
.publickey()
.exportKey('PEM')).encode('utf-8')
elif key_spec.alg.upper() == key.KeyClient.DSA_ALGORITHM:
pub_seq = asn1.DerSequence()
pub_seq[:] = key_info.public_key
recovered_key = (
("%s\n%s%s" %
(DogtagKRAPlugin.DSA_PUBLIC_KEY_HEADER,
pub_seq.encode().encode("base64"),
DogtagKRAPlugin.DSA_PUBLIC_KEY_FOOTER)
).encode('utf-8')
)
else:
raise sstore.SecretAlgorithmNotSupportedException(
key_spec.alg.upper()
)
elif secret_type == sstore.SecretType.PRIVATE:
key_data = self.keyclient.retrieve_key(key_id)
if key_spec.alg.upper() == key.KeyClient.RSA_ALGORITHM:
recovered_key = (
(RSA.importKey(key_data.data)
.exportKey('PEM', passphrase))
.encode('utf-8')
)
elif key_spec.alg.upper() == key.KeyClient.DSA_ALGORITHM:
pub_seq = asn1.DerSequence()
pub_seq[:] = key_data.data
recovered_key = (
("%s\n%s%s" %
(DogtagKRAPlugin.DSA_PRIVATE_KEY_HEADER,
pub_seq.encode().encode("base64"),
DogtagKRAPlugin.DSA_PRIVATE_KEY_FOOTER)
).encode('utf-8')
)
else:
raise sstore.SecretAlgorithmNotSupportedException(
key_spec.alg.upper()
)
else:
# TODO(alee-3) send transport key as well when dogtag client API
# changes in case the transport key has changed.
key_data = self.keyclient.retrieve_key(key_id, twsk)
if twsk:
# The data returned is a byte array.
recovered_key = key_data.encrypted_data
else:
recovered_key = key_data.data
# TODO(alee) remove final field when content_type is removed
# from secret_dto
ret = sstore.SecretDTO(
type=secret_type,
secret=recovered_key,
key_spec=key_spec,
content_type=None,
transport_key=None)
return ret
def delete_secret(self, secret_metadata):
"""Delete a secret from the KRA
There is currently no way to delete a secret in Dogtag.
We will be implementing such a method shortly.
"""
pass
def generate_symmetric_key(self, key_spec):
"""Generate a symmetric key
This calls generate_symmetric_key() on the KRA passing in the
algorithm, bit_length and id (used as the client_key_id) from
the secret. The remaining parameters are not used.
Returns a metadata object that can be used for retrieving the secret.
"""
usages = [key.SymKeyGenerationRequest.DECRYPT_USAGE,
key.SymKeyGenerationRequest.ENCRYPT_USAGE]
client_key_id = uuid.uuid4().hex
algorithm = self._map_algorithm(key_spec.alg.lower())
if algorithm is None:
raise DogtagPluginAlgorithmException
passphrase = key_spec.passphrase
if passphrase:
raise DogtagPluginNotSupportedException(
u._("Passphrase encryption is not supported for symmetric"
" key generating algorithms."))
response = self.keyclient.generate_symmetric_key(
client_key_id,
algorithm,
key_spec.bit_length,
usages)
return {DogtagKRAPlugin.ALG: key_spec.alg,
DogtagKRAPlugin.BIT_LENGTH: key_spec.bit_length,
DogtagKRAPlugin.SECRET_MODE: key_spec.mode,
DogtagKRAPlugin.SECRET_TYPE: sstore.SecretType.SYMMETRIC,
DogtagKRAPlugin.KEY_ID: response.get_key_id()}
def generate_asymmetric_key(self, key_spec):
"""Generate an asymmetric key."""
usages = [key.AsymKeyGenerationRequest.DECRYPT_USAGE,
key.AsymKeyGenerationRequest.ENCRYPT_USAGE]
client_key_id = uuid.uuid4().hex
algorithm = self._map_algorithm(key_spec.alg.lower())
passphrase = key_spec.passphrase
if algorithm is None:
raise DogtagPluginAlgorithmException
passphrase_key_id = None
passphrase_metadata = None
if passphrase:
if algorithm == key.KeyClient.DSA_ALGORITHM:
raise DogtagPluginNotSupportedException(
u._("Passphrase encryption is not "
"supported for DSA algorithm")
)
stored_passphrase_info = self.keyclient.archive_key(
uuid.uuid4().hex,
self.keyclient.PASS_PHRASE_TYPE,
passphrase)
passphrase_key_id = stored_passphrase_info.get_key_id()
passphrase_metadata = {
DogtagKRAPlugin.KEY_ID: passphrase_key_id
}
response = self.keyclient.generate_asymmetric_key(
client_key_id,
algorithm,
key_spec.bit_length,
usages)
public_key_metadata = {
DogtagKRAPlugin.ALG: key_spec.alg,
DogtagKRAPlugin.BIT_LENGTH: key_spec.bit_length,
DogtagKRAPlugin.SECRET_TYPE: sstore.SecretType.PUBLIC,
DogtagKRAPlugin.KEY_ID: response.get_key_id(),
DogtagKRAPlugin.CONVERT_TO_PEM: "true"
}
private_key_metadata = {
DogtagKRAPlugin.ALG: key_spec.alg,
DogtagKRAPlugin.BIT_LENGTH: key_spec.bit_length,
DogtagKRAPlugin.SECRET_TYPE: sstore.SecretType.PRIVATE,
DogtagKRAPlugin.KEY_ID: response.get_key_id(),
DogtagKRAPlugin.CONVERT_TO_PEM: "true"
}
if passphrase_key_id:
private_key_metadata[DogtagKRAPlugin.PASSPHRASE_KEY_ID] = (
passphrase_key_id
)
return sstore.AsymmetricKeyMetadataDTO(private_key_metadata,
public_key_metadata,
passphrase_metadata)
def generate_supports(self, key_spec):
"""Key generation supported?
Specifies whether the plugin supports key generation with the
given key_spec.
        For now, we will just check the algorithm. When Dogtag adds a
        call to check the bit length as well, we will use that call to
        take advantage of the bit_length information.
"""
return self._map_algorithm(key_spec.alg) is not None
def store_secret_supports(self, key_spec):
"""Key storage supported?
Specifies whether the plugin supports storage of the secret given
the attributes included in the KeySpec
"""
return True
@staticmethod
def _map_algorithm(algorithm):
"""Map Barbican algorithms to Dogtag plugin algorithms.
Note that only algorithms supported by Dogtag will be mapped.
"""
if algorithm == sstore.KeyAlgorithm.AES:
return key.KeyClient.AES_ALGORITHM
elif algorithm == sstore.KeyAlgorithm.DES:
return key.KeyClient.DES_ALGORITHM
elif algorithm == sstore.KeyAlgorithm.DESEDE:
return key.KeyClient.DES3_ALGORITHM
elif algorithm == sstore.KeyAlgorithm.DSA:
return key.KeyClient.DSA_ALGORITHM
elif algorithm == sstore.KeyAlgorithm.RSA:
return key.KeyClient.RSA_ALGORITHM
elif algorithm == sstore.KeyAlgorithm.DIFFIE_HELLMAN:
# may be supported, needs to be tested
return None
elif algorithm == sstore.KeyAlgorithm.EC:
# asymmetric keys not yet supported
return None
else:
return None
@staticmethod
def _store_secret_attributes(meta_dict, secret_dto):
# store the following attributes for retrieval
key_spec = secret_dto.key_spec
if key_spec is not None:
if key_spec.alg is not None:
meta_dict[DogtagKRAPlugin.ALG] = key_spec.alg
if key_spec.bit_length is not None:
meta_dict[DogtagKRAPlugin.BIT_LENGTH] = key_spec.bit_length
if key_spec.mode is not None:
meta_dict[DogtagKRAPlugin.SECRET_MODE] = key_spec.mode
if secret_dto.type is not None:
meta_dict[DogtagKRAPlugin.SECRET_TYPE] = secret_dto.type
def _get_passphrase_for_a_private_key(self, secret_metadata, key_spec):
"""Retrieve the passphrase for the private key stored in the KRA."""
secret_type = secret_metadata.get(DogtagKRAPlugin.SECRET_TYPE, None)
if secret_type is None:
return None
if key_spec.alg is None:
return None
passphrase = None
if DogtagKRAPlugin.PASSPHRASE_KEY_ID in secret_metadata:
if key_spec.alg.upper() == key.KeyClient.RSA_ALGORITHM:
passphrase = self.keyclient.retrieve_key(
secret_metadata.get(DogtagKRAPlugin.PASSPHRASE_KEY_ID)
).data
else:
if key_spec.alg.upper() == key.KeyClient.DSA_ALGORITHM:
raise sstore.SecretGeneralException(
u._("DSA keys should not have a passphrase in the"
" database, for being used during retrieval.")
)
raise sstore.SecretGeneralException(
u._("Secrets of type {secret_type} should not have a "
"passphrase in the database, for being used during "
"retrieval.").format(secret_type=secret_type)
)
return passphrase
@staticmethod
def _get_trans_wrapped_session_key(secret_metadata):
twsk = secret_metadata.get('trans_wrapped_session_key', None)
secret_type = secret_metadata.get(DogtagKRAPlugin.SECRET_TYPE, None)
if secret_type in [sstore.SecretType.PUBLIC,
sstore.SecretType.PRIVATE]:
if twsk:
raise DogtagPluginNotSupportedException(
u._("Encryption using session key is not supported when "
"retrieving a {secret_type} "
"key.").format(secret_type=secret_type)
)
return twsk
def _catch_request_exception(ca_related_function):
def _catch_ca_unavailable(self, *args, **kwargs):
try:
return ca_related_function(self, *args, **kwargs)
except request_exceptions.RequestException:
return cm.ResultDTO(
cm.CertificateStatus.CA_UNAVAILABLE_FOR_REQUEST)
return _catch_ca_unavailable
def _catch_enrollment_exceptions(ca_related_function):
def _catch_enrollment_exception(self, *args, **kwargs):
try:
return ca_related_function(self, *args, **kwargs)
except pki.BadRequestException as e:
return cm.ResultDTO(
cm.CertificateStatus.CLIENT_DATA_ISSUE_SEEN,
status_message=e.message)
except pki.PKIException as e:
raise cm.CertificateGeneralException(
u._("Exception thrown by enroll_cert: {message}").format(
message=e.message))
return _catch_enrollment_exception
class DogtagCAPlugin(cm.CertificatePluginBase):
"""Implementation of the cert plugin with Dogtag CA as the backend."""
# order_metadata fields
PROFILE_ID = "profile_id"
# plugin_metadata fields
REQUEST_ID = "request_id"
def __init__(self, conf=CONF):
"""Constructor - create the cert clients."""
crypto, create_nss_db = setup_nss_db(conf)
connection = create_connection(conf, 'ca')
self.certclient = pki.cert.CertClient(connection)
if crypto is not None:
crypto.initialize()
self.simple_cmc_profile = conf.dogtag_plugin.simple_cmc_profile
def _get_request_id(self, order_id, plugin_meta, operation):
request_id = plugin_meta.get(self.REQUEST_ID, None)
if not request_id:
raise cm.CertificateGeneralException(
u._(
"{request} not found for {operation} for "
"order_id {order_id}"
).format(
request=self.REQUEST_ID,
operation=operation,
order_id=order_id
)
)
return request_id
@_catch_request_exception
def _get_request(self, request_id):
try:
return self.certclient.get_request(request_id)
except pki.RequestNotFoundException:
return None
@_catch_request_exception
def _get_cert(self, cert_id):
try:
return self.certclient.get_cert(cert_id)
except pki.CertNotFoundException:
return None
def check_certificate_status(self, order_id, order_meta, plugin_meta):
"""Check the status of a certificate request.
:param order_id: ID of the order associated with this request
:param order_meta: order_metadata associated with this order
:param plugin_meta: data populated by previous calls for this order,
in particular the request_id
:return: cm.ResultDTO
"""
request_id = self._get_request_id(order_id, plugin_meta, "checking")
request = self._get_request(request_id)
if not request:
raise cm.CertificateGeneralException(
u._(
"No request found for request_id {request_id} for "
"order {order_id}"
).format(
request_id=request_id,
order_id=order_id
)
)
request_status = request.request_status
if request_status == pki.cert.CertRequestStatus.REJECTED:
return cm.ResultDTO(
cm.CertificateStatus.CLIENT_DATA_ISSUE_SEEN,
status_message=request.error_message)
elif request_status == pki.cert.CertRequestStatus.CANCELED:
return cm.ResultDTO(
cm.CertificateStatus.REQUEST_CANCELED)
elif request_status == pki.cert.CertRequestStatus.PENDING:
return cm.ResultDTO(
cm.CertificateStatus.WAITING_FOR_CA)
elif request_status == pki.cert.CertRequestStatus.COMPLETE:
# get the cert
cert_id = request.cert_id
if not cert_id:
raise cm.CertificateGeneralException(
u._(
"Request {request_id} reports status_complete, but no "
"cert_id has been returned"
).format(
request_id=request_id
)
)
cert = self._get_cert(cert_id)
if not cert:
raise cm.CertificateGeneralException(
u._("Certificate not found for cert_id: {cert_id}").format(
cert_id=cert_id
)
)
return cm.ResultDTO(
cm.CertificateStatus.CERTIFICATE_GENERATED,
certificate=cert.encoded,
intermediates=cert.pkcs7_cert_chain)
else:
raise cm.CertificateGeneralException(
u._("Invalid request_status returned by CA"))
@_catch_request_exception
def issue_certificate_request(self, order_id, order_meta, plugin_meta):
"""Issue a certificate request to the Dogtag CA
Call the relevant certificate issuance function depending on the
Barbican defined request type in the order_meta.
:param order_id: ID of the order associated with this request
:param order_meta: dict containing all the inputs for this request.
This includes the request_type.
:param plugin_meta: Used to store data for status check
:return: cm.ResultDTO
"""
request_type = order_meta.get(
cm.REQUEST_TYPE,
cm.CertificateRequestType.CUSTOM_REQUEST)
jump_table = {
cm.CertificateRequestType.SIMPLE_CMC_REQUEST:
self._issue_simple_cmc_request,
cm.CertificateRequestType.FULL_CMC_REQUEST:
self._issue_full_cmc_request,
cm.CertificateRequestType.STORED_KEY_REQUEST:
self._issue_stored_key_request,
cm.CertificateRequestType.CUSTOM_REQUEST:
self._issue_custom_certificate_request
}
if request_type not in jump_table:
raise DogtagPluginNotSupportedException(
"Dogtag plugin does not support %s request type".format(
request_type))
return jump_table[request_type](order_id, order_meta, plugin_meta)
@_catch_enrollment_exceptions
def _issue_simple_cmc_request(self, order_id, order_meta, plugin_meta):
"""Issue a simple CMC request to the Dogtag CA.
:param order_id:
:param order_meta:
:param plugin_meta:
:return: cm.ResultDTO
"""
profile_id = self.simple_cmc_profile
inputs = {
'cert_request_type': 'pkcs10',
'cert_request': order_meta.get('request_data')
}
request = self.certclient.create_enrollment_request(profile_id, inputs)
results = self.certclient.submit_enrollment_request(request)
return self._process_enrollment_results(results, plugin_meta)
def _issue_full_cmc_request(self, order_id, order_meta, plugin_meta):
"""Issue a full CMC request to the Dogtag CA.
:param order_id:
:param order_meta:
:param plugin_meta:
:return: cm.ResultDTO
"""
raise DogtagPluginNotSupportedException(
"Dogtag plugin does not support %s request type".format(
cm.CertificateRequestType.FULL_CMC_REQUEST))
def _issue_stored_key_request(self, order_id, order_meta, plugin_meta):
"""Issue a simple CMC request to the Dogtag CA.
:param order_id:
:param order_meta:
:param plugin_meta:
:return: cm.ResultDTO
"""
return self._issue_simple_cmc_request(
order_id,
order_meta,
plugin_meta)
@_catch_enrollment_exceptions
def _issue_custom_certificate_request(self, order_id, order_meta,
plugin_meta):
"""Issue a custom certificate request to Dogtag CA
For now, we assume that we are talking to the Dogtag CA that
is deployed with the KRA back-end, and we are connected as a
CA agent. This means that we can use the agent convenience
method to automatically approve the certificate request.
:param order_id: ID of the order associated with this request
:param order_meta: dict containing all the inputs required for a
particular profile. One of these must be the profile_id.
The exact fields (both optional and mandatory) depend on the
profile, but they will be exposed to the user in a method to
expose syntax. Depending on the profile, only the relevant fields
will be populated in the request. All others will be ignored.
:param plugin_meta: Used to store data for status check.
:return: cm.ResultDTO
"""
profile_id = order_meta.get(self.PROFILE_ID, None)
if not profile_id:
return cm.ResultDTO(
cm.CertificateStatus.CLIENT_DATA_ISSUE_SEEN,
status_message=u._("No profile_id specified"))
results = self.certclient.enroll_cert(profile_id, order_meta)
return self._process_enrollment_results(results, plugin_meta)
def _process_enrollment_results(self, enrollment_results, plugin_meta):
"""Process results received from Dogtag CA for enrollment
:param enrollment_results: list of CertEnrollmentResult objects
:param plugin_meta: metadata dict for storing plugin specific data
:return: cm.ResultDTO
"""
# Although it is possible to create multiple certs in an invocation
# of enroll_cert, Barbican cannot handle this case. Assume
        # only one cert and request are generated for now.
enrollment_result = enrollment_results[0]
request = enrollment_result.request
if not request:
raise cm.CertificateGeneralException(
u._("No request returned in enrollment_results"))
# store the request_id in the plugin metadata
plugin_meta[self.REQUEST_ID] = request.request_id
cert = enrollment_result.cert
if not cert:
request_status = request.request_status
if request_status == pki.cert.CertRequestStatus.REJECTED:
return cm.ResultDTO(
cm.CertificateStatus.CLIENT_DATA_ISSUE_SEEN,
status_message=request.error_message)
elif request_status == pki.cert.CertRequestStatus.CANCELED:
return cm.ResultDTO(cm.CertificateStatus.REQUEST_CANCELED)
elif request_status == pki.cert.CertRequestStatus.PENDING:
return cm.ResultDTO(cm.CertificateStatus.WAITING_FOR_CA)
elif request_status == pki.cert.CertRequestStatus.COMPLETE:
raise cm.CertificateGeneralException(
u._("request_id {req_id} returns COMPLETE but no cert "
"returned").format(req_id=request.request_id))
else:
raise cm.CertificateGeneralException(
u._("Invalid request_status {status} for "
"request_id {request_id}").format(
status=request_status,
request_id=request.request_id)
)
return cm.ResultDTO(
cm.CertificateStatus.CERTIFICATE_GENERATED,
certificate=cert.encoded,
intermediates=cert.pkcs7_cert_chain)
def modify_certificate_request(self, order_id, order_meta, plugin_meta):
"""Modify a certificate request.
Once a certificate request is generated, it cannot be modified.
The only alternative is to cancel the request (if it has not already
        completed) and attempt a fresh enrollment. That is what will be
attempted here.
:param order_id: ID for this order
:param order_meta: order metadata. It is assumed that the newly
modified request data will be present here.
:param plugin_meta: data stored on behalf of the plugin for further
operations
:return: ResultDTO:
"""
result_dto = self.cancel_certificate_request(
order_id, order_meta, plugin_meta)
if result_dto.status == cm.CertificateStatus.REQUEST_CANCELED:
return self.issue_certificate_request(
order_id, order_meta, plugin_meta)
elif result_dto.status == cm.CertificateStatus.INVALID_OPERATION:
return cm.ResultDTO(
cm.CertificateStatus.INVALID_OPERATION,
status_message=u._(
"Modify request: unable to cancel: "
"{message}").format(message=result_dto.status_message)
)
else:
# other status (ca_unavailable, client_data_issue)
# return result from cancel operation
return result_dto
@_catch_request_exception
def cancel_certificate_request(self, order_id, order_meta, plugin_meta):
"""Cancel a certificate request.
:param order_id: ID for the order associated with this request
        :param order_meta: order metadata for this request
:param plugin_meta: data stored by plugin for further processing.
In particular, the request_id
:return: cm.ResultDTO:
"""
request_id = self._get_request_id(order_id, plugin_meta, "cancelling")
try:
review_response = self.certclient.review_request(request_id)
self.certclient.cancel_request(request_id, review_response)
return cm.ResultDTO(cm.CertificateStatus.REQUEST_CANCELED)
except pki.RequestNotFoundException:
return cm.ResultDTO(
cm.CertificateStatus.CLIENT_DATA_ISSUE_SEEN,
status_message=u._("no request found for this order"))
except pki.ConflictingOperationException as e:
return cm.ResultDTO(
cm.CertificateStatus.INVALID_OPERATION,
status_message=e.message)
def supports(self, certificate_spec):
if cm.CA_TYPE in certificate_spec:
return certificate_spec[cm.CA_TYPE] == cm.CA_PLUGIN_TYPE_DOGTAG
if cm.CA_PLUGIN_TYPE_SYMANTEC in certificate_spec:
# TODO(alee-3) Handle case where SKI is provided
pass
return True
def supported_request_types(self):
"""Returns the request_types supported by this plugin.
:returns: a list of the Barbican-core defined request_types
supported by this plugin.
"""
return [cm.CertificateRequestType.SIMPLE_CMC_REQUEST,
cm.CertificateRequestType.STORED_KEY_REQUEST,
cm.CertificateRequestType.CUSTOM_REQUEST]
|
|
#!/usr/bin/env python
import argparse
import copy
import hashlib
import json
import logging
import os
import shutil
import struct
import subprocess
import tempfile
import xml.etree.ElementTree as ET
from collections import defaultdict
from Bio.Data import CodonTable
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('jbrowse')
class ColorScaling(object):
COLOR_FUNCTION_TEMPLATE = """
function(feature, variableName, glyphObject, track) {{
var score = {score};
{opacity}
return 'rgba({red}, {green}, {blue}, ' + opacity + ')';
}}
"""
COLOR_FUNCTION_TEMPLATE_QUAL = """
function(feature, variableName, glyphObject, track) {{
var search_up = function self(sf, attr){{
if(sf.get(attr) !== undefined){{
return sf.get(attr);
}}
if(sf.parent() === undefined) {{
return;
}}else{{
return self(sf.parent(), attr);
}}
}};
var search_down = function self(sf, attr){{
if(sf.get(attr) !== undefined){{
return sf.get(attr);
}}
if(sf.children() === undefined) {{
return;
}}else{{
var kids = sf.children();
for(var child_idx in kids){{
var x = self(kids[child_idx], attr);
if(x !== undefined){{
return x;
}}
}}
return;
}}
}};
var color = ({user_spec_color} || search_up(feature, 'color') || search_down(feature, 'color') || {auto_gen_color});
var score = (search_up(feature, 'score') || search_down(feature, 'score'));
{opacity}
var result = /^#?([a-f\d]{{2}})([a-f\d]{{2}})([a-f\d]{{2}})$/i.exec(color);
var red = parseInt(result[1], 16);
var green = parseInt(result[2], 16);
var blue = parseInt(result[3], 16);
if(isNaN(opacity) || opacity < 0){{ opacity = 0; }}
return 'rgba(' + red + ',' + green + ',' + blue + ',' + opacity + ')';
}}
"""
OPACITY_MATH = {
'linear': """
var opacity = (score - ({min})) / (({max}) - ({min}));
""",
'logarithmic': """
var opacity = (score - ({min})) / (({max}) - ({min}));
opacity = Math.log10(opacity) + Math.log10({max});
""",
'blast': """
var opacity = 0;
if(score == 0.0) {
opacity = 1;
} else{
opacity = (20 - Math.log10(score)) / 180;
}
"""
}
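    # Worked example for the 'linear' scaling above (illustrative numbers):
    # with min = 0 and max = 1000, a feature score of 250 gives
    # opacity = (250 - 0) / (1000 - 0) = 0.25.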
BREWER_COLOUR_IDX = 0
BREWER_COLOUR_SCHEMES = [
(166, 206, 227),
(31, 120, 180),
(178, 223, 138),
(51, 160, 44),
(251, 154, 153),
(227, 26, 28),
(253, 191, 111),
(255, 127, 0),
(202, 178, 214),
(106, 61, 154),
(255, 255, 153),
(177, 89, 40),
(228, 26, 28),
(55, 126, 184),
(77, 175, 74),
(152, 78, 163),
(255, 127, 0),
]
BREWER_DIVERGING_PALLETES = {
'BrBg': ("#543005", "#003c30"),
'PiYg': ("#8e0152", "#276419"),
'PRGn': ("#40004b", "#00441b"),
'PuOr': ("#7f3b08", "#2d004b"),
'RdBu': ("#67001f", "#053061"),
'RdGy': ("#67001f", "#1a1a1a"),
'RdYlBu': ("#a50026", "#313695"),
'RdYlGn': ("#a50026", "#006837"),
'Spectral': ("#9e0142", "#5e4fa2"),
}
def __init__(self):
self.brewer_colour_idx = 0
def rgb_from_hex(self, hexstr):
# http://stackoverflow.com/questions/4296249/how-do-i-convert-a-hex-triplet-to-an-rgb-tuple-and-back
return struct.unpack('BBB', hexstr.decode('hex'))
def min_max_gff(self, gff_file):
min_val = None
max_val = None
with open(gff_file, 'r') as handle:
for line in handle:
try:
value = float(line.split('\t')[5])
                    if min_val is None or value < min_val:
                        min_val = value
                    if max_val is None or value > max_val:
                        max_val = value
except Exception:
pass
return min_val, max_val
def hex_from_rgb(self, r, g, b):
return '#%02x%02x%02x' % (r, g, b)
def _get_colours(self):
r, g, b = self.BREWER_COLOUR_SCHEMES[self.brewer_colour_idx % len(self.BREWER_COLOUR_SCHEMES)]
self.brewer_colour_idx += 1
return r, g, b
def parse_colours(self, track, trackFormat, gff3=None):
        # Wiggle tracks have a bicolor palette
trackConfig = {'style': {}}
if trackFormat == 'wiggle':
trackConfig['style']['pos_color'] = track['wiggle']['color_pos']
trackConfig['style']['neg_color'] = track['wiggle']['color_neg']
if trackConfig['style']['pos_color'] == '__auto__':
trackConfig['style']['neg_color'] = self.hex_from_rgb(*self._get_colours())
trackConfig['style']['pos_color'] = self.hex_from_rgb(*self._get_colours())
# Wiggle tracks can change colour at a specified place
bc_pivot = track['wiggle']['bicolor_pivot']
if bc_pivot not in ('mean', 'zero'):
# The values are either one of those two strings
# or a number
bc_pivot = float(bc_pivot)
trackConfig['bicolor_pivot'] = bc_pivot
elif 'scaling' in track:
if track['scaling']['method'] == 'ignore':
if track['scaling']['scheme']['color'] != '__auto__':
trackConfig['style']['color'] = track['scaling']['scheme']['color']
else:
trackConfig['style']['color'] = self.hex_from_rgb(*self._get_colours())
else:
# Scored method
algo = track['scaling']['algo']
# linear, logarithmic, blast
scales = track['scaling']['scales']
# type __auto__, manual (min, max)
scheme = track['scaling']['scheme']
# scheme -> (type (opacity), color)
# ==================================
# GENE CALLS OR BLAST
# ==================================
if trackFormat == 'blast':
red, green, blue = self._get_colours()
color_function = self.COLOR_FUNCTION_TEMPLATE.format(**{
'score': "feature._parent.get('score')",
'opacity': self.OPACITY_MATH['blast'],
'red': red,
'green': green,
'blue': blue,
})
trackConfig['style']['color'] = color_function.replace('\n', '')
elif trackFormat == 'gene_calls':
# Default values, based on GFF3 spec
min_val = 0
max_val = 1000
                # Get min/max and build a scoring function since JBrowse
                # doesn't provide one
if scales['type'] == 'automatic' or scales['type'] == '__auto__':
min_val, max_val = self.min_max_gff(gff3)
else:
min_val = scales.get('min', 0)
max_val = scales.get('max', 1000)
if scheme['color'] == '__auto__':
user_color = 'undefined'
auto_color = "'%s'" % self.hex_from_rgb(*self._get_colours())
elif scheme['color'].startswith('#'):
user_color = "'%s'" % self.hex_from_rgb(*self.rgb_from_hex(scheme['color'][1:]))
auto_color = 'undefined'
else:
user_color = 'undefined'
auto_color = "'%s'" % self.hex_from_rgb(*self._get_colours())
color_function = self.COLOR_FUNCTION_TEMPLATE_QUAL.format(**{
'opacity': self.OPACITY_MATH[algo].format(**{'max': max_val, 'min': min_val}),
'user_spec_color': user_color,
'auto_gen_color': auto_color,
})
trackConfig['style']['color'] = color_function.replace('\n', '')
return trackConfig
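# Illustrative trackConfig returned by ColorScaling.parse_colours() for a
# wiggle track with explicit colours (hypothetical input values):
#
#     {'style': {'pos_color': '#0000ff', 'neg_color': '#ff0000'},
#      'bicolor_pivot': 'zero'}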
def etree_to_dict(t):
d = {t.tag: {} if t.attrib else None}
children = list(t)
if children:
dd = defaultdict(list)
for dc in map(etree_to_dict, children):
for k, v in dc.iteritems():
dd[k].append(v)
d = {t.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.iteritems()}}
if t.attrib:
d[t.tag].update(('@' + k, v) for k, v in t.attrib.iteritems())
if t.text:
text = t.text.strip()
if children or t.attrib:
if text:
d[t.tag]['#text'] = text
else:
d[t.tag] = text
return d
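# Illustrative behaviour of etree_to_dict (hypothetical XML): an element such
# as <wiggle color_pos="#0000ff" color_neg="#ff0000"/> becomes
# {'wiggle': {'@color_pos': '#0000ff', '@color_neg': '#ff0000'}}. Attributes
# are prefixed with '@', element text ends up under '#text' when attributes
# or children are present, and repeated child tags collapse into lists.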
# score comes from feature._parent.get('score') or feature.get('score')
INSTALLED_TO = os.path.dirname(os.path.realpath(__file__))
class JbrowseConnector(object):
def __init__(self, jbrowse, outdir, genomes, standalone=False, gencode=1):
self.TN_TABLE = {
'gff3': '--gff',
'gff': '--gff',
'bed': '--bed',
'genbank': '--gbk',
}
self.cs = ColorScaling()
self.jbrowse = jbrowse
self.outdir = outdir
self.genome_paths = genomes
self.standalone = standalone
self.gencode = gencode
if standalone:
self.clone_jbrowse(self.jbrowse, self.outdir)
else:
try:
os.makedirs(self.outdir)
except OSError:
# Ignore if the folder exists
pass
self.process_genomes()
self.update_gencode()
def update_gencode(self):
table = CodonTable.unambiguous_dna_by_id[int(self.gencode)]
trackList = os.path.join(self.outdir, 'data', 'trackList.json')
with open(trackList, 'r') as handle:
trackListData = json.load(handle)
trackListData['tracks'][0].update({
'codonStarts': table.start_codons,
'codonStops': table.stop_codons,
'codonTable': table.forward_table,
})
with open(trackList, 'w') as handle:
json.dump(trackListData, handle, indent=2)
def subprocess_check_call(self, command):
log.debug('cd %s && %s', self.outdir, ' '.join(command))
subprocess.check_call(command, cwd=self.outdir)
def _jbrowse_bin(self, command):
return os.path.realpath(os.path.join(self.jbrowse, 'bin', command))
def process_genomes(self):
for genome_path in self.genome_paths:
self.subprocess_check_call([
'perl', self._jbrowse_bin('prepare-refseqs.pl'),
'--fasta', genome_path])
# Generate name
self.subprocess_check_call([
'perl', self._jbrowse_bin('generate-names.pl'),
'--hashBits', '16'
])
def _add_json(self, json_data):
cmd = [
'perl', self._jbrowse_bin('add-json.pl'),
json.dumps(json_data),
os.path.join('data', 'trackList.json')
]
self.subprocess_check_call(cmd)
def _add_track_json(self, json_data):
if len(json_data.keys()) == 0:
return
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.write(json.dumps(json_data))
tmp.close()
cmd = ['perl', self._jbrowse_bin('add-track-json.pl'), tmp.name,
os.path.join('data', 'trackList.json')]
self.subprocess_check_call(cmd)
os.unlink(tmp.name)
def _blastxml_to_gff3(self, xml, min_gap=10):
gff3_unrebased = tempfile.NamedTemporaryFile(delete=False)
cmd = ['python', os.path.join(INSTALLED_TO, 'blastxml_to_gapped_gff3.py'),
'--trim', '--trim_end', '--min_gap', str(min_gap), xml]
log.debug('cd %s && %s > %s', self.outdir, ' '.join(cmd), gff3_unrebased.name)
subprocess.check_call(cmd, cwd=self.outdir, stdout=gff3_unrebased)
gff3_unrebased.close()
return gff3_unrebased.name
def add_blastxml(self, data, trackData, blastOpts, **kwargs):
gff3 = self._blastxml_to_gff3(data, min_gap=blastOpts['min_gap'])
if 'parent' in blastOpts and blastOpts['parent'] != 'None':
gff3_rebased = tempfile.NamedTemporaryFile(delete=False)
cmd = ['python', os.path.join(INSTALLED_TO, 'gff3_rebase.py')]
if blastOpts.get('protein', 'false') == 'true':
cmd.append('--protein2dna')
cmd.extend([os.path.realpath(blastOpts['parent']), gff3])
log.debug('cd %s && %s > %s', self.outdir, ' '.join(cmd), gff3_rebased.name)
subprocess.check_call(cmd, cwd=self.outdir, stdout=gff3_rebased)
gff3_rebased.close()
# Replace original gff3 file
shutil.copy(gff3_rebased.name, gff3)
os.unlink(gff3_rebased.name)
config = {
'glyph': 'JBrowse/View/FeatureGlyph/Segments',
"category": trackData['category'],
}
clientConfig = trackData['style']
cmd = ['perl', self._jbrowse_bin('flatfile-to-json.pl'),
'--gff', gff3,
'--trackLabel', trackData['label'],
'--key', trackData['key'],
'--clientConfig', json.dumps(clientConfig),
'--config', json.dumps(config),
'--trackType', 'JBrowse/View/Track/CanvasFeatures'
]
self.subprocess_check_call(cmd)
os.unlink(gff3)
def add_bigwig(self, data, trackData, wiggleOpts, **kwargs):
dest = os.path.join('data', 'raw', trackData['label'] + '.bw')
cmd = ['ln', data, dest]
self.subprocess_check_call(cmd)
trackData.update({
"urlTemplate": os.path.join('..', dest),
"storeClass": "JBrowse/Store/SeqFeature/BigWig",
"type": "JBrowse/View/Track/Wiggle/Density",
})
trackData['type'] = wiggleOpts['type']
trackData['variance_band'] = True if wiggleOpts['variance_band'] == 'true' else False
if 'min' in wiggleOpts and 'max' in wiggleOpts:
trackData['min_score'] = wiggleOpts['min']
trackData['max_score'] = wiggleOpts['max']
else:
trackData['autoscale'] = wiggleOpts.get('autoscale', 'local')
self._add_track_json(trackData)
def add_bam(self, data, trackData, bamOpts, bam_index=None, **kwargs):
dest = os.path.join('data', 'raw', trackData['label'] + '.bam')
cmd = ['ln', '-s', os.path.realpath(data), dest]
self.subprocess_check_call(cmd)
cmd = ['ln', '-s', os.path.realpath(bam_index), dest + '.bai']
self.subprocess_check_call(cmd)
trackData.update({
"urlTemplate": os.path.join('..', dest),
"type": "JBrowse/View/Track/Alignments2",
"storeClass": "JBrowse/Store/SeqFeature/BAM",
})
self._add_track_json(trackData)
if bamOpts.get('auto_snp', 'false') == 'true':
trackData2 = copy.copy(trackData)
trackData2.update({
"type": "JBrowse/View/Track/SNPCoverage",
"key": trackData['key'] + " - SNPs/Coverage",
"label": trackData['label'] + "_autosnp",
})
self._add_track_json(trackData2)
def add_vcf(self, data, trackData, vcfOpts={}, **kwargs):
dest = os.path.join('data', 'raw', trackData['label'] + '.vcf')
# ln?
cmd = ['ln', '-s', data, dest]
self.subprocess_check_call(cmd)
cmd = ['bgzip', dest]
self.subprocess_check_call(cmd)
cmd = ['tabix', '-p', 'vcf', dest + '.gz']
self.subprocess_check_call(cmd)
trackData.update({
"urlTemplate": os.path.join('..', dest + '.gz'),
"type": "JBrowse/View/Track/HTMLVariants",
"storeClass": "JBrowse/Store/SeqFeature/VCFTabix",
})
self._add_track_json(trackData)
def add_features(self, data, format, trackData, gffOpts, **kwargs):
cmd = [
'perl', self._jbrowse_bin('flatfile-to-json.pl'),
self.TN_TABLE.get(format, 'gff'),
data,
'--trackLabel', trackData['label'],
# '--trackType', 'JBrowse/View/Track/CanvasFeatures',
'--key', trackData['key']
]
config = copy.copy(trackData)
clientConfig = trackData['style']
del config['style']
if 'match' in gffOpts:
config['glyph'] = 'JBrowse/View/FeatureGlyph/Segments'
cmd += ['--type', gffOpts['match']]
cmd += ['--clientConfig', json.dumps(clientConfig),
]
if 'trackType' in gffOpts:
cmd += [
'--trackType', gffOpts['trackType']
]
else:
cmd += [
'--trackType', 'JBrowse/View/Track/CanvasFeatures'
]
cmd.extend(['--config', json.dumps(config)])
self.subprocess_check_call(cmd)
def process_annotations(self, track):
outputTrackConfig = {
'style': {
'label': track['style'].get('label', 'description'),
'className': track['style'].get('className', 'feature'),
'description': track['style'].get('description', ''),
},
'category': track['category'],
}
for i, (dataset_path, dataset_ext, track_human_label) in enumerate(track['trackfiles']):
log.info('Processing %s / %s', track['category'], track_human_label)
outputTrackConfig['key'] = track_human_label
hashData = [dataset_path, track_human_label, track['category']]
outputTrackConfig['label'] = hashlib.md5('|'.join(hashData)).hexdigest() + '_%s' % i
# Colour parsing is complex due to different track types having
# different colour options.
colourOptions = self.cs.parse_colours(track['conf']['options'], track['format'], gff3=dataset_path)
            # This used to be done with a dict.update() call, but that wiped
            # out any previous style settings.
for key in colourOptions:
if key == 'style':
for subkey in colourOptions['style']:
outputTrackConfig['style'][subkey] = colourOptions['style'][subkey]
else:
outputTrackConfig[key] = colourOptions[key]
# import pprint; pprint.pprint(track)
# import sys; sys.exit()
if dataset_ext in ('gff', 'gff3', 'bed'):
self.add_features(dataset_path, dataset_ext, outputTrackConfig,
track['conf']['options']['gff'])
elif dataset_ext == 'bigwig':
self.add_bigwig(dataset_path, outputTrackConfig,
track['conf']['options']['wiggle'])
elif dataset_ext == 'bam':
real_indexes = track['conf']['options']['pileup']['bam_indices']['bam_index']
if not isinstance(real_indexes, list):
# <bam_indices>
# <bam_index>/path/to/a.bam.bai</bam_index>
# </bam_indices>
#
# The above will result in the 'bam_index' key containing a
# string. If there are two or more indices, the container
# becomes a list. Fun!
real_indexes = [real_indexes]
self.add_bam(dataset_path, outputTrackConfig,
track['conf']['options']['pileup'],
bam_index=real_indexes[i])
elif dataset_ext == 'blastxml':
self.add_blastxml(dataset_path, outputTrackConfig, track['conf']['options']['blast'])
elif dataset_ext == 'vcf':
self.add_vcf(dataset_path, outputTrackConfig)
# Return non-human label for use in other fields
yield outputTrackConfig['label']
def add_final_data(self, data):
viz_data = {}
if len(data['visibility']['default_on']) > 0:
viz_data['defaultTracks'] = ','.join(data['visibility']['default_on'])
if len(data['visibility']['always']) > 0:
viz_data['alwaysOnTracks'] = ','.join(data['visibility']['always'])
if len(data['visibility']['force']) > 0:
viz_data['forceTracks'] = ','.join(data['visibility']['force'])
generalData = {}
if data['general']['aboutDescription'] is not None:
generalData['aboutThisBrowser'] = {'description': data['general']['aboutDescription'].strip()}
generalData['view'] = {
'trackPadding': data['general']['trackPadding']
}
generalData['shareLink'] = (data['general']['shareLink'] == 'true')
generalData['show_tracklist'] = (data['general']['show_tracklist'] == 'true')
generalData['show_nav'] = (data['general']['show_nav'] == 'true')
generalData['show_overview'] = (data['general']['show_overview'] == 'true')
generalData['show_menu'] = (data['general']['show_menu'] == 'true')
generalData['hideGenomeOptions'] = (data['general']['hideGenomeOptions'] == 'true')
viz_data.update(generalData)
self._add_json(viz_data)
def clone_jbrowse(self, jbrowse_dir, destination):
"""Clone a JBrowse directory into a destination directory.
"""
        # JBrowse seems to have included some bad symlinks; cp ignores bad
        # symlinks, unlike copytree.
cmd = ['cp', '-r', os.path.join(jbrowse_dir, '.'), destination]
log.debug(' '.join(cmd))
subprocess.check_call(cmd)
cmd = ['mkdir', '-p', os.path.join(destination, 'data', 'raw')]
log.debug(' '.join(cmd))
subprocess.check_call(cmd)
# http://unix.stackexchange.com/a/38691/22785
# JBrowse releases come with some broken symlinks
cmd = ['find', destination, '-type', 'l', '-xtype', 'l', '-exec', 'rm', "'{}'", '+']
log.debug(' '.join(cmd))
subprocess.check_call(cmd)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="", epilog="")
parser.add_argument('xml', type=file, help='Track Configuration')
parser.add_argument('--jbrowse', help='Folder containing a jbrowse release')
parser.add_argument('--outdir', help='Output directory', default='out')
parser.add_argument('--standalone', help='Standalone mode includes a copy of JBrowse', action='store_true')
args = parser.parse_args()
tree = ET.parse(args.xml.name)
root = tree.getroot()
jc = JbrowseConnector(
jbrowse=args.jbrowse,
outdir=args.outdir,
genomes=[os.path.realpath(x.text) for x in root.findall('metadata/genomes/genome')],
standalone=args.standalone,
gencode=root.find('metadata/gencode').text
)
extra_data = {
'visibility': {
'default_on': [],
'default_off': [],
'force': [],
'always': [],
},
'general': {
'defaultLocation': root.find('metadata/general/defaultLocation').text,
'trackPadding': int(root.find('metadata/general/trackPadding').text),
'shareLink': root.find('metadata/general/shareLink').text,
'aboutDescription': root.find('metadata/general/aboutDescription').text,
'show_tracklist': root.find('metadata/general/show_tracklist').text,
'show_nav': root.find('metadata/general/show_nav').text,
'show_overview': root.find('metadata/general/show_overview').text,
'show_menu': root.find('metadata/general/show_menu').text,
'hideGenomeOptions': root.find('metadata/general/hideGenomeOptions').text,
}
}
for track in root.findall('tracks/track'):
track_conf = {}
track_conf['trackfiles'] = [
(os.path.realpath(x.attrib['path']), x.attrib['ext'], x.attrib['label'])
for x in track.findall('files/trackFile')
]
track_conf['category'] = track.attrib['cat']
track_conf['format'] = track.attrib['format']
try:
# Only pertains to gff3 + blastxml. TODO?
track_conf['style'] = {t.tag: t.text for t in track.find('options/style')}
except TypeError:
track_conf['style'] = {}
pass
track_conf['conf'] = etree_to_dict(track.find('options'))
keys = jc.process_annotations(track_conf)
for key in keys:
extra_data['visibility'][track.attrib.get('visibility', 'default_off')].append(key)
jc.add_final_data(extra_data)
|
|
"""This section introduces classes used by pulsar
:ref:`wsgi application <apps-wsgi>` to pass a request/response state
during an HTTP request.
.. contents::
:local:
The :class:`WsgiRequest` is a thin wrapper around a WSGI ``environ``
dictionary.
It contains only the ``environ`` as its private data.
The :class:`WsgiResponse`, which is available in the
:class:`WsgiRequest.response` attribute, is an iterable over bytestring with
several utility methods for manipulating headers and asynchronous content.
Environ Mixin
=====================
.. autoclass:: EnvironMixin
:members:
:member-order: bysource
.. _app-wsgi-request:
Wsgi Request
=====================
.. autoclass:: WsgiRequest
:members:
:member-order: bysource
.. _wsgi-response:
Wsgi Response
=====================
.. autoclass:: WsgiResponse
:members:
:member-order: bysource
Wsgi File Wrapper
=====================
.. autoclass:: FileWrapper
:members:
:member-order: bysource
.. _WSGI: http://www.wsgi.org
.. _AJAX: http://en.wikipedia.org/wiki/Ajax_(programming)
.. _TLS: http://en.wikipedia.org/wiki/Transport_Layer_Security
"""
from functools import reduce, partial
from http.client import responses
import asyncio
from pulsar import Future, chain_future
from pulsar.utils.structures import AttributeDictionary
from pulsar.utils.httpurl import (Headers, SimpleCookie,
has_empty_content, REDIRECT_CODES,
ENCODE_URL_METHODS, JSON_CONTENT_TYPES,
remove_double_slash, iri_to_uri,
is_absolute_uri, parse_options_header)
from .content import HtmlDocument
from .utils import (set_wsgi_request_class, set_cookie, query_dict,
parse_accept_header, LOGGER)
from .structures import ContentAccept, CharsetAccept, LanguageAccept
from .formdata import parse_form_data
__all__ = ['EnvironMixin', 'WsgiResponse',
'WsgiRequest', 'cached_property']
MAX_BUFFER_SIZE = 2**16
ONEMB = 2**20
def redirect(path, code=None, permanent=False):
if code is None:
code = 301 if permanent else 302
assert code in REDIRECT_CODES, 'Invalid redirect status code.'
return WsgiResponse(code, response_headers=[('location', path)])
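# Illustrative use of redirect(): ``redirect('/login')`` builds a 302
# WsgiResponse carrying a ``location: /login`` header, while
# ``redirect('/login', permanent=True)`` yields a 301 instead.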
def cached_property(method):
name = method.__name__
def _(self):
if name not in self.cache:
self.cache[name] = method(self)
return self.cache[name]
return property(_, doc=method.__doc__)
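# A minimal sketch of how ``cached_property`` behaves (assumes an object
# exposing a dict-like ``cache`` attribute, as EnvironMixin provides below):
#
#     class Demo:
#         def __init__(self):
#             self.cache = {}
#
#         @cached_property
#         def answer(self):
#             return 42
#
# The first access of ``demo.answer`` on an instance runs the method and
# stores the result under ``self.cache['answer']``; later accesses return
# the cached value.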
def wsgi_encoder(gen, encoding):
for data in gen:
if isinstance(data, str):
yield data.encode(encoding)
else:
yield data
class WsgiResponse:
"""A WSGI response.
Instances are callable using the standard WSGI call and, importantly,
iterable::
response = WsgiResponse(200)
A :class:`WsgiResponse` is an iterable over bytes to send back to the
requesting client.
    .. attribute:: status_code
        Integer indicating the HTTP status code (e.g. 200)
    .. attribute:: response
        String indicating the HTTP status (e.g. 'OK')
    .. attribute:: status
        String indicating the HTTP status code and response (e.g. '200 OK')
.. attribute:: content_type
The content type of this response. Can be ``None``.
.. attribute:: headers
The :class:`.Headers` container for this response.
.. attribute:: environ
The dictionary of WSGI environment if passed to the constructor.
.. attribute:: cookies
A python :class:`SimpleCookie` container of cookies included in the
request as well as cookies set during the response.
"""
_iterated = False
_started = False
DEFAULT_STATUS_CODE = 200
def __init__(self, status=None, content=None, response_headers=None,
content_type=None, encoding=None, environ=None,
can_store_cookies=True):
self.environ = environ
self.status_code = status or self.DEFAULT_STATUS_CODE
self.encoding = encoding
self.cookies = SimpleCookie()
self.headers = Headers(response_headers, kind='server')
self.content = content
self._can_store_cookies = can_store_cookies
if content_type is not None:
self.content_type = content_type
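    # Minimal illustrative usage (hypothetical content):
    #
    #     response = WsgiResponse(200, content='hello',
    #                             content_type='text/plain')
    #     assert response.status == '200 OK'
    #     body = list(response)    # -> [b'hello']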
@property
def started(self):
return self._started
@property
def iterated(self):
return self._iterated
@property
def path(self):
if self.environ:
return self.environ.get('PATH_INFO', '')
@property
def method(self):
if self.environ:
return self.environ.get('REQUEST_METHOD')
@property
def connection(self):
if self.environ:
return self.environ.get('pulsar.connection')
@property
def content(self):
return self._content
@content.setter
def content(self, content):
if not self._iterated:
if content is None:
content = ()
else:
if isinstance(content, str):
if not self.encoding: # use utf-8 if not set
self.encoding = 'utf-8'
content = content.encode(self.encoding)
if isinstance(content, bytes):
content = (content,)
self._content = content
else:
raise RuntimeError('Cannot set content. Already iterated')
def _get_content_type(self):
return self.headers.get('content-type')
def _set_content_type(self, typ):
if typ:
self.headers['content-type'] = typ
else:
self.headers.pop('content-type', None)
content_type = property(_get_content_type, _set_content_type)
@property
def response(self):
return responses.get(self.status_code)
@property
def status(self):
return '%s %s' % (self.status_code, self.response)
def __str__(self):
return self.status
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self)
@property
def is_streamed(self):
"""Check if the response is streamed.
A streamed response is an iterable with no length information.
In this case streamed means that there is no information about
the number of iterations.
This is usually `True` if a generator is passed to the response object.
"""
try:
len(self.content)
except TypeError:
return True
return False
def can_set_cookies(self):
if self.status_code < 400:
return self._can_store_cookies
def length(self):
if not self.is_streamed:
return reduce(lambda x, y: x+len(y), self.content, 0)
def start(self, start_response):
assert not self._started
self._started = True
return start_response(self.status, self.get_headers())
def __iter__(self):
if self._iterated:
raise RuntimeError('WsgiResponse can be iterated once only')
self._started = True
self._iterated = True
if self.is_streamed:
return wsgi_encoder(self.content, self.encoding or 'utf-8')
else:
return iter(self.content)
def close(self):
"""Close this response, required by WSGI
"""
if self.is_streamed:
if hasattr(self.content, 'close'):
self.content.close()
def set_cookie(self, key, **kwargs):
"""
Sets a cookie.
``expires`` can be a string in the correct format or a
``datetime.datetime`` object in UTC. If ``expires`` is a datetime
object then ``max_age`` will be calculated.
"""
set_cookie(self.cookies, key, **kwargs)
def delete_cookie(self, key, path='/', domain=None):
set_cookie(self.cookies, key, max_age=0, path=path, domain=domain,
expires='Thu, 01-Jan-1970 00:00:00 GMT')
def get_headers(self):
"""The list of headers for this response
"""
headers = self.headers
if has_empty_content(self.status_code, self.method):
headers.pop('content-type', None)
headers.pop('content-length', None)
self._content = ()
else:
if not self.is_streamed:
cl = 0
for c in self.content:
cl += len(c)
if cl == 0 and self.content_type in JSON_CONTENT_TYPES:
self._content = (b'{}',)
cl = len(self._content[0])
headers['Content-Length'] = str(cl)
ct = self.content_type
# content type encoding available
if self.encoding:
ct = ct or 'text/plain'
if 'charset=' not in ct:
ct = '%s; charset=%s' % (ct, self.encoding)
if ct:
headers['Content-Type'] = ct
if self.can_set_cookies():
for c in self.cookies.values():
headers.add_header('Set-Cookie', c.OutputString())
return list(headers)
def has_header(self, header):
return header in self.headers
__contains__ = has_header
def __setitem__(self, header, value):
self.headers[header] = value
def __getitem__(self, header):
return self.headers[header]
class EnvironMixin:
"""A wrapper around a WSGI_ environ.
Instances of this class have the :attr:`environ` attribute as their
only private data. Every other attribute is stored in the :attr:`environ`
itself at the ``pulsar.cache`` wsgi-extension key.
.. attribute:: environ
WSGI_ environ dictionary
"""
__slots__ = ('environ',)
def __init__(self, environ, name=None):
self.environ = environ
if 'pulsar.cache' not in environ:
environ['pulsar.cache'] = AttributeDictionary()
self.cache.mixins = {}
if name:
self.cache.mixins[name] = self
@property
def cache(self):
"""An :ref:`attribute dictionary <attribute-dictionary>` of
pulsar-specific data stored in the :attr:`environ` at
the wsgi-extension key ``pulsar.cache``
"""
return self.environ['pulsar.cache']
@property
def connection(self):
"""The :class:`.Connection` handling the request
"""
return self.environ.get('pulsar.connection')
@property
def _loop(self):
"""Event loop if :attr:`connection` is available.
"""
c = self.connection
if c:
return c._loop
def __getattr__(self, name):
mixin = self.cache.mixins.get(name)
if mixin is None:
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, name))
return mixin
def get(self, key, default=None):
"""Shortcut to the :attr:`environ` get method."""
return self.environ.get(key, default)
class WsgiRequest(EnvironMixin):
"""An :class:`EnvironMixin` for wsgi requests."""
def __init__(self, environ, app_handler=None, urlargs=None):
super().__init__(environ)
self.cache.cfg = environ.get('pulsar.cfg', {})
if app_handler:
self.cache.app_handler = app_handler
self.cache.urlargs = urlargs
def __repr__(self):
return self.path
def __str__(self):
return self.__repr__()
@cached_property
def content_types(self):
"""List of content types this client supports as a
:class:`.ContentAccept` object.
        Obtained from the ``Accept`` request header.
"""
return parse_accept_header(self.environ.get('HTTP_ACCEPT'),
ContentAccept)
@cached_property
def charsets(self):
"""List of charsets this client supports as a
:class:`.CharsetAccept` object.
        Obtained from the ``Accept-Charset`` request header.
"""
return parse_accept_header(self.environ.get('HTTP_ACCEPT_CHARSET'),
CharsetAccept)
@cached_property
def encodings(self):
"""List of encodings this client supports as
:class:`.Accept` object.
        Obtained from the ``Accept-Encoding`` request header.
        Encodings in HTTP terms are compression encodings such as gzip.
        For charsets, have a look at the :attr:`charsets` attribute.
"""
return parse_accept_header(self.environ.get('HTTP_ACCEPT_ENCODING'))
@cached_property
def languages(self):
"""List of languages this client accepts as
:class:`.LanguageAccept` object.
Obtained from the ``Accept-Language`` request header.
"""
return parse_accept_header(self.environ.get('HTTP_ACCEPT_LANGUAGE'),
LanguageAccept)
@cached_property
def cookies(self):
"""Container of request cookies
"""
cookies = SimpleCookie()
cookie = self.environ.get('HTTP_COOKIE')
if cookie:
cookies.load(cookie)
return cookies
@property
def app_handler(self):
"""The WSGI application handling this request.
The WSGI handler is responsible for setting this value in the
same way as the :class:`.Router` does.
"""
return self.cache.app_handler
@property
def urlargs(self):
"""Dictionary of url parameters obtained when matching a
:ref:`router <wsgi-router>` with this request :attr:`path`."""
return self.cache.urlargs
@property
def cfg(self):
"""The :ref:`config container <settings>` of the server
"""
return self.cache.cfg
@cached_property
def response(self):
"""The :class:`WsgiResponse` for this client request.
"""
return WsgiResponse(environ=self.environ)
#######################################################################
# environ shortcuts
@property
def is_xhr(self):
"""``True`` if this is an AJAX_ request
"""
return self.environ.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
@property
def is_secure(self):
"""``True`` if this request is via a TLS_ connection
"""
return self.environ.get('HTTPS') == 'on'
@property
def path(self):
"""Shortcut to the :attr:`~EnvironMixin.environ` ``PATH_INFO`` value.
"""
return self.environ.get('PATH_INFO', '/')
@property
def uri(self):
return self.absolute_uri()
@property
def method(self):
"""The request method (uppercase)."""
return self.environ['REQUEST_METHOD']
@cached_property
def encoding(self):
return self.content_type_options[1].get('charset', 'utf-8')
@cached_property
def content_type_options(self):
content_type = self.environ.get('CONTENT_TYPE')
if content_type:
return parse_options_header(content_type)
else:
return None, {}
def data_and_files(self, data=True, files=True, stream=None):
"""Retrieve body data.
Returns a two-element tuple of a
:class:`~.MultiValueDict` containing data from
the request body, and data from uploaded files.
If the body data is not ready, return a :class:`~asyncio.Future`
which results in the tuple.
The result is cached.
"""
if self.method in ENCODE_URL_METHODS:
value = {}, None
else:
value = self.cache.data_and_files
if not value:
return self._data_and_files(data, files, stream)
elif data and files:
return value
elif data:
return value[0]
elif files:
return value[1]
else:
return None
def body_data(self):
"""A :class:`~.MultiValueDict` containing data from the request body.
"""
return self.data_and_files(files=False)
def _data_and_files(self, data=True, files=True, stream=None, future=None):
if future is None:
data_files = parse_form_data(self.environ, stream=stream)
if isinstance(data_files, Future):
return chain_future(
data_files,
partial(self._data_and_files, data, files, stream))
else:
data_files = future
self.cache.data_and_files = data_files
return self.data_and_files(data, files, stream)
@cached_property
def url_data(self):
"""A (cached) dictionary containing data from the ``QUERY_STRING``
in :attr:`~.EnvironMixin.environ`.
"""
return query_dict(self.environ.get('QUERY_STRING', ''),
encoding=self.encoding)
@cached_property
def html_document(self):
"""Return a cached instance of :class:`.HtmlDocument`."""
return HtmlDocument()
def get_host(self, use_x_forwarded=True):
"""Returns the HTTP host using the environment or request headers."""
# We try three options, in order of decreasing preference.
if use_x_forwarded and ('HTTP_X_FORWARDED_HOST' in self.environ):
host = self.environ['HTTP_X_FORWARDED_HOST']
elif 'HTTP_HOST' in self.environ:
host = self.environ['HTTP_HOST']
else:
# Reconstruct the host using the algorithm from PEP 333.
host = self.environ['SERVER_NAME']
server_port = str(self.environ['SERVER_PORT'])
if server_port != ('443' if self.is_secure else '80'):
host = '%s:%s' % (host, server_port)
return host
def get_client_address(self, use_x_forwarded=True):
"""Obtain the client IP address
"""
xfor = self.environ.get('HTTP_X_FORWARDED_FOR')
if use_x_forwarded and xfor:
return xfor.split(',')[-1].strip()
else:
return self.environ['REMOTE_ADDR']
def full_path(self, *args, **query):
"""Return a full path"""
path = None
if args:
if len(args) > 1:
raise TypeError("full_url() takes exactly 1 argument "
"(%s given)" % len(args))
path = args[0]
if not path:
path = self.path
elif not path.startswith('/'):
path = remove_double_slash('%s/%s' % (self.path, path))
return iri_to_uri(path, query)
def absolute_uri(self, location=None, scheme=None):
"""Builds an absolute URI from ``location`` and variables
available in this request.
If no ``location`` is specified, the relative URI is built from
:meth:`full_path`.
"""
if not is_absolute_uri(location):
location = self.full_path(location)
if not scheme:
scheme = self.is_secure and 'https' or 'http'
base = '%s://%s' % (scheme, self.get_host())
return '%s%s' % (base, location)
elif not scheme:
return iri_to_uri(location)
else:
raise ValueError('Absolute location with scheme not valid')
def redirect(self, path, **kw):
"""Redirect to a different ``path``
"""
return redirect(path, **kw)
set_wsgi_request_class(WsgiRequest)
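# --- Illustrative usage sketch (not part of the original module) ---
def _wsgi_request_example():  # pragma: no cover
    """Minimal sketch showing how a :class:`WsgiRequest` can be built from
    a hand-rolled WSGI environ. The keys below are standard WSGI/CGI
    variables; the values are hypothetical.
    """
    environ = {'REQUEST_METHOD': 'GET',
               'PATH_INFO': '/items',
               'QUERY_STRING': 'page=2',
               'SERVER_NAME': 'localhost',
               'SERVER_PORT': '8060'}
    request = WsgiRequest(environ)
    assert request.method == 'GET'
    assert request.path == '/items'
    # Host falls back to SERVER_NAME:SERVER_PORT when HTTP_HOST is missing
    assert request.get_host() == 'localhost:8060'
    return request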
def close_object(iterator):
if hasattr(iterator, 'close'):
try:
iterator.close()
except Exception:
LOGGER.exception('Error while closing wsgi iterator')
class FileWrapper:
"""WSGI File wrapper class.
Available directly from the ``wsgi.file_wrapper`` key in the WSGI environ
dictionary. Alternatively one can use the :func:`~file_response`
high level function for serving local files.
"""
def __init__(self, file, block=None):
self.file = file
self.block = max(block or ONEMB, MAX_BUFFER_SIZE)
def __iter__(self):
while True:
data = self.file.read(self.block)
if not data:
break
future = asyncio.Future()
future.set_result(data)
yield future
def close(self):
close_object(self.file)
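# --- Illustrative usage sketch (not part of the original module) ---
def _file_wrapper_example():  # pragma: no cover
    """Minimal sketch of :class:`FileWrapper`, which yields
    already-resolved asyncio futures, one per block read from the
    wrapped file-like object.
    """
    import io
    wrapper = FileWrapper(io.BytesIO(b'hello world'))
    chunks = [future.result() for future in wrapper]
    wrapper.close()
    return b''.join(chunks)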
|
|
"""
A Maximum-Entropy model for backbone torsion angles.
Reference: Rowicka and Otwinowski 2004
"""
import numpy
from csb.statistics.pdf import BaseDensity
class MaxentModel(BaseDensity):
"""
Fourier expansion of a biangular log-probability density
"""
def __init__(self, n, beta=1.):
"""
@param n: order of the Fourier expansion
@type n: int
@param beta: inverse temperature
@type beta: float
"""
super(MaxentModel, self).__init__()
self._n = int(n)
self._cc = numpy.zeros((self._n, self._n))
self._ss = numpy.zeros((self._n, self._n))
self._cs = numpy.zeros((self._n, self._n))
self._sc = numpy.zeros((self._n, self._n))
self._beta = float(beta)
@property
def beta(self):
"""
Inverse temperature
@rtype: float
"""
return self._beta
@property
def n(self):
"""
Order of the Fourier expansion
@rtype: int
"""
return self._n
def load_old(self, aa, f_name):
"""
Load set of expansion coefficients from isd.
@param aa: Amino acid type
@param f_name: File containing ramachandran definition
"""
import os
params, _energies = eval(open(os.path.expanduser(f_name)).read())
params = params[self._n - 1]
for k, l, x, f, g in params[aa]:
if f == 'cos' and g == 'cos':
self._cc[k, l] = -x
elif f == 'cos' and g == 'sin':
self._cs[k, l] = -x
elif f == 'sin' and g == 'cos':
self._sc[k, l] = -x
elif f == 'sin' and g == 'sin':
self._ss[k, l] = -x
def load(self, aa, f_name):
"""
Load set of expansion coefficients from isd+.
@param aa: Amino acid type
@param f_name: File containing ramachandran definition
"""
import os
from numpy import reshape, array
from csb.io import load
f_name = os.path.expanduser(f_name)
params, _energies = load(f_name)
params = params[self._n]
a, b, c, d = params[aa]
a, b, c, d = reshape(array(a), (self._n, self._n)).astype('d'), \
reshape(array(b), (self._n, self._n)).astype('d'), \
reshape(array(c), (self._n, self._n)).astype('d'), \
reshape(array(d), (self._n, self._n)).astype('d')
# Not a typo, I accidentally switched cos*sin and sin*cos
self._cc, self._cs, self._sc, self._ss = -a, -c, -b, -d
def _periodicities(self):
return numpy.arange(self._n)
def log_prob(self, x, y):
"""
Return the log-probability at positions (x,y).
@param x: x-coordinates for evaluation
@type x: array-like
@param y: y-coordinates for evaluation
@type y: array-like
"""
return -self.energy(x, y)
def set(self, coef):
"""
Set the Fourier expansion coefficients and calculate the
new partition function.
@param coef: expansion coefficients
@type coef: array like, with shape (4,n,n)
"""
self._cc[:, :], self._ss[:, :], self._cs[:, :], self._sc[:, :] = \
numpy.reshape(coef, (4, self._n, self._n))
self.normalize()
def get(self):
"""
Return current expansion coefficients.
"""
return numpy.array([self._cc, self._ss, self._cs, self._sc])
def energy(self, x, y=None):
"""
Return the energy at positions (x,y).
@param x: x-coordinates for evaluation
@type x: array-like
@param y: y-coordinates for evaluation
@type y: array-like
"""
from numpy import sin, cos, dot, multiply
k = self._periodicities()
cx, sx = cos(multiply.outer(k, x)), sin(multiply.outer(k, x))
if y is not None:
cy, sy = cos(multiply.outer(k, y)), sin(multiply.outer(k, y))
else:
cy, sy = cx, sx
return dot(dot(cx.T, self._cc), cy) + \
dot(dot(cx.T, self._cs), sy) + \
dot(dot(sx.T, self._sc), cy) + \
dot(dot(sx.T, self._ss), sy)
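# In index notation, the dot products above evaluate the expansion
#   E(x_i, y_j) = sum_{k,l} [ cc[k,l] cos(k x_i) cos(l y_j)
#                           + cs[k,l] cos(k x_i) sin(l y_j)
#                           + sc[k,l] sin(k x_i) cos(l y_j)
#                           + ss[k,l] sin(k x_i) sin(l y_j) ]
# returning a (len(x), len(y)) grid of energies.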
def sample_weights(self):
"""
Create a random set of expansion coefficients.
"""
from numpy import add
from numpy.random import standard_normal
k = self._periodicities()
k = add.outer(k ** 2, k ** 2)
self.set([standard_normal(k.shape) for i in range(4)])
self.normalize(True)
def prob(self, x, y):
"""
Return the probability of the configurations x cross y.
"""
from csb.numeric import exp
return exp(-self.beta * self(x, y))
def z(self):
"""
Calculate the partition function.
"""
from scipy.integrate import dblquad
from numpy import pi
return dblquad(self.prob, 0., 2 * pi, lambda x: 0., lambda x: 2 * pi)
def log_z(self, n=500, integration='simpson'):
"""
Calculate the log partition function.
"""
from numpy import pi, linspace, max
from csb.numeric import log, exp
if integration == 'simpson':
from csb.numeric import simpson_2d
x = linspace(0., 2 * pi, 2 * n + 1)
dx = x[1] - x[0]
f = -self.beta * self.energy(x)
f_max = max(f)
f -= f_max
I = simpson_2d(exp(f))
return log(I) + f_max + 2 * log(dx)
elif integration == 'trapezoidal':
from csb.numeric import trapezoidal_2d
x = linspace(0., 2 * pi, n)
dx = x[1] - x[0]
f = -self.beta * self.energy(x)
f_max = max(f)
f -= f_max
I = trapezoidal_2d(exp(f))
return log(I) + f_max + 2 * log(dx)
else:
raise NotImplementedError(
'Choose from trapezoidal and simpson-rule Integration')
def entropy(self, n=500):
"""
Calculate the entropy of the model.
@param n: number of integration points for numerical integration
@type n: integer
"""
from csb.numeric import trapezoidal_2d
from numpy import pi, linspace, max
from csb.numeric import log, exp
x = linspace(0., 2 * pi, n)
dx = x[1] - x[0]
f = -self.beta * self.energy(x)
f_max = max(f)
log_z = log(trapezoidal_2d(exp(f - f_max))) + f_max + 2 * log(dx)
average_energy = trapezoidal_2d(f * exp(f - f_max))\
* exp(f_max + 2 * log(dx) - log_z)
return -average_energy + log_z
def calculate_statistics(self, data):
"""
Calculate the sufficient statistics for the data.
"""
from numpy import cos, sin, dot, multiply
k = self._periodicities()
cx = cos(multiply.outer(k, data[:, 0]))
sx = sin(multiply.outer(k, data[:, 0]))
cy = cos(multiply.outer(k, data[:, 1]))
sy = sin(multiply.outer(k, data[:, 1]))
return dot(cx, cy.T), dot(sx, sy.T), dot(cx, sy.T), dot(sx, cy.T)
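# Each returned matrix has shape (n, n); for example, the first holds
#   sum_i cos(k * phi_i) * cos(l * psi_i)
# for data angles (phi_i, psi_i), i.e. the sufficient statistics that
# pair with the cc, ss, cs and sc coefficients in the log-likelihood.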
def normalize(self, normalize_full=True):
"""
Remove parameters which do not have any influence on the model
and compute the partition function.
@param normalize_full: compute partition function
@type normalize_full: boolean
"""
self._cc[0, 0] = 0.
self._ss[:, 0] = 0.
self._ss[0, :] = 0.
self._cs[:, 0] = 0.
self._sc[0, :] = 0.
if normalize_full:
self._cc[0, 0] = self.log_z()
class MaxentPosterior(object):
"""
Object to hold and calculate the posterior (log)probability
given an exponential family model and corresponding data.
"""
def __init__(self, model, data):
"""
@param model: MaxentModel
@param data: two-dimensional data
"""
self._model = model
self._data = numpy.array(data)
self._stats = self.model.calculate_statistics(self._data)
self._log_likelihoods = []
@property
def model(self):
return self._model
@model.setter
def model(self, value):
self._model = value
self._stats = self.model.calculate_statistics(self._data)
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = numpy.array(value)
self._stats = self.model.calculate_statistics(value)
@property
def stats(self):
return self._stats
def __call__(self, weights=None, n=100):
"""
Returns the log posterior likelihood
@param weights: optional expansion coefficients of the model,
if none are specified those of the model are used
@param n: number of integration points for calculating the partition function
"""
from numpy import sum
if weights is not None:
self.model.set(weights)
a = sum(self._stats[0] * self.model._cc)
b = sum(self._stats[1] * self.model._ss)
c = sum(self._stats[2] * self.model._cs)
d = sum(self._stats[3] * self.model._sc)
log_z = self.data.shape[0] * self.model.log_z(n=n)
log_likelihood = -self.model.beta * (a + b + c + d) - log_z
self._log_likelihoods.append(log_likelihood)
return log_likelihood
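# --- Illustrative usage sketch (not part of the original module) ---
def _maxent_example():  # pragma: no cover
    """Minimal sketch: build a low-order model with random coefficients
    and evaluate the log-probability on a coarse (phi, psi) grid.
    """
    model = MaxentModel(n=3)
    model.sample_weights()                  # random coefficients, then normalized
    angles = numpy.linspace(0., 2 * numpy.pi, 10)
    log_p = model.log_prob(angles, angles)  # (10, 10) grid of log-probabilities
    return log_p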
|
|
import os
import sys
import shutil
import os.path as op
from textwrap import dedent
import argparse
import subprocess as sp
import nipype
from nipype import Node, SelectFiles, DataSink, IdentityInterface
from nipype.interfaces import fsl
import lyman
import lyman.workflows as wf
from lyman import tools
from lyman.tools import add_suffix, submit_cmdline
##########################################
# Set up some info for this experiment
##########################################
exp_name = 'mvpa_raw'
altmodel = None
space = 'mni'
smoothing = 'unsmoothed'
subjects = None
regtype = 'model'
interpolation = "trilinear"
exp_type = 'test' # 'localizer' or 'test'
# determine experiment-specific info
if exp_type == 'localizer':
time_list = [4.5]
searchlight_dir = 'searchlight'
searchlight_path = searchlight_dir + "/localizer_acc_{subject_id}.nii.gz"
elif exp_type == 'test':
time_list = [0,2,4,6,8,10,12]
searchlight_dir = 'searchlight_test'
searchlight_path = searchlight_dir + "/sourcehit_time{time}_acc_{{subject_id}}.nii.gz"
##########################################
# Pull info from project
##########################################
project = lyman.gather_project_info()
exp = lyman.gather_experiment_info(exp_name, altmodel)
warp_method = project['normalization']
os.environ["SUBJECTS_DIR"] = project["data_dir"]
subject_list = lyman.determine_subjects(subjects)
subj_source = tools.make_subject_source(subject_list)
exp_base = exp_name
if altmodel is not None:
exp_name = "-".join([exp_base, altmodel])
data_dir = project["data_dir"]
analysis_dir = op.join(project["analysis_dir"], exp_name)
working_dir = op.join(project["working_dir"], exp_name)
##########################################
# Set up paths to files
##########################################
reg_templates = dict(
masks="{subject_id}/preproc/run_{run}/functional_mask.nii.gz",
means="{subject_id}/preproc/run_{run}/mean_func.nii.gz")
reg_templates.update(dict(
searchlight=op.join(searchlight_path)))
reg_lists = reg_templates.keys()
print reg_lists
aff_ext = "mat" if warp_method == "fsl" else "txt"
reg_templates["warpfield"] = op.join(data_dir, "{subject_id}",
"normalization/warpfield.nii.gz")
reg_templates["affine"] = op.join(data_dir, "{subject_id}",
"normalization/affine." + aff_ext)
# Rigid (6dof) functional-to-anatomical matrices
rigid_stem = op.join(analysis_dir,
"{subject_id}/preproc/run_{run}/func2anat_")
if warp_method == "ants" and space == "mni":
reg_templates["rigids"] = rigid_stem + "tkreg.dat"
else:
reg_templates["rigids"] = rigid_stem + "flirt.mat"
ref_file = fsl.Info.standard_image("avg152T1_brain.nii.gz")
in_file = op.join(analysis_dir, reg_templates['searchlight'])
out_fname = op.basename(add_suffix(in_file, "warp"))
out_file = op.join(analysis_dir, searchlight_dir, out_fname)
out_rigid = op.join(analysis_dir, searchlight_dir, op.basename(add_suffix(out_file, "anat")))
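# Illustrative note on the two-step template formatting used in the
# commented warping loop below (subject id 'ap101' is hypothetical):
#   in_file.format(time=6)  fills the {time} field and turns the escaped
#                           {{subject_id}} into {subject_id};
#   ....format(subject_id='ap101')  then fills in the subject.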
##########################################
# Warp images
##########################################
# print subject_list
#
# for time in time_list:
#
# # update accordingly for time
# if len(time_list) > 1:
# in_file_spec = in_file.format(time=str(time))
# out_file_spec = out_file.format(time=str(time))
# out_rigid_spec = out_rigid.format(time=str(time))
# else:
# in_file_spec = in_file
# out_file_spec = out_file
# out_rigid_spec = out_rigid
#
# for subid in subject_list:
# print subid
#
# continuous_interp = dict(trilinear="trilin",
# spline="cubic")[interpolation]
# interp = "nearest" if "mask" in in_file_spec else continuous_interp
# cmdline_rigid = ["mri_vol2vol",
# "--mov", in_file_spec.format(subject_id=subid),
# "--reg", reg_templates['rigids'].format(run=1, subject_id=subid),
# "--fstarg",
# "--" + interp,
# "--o", out_rigid_spec.format(subject_id=subid),
# "--no-save-reg"]
# cmdline = " ".join(cmdline_rigid)
# print cmdline
# os.system(cmdline)
#
# continuous_interp = dict(trilinear="trilin",
# spline="BSpline")[interpolation]
# interp = "NN" if "mask" in in_file else continuous_interp
# cmdline_warp = ["WarpImageMultiTransform",
# "3",
# out_rigid_spec.format(subject_id=subid),
# out_file_spec.format(subject_id=subid),
# reg_templates['warpfield'].format(subject_id=subid),
# reg_templates['affine'].format(subject_id=subid),
# "-R", ref_file]
# if interp != "trilin":
# cmdline_warp.append("--use-" + interp)
# cmdline = " ".join(cmdline_warp)
# print cmdline
# os.system(cmdline)
##########################################
# Combine across subjects into 4d group image
##########################################
# if exp_type == 'localizer':
# cmdline_merge = ["fslmerge",
# "-t",
# "/share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight/localizer_acc_4D",
# "/share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight/localizer_acc_*_warp.nii.gz"]
# cmdline = " ".join(cmdline_merge)
# print cmdline
# os.system(cmdline)
#
# # fslmaths /share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight/localizer_acc_4D -sub 0.3333 -Tmean /share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight/localizer_acc_mean
# # fslmaths /share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight/localizer_acc_4D -sub 0.3333 /share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight/localizer_acc_4D
# elif exp_type == 'test':
#
# for time in time_list:
#
# # Create a 4D image from all the subjects data
# cmdline_merge = ["fslmerge",
# "-t",
# "/share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time{time}_acc_4D".format(time=str(time)),
# "/share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time{time}_acc_*_warp.nii.gz".format(time=str(time))]
# cmdline = " ".join(cmdline_merge)
# print cmdline
# os.system(cmdline)
#
# # subtract chance, and take the mean across subjects
# cmdline_mean = ["fslmaths",
# "/share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time{time}_acc_4D".format(time=str(time)),
# "-sub 0.3333 -Tmean",
# "/share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time{time}_acc_mean".format(time=str(time))]
# cmdline = " ".join(cmdline_mean)
# print cmdline
# os.system(cmdline)
#
# # subtract chance (0.33) from 4D (for subsequent 1-sample t-testing against chance)
# cmdline_mean = ["fslmaths",
# "/share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time{time}_acc_4D".format(time=str(time)),
# "-sub 0.3333",
# "/share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time{time}_acc_4D".format(time=str(time))]
# cmdline = " ".join(cmdline_mean)
# print cmdline
# os.system(cmdline)
##########################################
# Plot group mean accuracy on surface
##########################################
# from surfer import Brain, project_volume_data
##### REINSTATEMENT
#################################
# for time in time_list:
# brain = Brain("fsaverage", "split", "inflated", views=['lat', 'med', 'ven'], background="white")
# volume_file = "/Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time{time}_acc_mean.nii.gz".format(time=str(time))
# reg_file = os.path.join(os.environ['FREESURFER_HOME'], "average/mni152.register.dat")
# for hemi in ['lh', 'rh']:
# zstat = project_volume_data(volume_file, hemi, reg_file, smooth_fwhm=0.5)
# brain.add_overlay(zstat, hemi=hemi, min=0.05)
# brain.save_image('/Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time{time}_acc_mean_surf.png'.format(time=str(time)))
##########################################
# 1-samp t-test (vs. chance)
##########################################
##### LOCALIZER
#################################
# cmdline = 'randomise_parallel -i /share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight/localizer_acc_4D -o /share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight/localizer_acc_t -1 -T'
# print cmdline
# os.system(cmdline)
#cluster -i /share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight/localizer_acc_t_tfce_corrp_tstat1 -t 0.95 -c /share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight/localizer_acc_t_tfce_tstat1 --scalarname="1-p" > /share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight/localizer_acc_corrp1.txt
# Threshold t-stats w/corrected pvalues
# fslmaths /Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight/localizer_acc_t_tfce_corrp_tstat1.nii.gz -thr 0.95 -bin /Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight/localizer_acc_t_tfce_corrp_tstat1_mask_p05.nii.gz
# fslmaths /Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight/localizer_acc_t_tstat1.nii.gz -mas /Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight/localizer_acc_t_tfce_corrp_tstat1_mask_p05.nii.gz /Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight/localizer_acc_t_tstat1_mask_p05corr.nii.gz
##### REINSTATEMENT
#################################
# for time in time_list:
# cmdline = 'randomise_parallel -i /share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time'+str(time)+'_acc_4D -o /share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time'+str(time)+'_acc_t -1 -T'
# print cmdline
# os.system(cmdline)
# for time in time_list:
# # Threshold t-stats w/corrected pvalues
# cmdline = 'fslmaths /Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time'+str(time)+'_acc_t_tfce_corrp_tstat1.nii.gz -thr 0.95 -bin /Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time'+str(time)+'_acc_t_tfce_corrp_tstat1_mask_p05.nii.gz'
# print cmdline
# os.system(cmdline)
#
# cmdline = 'fslmaths /Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time'+str(time)+'_acc_t_tstat1.nii.gz -mas /Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time'+str(time)+'_acc_t_tfce_corrp_tstat1_mask_p05.nii.gz /Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time'+str(time)+'_acc_t_tstat1_mask_p05corr.nii.gz'
# print cmdline
# os.system(cmdline)
##########################################
# Plot corrected t-stats (p < 0.05) on surface
##########################################
##### LOCALIZER
#################################
# from surfer import Brain, project_volume_data
#
# brain = Brain("fsaverage", "split", "inflated", views=['lat', 'med', 'ven'], background="white")
# volume_file = "/Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight/localizer_acc_t_tstat1_mask_p05corr.nii.gz"
#
# reg_file = os.path.join(os.environ['FREESURFER_HOME'], "average/mni152.register.dat")
#
# for hemi in ['lh', 'rh']:
# zstat = project_volume_data(volume_file, hemi, reg_file, smooth_fwhm=0.5)
# brain.add_overlay(zstat, hemi=hemi, min=1.96)
# brain.save_image('/Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight/localizer_acc_t_tstat1_mask_p05corr.png')
#
##### REINSTATEMENT
#################################
# from surfer import Brain, project_volume_data
#
# for time in time_list:
# brain = Brain("fsaverage", "split", "inflated", views=['lat', 'med', 'ven'], background="white")
#
# volume_file = "/Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time"+str(time)+"_acc_t_tstat1_mask_p05corr.nii.gz"
#
# reg_file = os.path.join(os.environ['FREESURFER_HOME'], "average/mni152.register.dat")
#
# for hemi in ['lh', 'rh']:
# zstat = project_volume_data(volume_file, hemi, reg_file, smooth_fwhm=0.5)
#
# if (zstat > 1.96).any():
# brain.add_overlay(zstat, hemi=hemi, min=1.96, max=10)
# brain.save_image('/Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time'+str(time)+'_acc_t_tstat1_mask_p05corr.png')
# brain.close()
# Plot yeo network overlaid on reinstatement
from surfer import Brain, project_volume_data
import os
time = 6
hemi = 'lh'
brain = Brain("fsaverage", hemi, "inflated", views=['lat', 'par'], background="white")
# volume_file = "/Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time"+str(time)+"_acc_t_tstat1_mask_p05corr.nii.gz"
#
# reg_file = os.path.join(os.environ['FREESURFER_HOME'], "average/mni152.register.dat")
#
# zstat = project_volume_data(volume_file, hemi, reg_file, smooth_fwhm=0.5)
#
# if (zstat > 1.96).any():
# brain.add_overlay(zstat, hemi=hemi, min=1.96, max=10)
brain.add_label('17Networks_DefaultC_IPL', alpha=.8, color='navy', hemi=hemi, borders=True)
brain.add_label('17Networks_DefaultA_IPL', alpha=.8, color='purple', hemi=hemi, borders=True)
brain.add_label('dorsalattn', alpha=.8, color='green', hemi=hemi, borders=True)
brain.add_label('dorsalattn', alpha=.4, color='green', hemi=hemi, borders=False)
brain.add_label('superiorparietal', alpha=.8, color='black', hemi=hemi, borders=True)
brain.add_label('supramarginal', alpha=.8, color='black', hemi=hemi, borders=True)
brain.add_label('inferiorparietal', alpha=.8, color='black', hemi=hemi, borders=True)
brain.add_label('17Networks_DorsAttnA_SPL', alpha=.8, color='darkgreen', hemi=hemi, borders=True)
brain.add_label('17Networks_LH_DorsAttnA_TempOcc', alpha=.8, color='darkgreen', hemi=hemi, borders=True)
brain.add_label('17Networks_DorsAttnA_ParOcc', alpha=.8, color='lightblue', hemi=hemi, borders=True)
brain.add_label('17Networks_5', alpha=.8, color='orange', hemi=hemi, borders=True)
brain.add_label('17Networks_6', alpha=.8, color='orange', hemi=hemi, borders=True)
brain.save_image('/Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time'+str(time)+'_acc_t_tstat1_mask_p05corr_YeoDefaultC_IPL.png')
brain.close()
##########################################
# Reinstatement 2-sample unpaired t-test between groups
##########################################
# basedir=/share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test
# design_ttest2 $basedir/design 22 20
# then change around .mat file for ap158 (control)
## Remove ap168 and ap174
##########################################
# rm $basedir/*ap168*
# rm $basedir/*ap174*
#
# for time in time_list:
#
# # Create a 4D image from all the subjects data
# cmdline_merge = ["fslmerge",
# "-t",
# "/share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time{time}_acc_4D_rmbad".format(time=str(time)),
# "/share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time{time}_acc_*_warp.nii.gz".format(time=str(time))]
# cmdline = " ".join(cmdline_merge)
# print cmdline
# os.system(cmdline)
#
# # subtract chance (0.33) from 4D (for subsequent 1-sample t-testing against chance)
# cmdline_mean = ["fslmaths",
# "/share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time{time}_acc_4D_rmbad".format(time=str(time)),
# "-sub 0.3333",
# "/share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time{time}_acc_4D_rmbad".format(time=str(time))]
# cmdline = " ".join(cmdline_mean)
# print cmdline
# os.system(cmdline)
# ls /share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time6_acc_*_warp.nii.gz
## Average reinstatement over 4-10 sec window
##########################################
# fslmaths /share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time4_acc_4D_rmbad.nii.gz \
# -add /share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time6_acc_4D_rmbad.nii.gz \
# -add /share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time8_acc_4D_rmbad.nii.gz \
# -add /share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_time10_acc_4D_rmbad.nii.gz \
# -div 4 \
# /share/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_mean4to10_acc_4D_rmbad.nii.gz
#
# # run permutation test
# randomise -i $basedir/sourcehit_mean4to10_acc_4D_rmbad \
# -o $basedir/sourcehit_mean4to10_2sampT \
# -d $basedir/design.mat \
# -t $basedir/design.con \
# -m /share/awagner/sgagnon/AP/analysis/ap_memory_raw/group_control-stress/mni/CR/group_mask \
# -T
## Just plot ctrl>stress t-stats, thresholded at p < 0.01 uncorrected
# from surfer import Brain, project_volume_data
# import os
#
# thresh = 2.42 # for p < 0.01, were df = 40 (20+22 - 2)
# max_thresh = 4
# brain = Brain("fsaverage", "split", "inflated", views=['lat', 'med', 'ven'], background="white")
#
# volume_file = "/Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_mean4to10_2sampT_tstat1.nii.gz"
#
# reg_file = os.path.join(os.environ['FREESURFER_HOME'], "average/mni152.register.dat")
#
# for hemi in ['lh', 'rh']:
# zstat = project_volume_data(volume_file, hemi, reg_file, smooth_fwhm=0.5)
#
# if (zstat > thresh).any():
# brain.add_overlay(zstat, hemi=hemi, min=thresh, max=max_thresh)
# brain.save_image('/Volumes/group/awagner/sgagnon/AP/analysis/mvpa_raw/searchlight_test/sourcehit_mean4to10_2sampT_tstat1_p01.png')
# brain.close()
|
|
"""Module with view factor calculation tools"""
from pvfactors.config import MIN_X_GROUND, MAX_X_GROUND, DISTANCE_TOLERANCE
from pvfactors.geometry.timeseries import TsLineCoords, TsPointCoords
from pvlib.tools import cosd, sind
import numpy as np
class VFTsMethods(object):
"""This class contains all the methods used to calculate timeseries
view factors for all the surfaces in
:py:class:`~pvfactors.geometry.pvarray.OrderedPVArray`"""
def vf_pvrow_gnd_surf(self, ts_pvrows, ts_ground, tilted_to_left,
vf_matrix):
"""Calculate the view factors between timeseries PV row and ground
surfaces, and assign it to the passed view factor matrix using
the surface indices.
Parameters
----------
ts_pvrows : list of :py:class:`~pvfactors.geometry.timeseries.TsPVRow`
List of timeseries PV rows in the PV array
ts_ground : :py:class:`~pvfactors.geometry.timeseries.TsGround`
Timeseries ground of the PV array
tilted_to_left : list of bool
Flags indicating when the PV rows are strictly tilted to the left
vf_matrix : np.ndarray
View factor matrix to update during calculation. Should have 3
dimensions as follows: [n_surfaces, n_surfaces, n_timesteps]
"""
n_pvrows = len(ts_pvrows)
for idx_pvrow, ts_pvrow in enumerate(ts_pvrows):
# Separate gnd surfaces depending on side
left_gnd_surfaces = ts_ground.ts_surfaces_side_of_cut_point(
'left', idx_pvrow)
right_gnd_surfaces = ts_ground.ts_surfaces_side_of_cut_point(
'right', idx_pvrow)
# Front side
front = ts_pvrow.front
for pvrow_surf in front.all_ts_surfaces:
if pvrow_surf.is_empty:
# do not run calculation for this surface
continue
ts_length = pvrow_surf.length
i = pvrow_surf.index
for gnd_surf in left_gnd_surfaces:
if gnd_surf.is_empty:
# do not run this calculation
continue
j = gnd_surf.index
vf_pvrow_to_gnd, vf_gnd_to_pvrow = (
self.vf_pvrow_surf_to_gnd_surf_obstruction_hottel(
pvrow_surf, idx_pvrow, n_pvrows,
tilted_to_left, ts_pvrows, gnd_surf, ts_length,
is_back=False, is_left=True))
vf_matrix[i, j, :] = vf_pvrow_to_gnd
vf_matrix[j, i, :] = vf_gnd_to_pvrow
for gnd_surf in right_gnd_surfaces:
if gnd_surf.is_empty:
# do not run this calculation
continue
j = gnd_surf.index
vf_pvrow_to_gnd, vf_gnd_to_pvrow = (
self.vf_pvrow_surf_to_gnd_surf_obstruction_hottel(
pvrow_surf, idx_pvrow, n_pvrows,
tilted_to_left, ts_pvrows, gnd_surf, ts_length,
is_back=False, is_left=False))
vf_matrix[i, j, :] = vf_pvrow_to_gnd
vf_matrix[j, i, :] = vf_gnd_to_pvrow
# Back side
back = ts_pvrow.back
for pvrow_surf in back.all_ts_surfaces:
if pvrow_surf.is_empty:
# do not run calculation for this surface
continue
ts_length = pvrow_surf.length
i = pvrow_surf.index
for gnd_surf in left_gnd_surfaces:
if gnd_surf.is_empty:
# do not run this calculation
continue
j = gnd_surf.index
vf_pvrow_to_gnd, vf_gnd_to_pvrow = (
self.vf_pvrow_surf_to_gnd_surf_obstruction_hottel(
pvrow_surf, idx_pvrow, n_pvrows,
tilted_to_left, ts_pvrows, gnd_surf, ts_length,
is_back=True, is_left=True))
vf_matrix[i, j, :] = vf_pvrow_to_gnd
vf_matrix[j, i, :] = vf_gnd_to_pvrow
for gnd_surf in right_gnd_surfaces:
if gnd_surf.is_empty:
# do not run this calculation
continue
j = gnd_surf.index
vf_pvrow_to_gnd, vf_gnd_to_pvrow = (
self.vf_pvrow_surf_to_gnd_surf_obstruction_hottel(
pvrow_surf, idx_pvrow, n_pvrows,
tilted_to_left, ts_pvrows, gnd_surf, ts_length,
is_back=True, is_left=False))
vf_matrix[i, j, :] = vf_pvrow_to_gnd
vf_matrix[j, i, :] = vf_gnd_to_pvrow
def vf_pvrow_surf_to_gnd_surf_obstruction_hottel(
self, pvrow_surf, pvrow_idx, n_pvrows, tilted_to_left,
ts_pvrows, gnd_surf, pvrow_surf_length, is_back=True,
is_left=True):
"""Calculate view factors from timeseries PV row surface to a
timeseries ground surface. This will return the calculated view
factors from the PV row surface to the ground surface, AND from the
ground surface to the PV row surface (using reciprocity).
Parameters
----------
pvrow_surf : :py:class:`~pvfactors.geometry.timeseries.TsSurface`
Timeseries PV row surface to use for calculation
pvrow_idx : int
Index of the timeseries PV row on which the pvrow_surf is
n_pvrows : int
Number of timeseries PV rows in the PV array, and therefore number
of shadows they cast on the ground
tilted_to_left : list of bool
Flags indicating when the PV rows are strictly tilted to the left
ts_pvrows : list of :py:class:`~pvfactors.geometry.timeseries.TsPVRow`
List of timeseries PV rows in the PV array
gnd_surf : :py:class:`~pvfactors.geometry.timeseries.TsSurface`
Timeseries ground surface to use for calculation
pvrow_surf_length : np.ndarray
Length (width) of the timeseries PV row surface [m]
is_back : bool
Flag specifying whether pv row surface is on back or front surface
(Default = True)
is_left : bool
Flag specifying whether gnd surface is left of pv row cut point or
not (Default = True)
Returns
-------
vf_pvrow_to_gnd_surf : np.ndarray
View factors from timeseries PV row surface to timeseries ground
surface, dimension is [n_timesteps]
vf_gnd_to_pvrow_surf : np.ndarray
View factors from timeseries ground surface to timeseries PV row
surface, dimension is [n_timesteps]
"""
pvrow_surf_lowest_pt = pvrow_surf.lowest_point
pvrow_surf_highest_pt = pvrow_surf.highest_point
no_obstruction = (is_left & (pvrow_idx == 0)) \
or ((not is_left) & (pvrow_idx == n_pvrows - 1))
if no_obstruction:
# There is no obstruction to the gnd surface
vf_pvrow_to_gnd_surf = self._vf_surface_to_surface(
pvrow_surf.coords, gnd_surf, pvrow_surf_length)
else:
# Get lowest point of obstructing point
idx_obstructing_pvrow = pvrow_idx - 1 if is_left else pvrow_idx + 1
pt_obstr = ts_pvrows[idx_obstructing_pvrow
].full_pvrow_coords.lowest_point
# Calculate vf from pv row to gnd surface
vf_pvrow_to_gnd_surf = self._vf_hottel_gnd_surf(
pvrow_surf_highest_pt, pvrow_surf_lowest_pt,
gnd_surf.b1, gnd_surf.b2, pt_obstr, pvrow_surf_length,
is_left)
# Final result depends on whether front or back surface
if is_left:
vf_pvrow_to_gnd_surf = (
np.where(tilted_to_left, 0., vf_pvrow_to_gnd_surf) if is_back
else np.where(tilted_to_left, vf_pvrow_to_gnd_surf, 0.))
else:
vf_pvrow_to_gnd_surf = (
np.where(tilted_to_left, vf_pvrow_to_gnd_surf, 0.) if is_back
else np.where(tilted_to_left, 0., vf_pvrow_to_gnd_surf))
# Use reciprocity to calculate ts vf from gnd surf to pv row surface
gnd_surf_length = gnd_surf.length
vf_gnd_to_pvrow_surf = np.where(
gnd_surf_length > DISTANCE_TOLERANCE,
vf_pvrow_to_gnd_surf * pvrow_surf_length / gnd_surf_length, 0.)
return vf_pvrow_to_gnd_surf, vf_gnd_to_pvrow_surf
def vf_pvrow_to_pvrow(self, ts_pvrows, tilted_to_left, vf_matrix):
"""Calculate the view factors between timeseries PV row surfaces,
and assign values to the passed view factor matrix using
the surface indices.
Parameters
----------
ts_pvrows : list of :py:class:`~pvfactors.geometry.timeseries.TsPVRow`
List of timeseries PV rows in the PV array
tilted_to_left : list of bool
Flags indicating when the PV rows are strictly tilted to the left
vf_matrix : np.ndarray
View factor matrix to update during calculation. Should have 3
dimensions as follows: [n_surfaces, n_surfaces, n_timesteps]
"""
for idx_pvrow, ts_pvrow in enumerate(ts_pvrows[:-1]):
# Get the next pv row
right_ts_pvrow = ts_pvrows[idx_pvrow + 1]
# front side
front = ts_pvrow.front
for surf_i in front.all_ts_surfaces:
if surf_i.is_empty:
# do not run calculation for this surface
continue
i = surf_i.index
length_i = surf_i.length
for surf_j in right_ts_pvrow.back.all_ts_surfaces:
if surf_j.is_empty:
# do not run calculation for this surface
continue
j = surf_j.index
length_j = surf_j.length
vf_i_to_j = self._vf_surface_to_surface(
surf_i.coords, surf_j.coords, length_i)
vf_i_to_j = np.where(tilted_to_left, 0., vf_i_to_j)
vf_j_to_i = np.where(
surf_j.length > DISTANCE_TOLERANCE,
vf_i_to_j * length_i / length_j, 0.)
vf_matrix[i, j, :] = vf_i_to_j
vf_matrix[j, i, :] = vf_j_to_i
# back side
back = ts_pvrow.back
for surf_i in back.all_ts_surfaces:
if surf_i.is_empty:
# do not run calculation for this surface
continue
i = surf_i.index
length_i = surf_i.length
for surf_j in right_ts_pvrow.front.all_ts_surfaces:
if surf_j.is_empty:
# do not run calculation for this surface
continue
j = surf_j.index
length_j = surf_j.length
vf_i_to_j = self._vf_surface_to_surface(
surf_i.coords, surf_j.coords, length_i)
vf_i_to_j = np.where(tilted_to_left, vf_i_to_j, 0.)
vf_j_to_i = np.where(
surf_j.length > DISTANCE_TOLERANCE,
vf_i_to_j * length_i / length_j, 0.)
vf_matrix[i, j, :] = vf_i_to_j
vf_matrix[j, i, :] = vf_j_to_i
def calculate_vf_to_pvrow(self, pvrow_element_coords, pvrow_idx, n_pvrows,
n_steps, ts_pvrows, pvrow_element_length,
tilted_to_left, pvrow_width, rotation_vec):
"""Calculate view factors from timeseries pvrow element to timeseries
PV rows around it.
Parameters
----------
pvrow_element_coords :
:py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
Timeseries line coordinates of pvrow_element
pvrow_idx : int
Index of the timeseries PV row on which the pvrow_element is
n_pvrows : int
Number of timeseries PV rows in the PV array
n_steps : int
Number of timesteps for which to calculate the pvfactors
ts_pvrows : list of :py:class:`~pvfactors.geometry.timeseries.TsPVRow`
Timeseries PV row geometries that will be used in the calculation
pvrow_element_length : float or np.ndarray
Length (width) of the timeseries pvrow element [m]
tilted_to_left : list of bool
Flags indicating when the PV rows are strictly tilted to the left
pvrow_width : float
Width of the timeseries PV rows in the PV array [m], which is
constant
rotation_vec : np.ndarray
Rotation angles of the PV rows [deg]
Returns
-------
vf_to_pvrow : np.ndarray
View factors from timeseries pvrow_element to neighboring PV rows
vf_to_shaded_pvrow : np.ndarray
View factors from timeseries pvrow_element to shaded areas of the
neighboring PV rows
"""
if pvrow_idx == 0:
vf_left_pvrow = np.zeros(n_steps)
vf_left_shaded_pvrow = np.zeros(n_steps)
else:
# Get vf to full pvrow
left_ts_pvrow = ts_pvrows[pvrow_idx - 1]
left_ts_pvrow_coords = left_ts_pvrow.full_pvrow_coords
vf_left_pvrow = self._vf_surface_to_surface(
pvrow_element_coords, left_ts_pvrow_coords,
pvrow_element_length)
# Get vf to shaded pvrow
shaded_coords = self._create_shaded_side_coords(
left_ts_pvrow.xy_center, pvrow_width,
left_ts_pvrow.front.shaded_length, tilted_to_left,
rotation_vec, left_ts_pvrow.full_pvrow_coords.lowest_point)
vf_left_shaded_pvrow = self._vf_surface_to_surface(
pvrow_element_coords, shaded_coords, pvrow_element_length)
if pvrow_idx == (n_pvrows - 1):
vf_right_pvrow = np.zeros(n_steps)
vf_right_shaded_pvrow = np.zeros(n_steps)
else:
# Get vf to full pvrow
right_ts_pvrow = ts_pvrows[pvrow_idx + 1]
right_ts_pvrow_coords = right_ts_pvrow.full_pvrow_coords
vf_right_pvrow = self._vf_surface_to_surface(
pvrow_element_coords, right_ts_pvrow_coords,
pvrow_element_length)
# Get vf to shaded pvrow
shaded_coords = self._create_shaded_side_coords(
right_ts_pvrow.xy_center, pvrow_width,
right_ts_pvrow.front.shaded_length, tilted_to_left,
rotation_vec, right_ts_pvrow.full_pvrow_coords.lowest_point)
vf_right_shaded_pvrow = self._vf_surface_to_surface(
pvrow_element_coords, shaded_coords, pvrow_element_length)
vf_to_pvrow = np.where(tilted_to_left, vf_right_pvrow, vf_left_pvrow)
vf_to_shaded_pvrow = np.where(tilted_to_left, vf_right_shaded_pvrow,
vf_left_shaded_pvrow)
return vf_to_pvrow, vf_to_shaded_pvrow
def calculate_vf_to_gnd(self, pvrow_element_coords, pvrow_idx, n_pvrows,
n_steps, y_ground, cut_point_coords,
pvrow_element_length, tilted_to_left, ts_pvrows):
"""Calculate view factors from timeseries pvrow_element to the entire
ground.
Parameters
----------
pvrow_element_coords :
:py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
Timeseries line coordinates of pvrow element
pvrow_idx : int
Index of the timeseries PV row on which the pvrow_element is
n_pvrows : int
Number of timeseries PV rows in the PV array
n_steps : int
Number of timesteps for which to calculate the pvfactors
y_ground : float
Y-coordinate of the flat ground [m]
cut_point_coords : list of
:py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
List of cut point coordinates, as calculated for timeseries PV rows
pvrow_element_length : float or np.ndarray
Length (width) of the timeseries pvrow_element [m]
tilted_to_left : list of bool
Flags indicating when the PV rows are strictly tilted to the left
ts_pvrows : list of :py:class:`~pvfactors.geometry.timeseries.TsPVRow`
Timeseries PV row geometries that will be used in the calculation
Returns
-------
vf_to_gnd : np.ndarray
View factors from timeseries pvrow_element to the entire ground
"""
pvrow_lowest_pt = ts_pvrows[pvrow_idx].full_pvrow_coords.lowest_point
if pvrow_idx == 0:
# There is no obstruction to view of the ground on the left
coords_left_gnd = TsLineCoords(
TsPointCoords(MIN_X_GROUND * np.ones(n_steps), y_ground),
TsPointCoords(np.minimum(MAX_X_GROUND, cut_point_coords.x),
y_ground))
vf_left_ground = self._vf_surface_to_surface(
pvrow_element_coords, coords_left_gnd, pvrow_element_length)
else:
# The left PV row obstructs the view of the ground on the left
left_pt_neighbor = \
ts_pvrows[pvrow_idx - 1].full_pvrow_coords.lowest_point
coords_gnd_proxy = TsLineCoords(left_pt_neighbor, pvrow_lowest_pt)
vf_left_ground = self._vf_surface_to_surface(
pvrow_element_coords, coords_gnd_proxy, pvrow_element_length)
if pvrow_idx == (n_pvrows - 1):
# There is no obstruction of the view of the ground on the right
coords_right_gnd = TsLineCoords(
TsPointCoords(np.maximum(MIN_X_GROUND, cut_point_coords.x),
y_ground),
TsPointCoords(MAX_X_GROUND * np.ones(n_steps), y_ground))
vf_right_ground = self._vf_surface_to_surface(
pvrow_element_coords, coords_right_gnd, pvrow_element_length)
else:
# The right PV row obstructs the view of the ground on the right
right_pt_neighbor = \
ts_pvrows[pvrow_idx + 1].full_pvrow_coords.lowest_point
coords_gnd_proxy = TsLineCoords(pvrow_lowest_pt, right_pt_neighbor)
vf_right_ground = self._vf_surface_to_surface(
pvrow_element_coords, coords_gnd_proxy, pvrow_element_length)
# Merge the views of the ground for the back side
vf_ground = np.where(tilted_to_left, vf_right_ground, vf_left_ground)
return vf_ground
def calculate_vf_to_shadow_obstruction_hottel(
self, pvrow_element, pvrow_idx, n_shadows, n_steps, tilted_to_left,
ts_pvrows, shadow_left, shadow_right, pvrow_element_length):
"""Calculate view factors from timeseries pvrow_element to the shadow
of a specific timeseries PV row which is cast on the ground.
Parameters
----------
pvrow_element : :py:class:`~pvfactors.geometry.timeseries.TsDualSegment` or :py:class:`~pvfactors.geometry.timeseries.TsSurface`
Timeseries pvrow_element to use for calculation
pvrow_idx : int
Index of the timeseries PV row on which the pvrow_element is
n_shadows : int
Number of timeseries PV rows in the PV array, and therefore number
of shadows they cast on the ground
n_steps : int
Number of timesteps for which to calculate the pvfactors
tilted_to_left : list of bool
Flags indicating when the PV rows are strictly tilted to the left
ts_pvrows : list of :py:class:`~pvfactors.geometry.timeseries.TsPVRow`
Timeseries PV row geometries that will be used in the calculation
shadow_left : :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
Coordinates of the shadow that are on the left side of the cut
point of the PV row on which the pvrow_element is
shadow_right : :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
Coordinates of the shadow that are on the right side of the cut
point of the PV row on which the pvrow_element is
pvrow_element_length : float or np.ndarray
Length (width) of the timeseries pvrow_element [m]
Returns
-------
vf_to_shadow : np.ndarray
View factors from timeseries pvrow_element to the ground shadow of
a specific timeseries PV row
"""
pvrow_element_lowest_pt = pvrow_element.lowest_point
pvrow_element_highest_pt = pvrow_element.highest_point
# Calculate view factors to left shadows
if pvrow_idx == 0:
# There is no obstruction on the left
vf_to_left_shadow = self._vf_surface_to_surface(
pvrow_element.coords, shadow_left, pvrow_element_length)
else:
# There is potential obstruction on the left
pt_obstr = ts_pvrows[pvrow_idx - 1].full_pvrow_coords.lowest_point
is_shadow_left = True
vf_to_left_shadow = self._vf_hottel_gnd_surf(
pvrow_element_highest_pt, pvrow_element_lowest_pt,
shadow_left.b1, shadow_left.b2, pt_obstr, pvrow_element_length,
is_shadow_left)
# Calculate view factors to right shadows
if pvrow_idx == n_shadows - 1:
# There is no obstruction on the right
vf_to_right_shadow = self._vf_surface_to_surface(
pvrow_element.coords, shadow_right, pvrow_element_length)
else:
# There is potential obstruction on the right
pt_obstr = ts_pvrows[pvrow_idx + 1].full_pvrow_coords.lowest_point
is_shadow_left = False
vf_to_right_shadow = self._vf_hottel_gnd_surf(
pvrow_element_highest_pt, pvrow_element_lowest_pt,
shadow_right.b1, shadow_right.b2, pt_obstr,
pvrow_element_length, is_shadow_left)
# Filter since we're considering the back surface only
vf_to_shadow = np.where(tilted_to_left, vf_to_right_shadow,
vf_to_left_shadow)
return vf_to_shadow
def _vf_hottel_gnd_surf(self, high_pt_pv, low_pt_pv, left_pt_gnd,
right_pt_gnd, obstr_pt, width, shadow_is_left):
"""
Calculate the timeseries view factors from a PV surface defined by low
and high points, to a ground surface defined by left and right points,
while accounting for potentially obstructing neighboring PV rows,
defined by an obstruction point, and all of this using the Hottel
String method.
Parameters
----------
high_pt_pv : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
Highest point of the PV surface, for each timestamp
low_pt_pv : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
Lowest point of the PV surface, for each timestamp
left_pt_gnd : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
Leftmost point of the ground surface, for each timestamp
right_pt_gnd : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
Rightmost point of the ground surface, for each timestamp
obstr_pt : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
Obstructing point of neighboring PV row, for each timestamp
width : float or np.ndarray
Width of the PV row surface considered, from low to high point [m]
shadow_is_left : bool
Side of the considered shadow (or ground) surface with respect to
the edge point of the PV row on which the considered PV surface is
located
Returns
-------
vf_1_to_2 : np.ndarray
View factors from PV surface to ground (shadow) surface
"""
if shadow_is_left:
# When the shadow is left
# - uncrossed strings are high_pv - left_gnd and low_pv - right_gnd
# - crossed strings are high_pv - right_gnd and low_pv - left_gnd
l1 = self._hottel_string_length(high_pt_pv, left_pt_gnd, obstr_pt,
shadow_is_left)
l2 = self._hottel_string_length(low_pt_pv, right_pt_gnd, obstr_pt,
shadow_is_left)
d1 = self._hottel_string_length(high_pt_pv, right_pt_gnd, obstr_pt,
shadow_is_left)
d2 = self._hottel_string_length(low_pt_pv, left_pt_gnd, obstr_pt,
shadow_is_left)
else:
# When the shadow is right
# - uncrossed strings are high_pv - right_gnd and low_pv - left_gnd
# - crossed strings are high_pv - left_gnd and low_pv - right_gnd
l1 = self._hottel_string_length(high_pt_pv, right_pt_gnd, obstr_pt,
shadow_is_left)
l2 = self._hottel_string_length(low_pt_pv, left_pt_gnd, obstr_pt,
shadow_is_left)
d1 = self._hottel_string_length(high_pt_pv, left_pt_gnd, obstr_pt,
shadow_is_left)
d2 = self._hottel_string_length(low_pt_pv, right_pt_gnd, obstr_pt,
shadow_is_left)
vf_1_to_2 = (d1 + d2 - l1 - l2) / (2. * width)
# The formula doesn't work if surface is a point
vf_1_to_2 = np.where(width > DISTANCE_TOLERANCE, vf_1_to_2, 0.)
return vf_1_to_2
def _hottel_string_length(self, pt_pv, pt_gnd, pt_obstr, shadow_is_left):
"""
Calculate a string length as defined by the Hottel String method in the
calculation of view factors, which makes it possible to account for obstructions.
Parameters
----------
pt_pv : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
Point of the PV row surface, for each timestamp
pt_gnd : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
Point of the ground surface, for each timestamp
pt_obstr : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
Obstructing point of neighboring PV row, for each timestamp
shadow_is_left : bool
Side of the considered shadow (or ground) surface with respect to
the edge point of the PV row on which the considered PV surface is
located
Returns
-------
hottel_length : np.ndarray
Return timeseries length of the string, while accounting for
obstructions, in [m]
"""
# Calculate length of string without obstruction
l_pv = self._distance(pt_pv, pt_gnd)
if pt_obstr is None:
# There can't be any obstruction
hottel_length = l_pv
else:
# Determine if there is obstruction by using the angles made by
# specific strings with the x-axis
alpha_pv = self._angle_with_x_axis(pt_gnd, pt_pv)
alpha_ob = self._angle_with_x_axis(pt_gnd, pt_obstr)
if shadow_is_left:
is_obstructing = alpha_pv > alpha_ob
else:
is_obstructing = alpha_pv < alpha_ob
# Calculate length of string with obstruction
l_obstr = (self._distance(pt_gnd, pt_obstr)
+ self._distance(pt_obstr, pt_pv))
# Merge based on whether there is obstruction or not
hottel_length = np.where(is_obstructing, l_obstr, l_pv)
return hottel_length
def _vf_surface_to_surface(self, line_1, line_2, width_1):
"""Calculate view factors between timeseries line coords, and using
the Hottel String method for calculating view factors (without
obstruction).
Parameters
----------
line_1 : :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
Timeseries line coordinates of surface 1
line_2 : :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
Timeseries line coordinates of surface 2
width_1 : float or np.ndarray
Length of line_1 in [m]
Returns
-------
vf_1_to_2 : np.ndarray
View factors from line_1 to line_2, for each timestep
"""
length_1 = self._distance(line_1.b1, line_2.b1)
length_2 = self._distance(line_1.b2, line_2.b2)
length_3 = self._distance(line_1.b1, line_2.b2)
length_4 = self._distance(line_1.b2, line_2.b1)
sum_1 = length_1 + length_2
sum_2 = length_3 + length_4
vf_1_to_2 = np.abs(sum_2 - sum_1) / (2. * width_1)
# The formula doesn't work if the line is a point
vf_1_to_2 = np.where(width_1 > DISTANCE_TOLERANCE, vf_1_to_2, 0.)
return vf_1_to_2
@staticmethod
def _distance(pt_1, pt_2):
"""Calculate distance between two timeseries points
Parameters
----------
pt_1 : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
Timeseries point coordinates of point 1
pt_2 : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
Timeseries point coordinates of point 2
Returns
-------
np.ndarray
Distance between the two points, for each timestep
"""
return np.sqrt((pt_2.y - pt_1.y)**2 + (pt_2.x - pt_1.x)**2)
@staticmethod
def _angle_with_x_axis(pt_1, pt_2):
"""Angle with x-axis of vector going from pt_1 to pt_2
Parameters
----------
pt_1 : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
Timeseries point coordinates of point 1
pt_2 : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
Timeseries point coordinates of point 2
Returns
-------
np.ndarray
Angle between vector pt_1->pt_2 and x-axis
"""
return np.arctan2(pt_2.y - pt_1.y, pt_2.x - pt_1.x)
@staticmethod
def _create_shaded_side_coords(xy_center, width, shaded_length,
mask_tilted_to_left, rotation_vec,
side_lowest_pt):
"""
Create the timeseries line coordinates for the shaded portion of a
PV row side, based on inputted shaded length.
Parameters
----------
xy_center : tuple of float
x and y coordinates of the PV row center point (invariant)
width : float
width of the PV rows [m]
shaded_length : np.ndarray
Timeseries values of side shaded length [m]
mask_tilted_to_left : list of bool
Flags indicating when the PV rows are strictly tilted to the left
rotation_vec : np.ndarray
Timeseries rotation vector of the PV rows in [deg]
side_lowest_pt : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
Timeseries coordinates of lowest point of considered PV row side
Returns
-------
side_shaded_coords : :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
Timeseries coordinates of the shaded portion of the PV row side
"""
# Get invariant values
x_center, y_center = xy_center
radius = width / 2.
# Calculate coords of shading point
r_shade = radius - shaded_length
x_sh = np.where(
mask_tilted_to_left,
r_shade * cosd(rotation_vec + 180.) + x_center,
r_shade * cosd(rotation_vec) + x_center)
y_sh = np.where(
mask_tilted_to_left,
r_shade * sind(rotation_vec + 180.) + y_center,
r_shade * sind(rotation_vec) + y_center)
side_shaded_coords = TsLineCoords(TsPointCoords(x_sh, y_sh),
side_lowest_pt)
return side_shaded_coords
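# --- Illustrative usage sketch (not part of the original module) ---
def _crossed_string_example():  # pragma: no cover
    """Minimal sketch of the crossed-string formula used in
    ``_vf_surface_to_surface``: view factor between two facing, parallel,
    unit-length vertical lines one meter apart. The analytical value is
    sqrt(2) - 1, i.e. roughly 0.414.
    """
    ones = np.ones(1)
    line_1 = TsLineCoords(TsPointCoords(0. * ones, 0. * ones),
                          TsPointCoords(0. * ones, 1. * ones))
    line_2 = TsLineCoords(TsPointCoords(1. * ones, 0. * ones),
                          TsPointCoords(1. * ones, 1. * ones))
    vf = VFTsMethods()._vf_surface_to_surface(line_1, line_2, 1. * ones)
    return vf  # approximately 0.414 for each timestep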
|
|
from oslo.config import cfg
import addons
src = cfg.OptGroup(name='src',
title='Credentials and general config for source cloud')
src_opts = [
cfg.StrOpt('type', default='os',
help='os - OpenStack Cloud'),
cfg.StrOpt('auth_url', default='-',
help='Keystone service endpoint for authorization'),
cfg.StrOpt('host', default='-',
help='ip-address controller for cloud'),
cfg.StrOpt('ssh_host', default='',
help='ip-address of cloud node for ssh connect'),
cfg.StrOpt('ext_cidr', default='',
help='external network CIDR'),
cfg.StrOpt('user', default='-',
help='user for access to API'),
cfg.StrOpt('password', default='-',
help='password for access to API'),
cfg.StrOpt('tenant', default='-',
help='tenant for access to API'),
cfg.StrOpt('temp', default='-',
help='temporary directory on controller'),
cfg.StrOpt('service_tenant', default='services',
help='Tenant name for services'),
cfg.StrOpt('ssh_user', default='root',
help='user to connect via ssh'),
cfg.StrOpt('ssh_sudo_password', default='',
help='sudo password to connect via ssh, if any')
]
dst = cfg.OptGroup(name='dst',
title='Credentials and general '
'config for destination cloud')
dst_opts = [
cfg.StrOpt('type', default='os',
help='os - OpenStack Cloud'),
cfg.StrOpt('auth_url', default='-',
help='Keystone service endpoint for authorization'),
cfg.StrOpt('host', default='-',
help='ip-address controller for cloud'),
cfg.StrOpt('ssh_host', default='',
help='ip-address of cloud node for ssh connect'),
cfg.StrOpt('ext_cidr', default='',
help='external network CIDR'),
cfg.StrOpt('user', default='-',
help='user for access to API'),
cfg.StrOpt('password', default='-',
help='password for access to API'),
cfg.StrOpt('tenant', default='-',
help='tenant for access to API'),
cfg.StrOpt('temp', default='-',
help='temporary directory on controller'),
cfg.StrOpt('service_tenant', default='services',
help='Tenant name for services'),
cfg.StrOpt('ssh_user', default='root',
help='user to connect via ssh'),
cfg.StrOpt('ssh_sudo_password', default='',
help='sudo password to connect via ssh, if any')
]
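# Illustrative registration sketch (not part of this file; the remaining
# option groups are defined below). A typical consumer of these groups
# would do something like the following, with a hypothetical config path:
#
#     CONF = cfg.CONF
#     CONF.register_group(src)
#     CONF.register_opts(src_opts, group=src)
#     CONF.register_group(dst)
#     CONF.register_opts(dst_opts, group=dst)
#     CONF(['--config-file', 'configs/config.ini'])
#     print(CONF.src.auth_url)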
migrate = cfg.OptGroup(name='migrate',
title='General config for migration process')
migrate_opts = [
    cfg.BoolOpt('keep_user_passwords', default=True,
                help='True - keep user passwords, '
                     'False - do not keep user passwords'),
    cfg.StrOpt('key_filename', default='id_rsa',
               help='name of the public key file'),
    cfg.BoolOpt('keep_ip', default=False,
                help='yes - keep ip, no - do not keep ip'),
    cfg.BoolOpt('migrate_extnets', default=False,
                help='yes - migrate external networks, '
                     'no - do not migrate external networks'),
    cfg.StrOpt('ext_net_map', default='configs/ext_net_map.yaml',
               help='path to the map of external networks, which contains '
                    'references between old and new ids'),
    cfg.BoolOpt('keep_floatingip', default=False,
                help='yes - keep floating IPs, no - do not keep floating IPs'),
    cfg.StrOpt('cinder_migration_strategy',
               default='cloudferrylib.os.storage.cinder_storage.CinderStorage',
               help='path to the class that will perform cinder migration actions'),
    cfg.BoolOpt('keep_lbaas', default=False,
                help='yes - keep lbaas settings, no - do not keep lbaas settings'),
    cfg.BoolOpt('keep_volume_snapshots', default=False,
                help='yes - keep volume snapshots, '
                     'no - do not keep volume snapshots'),
    cfg.BoolOpt('keep_volume_storage', default=False,
                help='True - keep volume_storage, '
                     'False - do not keep volume_storage'),
    cfg.StrOpt('speed_limit', default='10MB',
               help='speed limit for glance to glance transfers'),
    cfg.StrOpt('instances', default='key_name-qwerty',
               help='filter instances by parameters'),
    cfg.StrOpt('file_compression', default='dd',
               help='gzip - use GZIP when transferring files via ssh, '
                    'dd - no compression, transfer directly via dd'),
    cfg.IntOpt('level_compression', default=7,
               help='compression level for gzip'),
cfg.StrOpt('ssh_transfer_port', default='9990',
help='interval ports for ssh tunnel'),
cfg.StrOpt('port', default='9990',
help='interval ports for ssh tunnel'),
    cfg.BoolOpt('overwrite_user_passwords', default=False,
                help='Overwrite passwords for existing users on destination'),
cfg.BoolOpt('migrate_quotas', default=False,
help='Migrate tenant quotas'),
    cfg.StrOpt('disk_format', default='qcow2',
               help='disk format used when converting a volume to an image'),
    cfg.StrOpt('container_format', default='bare',
               help='container format used when converting a volume to an image'),
cfg.BoolOpt('direct_compute_transfer', default=False,
help='Direct data transmission between compute nodes via external network'),
cfg.StrOpt('filter_path', default='configs/filter.yaml',
help='path to the filter yaml file with options for search resources'),
    cfg.IntOpt('retry', default=7,
               help='Number of retries if an error occurs'),
    cfg.IntOpt('time_wait', default=5,
               help='Time to wait between retries if an error occurs'),
cfg.IntOpt('ssh_chunk_size', default=100,
help='Size of one chunk to transfer via SSH'),
cfg.StrOpt('group_file_path',
help='Path to file with the groups of VMs'),
cfg.BoolOpt('all_networks', default=False,
help="Migrate all network resources from all tenants"),
cfg.BoolOpt('all_volumes', default=False,
help="Migrate all volume resources from all tenants"),
    cfg.BoolOpt('all_vms', default=False,
                help="Migrate all VMs from all tenants. The user specified in "
                     "the 'dst' section of the config must also have the "
                     "admin role in all tenants."),
cfg.BoolOpt('all_images', default=False,
help='Migrate images of all tenants'),
cfg.BoolOpt('skip_down_hosts', default=True,
help="If set to True, removes unreachable compute hosts from "
"nova hypervisor list. Otherwise migration process fails "
"with unrecoverable error if host is down."),
cfg.StrOpt('scenario', default='scenario/migrate.yaml',
help='Path to a scenario file, which holds the whole migration '
'procedure. Must be YAML format'),
cfg.StrOpt('tasks_mapping', default='scenario/tasks.yaml',
help='Path to a file which holds CloudFerry python code tasks '
'mapped to migration scenario items. Items defined in '
'this file must be used in the migration scenario.'),
cfg.BoolOpt('migrate_users', default=True,
help='Migrate users'),
    cfg.BoolOpt('migrate_user_quotas', default=True,
                help='Migrate user quotas. If set to False, only tenant '
                     'quotas will be migrated. Use this when the OpenStack '
                     'release does not support user quotas (e.g. Grizzly)'),
]
mail = cfg.OptGroup(name='mail',
title='Mail credentials for notifications')
mail_opts = [
    cfg.StrOpt('server', default='-',
               help='mail server name'),
    cfg.StrOpt('username', default='-',
               help='username for mail'),
    cfg.StrOpt('password', default='-',
               help='password for mail'),
    cfg.StrOpt('from_addr', default='-',
               help='value of the FROM field in the e-mail')
]
src_mysql = cfg.OptGroup(name='src_mysql',
title='Config mysql for source cloud')
src_mysql_opts = [
cfg.StrOpt('user', default='-',
help='user for mysql'),
cfg.StrOpt('password', default='-',
help='password for mysql'),
cfg.StrOpt('host', default='-',
help='host of mysql'),
cfg.StrOpt('connection', default='mysql+mysqlconnector',
help='driver for connection'),
]
src_rabbit = cfg.OptGroup(name='src_rabbit',
title='Config RabbitMQ for source cloud')
src_rabbit_opts = [
cfg.StrOpt('user', default='guest',
help='user for RabbitMQ'),
cfg.StrOpt('password', default='guest',
help='password for RabbitMQ'),
cfg.StrOpt('hosts', default='-',
help='comma separated RabbitMQ hosts')
]
src_compute = cfg.OptGroup(name='src_compute',
title='Config service for compute')
src_compute_opts = [
cfg.StrOpt('service', default='nova',
help='name service for compute'),
cfg.StrOpt('backend', default='ceph',
help='backend for ephemeral drives'),
cfg.StrOpt('convert_diff_file', default='qcow2',
help='convert diff file to'),
cfg.StrOpt('convert_ephemeral_disk', default='qcow2',
help='convert ephemeral disk to'),
cfg.StrOpt('host_eph_drv', default='-',
help='host ephemeral drive')
]
src_storage = cfg.OptGroup(name='src_storage',
title='Config service for storage')
src_storage_opts = [
cfg.StrOpt('service', default='cinder',
help='name service for storage'),
cfg.StrOpt('backend', default='iscsi',
help='backend for storage'),
cfg.StrOpt('host', default='',
help='storage node ip address'),
cfg.StrOpt('user', default='',
help='user for db access (if backend == db)'),
cfg.StrOpt('password', default='',
help='password for db access (if backend == db)'),
cfg.StrOpt('database_name', default='',
help='cinder_database name (if backend == db)'),
cfg.StrOpt('connection', default='mysql+mysqlconnector',
help='driver for connection'),
cfg.StrOpt('protocol_transfer', default='GLANCE',
help='mode transporting volumes GLANCE or SSH'),
cfg.StrOpt('disk_format', default='qcow2',
help='convert volume'),
cfg.StrOpt('volume_name_template', default='volume-',
help='template for creating names of volumes on storage backend'),
cfg.StrOpt('rbd_pool', default='volumes',
help='name of pool for volumes in Ceph RBD storage'),
cfg.StrOpt('snapshot_name_template', default='snapshot-',
help='template for creating names of snapshots on storage backend')
]
src_image = cfg.OptGroup(name='src_image',
title='Config service for images')
src_image_opts = [
cfg.StrOpt('service', default='glance',
help='name service for images'),
cfg.StrOpt('user', default='',
help='user for db access (if backend == db)'),
cfg.StrOpt('host', default='',
help='glance mysql node ip address'),
cfg.StrOpt('password', default='',
help='password for db access (if backend == db)'),
    cfg.StrOpt('database_name', default='',
               help='glance database name (if backend == db)'),
cfg.StrOpt('connection', default='mysql+mysqlconnector',
help='driver for connection'),
cfg.StrOpt('backend', default='file',
help='backend for images')
]
src_identity = cfg.OptGroup(name='src_identity',
title='Config service for identity')
src_identity_opts = [
cfg.StrOpt('service', default='keystone',
help='name service for keystone')
]
src_network = cfg.OptGroup(name='src_network',
title='Config service for network')
src_network_opts = [
cfg.StrOpt('service', default='auto',
help='name service for network, '
                    'auto - detect available service')
]
src_objstorage = cfg.OptGroup(name='src_objstorage',
title='Config service for object storage')
src_objstorage_opts = [
cfg.StrOpt('service', default='swift',
help='service name for object storage')
]
dst_mysql = cfg.OptGroup(name='dst_mysql',
title='Config mysql for destination cloud')
dst_mysql_opts = [
cfg.StrOpt('user', default='-',
help='user for mysql'),
cfg.StrOpt('password', default='-',
help='password for mysql'),
cfg.StrOpt('host', default='-',
help='host of mysql'),
cfg.StrOpt('connection', default='mysql+mysqlconnector',
help='driver for connection'),
]
dst_rabbit = cfg.OptGroup(name='dst_rabbit',
                          title='Config RabbitMQ for destination cloud')
dst_rabbit_opts = [
cfg.StrOpt('user', default='guest',
help='user for RabbitMQ'),
cfg.StrOpt('password', default='guest',
help='password for RabbitMQ'),
cfg.StrOpt('hosts', default='-',
help='comma separated RabbitMQ hosts')
]
dst_compute = cfg.OptGroup(name='dst_compute',
title='Config service for compute')
dst_compute_opts = [
cfg.StrOpt('service', default='nova',
help='name service for compute'),
cfg.StrOpt('backend', default='ceph',
help='backend for ephemeral drives'),
cfg.StrOpt('convert_diff_file', default='qcow2',
help='convert diff file to'),
cfg.StrOpt('convert_ephemeral_disk', default='qcow2',
help='convert ephemeral disk to'),
cfg.StrOpt('host_eph_drv', default='-',
help='host ephemeral drive'),
    cfg.FloatOpt('cpu_allocation_ratio', default=16.0,
                 help='cpu allocation ratio'),
    cfg.FloatOpt('ram_allocation_ratio', default=1.0,
                 help='ram allocation ratio'),
    cfg.FloatOpt('disk_allocation_ratio', default=0.9,
                 help='disk allocation ratio')
]
dst_storage = cfg.OptGroup(name='dst_storage',
title='Config service for storage')
dst_storage_opts = [
cfg.StrOpt('service', default='cinder',
help='name service for storage'),
cfg.StrOpt('backend', default='iscsi',
help='backend for storage'),
cfg.StrOpt('host', default='',
help='storage node ip address'),
cfg.StrOpt('user', default='',
help='user for db access (if backend == db)'),
cfg.StrOpt('password', default='',
help='password for db access (if backend == db)'),
cfg.StrOpt('database_name', default='',
help='cinder_database name (if backend == db)'),
cfg.StrOpt('connection', default='mysql+mysqlconnector',
help='driver for connection'),
cfg.StrOpt('protocol_transfer', default='GLANCE',
help='mode transporting volumes GLANCE or SSH'),
cfg.StrOpt('disk_format', default='qcow2',
help='convert volume'),
cfg.StrOpt('volume_name_template', default='volume-',
help='template for creating names of volumes on storage backend'),
cfg.StrOpt('rbd_pool', default='volumes',
help='name of pool for volumes in Ceph RBD storage'),
cfg.StrOpt('snapshot_name_template', default='snapshot-',
help='template for creating names of snapshots on storage backend')
]
dst_image = cfg.OptGroup(name='dst_image',
title='Config service for images')
dst_image_opts = [
cfg.StrOpt('service', default='glance',
help='name service for images'),
    cfg.BoolOpt('convert_to_raw', default=True,
                help='convert images to raw format'),
cfg.StrOpt('host', default='',
help='glance mysql node ip address'),
cfg.StrOpt('user', default='',
help='user for db access (if backend == db)'),
cfg.StrOpt('password', default='',
help='password for db access (if backend == db)'),
    cfg.StrOpt('database_name', default='',
               help='glance database name (if backend == db)'),
cfg.StrOpt('connection', default='mysql+mysqlconnector',
help='driver for connection'),
cfg.StrOpt('backend', default='file',
help='backend for images')
]
dst_identity = cfg.OptGroup(name='dst_identity',
title='Config service for identity')
dst_identity_opts = [
cfg.StrOpt('service', default='keystone',
help='name service for keystone')
]
dst_network = cfg.OptGroup(name='dst_network',
title='Config service for network')
dst_network_opts = [
cfg.StrOpt('service', default='auto',
help='name service for network, '
'auto - detect available service'),
    cfg.ListOpt('interfaces_for_instance', default=['net04'],
                help='list of interfaces used to connect to instances')
]
dst_objstorage = cfg.OptGroup(name='dst_objstorage',
title='Config service for object storage')
dst_objstorage_opts = [
cfg.StrOpt('service', default='swift',
help='service name for object storage')
]
import_rules = cfg.OptGroup(name='import_rules',
                            title='Import rules for '
                                  'overwriting certain fields')
import_rules_opts = [
cfg.StrOpt('key', default='',
help=''),
]
snapshot = cfg.OptGroup(name='snapshot',
title="Rules for snapshot")
snapshot_opts = [
cfg.StrOpt('snapshot_path', default="dump.sql")]
initial_check = cfg.OptGroup(name='initial_check',
title='Some configuration to initial checks')
initial_check_opts = [
cfg.IntOpt('claimed_bandwidth', default=100,
help='Claimed bandwidth of network (Mb/s).'),
cfg.FloatOpt('factor', default=0.5,
help='The percentage of the allowable loss of network speed'),
cfg.IntOpt('test_file_size', default=100,
help='Size of testing file to send/receive via network (MB).'),
]
condense = cfg.OptGroup(name='condense',
title="options for condensation")
condense_opts = [
cfg.StrOpt('nova_file'),
cfg.StrOpt('node_file'),
cfg.StrOpt('group_file'),
cfg.IntOpt('ram_reduction_coef', default=1),
cfg.IntOpt('core_reduction_coef', default=4),
cfg.IntOpt('precision', default=85)]
database = cfg.OptGroup(name="database",
title="options for database")
database_opts = [
cfg.StrOpt("host", default="localhost"),
cfg.IntOpt("port", default=6379)]
cfg_for_reg = [
(src, src_opts),
(dst, dst_opts),
(migrate, migrate_opts),
(mail, mail_opts),
(src_mysql, src_mysql_opts),
(src_rabbit, src_rabbit_opts),
(src_compute, src_compute_opts),
(src_storage, src_storage_opts),
(src_identity, src_identity_opts),
(src_image, src_image_opts),
(src_network, src_network_opts),
(src_objstorage, src_objstorage_opts),
(dst_mysql, dst_mysql_opts),
(dst_rabbit, dst_rabbit_opts),
(dst_compute, dst_compute_opts),
(dst_storage, dst_storage_opts),
(dst_identity, dst_identity_opts),
(dst_image, dst_image_opts),
(dst_network, dst_network_opts),
(dst_objstorage, dst_objstorage_opts),
(snapshot, snapshot_opts),
(import_rules, import_rules_opts),
(initial_check, initial_check_opts),
(condense, condense_opts),
(database, database_opts),
]
CONF = cfg.CONF
name_configs = ['configs/config.ini']
def init_config(name_config=None):
for i in cfg_for_reg:
CONF.register_group(i[0])
CONF.register_opts(i[1], i[0])
if name_config:
name_configs[0] = name_config
CONF(default_config_files=name_configs, args="")
def get_plugins():
plugins = addons
dir_plugins = dir(plugins)
exclude_field = ['__author__', '__builtins__', '__doc__', '__file__',
'__name__', '__package__', '__path__']
plugins = [(item, plugins.__dict__[item])
for item in dir_plugins if item not in exclude_field]
return plugins
def find_group(group):
for g in xrange(len(cfg_for_reg)):
if group.name == cfg_for_reg[g][0].name:
return g
return -1
def find_field(field, fields):
for g in xrange(len(fields)):
if field.name == fields[g].name:
return g
return -1
def merge_fields(index_pair, fields):
for field in fields:
index_field = find_field(field, cfg_for_reg[index_pair][1])
if index_field >= 0:
cfg_for_reg[index_pair][1][index_field] = field
else:
cfg_for_reg[index_pair][1].append(field)
def merge_cfg(cfg):
for pair in cfg:
index_pair = find_group(pair[0])
if index_pair == -1:
cfg_for_reg.append(pair)
else:
merge_fields(index_pair, pair[1])
def collector_configs_plugins():
plugins = get_plugins()
for plugin in plugins:
merge_cfg(plugin[1].cfg_for_reg)
name_configs.append('addons/%s/configs/config.ini' % plugin[0])
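# Minimal usage sketch (illustration only; the helper below is hypothetical and
# not used elsewhere). Once init_config() has run, oslo.config exposes every
# registered group as an attribute of CONF, so the options declared above are
# read like this (assumes configs/config.ini exists):
def _example_read_config():
    init_config('configs/config.ini')
    src_endpoint = CONF.src.auth_url   # StrOpt from the 'src' group
    retries = CONF.migrate.retry       # IntOpt from the 'migrate' group, default 7
    return src_endpoint, retries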
if __name__ == '__main__':
collector_configs_plugins()
init_config()
|
|
"""Bridges between the `asyncio` module and Tornado IOLoop.
.. versionadded:: 3.2
This module integrates Tornado with the ``asyncio`` module introduced
in Python 3.4 (and available `as a separate download
<https://pypi.python.org/pypi/asyncio>`_ for Python 3.3). This makes
it possible to combine the two libraries on the same event loop.
Most applications should use `AsyncIOMainLoop` to run Tornado on the
default ``asyncio`` event loop. Applications that need to run event
loops on multiple threads may use `AsyncIOLoop` to create multiple
loops.
.. note::
Tornado requires the `~asyncio.AbstractEventLoop.add_reader` family of
methods, so it is not compatible with the `~asyncio.ProactorEventLoop` on
Windows. Use the `~asyncio.SelectorEventLoop` instead.
"""
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import functools
import salt.ext.tornado.concurrent
from salt.ext.tornado.gen import convert_yielded
from salt.ext.tornado.ioloop import IOLoop
from salt.ext.tornado import stack_context
try:
# Import the real asyncio module for py33+ first. Older versions of the
# trollius backport also use this name.
import asyncio # type: ignore
except ImportError as e:
# Asyncio itself isn't available; see if trollius is (backport to py26+).
try:
import trollius as asyncio # type: ignore
except ImportError:
# Re-raise the original asyncio error, not the trollius one.
raise e
class BaseAsyncIOLoop(IOLoop):
def initialize(self, asyncio_loop, close_loop=False, **kwargs):
super(BaseAsyncIOLoop, self).initialize(**kwargs)
self.asyncio_loop = asyncio_loop
self.close_loop = close_loop
# Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler)
self.handlers = {}
# Set of fds listening for reads/writes
self.readers = set()
self.writers = set()
self.closing = False
def close(self, all_fds=False):
self.closing = True
for fd in list(self.handlers):
fileobj, handler_func = self.handlers[fd]
self.remove_handler(fd)
if all_fds:
self.close_fd(fileobj)
if self.close_loop:
self.asyncio_loop.close()
def add_handler(self, fd, handler, events):
fd, fileobj = self.split_fd(fd)
if fd in self.handlers:
raise ValueError("fd %s added twice" % fd)
self.handlers[fd] = (fileobj, stack_context.wrap(handler))
if events & IOLoop.READ:
self.asyncio_loop.add_reader(
fd, self._handle_events, fd, IOLoop.READ)
self.readers.add(fd)
if events & IOLoop.WRITE:
self.asyncio_loop.add_writer(
fd, self._handle_events, fd, IOLoop.WRITE)
self.writers.add(fd)
def update_handler(self, fd, events):
fd, fileobj = self.split_fd(fd)
if events & IOLoop.READ:
if fd not in self.readers:
self.asyncio_loop.add_reader(
fd, self._handle_events, fd, IOLoop.READ)
self.readers.add(fd)
else:
if fd in self.readers:
self.asyncio_loop.remove_reader(fd)
self.readers.remove(fd)
if events & IOLoop.WRITE:
if fd not in self.writers:
self.asyncio_loop.add_writer(
fd, self._handle_events, fd, IOLoop.WRITE)
self.writers.add(fd)
else:
if fd in self.writers:
self.asyncio_loop.remove_writer(fd)
self.writers.remove(fd)
def remove_handler(self, fd):
fd, fileobj = self.split_fd(fd)
if fd not in self.handlers:
return
if fd in self.readers:
self.asyncio_loop.remove_reader(fd)
self.readers.remove(fd)
if fd in self.writers:
self.asyncio_loop.remove_writer(fd)
self.writers.remove(fd)
del self.handlers[fd]
def _handle_events(self, fd, events):
fileobj, handler_func = self.handlers[fd]
handler_func(fileobj, events)
def start(self):
old_current = IOLoop.current(instance=False)
try:
self._setup_logging()
self.make_current()
self.asyncio_loop.run_forever()
finally:
if old_current is None:
IOLoop.clear_current()
else:
old_current.make_current()
def stop(self):
self.asyncio_loop.stop()
def call_at(self, when, callback, *args, **kwargs):
# asyncio.call_at supports *args but not **kwargs, so bind them here.
# We do not synchronize self.time and asyncio_loop.time, so
# convert from absolute to relative.
return self.asyncio_loop.call_later(
max(0, when - self.time()), self._run_callback,
functools.partial(stack_context.wrap(callback), *args, **kwargs))
def remove_timeout(self, timeout):
timeout.cancel()
def add_callback(self, callback, *args, **kwargs):
if self.closing:
# TODO: this is racy; we need a lock to ensure that the
# loop isn't closed during call_soon_threadsafe.
raise RuntimeError("IOLoop is closing")
self.asyncio_loop.call_soon_threadsafe(
self._run_callback,
functools.partial(stack_context.wrap(callback), *args, **kwargs))
add_callback_from_signal = add_callback
class AsyncIOMainLoop(BaseAsyncIOLoop):
"""``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the
current ``asyncio`` event loop (i.e. the one returned by
``asyncio.get_event_loop()``). Recommended usage::
from salt.ext.tornado.platform.asyncio import AsyncIOMainLoop
import asyncio
AsyncIOMainLoop().install()
asyncio.get_event_loop().run_forever()
See also :meth:`tornado.ioloop.IOLoop.install` for general notes on
installing alternative IOLoops.
"""
def initialize(self, **kwargs):
super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(),
close_loop=False, **kwargs)
class AsyncIOLoop(BaseAsyncIOLoop):
"""``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop.
This class follows the usual Tornado semantics for creating new
``IOLoops``; these loops are not necessarily related to the
``asyncio`` default event loop. Recommended usage::
from salt.ext.tornado.ioloop import IOLoop
IOLoop.configure('tornado.platform.asyncio.AsyncIOLoop')
IOLoop.current().start()
Each ``AsyncIOLoop`` creates a new ``asyncio.EventLoop``; this object
can be accessed with the ``asyncio_loop`` attribute.
"""
def initialize(self, **kwargs):
loop = asyncio.new_event_loop()
try:
super(AsyncIOLoop, self).initialize(loop, close_loop=True, **kwargs)
except Exception:
# If initialize() does not succeed (taking ownership of the loop),
# we have to close it.
loop.close()
raise
def to_tornado_future(asyncio_future):
"""Convert an `asyncio.Future` to a `tornado.concurrent.Future`.
.. versionadded:: 4.1
"""
tf = salt.ext.tornado.concurrent.Future()
salt.ext.tornado.concurrent.chain_future(asyncio_future, tf)
return tf
def to_asyncio_future(tornado_future):
"""Convert a Tornado yieldable object to an `asyncio.Future`.
.. versionadded:: 4.1
.. versionchanged:: 4.3
Now accepts any yieldable object, not just
`tornado.concurrent.Future`.
"""
tornado_future = convert_yielded(tornado_future)
af = asyncio.Future()
salt.ext.tornado.concurrent.chain_future(tornado_future, af)
return af
if hasattr(convert_yielded, 'register'):
convert_yielded.register(asyncio.Future, to_tornado_future) # type: ignore
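# Minimal usage sketch (illustration only; the helper below is hypothetical and
# never called on import): install AsyncIOMainLoop so Tornado callbacks run on
# the asyncio event loop, then drive a Tornado coroutine from asyncio code via
# to_asyncio_future. gen.sleep merely stands in for real asynchronous work.
def _example_run_tornado_coroutine_on_asyncio():
    from salt.ext.tornado import gen

    AsyncIOMainLoop().install()

    @gen.coroutine
    def tornado_task():
        yield gen.sleep(0.1)
        raise gen.Return(42)

    loop = asyncio.get_event_loop()
    return loop.run_until_complete(to_asyncio_future(tornado_task()))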
|
|
from __future__ import unicode_literals
from collections import OrderedDict
import six
class Memoize(object):
"""
This class is meant to be used as a decorator.
    It decorates a function or method so that return values can be cached (and later pruned from that cache).
Usage:
class Foo(object):
@Memoize(2, Memoize.FIFO) #means that max_size == 2 and we want to use a FIFO.
def double(self, x):
return x * 2
or
@Memoize
def double(x):
return x * 2
    This implementation assumes that the arguments are immutable and won't change.
If some function needs special behavior, this class should be subclassed and _GetCacheKey
should be overridden.
Note that the 1st parameter will determine whether it should be used as an instance method
or a function (It'll just check if the 1st parameter is 'self', and if it is, an
instance method is used). If this behavior is not wanted, the memo_target must be forced
to MEMO_INSTANCE_METHOD or MEMO_FUNCTION.
    Note that non-declared keyword arguments (`**kwargs`) are forbidden. Offering proper support for them would cause
    prohibitive overhead.
"""
# This should be the simplest (and fastest) way of caching things: what gets in first
# is removed first.
FIFO = 'FIFO'
LRU = 'LRU'
MEMO_INSTANCE_METHOD = 'instance_method'
MEMO_FUNCTION = 'function'
MEMO_FROM_ARGSPEC = 'from_argspec'
def __new__(cls, *args, **kwargs):
"""
        We override __new__ so that the decorator can be used both with and without parameters,
        as all parameters are optional.
E.g.:
@Memoize
def double(x):
return x * 2
"""
if not kwargs and len(args) == 1 and not isinstance(args[0], int):
            # We received a single non-int argument: it is the function being decorated
            # (no decorator parameters), so instantiate and apply __call__ right away.
ret = object.__new__(cls)
ret.__class__.__init__(ret)
ret = ret.__call__(args[0])
return ret
ret = object.__new__(cls)
return ret
def __init__(self, maxsize=50, prune_method=FIFO, memo_target=MEMO_FROM_ARGSPEC):
"""
:param int maxsize:
The maximum size of the internal cache (default is 50).
:param unicode prune_method:
This is according to the way used to prune entries. Right now only
pruning the oldest entry is supported (FIFO), but different ways could be
available (e.g.: pruning LRU seems a natural addition)
:param unicode memo_target:
One of the constants MEMO_INSTANCE_METHOD or MEMO_FUNCTION or MEMO_FROM_ARGSPEC.
When from argspec it'll try to see if the 1st parameter is 'self' and if it is,
it'll fall to using the MEMO_INSTANCE_METHOD (otherwise the MEMO_FUNCTION is used)
If the signature of the function is 'special' and doesn't follow the conventions,
the memo_target MUST be specified.
"""
self._prune_method = prune_method
self._maxsize = maxsize
self._memo_target = memo_target
def _GetCacheKey(self, args, kwargs):
"""
Subclasses may override to provide a different cache key. The default implementation
just handles the arguments.
:param list args:
The arguments received.
:param dict kwargs:
The keyword arguments received.
:return tuple:
A tuple representing a call to our memoized function.
This tuple is normalized with values for all arguments that the function receives,
based on `args` and `kwargs` passed in this call, in addition to any default values.
"""
        # `argspec` lists the explicitly declared parameters and their default values.
has_default, argspec = self._argspec
if kwargs:
named_arguments_count = len(argspec)
# If we received kwargs, set them in our argspec, as if that was the default value.
argspec = argspec.copy()
            for k, v in six.iteritems(kwargs):
argspec[k] = v
            # If the argspec has been extended, we received non-declared keyword arguments, which is forbidden.
if named_arguments_count != len(argspec):
raise ValueError('Can\'t use non-declared keyword arguments.')
elif not has_default:
            # If we got no kwargs and there are no defaults, all parameters must be present (plus maybe varargs).
# The calling arguments alone are a good cache key.
return args
# Obtain key from the args we received, plus whatever defaults we have in our argspec.
return args + tuple(list(argspec.values())[len(args):])
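    # Worked example of the key normalization above (illustration only): for a
    # memoized function ``def f(a, b, c=3)`` the argspec odict holds
    # {'a': <no default>, 'b': <no default>, 'c': 3}, so
    #   f(1, 2)    -> key (1, 2, 3)
    #   f(1, 2, 3) -> key (1, 2, 3)
    #   f(1, b=2)  -> key (1, 2, 3)
    # i.e. equivalent calls collapse onto the same cache entry.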
def _GetArgspecObject(self, args, trail, kwargs, defaults):
"""
Create the argspec object that helps when creating the cache key. Subclasses may want to customize the argspec
object to help offer customized cache key generation algorithm.
:param list(unicode) args:
The names of the explicit arguments.
:param unicode trail:
The variable name used to store varargs.
`None` if no varargs are accepted.
:param unicode kwargs:
            The variable name used to store non-explicit keyword arguments.
`None` if no non-explicit keyword arguments are accepted.
:param tuple(object) defaults:
The default values (when existent) for the variable listed in the `args` parameter.
            When not all parameters have default values this tuple will have fewer elements than `args`. Given the
            function `def Foo(a, b, c=3, d=4): pass`, then `args == ['a', 'b', 'c', 'd']` and `defaults == (3, 4)`.
`None` if there are no defaults.
:rtype: object
:return:
This object will be set as `self._argspec`, this object can be used by `self._GetCacheKey`.
            The base class uses a tuple with a bool indicating whether defaults are present and an `OrderedDict`
            that maps "parameter name" -> "default value" (a sentinel string is used when there is no default).
"""
named_arguments = OrderedDict()
if kwargs is not None:
raise TypeError(
'Non-declared keyword arguments (`**kwargs`) not supported.'
' Note that Memoize must be the first decorator (nearest to the function) used.'
)
if defaults is None:
has_defaults = False
defaults = ()
else:
has_defaults = True
if self._memo_target == self.MEMO_INSTANCE_METHOD:
args = args[1:] # Ignore self when dealing with instance method
first_default = len(args) - len(defaults)
for i, arg in enumerate(args):
if i < first_default:
named_arguments[arg] = '@Memoize: no_default'
else:
named_arguments[arg] = defaults[i - first_default]
return has_defaults, named_arguments
def __call__(self, func):
"""
:param function func:
This is the function which should be decorated.
:return function:
The function decorated to cache the values based on the arguments.
"""
import inspect
if self._memo_target == self.MEMO_FROM_ARGSPEC:
check_func = func
if inspect.ismethod(check_func):
check_func = check_func.im_func
if not inspect.isfunction(check_func):
if type(check_func) == classmethod:
raise TypeError(
'To declare a classmethod with Memoize, the Memoize must be called before '
'the classmethod\n(will work as a global cache where cls will be part of the '
'cache-key).')
else:
raise TypeError('Expecting a function/method/classmethod for Memoize.')
else:
if 'self' in six.get_function_code(check_func).co_varnames:
self._memo_target = self.MEMO_INSTANCE_METHOD
else:
# If it's a classmethod, it should enter here (and the cls will
# be used as a part of the cache key, so, all should work properly).
self._memo_target = self.MEMO_FUNCTION
# Register argspec details, these are used to normalize cache keys
self._argspec = self._GetArgspecObject(*inspect.getargspec(func))
# Create call wrapper, and make it look like the real function
call = self._CreateCallWrapper(func)
if six.PY2:
call.func_name = func.func_name
call.__name__ = func.__name__
call.__doc__ = func.__doc__
return call
def _CreateCacheObject(self):
"""
Creates the cache object we want.
:returns object:
The object to be used as the cache (will prune items after the maximum size
is reached).
This object has a dict interface.
"""
if self._prune_method == self.FIFO:
from zerotk.fifo import FIFO
return FIFO(self._maxsize)
elif self._prune_method == self.LRU:
from zerotk.lru import LRU
return LRU(self._maxsize)
else:
raise AssertionError('Memoize prune method not supported: %s' % self._prune_method)
def _CreateCallWrapper(self, func):
"""
        Create the call wrapper that caches the results of the decorated function.
:param object func:
This is the function that is being cached.
"""
SENTINEL = []
if self._memo_target == self.MEMO_INSTANCE_METHOD:
outer_self = self
cache_name = '__%s_cache__' % func.__name__
def Call(self, *args, **kwargs):
cache = getattr(self, cache_name, None)
if cache is None:
cache = outer_self._CreateCacheObject()
setattr(self, cache_name, cache)
#--- GetFromCacheOrCreate: inlined for speed
key = outer_self._GetCacheKey(args, kwargs)
res = cache.get(key, SENTINEL)
if res is SENTINEL:
res = func(self, *args, **kwargs)
cache[key] = res
return res
def ClearCache(self):
"""
Clears the cache for a given instance (note that self must be passed as a parameter).
"""
cache = getattr(self, cache_name, None)
if cache is not None:
cache.clear()
Call.ClearCache = ClearCache
return Call
elif self._memo_target == self.MEMO_FUNCTION:
# When it's a function, we can use the same cache the whole time (i.e.: it's global)
cache = self._CreateCacheObject()
def Call(*args, **kwargs):
#--- GetFromCacheOrCreate: inlined for speed
key = self._GetCacheKey(args, kwargs)
res = cache.get(key, SENTINEL)
if res is SENTINEL:
res = func(*args, **kwargs)
cache[key] = res
return res
Call.ClearCache = cache.clear
return Call
else:
raise AssertionError("Don't know how to deal with memo target: %s" % self._memo_target)
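# Minimal usage sketch (illustration only; the helper below is hypothetical and
# never called). It memoizes a free function with a tiny FIFO cache and then
# drops the cache; it assumes the zerotk.fifo backend used by
# _CreateCacheObject is importable.
def _example_memoize_usage():
    @Memoize(maxsize=2, prune_method=Memoize.FIFO)
    def double(x):
        return x * 2

    assert double(2) == 4   # computed and stored
    assert double(2) == 4   # served from the FIFO cache
    double.ClearCache()     # the wrapper exposes ClearCache (see _CreateCallWrapper)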
|
|
#A script to create custom POX flows
#http://github.com/abh15/pox-flowgen
#import random
#fname="poxscript_"+str(random.randint(1000,9999))+".py"
#uncomment above lines if each script with a new name is required
fname="controllerScript.py"
file=open(fname,'w')
file.close()
#----create a new empty file----
target=open(fname,'w')
target.write("\"\"\"\nScript created by POX custom flow generator (PCFG)\n\"\"\"\n")
target.write("from pox.core import core \nfrom pox.lib.addresses import IPAddr \nfrom pox.lib.addresses import EthAddr \nimport pox.openflow.libopenflow_01 as of")
target.write("\nlog = core.getLogger()\n")
#----print importing
def check(): #call match again or exit depending on if i/p is A or M
f=raw_input("Enter A to move to action\nM to stay in match menu\n>")
if f=="M":
print t
q=raw_input(">")
match(int(q))
else:
pass
#---check if want to stay in match menu i.e to create multiple matches-----
def check2(str1,str2): #call actions again or exit depending on if i/p is A or N
global msg
msg.append(str1+str2) #create/concat array of all used actions
f=raw_input("Enter A to stay in action\nF to create Flow\n>")
if f=="A":
print t2
q=raw_input(">")
actions(int(q))
else:
pass
#---check if want to stay in action menu i.e to create multiple actions----
def checkflows():
global fooflows
fooflows=fooflows-1
if fooflows>0:
fl()
else:
checkswitch()
#-------keeps in flow menu alive until all flows are specified---------
def checkswitch():
global foox
foox=foox-1
if foox>0:
switch()
fl()
else:
pass
#-------keeps in switch/flow menu until all switches/flows are specified---------
def match(k):
#global name
def inport():
f=raw_input("Enter inport>")
target.write(name+"msg.match.in_port="+str(f)+"\n")
check() #check if more matching actions are going to be added
def dltype():
f=raw_input("Enter dltype>")
target.write(name+"msg.match.dl_type="+str(f)+"\n")
check()
def nwtos():
f=raw_input("Enter nwtos>")
target.write(name+"msg.match.nw_tos="+str(f)+"\n")
check()
def nwproto():
f=raw_input("Enter nwproto>")
target.write(name+"msg.match.nw_proto="+str(f)+"\n")
check()
def nwsrc():
f=raw_input("Enter nwsrc>")
target.write(name+"msg.match.nw_src=IPAddr(\""+f+"\")\n")
check()
def nwdst():
f=raw_input("Enter nwdst>")
target.write(name+"msg.match.nw_dst=IPAddr(\""+f+"\")\n")
check()
def dlvlan():
f=raw_input("Enter dlvlan>")
target.write(name+"msg.match.dl_vlan="+str(f))
target.write("\n")
check()
def dlvlanpcp():
f=raw_input("Enter dlvlanpcp>")
target.write(name+"msg.match.dl_vlan_pcp="+str(f))
target.write("\n")
check()
def dlsrc():
f=raw_input("Enter dlsrc>")
target.write(name+"msg.match.dl_src = EthAddr(\""+f+"\")\n")
check()
def dldst():
f=raw_input("Enter dldst>")
target.write(name+"msg.match.dl_dst = EthAddr(\""+f+"\")\n")
check()
def tpsrc():
f=raw_input("Enter tpsrc>")
target.write(name+"msg.match.tp_src="+str(f))
target.write("\n")
check()
def tpdst():
f=raw_input("Enter tp dst>")
target.write(name+"msg.match.tp_dst="+str(f))
target.write("\n")
check()
def priority():
f=raw_input("Enter priority>")
target.write(name+"msg.priority="+str(f))
target.write("\n")
check()
options={1:inport,2:dltype,3:nwtos,4:nwproto,5:nwsrc,
6:nwdst,7:dlvlan,8:dlvlanpcp,9:dlsrc,10:dldst,11:tpsrc,12:tpdst,13:priority} #func_dictionary
target.write("\n#"+name+" Match structure\n")
target.write(baz[int(sw_no)]+"="+str(dpid)+"\n") #write dpid
target.write(name+"msg = of.ofp_flow_mod()\n")
target.write(name+"msg.cookie = 0\n")
options[k]() #call the k'th function depending upon user input
#----------matching structure---------
def actions(k):
#name of current flow instance
def vlan_id():
v=raw_input("Enter vlan id>")
target.write(name+"vlan_id = of.ofp_action_vlan_vid (vlan_vid="+str(v)+")")
target.write("\n")
check2(name,"vlan_id") #check if more actions are going to be added
def stripvlan():
v=raw_input("Enter stripvlan yes or no>")
if v=="yes":
target.write(name+"stripvlan = of.ofp_action_strip_vlan ()")
target.write("\n")
else:
target.write("\n")
check2(name,"stripvlan")
def out():
v=raw_input("Enter out port>")
target.write(name+"out = of.ofp_action_output(port ="+str(v)+")")
target.write("\n")
check2(name,"out")
def vlanPriority():
v=raw_input("Enter vlan priority>")
target.write(name+"vlanPriority = of.ofp_action_vlan_pcp (vlan_pcp="+str(v)+")")
target.write("\n")
check2(name,"vlanPriority")
def enqueue():
v=raw_input("Enter enq>")
        target.write(name+"enqueue = of.ofp_action_enqueue (enqueue = "+str(v)+")")
target.write("\n")
check2(name,"enqueue")
def srcPort():
v=raw_input("Enter srcport>")
target.write(name+"srcPort = of.ofp_action_tp_port.set_src = (tp_port = "+str(v)+")")
target.write("\n")
check2(name,"srcPort")
def dstPort():
v=raw_input("Enter destport>")
target.write(name+"dstPort = of.ofp_action_tp_port.set_dst = (tp_port = "+str(v)+")")
target.write("\n")
        check2(name,"dstPort")
def srcMAC():
v=raw_input("Enter src MAC add>")
target.write(name+"srcMAC = of.ofp_action_dl_addr.set_src(EthAddr(\""+str(v)+"\"))")
target.write("\n")
check2(name,"srcMAC")
def dstMAC():
v=raw_input("Enter dst MAC add>")
target.write(name+"dstMAC = of.ofp_action_dl_addr.set_dst(EthAddr(\""+str(v)+"\"))")
target.write("\n")
check2(name,"dstMAC")
def srcIP():
v=raw_input("Enter source IP>")
target.write(name+"srcIP = of.ofp_action_nw_addr.set_src(IPAddr(\""+str(v)+"\"))")
target.write("\n")
check2(name,"srcIP")
def dstIP():
v=raw_input("Enter dstIP>")
target.write(name+"dstIP = of.ofp_action_nw_addr.set_dst(IPAddr(\""+str(v)+"\"))")
target.write("\n")
check2(name,"dstIP")
def tos():
v=raw_input("Enter tos>")
target.write(name+"tos = of.ofp_action_nw_tos (nw_tos = "+str(v)+")")
target.write("\n")
check2(name,"tos")
options={1:vlan_id,2:stripvlan,3:out,4:vlanPriority,5:enqueue,
6:srcPort,7:dstPort,8:srcMAC,9:dstMAC,10:srcIP,11:dstIP,12:tos}
options[k]() #select action based on user input
#----------actions structure---------
global foox
x=raw_input("Enter no. of switches\n>")
foox=int(x)
y=[]
baz=[]
msg=[]
bar=[]
#------------main------------
def switch(): #get number of switches,flows & dpid
global fooflows
global baz
global y
global sw_no
global dpid
global flows
print "Select switch:\n"
for i in xrange(0,int(x)):
print str(i)+":"+"\tswitch"+str(i)
baz.append("switch"+str(i))
sw_no=raw_input(">")
defaultdpid=int(sw_no)+1
tbp=raw_input("Enter DPID of switch(a hex no.) or D for default dpid\n>")
if (tbp=="D"):
dpid=oct(int(str(defaultdpid),10))
else:
dpid=oct(int(tbp,16))
flows=raw_input("Enter no of flows\n>")
fooflows=int(flows) #used for checkswitch func, possibly buggy
y.append(int(flows)) #create list of no. of flowmsgs per switch for sendToDPID msgs
#----------switch structure------------
def fl(): #display available match/actions & get them
global msg
global bar
global fl_no
global t
global t2
global q
global name
print "Select flow:\n" #display flows to choose from
for i in xrange(0,int(flows)):
print str(i)+":"+"\tflow"+str(sw_no)+"_"+str(i)
bar.append("flow"+str(sw_no)+"_"+str(i))
fl_no=raw_input(">")
name="flow"+sw_no+"_"+fl_no
    t= "\n1:inport\n2:dltype\n3:nwtos\n4:nwproto\n5:nwsrc\n6:nwdst\n7:dlvlan\n8:dlvlanpcp\n9:dlsrc\n10:dldst\n11:tpsrc\n12:tpdst\n13:Priority"
print t #choose a match & call match func
q=raw_input(">")
match(int(q))
#----------------end match
t2="1:vlanid\n2:stripvlan\n3:outport\n4:vlanprior\n5:enqueue\n6:srcport\n7:dstport\n8:srcmac\n9:dstmac\n10:srcip\n11:dstip\n12:tos"
print t2
w=raw_input(">")
target.write("# ACTIONS----------------\n")
actions(int(w)) #choose a action & call action func
    target.write(name+"msg.actions="+str(msg).replace('\'','')+"\n") #write the msg array containing the actions used
msg=[]
#--------end actions
checkflows() #check for more flows
#-----------flows structure------------
switch()
fl()
#call functions in first iteration
target.write("\ndef install_flows(): \n\tlog.info(\" ### Installing static flows... ###\")\n")
for i in xrange(0,int(x)):
for j in xrange(0,y[i]):
target.write("\tcore.openflow.sendToDPID(switch"+str(i)+",flow"+str(i)+"_"+str(j)+"msg)\n")
target.write("\tlog.info(\"### Static flows installed. ###\")\n")
#---print function to install flows-----
target.write("def launch (): \n\tlog.info(\"####Starting...####\")\n\tcore.callDelayed (15, install_flows)\n\tlog.info(\"### Waiting for switches to connect.. ###\")")
#---print the launch function-----
target.close() #save file
print "Done :)"
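#----sample output----
# For one switch (default DPID) with a single flow matching in_port 1 and
# outputting on port 2, the generated controllerScript.py looks roughly like
# this (the exact lines depend on the interactive answers given above):
#
#   """
#   Script created by POX custom flow generator (PCFG)
#   """
#   from pox.core import core
#   from pox.lib.addresses import IPAddr
#   from pox.lib.addresses import EthAddr
#   import pox.openflow.libopenflow_01 as of
#   log = core.getLogger()
#
#   #flow0_0 Match structure
#   switch0=01
#   flow0_0msg = of.ofp_flow_mod()
#   flow0_0msg.cookie = 0
#   flow0_0msg.match.in_port=1
#   # ACTIONS----------------
#   flow0_0out = of.ofp_action_output(port =2)
#   flow0_0msg.actions=[flow0_0out]
#
#   def install_flows():
#       log.info(" ### Installing static flows... ###")
#       core.openflow.sendToDPID(switch0,flow0_0msg)
#       log.info("### Static flows installed. ###")
#   def launch ():
#       log.info("####Starting...####")
#       core.callDelayed (15, install_flows)
#       log.info("### Waiting for switches to connect.. ###")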
|
|
import pickle
import codecs
from hashlib import md5
from datetime import datetime
from elasticsearch_dsl import document, field, Mapping
from elasticsearch_dsl.exceptions import ValidationException, IllegalOperation
from pytest import raises
class MyInner(field.InnerObjectWrapper):
pass
class MyDoc(document.DocType):
title = field.Keyword()
name = field.Text()
created_at = field.Date()
inner = field.Object(properties={'old_field': field.Text()}, doc_class=MyInner)
class MySubDoc(MyDoc):
name = field.Keyword()
class Meta:
doc_type = 'my_custom_doc'
index = 'default-index'
class MyDoc2(document.DocType):
extra = field.Long()
class MyMultiSubDoc(MyDoc2, MySubDoc):
pass
class DocWithNested(document.DocType):
comments = field.Nested(
properties={
'title': field.Text(),
'tags': field.Keyword(multi=True)
}
)
class SimpleCommit(document.DocType):
files = field.Text(multi=True)
class Meta:
index = 'test-git'
class Secret(str): pass
class SecretField(field.CustomField):
builtin_type = 'text'
def _serialize(self, data):
return codecs.encode(data, 'rot_13')
def _deserialize(self, data):
if isinstance(data, Secret):
return data
return Secret(codecs.decode(data, 'rot_13'))
class SecretDoc(document.DocType):
title = SecretField(index='no')
class NestedSecret(document.DocType):
secrets = field.Nested(properties={'title': SecretField()})
class OptionalObjectWithRequiredField(document.DocType):
comments = field.Nested(properties={'title': field.Keyword(required=True)})
def test_optional_inner_objects_are_not_validated_if_missing():
d = OptionalObjectWithRequiredField()
assert d.full_clean() is None
def test_custom_field():
s = SecretDoc(title=Secret('Hello'))
assert {'title': 'Uryyb'} == s.to_dict()
assert s.title == 'Hello'
s.title = 'Uryyb'
assert s.title == 'Hello'
assert isinstance(s.title, Secret)
def test_custom_field_mapping():
assert {
'secret_doc': {
'properties': {
'title': {'index': 'no', 'type': 'text'}
}
}
} == SecretDoc._doc_type.mapping.to_dict()
def test_custom_field_in_nested():
s = NestedSecret()
s.secrets.append({'title': Secret('Hello')})
assert {'secrets': [{'title': 'Uryyb'}]} == s.to_dict()
assert s.secrets[0].title == 'Hello'
def test_multi_works_after_doc_has_been_saved():
c = SimpleCommit()
c.full_clean()
c.files.append('setup.py')
assert c.to_dict() == {'files': ['setup.py']}
def test_multi_works_in_nested_after_doc_has_been_serialized():
# Issue #359
c = DocWithNested(comments=[{'title': 'First!'}])
assert [] == c.comments[0].tags
assert {'comments': [{'title': 'First!'}]} == c.to_dict()
assert [] == c.comments[0].tags
def test_null_value_for_object():
d = MyDoc(inner=None)
assert d.inner is None
def test_inherited_doc_types_can_override_index():
class MyDocDifferentIndex(MySubDoc):
class Meta:
index = 'not-default-index'
assert MyDocDifferentIndex._doc_type.index == 'not-default-index'
assert MyDocDifferentIndex().meta.index == 'not-default-index'
def test_to_dict_with_meta():
d = MySubDoc(title='hello')
d.meta.parent = 'some-parent'
assert {
'_index': 'default-index',
'_parent': 'some-parent',
'_type': 'my_custom_doc',
'_source': {'title': 'hello'},
} == d.to_dict(True)
def test_to_dict_with_meta_includes_custom_index():
d = MySubDoc(title='hello')
d.meta.index = 'other-index'
assert {
'_index': 'other-index',
'_type': 'my_custom_doc',
'_source': {'title': 'hello'},
} == d.to_dict(True)
def test_attribute_can_be_removed():
d = MyDoc(title='hello')
del d.title
assert 'title' not in d._d_
def test_doc_type_can_be_correctly_pickled():
d = DocWithNested(title='Hello World!', comments=[{'title': 'hellp'}], meta={'id': 42})
s = pickle.dumps(d)
d2 = pickle.loads(s)
assert d2 == d
assert 42 == d2.meta.id
assert 'Hello World!' == d2.title
assert [{'title': 'hellp'}] == d2.comments
def test_meta_is_accessible_even_on_empty_doc():
d = MyDoc()
d.meta
d = MyDoc(title='aaa')
d.meta
def test_meta_field_mapping():
class User(document.DocType):
username = field.Text()
class Meta:
all = document.MetaField(enabled=False)
_index = document.MetaField(enabled=True)
dynamic = document.MetaField('strict')
dynamic_templates = document.MetaField([42])
assert {
'user': {
'properties': {
'username': {'type': 'text'}
},
'_all': {'enabled': False},
'_index': {'enabled': True},
'dynamic': 'strict',
'dynamic_templates': [42]
}
} == User._doc_type.mapping.to_dict()
def test_multi_value_fields():
class Blog(document.DocType):
tags = field.Keyword(multi=True)
b = Blog()
assert [] == b.tags
b.tags.append('search')
b.tags.append('python')
assert ['search', 'python'] == b.tags
def test_docs_with_properties():
class User(document.DocType):
pwd_hash = field.Text()
def check_password(self, pwd):
return md5(pwd).hexdigest() == self.pwd_hash
@property
def password(self):
raise AttributeError('readonly')
@password.setter
def password(self, pwd):
self.pwd_hash = md5(pwd).hexdigest()
u = User(pwd_hash=md5(b'secret').hexdigest())
assert u.check_password(b'secret')
assert not u.check_password(b'not-secret')
u.password = b'not-secret'
assert 'password' not in u._d_
assert not u.check_password(b'secret')
assert u.check_password(b'not-secret')
with raises(AttributeError):
u.password
def test_nested_can_be_assigned_to():
d1 = DocWithNested(comments=[{'title': 'First!'}])
d2 = DocWithNested()
d2.comments = d1.comments
assert d2.comments == [{'title': 'First!'}]
assert {'comments': [{'title': 'First!'}]} == d2.to_dict()
def test_nested_can_be_none():
d = DocWithNested(comments=None, title='Hello World!')
assert {"title": 'Hello World!'} == d.to_dict()
def test_nested_defaults_to_list_and_can_be_updated():
md = DocWithNested()
assert [] == md.comments
md.comments.append({'title': 'hello World!'})
assert {'comments': [{'title': 'hello World!'}]} == md.to_dict()
def test_to_dict_is_recursive_and_can_cope_with_multi_values():
md = MyDoc(name=['a', 'b', 'c'])
md.inner = [{'old_field': 'of1'}, {'old_field': 'of2'}]
assert isinstance(md.inner[0], MyInner)
assert {
'name': ['a', 'b', 'c'],
'inner': [{'old_field': 'of1'}, {'old_field': 'of2'}],
} == md.to_dict()
def test_to_dict_ignores_empty_collections():
md = MyDoc(name='', address={}, count=0, valid=False, tags=[])
assert {'name': '', 'count': 0, 'valid': False} == md.to_dict()
def test_declarative_mapping_definition():
assert issubclass(MyDoc, document.DocType)
assert hasattr(MyDoc, '_doc_type')
assert 'my_doc' == MyDoc._doc_type.name
assert {
'my_doc': {
'properties': {
'created_at': {'type': 'date'},
'name': {'type': 'text'},
'title': {'type': 'keyword'},
'inner': {
'type': 'object',
'properties': {'old_field': {'type': 'text'}}
}
}
}
} == MyDoc._doc_type.mapping.to_dict()
def test_you_can_supply_own_mapping_instance():
class MyD(document.DocType):
title = field.Text()
class Meta:
mapping = Mapping('my_d')
mapping.meta('_all', enabled=False)
assert {
'my_d': {
'_all': {'enabled': False},
'properties': {'title': {'type': 'text'}}
}
} == MyD._doc_type.mapping.to_dict()
def test_document_can_be_created_dynamically():
n = datetime.now()
md = MyDoc(title='hello')
md.name = 'My Fancy Document!'
md.created_at = n
inner = md.inner
# consistent returns
assert inner is md.inner
inner.old_field = 'Already defined.'
md.inner.new_field = ['undefined', 'field']
assert {
'title': 'hello',
'name': 'My Fancy Document!',
'created_at': n,
'inner': {
'old_field': 'Already defined.',
'new_field': ['undefined', 'field']
}
} == md.to_dict()
def test_invalid_date_will_raise_exception():
md = MyDoc()
with raises(ValidationException):
md.created_at = 'not-a-date'
def test_document_inheritance():
assert issubclass(MySubDoc, MyDoc)
assert issubclass(MySubDoc, document.DocType)
assert hasattr(MySubDoc, '_doc_type')
assert 'my_custom_doc' == MySubDoc._doc_type.name
assert {
'my_custom_doc': {
'properties': {
'created_at': {'type': 'date'},
'name': {'type': 'keyword'},
'title': {'type': 'keyword'},
'inner': {
'type': 'object',
'properties': {'old_field': {'type': 'text'}}
}
}
}
} == MySubDoc._doc_type.mapping.to_dict()
def test_meta_fields_are_stored_in_meta_and_ignored_by_to_dict():
md = MySubDoc(meta={'id': 42}, name='My First doc!')
md.meta.index = 'my-index'
assert md.meta.index == 'my-index'
assert md.meta.id == 42
assert {'name': 'My First doc!'} == md.to_dict()
assert {'id': 42, 'index': 'my-index'} == md.meta.to_dict()
def test_meta_inheritance():
assert issubclass(MyMultiSubDoc, MySubDoc)
assert issubclass(MyMultiSubDoc, MyDoc2)
assert issubclass(MyMultiSubDoc, document.DocType)
assert hasattr(MyMultiSubDoc, '_doc_type')
# doc_type should not be inherited
assert 'my_multi_sub_doc' == MyMultiSubDoc._doc_type.name
# index and using should be
assert MyMultiSubDoc._doc_type.index == MySubDoc._doc_type.index
assert MyMultiSubDoc._doc_type.using == MySubDoc._doc_type.using
assert {
'my_multi_sub_doc': {
'properties': {
'created_at': {'type': 'date'},
'name': {'type': 'keyword'},
'title': {'type': 'keyword'},
'inner': {
'type': 'object',
'properties': {'old_field': {'type': 'text'}}
},
'extra': {'type': 'long'}
}
}
} == MyMultiSubDoc._doc_type.mapping.to_dict()
def test_meta_fields_can_be_accessed_directly_with_underscore():
p = object()
md = MyDoc(_id=42, title='Hello World!')
md._parent = p
assert md.meta.id == 42
assert md._id == 42
assert md.meta.parent is md._parent is p
def test_save_no_index(mock_client):
md = MyDoc()
with raises(ValidationException):
md.save(using='mock')
def test_delete_no_index(mock_client):
md = MyDoc()
with raises(ValidationException):
md.delete(using='mock')
def test_update_no_fields():
md = MyDoc()
with raises(IllegalOperation):
md.update()
def test_search_with_custom_alias_and_index(mock_client):
search_object = MyDoc.search(
using="staging",
index=["custom_index1", "custom_index2"])
assert search_object._using == "staging"
assert search_object._index == ["custom_index1", "custom_index2"]
def test_from_es_respects_underscored_non_meta_fields():
doc = {
"_index": "test-index",
"_type": "company",
"_id": "elasticsearch",
"_score": 12.0,
"fields": {
"hello": "world",
"_routing": "es",
"_tags": ["search"]
},
"_source": {
"city": "Amsterdam",
"name": "Elasticsearch",
"_tagline": "You know, for search"
}
}
class Company(document.DocType):
pass
c = Company.from_es(doc)
assert c.to_dict() == {'city': 'Amsterdam', 'hello': 'world', 'name': 'Elasticsearch', "_tags": ["search"], "_tagline": "You know, for search"}
|
|
"""Support for Tasmota sensors."""
from __future__ import annotations
from datetime import datetime
from typing import Any
from hatasmota import const as hc, sensor as tasmota_sensor, status_sensor
from hatasmota.entity import TasmotaEntity as HATasmotaEntity
from hatasmota.models import DiscoveryHashType
from homeassistant.components import sensor
from homeassistant.components.sensor import (
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL_INCREASING,
SensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_BILLION,
CONCENTRATION_PARTS_PER_MILLION,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_CO2,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_POWER,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_SIGNAL_STRENGTH,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
ELECTRIC_CURRENT_AMPERE,
ELECTRIC_POTENTIAL_VOLT,
ENERGY_KILO_WATT_HOUR,
ENTITY_CATEGORY_DIAGNOSTIC,
FREQUENCY_HERTZ,
LENGTH_CENTIMETERS,
LIGHT_LUX,
MASS_KILOGRAMS,
PERCENTAGE,
POWER_VOLT_AMPERE,
POWER_WATT,
PRESSURE_HPA,
SIGNAL_STRENGTH_DECIBELS,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
SPEED_KILOMETERS_PER_HOUR,
SPEED_METERS_PER_SECOND,
SPEED_MILES_PER_HOUR,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
TEMP_KELVIN,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DATA_REMOVE_DISCOVER_COMPONENT
from .discovery import TASMOTA_DISCOVERY_ENTITY_NEW
from .mixins import TasmotaAvailability, TasmotaDiscoveryUpdate
DEVICE_CLASS = "device_class"
STATE_CLASS = "state_class"
ICON = "icon"
# A Tasmota sensor type may be mapped to either a device class or an icon, not both
SENSOR_DEVICE_CLASS_ICON_MAP = {
hc.SENSOR_AMBIENT: {DEVICE_CLASS: DEVICE_CLASS_ILLUMINANCE},
hc.SENSOR_APPARENT_POWERUSAGE: {DEVICE_CLASS: DEVICE_CLASS_POWER},
hc.SENSOR_BATTERY: {
DEVICE_CLASS: DEVICE_CLASS_BATTERY,
STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
hc.SENSOR_CCT: {ICON: "mdi:temperature-kelvin"},
hc.SENSOR_CO2: {DEVICE_CLASS: DEVICE_CLASS_CO2},
hc.SENSOR_COLOR_BLUE: {ICON: "mdi:palette"},
hc.SENSOR_COLOR_GREEN: {ICON: "mdi:palette"},
hc.SENSOR_COLOR_RED: {ICON: "mdi:palette"},
hc.SENSOR_CURRENT: {ICON: "mdi:alpha-a-circle-outline"},
hc.SENSOR_DEWPOINT: {ICON: "mdi:weather-rainy"},
hc.SENSOR_DISTANCE: {ICON: "mdi:leak"},
hc.SENSOR_ECO2: {ICON: "mdi:molecule-co2"},
hc.SENSOR_FREQUENCY: {ICON: "mdi:current-ac"},
hc.SENSOR_HUMIDITY: {
DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
hc.SENSOR_ILLUMINANCE: {DEVICE_CLASS: DEVICE_CLASS_ILLUMINANCE},
hc.SENSOR_STATUS_IP: {ICON: "mdi:ip-network"},
hc.SENSOR_STATUS_LINK_COUNT: {ICON: "mdi:counter"},
hc.SENSOR_MOISTURE: {ICON: "mdi:cup-water"},
hc.SENSOR_STATUS_MQTT_COUNT: {ICON: "mdi:counter"},
hc.SENSOR_PB0_3: {ICON: "mdi:flask"},
hc.SENSOR_PB0_5: {ICON: "mdi:flask"},
hc.SENSOR_PB10: {ICON: "mdi:flask"},
hc.SENSOR_PB1: {ICON: "mdi:flask"},
hc.SENSOR_PB2_5: {ICON: "mdi:flask"},
hc.SENSOR_PB5: {ICON: "mdi:flask"},
hc.SENSOR_PM10: {ICON: "mdi:air-filter"},
hc.SENSOR_PM1: {ICON: "mdi:air-filter"},
hc.SENSOR_PM2_5: {ICON: "mdi:air-filter"},
hc.SENSOR_POWERFACTOR: {ICON: "mdi:alpha-f-circle-outline"},
hc.SENSOR_POWERUSAGE: {DEVICE_CLASS: DEVICE_CLASS_POWER},
hc.SENSOR_PRESSURE: {
DEVICE_CLASS: DEVICE_CLASS_PRESSURE,
STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
hc.SENSOR_PRESSUREATSEALEVEL: {
DEVICE_CLASS: DEVICE_CLASS_PRESSURE,
STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
hc.SENSOR_PROXIMITY: {ICON: "mdi:ruler"},
hc.SENSOR_REACTIVE_POWERUSAGE: {DEVICE_CLASS: DEVICE_CLASS_POWER},
hc.SENSOR_STATUS_LAST_RESTART_TIME: {DEVICE_CLASS: DEVICE_CLASS_TIMESTAMP},
hc.SENSOR_STATUS_RESTART_REASON: {ICON: "mdi:information-outline"},
hc.SENSOR_STATUS_SIGNAL: {DEVICE_CLASS: DEVICE_CLASS_SIGNAL_STRENGTH},
hc.SENSOR_STATUS_RSSI: {ICON: "mdi:access-point"},
hc.SENSOR_STATUS_SSID: {ICON: "mdi:access-point-network"},
hc.SENSOR_TEMPERATURE: {
DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
hc.SENSOR_TODAY: {DEVICE_CLASS: DEVICE_CLASS_ENERGY},
hc.SENSOR_TOTAL: {
DEVICE_CLASS: DEVICE_CLASS_ENERGY,
STATE_CLASS: STATE_CLASS_TOTAL_INCREASING,
},
hc.SENSOR_TOTAL_START_TIME: {ICON: "mdi:progress-clock"},
hc.SENSOR_TVOC: {ICON: "mdi:air-filter"},
hc.SENSOR_VOLTAGE: {ICON: "mdi:alpha-v-circle-outline"},
hc.SENSOR_WEIGHT: {ICON: "mdi:scale"},
hc.SENSOR_YESTERDAY: {DEVICE_CLASS: DEVICE_CLASS_ENERGY},
}
SENSOR_UNIT_MAP = {
hc.CONCENTRATION_MICROGRAMS_PER_CUBIC_METER: CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
hc.CONCENTRATION_PARTS_PER_BILLION: CONCENTRATION_PARTS_PER_BILLION,
hc.CONCENTRATION_PARTS_PER_MILLION: CONCENTRATION_PARTS_PER_MILLION,
hc.ELECTRICAL_CURRENT_AMPERE: ELECTRIC_CURRENT_AMPERE,
hc.ELECTRICAL_VOLT_AMPERE: POWER_VOLT_AMPERE,
hc.ENERGY_KILO_WATT_HOUR: ENERGY_KILO_WATT_HOUR,
hc.FREQUENCY_HERTZ: FREQUENCY_HERTZ,
hc.LENGTH_CENTIMETERS: LENGTH_CENTIMETERS,
hc.LIGHT_LUX: LIGHT_LUX,
hc.MASS_KILOGRAMS: MASS_KILOGRAMS,
hc.PERCENTAGE: PERCENTAGE,
hc.POWER_WATT: POWER_WATT,
hc.PRESSURE_HPA: PRESSURE_HPA,
hc.SIGNAL_STRENGTH_DECIBELS: SIGNAL_STRENGTH_DECIBELS,
hc.SIGNAL_STRENGTH_DECIBELS_MILLIWATT: SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
hc.SPEED_KILOMETERS_PER_HOUR: SPEED_KILOMETERS_PER_HOUR,
hc.SPEED_METERS_PER_SECOND: SPEED_METERS_PER_SECOND,
hc.SPEED_MILES_PER_HOUR: SPEED_MILES_PER_HOUR,
hc.TEMP_CELSIUS: TEMP_CELSIUS,
hc.TEMP_FAHRENHEIT: TEMP_FAHRENHEIT,
hc.TEMP_KELVIN: TEMP_KELVIN,
hc.VOLT: ELECTRIC_POTENTIAL_VOLT,
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Tasmota sensor dynamically through discovery."""
@callback
def async_discover(
tasmota_entity: HATasmotaEntity, discovery_hash: DiscoveryHashType
) -> None:
"""Discover and add a Tasmota sensor."""
async_add_entities(
[
TasmotaSensor(
tasmota_entity=tasmota_entity, discovery_hash=discovery_hash
)
]
)
hass.data[
DATA_REMOVE_DISCOVER_COMPONENT.format(sensor.DOMAIN)
] = async_dispatcher_connect(
hass,
TASMOTA_DISCOVERY_ENTITY_NEW.format(sensor.DOMAIN),
async_discover,
)
class TasmotaSensor(TasmotaAvailability, TasmotaDiscoveryUpdate, SensorEntity):
"""Representation of a Tasmota sensor."""
_tasmota_entity: tasmota_sensor.TasmotaSensor
def __init__(self, **kwds: Any) -> None:
"""Initialize the Tasmota sensor."""
self._state: Any | None = None
self._state_timestamp: datetime | None = None
super().__init__(
**kwds,
)
async def async_added_to_hass(self) -> None:
"""Subscribe to MQTT events."""
self._tasmota_entity.set_on_state_callback(self.sensor_state_updated)
await super().async_added_to_hass()
@callback
def sensor_state_updated(self, state: Any, **kwargs: Any) -> None:
"""Handle state updates."""
if self.device_class == DEVICE_CLASS_TIMESTAMP:
self._state_timestamp = state
else:
self._state = state
self.async_write_ha_state()
@property
def device_class(self) -> str | None:
"""Return the device class of the sensor."""
class_or_icon = SENSOR_DEVICE_CLASS_ICON_MAP.get(
self._tasmota_entity.quantity, {}
)
return class_or_icon.get(DEVICE_CLASS)
@property
def state_class(self) -> str | None:
"""Return the state class of the sensor."""
class_or_icon = SENSOR_DEVICE_CLASS_ICON_MAP.get(
self._tasmota_entity.quantity, {}
)
return class_or_icon.get(STATE_CLASS)
@property
def entity_category(self) -> str | None:
"""Return the category of the entity, if any."""
if self._tasmota_entity.quantity in status_sensor.SENSORS:
return ENTITY_CATEGORY_DIAGNOSTIC
return None
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
# Hide fast changing status sensors
if self._tasmota_entity.quantity in (
hc.SENSOR_STATUS_IP,
hc.SENSOR_STATUS_RSSI,
hc.SENSOR_STATUS_SIGNAL,
hc.SENSOR_STATUS_VERSION,
):
return False
return True
@property
def icon(self) -> str | None:
"""Return the icon."""
class_or_icon = SENSOR_DEVICE_CLASS_ICON_MAP.get(
self._tasmota_entity.quantity, {}
)
return class_or_icon.get(ICON)
@property
def native_value(self) -> str | None:
"""Return the state of the entity."""
if self._state_timestamp and self.device_class == DEVICE_CLASS_TIMESTAMP:
return self._state_timestamp.isoformat()
return self._state
@property
def force_update(self) -> bool:
"""Force update."""
return True
@property
def native_unit_of_measurement(self) -> str | None:
"""Return the unit this state is expressed in."""
return SENSOR_UNIT_MAP.get(self._tasmota_entity.unit, self._tasmota_entity.unit)
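# A minimal, hedged sketch (not part of the integration) of the fallback used
# by native_unit_of_measurement above: known Tasmota units map to Home
# Assistant constants, while unknown units pass through unchanged
# ("custom_unit" is hypothetical).
def _demo_unit_fallback() -> None:  # pragma: no cover
    assert SENSOR_UNIT_MAP.get(hc.TEMP_CELSIUS, hc.TEMP_CELSIUS) == TEMP_CELSIUS
    assert SENSOR_UNIT_MAP.get("custom_unit", "custom_unit") == "custom_unit"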
|
|
# -*- coding: utf-8 -*-
"""This module defines commonly used pieces for widgets"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from logging import getLogger
from math import sqrt
from builtins import str
from collections import defaultdict
from wcwidth import wcswidth, wcwidth
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
from asciimatics.screen import Screen
# Logging
logger = getLogger(__name__)
#: Standard palettes for use with :py:meth:`~Frame.set_theme`.
#: Each entry in THEMES contains a colour palette for use by the widgets within a Frame.
#: Each colour palette is a dictionary mapping a colour key to a 3-tuple of
#: (foreground colour, attribute, background colour).
#: The "default" theme defines all the required keys for a palette.
THEMES = {
"default": {
"background": (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLUE),
"shadow": (Screen.COLOUR_BLACK, None, Screen.COLOUR_BLACK),
"disabled": (Screen.COLOUR_BLACK, Screen.A_BOLD, Screen.COLOUR_BLUE),
"invalid": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_RED),
"label": (Screen.COLOUR_GREEN, Screen.A_BOLD, Screen.COLOUR_BLUE),
"borders": (Screen.COLOUR_BLACK, Screen.A_BOLD, Screen.COLOUR_BLUE),
"scroll": (Screen.COLOUR_CYAN, Screen.A_NORMAL, Screen.COLOUR_BLUE),
"title": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLUE),
"edit_text": (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLUE),
"focus_edit_text": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_CYAN),
"readonly": (Screen.COLOUR_BLACK, Screen.A_BOLD, Screen.COLOUR_BLUE),
"focus_readonly": (Screen.COLOUR_BLACK, Screen.A_BOLD, Screen.COLOUR_CYAN),
"button": (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLUE),
"focus_button": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_CYAN),
"control": (Screen.COLOUR_YELLOW, Screen.A_NORMAL, Screen.COLOUR_BLUE),
"selected_control": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_BLUE),
"focus_control": (Screen.COLOUR_YELLOW, Screen.A_NORMAL, Screen.COLOUR_BLUE),
"selected_focus_control": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_CYAN),
"field": (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLUE),
"selected_field": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_BLUE),
"focus_field": (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLUE),
"selected_focus_field": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_CYAN),
},
"monochrome": defaultdict(
lambda: (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLACK),
{
"invalid": (Screen.COLOUR_BLACK, Screen.A_NORMAL, Screen.COLOUR_RED),
"label": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLACK),
"title": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLACK),
"selected_focus_field": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLACK),
"focus_edit_text": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLACK),
"focus_button": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLACK),
"selected_focus_control": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLACK),
"disabled": (Screen.COLOUR_BLACK, Screen.A_BOLD, Screen.COLOUR_BLACK),
}
),
"green": defaultdict(
lambda: (Screen.COLOUR_GREEN, Screen.A_NORMAL, Screen.COLOUR_BLACK),
{
"invalid": (Screen.COLOUR_BLACK, Screen.A_NORMAL, Screen.COLOUR_RED),
"label": (Screen.COLOUR_GREEN, Screen.A_BOLD, Screen.COLOUR_BLACK),
"title": (Screen.COLOUR_GREEN, Screen.A_BOLD, Screen.COLOUR_BLACK),
"selected_focus_field": (Screen.COLOUR_GREEN, Screen.A_BOLD, Screen.COLOUR_BLACK),
"focus_edit_text": (Screen.COLOUR_GREEN, Screen.A_BOLD, Screen.COLOUR_BLACK),
"focus_button": (Screen.COLOUR_GREEN, Screen.A_BOLD, Screen.COLOUR_BLACK),
"selected_focus_control": (Screen.COLOUR_GREEN, Screen.A_BOLD, Screen.COLOUR_BLACK),
"disabled": (Screen.COLOUR_BLACK, Screen.A_BOLD, Screen.COLOUR_BLACK),
}
),
"bright": defaultdict(
lambda: (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLACK),
{
"invalid": (Screen.COLOUR_BLACK, Screen.A_NORMAL, Screen.COLOUR_RED),
"label": (Screen.COLOUR_GREEN, Screen.A_BOLD, Screen.COLOUR_BLACK),
"control": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_BLACK),
"focus_control": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_BLACK),
"selected_focus_control": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_BLACK),
"selected_focus_field": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_BLACK),
"focus_button": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_BLACK),
"focus_edit_text": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_BLACK),
"disabled": (Screen.COLOUR_BLACK, Screen.A_BOLD, Screen.COLOUR_BLACK),
}
),
"tlj256": defaultdict(
lambda: (16, 0, 15),
{
"invalid": (0, 0, 196),
"label": (88, 0, 15),
"title": (88, 0, 15),
"selected_focus_field": (15, 0, 88),
"focus_edit_text": (15, 0, 88),
"focus_button": (15, 0, 88),
"selected_focus_control": (15, 0, 88),
"disabled": (8, 0, 15),
}
),
"warning": defaultdict(
lambda: (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_RED),
{
"label": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_RED),
"title": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_RED),
"focus_edit_text": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_RED),
"focus_field": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_RED),
"focus_button": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_YELLOW),
"focus_control": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_RED),
"disabled": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_RED),
"shadow": (Screen.COLOUR_BLACK, None, Screen.COLOUR_BLACK),
}
),
}
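# A minimal sketch of adding a custom palette under the convention documented
# above -- each key maps to (foreground colour, attribute, background colour),
# with a defaultdict supplying the fallback. The "amber_sketch" name is
# hypothetical, not an asciimatics built-in.
THEMES["amber_sketch"] = defaultdict(
    lambda: (Screen.COLOUR_YELLOW, Screen.A_NORMAL, Screen.COLOUR_BLACK),
    {
        "invalid": (Screen.COLOUR_BLACK, Screen.A_NORMAL, Screen.COLOUR_RED),
        "label": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_BLACK),
    }
)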
def _enforce_width(text, width, unicode_aware=True):
"""
Enforce a displayed piece of text to be a certain number of cells wide. This takes into
account double-width characters used in CJK languages.
    :param text: The text to be truncated
    :param width: The screen cell width to enforce
    :param unicode_aware: Whether to account for double-width (CJK) glyphs.
    :return: The resulting truncated text
"""
    # A string's displayed width is at most twice its character count, so skip
    # the expensive truncation when even that upper bound already fits.
if (2 * len(text) < width) or (len(text) < width and not unicode_aware):
return text
# Can still optimize performance if we are not handling unicode characters.
if unicode_aware:
size = 0
for i, char in enumerate(str(text)):
c_width = wcwidth(char) if ord(char) >= 256 else 1
if size + c_width > width:
return text[0:i]
size += c_width
elif len(text) + 1 > width:
return text[0:width]
return text
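# A hedged usage sketch for _enforce_width: CJK glyphs occupy two screen
# cells, so a width of 4 keeps only the first two such characters.
def _demo_enforce_width():  # pragma: no cover
    assert _enforce_width("hello", 3) == "hel"
    assert _enforce_width(u"\u65e5\u672c\u8a9e", 4) == u"\u65e5\u672c"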
def _find_min_start(text, max_width, unicode_aware=True, at_end=False):
"""
Find the starting point in the string that will reduce it to be less than or equal to the
specified width when displayed on screen.
    :param text: The text to analyze.
    :param max_width: The required maximum width.
    :param unicode_aware: Whether to account for double-width (CJK) glyphs.
    :param at_end: Whether this is the end of the editable line, so space is left for the cursor.
    :return: The offset within `text` to start at to reduce it to the required length.
"""
# Is the solution trivial? Worth optimizing for text heavy UIs...
if 2 * len(text) < max_width:
return 0
# OK - do it the hard way...
result = 0
string_len = wcswidth if unicode_aware else len
char_len = wcwidth if unicode_aware else lambda x: 1
display_end = string_len(text)
while display_end > max_width:
result += 1
display_end -= char_len(text[0])
text = text[1:]
if at_end and display_end == max_width:
result += 1
return result
def _get_offset(text, visible_width, unicode_aware=True):
"""
Find the character offset within some text for a given visible offset (taking into account the
fact that some character glyphs are double width).
    :param text: The text to analyze.
    :param visible_width: The required location within that text (as seen on screen).
    :param unicode_aware: Whether to account for double-width (CJK) glyphs.
    :return: The offset within text (as a character offset within the string).
"""
result = 0
width = 0
if unicode_aware:
for char in text:
if visible_width - width <= 0:
break
result += 1
width += wcwidth(char)
if visible_width - width < 0:
result -= 1
else:
result = min(len(text), visible_width)
return result
@lru_cache(256)
def _split_text(text, width, height, unicode_aware=True):
"""
Split text to required dimensions.
This will first try to split the text into multiple lines, then put a "..." on the last
3 characters of the last line if this still doesn't fit.
    :param text: The text to split.
    :param width: The maximum width for any line.
    :param height: The maximum height for the resulting text.
    :param unicode_aware: Whether to account for double-width (CJK) glyphs.
    :return: A list of strings of the broken up text.
"""
# At a high level, just try to split on whitespace for the best results.
tokens = text.split(" ")
result = []
current_line = ""
string_len = wcswidth if unicode_aware else len
for token in tokens:
for i, line_token in enumerate(token.split("\n")):
if string_len(current_line + line_token) > width or i > 0:
# Don't bother inserting completely blank lines
# which should only happen on the very first
# line (as the rest will inject whitespace/newlines)
if len(current_line) > 0:
result.append(current_line.rstrip())
current_line = line_token + " "
else:
current_line += line_token + " "
# At this point we've either split nicely or have a hugely long unbroken string
    # (e.g. because the language doesn't use whitespace).
# Either way, break this last line up as best we can.
current_line = current_line.rstrip()
while string_len(current_line) > 0:
new_line = _enforce_width(current_line, width, unicode_aware)
result.append(new_line)
current_line = current_line[len(new_line):]
# Check for a height overrun and truncate.
if len(result) > height:
result = result[:height]
result[height - 1] = result[height - 1][:width - 3] + "..."
# Very small columns could be shorter than individual words - truncate
# each line if necessary.
for i, line in enumerate(result):
if len(line) > width:
result[i] = line[:width - 3] + "..."
return result
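# A hedged sketch of _split_text: whitespace-based wrapping first, then the
# height cap with a "..." marker on the final line.
def _demo_split_text():  # pragma: no cover
    assert _split_text("hello world foo", 11, 3) == ["hello world", "foo"]
    assert _split_text("one two three four", 9, 2) == ["one two", "three..."]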
def _euclidian_distance(widget1, widget2):
"""
    Find the Euclidean distance between two widgets.
    :param widget1: The first widget.
    :param widget2: The second widget.
    :return: The distance between the two widgets' locations, in cells.
"""
point1 = widget1.get_location()
point2 = widget2.get_location()
return sqrt((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2)
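# A hedged sketch of _euclidian_distance using stand-in widgets that expose
# get_location(); a 3-4-5 triangle gives a distance of exactly 5.0.
def _demo_euclidian_distance():  # pragma: no cover
    class _StubWidget(object):
        def __init__(self, x, y):
            self._location = (x, y)
        def get_location(self):
            return self._location
    assert _euclidian_distance(_StubWidget(0, 0), _StubWidget(3, 4)) == 5.0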
|
|
# orm/__init__.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Functional constructs for ORM configuration.
See the SQLAlchemy object relational tutorial and mapper configuration
documentation for an overview of how this module is used.
"""
from . import exc
from .mapper import (
Mapper,
_mapper_registry,
class_mapper,
configure_mappers,
reconstructor,
validates
)
from .interfaces import (
EXT_CONTINUE,
EXT_STOP,
PropComparator,
)
from .deprecated_interfaces import (
MapperExtension,
SessionExtension,
AttributeExtension,
)
from .util import (
aliased,
join,
object_mapper,
outerjoin,
polymorphic_union,
was_deleted,
with_parent,
with_polymorphic,
)
from .properties import ColumnProperty
from .relationships import RelationshipProperty
from .descriptor_props import (
ComparableProperty,
CompositeProperty,
SynonymProperty,
)
from .relationships import (
foreign,
remote,
)
from .session import (
Session,
object_session,
sessionmaker,
make_transient,
make_transient_to_detached
)
from .scoping import (
scoped_session
)
from . import mapper as mapperlib
from .query import AliasOption, Query, Bundle
from ..util.langhelpers import public_factory
from .. import util as _sa_util
from . import strategies as _strategies
def create_session(bind=None, **kwargs):
"""Create a new :class:`.Session`
with no automation enabled by default.
This function is used primarily for testing. The usual
route to :class:`.Session` creation is via its constructor
or the :func:`.sessionmaker` function.
:param bind: optional, a single Connectable to use for all
database access in the created
:class:`~sqlalchemy.orm.session.Session`.
:param \*\*kwargs: optional, passed through to the
:class:`.Session` constructor.
:returns: an :class:`~sqlalchemy.orm.session.Session` instance
    The defaults of create_session() are the opposite of those of
    :func:`sessionmaker`; ``autoflush`` and ``expire_on_commit`` are
    False, ``autocommit`` is True. In this sense the session acts
    more like the "classic" SQLAlchemy 0.3 session with these defaults.
Usage::
>>> from sqlalchemy.orm import create_session
>>> session = create_session()
It is recommended to use :func:`sessionmaker` instead of
create_session().
"""
kwargs.setdefault('autoflush', False)
kwargs.setdefault('autocommit', True)
kwargs.setdefault('expire_on_commit', False)
return Session(bind=bind, **kwargs)
relationship = public_factory(RelationshipProperty, ".orm.relationship")
def relation(*arg, **kw):
"""A synonym for :func:`relationship`."""
return relationship(*arg, **kw)
def dynamic_loader(argument, **kw):
"""Construct a dynamically-loading mapper property.
This is essentially the same as
using the ``lazy='dynamic'`` argument with :func:`relationship`::
dynamic_loader(SomeClass)
# is the same as
relationship(SomeClass, lazy="dynamic")
See the section :ref:`dynamic_relationship` for more details
on dynamic loading.
"""
kw['lazy'] = 'dynamic'
return relationship(argument, **kw)
column_property = public_factory(ColumnProperty, ".orm.column_property")
composite = public_factory(CompositeProperty, ".orm.composite")
def backref(name, **kwargs):
"""Create a back reference with explicit keyword arguments, which are the
same arguments one can send to :func:`relationship`.
Used with the ``backref`` keyword argument to :func:`relationship` in
place of a string argument, e.g.::
'items':relationship(
SomeItem, backref=backref('parent', lazy='subquery'))
.. seealso::
:ref:`relationships_backref`
"""
return (name, kwargs)
def deferred(*columns, **kw):
"""Indicate a column-based mapped attribute that by default will
not load unless accessed.
:param \*columns: columns to be mapped. This is typically a single
:class:`.Column` object, however a collection is supported in order
to support multiple columns mapped under the same attribute.
:param \**kw: additional keyword arguments passed to
:class:`.ColumnProperty`.
.. seealso::
:ref:`deferred`
"""
return ColumnProperty(deferred=True, *columns, **kw)
mapper = public_factory(Mapper, ".orm.mapper")
synonym = public_factory(SynonymProperty, ".orm.synonym")
comparable_property = public_factory(ComparableProperty,
".orm.comparable_property")
@_sa_util.deprecated("0.7", message=":func:`.compile_mappers` "
"is renamed to :func:`.configure_mappers`")
def compile_mappers():
"""Initialize the inter-mapper relationships of all mappers that have
been defined.
"""
configure_mappers()
def clear_mappers():
"""Remove all mappers from all classes.
This function removes all instrumentation from classes and disposes
of their associated mappers. Once called, the classes are unmapped
and can be later re-mapped with new mappers.
:func:`.clear_mappers` is *not* for normal use, as there is literally no
valid usage for it outside of very specific testing scenarios. Normally,
mappers are permanent structural components of user-defined classes, and
are never discarded independently of their class. If a mapped class
itself is garbage collected, its mapper is automatically disposed of as
well. As such, :func:`.clear_mappers` is only for usage in test suites
that re-use the same classes with different mappings, which is itself an
extremely rare use case - the only such use case is in fact SQLAlchemy's
own test suite, and possibly the test suites of other ORM extension
libraries which intend to test various combinations of mapper construction
upon a fixed set of classes.
"""
mapperlib._CONFIGURE_MUTEX.acquire()
try:
while _mapper_registry:
try:
# can't even reliably call list(weakdict) in jython
mapper, b = _mapper_registry.popitem()
mapper.dispose()
except KeyError:
pass
finally:
mapperlib._CONFIGURE_MUTEX.release()
from . import strategy_options
joinedload = strategy_options.joinedload._unbound_fn
joinedload_all = strategy_options.joinedload._unbound_all_fn
contains_eager = strategy_options.contains_eager._unbound_fn
defer = strategy_options.defer._unbound_fn
undefer = strategy_options.undefer._unbound_fn
undefer_group = strategy_options.undefer_group._unbound_fn
load_only = strategy_options.load_only._unbound_fn
lazyload = strategy_options.lazyload._unbound_fn
lazyload_all = strategy_options.lazyload_all._unbound_all_fn
subqueryload = strategy_options.subqueryload._unbound_fn
subqueryload_all = strategy_options.subqueryload_all._unbound_all_fn
immediateload = strategy_options.immediateload._unbound_fn
noload = strategy_options.noload._unbound_fn
raiseload = strategy_options.raiseload._unbound_fn
defaultload = strategy_options.defaultload._unbound_fn
from .strategy_options import Load
def eagerload(*args, **kwargs):
"""A synonym for :func:`joinedload()`."""
return joinedload(*args, **kwargs)
def eagerload_all(*args, **kwargs):
"""A synonym for :func:`joinedload_all()`"""
return joinedload_all(*args, **kwargs)
contains_alias = public_factory(AliasOption, ".orm.contains_alias")
def __go(lcls):
global __all__
from .. import util as sa_util
from . import dynamic
from . import events
import inspect as _inspect
__all__ = sorted(name for name, obj in lcls.items()
if not (name.startswith('_') or _inspect.ismodule(obj)))
_sa_util.dependencies.resolve_all("sqlalchemy.orm")
__go(locals())
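def _demo_relationship_backref():  # pragma: no cover
    """A minimal, hedged sketch (not part of the public API) of
    :func:`relationship` with :func:`backref` plus :func:`create_session`
    against an in-memory SQLite database; the class and table names here
    are hypothetical."""
    from sqlalchemy import Column, ForeignKey, Integer, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    Base = declarative_base()
    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        children = relationship(
            "Child", backref=backref('parent', lazy='subquery'))
    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = create_session(bind=engine)
    session.add(Parent(children=[Child()]))
    session.flush()
    assert session.query(Child).one().parent is not None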
|
|
# -*- coding: utf-8 -*-
# @Author: Zachary Priddy
# @Date: 2016-05-03 08:06:32
# @Last Modified by: Zachary Priddy
# @Last Modified time: 2016-06-26 16:21:46
import logging
from core.models.command import Command as ffCommand
from core.models.device import Device
from core.models.event import Event as ffEvent
from core.utils.notify import Notification as ffNotify
import requests
import treq
import json
class Device(Device):
def __init__(self, deviceID, args={}):
self.METADATA = {
'title' : 'Firefly Ecobee Controller',
'type' : 'thermostat',
'package' : 'ffEcobee',
'module' : 'ffEcobee'
}
self.COMMANDS = {
'getPin' : self.register_ecobee_get_pin,
'getToken' : self.register_ecobee_get_token,
'install' : self.install_ecobee,
'update' : self.update,
'setAway' : self.away,
'setHome' : self.resume,
'setMode' : self.set_mode
}
self.REQUESTS = {
}
self.VIEWS = {}
args = args.get('args')
self._api_key = args.get('api_key')
    self._api_url = 'https://api.ecobee.com/1/'
self._pin = None
self._auth_code = None
self._access_token = None
self._refresh_token = None
self._token_file = '.ecobee_tokens.json'
name = args.get('name')
super(Device,self).__init__(deviceID, name)
self.install_ecobee()
def register_ecobee_get_pin(self, args={}):
url='https://api.ecobee.com/authorize?response_type=ecobeePin&client_id=' + str(self._api_key) + '&scope=smartWrite'
response = requests.get(url).json()
self._pin = response.get('ecobeePin')
self._auth_code = response.get('code')
    ffNotify('all', 'Ecobee Auth Pin: ' + str(self._pin) + '. To finish installing ecobee, go to ecobee.com, log in, open the menu on the right, click My Apps, then Add Application, and enter the PIN. After this is done you may proceed to the next step.')
return str(self._pin)
def register_ecobee_get_token(self, args={}):
url = 'https://api.ecobee.com/token'
data = {
'grant_type': 'ecobeePin',
'code' : str(self._auth_code),
'client_id' : str(self._api_key)
}
response = requests.post(url, params=data)
logging.critical(response.text)
logging.critical(response.json())
response = response.json()
self._refresh_token = response.get('refresh_token')
self._access_token = response.get('access_token')
logging.critical(self._access_token)
self.write_tokens_to_file()
def refresh_token(self, args={}):
url = 'https://api.ecobee.com/token'
data = {
'grant_type': 'refresh_token',
'code' : str(self._refresh_token),
'client_id' : str(self._api_key)
}
response = requests.post(url, params=data)
logging.critical(response.text)
logging.critical(response.json())
response = response.json()
self._refresh_token = response.get('refresh_token')
self._access_token = response.get('access_token')
logging.critical(self._access_token)
self.write_tokens_to_file()
def write_tokens_to_file(self):
with open(self._token_file, 'w+') as token_file:
tokens = {
'refresh_token' : str(self._refresh_token),
'access_token' : str(self._access_token)
}
json.dump(tokens, token_file)
def read_tokens_from_file(self):
try:
with open(self._token_file, 'r') as token_file:
tokens = json.load(token_file)
self._refresh_token = tokens.get('refresh_token')
self._access_token = tokens.get('access_token')
return True
    except Exception:
return False
  def install_ecobee(self, args={}):
    return self.read_tokens_from_file()
def away(self, args={}):
self.refresh_token()
data = {
"selection": {
"selectionType":"registered",
"selectionMatch":""
},
"functions": [
{
"type":"setHold",
"params":{
"holdType":"indefinite",
"holdClimateRef":"away"
}
}
]
}
headers = {"Authorization": "Bearer " + str(self._access_token), 'Content-Type': 'application/json;charset=UTF-8' }
params = {'format': 'json'}
url = "https://api.ecobee.com/1/thermostat?format=json"
r = requests.post(url, headers=headers, params=params, json=data)
logging.critical(r.text)
def resume(self, args={}):
self.refresh_token()
data = {
"selection": {
"selectionType":"registered",
"selectionMatch":""
},
"functions": [
{
"type":"resumeProgram",
"params":{
"resumeAll":"true",
}
}
]
}
headers = {"Authorization": "Bearer " + str(self._access_token), 'Content-Type': 'application/json;charset=UTF-8' }
params = {'format': 'json'}
url = "https://api.ecobee.com/1/thermostat?format=json"
r = requests.post(url, headers=headers, params=params, json=data)
logging.critical(r.text)
def set_mode(self, args={}):
mode = args.get('mode')
self.refresh_token()
data = {
"selection": {
"selectionType":"registered",
"selectionMatch":""
},
"functions": [
{
"type":"setHold",
"params":{
"holdType":"holdHours",
"holdClimateRef":str(mode),
"holdHours":2
}
}
]
}
headers = {"Authorization": "Bearer " + str(self._access_token), 'Content-Type': 'application/json;charset=UTF-8' }
params = {'format': 'json'}
url = "https://api.ecobee.com/1/thermostat?format=json"
r = requests.post(url, headers=headers, params=params, json=data)
logging.critical(r.text)
def update(self, args={}):
pass
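# A hedged usage sketch of the PIN authorization flow implemented above.
# Running it needs the Firefly core framework plus a real ecobee developer
# api_key; every literal value below is hypothetical.
def demo_ecobee_pin_flow():
  device = Device('ecobee_demo', args={'args': {'api_key': 'YOUR_API_KEY', 'name': 'Ecobee'}})
  pin = device.register_ecobee_get_pin()  # surface the PIN to the user
  # ... the user adds the app with this PIN under ecobee.com -> My Apps ...
  device.register_ecobee_get_token()  # exchange the auth code for tokens
  device.away()  # hold the "away" climate indefinitely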
|
|
import pytest
from osf.utils.workflows import DefaultStates, RequestTypes
from osf_tests.factories import (
AuthUserFactory,
NodeRequestFactory,
PreprintFactory,
PreprintProviderFactory,
PreprintRequestFactory,
ProjectFactory,
)
from osf.utils import permissions
@pytest.mark.django_db
class NodeRequestTestMixin(object):
@pytest.fixture()
def admin(self):
return AuthUserFactory()
@pytest.fixture()
def write_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def requester(self):
return AuthUserFactory()
@pytest.fixture()
def noncontrib(self):
return AuthUserFactory()
@pytest.fixture()
def project(self, admin, write_contrib):
proj = ProjectFactory(creator=admin)
proj.save()
proj.add_contributor(
contributor=write_contrib,
permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS,
send_email='access_request',
save=True
)
return proj
@pytest.fixture()
def node_request(self, project, requester):
node_request = NodeRequestFactory(
creator=requester,
target=project,
request_type=RequestTypes.ACCESS.value,
machine_state=DefaultStates.INITIAL.value
)
node_request.run_submit(requester)
return node_request
@pytest.fixture()
def second_admin(self, project):
second_admin = AuthUserFactory()
project.add_contributor(
contributor=second_admin,
permissions=permissions.CREATOR_PERMISSIONS,
save=True
)
return second_admin
@pytest.mark.django_db
class PreprintRequestTestMixin(object):
@pytest.fixture()
def admin(self):
return AuthUserFactory()
@pytest.fixture()
def write_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def noncontrib(self):
return AuthUserFactory()
@pytest.fixture()
def moderator(self):
return AuthUserFactory()
@pytest.fixture()
def pre_mod_provider(self, moderator):
ppp = PreprintProviderFactory(reviews_workflow='pre-moderation')
ppp.get_group('moderator').user_set.add(moderator)
return ppp
@pytest.fixture()
def post_mod_provider(self, moderator):
ppp = PreprintProviderFactory(reviews_workflow='post-moderation')
ppp.get_group('moderator').user_set.add(moderator)
return ppp
@pytest.fixture()
def none_mod_provider(self):
return PreprintProviderFactory(reviews_workflow=None)
@pytest.fixture()
def pre_mod_preprint(self, admin, write_contrib, pre_mod_provider):
pre = PreprintFactory(
creator=admin,
provider=pre_mod_provider,
is_published=False,
machine_state='pending'
)
pre.ever_public = True
pre.save()
pre.node.add_contributor(
contributor=write_contrib,
permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS,
save=True
)
pre.node.is_public = True
pre.node.save()
return pre
@pytest.fixture()
def auto_withdrawable_pre_mod_preprint(self, admin, write_contrib, pre_mod_provider):
pre = PreprintFactory(
creator=admin,
provider=pre_mod_provider,
is_published=False,
machine_state='pending'
)
pre.save()
pre.node.add_contributor(
contributor=write_contrib,
permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS,
save=True
)
return pre
@pytest.fixture()
def post_mod_preprint(self, admin, write_contrib, post_mod_provider):
post = PreprintFactory(
creator=admin,
provider=post_mod_provider,
)
post.save()
post.node.add_contributor(
contributor=write_contrib,
permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS,
save=True
)
return post
@pytest.fixture()
def none_mod_preprint(self, admin, write_contrib, none_mod_provider):
preprint = PreprintFactory(
creator=admin,
provider=none_mod_provider,
)
preprint.save()
preprint.node.add_contributor(
contributor=write_contrib,
permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS,
save=True
)
return preprint
@pytest.fixture()
def pre_request(self, pre_mod_preprint, admin):
request = PreprintRequestFactory(
creator=admin,
target=pre_mod_preprint,
request_type=RequestTypes.WITHDRAWAL.value,
machine_state=DefaultStates.INITIAL.value
)
request.run_submit(admin)
return request
@pytest.fixture()
def post_request(self, post_mod_preprint, admin):
request = PreprintRequestFactory(
creator=admin,
target=post_mod_preprint,
request_type=RequestTypes.WITHDRAWAL.value,
machine_state=DefaultStates.INITIAL.value
)
request.run_submit(admin)
return request
@pytest.fixture()
def none_request(self, none_mod_preprint, admin):
request = PreprintRequestFactory(
creator=admin,
target=none_mod_preprint,
request_type=RequestTypes.WITHDRAWAL.value,
machine_state=DefaultStates.INITIAL.value
)
request.run_submit(admin)
return request
@pytest.fixture()
def auto_approved_pre_request(self, auto_withdrawable_pre_mod_preprint, admin):
request = PreprintRequestFactory(
creator=admin,
target=auto_withdrawable_pre_mod_preprint,
request_type=RequestTypes.WITHDRAWAL.value,
machine_state=DefaultStates.INITIAL.value
)
request.run_submit(admin)
return request
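# A minimal, hedged sketch of consuming the mixin above in a concrete test
# class; the class name is hypothetical, and the assertion assumes that
# run_submit() moves a fresh request from the "initial" to the "pending" state.
@pytest.mark.django_db
class TestNodeRequestSketch(NodeRequestTestMixin):
    def test_submit_moves_request_to_pending(self, node_request):
        assert node_request.machine_state == DefaultStates.PENDING.value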
|
|
#!/usr/bin/env python
import sqlite3
import serial
from threading import Timer
import logging
import hashlib
import os.path
from time import sleep
from datetime import datetime, timedelta
from sys import exit
import paho.mqtt.client as paho
import RPi.GPIO as GPIO
import config
DIR = os.path.dirname(os.path.realpath(__file__))
# config
DATABASE = DIR + '/doorlock.db'
SERIAL_PORTS = ['/dev/ttyUSB0', '/dev/ttyUSB1', '/dev/ttyUSB2', '/dev/ttyUSB3']
DEBUG = False
LOG_FILENAME = DIR + "/doorlock.log"
LOG_LEVEL = logging.INFO # Could be e.g. "DEBUG" or "WARNING"
PONG_TIMEOUT = 30 #in sec
LOCKED = 0
UNLOCKED = 1
SEMI = 2
# regular ping to frontend, every 10 seconds, 30 sec timeout
running = True
lock_status = LOCKED
semi_timer = None
def ping():
if not running:
return
ser.write("PING;\n")
ser.flush()
timer = Timer(10.0, ping)
timer.start()
def lockDoor():
    # lock_status is module state shared with the main loop below
    global lock_status
    try:
        semi_timer.cancel()
    except Exception:
        pass
    lock_status = LOCKED
    lock.lock()
def unlockDoor():
    global lock_status
    if GPIO.input(22) == GPIO.HIGH:
        lock_status = SEMI
        semiUnlockDoor()
    else:
        lock_status = UNLOCKED
        lock.unlock()
def semiLockDoor():
lock.lock(semi = True)
def semiUnlockDoor():
    global semi_timer
    lock.unlock()
    semi_timer = Timer(config.semi_timeout, semiLockDoor)
    semi_timer.start()
def statusChange():
sendStatusToFrontend()
if lock.isUnlocked():
mqttc.publish(config.topic, "1", 1, True)
else:
mqttc.publish(config.topic, "0", 1, True)
def sendStatusToFrontend():
if lock.isUnlocked():
ser.write("STATUS,1;\n")
elif lock_status == SEMI:
ser.write("STATUS,2;\n")
else:
ser.write("STATUS,0;\n")
ser.flush()
def create_hash(text):
h = hashlib.sha256()
h.update(text)
return h.hexdigest()
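# A hedged sketch of the frontend wire protocol handled in the main loop
# below; the token/pin values are hypothetical. The database stores
# sha256("<token>:<pin>") next to each token, which is what create_hash()
# reproduces for the comparison.
def demo_unlock_parsing():
    line = "UNLOCK,12345,6789;\r\n"
    fields = line.rstrip("\n\r;").split(",")
    assert fields == ["UNLOCK", "12345", "6789"]
    assert len(create_hash(fields[1] + ":" + fields[2])) == 64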
def ring_doorbell():
logger.info("Ring Ring")
def serial_connect():
for serial_port in SERIAL_PORTS:
if os.path.exists(serial_port):
logger.info("Using serial port %s" % serial_port)
return serial.Serial(serial_port, timeout=6)
logger.error("No valid serial port found. Sleeping and retrying...")
sleep(60)
return serial_connect()
# MQTT functions
def on_connect(mosq, obj, rc):
logging.info("Connect with RC " + str(rc))
def on_disconnect(client, userdata, rc):
logging.warning("Disconnected (RC " + str(rc) + ")")
    if rc != 0:
try_reconnect(client)
# MQTT reconnect
def try_reconnect(client, time = 60):
try:
logging.info("Trying reconnect")
client.reconnect()
    except Exception:
logging.warning("Reconnect failed. Trying again in " + str(time) + " seconds")
Timer(time, try_reconnect, [client]).start()
# get logger
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=LOG_LEVEL,
filename=LOG_FILENAME)
logger = logging.getLogger("doorlock")
logger.info("Starting doorlock backend")
# instantiate db connection
conn = sqlite3.connect(DATABASE)
c = conn.cursor()
logger.debug("Database opened")
# connect to serial port
ser = serial_connect()
logger.debug("Serial port to frontend opened")
# initialize MQTT
logging.info("Initializing MQTT")
mqttc = paho.Client("mumalab_doorlock")
mqttc.username_pw_set(config.broker["user"], config.broker["password"])
mqttc.will_set(config.topic, "?", 1, True)
mqttc.connect(config.broker["hostname"], config.broker["port"], 60)
mqttc.on_connect = on_connect
mqttc.on_disconnect = on_disconnect
mqttc.loop_start()
# setup GPIO
GPIO.setwarnings(True)
GPIO.setmode(GPIO.BCM)
GPIO.setup(22, GPIO.IN)
# lock implementation
if DEBUG:
from simlock import SimLock
lock = SimLock()
else:
from motorlock import MotorLock
lock = MotorLock()
if lock.locked:
lock_status = LOCKED
elif GPIO.input(22) == GPIO.HIGH:
lock_status = SEMI
else:
lock_status = UNLOCKED
lock.onStatusChange += statusChange
# start pinging frontend
last_successful_ping = datetime.now()
ping()
while True:
try:
a = ser.readline()
if not a:
if last_successful_ping + timedelta(seconds=PONG_TIMEOUT) < datetime.now():
logger.warning("Got no PONG from Arduino from %d sec, reinitializing serial port" % PONG_TIMEOUT)
try:
ser.close()
except Exception as e:
logger.error("Failed to close serial port! Got exception: %s" % str(e))
ser = serial_connect()
ping()
last_successful_ping = datetime.now()
continue
b = a.rstrip("\n\r;").split(",")
if b == ["PONG"]:
last_successful_ping = datetime.now()
logger.debug("Got Pong...(%s)" % (str(last_successful_ping)))
else:
logger.debug(b)
# Unlock command: "UNLOCK,<token>,<pin>;"
# Reply with "ACK;" or "NAK;"
if b[0] == "UNLOCK":
t = (b[1], create_hash(b[1] + ":" + b[2]))
c.execute(
'SELECT p.name from dl_tokens t JOIN dl_persons p ON t.person_id = p.id WHERE t.token=? AND t.pin=? AND p.disabled =0',
t)
r = c.fetchone()
                if r is not None:
logger.warning("Valid unlock request by %s (%s)", r[0], t[0])
ser.write("ACK;\n")
unlockDoor()
else:
logger.error("Invalid unlock request (%s, %s)", t[0], b[2])
ser.write("NAK;\n")
ser.flush()
elif b[0] == "RING":
ring_doorbell()
# Semi unlock command "SEMI_UNLOCK;"
# no reply expected by frontend
elif b[0] == "SEMI_UNLOCK":
logger.warning("Semi unlock request");
if lock_status == SEMI:
semiUnlockDoor()
else:
logger.error("Not in SEMI mode. Something's fishy.");
# Lock command "LOCK;"
# no reply expected by frontend
elif b[0] == "LOCK":
logger.warning("Lock request");
lockDoor()
# reply to ping: "PONG;"
# passing status to frontend afterwards
elif b[0] == "PONG":
sendStatusToFrontend()
except serial.serialutil.SerialException:
logger.error("Serial adapter disconnected! Sleeping and trying to reconnect...")
sleep(10)
ser = serial_connect()
except KeyboardInterrupt:
print "Received keyboard interrupt. Stopping..."
running = False
break
except Exception as e:
logger.error(e)
exit(99)
logger.info("Stopping doorlock backend")
mqttc.loop_stop()
mqttc.disconnect()
|
|
import os, configparser, sys, re
from fnmatch import fnmatch
def writeMakefile(type):
config = Config('bakefile.ini')
settings = ProjectSettings(config, type)
bakefile = Bakefile(settings)
bf = open('generated-bakefile.bkl', 'w', newline='')
bf.write(bakefile.generateProgram(type))
bf.close()
def runBakefile():
# todo:
# if windows:
# bkl bakefile.bkl
# else if osx:
# bakefile bakefile.bkl
#
# - check if bakefile is installed
#
# - use subprocess to run bakefile
# https://docs.python.org/2/library/subprocess.html
return
class Generator:
    @staticmethod
    def findFiletypes(root, filetypes):
        matchedFiles = ''
        for path, subdirs, files in os.walk(root):
            for name in files:
                if fnmatch(name, filetypes):
                    matchedFiles += '\n' + os.path.join(path, name)
        return matchedFiles
    @staticmethod
    def genHeaders(root):
        headers = Generator.findFiletypes(root, '*.hpp')
        headers += Generator.findFiletypes(root, '*.h')
        headers += Generator.findFiletypes(root, '*.hxx')
        return headers
    @staticmethod
    def genSources(root):
        sources = Generator.findFiletypes(root, '*.cpp')
        sources += Generator.findFiletypes(root, '*.c')
        return sources
class Bakefile:
def generateLib(self):
self._writeLib()
return self._buffer
def generateProgram(self, type):
self._writeBody(type)
return self._buffer
def includeDir(self, path):
self._includeDirs.append(path)
    def libs(self, lib):
        self._libs.append(lib)
    def libDir(self, path):
        self._libDirs.append(path)
def _writeBody(self, type):
self._write(type + ' ' + self._settings.config.projectName)
self._writeDependencies()
self._write(' {\n')
self._indent(1)
self._writeIncludes()
self._writeHeaders()
self._writeSources()
self._indent(-1)
self._write('\n}')
return self._buffer
def _format(self, s):
ind = ''
for i in range(0, self._indents):
ind += '\t'
newlines = [m.start() for m in re.finditer('\n', s)]
if (not newlines):
return ind + s
for n in range(0, len(newlines)):
s = s[:newlines[n]] + ind + s[newlines[n]:]
return ind + s
def _write(self, s):
self._buffer += s #self._format(s)
def _indent(self, indents):
self._indents += indents
def _writeIncludes(self):
for i in range(0, len(self._settings.includeDirs)):
self.includeDir(self._settings.includeDirs[i])
def _writeDependencies(self):
if not self._dependencies:
return
self._write(' : ')
for i in range(0, len(self._dependencies)):
self._write(self._dependencies[i])
            if i != len(self._dependencies) - 1:
self._write(', ')
def _writeLibs(self):
for i in range(0, len(self._libs)):
self._write('libs += ' + self._libs[i])
def _writeHeaders(self):
self._write('headers {')
for i in range(0, len(self._includeDirs)):
self._write(Generator.genHeaders(self._includeDirs[i]))
self._write('\n}\n')
def _writeSources(self):
self._write('sources {')
for i in range(0, len(self._includeDirs)):
self._write(Generator.genSources(self._includeDirs[i]))
self._write('\n}\n')
def __init__(self, settings):
self._buffer = ''
self._settings = settings
self._indents = 0
self._includeDirs = []
self._libs = []
self._libDirs = []
self._headers = []
self._sources = []
self._dependencies = []
class Config:
def _loadConfig(self):
self._readProjectProperties()
def _getPath(self, section, option):
path = self.parser.get(section, option)
path = path.replace('/', '\\')
if not path.endswith('\\'):
path += '\\'
return path
def _readProjectProperties(self):
s = 'Project'
self.projectName = self.parser.get(s, 'projectname')
self.rootdir = self._getPath(s, 'rootdir')
s = 'Engine'
self.enginesrc = self.rootdir + self._getPath(s, 'enginesrc')
s = 'Editor'
self.editorsrc = self.rootdir + self._getPath(s, 'editorsrc')
s = 'Game'
self.gameName = self.parser.get(s, 'gamename')
self.gamesrc = self.rootdir + self._getPath(s, 'gamesrc')
def __init__(self, path):
self.path = path
self.parser = configparser.ConfigParser()
self.parser.read(path)
self._loadConfig()
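# A hedged sketch of the minimal bakefile.ini layout Config expects; the
# section and option names mirror _readProjectProperties above, and every
# value below is hypothetical.
def demoConfig():
    sample = ('[Project]\n'
              'projectname = MyProject\n'
              'rootdir = build\n'
              '[Engine]\n'
              'enginesrc = engine/src\n'
              '[Editor]\n'
              'editorsrc = editor/src\n'
              '[Game]\n'
              'gamename = MyGame\n'
              'gamesrc = game/src\n')
    with open('demo-bakefile.ini', 'w') as demo_file:
        demo_file.write(sample)
    config = Config('demo-bakefile.ini')
    assert config.projectName == 'MyProject'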
class ProjectSettings:
def _editorSetup(settings):
settings.includeDirs.append(settings.config.enginesrc)
settings.includeDirs.append(settings.config.editorsrc)
def _gameSetup(settings):
settings.includeDirs.append(settings.config.enginesrc)
settings.includeDirs.append(settings.config.gamesrc)
projectTypes = {
'editor': _editorSetup,
'game': _gameSetup,
}
def __init__(self, config, *types):
self.includeDirs = []
self.config = config
for i in range(0, len(types)):
if types[i] in self.projectTypes:
self.projectTypes[types[i]](self)
else:
print('Unknown type: ' + str(types[i]))
if __name__ == "__main__":
if not sys.argv[1:]:
print("Missing arguments")
else:
for i in range(1, len(sys.argv)):
writeMakefile(sys.argv[i])
|
|
"""
Celery Worker Setup
"""
import abc
import asyncio
import logging
import os
import re
from functools import wraps
import subprocess
from importlib import import_module
import aiohttp
from celery import Celery, Task
from celery.signals import celeryd_init
from kombu import serialization
import simplejson
from ..githubhandler import GitHubAppHandler, GitHubHandler
from ..githandler import install_gpg_key
from ..utils import RepoData, setup_logger
from .config import (
APP_ID, APP_KEY, CODE_SIGNING_KEY, BOT_NAME, REPODATA_TIMEOUT,
APP_CLIENT_ID, APP_CLIENT_SECRET
)
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class AsyncTask(Task):
"""Task class with support for async tasks
We override celery.Task with our own version, with some extra
features and defaults:
- Since we already use a lot of async stuff elsewhere, it's useful
to allow the ``run`` method of tasks be ``async``. This Task
class detects if the method provided is a coroutine and runs it
inside the asyncio event loop.
    >>> @app.task(bind=True)
    >>> async def mytask(self):
    >>>    await self.async_init()
    >>>    ...
- Provide access to a GitHubAppHandler instance shared at least
within the worker process.
This is a little tedious. Since the task may be spawned some
      time after the webhook that created it was triggered, the tokens
we got inside the webserver may have timed out. In an attempt to
avoid wasting API calls to create those tokens continuously, the
Task class maintains a copy.
- Default to ``acks_late = True``. The reason we use Celery at all
is so that spawned tasks can survive a shutdown of the app.
"""
#: Our tasks should be re-run if they don't finish
acks_late = True
#: Access the Github API
ghapi: "GitHubHandler" = None
#: Access Github App API
ghappapi: "GitHubAppHandler" = None
#: Stores the async run method when the sync run wrapper is installed
_async_run = None
def bind(self, app=None):
"""Intercept binding of task to (celery) app
Here we take the half-finished generated Task class and
replace the async run method with a sync run method that
executes the original method inside the asyncio loop.
"""
if asyncio.iscoroutinefunction(self.run): # only for async funcs
@wraps(self.run)
def sync_run(*args, **kwargs):
largs = list(args) # need list so that pre-run can modify
self.loop.run_until_complete(self.async_pre_run(largs, kwargs))
return self.loop.run_until_complete(self._async_run(*largs, **kwargs))
# swap run method with wrapper defined above
self._async_run, self.run = self.run, sync_run
if not self.loop.is_running():
self.loop.run_until_complete(self.async_init())
super().bind(app)
async def async_init(self):
"""Init things that need to be run inside the loop
This happens during binding -> on load.
"""
if not self.ghappapi:
self.ghappapi = GitHubAppHandler(aiohttp.ClientSession(), BOT_NAME,
APP_KEY, APP_ID,
APP_CLIENT_ID, APP_CLIENT_SECRET)
async def async_pre_run(self, args, _kwargs):
"""Per-call async initialization
Prepares the `ghapi` property for tasks.
FIXME: doesn't replace kwargs
"""
for num, arg in enumerate(args):
if isinstance(arg, GitHubHandler):
args[num] = await self.ghappapi.get_github_api(
False, arg.user, arg.repo,
arg.installation)
@abc.abstractmethod
def run(self, *_args, **_kwargs):
"""The tasks actual run method. Will be replaced during bind"""
@property
def loop(self):
"""Get the async loop - creating a new one if necessary"""
try:
return asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop
def custom_dumps(string):
"""Serialize **s** to JSON accepting **for_json** serializer method"""
return simplejson.dumps(string, for_json=True)
def custom_loads(string):
"""Deserialize **s** recreating objects
JSON objects (dicts) containing a __type__ and a __module__
field are turned into objects by loading and instantiating
the type, passing the result dict from obj.for_json() to
__init__().
"""
def decode(obj):
if isinstance(obj, dict):
try:
typ = obj.pop('__type__')
mod = import_module(obj.pop('__module__'))
klass = getattr(mod, typ)
return klass(**obj)
except KeyError:
pass
return obj
return simplejson.loads(string, object_hook=decode)
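def _demo_custom_json():  # pragma: no cover
    """A hedged sketch of the serializer contract above: custom_dumps()
    flattens objects via their for_json() method, and custom_loads() would
    re-instantiate dicts carrying __type__/__module__ (the DemoObj class is
    hypothetical and would need to live in an importable module for the
    loads() side to resolve it)."""
    class DemoObj:
        def __init__(self, value=None):
            self.value = value
        def for_json(self):
            return {'__type__': 'DemoObj', '__module__': __name__,
                    'value': self.value}
    payload = custom_dumps(DemoObj(42))
    assert '"__type__": "DemoObj"' in payload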
# Register a custom serializer. We do this so we can conveniently
# transfer objects without resorting to pickling.
serialization.register('custom_json',
custom_dumps, custom_loads,
content_type='application/x-bioconda-json',
content_encoding='utf8')
# Instantiate Celery app, setting our AsyncTask as default
# task class and loading the tasks from tasks.py
capp = Celery( # pylint: disable=invalid-name
task_cls=AsyncTask,
include=['bioconda_utils.bot.tasks']
)
# Celery must be configured at module level to catch worker as well
# Settings are suggestions from CloudAMQP
capp.conf.update(
# Set the URL to the AMQP broker using environment variable
broker_url=os.environ.get('CLOUDAMQP_URL'),
# Limit the number of connections to the pool. This should
# be 2 when running on Heroku to avoid running out of free
    # connections on CloudAMQP.
#
# broker_pool_limit=2, # need two so we can inspect
broker_heartbeat=None,
broker_connection_timeout=30,
# We don't feed back our tasks results
result_backend='rpc://',
event_queue_expires=60,
worker_prefetch_multiplier=1,
worker_concurrency=1,
task_serializer='custom_json',
accept_content=['custom_json', 'json']
    # task_acks_late=True
)
@celeryd_init.connect
def setup_new_celery_process(sender=None, conf=None, **_kwargs):
"""This hook is called when a celery worker is initialized
Here we make sure that the GPG signing key is installed
"""
install_gpg_key(CODE_SIGNING_KEY)
RepoData().set_timeout(REPODATA_TIMEOUT)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script Language Operators. See the @{$python/script_ops} guide.
@@py_func
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import numpy as np
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_script_ops
from tensorflow.python.util.tf_export import tf_export
class EagerFunc(object):
"""A wrapper for a function owned by an EagerPyFunc."""
def __init__(self, func, Tout):
"""Constructs an EagerFunc.
Args:
func: The function to wrap.
Tout: A list of datatypes for the output; an empty list if the output is
None.
"""
self._func = func
self._out_dtypes = Tout
def __call__(self, on_gpu, args):
"""Passes `args` to `self._func`, which is executed eagerly."""
with context.eager_mode():
ret = self._func(*args)
maybe_copy_to_gpu = lambda x: x if not on_gpu else x.gpu()
if isinstance(ret, (tuple, list)):
return [
maybe_copy_to_gpu(ops.convert_to_tensor(x, dtype=dtype))
for (x, dtype) in zip(ret, self._out_dtypes)
]
elif ret is None:
return ret
else:
return maybe_copy_to_gpu(
ops.convert_to_tensor(ret, dtype=self._out_dtypes[0]))
class FuncRegistry(object):
"""A helper class to keep track of registered py functions.
FuncRegistry keeps a map from unique tokens (string) to python
functions, which takes numpy arrays and outputs numpy arrays.
"""
def __init__(self):
self._lock = threading.Lock()
self._unique_id = 0 # GUARDED_BY(self._lock)
self._funcs = {}
def insert(self, func):
"""Registers `func` and returns a unique token for this entry."""
token = self._next_unique_token()
self._funcs[token] = func
return token
def remove(self, token):
"""Removes the registered function corresponding to `token`."""
self._funcs.pop(token, None)
@staticmethod
def _convert(value, dtype=None):
"""Converts an arg to numpy, avoiding dangerous string and unicode dtypes.
Numpy pads with zeros when using string and unicode dtypes if different
components of a tensor have different lengths. This is bad: ignoring the
padding is wrong for text data, and removing the padding is wrong for binary
data. To avoid this bug, we redo the conversion using an object dtype.
Additionally, we convert unicode strings to (byte-)strings for Python3
compatibility.
Args:
value: Value to convert to a numpy array.
dtype: (Optional.) Desired NumPy type for the returned value.
Returns:
A numpy array.
"""
result = np.asarray(value, dtype=dtype, order="C")
if result.dtype.char == "S" and result is not value:
return np.asarray(value, order="C", dtype=object)
elif result.dtype.char == "U" and result is not value:
value = np.vectorize(lambda x: x.encode())(value)
return np.asarray(value, order="C", dtype=object)
elif result.dtype.char == "U":
return result.astype(np.bytes_)
else:
return result
def __call__(self, token, on_gpu, args):
"""Calls the registered function for `token` with args.
Args:
token: A key into this `FuncRegistry` identifying which function to call.
on_gpu: A boolean indicating whether or not `token`'s corresponding
operation was placed on GPU; only used if the function registered for
`token` is an `EagerPyFunc`.
args: The arguments to pass to the function registered for `token`.
Returns:
The output of the function registered for `token`.
Raises:
ValueError: if no function is registered for `token`.
"""
    func = self._funcs.get(token, None)
if func is None:
raise ValueError("callback %s is not found" % token)
if isinstance(func, EagerFunc):
return func(on_gpu, args)
else:
ret = func(*args)
# Strings seem to lead to a memory leak here if they're not wrapped in a
# list.
if isinstance(ret, six.binary_type):
ret = [ret]
# Ensures that we return either a single numpy array or a list of numpy
# arrays.
if isinstance(ret, (tuple, list)):
return [self._convert(x) for x in ret]
else:
return self._convert(ret)
def size(self):
"""Returns how many functions are currently registered."""
return len(self._funcs)
def _next_unique_token(self):
"""Returns a unique token."""
with self._lock:
uid = self._unique_id
self._unique_id += 1
return "pyfunc_%d" % uid
# Global registry for py functions.
_py_funcs = FuncRegistry()
pywrap_tensorflow.InitializePyTrampoline(_py_funcs)
class CleanupFunc(object):
"""A helper class to remove a registered function from _py_funcs."""
def __init__(self, token):
self._token = token
def __del__(self):
_py_funcs.remove(self._token)
def _internal_py_func(func, inp, Tout, stateful=None, eager=False, name=None):
"""See documentation for py_func and eager_py_func."""
is_list_or_tuple = False
if isinstance(Tout, (list, tuple)):
is_list_or_tuple = True
else:
Tout = [Tout]
if eager:
func = EagerFunc(func, Tout)
token = _py_funcs.insert(func)
# We tie the registered function's lifetime with the current default graph,
# i.e., when the current graph is destroyed, we remove its py funcs.
graph = ops.get_default_graph()
# pylint: disable=protected-access
while isinstance(graph, function._FuncGraph):
# If the py_func was declared inside a _FuncGraph, its lifetime should be
# bound to that of the outer graph instead.
graph = graph._outer_graph
cleanup = CleanupFunc(token)
# TODO(zhifengc): Consider adding a Graph method to collect
  # `cleanup` objects in one of its members.
if not hasattr(graph, "_cleanup_py_funcs_used_in_graph"):
graph._cleanup_py_funcs_used_in_graph = []
# When `graph` is destroyed, elements in _cleanup_py_funcs_used_in_graph
# will be destroyed and their __del__ will remove the 'token' from
# the funcs registry.
graph._cleanup_py_funcs_used_in_graph.append(cleanup)
# pylint: enable=protected-access
# pylint: disable=protected-access
if eager:
result = gen_script_ops._eager_py_func(
input=inp, token=token, Tout=Tout, name=name)
else:
if stateful:
result = gen_script_ops._py_func(
input=inp, token=token, Tout=Tout, name=name)
else:
result = gen_script_ops._py_func_stateless(
input=inp, token=token, Tout=Tout, name=name)
# pylint: enable=protected-access
return result if is_list_or_tuple else result[0]
def eager_py_func(func, inp, Tout, name=None):
"""Wraps a python function into a TensorFlow op.
When the returned op is executed, `func` is invoked with eager execution
enabled. Inputs are Tensor objects and func must return None or objects
that may be converted to Tensor objects.
This function has the same limitations as `py_func` with respect to
serialization and distribution.
Args:
func: A Python function which accepts a list of `Tensor` objects
having element types that match the corresponding `tf.Tensor` objects
in `inp` and returns a list of `Tensor` objects (or a single
`Tensor`, or `None`) having element types that match the
corresponding values in `Tout`.
inp: A list of `Tensor` objects.
Tout: A list or tuple of tensorflow data types or a single tensorflow data
type if there is only one, indicating what `func` returns; an empty list
if no value is returned (i.e., if the return value is `None`).
name: A name for the operation (optional).
Returns:
A list of `Tensor` or a single `Tensor` which `func` computes; an empty list
if `func` returns None.
"""
return _internal_py_func(func=func, inp=inp, Tout=Tout, eager=True, name=name)
@tf_export("py_func")
def py_func(func, inp, Tout, stateful=True, name=None):
"""Wraps a python function and uses it as a TensorFlow op.
Given a python function `func`, which takes numpy arrays as its
inputs and returns numpy arrays as its outputs, wrap this function as an
operation in a TensorFlow graph. The following snippet constructs a simple
  TensorFlow graph that invokes the `np.sinh()` NumPy function as an operation
in the graph:
```python
def my_func(x):
# x will be a numpy array with the contents of the placeholder below
return np.sinh(x)
inp = tf.placeholder(tf.float32)
y = tf.py_func(my_func, [inp], tf.float32)
```
**N.B.** The `tf.py_func()` operation has the following known limitations:
* The body of the function (i.e. `func`) will not be serialized in a
`GraphDef`. Therefore, you should not use this function if you need to
serialize your model and restore it in a different environment.
* The operation must run in the same address space as the Python program
that calls `tf.py_func()`. If you are using distributed TensorFlow, you
must run a `tf.train.Server` in the same process as the program that calls
`tf.py_func()` and you must pin the created operation to a device in that
server (e.g. using `with tf.device():`).
Args:
func: A Python function, which accepts a list of NumPy `ndarray` objects
having element types that match the corresponding `tf.Tensor` objects
in `inp`, and returns a list of `ndarray` objects (or a single `ndarray`)
having element types that match the corresponding values in `Tout`.
Important Note: Input and output numpy `ndarray`s of `func` are not
guaranteed to be copies. In some cases their underlying memory will be
shared with the corresponding TensorFlow tensors.
In-place modification or storing `func` input or return values in
python datastructures without explicit (np.)copy
can have non-deterministic consequences.
inp: A list of `Tensor` objects.
Tout: A list or tuple of tensorflow data types or a single tensorflow data
type if there is only one, indicating what `func` returns.
stateful: (Boolean.) If True, the function should be considered stateful.
If a function is stateless, when given the same input it will return the
same output and have no observable side effects. Optimizations such as
common subexpression elimination are only performed on stateless
operations.
name: A name for the operation (optional).
Returns:
A list of `Tensor` or a single `Tensor` which `func` computes.
"""
return _internal_py_func(
func=func, inp=inp, Tout=Tout, stateful=stateful, eager=False, name=name)
ops.NotDifferentiable("PyFunc")
ops.NotDifferentiable("PyFuncStateless")
|
|
#!/usr/bin/env python
""" A unittest script for the VisitAttribute module. """
import unittest
from CutlassTestConfig import CutlassTestConfig
from CutlassTestUtil import CutlassTestUtil
# pylint: disable=W0703, C1801
class VisitAttributeTest(unittest.TestCase):
""" Unit tests for the VisitAttribute class """
session = None
util = None
@classmethod
def setUpClass(cls):
""" Setup for the unittest. """
# Establish the session for each test method
cls.session = CutlassTestConfig.get_session()
cls.util = CutlassTestUtil()
def testImport(self):
""" Test the importation of the VisitAttribute module. """
success = False
try:
from cutlass import VisitAttribute
success = True
except Exception:
pass
        self.assertTrue(success)
        self.assertIsNotNone(VisitAttribute)
def testSessionCreate(self):
""" Test the creation of a VisitAttribute via the session. """
success = False
attr = None
try:
attr = self.session.create_visit_attr()
success = True
except Exception:
pass
        self.assertTrue(success)
        self.assertIsNotNone(attr)
def testToJson(self):
""" Test the generation of JSON from a VisitAttribute instance. """
attr = self.session.create_visit_attr()
success = False
attr.study = "prediabetes"
attr.tags = ["test", "visit_attr"]
attr_json = None
try:
attr_json = attr.to_json()
success = True
except Exception:
pass
self.assertTrue(success, "Able to use 'to_json'.")
self.assertTrue(attr_json is not None, "to_json() returned data.")
def testComment(self):
""" Test the comment property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "comment")
self.util.stringPropertyTest(self, attr, "comment")
def testMotherChild(self):
""" Test the mother_child property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "mother_child")
self.util.stringPropertyTest(self, attr, "mother_child")
def testSubproject(self):
""" Test the subproject property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "subproject")
self.util.stringPropertyTest(self, attr, "subproject")
def testSurveyID(self):
""" Test the survey_id property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "survey_id")
self.util.stringPropertyTest(self, attr, "survey_id")
def testTimeDuringPregnancy(self):
""" Test the time_during_pregnancy property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "time_during_pregnancy")
self.util.stringPropertyTest(self, attr, "time_during_pregnancy")
def testClinicalPatientAge(self):
""" Test the age property. """
attr = self.session.create_visit_attr()
self.util.intTypeTest(self, attr, "age")
self.util.intPropertyTest(self, attr, "age")
def testClinicalPatientHeight(self):
""" Test the height property. """
attr = self.session.create_visit_attr()
self.util.floatTypeTest(self, attr, "height")
self.util.floatPropertyTest(self, attr, "height")
def testClinicalPatientWeight(self):
""" Test the weight property. """
attr = self.session.create_visit_attr()
self.util.floatTypeTest(self, attr, "weight")
self.util.floatPropertyTest(self, attr, "weight")
def testClinicalPatientWeightDiff(self):
""" Test the weight_diff property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "weight_diff")
self.util.stringPropertyTest(self, attr, "weight_diff")
def testClinicalPatientBMI(self):
""" Test the bmi property. """
attr = self.session.create_visit_attr()
self.util.floatTypeTest(self, attr, "bmi")
self.util.floatPropertyTest(self, attr, "bmi")
def testClinicalPatientHBI(self):
""" Test the hbi property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "hbi")
self.util.boolPropertyTest(self, attr, "hbi")
def testClinicalPatientHBITotal(self):
""" Test the hbi_total property. """
attr = self.session.create_visit_attr()
self.util.floatTypeTest(self, attr, "hbi_total")
self.util.floatPropertyTest(self, attr, "hbi_total")
def testClinicalPatientSCCAI(self):
""" Test the sccai property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "sccai")
self.util.boolPropertyTest(self, attr, "sccai")
def testClinicalPatientSCCAITotal(self):
""" Test the sccai_total property. """
attr = self.session.create_visit_attr()
self.util.floatTypeTest(self, attr, "sccai_total")
self.util.floatPropertyTest(self, attr, "sccai_total")
def testClinicalPatientFastGluc(self):
""" Test the fast_gluc property. """
attr = self.session.create_visit_attr()
self.util.intTypeTest(self, attr, "fast_gluc")
self.util.intPropertyTest(self, attr, "fast_gluc")
def testClinicalPatient30mGluc(self):
""" Test the thirtym_gluc property. """
attr = self.session.create_visit_attr()
self.util.intTypeTest(self, attr, "thirtym_gluc")
self.util.intPropertyTest(self, attr, "thirtym_gluc")
def testClinicalPatient60mGluc(self):
""" Test the sixtym_gluc property. """
attr = self.session.create_visit_attr()
self.util.intTypeTest(self, attr, "sixtym_gluc")
self.util.intPropertyTest(self, attr, "sixtym_gluc")
def testHrtPrior(self):
""" Test the prior property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "prior")
self.util.boolPropertyTest(self, attr, "prior")
def testHrtCurrent(self):
""" Test the current property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "current")
self.util.boolPropertyTest(self, attr, "current")
def testHrtDuration(self):
""" Test the duration property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "duration")
self.util.stringPropertyTest(self, attr, "duration")
def testHealthAssessSelfAssess(self):
""" Test the self_assess property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "self_assess")
self.util.boolPropertyTest(self, attr, "self_assess")
def testHealthAssessSelfCondition(self):
""" Test the self_condition property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "self_condition")
self.util.stringPropertyTest(self, attr, "self_condition")
def testHealthAssessAbdominalPain(self):
""" Test the abdominal_pain property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "abdominal_pain")
self.util.boolPropertyTest(self, attr, "abdominal_pain")
def testHealthAssessAcuteDis(self):
""" Test the acute_dis property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "acute_dis")
self.util.stringPropertyTest(self, attr, "acute_dis")
def testHealthAssessArthralgia(self):
""" Test the arthralgia property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "arthralgia")
self.util.boolPropertyTest(self, attr, "arthralgia")
def testHealthAssessBowelDay(self):
""" Test the bowel_day property. """
attr = self.session.create_visit_attr()
self.util.intTypeTest(self, attr, "bowel_day")
self.util.intPropertyTest(self, attr, "bowel_day")
def testHealthAssessBowelNight(self):
""" Test the bowel_night property. """
attr = self.session.create_visit_attr()
self.util.intTypeTest(self, attr, "bowel_night")
self.util.intPropertyTest(self, attr, "bowel_night")
def testHealthAssessCancer(self):
""" Test the cancer property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "cancer")
self.util.stringPropertyTest(self, attr, "cancer")
def testHealthAssessCancerMtc(self):
""" Test the cancer_mtc property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "cancer_mtc")
self.util.boolPropertyTest(self, attr, "cancer_mtc")
def testHealthAssessChestPain(self):
""" Test the chest_pain property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "chest_pain")
self.util.boolPropertyTest(self, attr, "chest_pain")
def testHealthAssessClaudication(self):
""" Test the claudication property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "claudication")
self.util.boolPropertyTest(self, attr, "claudication")
def testHealthAssessChronicDis(self):
""" Test the chronic_dis property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "chronic_dis")
self.util.stringPropertyTest(self, attr, "chronic_dis")
def testHealthAssessDiarrhea(self):
""" Test the diarrhea property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "diarrhea")
self.util.boolPropertyTest(self, attr, "diarrhea")
def testHealthAssessDyspnea(self):
""" Test the dyspnea property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "dyspnea")
self.util.boolPropertyTest(self, attr, "dyspnea")
def testHealthAssessEryNodosum(self):
""" Test the ery_nodosum property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "ery_nodosum")
self.util.boolPropertyTest(self, attr, "ery_nodosum")
def testHealthAssessFever(self):
""" Test the fever property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "fever")
self.util.stringPropertyTest(self, attr, "fever")
def testHealthAssessLegEdema(self):
""" Test the leg_edema property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "leg_edema")
self.util.boolPropertyTest(self, attr, "leg_edema")
def testHealthAssessNeurologic(self):
""" Test the neurologic property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "neurologic")
self.util.boolPropertyTest(self, attr, "neurologic")
def testHealthAssessPregnant(self):
""" Test the pregnant property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "pregnant")
self.util.boolPropertyTest(self, attr, "pregnant")
def testHealthAssessPregPlans(self):
""" Test the preg_plans property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "preg_plans")
self.util.boolPropertyTest(self, attr, "preg_plans")
def testHealthAssessPyoGangrenosum(self):
""" Test the pyo_gangrenosum property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "pyo_gangrenosum")
self.util.boolPropertyTest(self, attr, "pyo_gangrenosum")
def testHealthAssessRash(self):
""" Test the rash property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "rash")
self.util.boolPropertyTest(self, attr, "rash")
def testHealthAssessStoolBlood(self):
""" Test the stool_blood property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "stool_blood")
self.util.boolPropertyTest(self, attr, "stool_blood")
def testHealthAssessStoolSoft(self):
""" Test the stool_soft property. """
attr = self.session.create_visit_attr()
self.util.intTypeTest(self, attr, "stool_soft")
self.util.intPropertyTest(self, attr, "stool_soft")
def testHealthAssessSurgery(self):
""" Test the surgery property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "surgery")
self.util.stringPropertyTest(self, attr, "surgery")
def testHealthAssessUrgencyDef(self):
""" Test the urgency_def property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "urgency_def")
self.util.stringPropertyTest(self, attr, "urgency_def")
def testHealthAssessUveitis(self):
""" Test the uveitis property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "uveitis")
self.util.boolPropertyTest(self, attr, "uveitis")
def testHealthAssessWeightChange(self):
""" Test the weight_change property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "weight_change")
self.util.stringPropertyTest(self, attr, "weight_change")
def testHealthAssessDiagOther(self):
""" Test the diag_other property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "diag_other")
self.util.stringPropertyTest(self, attr, "diag_other")
def testHealthAssessHosp(self):
""" Test the hosp property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "hosp")
self.util.boolPropertyTest(self, attr, "hosp")
def testHealthAssessWorkMissed(self):
""" Test the work_missed property. """
attr = self.session.create_visit_attr()
self.util.intTypeTest(self, attr, "work_missed")
self.util.intPropertyTest(self, attr, "work_missed")
def testMedicationsNewMeds(self):
""" Test the new_meds property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "new_meds")
self.util.boolPropertyTest(self, attr, "new_meds")
def testMedicationsStoppedMeds(self):
""" Test the stopped_meds property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "stopped_meds")
self.util.boolPropertyTest(self, attr, "stopped_meds")
def testMedicationsAbx(self):
""" Test the abx property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "abx")
self.util.boolPropertyTest(self, attr, "abx")
def testMedicationsChemo(self):
""" Test the chemo property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "chemo")
self.util.boolPropertyTest(self, attr, "chemo")
def testMedicationsImmunosupp(self):
""" Test the immunosupp property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "immunosupp")
self.util.boolPropertyTest(self, attr, "immunosupp")
def testTestsColonoscopy(self):
""" Test the colonoscopy property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "colonoscopy")
self.util.boolPropertyTest(self, attr, "colonoscopy")
def testTestsOralContrast(self):
""" Test the oral_contrast property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "oral_contrast")
self.util.boolPropertyTest(self, attr, "oral_contrast")
def testDiseaseComment(self):
""" Test the disease_comment property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "disease_comment")
self.util.stringPropertyTest(self, attr, "disease_comment")
def testDiseaseName(self):
""" Test the disease_name property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "disease_name")
self.util.stringPropertyTest(self, attr, "disease_name")
def testDiseaseDescription(self):
""" Test the disease_description property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "disease_description")
self.util.stringPropertyTest(self, attr, "disease_description")
def testDiseaseOntologyID(self):
""" Test the disease_ontology_id property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "disease_ontology_id")
self.util.stringPropertyTest(self, attr, "disease_ontology_id")
def testDiseaseMeshID(self):
""" Test the disease_mesh_id property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "disease_mesh_id")
self.util.stringPropertyTest(self, attr, "disease_mesh_id")
def testDiseaseNciID(self):
""" Test the disease_nci_id property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "disease_nci_id")
self.util.stringPropertyTest(self, attr, "disease_nci_id")
def testDiseaseUmlsConceptID(self):
""" Test the disease_umls_concept_id property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "disease_umls_concept_id")
self.util.stringPropertyTest(self, attr, "disease_umls_concept_id")
def testDiseaseStudyStatus(self):
""" Test the disease_study_status property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "disease_study_status")
self.util.stringPropertyTest(self, attr, "disease_study_status")
def testPsychPsychiatric(self):
""" Test the psychiatric property. """
attr = self.session.create_visit_attr()
self.util.boolTypeTest(self, attr, "psychiatric")
self.util.boolPropertyTest(self, attr, "psychiatric")
def testPsychUpset(self):
""" Test the upset property. """
attr = self.session.create_visit_attr()
self.util.intTypeTest(self, attr, "upset")
self.util.intPropertyTest(self, attr, "upset")
def testPsychControl(self):
""" Test the control property. """
attr = self.session.create_visit_attr()
self.util.intTypeTest(self, attr, "control")
self.util.intPropertyTest(self, attr, "control")
def testPsychStress(self):
""" Test the stress property. """
attr = self.session.create_visit_attr()
self.util.intTypeTest(self, attr, "stress")
self.util.intPropertyTest(self, attr, "stress")
def testPsychStressDef(self):
""" Test the stress_def property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "stress_def")
self.util.stringPropertyTest(self, attr, "stress_def")
def testPsychConfident(self):
""" Test the confident property. """
attr = self.session.create_visit_attr()
self.util.intTypeTest(self, attr, "confident")
self.util.intPropertyTest(self, attr, "confident")
def testPsychGoingYourWay(self):
""" Test the going_your_way property. """
attr = self.session.create_visit_attr()
self.util.intTypeTest(self, attr, "going_your_way")
self.util.intPropertyTest(self, attr, "going_your_way")
def testPsychCoping(self):
""" Test the coping property. """
attr = self.session.create_visit_attr()
self.util.intTypeTest(self, attr, "coping")
self.util.intPropertyTest(self, attr, "coping")
def testPsychIrritation(self):
""" Test the irritation property. """
attr = self.session.create_visit_attr()
self.util.intTypeTest(self, attr, "irritation")
self.util.intPropertyTest(self, attr, "irritation")
def testPsychOnTop(self):
""" Test the on_top property. """
attr = self.session.create_visit_attr()
self.util.intTypeTest(self, attr, "on_top")
self.util.intPropertyTest(self, attr, "on_top")
def testPsychAnger(self):
""" Test the anger property. """
attr = self.session.create_visit_attr()
self.util.intTypeTest(self, attr, "anger")
self.util.intPropertyTest(self, attr, "anger")
def testPsychDifficulties(self):
""" Test the difficulties property. """
attr = self.session.create_visit_attr()
self.util.intTypeTest(self, attr, "difficulties")
self.util.intPropertyTest(self, attr, "difficulties")
def testExerciseVigActivity(self):
"""
Test the vig_activity_days, vig_activity_hours and
vig_activity_minutes properties.
"""
attr = self.session.create_visit_attr()
# vig_activity_days
self.util.intTypeTest(self, attr, "vig_activity_days")
self.util.intPropertyTest(self, attr, "vig_activity_days")
# vig_activity_hours
self.util.intTypeTest(self, attr, "vig_activity_hours")
self.util.intPropertyTest(self, attr, "vig_activity_hours")
# vig_activity_minutes
self.util.intTypeTest(self, attr, "vig_activity_minutes")
self.util.intPropertyTest(self, attr, "vig_activity_minutes")
def testExerciseModActivity(self):
"""
Test the mod_activity_days, mod_activity_hours and
mod_activity_minutes properties.
"""
attr = self.session.create_visit_attr()
# mod_activity_days
self.util.intTypeTest(self, attr, "mod_activity_days")
self.util.intPropertyTest(self, attr, "mod_activity_days")
# mod_activity_hours
self.util.intTypeTest(self, attr, "mod_activity_hours")
self.util.intPropertyTest(self, attr, "mod_activity_hours")
# mod_activity_minutes
self.util.intTypeTest(self, attr, "mod_activity_minutes")
self.util.intPropertyTest(self, attr, "mod_activity_minutes")
def testExerciseWalking(self):
"""
Test the walking_days, walking_hours and walking_minutes properties.
"""
attr = self.session.create_visit_attr()
# walking_days
self.util.intTypeTest(self, attr, "walking_days")
self.util.intPropertyTest(self, attr, "walking_days")
# walking_hours
self.util.intTypeTest(self, attr, "walking_hours")
self.util.intPropertyTest(self, attr, "walking_hours")
# walking_minutes
self.util.intTypeTest(self, attr, "walking_minutes")
self.util.intPropertyTest(self, attr, "walking_minutes")
def testExerciseActivity30d(self):
""" Test the activity_30d property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "activity_30d")
self.util.stringPropertyTest(self, attr, "activity_30d")
def testExerciseActivity3m(self):
""" Test the activity_3m property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "activity_3m")
self.util.stringPropertyTest(self, attr, "activity_3m")
def testExerciseActivityChange30d(self):
""" Test the activity_change_30d property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "activity_change_30d")
self.util.stringPropertyTest(self, attr, "activity_change_30d")
def testExerciseActivityChange3m(self):
""" Test the activity_change_3m property. """
attr = self.session.create_visit_attr()
self.util.stringTypeTest(self, attr, "activity_change_3m")
self.util.stringPropertyTest(self, attr, "activity_change_3m")
if __name__ == '__main__':
unittest.main()
|
|
"""Support for the iZone HVAC."""
import logging
from typing import Optional, List
from pizone import Zone, Controller
from homeassistant.core import callback
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
HVAC_MODE_HEAT_COOL,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
FAN_LOW,
FAN_MEDIUM,
FAN_HIGH,
FAN_AUTO,
PRESET_ECO,
PRESET_NONE,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
PRECISION_HALVES,
TEMP_CELSIUS,
CONF_EXCLUDE,
)
from homeassistant.helpers.temperature import display_temp as show_temp
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import (
DATA_DISCOVERY_SERVICE,
IZONE,
DISPATCH_CONTROLLER_DISCOVERED,
DISPATCH_CONTROLLER_DISCONNECTED,
DISPATCH_CONTROLLER_RECONNECTED,
DISPATCH_CONTROLLER_UPDATE,
DISPATCH_ZONE_UPDATE,
DATA_CONFIG,
)
_LOGGER = logging.getLogger(__name__)
_IZONE_FAN_TO_HA = {
Controller.Fan.LOW: FAN_LOW,
Controller.Fan.MED: FAN_MEDIUM,
Controller.Fan.HIGH: FAN_HIGH,
Controller.Fan.AUTO: FAN_AUTO,
}
async def async_setup_entry(
hass: HomeAssistantType, config: ConfigType, async_add_entities
):
"""Initialize an IZone Controller."""
disco = hass.data[DATA_DISCOVERY_SERVICE]
@callback
def init_controller(ctrl: Controller):
"""Register the controller device and the containing zones."""
conf = hass.data.get(DATA_CONFIG) # type: ConfigType
# Filter out any entities excluded in the config file
if conf and ctrl.device_uid in conf[CONF_EXCLUDE]:
_LOGGER.info("Controller UID=%s ignored as excluded", ctrl.device_uid)
return
_LOGGER.info("Controller UID=%s discovered", ctrl.device_uid)
device = ControllerDevice(ctrl)
async_add_entities([device])
async_add_entities(device.zones.values())
# create any components not yet created
for controller in disco.pi_disco.controllers.values():
init_controller(controller)
# connect to register any further components
async_dispatcher_connect(hass, DISPATCH_CONTROLLER_DISCOVERED, init_controller)
return True
class ControllerDevice(ClimateDevice):
"""Representation of iZone Controller."""
def __init__(self, controller: Controller) -> None:
"""Initialise ControllerDevice."""
self._controller = controller
self._supported_features = SUPPORT_FAN_MODE
if (
controller.ras_mode == "master" and controller.zone_ctrl == 13
) or controller.ras_mode == "RAS":
self._supported_features |= SUPPORT_TARGET_TEMPERATURE
self._state_to_pizone = {
HVAC_MODE_COOL: Controller.Mode.COOL,
HVAC_MODE_HEAT: Controller.Mode.HEAT,
HVAC_MODE_HEAT_COOL: Controller.Mode.AUTO,
HVAC_MODE_FAN_ONLY: Controller.Mode.VENT,
HVAC_MODE_DRY: Controller.Mode.DRY,
}
if controller.free_air_enabled:
self._supported_features |= SUPPORT_PRESET_MODE
self._fan_to_pizone = {}
for fan in controller.fan_modes:
self._fan_to_pizone[_IZONE_FAN_TO_HA[fan]] = fan
self._available = True
self._device_info = {
"identifiers": {(IZONE, self.unique_id)},
"name": self.name,
"manufacturer": "IZone",
"model": self._controller.sys_type,
}
# Create the zones
self.zones = {}
for zone in controller.zones:
self.zones[zone] = ZoneDevice(self, zone)
async def async_added_to_hass(self):
"""Call on adding to hass."""
# Register for connect/disconnect/update events
@callback
def controller_disconnected(ctrl: Controller, ex: Exception) -> None:
"""Disconnected from controller."""
if ctrl is not self._controller:
return
self.set_available(False, ex)
self.async_on_remove(
async_dispatcher_connect(
self.hass, DISPATCH_CONTROLLER_DISCONNECTED, controller_disconnected
)
)
@callback
def controller_reconnected(ctrl: Controller) -> None:
"""Reconnected to controller."""
if ctrl is not self._controller:
return
self.set_available(True)
self.async_on_remove(
async_dispatcher_connect(
self.hass, DISPATCH_CONTROLLER_RECONNECTED, controller_reconnected
)
)
@callback
def controller_update(ctrl: Controller) -> None:
"""Handle controller data updates."""
if ctrl is not self._controller:
return
self.async_schedule_update_ha_state()
self.async_on_remove(
async_dispatcher_connect(
self.hass, DISPATCH_CONTROLLER_UPDATE, controller_update
)
)
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
@callback
def set_available(self, available: bool, ex: Exception = None) -> None:
"""
Set availability for the controller.
Also sets zone availability as they follow the same availability.
"""
if self.available == available:
return
if available:
_LOGGER.info("Reconnected controller %s ", self._controller.device_uid)
else:
_LOGGER.info(
"Controller %s disconnected due to exception: %s",
self._controller.device_uid,
ex,
)
self._available = available
self.async_schedule_update_ha_state()
for zone in self.zones.values():
zone.async_schedule_update_ha_state()
@property
def device_info(self):
"""Return the device info for the iZone system."""
return self._device_info
@property
def unique_id(self):
"""Return the ID of the controller device."""
return self._controller.device_uid
@property
def name(self) -> str:
"""Return the name of the entity."""
return f"iZone Controller {self._controller.device_uid}"
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return False
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return self._supported_features
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement which this thermostat uses."""
return TEMP_CELSIUS
@property
def precision(self) -> float:
"""Return the precision of the system."""
return PRECISION_HALVES
@property
def device_state_attributes(self):
"""Return the optional state attributes."""
return {
"supply_temperature": show_temp(
self.hass,
self.supply_temperature,
self.temperature_unit,
self.precision,
),
"temp_setpoint": show_temp(
self.hass,
self._controller.temp_setpoint,
self.temperature_unit,
self.precision,
),
}
@property
def hvac_mode(self) -> str:
"""Return current operation ie. heat, cool, idle."""
if not self._controller.is_on:
return HVAC_MODE_OFF
mode = self._controller.mode
for (key, value) in self._state_to_pizone.items():
if value == mode:
return key
assert False, "Should be unreachable"
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available operation modes."""
if self._controller.free_air:
return [HVAC_MODE_OFF, HVAC_MODE_FAN_ONLY]
return [HVAC_MODE_OFF, *self._state_to_pizone]
@property
def preset_mode(self):
"""Eco mode is external air."""
return PRESET_ECO if self._controller.free_air else PRESET_NONE
@property
def preset_modes(self):
"""Available preset modes, normal or eco."""
if self._controller.free_air_enabled:
return [PRESET_NONE, PRESET_ECO]
return [PRESET_NONE]
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature."""
if self._controller.mode == Controller.Mode.FREE_AIR:
return self._controller.temp_supply
return self._controller.temp_return
@property
def target_temperature(self) -> Optional[float]:
"""Return the temperature we try to reach."""
if not self._supported_features & SUPPORT_TARGET_TEMPERATURE:
return None
return self._controller.temp_setpoint
@property
def supply_temperature(self) -> float:
"""Return the current supply, or in duct, temperature."""
return self._controller.temp_supply
@property
def target_temperature_step(self) -> Optional[float]:
"""Return the supported step of target temperature."""
return 0.5
@property
def fan_mode(self) -> Optional[str]:
"""Return the fan setting."""
return _IZONE_FAN_TO_HA[self._controller.fan]
@property
def fan_modes(self) -> Optional[List[str]]:
"""Return the list of available fan modes."""
return list(self._fan_to_pizone)
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
return self._controller.temp_min
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
return self._controller.temp_max
async def wrap_and_catch(self, coro):
"""Catch any connection errors and set unavailable."""
try:
await coro
except ConnectionError as ex:
self.set_available(False, ex)
else:
self.set_available(True)
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
if not self.supported_features & SUPPORT_TARGET_TEMPERATURE:
self.async_schedule_update_ha_state(True)
return
temp = kwargs.get(ATTR_TEMPERATURE)
if temp is not None:
await self.wrap_and_catch(self._controller.set_temp_setpoint(temp))
async def async_set_fan_mode(self, fan_mode: str) -> None:
"""Set new target fan mode."""
fan = self._fan_to_pizone[fan_mode]
await self.wrap_and_catch(self._controller.set_fan(fan))
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target operation mode."""
if hvac_mode == HVAC_MODE_OFF:
await self.wrap_and_catch(self._controller.set_on(False))
return
if not self._controller.is_on:
await self.wrap_and_catch(self._controller.set_on(True))
if self._controller.free_air:
return
mode = self._state_to_pizone[hvac_mode]
await self.wrap_and_catch(self._controller.set_mode(mode))
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set the preset mode."""
await self.wrap_and_catch(
self._controller.set_free_air(preset_mode == PRESET_ECO)
)
async def async_turn_on(self) -> None:
"""Turn the entity on."""
await self.wrap_and_catch(self._controller.set_on(True))
class ZoneDevice(ClimateDevice):
"""Representation of iZone Zone."""
def __init__(self, controller: ControllerDevice, zone: Zone) -> None:
"""Initialise ZoneDevice."""
self._controller = controller
self._zone = zone
self._name = zone.name.title()
self._supported_features = 0
if zone.type != Zone.Type.AUTO:
self._state_to_pizone = {
HVAC_MODE_OFF: Zone.Mode.CLOSE,
HVAC_MODE_FAN_ONLY: Zone.Mode.OPEN,
}
else:
self._state_to_pizone = {
HVAC_MODE_OFF: Zone.Mode.CLOSE,
HVAC_MODE_FAN_ONLY: Zone.Mode.OPEN,
HVAC_MODE_HEAT_COOL: Zone.Mode.AUTO,
}
self._supported_features |= SUPPORT_TARGET_TEMPERATURE
self._device_info = {
"identifiers": {(IZONE, controller.unique_id, zone.index)},
"name": self.name,
"manufacturer": "IZone",
"via_device": (IZONE, controller.unique_id),
"model": zone.type.name.title(),
}
async def async_added_to_hass(self):
"""Call on adding to hass."""
@callback
def zone_update(ctrl: Controller, zone: Zone) -> None:
"""Handle zone data updates."""
if zone is not self._zone:
return
self._name = zone.name.title()
self.async_schedule_update_ha_state()
self.async_on_remove(
async_dispatcher_connect(self.hass, DISPATCH_ZONE_UPDATE, zone_update)
)
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._controller.available
@property
def assumed_state(self) -> bool:
"""Return True if unable to access real state of the entity."""
return self._controller.assumed_state
@property
def device_info(self):
"""Return the device info for the iZone system."""
return self._device_info
@property
def unique_id(self):
"""Return the ID of the controller device."""
return "{}_z{}".format(self._controller.unique_id, self._zone.index + 1)
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return False
@property
def supported_features(self):
"""Return the list of supported features."""
try:
if self._zone.mode == Zone.Mode.AUTO:
return self._supported_features
return self._supported_features & ~SUPPORT_TARGET_TEMPERATURE
except ConnectionError:
return None
@property
def temperature_unit(self):
"""Return the unit of measurement which this thermostat uses."""
return TEMP_CELSIUS
@property
def precision(self):
"""Return the precision of the system."""
return PRECISION_HALVES
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
mode = self._zone.mode
for (key, value) in self._state_to_pizone.items():
if value == mode:
return key
return None
@property
def hvac_modes(self):
"""Return the list of available operation modes."""
return list(self._state_to_pizone.keys())
@property
def current_temperature(self):
"""Return the current temperature."""
return self._zone.temp_current
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self._zone.type != Zone.Type.AUTO:
return None
return self._zone.temp_setpoint
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return 0.5
@property
def min_temp(self):
"""Return the minimum temperature."""
return self._controller.min_temp
@property
def max_temp(self):
"""Return the maximum temperature."""
return self._controller.max_temp
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
if self._zone.mode != Zone.Mode.AUTO:
return
temp = kwargs.get(ATTR_TEMPERATURE)
if temp is not None:
await self._controller.wrap_and_catch(self._zone.set_temp_setpoint(temp))
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target operation mode."""
mode = self._state_to_pizone[hvac_mode]
await self._controller.wrap_and_catch(self._zone.set_mode(mode))
self.async_schedule_update_ha_state()
@property
def is_on(self):
"""Return true if on."""
return self._zone.mode != Zone.Mode.CLOSE
async def async_turn_on(self):
"""Turn device on (open zone)."""
if self._zone.type == Zone.Type.AUTO:
await self._controller.wrap_and_catch(self._zone.set_mode(Zone.Mode.AUTO))
else:
await self._controller.wrap_and_catch(self._zone.set_mode(Zone.Mode.OPEN))
self.async_schedule_update_ha_state()
async def async_turn_off(self):
"""Turn device off (close zone)."""
await self._controller.wrap_and_catch(self._zone.set_mode(Zone.Mode.CLOSE))
self.async_schedule_update_ha_state()
|
|
#!/usr/bin/env python3
"""
BLIF/EBLIF parsing, writing and manipulation utilities.
BLIF format specification:
https://course.ece.cmu.edu/~ee760/760docs/blif.pdf
EBLIF format specification:
https://docs.verilogtorouting.org/en/latest/vpr/file_formats/#extended-blif-eblif
"""
import re
from collections import OrderedDict
# =============================================================================
class Cell:
"""
This class represents a single cell in a netlist
"""
def __init__(self, type):
# Cell name. This one is for reference only. It won't be written back
# with the .cname attribute. Use the cname field for that.
self.name = None
# Cell type (model)
self.type = type
# Ports and connections. Indexed by port specifications
# (as <port>[<bit>]), contains net names.
self.ports = OrderedDict()
# For const sources ($const) this is the value of the constant
# For luts ($lut) this is a list holding the truth table
# For latches this is the initial value of the latch or None
self.init = None
# Extended EBLIF data
self.cname = None
self.attributes = OrderedDict() # name: value, strings
self.parameters = OrderedDict() # name: value, strings
def __str__(self):
return "{} ({})".format(self.type, self.name)
def __repr__(self):
return str(self)
class Eblif:
"""
This class represents a top-level module of a BLIF/EBLIF netlist.
The class contains a BLIF/EBLIF parser and serializer. The parser supports
all EBLIF constructs except for .conn statements. It is possible to do a
parser -> serializer round trip which should generate a file identical to
the one provided to the parser.
Netlist cells are stored using the Cell class (see above).
Cells defined as ".subckt <type>" are stored along with their type. Native
BLIF cells (.names, .latch etc.) are represented via the following
"built-in" cell models.
* $const - A constant generator (0-LUT). The output port is named "lut_out"
and the init parameter is set to the constant value generated.
* $lut - N-input LUT. The input ports are named "lut_in[i]" and the output
one "lut_out". The init parameter contains the truth table; the log2 of the
table size defines the LUT width.
* $latch - A generic ff/latch with unknown type. Common ports are named:
"D", "Q", and "clock". The init value specifies the initial ff/latch state
as per the BLIF specification.
* $fe, $re, $ah, $al, $as - A ff/latch of type corresponding to BLIF
definitions. Apart from the different types these are identical to $latch.
* $input, $output - These are used to represent top-level IO ports. The
cells have a single port named "inpad" or "outpad" respectively. To
create / remove such cells use convert_ports_to_cells() and
convert_cells_to_ports() methods.
"""
def __init__(self, model):
# Top-level module name
self.model = model
# Top-level input and output nets
self.inputs = []
self.outputs = []
# Cells (by names)
self.cells = OrderedDict()
def add_cell(self, cell):
"""
Adds a cell. Generates a name for it if the cell doesn't have one. Returns
the name under which the cell is stored.
"""
# No cell
if cell is None:
return None
# Use the cell name
if cell.name:
name = cell.name
# Generate a name
else:
index = 0
while True:
    name = "{}{}".format(cell.type, index)
    if name not in self.cells:
        cell.name = name
        break
    index += 1
# Add it
assert cell.name not in self.cells, cell.name
self.cells[cell.name] = cell
return name
def find_cell(self, name, use_cname=True):
"""
Finds a cell in the netlist given its name. When use_cname is True,
it also looks the cell up by its cname.
"""
# Try by name
cell = self.cells.get(name, None)
# Try by cname if allowed
if cell is None and use_cname:
for c in self.cells.values():
if c.cname == name:
cell = c
break
return cell
def convert_ports_to_cells(self):
"""
Converts top-level input and output ports to $input and $output cells
"""
# Convert inputs
for port in self.inputs:
cell = Cell("$input")
cell.name = port
cell.ports["inpad"] = port
self.add_cell(cell)
# Convert outputs
for port in self.outputs:
cell = Cell("$output")
cell.name = "out:" + port
cell.cname = cell.name
cell.ports["outpad"] = port
self.add_cell(cell)
# Clear top-level ports
self.inputs = []
self.outputs = []
def convert_cells_to_ports(self):
"""
Converts $input and $output cells into top-level ports
"""
for key in list(self.cells.keys()):
cell = self.cells[key]
# Input
if cell.type == "$input":
assert "inpad" in cell.ports
name = cell.ports["inpad"]
self.inputs.append(name)
del self.cells[key]
# Output
if cell.type == "$output":
assert "outpad" in cell.ports
name = cell.name.replace("out:", "")
self.outputs.append(name)
del self.cells[key]
# Insert a buffer if the port name does not match the net name
net = cell.ports["outpad"]
if name != net:
cell = Cell("$lut")
cell.name = name
cell.ports["lut_in[0]"] = net
cell.ports["lut_out"] = name
cell.init = [0, 1]
self.add_cell(cell)
@staticmethod
def from_string(string):
"""
Parses a BLIF/EBLIF netlist as a multi-line string. Returns an Eblif
class instance.
"""
def parse_single_output_cover(parts):
"""
Parses a single output cover of a BLIF truth table.
"""
# FIXME: Add support for don't cares
if "-" in parts[0]:
assert False, "Don't cares ('-') not supported yet!"
# Assume only single address
addr = int(parts[0][::-1], 2)
data = int(parts[1])
yield addr, data
# Split lines, strip whitespace, remove blank ones
lines = string.split("\n")
lines = [line.strip() for line in lines]
lines = [line for line in lines if line]
eblif = None
cell = None
# Parse lines
for line_no, line in enumerate(lines):
fields = line.split()
# Reject comments
for i in range(len(fields)):
if fields[i].startswith("#"):
fields = fields[:i]
break
# Empty line
if not fields:
continue
# Look for .model
if fields[0] == ".model":
eblif = Eblif(fields[1])
continue
# No EBLIF yet
if not eblif:
continue
# Input list
if fields[0] == ".inputs":
eblif.inputs = fields[1:]
# Output list
elif fields[0] == ".outputs":
eblif.outputs = fields[1:]
# Got a generic cell
elif fields[0] == ".subckt":
assert len(fields) >= 2
eblif.add_cell(cell)
# Add the new cell
cell = Cell(fields[1])
# Add ports and net connections
for conn in fields[2:]:
port, net = conn.split("=", maxsplit=1)
cell.ports[port] = net
# Got a native flip-flop / latch
elif fields[0] == ".latch":
assert len(fields) >= 3
eblif.add_cell(cell)
# Add the new cell
cell = Cell("$latch")
# Input and output
cell.ports["D"] = fields[1]
cell.ports["Q"] = fields[2]
# Got type and control
if len(fields) >= 5:
cell.type = "$" + fields[3]
cell.ports["clock"] = fields[4]
# Use the output net as cell name
cell.name = cell.ports["Q"]
# Got initial value
if len(fields) >= 6:
cell.init = int(fields[5])
else:
cell.init = 3 # Unknown
# Got a native LUT
elif fields[0] == ".names":
assert len(fields) >= 2
eblif.add_cell(cell)
# Determine LUT width
width = len(fields[1:-1])
# Add the new cell
type = "$lut" if width > 0 else "$const"
cell = Cell(type)
# Initialize the truth table
if type == "$lut":
cell.init = [0 for i in range(2**width)]
elif type == "$const":
cell.init = 0
# Input connections
for i, net in enumerate(fields[1:-1]):
port = "lut_in[{}]".format(i)
cell.ports[port] = net
# Output connection
cell.ports["lut_out"] = fields[-1]
# Use the output net as cell name
cell.name = cell.ports["lut_out"]
# LUT truth table chunk
elif all([c in ("0", "1", "-") for c in fields[0]]):
assert cell is not None
assert cell.type in ["$lut", "$const"]
# The cell is a LUT
if cell.type == "$lut":
assert len(fields) == 2
for addr, data in parse_single_output_cover(fields):
cell.init[addr] = data
# The cell is a const source
elif cell.type == "$const":
assert len(fields) == 1
cell.init = int(fields[0])
# Cell name
elif fields[0] == ".cname":
cell.name = fields[1]
cell.cname = cell.name
# Cell attribute
elif fields[0] == ".attr":
cell.attributes[fields[1]] = fields[2]
# Cell parameter
elif fields[0] == ".param":
cell.parameters[fields[1]] = fields[2]
# End
elif fields[0] == ".end":
# FIXME: Mark that the end is reached and disregard following
# keywords
pass
# Unknown directive
else:
assert False, line
# Store the current cell
eblif.add_cell(cell)
return eblif
@staticmethod
def from_file(file_name):
"""
Parses a BLIF/EBLIF file. Returns an Eblif class instance.
"""
with open(file_name, "r") as fp:
string = fp.read()
return Eblif.from_string(string)
def to_string(self, cname=True, attr=True, param=True, consts=True):
"""
Formats EBLIF data as a multi-line EBLIF string. Additional parameters
control what kind of extended (EBLIF) data will be written.
"""
lines = []
# Header
lines.append(".model {}".format(self.model))
lines.append(".inputs {}".format(" ".join(self.inputs)))
lines.append(".outputs {}".format(" ".join(self.outputs)))
# Cells
for cell in self.cells.values():
# A constant source
if cell.type == "$const":
# Skip consts
if not consts:
continue
lines.append(".names {}".format(str(cell.ports["lut_out"])))
if cell.init != 0:
lines.append(str(cell.init))
# A LUT
elif cell.type == "$lut":
# Identify LUT input pins and their bind indices
nets = {}
for port, net in cell.ports.items():
match = re.fullmatch(r"lut_in\[(?P<index>[0-9]+)\]", port)
if match is not None:
index = int(match.group("index"))
nets[index] = net
# Write the cell header. Sort inputs by their indices
keys = sorted(nets.keys())
lines.append(
".names {} {}".format(
" ".join([nets[k] for k in keys]),
str(cell.ports["lut_out"])
)
)
# Write the truth table
fmt = "{:0" + str(len(nets)) + "b}"
tab = []
for addr, data in enumerate(cell.init):
if data != 0:
tab.append(fmt.format(addr)[::-1] + " 1")
lines.extend(sorted(tab))
# A latch
elif cell.type in ["$fe", "$re", "$ah", "$al", "$as"]:
line = ".latch {} {} {} {} {}".format(
str(cell.ports["D"]), str(cell.ports["Q"]), cell.type[1:],
str(cell.ports["clock"]), str(cell.init)
)
lines.append(line)
# A generic latch controlled by a single global clock
elif cell.type == "$latch":
line = ".latch {} {} {}".format(
str(cell.ports["D"]), str(cell.ports["Q"]), str(cell.init)
)
lines.append(line)
# A generic subcircuit
else:
# The subcircuit along with its connections
line = ".subckt {}".format(cell.type)
for port, net in cell.ports.items():
line += " {}={}".format(port, net)
lines.append(line)
# Cell name
if cname and cell.cname:
lines.append(".cname {}".format(cell.cname))
# Cell attributes
if attr:
for k, v in cell.attributes.items():
lines.append(".attr {} {}".format(k, v))
# Cell parameters
if param:
for k, v in cell.parameters.items():
lines.append(".param {} {}".format(k, v))
# Footer
lines.append(".end")
# Join all lines
return "\n".join(lines)
def to_file(self, file_name, **kw):
"""
Writes EBLIF data to a file
"""
with open(file_name, "w") as fp:
fp.write(self.to_string(**kw))
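# A minimal usage sketch of the classes above (illustrative only; the module,
# net and port values are made up): build a 2-input AND netlist by hand,
# serialize it, then round-trip it through the parser.
#
#   eblif = Eblif("top")
#   eblif.inputs = ["a", "b"]
#   eblif.outputs = ["y"]
#
#   lut = Cell("$lut")
#   lut.ports["lut_in[0]"] = "a"
#   lut.ports["lut_in[1]"] = "b"
#   lut.ports["lut_out"] = "y"
#   lut.init = [0, 0, 0, 1]  # Only the a=1, b=1 entry is one (AND)
#   eblif.add_cell(lut)
#
#   text = eblif.to_string()
#   assert Eblif.from_string(text).to_string() == text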
|
|
"""
HTML Widget classes
"""
import copy
import datetime
import re
import warnings
from contextlib import suppress
from itertools import chain
from django.conf import settings
from django.forms.utils import to_current_timezone
from django.templatetags.static import static
from django.utils import datetime_safe, formats
from django.utils.dates import MONTHS
from django.utils.formats import get_format
from django.utils.html import format_html, html_safe
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from .renderers import get_default_renderer
__all__ = (
'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'NumberInput',
'EmailInput', 'URLInput', 'PasswordInput', 'HiddenInput',
'MultipleHiddenInput', 'FileInput', 'ClearableFileInput', 'Textarea',
'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select',
'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget',
'SplitHiddenDateTimeWidget', 'SelectDateWidget',
)
MEDIA_TYPES = ('css', 'js')
class MediaOrderConflictWarning(RuntimeWarning):
pass
@html_safe
class Media:
def __init__(self, media=None, css=None, js=None):
if media is not None:
css = getattr(media, 'css', {})
js = getattr(media, 'js', [])
else:
if css is None:
css = {}
if js is None:
js = []
self._css = css
self._js = js
def __repr__(self):
return 'Media(css=%r, js=%r)' % (self._css, self._js)
def __str__(self):
return self.render()
def render(self):
return mark_safe('\n'.join(chain.from_iterable(getattr(self, 'render_' + name)() for name in MEDIA_TYPES)))
def render_js(self):
return [
format_html(
'<script type="text/javascript" src="{}"></script>',
self.absolute_path(path)
) for path in self._js
]
def render_css(self):
# To keep rendering order consistent, we can't just iterate over items().
# We need to sort the keys, and iterate over the sorted list.
media = sorted(self._css)
return chain.from_iterable([
format_html(
'<link href="{}" type="text/css" media="{}" rel="stylesheet" />',
self.absolute_path(path), medium
) for path in self._css[medium]
] for medium in media)
def absolute_path(self, path):
"""
Given a relative or absolute path to a static asset, return an absolute
path. An absolute path will be returned unchanged while a relative path
will be passed to django.templatetags.static.static().
"""
if path.startswith(('http://', 'https://', '/')):
return path
return static(path)
def __getitem__(self, name):
"""Return a Media object that only contains media of the given type."""
if name in MEDIA_TYPES:
return Media(**{str(name): getattr(self, '_' + name)})
raise KeyError('Unknown media type "%s"' % name)
@staticmethod
def merge(list_1, list_2):
"""
Merge two lists while trying to keep the relative order of the elements.
Warn if the lists have the same two elements in a different relative
order.
For static assets it can be important to have them included in the DOM
in a certain order. In JavaScript you may not be able to reference a
global or in CSS you might want to override a style.
"""
# Start with a copy of list_1.
combined_list = list(list_1)
last_insert_index = len(list_1)
# Walk list_2 in reverse, inserting each element into combined_list if
# it doesn't already exist.
for path in reversed(list_2):
try:
# Does path already exist in the list?
index = combined_list.index(path)
except ValueError:
# Add path to combined_list since it doesn't exist.
combined_list.insert(last_insert_index, path)
else:
if index > last_insert_index:
warnings.warn(
'Detected duplicate Media files in an opposite order:\n'
'%s\n%s' % (combined_list[last_insert_index], combined_list[index]),
MediaOrderConflictWarning,
)
# path already exists in the list. Update last_insert_index so
# that the following elements are inserted in front of this one.
last_insert_index = index
return combined_list
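# Illustrative merge() examples (asset names are made up):
#   Media.merge(['a.js', 'b.js'], ['b.js', 'c.js']) -> ['a.js', 'b.js', 'c.js']
#   Media.merge(['a.js', 'b.js'], ['b.js', 'a.js']) -> ['a.js', 'b.js'],
#   emitting a MediaOrderConflictWarning because the shared files appear in
#   opposite relative orders.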
def __add__(self, other):
combined = Media()
combined._js = self.merge(self._js, other._js)
combined._css = {
medium: self.merge(self._css.get(medium, []), other._css.get(medium, []))
for medium in self._css.keys() | other._css.keys()
}
return combined
def media_property(cls):
def _media(self):
# Get the media property of the superclass, if it exists
sup_cls = super(cls, self)
try:
base = sup_cls.media
except AttributeError:
base = Media()
# Get the media definition for this class
definition = getattr(cls, 'Media', None)
if definition:
extend = getattr(definition, 'extend', True)
if extend:
if extend is True:
m = base
else:
m = Media()
for medium in extend:
m = m + base[medium]
return m + Media(definition)
else:
return Media(definition)
else:
return base
return property(_media)
class MediaDefiningClass(type):
"""
Metaclass for classes that can have media definitions.
"""
def __new__(mcs, name, bases, attrs):
new_class = super(MediaDefiningClass, mcs).__new__(mcs, name, bases, attrs)
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
class Widget(metaclass=MediaDefiningClass):
needs_multipart_form = False  # Determines whether this widget needs a multipart form
is_localized = False
is_required = False
supports_microseconds = True
def __init__(self, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
memo[id(self)] = obj
return obj
@property
def is_hidden(self):
return self.input_type == 'hidden' if hasattr(self, 'input_type') else False
def subwidgets(self, name, value, attrs=None):
context = self.get_context(name, value, attrs)
yield context['widget']
def format_value(self, value):
"""
Return a value as it should appear when rendered in a template.
"""
if value == '' or value is None:
return None
if self.is_localized:
return formats.localize_input(value)
return str(value)
def get_context(self, name, value, attrs):
context = {}
context['widget'] = {
'name': name,
'is_hidden': self.is_hidden,
'required': self.is_required,
'value': self.format_value(value),
'attrs': self.build_attrs(self.attrs, attrs),
'template_name': self.template_name,
}
return context
def render(self, name, value, attrs=None, renderer=None):
"""Render the widget as an HTML string."""
context = self.get_context(name, value, attrs)
return self._render(self.template_name, context, renderer)
def _render(self, template_name, context, renderer=None):
if renderer is None:
renderer = get_default_renderer()
return mark_safe(renderer.render(template_name, context))
def build_attrs(self, base_attrs, extra_attrs=None):
"""Build an attribute dictionary."""
attrs = base_attrs.copy()
if extra_attrs is not None:
attrs.update(extra_attrs)
return attrs
def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, return the value
of this widget or None if it's not provided.
"""
return data.get(name)
def value_omitted_from_data(self, data, files, name):
return name not in data
def id_for_label(self, id_):
"""
Return the HTML ID attribute of this Widget for use by a <label>,
given the ID of the field. Return None if no ID is available.
This hook is necessary because some widgets have multiple HTML
elements and, thus, multiple IDs. In that case, this method should
return an ID value that corresponds to the first ID in the widget's
tags.
"""
return id_
def use_required_attribute(self, initial):
return not self.is_hidden
class Input(Widget):
"""
Base class for all <input> widgets.
"""
input_type = None # Subclasses must define this.
template_name = 'django/forms/widgets/input.html'
def __init__(self, attrs=None):
if attrs is not None:
attrs = attrs.copy()
self.input_type = attrs.pop('type', self.input_type)
super().__init__(attrs)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget']['type'] = self.input_type
return context
class TextInput(Input):
input_type = 'text'
template_name = 'django/forms/widgets/text.html'
class NumberInput(Input):
input_type = 'number'
template_name = 'django/forms/widgets/number.html'
class EmailInput(Input):
input_type = 'email'
template_name = 'django/forms/widgets/email.html'
class URLInput(Input):
input_type = 'url'
template_name = 'django/forms/widgets/url.html'
class PasswordInput(Input):
input_type = 'password'
template_name = 'django/forms/widgets/password.html'
def __init__(self, attrs=None, render_value=False):
super().__init__(attrs)
self.render_value = render_value
def get_context(self, name, value, attrs):
if not self.render_value:
value = None
return super().get_context(name, value, attrs)
class HiddenInput(Input):
input_type = 'hidden'
template_name = 'django/forms/widgets/hidden.html'
class MultipleHiddenInput(HiddenInput):
"""
Handle <input type="hidden"> for fields that have a list
of values.
"""
template_name = 'django/forms/widgets/multiple_hidden.html'
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
final_attrs = context['widget']['attrs']
id_ = context['widget']['attrs'].get('id')
subwidgets = []
for index, value_ in enumerate(context['widget']['value']):
widget_attrs = final_attrs.copy()
if id_:
# An ID attribute was given. Add a numeric index as a suffix
# so that the inputs don't all have the same ID attribute.
widget_attrs['id'] = '%s_%s' % (id_, index)
widget = HiddenInput()
widget.is_required = self.is_required
subwidgets.append(widget.get_context(name, value_, widget_attrs)['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def value_from_datadict(self, data, files, name):
try:
getter = data.getlist
except AttributeError:
getter = data.get
return getter(name)
def format_value(self, value):
return [] if value is None else value
class FileInput(Input):
input_type = 'file'
needs_multipart_form = True
template_name = 'django/forms/widgets/file.html'
def format_value(self, value):
"""File input never renders a value."""
return
def value_from_datadict(self, data, files, name):
"File widgets take data from FILES, not POST"
return files.get(name)
def value_omitted_from_data(self, data, files, name):
return name not in files
FILE_INPUT_CONTRADICTION = object()
class ClearableFileInput(FileInput):
clear_checkbox_label = _('Clear')
initial_text = _('Currently')
input_text = _('Change')
template_name = 'django/forms/widgets/clearable_file_input.html'
def clear_checkbox_name(self, name):
"""
Given the name of the file input, return the name of the clear checkbox
input.
"""
return name + '-clear'
def clear_checkbox_id(self, name):
"""
Given the name of the clear checkbox input, return the HTML id for it.
"""
return name + '_id'
def is_initial(self, value):
"""
Return whether value is considered to be initial value.
"""
return bool(value and getattr(value, 'url', False))
def format_value(self, value):
"""
Return the file object if it has a defined url attribute.
"""
if self.is_initial(value):
return value
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
context['widget'].update({
'checkbox_name': checkbox_name,
'checkbox_id': checkbox_id,
'is_initial': self.is_initial(value),
'input_text': self.input_text,
'initial_text': self.initial_text,
'clear_checkbox_label': self.clear_checkbox_label,
})
return context
def value_from_datadict(self, data, files, name):
upload = super().value_from_datadict(data, files, name)
if not self.is_required and CheckboxInput().value_from_datadict(
data, files, self.clear_checkbox_name(name)):
if upload:
# If the user contradicts themselves (uploads a new file AND
# checks the "clear" checkbox), we return a unique marker
# object that FileField will turn into a ValidationError.
return FILE_INPUT_CONTRADICTION
# False signals to clear any existing value, as opposed to just None
return False
return upload
def use_required_attribute(self, initial):
return super().use_required_attribute(initial) and not initial
def value_omitted_from_data(self, data, files, name):
return (
super().value_omitted_from_data(data, files, name) and
self.clear_checkbox_name(name) not in data
)
class Textarea(Widget):
template_name = 'django/forms/widgets/textarea.html'
def __init__(self, attrs=None):
# Use slightly better defaults than HTML's 20x2 box
default_attrs = {'cols': '40', 'rows': '10'}
if attrs:
default_attrs.update(attrs)
super().__init__(default_attrs)
class DateTimeBaseInput(TextInput):
format_key = ''
supports_microseconds = False
def __init__(self, attrs=None, format=None):
super().__init__(attrs)
self.format = format if format else None
def format_value(self, value):
return formats.localize_input(value, self.format or formats.get_format(self.format_key)[0])
class DateInput(DateTimeBaseInput):
format_key = 'DATE_INPUT_FORMATS'
template_name = 'django/forms/widgets/date.html'
class DateTimeInput(DateTimeBaseInput):
format_key = 'DATETIME_INPUT_FORMATS'
template_name = 'django/forms/widgets/datetime.html'
class TimeInput(DateTimeBaseInput):
format_key = 'TIME_INPUT_FORMATS'
template_name = 'django/forms/widgets/time.html'
# Defined at module level so that CheckboxInput is picklable (#17976)
def boolean_check(v):
return not (v is False or v is None or v == '')
class CheckboxInput(Input):
input_type = 'checkbox'
template_name = 'django/forms/widgets/checkbox.html'
def __init__(self, attrs=None, check_test=None):
super().__init__(attrs)
# check_test is a callable that takes a value and returns True
# if the checkbox should be checked for that value.
self.check_test = boolean_check if check_test is None else check_test
def format_value(self, value):
"""Only return the 'value' attribute if value isn't empty."""
if value is True or value is False or value is None or value == '':
return
return str(value)
def get_context(self, name, value, attrs):
if self.check_test(value):
if attrs is None:
attrs = {}
attrs['checked'] = True
return super().get_context(name, value, attrs)
def value_from_datadict(self, data, files, name):
if name not in data:
# A missing value means False because HTML form submission does not
# send results for unselected checkboxes.
return False
value = data.get(name)
# Translate true and false strings to boolean values.
values = {'true': True, 'false': False}
if isinstance(value, str):
value = values.get(value.lower(), value)
return bool(value)
def value_omitted_from_data(self, data, files, name):
# HTML checkboxes don't appear in POST data if not checked, so it's
# never known if the value is actually omitted.
return False
class ChoiceWidget(Widget):
allow_multiple_selected = False
input_type = None
template_name = None
option_template_name = None
add_id_index = True
checked_attribute = {'checked': True}
option_inherits_attrs = True
def __init__(self, attrs=None, choices=()):
super().__init__(attrs)
# choices can be any iterable, but we may need to render this widget
# multiple times. Thus, collapse it into a list so it can be consumed
# more than once.
self.choices = list(choices)
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
obj.choices = copy.copy(self.choices)
memo[id(self)] = obj
return obj
def subwidgets(self, name, value, attrs=None):
"""
Yield all "subwidgets" of this widget. Used to enable iterating
options from a BoundField for choice widgets.
"""
value = self.format_value(value)
yield from self.options(name, value, attrs)
def options(self, name, value, attrs=None):
"""Yield a flat list of options for this widgets."""
for group in self.optgroups(name, value, attrs):
yield from group[1]
def optgroups(self, name, value, attrs=None):
"""Return a list of optgroups for this widget."""
groups = []
has_selected = False
for index, (option_value, option_label) in enumerate(self.choices):
if option_value is None:
option_value = ''
subgroup = []
if isinstance(option_label, (list, tuple)):
group_name = option_value
subindex = 0
choices = option_label
else:
group_name = None
subindex = None
choices = [(option_value, option_label)]
groups.append((group_name, subgroup, index))
for subvalue, sublabel in choices:
selected = (
str(subvalue) in value and
(not has_selected or self.allow_multiple_selected)
)
if selected and not has_selected:
has_selected = True
subgroup.append(self.create_option(
name, subvalue, sublabel, selected, index,
subindex=subindex, attrs=attrs,
))
if subindex is not None:
subindex += 1
return groups
def create_option(self, name, value, label, selected, index, subindex=None, attrs=None):
index = str(index) if subindex is None else "%s_%s" % (index, subindex)
if attrs is None:
attrs = {}
option_attrs = self.build_attrs(self.attrs, attrs) if self.option_inherits_attrs else {}
if selected:
option_attrs.update(self.checked_attribute)
if 'id' in option_attrs:
option_attrs['id'] = self.id_for_label(option_attrs['id'], index)
return {
'name': name,
'value': value,
'label': label,
'selected': selected,
'index': index,
'attrs': option_attrs,
'type': self.input_type,
'template_name': self.option_template_name,
}
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget']['optgroups'] = self.optgroups(name, context['widget']['value'], attrs)
context['wrap_label'] = True
return context
def id_for_label(self, id_, index='0'):
"""
Use an incremented id for each option where the main widget
references the zero index.
"""
if id_ and self.add_id_index:
id_ = '%s_%s' % (id_, index)
return id_
def value_from_datadict(self, data, files, name):
getter = data.get
if self.allow_multiple_selected:
with suppress(AttributeError):
getter = data.getlist
return getter(name)
def format_value(self, value):
"""Return selected values as a list."""
if not isinstance(value, (tuple, list)):
value = [value]
return [str(v) if v is not None else '' for v in value]
class Select(ChoiceWidget):
input_type = 'select'
template_name = 'django/forms/widgets/select.html'
option_template_name = 'django/forms/widgets/select_option.html'
add_id_index = False
checked_attribute = {'selected': True}
option_inherits_attrs = False
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
if self.allow_multiple_selected:
context['widget']['attrs']['multiple'] = 'multiple'
return context
@staticmethod
def _choice_has_empty_value(choice):
"""Return True if the choice's value is empty string or None."""
value, _ = choice
return (isinstance(value, str) and not bool(value)) or value is None
def use_required_attribute(self, initial):
"""
Don't render 'required' if the first <option> has a value, as that's
invalid HTML.
"""
use_required_attribute = super().use_required_attribute(initial)
# 'required' is always okay for <select multiple>.
if self.allow_multiple_selected:
return use_required_attribute
first_choice = next(iter(self.choices), None)
return use_required_attribute and first_choice is not None and self._choice_has_empty_value(first_choice)
class NullBooleanSelect(Select):
"""
A Select Widget intended to be used with NullBooleanField.
"""
def __init__(self, attrs=None):
choices = (
('1', _('Unknown')),
('2', _('Yes')),
('3', _('No')),
)
super().__init__(attrs, choices)
def format_value(self, value):
try:
return {True: '2', False: '3', '2': '2', '3': '3'}[value]
except KeyError:
return '1'
def value_from_datadict(self, data, files, name):
value = data.get(name)
return {
'2': True,
True: True,
'True': True,
'3': False,
'False': False,
False: False,
}.get(value)
class SelectMultiple(Select):
allow_multiple_selected = True
def value_from_datadict(self, data, files, name):
try:
getter = data.getlist
except AttributeError:
getter = data.get
return getter(name)
def value_omitted_from_data(self, data, files, name):
# An unselected <select multiple> doesn't appear in POST data, so it's
# never known if the value is actually omitted.
return False
class RadioSelect(ChoiceWidget):
input_type = 'radio'
template_name = 'django/forms/widgets/radio.html'
option_template_name = 'django/forms/widgets/radio_option.html'
class CheckboxSelectMultiple(ChoiceWidget):
allow_multiple_selected = True
input_type = 'checkbox'
template_name = 'django/forms/widgets/checkbox_select.html'
option_template_name = 'django/forms/widgets/checkbox_option.html'
def use_required_attribute(self, initial):
# Don't use the 'required' attribute because browser validation would
# require all checkboxes to be checked instead of at least one.
return False
def value_omitted_from_data(self, data, files, name):
# HTML checkboxes don't appear in POST data if not checked, so it's
# never known if the value is actually omitted.
return False
def id_for_label(self, id_, index=None):
""""
Don't include for="field_0" in <label> because clicking such a label
would toggle the first checkbox.
"""
if index is None:
return ''
return super().id_for_label(id_, index)
class MultiWidget(Widget):
"""
A widget that is composed of multiple widgets.
In addition to the values added by Widget.get_context(), this widget
adds a list of subwidgets to the context as widget['subwidgets'].
These can be looped over and rendered like normal widgets.
You'll probably want to use this class with MultiValueField.
"""
template_name = 'django/forms/widgets/multiwidget.html'
def __init__(self, widgets, attrs=None):
self.widgets = [w() if isinstance(w, type) else w for w in widgets]
super().__init__(attrs)
@property
def is_hidden(self):
return all(w.is_hidden for w in self.widgets)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
final_attrs = context['widget']['attrs']
input_type = final_attrs.pop('type', None)
id_ = final_attrs.get('id')
subwidgets = []
for i, widget in enumerate(self.widgets):
if input_type is not None:
widget.input_type = input_type
widget_name = '%s_%s' % (name, i)
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
widget_attrs = final_attrs.copy()
widget_attrs['id'] = '%s_%s' % (id_, i)
else:
widget_attrs = final_attrs
subwidgets.append(widget.get_context(widget_name, widget_value, widget_attrs)['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def id_for_label(self, id_):
if id_:
id_ += '_0'
return id_
def value_from_datadict(self, data, files, name):
return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]
def value_omitted_from_data(self, data, files, name):
return all(
widget.value_omitted_from_data(data, files, name + '_%s' % i)
for i, widget in enumerate(self.widgets)
)
def decompress(self, value):
"""
Return a list of decompressed values for the given compressed value.
The given value can be assumed to be valid, but not necessarily
non-empty.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _get_media(self):
"""
Media for a multiwidget is the combination of all media of the
subwidgets.
"""
media = Media()
for w in self.widgets:
media = media + w.media
return media
media = property(_get_media)
def __deepcopy__(self, memo):
obj = super().__deepcopy__(memo)
obj.widgets = copy.deepcopy(self.widgets)
return obj
@property
def needs_multipart_form(self):
return any(w.needs_multipart_form for w in self.widgets)
class SplitDateTimeWidget(MultiWidget):
"""
A widget that splits datetime input into two <input type="text"> boxes.
"""
supports_microseconds = False
template_name = 'django/forms/widgets/splitdatetime.html'
def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None):
widgets = (
DateInput(
attrs=attrs if date_attrs is None else date_attrs,
format=date_format,
),
TimeInput(
attrs=attrs if time_attrs is None else time_attrs,
format=time_format,
),
)
super().__init__(widgets)
def decompress(self, value):
if value:
value = to_current_timezone(value)
return [value.date(), value.time().replace(microsecond=0)]
return [None, None]
class SplitHiddenDateTimeWidget(SplitDateTimeWidget):
"""
A widget that splits datetime input into two <input type="hidden"> inputs.
"""
template_name = 'django/forms/widgets/splithiddendatetime.html'
def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None):
super().__init__(attrs, date_format, time_format, date_attrs, time_attrs)
for widget in self.widgets:
widget.input_type = 'hidden'
class SelectDateWidget(Widget):
"""
A widget that splits date input into three <select> boxes.
This also serves as an example of a Widget that has more than one HTML
element and hence implements value_from_datadict.
"""
none_value = (0, '---')
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
template_name = 'django/forms/widgets/select_date.html'
input_type = 'select'
select_widget = Select
date_re = re.compile(r'(\d{4}|0)-(\d\d?)-(\d\d?)$')
def __init__(self, attrs=None, years=None, months=None, empty_label=None):
self.attrs = attrs or {}
# Optional list or tuple of years to use in the "year" select box.
if years:
self.years = years
else:
this_year = datetime.date.today().year
self.years = range(this_year, this_year + 10)
# Optional dict of months to use in the "month" select box.
if months:
self.months = months
else:
self.months = MONTHS
# Optional string, list, or tuple to use as empty_label.
if isinstance(empty_label, (list, tuple)):
if not len(empty_label) == 3:
raise ValueError('empty_label list/tuple must have 3 elements.')
self.year_none_value = (0, empty_label[0])
self.month_none_value = (0, empty_label[1])
self.day_none_value = (0, empty_label[2])
else:
if empty_label is not None:
self.none_value = (0, empty_label)
self.year_none_value = self.none_value
self.month_none_value = self.none_value
self.day_none_value = self.none_value
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
date_context = {}
year_choices = [(i, str(i)) for i in self.years]
if not self.is_required:
year_choices.insert(0, self.year_none_value)
year_attrs = context['widget']['attrs'].copy()
year_name = self.year_field % name
year_attrs['id'] = 'id_%s' % year_name
date_context['year'] = self.select_widget(attrs, choices=year_choices).get_context(
name=year_name,
value=context['widget']['value']['year'],
attrs=year_attrs,
)
month_choices = list(self.months.items())
if not self.is_required:
month_choices.insert(0, self.month_none_value)
month_attrs = context['widget']['attrs'].copy()
month_name = self.month_field % name
month_attrs['id'] = 'id_%s' % month_name
date_context['month'] = self.select_widget(attrs, choices=month_choices).get_context(
name=month_name,
value=context['widget']['value']['month'],
attrs=month_attrs,
)
day_choices = [(i, i) for i in range(1, 32)]
if not self.is_required:
day_choices.insert(0, self.day_none_value)
day_attrs = context['widget']['attrs'].copy()
day_name = self.day_field % name
day_attrs['id'] = 'id_%s' % day_name
date_context['day'] = self.select_widget(attrs, choices=day_choices,).get_context(
name=day_name,
value=context['widget']['value']['day'],
attrs=day_attrs,
)
subwidgets = []
for field in self._parse_date_fmt():
subwidgets.append(date_context[field]['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def format_value(self, value):
"""
Return a dict containing the year, month, and day of the current value.
Use dict instead of a datetime to allow invalid dates such as February
31 to display correctly.
"""
year, month, day = None, None, None
if isinstance(value, (datetime.date, datetime.datetime)):
year, month, day = value.year, value.month, value.day
elif isinstance(value, str):
if settings.USE_L10N:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
d = datetime.datetime.strptime(value, input_format)
except ValueError:
pass
else:
year, month, day = d.year, d.month, d.day
match = self.date_re.match(value)
if match:
year, month, day = [int(val) for val in match.groups()]
return {'year': year, 'month': month, 'day': day}
@staticmethod
def _parse_date_fmt():
fmt = get_format('DATE_FORMAT')
escaped = False
for char in fmt:
if escaped:
escaped = False
elif char == '\\':
escaped = True
elif char in 'Yy':
yield 'year'
elif char in 'bEFMmNn':
yield 'month'
elif char in 'dj':
yield 'day'
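        # For example, DATE_FORMAT = 'N j, Y' yields 'month', 'day', 'year' in that order.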
def id_for_label(self, id_):
for first_select in self._parse_date_fmt():
return '%s_%s' % (id_, first_select)
return '%s_month' % id_
def value_from_datadict(self, data, files, name):
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == m == d == "0":
return None
if y and m and d:
if settings.USE_L10N:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
date_value = datetime.date(int(y), int(m), int(d))
except ValueError:
return '%s-%s-%s' % (y, m, d)
else:
date_value = datetime_safe.new_date(date_value)
return date_value.strftime(input_format)
else:
return '%s-%s-%s' % (y, m, d)
return data.get(name)
def value_omitted_from_data(self, data, files, name):
return not any(
('{}_{}'.format(name, interval) in data)
for interval in ('year', 'month', 'day')
)
|
|
#!/usr/bin/env python
class Node(object):
def __init__(self, key):
self.__key = key
self.__parent = None
self.__left_child = None
self.__sibling = None
def __lt__(self, node):
"""Making nodes sortable"""
return self.key < node.key
@property
def key(self):
return self.__key
@key.setter
def key(self, key):
self.__key = key
@property
def left_child(self):
return self.__left_child
@left_child.setter
def left_child(self, left_child):
self.__left_child = left_child
if left_child.parent is None:
left_child.parent = self
@property
def parent(self):
return self.__parent
@parent.setter
def parent(self, parent):
self.__parent = parent
@property
def sibling(self):
return self.__sibling
@sibling.setter
def sibling(self, sibling):
self.__sibling = sibling
def __repr__(self):
return '(key: "{}")'.format(self.key)
def more(self):
"""More info about the node"""
        # degree = number of children: the left child plus its sibling chain
        degree = 0
        child = self.left_child
        while child is not None:
            degree += 1
            child = child.sibling
        node = 'N: (\n\tkey: "{}"\n\tdeg: "{}"'.format(self.key, degree)
        if self.parent is not None:
            node += '\n\tparent: "{}"'.format(self.parent.key)
        if self.sibling is not None:
            node += '\n\tsibling: "{}"'.format(self.sibling)
if self.left_child is not None:
node += '\n\tleft child: "{}"'.format(self.left_child.key)
return node + '\n)'
class Tree(object):
def __init__(self, root):
self.__root = root
def __lt__(self, tree):
"""Making trees sortable"""
return self.root < tree.root
def __len__(self):
"""Returns order of the tree
The order of the tree will be given by the
degree of the leftmost child
"""
order = 0
node = self.__root
#traverse childs until last
while node.left_child is not None:
node = node.left_child
order += 1
return order
@property
def root(self):
return self.__root
def __repr__(self):
return 'T: (root: {}\norder: {})'.format(self.root, len(self))
class Heap(object):
    def __init__(self, trees=()):
        self.__trees = {}
        #load the tree list into a dictionary
        #key: degree (order) of tree, value: tree itself
for tree in trees:
order = len(tree)
self.__trees[order] = tree
def __len__(self):
return len(self.__trees.keys())
def __getitem__(self, item):
return self.__trees[item]
@property
def orders(self):
return self.__trees.keys()
def remove_minimal(self):
"""Remove the smallest element of heap
Select smallest root of al trees.
Deletes that key and appends the new trees (if any)
to the trees dictionary
Returns:
the minimal tree now out of the heap
"""
min_order = min(self.__trees.keys())
min_tree = self.__trees.pop(min_order)
"""
now we have the smallest tree
        we must rearrange the heap by removing the root
and allocating its subtrees into the heap again
create trees from left child and travel siblings
append those trees to heap
"""
#get 1st generation siblings
left_child = min_tree.root.left_child
if left_child:
children = []
children.append(Tree(left_child))
while left_child.sibling:
sibling = left_child.sibling
child_tree = Tree(sibling)
children.append(child_tree)
left_child = sibling
#put the children again into heap
for tree in children:
self.append(tree)
return min_tree
def append(self, tree):
"""Appends a new tree in the heap
A new tree wants to go in:
1. Check order of this tree
2. If there is no tree with that order, just append
3. If there is another tree with same order, merge (insert())
4. Take account of the merged trees and remove them from dictionary
5. Repeat
"""
order = len(tree)
if order not in self.__trees.keys():
self.__trees[order] = tree
else:
#invalid state, merge trees of same order
new_tree = insert(tree, self.__trees[order])
#after the insertion we have a new tree of new order
#so we need to remove the previous order
self.__trees.pop(order)
#and see if we have any other trees with same order in our heap
self.append(new_tree)
def __repr__(self):
return str(self.__trees)
def build_heap(trees):
    """Build a heap by appending each tree, merging trees of equal order."""
    heap = Heap()
    for tree in trees:
        heap.append(tree)
    return heap
def remove_minimal(heap):
    """Remove and return the minimal tree of the heap.
    Delegates to Heap.remove_minimal, which also re-appends the subtrees
    of the removed root.
    """
    return heap.remove_minimal()
def heapify(tree):
    """Rearrange a tree to maintain the min-heap property:
    the key of the root must not be greater than its children's keys.
    If a child holds a smaller key, swap the keys and heapify that
    child's subtree.
    """
    root = tree.root
    #children of the root are its left child plus that child's sibling chain
    min_child = None
    child = root.left_child
    while child is not None:
        if min_child is None or child.key < min_child.key:
            min_child = child
        child = child.sibling
    if min_child is not None and min_child.key < root.key:
        min_child.key, root.key = root.key, min_child.key
        heapify(Tree(min_child))
    return tree
def insert(tree_1, tree_2):
"""Inserts one tree into another
Check if the order of trees is the same
After that, put the highest root as left most child
of the lowest root
Parameters:
tree_1, tree_2: Trees to compare and join
Returns:
tree with lowest root key
"""
if len(tree_1) != len(tree_2):
raise ValueError
root_1, root_2 = tree_1.root, tree_2.root
if root_1 < root_2:
if root_1.left_child is not None:
#put previous left child as sibling
root_2.sibling = root_1.left_child
root_1.left_child = root_2
return tree_1
else:
if root_2.left_child is not None:
root_1.sibling = root_2.left_child
root_2.left_child = root_1
return tree_2
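# Minimal usage sketch (assumes only the Node/Tree/Heap classes defined above):
if __name__ == '__main__':
    heap = Heap()
    for key in (7, 3, 9, 1):
        heap.append(Tree(Node(key)))
    print(heap)
    smallest = heap.remove_minimal()
    print(smallest.root)  # -> (key: "1")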
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates test runner factory and tests for GTests."""
# pylint: disable=W0212
import logging
import os
import sys
from pylib import constants
from pylib.base import base_setup
from pylib.base import base_test_result
from pylib.base import test_dispatcher
from pylib.device import device_utils
from pylib.gtest import test_package_apk
from pylib.gtest import test_package_exe
from pylib.gtest import test_runner
sys.path.insert(0,
os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib',
'common'))
import unittest_util # pylint: disable=F0401
ISOLATE_FILE_PATHS = {
'base_unittests': 'base/base_unittests.isolate',
'blink_heap_unittests':
'third_party/WebKit/Source/platform/heap/BlinkHeapUnitTests.isolate',
'breakpad_unittests': 'breakpad/breakpad_unittests.isolate',
'cc_perftests': 'cc/cc_perftests.isolate',
'components_unittests': 'components/components_unittests.isolate',
'content_browsertests': 'content/content_browsertests.isolate',
'content_unittests': 'content/content_unittests.isolate',
'media_perftests': 'media/media_perftests.isolate',
'media_unittests': 'media/media_unittests.isolate',
'net_unittests': 'net/net_unittests.isolate',
'sql_unittests': 'sql/sql_unittests.isolate',
'sync_unit_tests': 'sync/sync_unit_tests.isolate',
'ui_base_unittests': 'ui/base/ui_base_tests.isolate',
'unit_tests': 'chrome/unit_tests.isolate',
'webkit_unit_tests':
'third_party/WebKit/Source/web/WebKitUnitTests.isolate',
}
# Used for filtering large data deps at a finer grain than what's allowed in
# isolate files since pushing deps to devices is expensive.
# Wildcards are allowed.
DEPS_EXCLUSION_LIST = [
'chrome/test/data/extensions/api_test',
'chrome/test/data/extensions/secure_shell',
'chrome/test/data/firefox*',
'chrome/test/data/gpu',
'chrome/test/data/image_decoding',
'chrome/test/data/import',
'chrome/test/data/page_cycler',
'chrome/test/data/perf',
'chrome/test/data/pyauto_private',
'chrome/test/data/safari_import',
'chrome/test/data/scroll',
'chrome/test/data/third_party',
'third_party/hunspell_dictionaries/*.dic',
# crbug.com/258690
'webkit/data/bmp_decoder',
'webkit/data/ico_decoder',
]
def _GetDisabledTestsFilterFromFile(suite_name):
"""Returns a gtest filter based on the *_disabled file.
Args:
suite_name: Name of the test suite (e.g. base_unittests).
Returns:
A gtest filter which excludes disabled tests.
Example: '*-StackTrace.*:StringPrintfTest.StringPrintfMisc'
"""
filter_file_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'filter', '%s_disabled' % suite_name)
if not filter_file_path or not os.path.exists(filter_file_path):
logging.info('No filter file found at %s', filter_file_path)
return '*'
  filters = [x for x in [x.strip() for x in open(filter_file_path).readlines()]
if x and x[0] != '#']
disabled_filter = '*-%s' % ':'.join(filters)
logging.info('Applying filter "%s" obtained from %s',
disabled_filter, filter_file_path)
return disabled_filter
def _GetTests(test_options, test_package, devices):
"""Get a list of tests.
Args:
test_options: A GTestOptions object.
test_package: A TestPackageApk object.
devices: A list of attached devices.
Returns:
A list of all the tests in the test suite.
"""
class TestListResult(base_test_result.BaseTestResult):
def __init__(self):
super(TestListResult, self).__init__(
'gtest_list_tests', base_test_result.ResultType.PASS)
self.test_list = []
def TestListerRunnerFactory(device, _shard_index):
class TestListerRunner(test_runner.TestRunner):
def RunTest(self, _test):
result = TestListResult()
self.test_package.Install(self.device)
result.test_list = self.test_package.GetAllTests(self.device)
results = base_test_result.TestRunResults()
results.AddResult(result)
return results, None
return TestListerRunner(test_options, device, test_package)
results, _no_retry = test_dispatcher.RunTests(
['gtest_list_tests'], TestListerRunnerFactory, devices)
tests = []
for r in results.GetAll():
tests.extend(r.test_list)
return tests
def _FilterTestsUsingPrefixes(all_tests, pre=False, manual=False):
"""Removes tests with disabled prefixes.
Args:
all_tests: List of tests to filter.
pre: If True, include tests with PRE_ prefix.
manual: If True, include tests with MANUAL_ prefix.
Returns:
List of tests remaining.
"""
filtered_tests = []
filter_prefixes = ['DISABLED_', 'FLAKY_', 'FAILS_']
if not pre:
filter_prefixes.append('PRE_')
if not manual:
filter_prefixes.append('MANUAL_')
for t in all_tests:
test_case, test = t.split('.', 1)
if not any([test_case.startswith(prefix) or test.startswith(prefix) for
prefix in filter_prefixes]):
filtered_tests.append(t)
return filtered_tests
def _FilterDisabledTests(tests, suite_name, has_gtest_filter):
"""Removes disabled tests from |tests|.
Applies the following filters in order:
1. Remove tests with disabled prefixes.
2. Remove tests specified in the *_disabled files in the 'filter' dir
Args:
tests: List of tests.
suite_name: Name of the test suite (e.g. base_unittests).
has_gtest_filter: Whether a gtest_filter is provided.
Returns:
List of tests remaining.
"""
tests = _FilterTestsUsingPrefixes(
tests, has_gtest_filter, has_gtest_filter)
tests = unittest_util.FilterTestNames(
tests, _GetDisabledTestsFilterFromFile(suite_name))
return tests
def Setup(test_options, devices):
"""Create the test runner factory and tests.
Args:
test_options: A GTestOptions object.
devices: A list of attached devices.
Returns:
A tuple of (TestRunnerFactory, tests).
"""
test_package = test_package_apk.TestPackageApk(test_options.suite_name)
if not os.path.exists(test_package.suite_path):
exe_test_package = test_package_exe.TestPackageExecutable(
test_options.suite_name)
if not os.path.exists(exe_test_package.suite_path):
raise Exception(
'Did not find %s target. Ensure it has been built.\n'
'(not found at %s or %s)'
% (test_options.suite_name,
test_package.suite_path,
exe_test_package.suite_path))
test_package = exe_test_package
logging.warning('Found target %s', test_package.suite_path)
base_setup.GenerateDepsDirUsingIsolate(test_options.suite_name,
test_options.isolate_file_path,
ISOLATE_FILE_PATHS,
DEPS_EXCLUSION_LIST)
def push_data_deps_to_device_dir(device):
device_dir = (constants.TEST_EXECUTABLE_DIR
if test_package.suite_name == 'breakpad_unittests'
else device.GetExternalStoragePath())
base_setup.PushDataDeps(device, device_dir, test_options)
device_utils.DeviceUtils.parallel(devices).pMap(push_data_deps_to_device_dir)
tests = _GetTests(test_options, test_package, devices)
# Constructs a new TestRunner with the current options.
def TestRunnerFactory(device, _shard_index):
return test_runner.TestRunner(
test_options,
device,
test_package)
if test_options.run_disabled:
test_options = test_options._replace(
test_arguments=('%s --gtest_also_run_disabled_tests' %
test_options.test_arguments))
else:
tests = _FilterDisabledTests(tests, test_options.suite_name,
bool(test_options.gtest_filter))
if test_options.gtest_filter:
tests = unittest_util.FilterTestNames(tests, test_options.gtest_filter)
# Coalesce unit tests into a single test per device
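  # e.g. with two devices and tests ['A.a', 'A.b', 'B.c', 'B.d'] this yields
  # ['A.a:B.c', 'A.b:B.d'] -- one colon-joined gtest_filter string per device.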
if test_options.suite_name != 'content_browsertests':
num_devices = len(devices)
tests = [':'.join(tests[i::num_devices]) for i in xrange(num_devices)]
tests = [t for t in tests if t]
return (TestRunnerFactory, tests)
|
|
from tkinter import *
from tkinter import ttk
from collections import defaultdict
from tkinter import messagebox
class loadingGUI():
def __init__(self, master):
self.master = master
self.master.title("Dataset selection")
master.protocol("WM_DELETE_WINDOW", self.catch_destroy)
# Set Button style
s = ttk.Style()
s.configure('Wait.TButton',foreground = 'red', state = 'disabled')
s.configure('Go.TButton', foreground = 'green', state = 'active')
# Initialise variables here
self.base_params = {'host': "mf2.dit.ie:8080",
'layer': "cso:ctygeom",
'srs_code': 29902,
'properties': "",
'geom_field': "",
'filter_property': "",
'filter_values': ""} # dict to store the fetch params
self.param1 = StringVar()
self.param2 = StringVar()
self.param3 = StringVar()
self.param4 = StringVar()
self.param5 = StringVar()
self.param6 = StringVar()
self.param7 = StringVar()
self.gis_stack_text = StringVar()
self.info_text = StringVar()
self.selected_item = StringVar()
self.meta_list = StringVar()
self.data_headings_list = StringVar()
self.data_item = StringVar()
self.data_dict = {}
        self.gis_stack = [] # stack to store items to send to GIS
self.params_list = [self.param1,
self.param2,
self.param3,
self.param4,
self.param5,
self.param6,
self.param7] # list to allow iterative assignment and retrieval of
# params
        self.gj_stack = defaultdict(list) #dict to store retrieved geojson objects, keyed by layer
# Initialise the widgets
self.mainframe = ttk.Frame(self.master)
self.label1 = ttk.Label(self.mainframe,
text = "THIS GUI SUPPORTS INTERACTION WITH\n"+
"A GEOSERVER.",
foreground = 'black',
relief = 'sunken',
font =('Helvetica', '12'),
justify = 'center',
anchor = 'center')
self.label2 = ttk.Label(self.mainframe,
text = "Please use buttons to select datasets or enter custom\n"
+ "parameters using the boxes on the left",
foreground = 'blue',
relief = 'sunken',
anchor = 'center')
self.entry_frame = ttk.LabelFrame(self.mainframe,
text = 'Enter parameters here:',
relief = 'sunken')
self.display_frame = ttk.LabelFrame(self.mainframe,
text = 'Current Parameters:',
relief = 'sunken')
self.button_frame = ttk.LabelFrame(self.mainframe,
text = 'Select one of the datasets\n' +
'by clicking the button',
relief = 'sunken')
self.geojson_nav_frame = ttk.LabelFrame(self.mainframe,
                                       text = 'Please explore the fetched data here',
relief = 'sunken')
self.entry1 = ttk.Entry(self.entry_frame,
textvariable = self.param1)
self.entry2 = ttk.Entry(self.entry_frame,
textvariable = self.param2)
self.entry3 = ttk.Entry(self.entry_frame,
textvariable = self.param3)
self.entry4 = ttk.Entry(self.entry_frame,
textvariable = self.param4)
self.entry5 = ttk.Entry(self.entry_frame,
textvariable = self.param5)
self.entry6 = ttk.Entry(self.entry_frame,
textvariable = self.param6)
self.entry7 = ttk.Entry(self.entry_frame,
textvariable = self.param7)
self.lbl_p1 = ttk.Label(self.entry_frame,
foreground = 'green',
text = 'host:')
self.lbl_p2 = ttk.Label(self.entry_frame,
foreground = 'green',
text = 'layer')
self.lbl_p3 = ttk.Label(self.entry_frame,
foreground = 'green',
text = 'spatial ref:')
self.lbl_p4 = ttk.Label(self.entry_frame,
foreground = 'green',
text = 'properties:')
self.lbl_p5 = ttk.Label(self.entry_frame,
foreground = 'green',
text = 'geom field:')
self.lbl_p6 = ttk.Label(self.entry_frame,
foreground = 'green',
text = 'filter field:')
self.lbl_p7 = ttk.Label(self.entry_frame,
foreground = 'green',
text = 'filter criteria:')
self.button_load_params = ttk.Button(self.entry_frame,
text = "^ Load ^",
command = self.load_params)
self.display1 = ttk.Label(self.display_frame,
foreground = 'red',
anchor = 'center',
padding = 1)
self.display2 = ttk.Label(self.display_frame,
foreground = 'red',
anchor = 'center',
padding = 1)
self.display3 = ttk.Label(self.display_frame,
foreground = 'red',
anchor = 'center',
padding = 1)
self.display4 = ttk.Label(self.display_frame,
foreground = 'red',
anchor = 'center',
padding = 1)
self.display5 = ttk.Label(self.display_frame,
foreground = 'red',
anchor = 'center',
padding = 1)
self.display6 = ttk.Label(self.display_frame,
foreground = 'red',
anchor = 'center',
padding = 1)
self.display7 = ttk.Label(self.display_frame,
foreground = 'red',
anchor = 'center',
padding = 1)
self.button_County = ttk.Button(self.button_frame,
text = 'County Polygons',
command = self.county_polygons)
self.button_Towns = ttk.Button(self.button_frame,
text = 'Town Points',
command = self.town_points)
self.button_LargeTowns = ttk.Button(self.button_frame,
text = 'Large Town Points',
command = self.large_town_points)
self.button_EDs = ttk.Button(self.button_frame,
text = 'ED Polygons',
command = self.ed_polygons)
self.button_Provinces = ttk.Button(self.button_frame,
text = 'Province Polygons',
command = self.province_polygons)
self.button_SAs = ttk.Button(self.button_frame,
text = 'SA Polygons',
command = self.sa_polygons)
self.button_Fetch = ttk.Button(self.display_frame,
text = '^ FETCH ^',
command = self.fetch_geojson)
self.geoj_cb = ttk.Combobox(self.geojson_nav_frame,
state = 'disabled')
self.button_gis_stack = ttk.Button(self.geojson_nav_frame,
text = 'Add to GIS Stack',
style = 'Go.TButton',
command = self.add_to_stack)
self.button_inspect_item = ttk.Button(self.geojson_nav_frame,
text = 'Inspect Item',
style = 'Wait.TButton',
command = self.inspect_item)
self.lbl_data1 = ttk.Label(self.geojson_nav_frame,
foreground = 'blue',
anchor = 'center',
text = 'Top Level Properties')
self.lbl_data2 = ttk.Label(self.geojson_nav_frame,
foreground = 'blue',
anchor = 'center',
text = 'Feature Properties')
self.frm_geoj_op = ttk.Frame(self.geojson_nav_frame)
self.lbl_data_example = ttk.Label(self.frm_geoj_op,
foreground = 'blue',
anchor = 'center',
text = 'Example Properties')
self.meta_tb = Listbox(self.geojson_nav_frame,
exportselection = 0,
bd = 5,
width = 40,
selectmode = SINGLE,
listvariable = self.meta_list
)
self.data_tb = Listbox(self.geojson_nav_frame,
exportselection = 0,
bd = 5,
width = 20,
selectmode = SINGLE,
listvariable = self.data_headings_list
)
self.lbl_gis_stack = ttk.Label(self.frm_geoj_op,
text = 'GIS Stack',
foreground = 'blue')
self.tb_data = ttk.Label(self.frm_geoj_op,
textvariable = self.data_item,
relief = 'sunken',
background = 'white',
)
self.tb_geoj_stack = ttk.Label(self.frm_geoj_op,
textvariable = self.gis_stack_text,
relief = 'sunken',
background = 'white',
)
self.info_label = Label(self.mainframe,
textvariable = self.info_text,
relief = 'sunken',
anchor = 'center')
self.info_text.set('Use the dialog above to explore the datasets')
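        # Lay out all of the widgets with the grid geometry manager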
self.mainframe.grid(row=0, column = 0)
self.label1.grid(row = 0, column = 0, columnspan = 4, sticky = 'ew')
self.entry_frame.grid(row = 2, column = 0, sticky = 'ns')
self.lbl_p1.grid(row = 0, column = 0, sticky = 'ew')
self.lbl_p2.grid(row = 1, column = 0, sticky = 'ew')
self.lbl_p3.grid(row = 2, column = 0, sticky = 'ew')
self.lbl_p4.grid(row = 3, column = 0, sticky = 'ew')
self.lbl_p5.grid(row = 4, column = 0, sticky = 'ew')
self.lbl_p6.grid(row = 5, column = 0, sticky = 'ew')
self.lbl_p7.grid(row = 6, column = 0, sticky = 'ew')
self.button_Fetch.grid(row = 7, column = 0,
columnspan= 2,
sticky = 'ew')
self.entry1.grid(row = 0, column = 1, sticky = 'ew')
self.entry2.grid(row = 1, column = 1, sticky = 'ew')
self.entry3.grid(row = 2, column = 1, sticky = 'ew')
self.entry4.grid(row = 3, column = 1, sticky = 'ew')
self.entry5.grid(row = 4, column = 1, sticky = 'ew')
self.entry6.grid(row = 5, column = 1, sticky = 'ew')
self.entry7.grid(row = 6, column = 1, sticky = 'ew')
self.button_load_params.grid(row = 7, column = 1, sticky = 'ew')
self.display_frame.grid(row = 2, column = 1, sticky = 'ns')
self.display1.grid(row = 0, sticky = 'ew')
self.display2.grid(row = 1, sticky = 'ew')
self.display3.grid(row = 2, sticky = 'ew')
self.display4.grid(row = 3, sticky = 'ew')
self.display5.grid(row = 4, sticky = 'ew')
self.display6.grid(row = 5, sticky = 'ew')
self.display7.grid(row = 6, sticky = 'ew')
for child, i in zip(self.display_frame.winfo_children(), self.params_list):
child.configure(text = i.get())
self.button_frame.grid(row = 2, column = 2, sticky = 'ns')
self.button_LargeTowns.grid(row = 0, sticky = 'ew')
self.button_County.grid(row = 1, sticky = 'ew')
self.button_EDs.grid(row = 2, sticky = 'ew')
self.button_Provinces.grid(row = 3, sticky = 'ew')
self.button_SAs.grid(row = 4, sticky = 'ew')
self.button_Towns.grid(row = 5, sticky = 'ew')
self.geojson_nav_frame.grid(row = 3, column = 0,
columnspan = 4, sticky = 'ew')
        self.frm_geoj_op.grid(row = 2, column = 2, sticky = 'n')
self.geoj_cb.grid(row = 0, column = 0,
columnspan = 2, sticky = 'nw')
self.button_inspect_item.grid(row = 0, column = 1)
self.button_gis_stack.grid(row = 0, column = 2)
self.lbl_data1.grid(row = 1, column = 0)
self.lbl_data2.grid(row = 1, column = 1)
self.meta_tb.grid(row = 2, column = 0)
self.data_tb.grid(row = 2, column = 1)
self.lbl_data_example.grid(row = 0, column = 0, sticky = 'new')
self.tb_data.grid(row = 1, column = 0, sticky = 'new')
self.lbl_gis_stack.grid(row = 2, column = 0, sticky ='new')
self.tb_geoj_stack.grid(row = 3, column = 0, sticky = 'nsew')
self.label2.grid(row = 1, column = 0, columnspan = 4, sticky = 'ew')
self.info_label.grid(row = 4, column = 0, columnspan = 4, sticky = 'ew')
#Event handling
self.geoj_cb_value = self.geoj_cb.bind("<<ComboboxSelected>>", self.geoj_cb_selection)
self.data_tb_value = self.data_tb.bind("<<ListboxSelect>>", self.item_selection)
def geoj_cb_selection(self, event):
#TODO function which blanks the display boxes when this changes
owner = event.widget
self.selected_item.set(owner.get())
def item_selection(self, event):
owner = event.widget
line = owner.get(owner.curselection())
item_str = str(self.data_dict[line])
if len(item_str) > 30:
item_str = "{}{}".format(item_str[:25],'....')
self.data_item.set(item_str)
def add_to_stack(self):
new_item = self.selected_item.get()
stack_contents = self.gis_stack_text.get()
if new_item in stack_contents:
            self.info_text.set('You already have this item in the stack;\n' +
                               'Please choose another or proceed to GIS')
pass
else:
self.gis_stack.append(self.gj_stack[new_item])
self.gis_stack_text.set(stack_contents + new_item + '\n')
def inspect_item(self):
if self.selected_item.get() != "":
item = self.gj_stack[self.selected_item.get()]
meta, data_hdgs, data_dict = self.geoj_exploder(item)
self.meta_list.set(meta)
self.data_headings_list.set(data_hdgs)
self.data_dict = data_dict
self.info_text.set('Please select the name of the feature from the Feature Properties list')
else:
self.info_text.set('There is no item selected.')
pass
def geoj_exploder(self, gj_obj):
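        # Split a GeoJSON FeatureCollection into its top-level (key, value)
        # pairs, the property names of the first feature, and that feature's
        # properties dict (used to preview an example feature).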
l1 = [(k,v) for k,v in gj_obj.items()]
i = gj_obj['features'][0]['properties']
l2 = list(i.keys())
d = i
return [l1, l2, d]
def send_to_gis(self):
#TODO add check to see if feature name is highlighted
#TODO add item to hold best name of feature
if self.data_item.get() == '':
self.info_text.set('Please highlight the feature name and send again:')
pass
else:
item = self.gj_stack[self.selected_item.get()]
self.gis_stack.append(item)
def load_params(self):
for child, i in zip(self.display_frame.winfo_children(), self.params_list):
child.configure(text = i.get())
def county_polygons(self):
self.params_list[1].set('cso:ctygeom')
self.load_params()
def town_points(self):
self.params_list[1].set('dit:geonames_populated')
self.load_params()
def sa_polygons(self):
self.params_list[1].set('cso:sageom')
self.load_params()
def large_town_points(self):
self.params_list[1].set('dit:geonames_pop_5000')
self.load_params()
def province_polygons(self):
self.params_list[1].set('cso:prgeom')
self.load_params()
def ed_polygons(self):
self.params_list[1].set('cso:edgeom')
self.load_params()
def fetch_geojson(self):
#TODO Set styles to show when gj_stack is loading
btn = self.button_Fetch
btn.configure(style = 'Wait.TButton')
self.param1.set(self.base_params['host'])
self.param3.set(self.base_params['srs_code'])
self.param4.set(self.base_params['properties'])
self.param5.set(self.base_params['geom_field'])
self.param6.set(self.base_params['filter_property'])
self.param7.set(self.base_params['filter_values'])
self.base_params['host'] = self.param1.get()
self.base_params['layer'] = self.param2.get()
self.base_params['srs_code'] = self.param3.get()
self.base_params['properties'] = self.param4.get()
self.base_params['geom_field'] = self.param5.get()
self.base_params['filter_property'] = self.param6.get()
self.base_params['filter_values'] = self.param7.get()
gj = self.get_geojson(self.base_params)
        # keep a dict of the fetched geojson objects, storing each layer only once
self.gj_stack[self.base_params['layer']] = gj
self.update_geoj_cb(self.gj_stack)
def update_geoj_cb(self, adict):
self.geoj_cb['values'] = [i for i in adict.keys()]
self.geoj_cb.state(['!disabled', 'readonly'])
def catch_destroy(self):
if messagebox.askokcancel("Quit", "Do you really want to quit?"):
self.master.destroy()
def get_geojson(self, params):
"""
This function accepts a dictionary of parameters and returns a GeoJSON representation of the requested layer. This
takes a format similar to the following example:
{
"host": "mf2.dit.ie:8080",
"layer": "cso:ctygeom",
"srs_code": 29902,
"properties": ["countyname", ],
"geom_field": "geom",
"filter_property": "countyname",
"filter_values": ["Cork", "Kerry"]
}
You can filter the set of features returned by adjusting "filter_values". This is a list of values that must
be present in "filter_property". In the above example you'd get the counties of Cork and Kerry plus Cork City.
Similarly, you can filter the properties returned to reduce their number. If you use this feature, you'll need to
set "geom_field" to the name of the geometry field. Geoserver can give you this.
All values in the dictionary are optional except "host" and "layer".
:param Dictionary as above:
:return: Parsed GeoJSON or exception as appropriate
"""
import urllib.parse
import httplib2
import os, os.path
import json
import xml.etree.ElementTree as etree
#
# Check that the parameters exist and/or sensible. Because the filter can contain some 'odd' characters such as '%'
# and single quotes the filter text needs to be url encoded so that text like "countyname LIKE '%Cork%'" becomes
# "countyname%20LIKE%20%27%25Cork%25%27" which is safer for URLs
#
if "host" not in params:
raise ValueError("Value for 'host' required")
if "layer" not in params:
raise ValueError("Value for 'layer' required")
if "srs_code" in params and params["srs_code"]:
srs_text = "&srsName=epsg:{}".format(params["srs_code"])
else:
srs_text = ""
if "properties" in params and params["properties"]:
item_string = ""
for item in params["properties"]:
item_string += str(item) + ","
if "geom_field" in params and params["geom_field"]:
item_string += str(params["geom_field"])
property_text = "&PROPERTYNAME={}".format(item_string)
else:
property_text = ""
if "filter_property" in params and params["filter_property"] and params["filter_values"]:
filter_text = "{filter_property} LIKE '%{filter_values}%'".format(filter_property=params["filter_property"], filter_values=params["filter_values"][0])
for item in range(1, len(params["filter_values"])):
filter_text += "OR {filter_property} LIKE '%{filter_values}%'".format(filter_property=params["filter_property"], filter_values=params["filter_values"][item])
filter_text = urllib.parse.quote(filter_text)
filter_text = "&CQL_FILTER=" + filter_text
else:
filter_text = ""
url = "http://{host}/geoserver/ows?" \
"service=WFS&version=1.0.0&" \
"request=GetFeature&" \
"typeName={layer}&" \
"outputFormat=json".format(host=params["host"], layer=params["layer"])
url += srs_text
url += property_text
url += filter_text
#
# Make a directory to hold downloads so that we don't have to repeatedly download them later, i.e. they already
# exist so we get them from a local directory. This directory is called .httpcache".
#
scriptDir = 'C:\\Python34'
cacheDir = os.path.join(scriptDir, ".httpcache")
if not os.path.exists(cacheDir):
os.mkdir(cacheDir)
#
# Go to the web and attempt to get the resource
#
try:
h = httplib2.Http(cacheDir)
response_headers, response = h.request(url)
response = response.decode()
#
            # Geoserver only sends valid data in the requested format, in our case GeoJSON, so if we get a response back in
# XML format we know that we have an error. We do minimal parsing on the xml to extract the error text and raise
# an exception based on it.
#
if response[:5] == "<?xml":
response = etree.fromstring(response)
xml_error = ""
for element in response:
xml_error += element.text
raise Exception(xml_error)
else:
return json.loads(response)
except httplib2.HttpLib2Error as e:
print(e)
def main():
root = Tk()
loadingGUI(root)
root.mainloop()
if __name__ == '__main__':
main()
|
|
"""Fab file for the blog project (install on Vex.net)"""
from fabric.api import *
from fabric.contrib import project
from fabric.contrib.files import exists
import os, glob, datetime, subprocess
env.user = 'mcfletch'
env.product = 'webtoys'
env.product_dir = os.path.join('/opt',env.product,'current' )
env.virtual_env = os.path.join(env.product_dir,'env' )
env.django_admin = 'DJANGO_SETTINGS_MODULE=webtoys.settings '+ os.path.join( env.virtual_env,'bin','django-admin.py' )
ssh_key = os.path.expanduser( os.path.join( '~', '.copytohosts', '.ssh','authorized_keys' ))
HERE = os.path.dirname( __file__ )
REQUIREMENTS_FILE = os.path.join( HERE, 'requirements.txt' )
ETC_SOURCE = os.path.join( HERE, 'etc' )
VAR_SOURCE = os.path.join( HERE, 'var' )
OPT_SOURCE = os.path.join( HERE, 'opt' )
HOME_SOURCE = os.path.join( HERE, 'home', env.user )
DEPENDENCIES = [
'supervisor',
'nginx',
'libpq-dev',
'python-dev',
'python-psycopg2',
'python-virtualenv',
'python-imaging',
'python-tz',
'openjdk-7-jre-headless', # webassets requirement...
'libav-tools',
'espeak',
'git',
]
def find_dist( name ):
"""Find the full path of the latest sdist of name"""
package_dir = os.path.dirname( __import__( name ).__file__ )
return find_setup( package_dir )
def find_setup( package_dir ):
"""Find setup.py file in the parents of package_dir"""
while package_dir:
packer = os.path.join( package_dir, 'setup.py' )
if os.path.exists( packer ):
return package_dir
new_package_dir = os.path.dirname( package_dir )
if new_package_dir == package_dir:
break
package_dir = new_package_dir
raise RuntimeError( "Unable to find package for %s"%( package_dir, ))
def forceinstall( source ):
"""Force install given project source into environment"""
virtual_env = env.virtual_env
filename = upload_project( find_dist(source) )
    sudo( '%(virtual_env)s/bin/pip install --force-reinstall -I --no-deps %(filename)s'%locals() )
WEBTOYS = find_dist( 'webtoys' )
TOYS = find_dist('toys')
PROJECT_SOURCES = [
WEBTOYS,
TOYS,
]
def empty_virtualenv(
environment,
need_system_flag=True, pip_params=''
):
with cd( '~%s'%(env.user,) ):
with settings( warn_only = True ):
if not exists( environment ):
sudo( 'mkdir -p %s'%(os.path.dirname(environment)))
if need_system_flag:
need_system_flag = '--system-site-packages'
else:
need_system_flag = ''
sudo( 'virtualenv %s %s'%( need_system_flag, environment,) )
def virtualenv(
environment, project_sources, pips,
need_system_flag=True, pip_params='',
):
"""Create a virtual environment at environment with pips and project_sources installed"""
empty_virtualenv( environment=environment, need_system_flag=need_system_flag, pip_params=pip_params )
packages = 'file:///home/%s/packages'%( env.user, )
sudo( '%(environment)s/bin/pip install --no-index --find-links=%(packages)s %(pip_params)s "setuptools==2.1"'%locals() )
sudo( '%(environment)s/bin/pip install --no-index --find-links=%(packages)s %(pip_params)s "pip==1.3"'%locals() )
if pips:
if not isinstance( pips, (list,tuple)):
raise RuntimeError( "Please use list form" )
source_pips = [x for x in pips if x.startswith( 'git+' )]
package_pips = [x for x in pips if not x.startswith( 'git+' )]
for set,extra_params in [
(source_pips,''),
(package_pips,'--no-index --find-links=%(packages)s'%locals())
]:
set = ' '.join( [repr(x) for x in set] )
sudo( 'PIP_DOWNLOAD_CACHE=~/.pip-cache %(environment)s/bin/pip install %(extra_params)s %(pip_params)s %(set)s'%locals() )
for project_source in project_sources:
filename = upload_project( project_source )
sudo( '%(environment)s/bin/pip install --no-deps %(filename)s'%locals() )
def upload_project( project_source ):
"""Find the setup.py, build the package, copy and install"""
subprocess.check_call(
'cd %(project_source)s && python setup.py sdist develop'%locals(),
shell=True,
)
files = glob.glob( os.path.join( project_source, 'dist', '*.tar.gz' ))
files.sort( key = lambda f: os.stat( f ).st_ctime )
current = files[-1]
base = os.path.basename( current )
run( 'mkdir -p ~/tmp' )
file = os.path.join( '~/tmp', base )
put( current, file )
return file
def django_command( command, *args ):
environment = env.virtual_env
admin = env.django_admin
command = '%(admin)s %(command)s '%locals()
command+= " ".join( [str(x) for x in args])
return command
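# For example, django_command('migrate') expands to roughly:
#   DJANGO_SETTINGS_MODULE=webtoys.settings /opt/webtoys/current/env/bin/django-admin.py migrate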
def django_admin( command, *args ):
run( django_command( command, *args ) )
def django_sudo( command, *args ):
sudo( django_command( command, *args ) )
def apt_update():
"""Update apt repositories"""
sudo( 'apt-get update' )
sudo( 'apt-get dist-upgrade --yes' )
sudo( 'apt-get autoremove --yes' )
def dependencies( dependencies ):
"""Install OS-level dependencies"""
if isinstance( dependencies, (list,tuple)):
dependencies = ' '.join( dependencies )
sudo( 'apt-get install --yes %s'%( dependencies ) )
sudo( 'apt-get autoremove --yes' )
def ensure_dependencies( update=False ):
"""Ensure our configure .deb dependencies are all installed"""
if update:
apt_update()
dependencies( DEPENDENCIES )
def django_collectstatic( ):
product = env.product
with settings( warn_only=True ):
sudo( 'mkdir -p /opt/%(product)s/current/www/static'%locals())
django_sudo( 'collectstatic', '--clear', '--noinput' )
django_sudo( 'assets', 'build', '--parse-templates' )
sudo('chmod -R go+r /opt/%(product)s/current/www/static'%locals())
def backup_current():
product = env.product_dir
user = env.user
date = datetime.datetime.now().strftime( '%Y-%m-%d-%H-%M' )
if exists( product ):
sudo( 'mv %(product)s %(product)s-%(date)s'%locals())
return backup_db(date)
def backup_db(date=None):
user = env.user
if date is None:
date = datetime.datetime.now().strftime( '%Y-%m-%d-%H-%M' )
filename = 'backup-%(date)s.sql'%locals()
sudo( 'pg_dump blog > /tmp/%(filename)s'%locals(), user='postgres' )
sudo( 'mv /tmp/%(filename)s /home/%(user)s/'%locals() )
return filename
def initial_install():
install_ssh_keys()
ensure_dependencies(update=True)
with settings( warn_only=True ):
sudo( 'aptitude remove apache2 apache2-mpm-worker apache2-utils apache2.2-bin apache2.2-common' )
sudo( '/etc/init.d/rng-tools restart' )
initial_db()
upgrade_pips()
install()
def initial_db():
sudo( 'createdb -O mcfletch --locale=en_CA.utf8 -E utf8 -T template0 webtoys', user='postgres' )
def update():
install()
def install():
with settings( warn_only=True):
sudo('rm -rf %s'%(env.virtual_env,))
# with settings( warn_only=True ):
# sudo( '/etc/init.d/supervisor stop' )
# sudo( '/etc/init.d/nginx stop' )
venv = env.virtual_env
virtualenv(
venv,
PROJECT_SOURCES,
pips = env.pips,
)
install_templates()
django_admin( 'syncdb' )
django_admin( 'migrate' )
django_collectstatic()
# restart_servers()
def install_templates( ):
# install_su( ETC_SOURCE, '/etc/' )
# install_su( VAR_SOURCE, '/var/', owner=env.user )
# install_su( OPT_SOURCE, '/opt/', owner=env.user )
# install_su( HOME_SOURCE, '/home/%s'%(env.user), owner=env.user )
pass
def restart_servers():
with settings( warn_only=True ):
sudo( '/etc/init.d/supervisor stop' )
sudo( '/etc/init.d/nginx stop' )
sudo( '/etc/init.d/nginx start' )
sudo( '/etc/init.d/supervisor start' )
def install_su(source_dir,target_dir, delete=False, owner='root'):
"""Install source directory into target directory on server"""
temp = '~%s/tmp/'%( env.user,)
run( 'mkdir -p %(temp)s'%locals())
with cd( temp ):
if delete:
project.rsync_project( '%(temp)s'%locals(), source_dir, extra_opts='-l --delete-after' )
else:
project.rsync_project( '%(temp)s'%locals(), source_dir, extra_opts='-l' )
base = os.path.basename( source_dir )
sudo( 'chown -R %(owner)s:%(owner)s %(temp)s%(base)s'%locals() )
sudo( 'rsync -alv %(temp)s%(base)s/* %(target_dir)s'%locals() )
# now switch the template back for next time...
user = env.user
sudo( 'chown -R %(user)s %(temp)s%(base)s'%locals() )
def upgrade_pips( pips=None ):
"""Pull source code for packages into our package cache"""
if pips is None:
pips = env.pips
environment = env.virtual_env
if not exists( environment ):
empty_virtualenv( environment )
pip_params=''# -M --mirrors=http://b.pypi.python.org'
sudo( 'rm -rf ~/packages' )
sudo( 'mkdir -p ~/packages' )
if pips:
pips = [repr(x) for x in pips if not x.startswith( 'git+' )]
if isinstance( pips, (list,tuple)):
pips = ' '.join( pips )
sudo( 'PIP_DOWNLOAD_CACHE=~/.pip-cache %(environment)s/bin/pip install --download=~/packages %(pip_params)s "setuptools==2.1"'%locals() )
sudo( 'PIP_DOWNLOAD_CACHE=~/.pip-cache %(environment)s/bin/pip install --download=~/packages %(pip_params)s "pip==1.3"'%locals() )
sudo( 'PIP_DOWNLOAD_CACHE=~/.pip-cache %(environment)s/bin/pip install --download=~/packages %(pip_params)s %(pips)s'%locals() )
def load_requirements( *reqfiles ):
    """Parse pip requirements files, skipping blank lines and '#' comments"""
    result = []
    for file in reqfiles:
        for line in open( file ):
            line = line.strip()
            if not line or line.startswith( '#' ):
                continue
            if '#' in line:
                line = line.split('#')[0].strip()
            result.append( line )
    return result
def rm_virtualenv():
sudo( 'rm -rf %s'%env.virtual_env, )
env.pips = load_requirements( REQUIREMENTS_FILE )
|
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 08 23:00:39 2016
@author: perrytsao
"""
import cv2
import numpy as np
import itertools
from datetime import datetime
timestamp="{:%Y_%m_%d_%H_%M}".format(datetime.now())
# load params to undistort images
calfile=np.load('camera_cal_data_2016_02_20_03_09.npz')
newcameramtx=calfile['newcameramtx']
roi=calfile['roi']
mtx=calfile['mtx']
dist=calfile['dist']
# Setup SimpleBlobDetector parameters.
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 0
params.maxThreshold = 256
# Filter by Area.
params.filterByArea = True
params.minArea = 5
# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.1
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.5
# Filter by Inertia
params.filterByInertia =True
params.minInertiaRatio = 0.01
def undistort_crop(orig_img):
#undistort and crop
dst = cv2.undistort(orig_img, mtx, dist, None, newcameramtx)
x,y,w,h = roi
crop_frame = dst[y:y+h, x:x+w]
return crop_frame
def add_blobs(crop_frame):
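    # Pipeline: blur -> threshold green in HSV -> clean up the mask ->
    # run SimpleBlobDetector on the inverted mask -> if exactly four markers
    # are found, identify the middle, front, left and right points and
    # annotate the frame. Returns (annotated frame, max marker distance, centre).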
frame=cv2.GaussianBlur(crop_frame, (3, 3), 0)
# Convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# define range of green color in HSV
lower_green = np.array([60,20,20])
upper_green = np.array([95,255,255])
# Threshold the HSV image to get only blue colors
mask = cv2.inRange(hsv, lower_green, upper_green)
mask = cv2.erode(mask, None, iterations=1)
mask = cv2.dilate(mask, None, iterations=1)
# Bitwise-AND mask and original image
res = cv2.bitwise_and(frame,frame, mask= mask)
detector = cv2.SimpleBlobDetector_create(params)
# Detect blobs.
reversemask=255-mask
keypoints = detector.detect(reversemask)
if keypoints:
print "found blobs"
        if len(keypoints) > 4:
            # keep only four blobs so the four-marker logic below can run
            keypoints.sort(key=(lambda s: s.size))
            keypoints=keypoints[0:4]
if len(keypoints)==4:
pts= np.array([keypoints[i].pt for i in range(4)])
#x,y=zip(*pts)
# Calculate distances between all combinations of points
dis_vectors = [l - r for l, r in itertools.combinations(pts, 2)]
dcalc=[np.linalg.norm(dis_vectors[i]) for i in range(6)]
# find the closest point to all of them, that is the middle point
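            # itertools.combinations(pts, 2) yields pairs in the order
            # (0,1),(0,2),(0,3),(1,2),(1,3),(2,3), so distances [0,1,2] involve
            # point 0, [0,3,4] point 1, [1,3,5] point 2 and [2,4,5] point 3.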
mean_a=np.array([dcalc[i] for i in [0,1,2]]).sum()/4.0
mean_b=np.array([dcalc[i] for i in [0,3,4]]).sum()/4.0
mean_c=np.array([dcalc[i] for i in [1,3,5]]).sum()/4.0
mean_d=np.array([dcalc[i] for i in [2,4,5]]).sum()/4.0
middlepoint=np.argmin(np.array([mean_a, mean_b, mean_c, mean_d]))
idx=np.argmax(dcalc) # find two furthest points, those are left and right sidepoints
max_dist_val=np.max(dcalc)
print max_dist_val
if idx ==0:
sidepts=[0,1]
elif idx==1:
sidepts=[0,2]
elif idx==2:
sidepts=[0,3]
elif idx==3:
sidepts=[1,2]
elif idx==4:
sidepts=[1,3]
elif idx==5:
sidepts=[2,3]
# the frontpoint is the remaining one.
frontpoint=6-np.array(sidepts+[middlepoint]).sum()
# now determine which side point is the left one
# http://stackoverflow.com/questions/1560492/how-to-tell-whether-a-point-is-to-the-right-or-left-side-of-a-line
a=keypoints[middlepoint].pt
b=keypoints[frontpoint].pt
c=keypoints[sidepts[0]].pt
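# Note: the expression below is the z-component of the 2D cross product
# (b - a) x (c - a); its sign tells whether c lies to the left or the right of
# the directed line from the middle point a toward the front point b.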
if ((b[0] - a[0])*(c[1] - a[1]) - (b[1] - a[1])*(c[0] - a[0])) < 0:
leftpt=sidepts[0]
rightpt=sidepts[1]
else:
leftpt=sidepts[1]
rightpt=sidepts[0]
im_with_midpoint = cv2.drawKeypoints(frame, [keypoints[middlepoint]], np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
im_with_midpoint_frontpoint = cv2.drawKeypoints(im_with_midpoint, [keypoints[frontpoint]], np.array([]), (255,0,0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
keypoints_side=[keypoints[i] for i in [leftpt]]
im_with_keypoints1 = cv2.drawKeypoints(im_with_midpoint_frontpoint, keypoints_side, np.array([]), (0,255,0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
keypoints_side=[keypoints[i] for i in [rightpt]]
im_with_keypoints = cv2.drawKeypoints(im_with_keypoints1, keypoints_side, np.array([]), (255,255,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
textstr="%0.4f dist. %i,%i center" % (max_dist_val, keypoints[middlepoint].pt[0], keypoints[middlepoint].pt[1])
max_blob_dist=max_dist_val
blob_center=keypoints[middlepoint].pt
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(im_with_keypoints, textstr,(10,50), font, .8,(255,255,255),2,cv2.LINE_AA)
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
#im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
else:
im_with_keypoints=crop_frame
print "%i blobs" % (len(keypoints))
max_blob_dist=None
blob_center=None
else:
print "no blobs"
im_with_keypoints=crop_frame
max_blob_dist=None
blob_center=None
return im_with_keypoints, max_blob_dist, blob_center #, keypoint_in_orders
###############################################
cv2.namedWindow("preview")
vc = cv2.VideoCapture(1)
fname="drone_green_dot"
width=800
height=600
wait_time=33
fps=int(1/.001/(float(wait_time)))
vc.set(3,width)
vc.set(4,height)
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('output13'+timestamp+'.avi',fourcc, 30.0, (width,height),1)
import serial, time, msvcrt
throttle=1000
aileron=1500 # moves left/right
elevator=1500 #moves front back
rudder=1500 # yaw, rotates the drone
tg=5
ag=1
eg=1
rg=1
zpos=50
xypos=(350, 250)
command=""
start_flying=0
no_position_cnt=0
oldz=45
oldx=350
oldy=250
dz=0
dx=0
dy=0
xspeed=0
yspeed=0
zspeed=0
speeds=""
font = cv2.FONT_HERSHEY_SIMPLEX
try:
arduino=serial.Serial('COM3', 115200, timeout=.01)
time.sleep(1) #give the connection a second to settle
if vc.isOpened(): # try to get the first frame
rval, frame_o = vc.read()
#frame_undistort=undistort_crop(np.rot90(frame_o, 2))
frame_undistort=undistort_crop(frame_o)
frame, zpos, xypos=add_blobs(frame_undistort)
else:
rval = False
ii=100
while rval:
cv2.imshow("preview", frame)
key = cv2.waitKey(wait_time)
rval, frame_o = vc.read()
#frame_undistort=undistort_crop(np.rot90(frame_o, 2))
frame_undistort=undistort_crop(frame_o)
frame, zpos, xypos=add_blobs(frame_undistort)
## Serial comms
data = arduino.readline()
while data:
print "[AU]: "+data.rstrip("\n") #strip out the new lines for now
# (better to do .read() in the long run for this reason)
data=arduino.readline()
throttle=min(throttle, 2000)
aileron=min(aileron, 1510)
elevator=min(elevator, 1510)
rudder=min(rudder, 1600)
throttle=max(throttle, 1000)
aileron=max(aileron, 1490)
elevator=max(elevator, 1490)
rudder=max(rudder, 1400)
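# Note: the paired min()/max() calls above clamp each RC channel into a safe
# pulse-width band (throttle 1000-2000, aileron/elevator 1490-1510,
# rudder 1400-1600) before the command string is sent to the Arduino.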
command="%i,%i,%i,%i"% (throttle, aileron, elevator, rudder)
print "[PC]: "+command
arduino.write(command+"\n")
try:
dz=(zpos-oldz)*0.3+0.7*dz
dx=(xypos[0]-oldx)*0.3+0.7*dx
dy=(xypos[1]-oldy)*0.3+0.7*dy
oldz=zpos
oldx=xypos[0]
oldy=xypos[1]
except:
print "no speed"
speeds="dz: %0.2f dx: %0.2f dy: %0.2f" % (dz, dx, dy)
targets="tsz: %0.2f tsx: %0.2f tsy: %0.2f" % (zspeed, xspeed, yspeed)
cv2.putText(frame, command,(10,100), font, .8,(255,255,255),2,cv2.LINE_AA)
cv2.putText(frame, speeds,(10,150), font, .8,(255,255,255),2,cv2.LINE_AA)
cv2.putText(frame, targets,(10,200), font, .8,(255,255,255),2,cv2.LINE_AA)
if start_flying:
frame_pad=cv2.copyMakeBorder(frame,111,0,86,00,cv2.BORDER_CONSTANT,value=[255,0,0])
out.write(frame_pad)
try:
print "Zpos: %i Xpos: %i Ypos: %i" % (zpos, xypos[0], xypos[1])
# compare to target speed
if dz > zspeed:
throttle-=tg
else:
throttle+=tg
if dx > xspeed:
aileron-=ag
else:
aileron+=ag
if dy > yspeed:
elevator-=eg
else:
elevator+=eg
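# Note: this is a simple bang-bang controller - every frame the throttle,
# aileron and elevator are nudged by their fixed gains (tg, ag, eg) in whichever
# direction moves the measured speeds (dz, dx, dy) toward the target speeds
# (zspeed, xspeed, yspeed) set further down.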
clamp=lambda n, minn, maxn: (max(min(maxn, n), minn))
if zpos > 60:
print "highalt"
aileron=clamp(aileron, 1495, 1510)
elevator=clamp(elevator, 1495, 1510)
else:
print "lowalt"
aileron=clamp(aileron, 1499, 1501)
elevator=clamp(elevator, 1499, 1501)
# set target speeds
if zpos > 65:
zspeed=-.1
else:
zspeed=+.1
if xypos[0] > 350:
xspeed=-0.2 # move left
else:
xspeed=0.2 # move right
if xypos[1] > 250:
yspeed=-0.2 # move up
else:
yspeed=+0.2 # move down
no_position_cnt=0
except:
no_position_cnt+=1
print "no position"
if no_position_cnt>15:
throttle=1000
start_flying=0
## Monitor keyboard
if key == 27: # exit on ESC
break
elif key == 32:
cv2.imwrite(fname+str(ii)+".jpg", frame)
ii+=1
elif key == 119: #w
throttle=1200
aileron=1500 # neutral (left/right)
elevator=1500
rudder=1500 # yaw, rotates the drone
start_flying=1
print "START FLYING"
elif key == 115: #s
throttle=1000
start_flying=0
finally:
# close the connection
arduino.close()
# re-open the serial port, which makes the Arduino Uno reset;
# this forces the quadcopter to turn off.
arduino=serial.Serial('COM3', 115200, timeout=.01)
arduino.close()
# close it again so it can be reopened the next time it is run.
vc.release()
cv2.destroyWindow("preview")
out.release()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for Linux servers running LVM.
"""
import math
import os
import socket
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import units
from cinder.brick import exception as brick_exception
from cinder.brick.local_dev import lvm as lvm
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder import utils
from cinder.volume import driver
from cinder.volume import utils as volutils
LOG = logging.getLogger(__name__)
# FIXME(jdg): We'll put the lvm_ prefix back on these when we
# move over to using this as the real LVM driver, for now we'll
# rename them so that the config generation utility doesn't barf
# on duplicate entries.
volume_opts = [
cfg.StrOpt('volume_group',
default='cinder-volumes',
help='Name for the VG that will contain exported volumes'),
cfg.IntOpt('lvm_mirrors',
default=0,
help='If >0, create LVs with multiple mirrors. Note that '
'this requires lvm_mirrors + 2 PVs with available space'),
cfg.StrOpt('lvm_type',
default='default',
choices=['default', 'thin'],
help='Type of LVM volumes to deploy'),
cfg.StrOpt('lvm_conf_file',
default='/etc/cinder/lvm.conf',
help='LVM conf file to use for the LVM driver in Cinder; '
'this setting is ignored if the specified file does '
'not exist (You can also specify \'None\' to not use '
'a conf file even if one exists).')
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class LVMVolumeDriver(driver.VolumeDriver):
"""Executes commands relating to Volumes."""
VERSION = '3.0.0'
def __init__(self, vg_obj=None, *args, **kwargs):
# Parent sets db, host, _execute and base config
super(LVMVolumeDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
self.hostname = socket.gethostname()
self.vg = vg_obj
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'LVM'
# Target Driver is what handles data-transport
# Transport specific code should NOT be in
# the driver (control path), this way
# different target drivers can be added (iscsi, FC etc)
target_driver = \
self.target_mapping[self.configuration.safe_get('iscsi_helper')]
LOG.debug('Attempting to initialize LVM driver with the '
'following target_driver: %s',
target_driver)
self.target_driver = importutils.import_object(
target_driver,
configuration=self.configuration,
db=self.db,
executor=self._execute)
self.protocol = self.target_driver.protocol
def _sizestr(self, size_in_g):
return '%sg' % size_in_g
def _volume_not_present(self, volume_name):
return self.vg.get_volume(volume_name) is None
def _delete_volume(self, volume, is_snapshot=False):
"""Deletes a logical volume."""
if self.configuration.volume_clear != 'none' and \
self.configuration.lvm_type != 'thin':
self._clear_volume(volume, is_snapshot)
name = volume['name']
if is_snapshot:
name = self._escape_snapshot(volume['name'])
self.vg.delete(name)
def _clear_volume(self, volume, is_snapshot=False):
# zero out old volumes to prevent data leaking between users
# TODO(ja): reclaiming space should be done lazy and low priority
if is_snapshot:
# if the volume to be cleared is a snapshot of another volume
# we need to clear out the volume using the -cow device instead
# of the volume path directly. We need to skip this if we are
# using thin provisioned LVs.
# bug# lp1191812
dev_path = self.local_path(volume) + "-cow"
else:
dev_path = self.local_path(volume)
# TODO(jdg): Maybe we could optimize this for snaps by looking at
# the cow table and only overwriting what's necessary?
# for now we're still skipping on snaps due to hang issue
if not os.path.exists(dev_path):
msg = (_LE('Volume device file path %s does not exist.')
% dev_path)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
size_in_g = volume.get('volume_size') or volume.get('size')
if size_in_g is None:
msg = (_LE("Size for volume: %s not found, "
"cannot secure delete.") % volume['id'])
LOG.error(msg)
raise exception.InvalidParameterValue(msg)
# clear_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
vol_sz_in_meg = size_in_g * units.Ki
volutils.clear_volume(
vol_sz_in_meg, dev_path,
volume_clear=self.configuration.volume_clear,
volume_clear_size=self.configuration.volume_clear_size)
def _escape_snapshot(self, snapshot_name):
# Linux LVM reserves names that start with 'snapshot', so such a
# volume name can't be created. Mangle it.
if not snapshot_name.startswith('snapshot'):
return snapshot_name
return '_' + snapshot_name
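# For example, _escape_snapshot('snapshot-1234') returns '_snapshot-1234',
# while a name such as 'volume-1234' is returned unchanged (names are hypothetical).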
def _create_volume(self, name, size, lvm_type, mirror_count, vg=None):
vg_ref = self.vg
if vg is not None:
vg_ref = vg
vg_ref.create_volume(name, size, lvm_type, mirror_count)
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug(("Updating volume stats"))
if self.vg is None:
LOG.warning(_LW('Unable to update stats on non-initialized '
'Volume Group: %s'),
self.configuration.volume_group)
return
self.vg.update_volume_group_info()
data = {}
# Note(zhiteng): This information is driver/backend specific,
# each driver may define these values in its own config options
# or fetch from driver specific configuration file.
data["volume_backend_name"] = self.backend_name
data["vendor_name"] = 'Open Source'
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.protocol
data["pools"] = []
total_capacity = 0
free_capacity = 0
if self.configuration.lvm_mirrors > 0:
total_capacity =\
self.vg.vg_mirror_size(self.configuration.lvm_mirrors)
free_capacity =\
self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors)
provisioned_capacity = round(
float(total_capacity) - float(free_capacity), 2)
elif self.configuration.lvm_type == 'thin':
total_capacity = self.vg.vg_thin_pool_size
free_capacity = self.vg.vg_thin_pool_free_space
provisioned_capacity = self.vg.vg_provisioned_capacity
else:
total_capacity = self.vg.vg_size
free_capacity = self.vg.vg_free_space
provisioned_capacity = round(
float(total_capacity) - float(free_capacity), 2)
location_info = \
('LVMVolumeDriver:%(hostname)s:%(vg)s'
':%(lvm_type)s:%(lvm_mirrors)s' %
{'hostname': self.hostname,
'vg': self.configuration.volume_group,
'lvm_type': self.configuration.lvm_type,
'lvm_mirrors': self.configuration.lvm_mirrors})
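# Illustrative example: with the default options the string above would look
# like 'LVMVolumeDriver:<hostname>:cinder-volumes:default:0'.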
thin_enabled = self.configuration.lvm_type == 'thin'
# Calculate the total volumes used by the VG group.
# This includes volumes and snapshots.
total_volumes = len(self.vg.get_volumes())
# Skip enabled_pools setting, treat the whole backend as one pool
# XXX FIXME if multipool support is added to LVM driver.
single_pool = {}
single_pool.update(dict(
pool_name=data["volume_backend_name"],
total_capacity_gb=total_capacity,
free_capacity_gb=free_capacity,
reserved_percentage=self.configuration.reserved_percentage,
location_info=location_info,
QoS_support=False,
provisioned_capacity_gb=provisioned_capacity,
max_over_subscription_ratio=(
self.configuration.max_over_subscription_ratio),
thin_provisioning_support=thin_enabled,
thick_provisioning_support=not thin_enabled,
total_volumes=total_volumes,
filter_function=self.get_filter_function(),
goodness_function=self.get_goodness_function()
))
data["pools"].append(single_pool)
self._stats = data
def check_for_setup_error(self):
"""Verify that requirements are in place to use LVM driver."""
if self.vg is None:
root_helper = utils.get_root_helper()
lvm_conf_file = self.configuration.lvm_conf_file
if lvm_conf_file.lower() == 'none':
lvm_conf_file = None
try:
self.vg = lvm.LVM(self.configuration.volume_group,
root_helper,
lvm_type=self.configuration.lvm_type,
executor=self._execute,
lvm_conf=lvm_conf_file)
except brick_exception.VolumeGroupNotFound:
message = (_("Volume Group %s does not exist") %
self.configuration.volume_group)
raise exception.VolumeBackendAPIException(data=message)
vg_list = volutils.get_all_volume_groups(
self.configuration.volume_group)
vg_dict = next(
(vg for vg in vg_list if vg['name'] == self.vg.vg_name), None)
if vg_dict is None:
message = (_("Volume Group %s does not exist") %
self.configuration.volume_group)
raise exception.VolumeBackendAPIException(data=message)
if self.configuration.lvm_type == 'thin':
# Specific checks for using Thin provisioned LV's
if not volutils.supports_thin_provisioning():
message = _("Thin provisioning not supported "
"on this version of LVM.")
raise exception.VolumeBackendAPIException(data=message)
pool_name = "%s-pool" % self.configuration.volume_group
if self.vg.get_volume(pool_name) is None:
try:
self.vg.create_thin_pool(pool_name)
except processutils.ProcessExecutionError as exc:
exception_message = (_("Failed to create thin pool, "
"error message was: %s")
% exc.stderr)
raise exception.VolumeBackendAPIException(
data=exception_message)
def create_volume(self, volume):
"""Creates a logical volume."""
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
self.configuration.lvm_mirrors)
# Some configurations of LVM do not automatically activate
# ThinLVM snapshot LVs.
self.vg.activate_lv(snapshot['name'], is_snapshot=True)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
volutils.copy_volume(self.local_path(snapshot),
self.local_path(volume),
snapshot['volume_size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute)
def delete_volume(self, volume):
"""Deletes a logical volume."""
# NOTE(jdg): We don't need to explicitly call
# remove export here because we already did it
# in the manager before we got here.
if self._volume_not_present(volume['name']):
# If the volume isn't present, then don't attempt to delete
return True
if self.vg.lv_has_snapshot(volume['name']):
LOG.error(_LE('Unable to delete due to existing snapshot '
'for volume: %s') % volume['name'])
raise exception.VolumeIsBusy(volume_name=volume['name'])
self._delete_volume(volume)
LOG.info(_LI('Successfully deleted volume: %s'), volume['id'])
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']),
snapshot['volume_name'],
self.configuration.lvm_type)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
# If the snapshot isn't present, then don't attempt to delete
LOG.warning(_LW("snapshot: %s not found, "
"skipping delete operations") % snapshot['name'])
LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id'])
return True
# TODO(yamahata): zeroing out the whole snapshot triggers COW.
# it's quite slow.
self._delete_volume(snapshot, is_snapshot=True)
def local_path(self, volume, vg=None):
if vg is None:
vg = self.configuration.volume_group
# NOTE(vish): stops deprecation warning
escaped_group = vg.replace('-', '--')
escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'])
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
LOG.info(_LI('Creating clone of volume: %s') % src_vref['id'])
volume_name = src_vref['name']
temp_id = 'tmp-snap-%s' % volume['id']
temp_snapshot = {'volume_name': volume_name,
'size': src_vref['size'],
'volume_size': src_vref['size'],
'name': 'clone-snap-%s' % volume['id'],
'id': temp_id}
self.create_snapshot(temp_snapshot)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
try:
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True)
volutils.copy_volume(
self.local_path(temp_snapshot),
self.local_path(volume),
src_vref['size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute)
finally:
self.delete_snapshot(temp_snapshot)
def clone_image(self, context, volume,
image_location, image_meta,
image_service):
return None, False
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup['volume_id'])
volume_path = self.local_path(volume)
with utils.temporary_chown(volume_path):
with fileutils.file_open(volume_path) as volume_file:
backup_service.backup(backup, volume_file)
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
volume_path = self.local_path(volume)
with utils.temporary_chown(volume_path):
with fileutils.file_open(volume_path, 'wb') as volume_file:
backup_service.restore(backup, volume['id'], volume_file)
def get_volume_stats(self, refresh=False):
"""Get volume status.
If 'refresh' is True, update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def extend_volume(self, volume, new_size):
"""Extend an existing volume's size."""
self.vg.extend_volume(volume['name'],
self._sizestr(new_size))
def manage_existing(self, volume, existing_ref):
"""Manages an existing LV.
Renames the LV to match the expected name for the volume.
Error checking done by manage_existing_get_size is not repeated.
"""
lv_name = existing_ref['source-name']
self.vg.get_volume(lv_name)
# Attempt to rename the LV to match the OpenStack internal name.
try:
self.vg.rename_volume(lv_name, volume['name'])
except processutils.ProcessExecutionError as exc:
exception_message = (_("Failed to rename logical volume %(name)s, "
"error message was: %(err_msg)s")
% {'name': lv_name,
'err_msg': exc.stderr})
raise exception.VolumeBackendAPIException(
data=exception_message)
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of an existing LV for manage_existing.
existing_ref is a dictionary of the form:
{'source-name': <name of LV>}
"""
# Check that the reference is valid
if 'source-name' not in existing_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
lv_name = existing_ref['source-name']
lv = self.vg.get_volume(lv_name)
# Raise an exception if we didn't find a suitable LV.
if not lv:
kwargs = {'existing_ref': lv_name,
'reason': 'Specified logical volume does not exist.'}
raise exception.ManageExistingInvalidReference(**kwargs)
# LV size is returned in gigabytes. Attempt to parse size as a float
# and round up to the next integer.
try:
lv_size = int(math.ceil(float(lv['size'])))
except ValueError:
exception_message = (_("Failed to manage existing volume "
"%(name)s, because reported size %(size)s "
"was not a floating-point number.")
% {'name': lv_name,
'size': lv['size']})
raise exception.VolumeBackendAPIException(
data=exception_message)
return lv_size
def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
"""Optimize the migration if the destination is on the same server.
If the specified host is another back-end on the same server, and
the volume is not attached, we can do the migration locally without
going through iSCSI.
"""
false_ret = (False, None)
if volume['status'] != 'available':
return false_ret
if 'location_info' not in host['capabilities']:
return false_ret
info = host['capabilities']['location_info']
try:
(dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\
info.split(':')
lvm_mirrors = int(lvm_mirrors)
except ValueError:
return false_ret
if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname):
return false_ret
if dest_vg != self.vg.vg_name:
vg_list = volutils.get_all_volume_groups()
try:
next(vg for vg in vg_list if vg['name'] == dest_vg)
except StopIteration:
message = (_LE("Destination Volume Group %s does not exist") %
dest_vg)
LOG.error(message)
return false_ret
helper = utils.get_root_helper()
lvm_conf_file = self.configuration.lvm_conf_file
if lvm_conf_file.lower() == 'none':
lvm_conf_file = None
dest_vg_ref = lvm.LVM(dest_vg, helper,
lvm_type=lvm_type,
executor=self._execute,
lvm_conf=lvm_conf_file)
self._create_volume(volume['name'],
self._sizestr(volume['size']),
lvm_type,
lvm_mirrors,
dest_vg_ref)
volutils.copy_volume(self.local_path(volume),
self.local_path(volume, vg=dest_vg),
volume['size'],
self.configuration.volume_dd_blocksize,
execute=self._execute)
self._delete_volume(volume)
return (True, None)
else:
message = (_("Refusing to migrate volume ID: %(id)s. Please "
"check your configuration because source and "
"destination are the same Volume Group: %(name)s."),
{'id': volume['id'], 'name': self.vg.vg_name})
LOG.exception(message)
raise exception.VolumeBackendAPIException(data=message)
def get_pool(self, volume):
return self.backend_name
# ####### Interface methods for DataPath (Target Driver) ########
def ensure_export(self, context, volume):
volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
volume['name'])
model_update = \
self.target_driver.ensure_export(context, volume, volume_path)
return model_update
def create_export(self, context, volume, vg=None):
if vg is None:
vg = self.configuration.volume_group
volume_path = "/dev/%s/%s" % (vg, volume['name'])
export_info = self.target_driver.create_export(
context,
volume,
volume_path)
return {'provider_location': export_info['location'],
'provider_auth': export_info['auth'], }
def remove_export(self, context, volume):
self.target_driver.remove_export(context, volume)
def initialize_connection(self, volume, connector):
return self.target_driver.initialize_connection(volume, connector)
def validate_connector(self, connector):
return self.target_driver.validate_connector(connector)
def terminate_connection(self, volume, connector, **kwargs):
return self.target_driver.terminate_connection(volume, connector,
**kwargs)
class LVMISCSIDriver(LVMVolumeDriver):
"""Empty class designation for LVMISCSI.
Since we've decoupled the inheritance of iSCSI and LVM we
don't really need this class any longer. We do however want
to keep it (at least for now) for back compat in driver naming.
"""
def __init__(self, *args, **kwargs):
super(LVMISCSIDriver, self).__init__(*args, **kwargs)
LOG.warning(_LW('LVMISCSIDriver is deprecated, you should '
'now just use LVMVolumeDriver and specify '
'target_helper for the target driver you '
'wish to use.'))
class LVMISERDriver(LVMVolumeDriver):
"""Empty class designation for LVMISER.
Since we've decoupled the inheritance of data path in LVM we
don't really need this class any longer. We do however want
to keep it (at least for now) for back compat in driver naming.
"""
def __init__(self, *args, **kwargs):
super(LVMISERDriver, self).__init__(*args, **kwargs)
LOG.warning(_LW('LVMISERDriver is deprecated, you should '
'now just use LVMVolumeDriver and specify '
'target_helper for the target driver you '
'wish to use. In order to enable iser, please '
'set iscsi_protocol with the value iser.'))
LOG.debug('Attempting to initialize LVM driver with the '
'following target_driver: '
'cinder.volume.targets.iser.ISERTgtAdm')
self.target_driver = importutils.import_object(
'cinder.volume.targets.iser.ISERTgtAdm',
configuration=self.configuration,
db=self.db,
executor=self._execute)
|
|
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2016 Paul Watkins, National Institutes of Health / NINDS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Extends python hdf5 load class to write (new or append) hdf5 data in a subset of a whole dataset.
import h5py
import numpy as np
import re
import argparse
import time
import os
from emdrp.dpLoadh5 import dpLoadh5
from tifffile import imread
class dpWriteh5(dpLoadh5):
HDF5_CLVL = 5 # compression level in hdf5
def __init__(self, args):
dpLoadh5.__init__(self, args)
# Options / Inits
if not self.outfile: self.outfile = self.srcfile
def writeCube(self, data=None, outfile=None):
# do not move this to init, won't work with typesh5.py
# xxx - this class hierarchy maybe should be revisited someday.... to die
if not self.data_type_out: self.data_type_out = self.data_type
if isinstance(self.data_type_out, str): self.data_type_out = eval('np.' + self.data_type_out)
if not self.fillvalue: self.fillvalue = '0'
if isinstance(self.fillvalue, str):
self.fillvalue = np.asscalar(np.fromstring(self.fillvalue, dtype=self.data_type_out, sep=' '))
if data is None:
data = self.data_cube.astype(self.data_type_out)
else:
#assert(data.dtype == self.data_type) # xxx - probably revisit this, this was original
# xxx - is there a problem with fillvalue now?
data = data.astype(self.data_type_out)
# xxx - writeRaw will still write with the type of this object, the out type is only for hdf5
# this option is mostly for frontend compatibility, revisit this again if this is needed for backend
# xxx - re-added this, something more comprehensive probably needs to be done about this... meh
self.data_cube = data; self.data_type = self.data_type_out
if outfile is None: outfile = self.outfile if self.outfile else self.srcfile
self.writeRaw()
if not outfile: return
# xxx - this probably should be cleaned up, allows for basically "copying" a dataset to another hdf5
# with this tool. make this more explicit in how classes are defined?
if self.dataset_out: self.dataset = self.dataset_out
if len(self.subgroups_out)==0 or self.subgroups_out[0] is not None: self.subgroups = self.subgroups_out
if self.offset_out[0] is not None: self.offset = self.offset_out
dset, group, h5file = self.createh5(outfile)
if self.dpWriteh5_verbose:
print('dpWriteh5: Writing hdf5')
t = time.time()
# always write outputs in F-order
ind = self.get_hdf_index_from_chunk_index(dset, self.chunk, self.offset)
ind = ind[self.zreslice_dim_ordering][::-1] # re-order for specified ordering, then to F-order
d = data.transpose((2,1,0));
#print(ind, d.shape, dset.shape, d.max(), d.min(), dset.dtype, d.dtype)
dset[ind[0]:ind[0]+d.shape[0],ind[1]:ind[1]+d.shape[1],ind[2]:ind[2]+d.shape[2]] = d
# optionally add a list of chunk Regions of Interest specified in text file
if self.inroi:
rois=np.loadtxt(self.inroi,dtype=np.int64).reshape((-1,3,3))
self.data_attrs['roi_chunks'] = rois[:,0,:].reshape((-1,3))
self.data_attrs['roi_sizes'] = rois[:,1,:].reshape((-1,3))
self.data_attrs['roi_offsets'] = rois[:,2,:].reshape((-1,3))
# write attributes
for name,value in self.data_attrs.items():
if name in dset.attrs: del dset.attrs[name]
newname = self.dataset + '_' + name
if newname in group: del group[newname]
# xxx - this is arbitrary, but don't want to exceed 64k hdf5 header limit
if isinstance(value, np.ndarray) and value.size > 100:
group.create_dataset(newname, data=value, compression='gzip',
compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True)
else:
#http://stackoverflow.com/questions/23220513/storing-a-list-of-strings-to-a-hdf5-dataset-from-python
if isinstance(value, str):
value = value.encode("ascii", "ignore")
elif type(value) is list and isinstance(value[0], str):
value = [n.encode("ascii", "ignore") for n in value]
dset.attrs.create(name,value)
h5file.close()
if self.dpWriteh5_verbose:
print('\tdone in %.4f s' % (time.time() - t))
def createh5(self, outfile):
h5file = h5py.File(outfile, 'r+' if os.path.isfile(outfile) else 'w')
dset, group, dsetpath = self.getDataset(h5file)
if not dset:
self.createh5dataset(h5file, dsetpath)
dset, group, dsetpath = self.getDataset(h5file)
assert( dset ) # dataset not created? this is bad
return dset, group, h5file
def createh5dataset(self, h5file, dsetpath):
if self.dpWriteh5_verbose:
print('dpWriteh5: Creating hdf5 dataset')
t = time.time()
# create an output prob hdf5 file (likely for a larger dataset, this is how outputs are "chunked")
# get the shape and chunk size from the data hdf5. if this file is in F-order, re-order to C-order
shape = self.datasize; chunks = self.chunksize
# do not re-order for F-order here, should have already been re-ordered in dpLoadh5
#if not self.hdf5_Corder:
# shape = shape[::-1]; chunks = chunks[::-1]
# now re-order the dims based on the specified re-ordering and then re-order back to F-order
shape = shape[self.zreslice_dim_ordering]; chunks = chunks[self.zreslice_dim_ordering]
shape = shape[::-1]; chunks = tuple(chunks[::-1])
h5file.create_dataset(dsetpath, shape=shape, dtype=self.data_type_out, compression='gzip',
compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True, fillvalue=self.fillvalue, chunks=chunks)
if self.dpWriteh5_verbose:
print('\tdone in %.4f s' % (time.time() - t))
def writeFromRaw(self):
self.loadFromRaw()
#if self.dpWriteh5_verbose: print(self.data_cube.min(), self.data_cube.max(), self.data_cube.shape)
# xxx - total hacks, keep commented
#self.data_attrs['dimOrdering'] = [1,2,3]
self.writeCube()
def loadFromRaw(self):
# xxx - this is duplicated in writeCube(), couldn't move it to init because of types.h5
# code needs some refactoring that allows for a single base class and input and output types.
if not self.data_type_out: self.data_type_out = self.data_type
if isinstance(self.data_type_out, str): self.data_type_out = eval('np.' + self.data_type_out)
ext = os.path.splitext(self.inraw)[1][1:]
use_const = False
if not ext:
# kludgy support writing constants (for use as a mask, etc)
try:
const = float(self.inraw)
use_const = True
except ValueError:
pass
if use_const:
print('Initializing with constant value %d' % (const,))
data = const * np.ones(self.size, dtype=self.data_type_out)
elif ext == 'tif':
data = imread(self.inraw)
elif ext == 'nrrd':
# stole this from pynrrd (which wasn't working by itself, gave up on it)
# xxx - new version is available as of early 2016, try migrating to it
_TYPEMAP_NRRD2NUMPY = {'signed char': 'i1', 'int8': 'i1', 'int8_t': 'i1', 'uchar': 'u1',
'unsigned char': 'u1', 'uint8': 'u1', 'uint8_t': 'u1', 'short': 'i2', 'short int': 'i2',
'signed short': 'i2', 'signed short int': 'i2', 'int16': 'i2', 'int16_t': 'i2', 'ushort': 'u2',
'unsigned short': 'u2', 'unsigned short int': 'u2', 'uint16': 'u2', 'uint16_t': 'u2', 'int': 'i4',
'signed int': 'i4', 'int32': 'i4', 'int32_t': 'i4', 'uint': 'u4', 'unsigned int': 'u4', 'uint32': 'u4',
'uint32_t': 'u4', 'longlong': 'i8', 'long long': 'i8', 'long long int': 'i8', 'signed long long': 'i8',
'signed long long int': 'i8', 'int64': 'i8', 'int64_t': 'i8', 'ulonglong': 'u8',
'unsigned long long': 'u8', 'unsigned long long int': 'u8', 'uint64': 'u8', 'uint64_t': 'u8',
'float': 'f4', 'double': 'f8', 'block': 'V'
}
with open(self.inraw,'rb') as nrrdfile:
headerSize = 0; hdr = {'type':self.data_type_out, 'endian':'little'}
for raw_line in iter(nrrdfile):
headerSize += len(raw_line)
raw_line = raw_line.decode('ascii')
# Trailing whitespace ignored per the NRRD spec
line = raw_line.rstrip()
# Single blank line separates the header from the data
if line == '': break
# xxx - very basic header elements
reline = line.lstrip()
m = re.search(r'type\:\s+(.+)', reline)
if m is not None: hdr['type'] = _TYPEMAP_NRRD2NUMPY[m.group(1).strip()]
m = re.search(r'endian\:\s+(\w+)', reline)
if m is not None:
endian = m.group(1).lower()
if endian in ['little','big']:
hdr['type'] = ('<' if endian == 'little' else '>') + hdr['type']
#space directions: (13.20000000,0,0) (0,13.20000000,0) (0,0,30.00000000)
#space directions: (13.199999999999999,0,0) (0,13.199999999999999,0) (0,0,30)
m = re.search(r'space directions\:'
r'\s+\((\d*\.\d+|\d+),0,0\) \(0,(\d*\.\d+|\d+),0\) \(0,0,(\d*\.\d+|\d+)\)', reline)
if m is not None and 'scale' not in self.data_attrs:
self.data_attrs['scale'] = np.array([float(m.group(1)), float(m.group(2)), float(m.group(3))])
nrrdfile.seek(headerSize)
# xxx - fix this to get data type and endianess from the header, pynrrd still sucks too much
#data = np.fromfile(nrrdfile,dtype=self.data_type_out)
#data = np.fromfile(nrrdfile,dtype=self.data_type_out).byteswap(True) # meh, imagej
# addded very basic header elements above just to get the type and endianess correctly
print('nrrd data type for numpy: ' + hdr['type'])
data = np.fromfile(nrrdfile,dtype=np.dtype(hdr['type']))
# pynrrd is super slow and does some kind of view changing for some reason
#import nrrd
#data, hdr = nrrd.read(self.inraw)
elif ext == 'gipl':
data, hdr, info = dpWriteh5.gipl_read_volume(self.inraw)
if 'scale' not in self.data_attrs:
self.data_attrs['scale'] = hdr['scales'][:3]
elif ext == 'h5':
with h5py.File(self.inraw, 'r') as h5file:
data = h5file[self.dataset][:]
else:
if self.inraw_bigendian:
data = np.fromfile(self.inraw,dtype=self.data_type_out).byteswap(True)
else:
data = np.fromfile(self.inraw,dtype=self.data_type_out)
# xxx - hacky command line over-ride for scale
if all([x > 0 for x in self.scale]): self.data_attrs['scale'] = self.scale
# xxx - this always assumes raw file is in F-order, add something here for C-order if we need it
#self.data_cube = data.astype(self.data_type_out).reshape(self.size[::-1]).transpose((2,1,0))
# add support for reslice reordering of raw inputs
zord = self.zreslice_dim_ordering; size = self.size[zord]; tord = [2,1,0]
self.data_cube = data.astype(self.data_type_out).reshape(size[::-1]).transpose([tord[i] for i in zord])
# xxx - move this as a utility or a GIPL class?
# translated from matlab toolbox http://www.mathworks.com/matlabcentral/fileexchange/16407-gipl-toolbox
@staticmethod
def gipl_read_header(fname):
hdr, info = dpLoadh5.gipl_generate_header()
fh = open(fname, 'rb')
# add the file size and name to the info struct
fh.seek(0, os.SEEK_END); info['filesize'] = fh.tell(); fh.seek(0); info['filename'] = fname
# read binary header with correct order / data types, gipl format is big-endian!!!
for field in info['hdr_fields']:
hdr[field] = np.fromfile(fh, dtype=hdr[field].dtype, count=hdr[field].size).byteswap(True)
#print('\t',field,'\tsize',hdr[field].size,'\ttell ',fh.tell())
assert( fh.tell() == info['hdr_size_bytes'] )
assert( hdr['magic_number'] == info['magic_number'] )
fh.close()
return hdr, info
@staticmethod
def gipl_read_volume(fname):
hdr, info = dpWriteh5.gipl_read_header(fname)
dtype = info['numpy_types'][hdr['image_type'][0]]
datasize = hdr['sizes'].prod(dtype=np.int64)
# read data, gipl format is big-endian!!!
fh = open(fname, 'rb')
fh.seek(info['hdr_size_bytes'])
V = np.fromfile(fh, dtype=dtype, count=datasize).byteswap(True).reshape((hdr['sizes'][:3]))
return V, hdr, info
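# Illustrative usage ('volume.gipl' is a hypothetical file name):
#   V, hdr, info = dpWriteh5.gipl_read_volume('volume.gipl')
#   print(V.shape, hdr['scales'][:3])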
@classmethod
def writeData(cls, outfile, dataset, chunk, offset, size, data_type, datasize, chunksize, fillvalue=None, data=None,
inraw='', outraw='', attrs={}, subgroups_out=[], verbose=False):
assert( data is not None or inraw )
parser = argparse.ArgumentParser(description='class:dpWriteh5',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
dpWriteh5.addArgs(parser); arg_str = ''
arg_str += ' --srcfile ' + outfile
arg_str += ' --data-type ' + data_type
arg_str += ' --chunk %d %d %d ' % tuple(chunk)
arg_str += ' --offset %d %d %d ' % tuple(offset)
arg_str += ' --size %d %d %d ' % tuple(size)
arg_str += ' --dataset ' + dataset
arg_str += ' --chunksize %d %d %d' % tuple(chunksize)
arg_str += ' --datasize %d %d %d' % tuple(datasize)
if fillvalue: arg_str += ' --fillvalue ' + str(fillvalue)
if inraw: arg_str += ' --inraw ' + inraw
if outraw: arg_str += ' --outraw ' + outraw
if subgroups_out: arg_str += ' --subgroups-out ' + ' '.join(subgroups_out)
if verbose: arg_str += ' --dpWriteh5-verbose '
if verbose: print(arg_str)
args = parser.parse_args(arg_str.split())
writeh5 = cls(args); writeh5.data_attrs = attrs
if inraw: writeh5.writeFromRaw()
else: writeh5.writeCube(data)
return writeh5
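# Illustrative call (all values below are hypothetical):
#   dpWriteh5.writeData(outfile='out.h5', dataset='data', chunk=[0,0,0],
#       offset=[0,0,0], size=[128,128,128], data_type='uint8',
#       datasize=[1024,1024,512], chunksize=[128,128,128],
#       data=my_cube, verbose=True)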
@staticmethod
def addArgs(p):
# adds arguments required for this object to specified ArgumentParser object
dpLoadh5.addArgs(p)
p.add_argument('--outfile', nargs=1, type=str, default='',
help='Output file (allows dataset copy), default: srcfile')
p.add_argument('--chunksize', nargs=3, type=int, default=[-1,-1,-1], metavar=('X', 'Y', 'Z'),
help='Chunk size to use for new hdf5')
p.add_argument('--datasize', nargs=3, type=int, default=[-1,-1,-1], metavar=('X', 'Y', 'Z'),
help='Total size of the hdf5 dataset')
p.add_argument('--fillvalue', nargs=1, type=str, default=[''], metavar=('FILL'),
help='Fill value for empty (default 0)')
p.add_argument('--inraw', nargs=1, type=str, default='', metavar='FILE', help='Raw input file')
p.add_argument('--inraw-bigendian', action='store_true', help='Raw input is big endian format')
p.add_argument('--scale', nargs=3, type=float, default=[0.0,0.0,0.0], metavar=('X', 'Y', 'Z'),
help='Override scale (use only with inraw and without srcfile)')
p.add_argument('--inroi', nargs=1, type=str, default='',
help='text file with list of ROIs as chunks, sizes, offsets (in that order)')
p.add_argument('--dataset-out', nargs=1, type=str, default='',
help='Name of the dataset to write: default: dataset')
p.add_argument('--subgroups-out', nargs='*', type=str, default=[None], metavar=('GRPS'),
help='List of groups to identify subgroup for the output dataset (empty for top level), default:subgroups')
p.add_argument('--data-type-out', nargs=1, type=str, default='', metavar='DTYPE',
help='numpy type to write out as')
p.add_argument('--offset-out', nargs=3, type=int, default=[None,None,None], metavar=('X', 'Y', 'Z'),
help='Hacky way to shift datasets over during "copy"')
p.add_argument('--dpWriteh5-verbose', action='store_true', help='Debugging output for dpWriteh5')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Write (create if no file) hdf5 file at specified location',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
dpWriteh5.addArgs(parser)
args = parser.parse_args()
writeh5 = dpWriteh5(args)
if writeh5.outfile and not writeh5.inraw:
writeh5.readCubeToBuffers()
writeh5.writeCube()
else:
writeh5.writeFromRaw()
|
|
#!/usr/bin/env python3
"""
clang-tidy and clang-format related tools for the euphoria project
"""
import argparse
import os
import subprocess
import re
import collections
import sys
import json
import typing
import statistics
from timeit import default_timer as timer
import compile_commands as cc
HEADER_SIZE = 65
HEADER_SPACING = 1
HEADER_START = 3
HEADER_FILES = ['.h', '.hpp', '.hxx']
SOURCE_FILES = ['.cc', '.cpp', '.cxx', '.inl']
CLANG_TIDY_WARNING_CLASS = re.compile(r'\[(\w+([-,]\w+)+)\]')
def file_exist(file: str) -> bool:
return os.path.isfile(file)
def get_file_data(file_name, missing_file):
if file_exist(file_name):
with open(file_name, 'r') as f:
return json.loads(f.read())
else:
return missing_file
def set_file_data(file_name, data):
with open(file_name, 'w') as f:
print(json.dumps(data, sort_keys=True, indent=4), file=f)
def print_header(project_name, header_character='-'):
"""
print a "pretty" header to the terminal
"""
project = ' ' * HEADER_SPACING + project_name + ' ' * HEADER_SPACING
start = header_character*HEADER_START
left = HEADER_SIZE - (len(project) + HEADER_START)
right = header_character*(left) if left > 1 else ''
print(header_character * HEADER_SIZE)
print(start+project+right)
print(header_character * HEADER_SIZE)
def list_files_in_folder(path, extensions):
for root, directories, files in os.walk(path):
for file in files:
ext = os.path.splitext(file)[1]
if extensions is None or ext in extensions:
yield os.path.join(root, file)
def is_file_ignored(path):
with open(path, 'r') as file_handle:
for line in file_handle:
return line.startswith('// clang-tidy: ignore')
return False
def multisort(xs, specs):
for key, reverse in reversed(specs):
xs.sort(key=key, reverse=reverse)
return xs
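# Note: because Python's list.sort() is stable, applying the sort keys in
# reverse order (least-significant key first) produces the same result as a
# single multi-key sort.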
def sort_and_map_files(root, iterator_files):
ret = {}
get_filename = lambda x: os.path.splitext(x)[0]
get_ext = lambda x: os.path.splitext(x)[1]
files = multisort(list(iterator_files), ((get_filename, False), (get_ext, True)))
for file in files:
rel = os.path.relpath(file, root)
# ignore external folder
# ignore build folder
if rel.startswith('external') or rel.startswith('build'):
pass
elif not is_file_ignored(file):
cat, f = os.path.split(rel)
if cat in ret:
ret[cat].append(file)
else:
ret[cat] = [file]
return ret
def extract_data_from_root(root, files):
return sort_and_map_files(root, list_files_in_folder(root, files))
def clang_tidy_root(root):
return os.path.join(root, 'clang-tidy')
def clang_tidy_lines(root):
"""
return an iterator over the "compiled" .clang-tidy lines
"""
with open(clang_tidy_root(root), 'r') as clang_tidy_file:
write = False
checks = []
for line in clang_tidy_file:
if write:
l = line.rstrip()
if not l.lstrip().startswith('//'):
yield l
else:
stripped_line = line.strip()
if stripped_line == '':
pass
elif stripped_line[0] == '#':
pass
elif stripped_line == 'END_CHECKS':
write = True
checks_value = ','.join(checks)
yield 'Checks: "{}"'.format(checks_value)
else:
checks.append(stripped_line)
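# Illustrative "clang-tidy" source (hypothetical content):
#   # enabled checks
#   readability-*
#   modernize-use-nullptr
#   END_CHECKS
#   WarningsAsErrors: ''
# which clang_tidy_lines() compiles into
#   Checks: "readability-*,modernize-use-nullptr"
#   WarningsAsErrors: ''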
def print_clang_tidy_source(root, clang_tidy_file):
"""
print the clang-tidy "source"
"""
for line in clang_tidy_lines(root):
print(line, file=clang_tidy_file)
def make_clang_tidy(root):
"""
write the .clang-tidy from the clang-tidy "source"
"""
with open(os.path.join(root, '.clang-tidy'), 'w') as clang_tidy_file:
print_clang_tidy_source(root, clang_tidy_file)
def path_to_output_store(build_folder):
return os.path.join(build_folder, 'clang-tidy-store.json')
def get_store(build_folder):
return get_file_data(path_to_output_store(build_folder), {})
def get_last_modification(input_files: typing.List[str]):
sourcemod = 0
for path in input_files:
sourcemod = max(sourcemod, os.path.getmtime(path))
return sourcemod
def is_all_up_to_date(input_files: typing.List[str], output) -> bool:
sourcemod = get_last_modification(input_files)
destmod = 0
if output is not None:
destmod = max(destmod, output)
return sourcemod <= destmod
def get(dictionary, key):
if key in dictionary:
return dictionary[key]
return None
def get_existing_output(root, project_build_folder, source_file):
store = get_store(project_build_folder)
root_file = clang_tidy_root(root)
if source_file in store:
stored = store[source_file]
if is_all_up_to_date([root_file, source_file], stored['time']):
return stored['output'], get(stored, 'time_took') or 0.0
return None, 0.0
def set_existing_output(root, project_build_folder, source_file, existing_output, time):
store = get_store(project_build_folder)
root_file = clang_tidy_root(root)
data = {}
data['time'] = get_last_modification([root_file, source_file])
data['output'] = existing_output
data['time_took'] = time
store[source_file] = data
set_file_data(path_to_output_store(project_build_folder), store)
def call_clang_tidy(root, force: bool, tidy_path, project_build_folder, source_file, name_printer, fix):
"""
runs clang-tidy and returns all the text output
"""
if not force:
existing_output, took = get_existing_output(root, project_build_folder, source_file)
if existing_output is not None:
return existing_output, took
command = [tidy_path, '-p', project_build_folder]
if fix:
command.append('--fix')
command.append(source_file)
try:
name_printer.print_name()
start = timer()
output = subprocess.check_output(command, universal_newlines=True,
encoding='utf8', stderr=subprocess.STDOUT)
end = timer()
took = end - start
set_existing_output(root, project_build_folder, source_file, output, took)
return output, took
except subprocess.CalledProcessError as err:
print(err.returncode)
if err.output is not None:
print(err.output)
sys.exit(err.returncode)
def total(counter):
"""
returns the total number of items in a counter
"""
return sum(counter.values())
class FileStatistics:
def __init__(self):
self.data = {}
def add(self, file, time):
self.data[file] = time
def print_data(self):
if len(self.data) != 0:
average_value = statistics.mean(self.data.values())
min_name = min(self.data, key=lambda key: self.data[key])
max_name = max(self.data, key=lambda key: self.data[key])
print(f'average: {average_value:.2f}s')
print(f'max: {self.data[max_name]:.2f}s for {max_name}')
print(f'min: {self.data[min_name]:.2f}s for {min_name}')
print(f'{len(self.data)} files')
def run_clang_tidy(root, force: bool, tidy_path, source_file, project_build_folder, stats, short, name_printer, fix, printable_file, only):
"""
runs the clang-tidy process, printing status to terminal
"""
output, time_taken = call_clang_tidy(root, force, tidy_path, project_build_folder, source_file, name_printer, fix)
warnings = collections.Counter()
classes = collections.Counter()
if not short and len(only) == 0:
name_printer.print_name()
print(f'took {time_taken:.2f}s')
stats.add(printable_file, time_taken)
print_empty = False
hidden = len(only) > 0
for line in output.split('\n'):
if 'warnings generated' in line:
pass
elif 'Use -header-filter=.* to display errors' in line:
pass
elif 'Suppressed' in line and 'NOLINT).' in line:
pass
elif 'Suppressed' in line and 'non-user code' in line:
pass
else:
if 'warning: ' in line:
warnings[printable_file] += 1
tidy_class = CLANG_TIDY_WARNING_CLASS.search(line)
if tidy_class is not None:
warning_classes = tidy_class.group(1)
for warning_class in warning_classes.split(','):
classes[warning_class] += 1
hidden = len(only) > 0
if warning_class in only:
hidden = False
if line.strip() == '':
if not hidden and print_empty:
print()
print_empty = False
else:
if not hidden:
print_empty = True
print(line)
# print('{} warnings.'.format(total(warnings)))
if not short and len(only) == 0:
print_warning_counter(classes, printable_file)
print()
return warnings, classes
def print_warning_counter(project_counter, project):
"""
print warning counter to the console
"""
print('{} warnings in {}.'.format(total(project_counter), project))
for file, count in project_counter.most_common(10):
print('{} at {}'.format(file, count))
##############################################################################
##############################################################################
def handle_list(args):
root = os.getcwd()
project_build_folder = cc.find_build_root(root)
if project_build_folder is None:
print('unable to find build folder')
return
files = list_files_in_folder(root, SOURCE_FILES)
if args.sort:
sorted_files = sort_and_map_files(root, files)
for project, source_files in sorted_files.items():
print_header(project)
for source_file in source_files:
print(source_file)
print()
else:
for file in files:
print(file)
def handle_format(args):
"""
callback function called when running clang.py format
"""
root = os.getcwd()
project_build_folder = cc.find_build_root(root)
if project_build_folder is None:
print('unable to find build folder')
return
data = extract_data_from_root(root, SOURCE_FILES + HEADER_FILES)
for project, source_files in data.items():
print_header(project)
for source_file in source_files:
print(os.path.basename(source_file), flush=True)
if args.nop is False:
subprocess.call(['clang-format', '-i', source_file])
print()
def handle_make_tidy(args):
"""
callback function called when running clang.py make
"""
root = os.getcwd()
if args.nop:
print_clang_tidy_source(root, sys.stdout)
else:
make_clang_tidy(root)
class NamePrinter:
def __init__(self, name):
self.name = name
self.printed = False
def print_name(self):
if not self.printed:
print(self.name, flush=True)
self.printed = True
def filter_out_file(filters, file):
if filters is not None:
return all(f not in file for f in filters)
return False
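# Note: the positional 'filter' arguments act as an allow-list of path
# substrings - a file is skipped only when none of the given substrings occur
# in its path.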
def handle_tidy(args):
"""
callback function called when running clang.py tidy
"""
root = os.getcwd()
project_build_folder = cc.find_build_root(root)
if project_build_folder is None:
print('unable to find build folder')
return
make_clang_tidy(root)
tidy_path = args.tidy
force = args.force
print(f'using clang-tidy: {tidy_path}')
total_counter = collections.Counter()
total_classes = collections.Counter()
warnings_per_file = {}
data = extract_data_from_root(root, SOURCE_FILES + HEADER_FILES if args.headers else SOURCE_FILES)
stats = FileStatistics()
try:
for project, source_files in data.items():
first_file = True
project_counter = collections.Counter()
# source_files = list_source_files(root, project)
for source_file in source_files:
printable_file = os.path.relpath(source_file, root)
if filter_out_file(args.filter, source_file):
continue
print_name = NamePrinter(printable_file)
if first_file:
if not args.short:
print_header(project)
first_file = False
if args.nop is False:
warnings, classes = run_clang_tidy(root, force, tidy_path, source_file, project_build_folder, stats, args.short, print_name, args.fix, printable_file, args.only)
if args.short and len(warnings) > 0:
break
project_counter.update(warnings)
total_counter.update(warnings)
total_classes.update(classes)
for k in classes.keys():
if k in warnings_per_file:
warnings_per_file[k].append(printable_file)
else:
warnings_per_file[k] = [printable_file]
else:
print_name.print_name()
if not first_file and not args.short:
if len(args.only) == 0:
print_warning_counter(project_counter, project)
print()
print()
except KeyboardInterrupt:
if not args.short:
print()
print()
if not args.short and len(args.only) == 0:
print_header('TIDY REPORT')
print_warning_counter(total_counter, 'total')
print()
print_warning_counter(total_classes, 'classes')
print()
print('-' * 80)
print()
for k,v in warnings_per_file.items():
print(f'{k}:')
for f in v:
print(f' {f}')
print()
print('-' * 80)
print()
stats.print_data()
if len(total_counter) > 0:
sys.exit(-1)
else:
sys.exit(0)
##############################################################################
def main():
"""
entry point function for running the clang.py script
"""
parser = argparse.ArgumentParser(description='do clang stuff')
sub_parsers = parser.add_subparsers(dest='command_name', title='Commands',
help='', metavar='<command>')
sub = sub_parsers.add_parser('make', help='make .clang-tidy')
sub.add_argument('--nop', action='store_true', help="don't write anything")
sub.set_defaults(func=handle_make_tidy)
sub = sub_parsers.add_parser('tidy', help='do clang tidy on files')
sub.add_argument('--nop', action='store_true', help="don't do anything")
sub.add_argument('--fix', action='store_true', help="try to fix the source")
sub.add_argument('filter', default=[], nargs='+')
sub.add_argument('--short', action='store_true', help="use shorter output and stop after the first file with warnings")
sub.add_argument('--list', action='store_true', help="also list files in the summary")
sub.add_argument('--no-headers', dest='headers', action='store_false', help="don't tidy headers")
sub.add_argument('--only', nargs='*', default=[])
sub.add_argument('--force', action='store_true', help="Force clang-tidy to run, even if there is a result")
sub.add_argument('--tidy', help='the clang-tidy to use', default='clang-tidy')
sub.set_defaults(func=handle_tidy)
sub = sub_parsers.add_parser('format', help='do clang format on files')
sub.add_argument('--nop', action='store_true', help="don't do anything")
sub.set_defaults(func=handle_format)
sub = sub_parsers.add_parser('ls', help='list files')
sub.add_argument('--new', action='store_true', help="use new lister")
sub.add_argument('--sort', action='store_true', help="sort listing")
sub.set_defaults(func=handle_list)
args = parser.parse_args()
if args.command_name is not None:
args.func(args)
else:
parser.print_help()
##############################################################################
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
|
import numpy as np
import librosa
import six
from os import environ
import sklearn
from sklearn.externals import joblib
import os
import sox
import tempfile as tmp
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from scipy.stats import mode
from collections import namedtuple
import operator
import pandas as pd
from collections import OrderedDict
import json
from scipy.stats import randint as sp_randint
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.datasets import load_digits
TARGET_NAMES = ["piano", "electric_piano", "synthesizer", "violin", "cello",
"acoustic_guitar", "clean_electric_guitar", "distorted_electric_guitar", "electric_bass",
"drum_set", "auxiliary_percussion", "female_singer", "male_singer",
"clarinet", "flute", "trumpet", "saxophone", "banjo"]
MFCC_MEANS_PATH = "resources/mfcc_means.npy"
MFCC_STD_PATH = "resources/mfcc_std.npy"
MFCC_MATRIX_PATH = "resources/mfcc_matrix.npy"
LABEL_MATRIX_PATH = "resources/label_matrix.npy"
MODEL_SAVE_PATH = "resources/instrument_classifier.pkl"
TRAIN_FOLDER = "resources/train_data"
FILE_DICT_PATH = "resources/file_dict.json"
def get_data():
"""
Load the dictionary of data from MedleyDB and Philharmonia.
Parameters
----------
None
Returns
-------
file_dict: dictionary
dictionary of instrument and filepaths of examples from MedleyDB and Philharmonia
"""
with open(FILE_DICT_PATH, 'r') as fp:
file_dict = json.load(fp)
return file_dict
def compute_features(file):
"""
Compute the features of an audio file and return a stacked matrix
Parameters
----------
file: str
file path of audio
Returns
-------
M: array
Matrix of features vs time
y: array
audio time series
fs: int
sampling rate
"""
y, fs = librosa.load(file)
mfcc = np.array(librosa.feature.mfcc(y, sr=fs, n_mfcc=40))
mfcc_delta = np.array(librosa.feature.delta(mfcc))
mfcc_delta_delta = np.array(librosa.feature.delta(mfcc, order=2))
M = np.vstack((mfcc, mfcc_delta, mfcc_delta_delta))
return M, y, fs
def normalize_audio(file):
"""
    Normalize volume and remove silence from an audio file, writing the
    result to a temporary file (the original file on disk is left unchanged).
Parameters
----------
file: str
file path of audio
Returns
-------
None
"""
temp_fpath = tmp.NamedTemporaryFile(suffix=".wav")
tfm = sox.Transformer()
tfm.norm(db_level=-6)
tfm.silence()
tfm.build(file, temp_fpath.name)
def mfcc_and_label(n_instruments=None, train_folder=TRAIN_FOLDER, file_dict=None):
"""
    Retrieves data, loads existing mfcc and label files,
    loops over every file for each instrument in TARGET_NAMES
    to normalize audio and compute features. Matrices are concatenated into a
    master matrix across all audio files. Returns the MFCC and label matrix.
Parameters
----------
    n_instruments: int
        Limits which instruments of TARGET_NAMES data is retrieved for -
        the purpose is to shorten data fetching while debugging the code.
file_dict: dictionary
dictionary of instrument and filepaths of examples from MedleyDB and Philharmonia
Returns
-------
train_mfcc_matrix: array
Matrix of features across all training examples
train_label_matrix: array
        Matrix of labels across all training examples
"""
if file_dict is None:
file_dict = get_data()
train_mfcc_list = []
train_label_list = []
label_index = 0
if n_instruments is None:
instrument_labels = TARGET_NAMES
else:
instrument_labels = TARGET_NAMES[n_instruments:]
for label in instrument_labels:
train_mfcc_file = os.path.join(train_folder, "%s-mfcc-train.npy" % label)
train_label_file = os.path.join(train_folder, "%s-label-train.npy" % label)
if os.path.exists(train_mfcc_file) and os.path.exists(train_label_file):
print "loading existing training file..."
instrument_mfcc_matrix_train = np.load(train_mfcc_file)
instrument_label_matrix_train = np.load(train_label_file)
else:
print "creating new file..."
instrument_mfcc_list_train = []
instrument_label_list_train = []
# loop over files for each instrument
for fpath in file_dict[label]:
normalize_audio(fpath)
M, y, fs = compute_features(fpath)
lab = np.zeros((len(M[0]), )) + label_index
instrument_mfcc_list_train.append(M)
instrument_label_list_train.append(lab)
instrument_mfcc_matrix_train = np.hstack(instrument_mfcc_list_train) #stacking matrices for each audio file
instrument_label_matrix_train = np.hstack(instrument_label_list_train)
print "saving file..."
np.save(train_mfcc_file, instrument_mfcc_matrix_train)
np.save(train_label_file, instrument_label_matrix_train)
train_mfcc_list.append(instrument_mfcc_matrix_train)
train_label_list.append(instrument_label_matrix_train)
label_index = label_index + 1
train_mfcc_matrix = np.hstack(train_mfcc_list).T
train_label_matrix = np.hstack(train_label_list)
return (train_mfcc_matrix, train_label_matrix)
def standardize_matrix(matrix, mean, std):
"""
Standardizes a matrix
    Parameters
    ----------
    matrix: array
        matrix that is being standardized.
    mean: array
        Row vector containing the mean of each column.
    std: array
        Row vector containing the standard deviation of each column.
    Returns
    -------
    matrix_normal: array
        standardized matrix
"""
matrix_normal = (matrix - mean)/std
return matrix_normal
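# Minimal sketch of standardize_matrix on a toy array (values are
# illustrative only):
#   >>> import numpy as np
#   >>> m = np.array([[1.0, 2.0], [3.0, 4.0]])
#   >>> standardize_matrix(m, np.mean(m, axis=0), np.std(m, axis=0))
#   array([[-1., -1.],
#          [ 1.,  1.]])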
# def load_existing_data(train_folder=TRAIN_FOLDER):
# for label in TARGET_NAMES:
# train_mfcc_file = os.path.join(train_folder, "%s-mfcc-train.npy" % label)
# train_label_file = os.path.join(train_folder, "%s-label-train.npy" % label)
# if os.path.exists(train_mfcc_file) and os.path.exists(train_label_file):
# print "loading existing training file..."
# instrument_mfcc_matrix_train = np.load(train_mfcc_file)
# instrument_label_matrix_train = np.load(train_label_file)
# train_mfcc_list.append(instrument_mfcc_matrix_train)
# train_label_list.append(instrument_label_matrix_train)
#STEP 1/2 ------------------------------------
def create_data(n_instruments=None, file_dict=None, train_mfcc_matrix=None, train_label_matrix=None,
mfcc_means_path=MFCC_MEANS_PATH,
mfcc_std_path=MFCC_STD_PATH,
mfcc_matrix_path=MFCC_MATRIX_PATH,
label_matrix_path=LABEL_MATRIX_PATH,
target_names=TARGET_NAMES):
"""
Retrieves feature and label matrix, standardizes the feature matrix,
and saves the normalized feature matrix and label matrix.
    Parameters
    ----------
    n_instruments: int
        number of instruments from TARGET_NAMES to process (passed through to mfcc_and_label)
train_mfcc_matrix: array
feature matrix
train_label_matrix: array
label matrix
mfcc_means_path: str
file path to mean of feature matrix
mfcc_std_path: str
file path to standard deviation of feature matrix
mfcc_matrix_path: str
file path to feature matrix
label_matrix_path: str
file path to label matrix
    target_names: array
        vector of instruments being dealt with
Returns
-------
None
"""
print "creating data..."
if train_mfcc_matrix is None and train_label_matrix is None:
# load_existing_data()
        train_mfcc_matrix, train_label_matrix = mfcc_and_label(n_instruments=n_instruments, file_dict=file_dict)
#STANDARDIZING MFCC MATRIX
train_mfcc_means = np.mean(train_mfcc_matrix, axis = 0)
train_mfcc_std = np.std(train_mfcc_matrix, axis=0)
np.save(mfcc_means_path, train_mfcc_means)
np.save(mfcc_std_path, train_mfcc_std)
train_mfcc_matrix_normal = standardize_matrix(train_mfcc_matrix, train_mfcc_means, train_mfcc_std)
np.save(mfcc_matrix_path, train_mfcc_matrix_normal)
np.save(label_matrix_path, train_label_matrix)
#STEP 2/2 ------------------------------------
def train(mfcc_matrix_path=MFCC_MATRIX_PATH,
label_matrix_path=LABEL_MATRIX_PATH,
model_save_path=MODEL_SAVE_PATH):
"""
Trains the RandomForest model using normalized feature and label matrix.
Parameters
----------
mfcc_matrix_path: str
file path to feature matrix
label_matrix_path: str
file path to label matrix
model_save_path: str
file path to save location of classifier model
Returns
-------
clf: classifier
Trained RandomForest Classifier
"""
print "performing cross validation and training model..."
train_mfcc_matrix_normal = np.load(mfcc_matrix_path)
train_label_matrix = np.load(label_matrix_path)
x_train, y_train = (train_mfcc_matrix_normal, train_label_matrix)
clf = RandomForestClassifier()
# specify parameters and distributions to sample from
param_dist = {"max_depth": [20, 100, 150, 300, 500, None],
"n_estimators": [100, 400],
"class_weight": [None, "balanced"],
"max_features": sp_randint(1, 120),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
random_search.fit(x_train, y_train)
print ("Overall training score = " + str(random_search.score(x_train, y_train)))
best_clf = random_search.best_estimator_
joblib.dump(best_clf, model_save_path)
return best_clf
def instrument(predictions):
"""
Predicts the instrument by calculating the mode of the vector of predictions.
Parameters
----------
predictions: array
vector of predictions for each slice of feature
Returns
-------
guess: str
        final prediction of instrument in audio file
guess_dict: dictionary
        dictionary of the probability of each instrument in TARGET_NAMES being the instrument in the audio file
"""
unique_elements, counts = np.unique(predictions, return_counts=True)
frequency_predictions = [0 for i in range(len(TARGET_NAMES))]
for i, j in zip(unique_elements, range(len(counts))):
frequency_predictions[int(i)] = counts[int(j)]/float(len(predictions))
guess_dict = {}
instrument_probability = zip(TARGET_NAMES, frequency_predictions)
for name, probability in instrument_probability:
guess_dict[name] = round(probability, 3)
mode_predictions = mode(predictions)
guess = TARGET_NAMES[int(mode_predictions[0])]
return guess, guess_dict
def predict(audio_file,
mfcc_means_path=MFCC_MEANS_PATH,
mfcc_std_path=MFCC_STD_PATH,
model_save_path=MODEL_SAVE_PATH):
"""
Tests the classifier on an audio file given by user.
Prints the guess and a table of probabilities for each instrument.
Parameters
----------
audio_file: str
file path to audio file that the classifier will act upon
mfcc_means_path: str
file path of feature means
mfcc_std_path: str
file path of feature standard deviations
model_save_path: str
file path of classification model
Returns
-------
    guess: str
        final prediction of instrument in audio file
    guess_dict: dictionary
        probability of each instrument in TARGET_NAMES being the instrument in the audio file (also printed sorted by probability)
"""
clf = joblib.load(model_save_path)
train_mfcc_means = np.load(mfcc_means_path)
train_mfcc_std = np.load(mfcc_std_path)
# normalizing volume and compute MFCC
normalize_audio(audio_file)
M, y, fs = compute_features(audio_file)
audio_mfcc_matrix_normal = standardize_matrix(M.T, train_mfcc_means, train_mfcc_std)
# np.save("/Users/hmyip/Documents/repositories/instclf/tests/data/piano_matrix.npy", audio_mfcc_matrix_normal)
#prediction with mode
predictions = clf.predict(audio_mfcc_matrix_normal)
guess, guess_dict = instrument(predictions)
print ("guess: " + str(guess))
sorted_guesses = sorted(guess_dict.items(), key=lambda item: (item[1], item[0]), reverse=True)
for key, value in sorted_guesses:
print ("%s: %s" % (key, value))
return guess, guess_dict
if __name__ == "__main__":
    # Guard the module-level calls so that importing this file does not
    # kick off a full feature-extraction and training run.
    create_data()
    train()
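    # Example one-off prediction (the audio path below is hypothetical):
    #   guess, guess_dict = predict("resources/train_data/example_piano.wav")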
# #prediction with probabilities
# def predict_prob(classifier, matrix):
# predictions2 = classifier.predict_proba(matrix)
# return predictions2
# def instrument2(predictions):
# avg_predictions = np.round(predictions.mean(axis=0), 3)
# max_prediction = np.argmax(avg_predictions)
# guess2 = TARGET_NAMES[max_prediction]
# print avg_predictions
# guess_dict = {}
# instrument_probability = zip(TARGET_NAMES, avg_predictions)
# for name, probability in instrument_probability:
# guess_dict[name] = round(probability, 3)
# sorted_guesses = OrderedDict(sorted(guess_dict.items(), key=operator.itemgetter(1), reverse=True))
# return sorted_guesses, guess2
# predictions2 = clf.predict_proba(audio_mfcc_matrix_normal)
# sorted_guesses, guess2 = instrument2(predictions2)
# print pd.DataFrame(sorted_guesses.items(), columns = ["instrument", "percent chance"])
# print ("guess2: " + str(guess2))
# #method 1
# plt.figure()
# plt.subplot(1,2,1)
# plt.plot(np.arange(len(TARGET_NAMES)), predictions, "o")
# plt.xticks(np.arange(len(TARGET_NAMES)), TARGET_NAMES, rotation="vertical")
# #method 2
# plt.subplot(1,2,2)
# plt.plot(np.arange(len(TARGET_NAMES)), avg_predictions, "o")
# plt.xticks(np.arange(len(TARGET_NAMES)), TARGET_NAMES, rotation="vertical")
# plt.show()
|
|
# Copyright 2015 HGST
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Desc : Driver to store Cinder volumes using HGST Flash Storage Suite
Require : HGST Flash Storage Suite
Author : Earle F. Philhower, III <earle.philhower.iii@hgst.com>
"""
import grp
import json
import math
import os
import pwd
import six
import socket
import string
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
from cinder import exception
from cinder.i18n import _
from cinder.i18n import _LE
from cinder.i18n import _LW
from cinder.image import image_utils
from cinder import interface
from cinder.volume import driver
from cinder.volume import utils as volutils
LOG = logging.getLogger(__name__)
hgst_opts = [
cfg.StrOpt('hgst_net',
default='Net 1 (IPv4)',
help='Space network name to use for data transfer'),
cfg.StrOpt('hgst_storage_servers',
default='os:gbd0',
help='Comma separated list of Space storage servers:devices. '
'ex: os1_stor:gbd0,os2_stor:gbd0'),
cfg.StrOpt('hgst_redundancy',
default='0',
help='Should spaces be redundantly stored (1/0)'),
cfg.StrOpt('hgst_space_user',
default='root',
help='User to own created spaces'),
cfg.StrOpt('hgst_space_group',
default='disk',
help='Group to own created spaces'),
cfg.StrOpt('hgst_space_mode',
default='0600',
help='UNIX mode for created spaces'),
]
CONF = cfg.CONF
CONF.register_opts(hgst_opts)
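# Illustrative cinder.conf backend section for this driver. The values are
# examples only, and the volume_driver path assumes this file lives at
# cinder/volume/drivers/hgst.py:
#
#   [hgst-backend]
#   volume_driver = cinder.volume.drivers.hgst.HGSTDriver
#   volume_backend_name = hgst
#   hgst_net = Net 1 (IPv4)
#   hgst_storage_servers = os1_stor:gbd0,os2_stor:gbd0
#   hgst_redundancy = 1
#   hgst_space_user = root
#   hgst_space_group = disk
#   hgst_space_mode = 0600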
@interface.volumedriver
class HGSTDriver(driver.VolumeDriver):
"""This is the Class to set in cinder.conf (volume_driver).
Implements a Cinder Volume driver which creates a HGST Space for each
Cinder Volume or Snapshot requested. Use the vgc-cluster CLI to do
all management operations.
The Cinder host will nominally have all Spaces made visible to it,
while individual compute nodes will only have Spaces connected to KVM
instances connected.
"""
VERSION = '1.0.0'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "HGST_Solutions_CI"
VGCCLUSTER = 'vgc-cluster'
SPACEGB = units.G - 16 * units.M # Workaround for shrinkage Bug 28320
BLOCKED = "BLOCKED" # Exit code when a command is blocked
def __init__(self, *args, **kwargs):
"""Initialize our protocol descriptor/etc."""
super(HGSTDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(hgst_opts)
self._vgc_host = None
self.check_for_setup_error()
self._stats = {'driver_version': self.VERSION,
'reserved_percentage': 0,
'storage_protocol': 'hgst',
'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
'vendor_name': 'HGST',
}
backend_name = self.configuration.safe_get('volume_backend_name')
self._stats['volume_backend_name'] = backend_name or 'hgst'
self.update_volume_stats()
def _log_cli_err(self, err):
"""Dumps the full command output to a logfile in error cases."""
LOG.error(_LE("CLI fail: '%(cmd)s' = %(code)s\nout: %(stdout)s\n"
"err: %(stderr)s"),
{'cmd': err.cmd, 'code': err.exit_code,
'stdout': err.stdout, 'stderr': err.stderr})
def _find_vgc_host(self):
"""Finds vgc-cluster hostname for this box."""
params = [self.VGCCLUSTER, "domain-list", "-1"]
try:
out, unused = self._execute(*params, run_as_root=True)
except processutils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = _("Unable to get list of domain members, check that "
"the cluster is running.")
raise exception.VolumeDriverException(message=msg)
domain = out.splitlines()
params = ["ip", "addr", "list"]
try:
out, unused = self._execute(*params, run_as_root=False)
except processutils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = _("Unable to get list of IP addresses on this host, "
"check permissions and networking.")
raise exception.VolumeDriverException(message=msg)
nets = out.splitlines()
for host in domain:
try:
ip = socket.gethostbyname(host)
for l in nets:
x = l.strip()
if x.startswith("inet %s/" % ip):
return host
except socket.error:
pass
msg = _("Current host isn't part of HGST domain.")
raise exception.VolumeDriverException(message=msg)
def _hostname(self):
"""Returns hostname to use for cluster operations on this box."""
if self._vgc_host is None:
self._vgc_host = self._find_vgc_host()
return self._vgc_host
def _make_server_list(self):
"""Converts a comma list into params for use by HGST CLI."""
csv = self.configuration.safe_get('hgst_storage_servers')
servers = csv.split(",")
params = []
for server in servers:
params.append('-S')
params.append(six.text_type(server))
return params
def _make_space_name(self, name):
"""Generates the hashed name for the space from the name.
This must be called in a locked context as there are race conditions
where 2 contexts could both pick what they think is an unallocated
space name, and fail later on due to that conflict.
"""
# Sanitize the name string
valid_chars = "-_.%s%s" % (string.ascii_letters, string.digits)
name = ''.join(c for c in name if c in valid_chars)
name = name.strip(".") # Remove any leading .s from evil users
name = name or "space" # In case of all illegal chars, safe default
# Start out with just the name, truncated to 14 characters
outname = name[0:13]
# See what names already defined
params = [self.VGCCLUSTER, "space-list", "--name-only"]
try:
out, unused = self._execute(*params, run_as_root=True)
except processutils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = _("Unable to get list of spaces to make new name. Please "
"verify the cluster is running.")
raise exception.VolumeDriverException(message=msg)
names = out.splitlines()
# And anything in /dev/* is also illegal
names += os.listdir("/dev") # Do it the Python way!
names += ['.', '..'] # Not included above
# While there's a conflict, add incrementing digits until it passes
itr = 0
while outname in names:
itrstr = six.text_type(itr)
outname = outname[0:13 - len(itrstr)] + itrstr
itr += 1
return outname
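    # Worked example (hypothetical input): a display name of "vol.01 for QA!"
    # is sanitized to "vol.01forQA" and truncated to 13 characters; if that
    # name already exists as a space or under /dev, trailing digits are
    # substituted ("vol.01forQA0", "vol.01forQA1", ...) until a free name
    # within the 13-character limit is found.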
def _get_space_size_redundancy(self, space_name):
"""Parse space output to get allocated size and redundancy."""
params = [self.VGCCLUSTER, "space-list", "-n", space_name, "--json"]
try:
out, unused = self._execute(*params, run_as_root=True)
except processutils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = _("Unable to get information on space %(space)s, please "
"verify that the cluster is running and "
"connected.") % {'space': space_name}
raise exception.VolumeDriverException(message=msg)
ret = json.loads(out)
retval = {}
retval['redundancy'] = int(ret['resources'][0]['redundancy'])
retval['sizeBytes'] = int(ret['resources'][0]['sizeBytes'])
return retval
def _adjust_size_g(self, size_g):
"""Adjust space size to next legal value because of redundancy."""
# Extending requires expanding to a multiple of the # of
# storage hosts in the cluster
count = len(self._make_server_list()) // 2 # Remove -s from count
if size_g % count:
size_g = int(size_g + count)
size_g -= size_g % count
return int(math.ceil(size_g))
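    # Worked example (illustrative): with 3 storage servers configured,
    # count == 3; a requested size of 10 GB gives 10 % 3 == 1, so the size is
    # bumped to 13 and then trimmed to 12, the smallest multiple of the
    # server count at or above the request.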
def do_setup(self, context):
pass
def _get_space_name(self, volume):
"""Pull name of /dev/<space> from the provider_id."""
try:
return volume.get('provider_id')
except Exception:
return '' # Some error during create, may be able to continue
def _handle_blocked(self, err, msg):
"""Safely handle a return code of BLOCKED from a cluster command.
Handle the case where a command is in BLOCKED state by trying to
cancel it. If the cancel fails, then the command actually did
complete. If the cancel succeeds, then throw the original error
back up the stack.
"""
if (err.stdout is not None) and (self.BLOCKED in err.stdout):
# Command is queued but did not complete in X seconds, so
# we will cancel it to keep things sane.
request = err.stdout.split('\n', 1)[0].strip()
params = [self.VGCCLUSTER, 'request-cancel']
params += ['-r', six.text_type(request)]
throw_err = False
try:
self._execute(*params, run_as_root=True)
# Cancel succeeded, the command was aborted
# Send initial exception up the stack
LOG.error(_LE("VGC-CLUSTER command blocked and cancelled."))
# Can't throw it here, the except below would catch it!
throw_err = True
except Exception:
# The cancel failed because the command was just completed.
# That means there was no failure, so continue with Cinder op
pass
if throw_err:
self._log_cli_err(err)
msg = _("Command %(cmd)s blocked in the CLI and was "
"cancelled") % {'cmd': six.text_type(err.cmd)}
raise exception.VolumeDriverException(message=msg)
else:
# Some other error, just throw it up the chain
self._log_cli_err(err)
raise exception.VolumeDriverException(message=msg)
def _add_cinder_apphost(self, spacename):
"""Add this host to the apphost list of a space."""
# Connect to source volume
params = [self.VGCCLUSTER, 'space-set-apphosts']
params += ['-n', spacename]
params += ['-A', self._hostname()]
params += ['--action', 'ADD'] # Non-error to add already existing
try:
self._execute(*params, run_as_root=True)
except processutils.ProcessExecutionError as err:
msg = _("Unable to add Cinder host to apphosts for space "
"%(space)s") % {'space': spacename}
self._handle_blocked(err, msg)
@lockutils.synchronized('devices', 'cinder-hgst-')
def create_volume(self, volume):
"""API entry to create a volume on the cluster as a HGST space.
Creates a volume, adjusting for GiB/GB sizing. Locked to ensure we
don't have race conditions on the name we pick to use for the space.
"""
        # For ease of debugging, use friendly name if it exists
volname = self._make_space_name(volume['display_name']
or volume['name'])
volnet = self.configuration.safe_get('hgst_net')
volbytes = volume['size'] * units.Gi # OS=Base2, but HGST=Base10
volsize_gb_cinder = int(math.ceil(float(volbytes) /
float(self.SPACEGB)))
volsize_g = self._adjust_size_g(volsize_gb_cinder)
params = [self.VGCCLUSTER, 'space-create']
params += ['-n', six.text_type(volname)]
params += ['-N', six.text_type(volnet)]
params += ['-s', six.text_type(volsize_g)]
params += ['--redundancy', six.text_type(
self.configuration.safe_get('hgst_redundancy'))]
params += ['--user', six.text_type(
self.configuration.safe_get('hgst_space_user'))]
params += ['--group', six.text_type(
self.configuration.safe_get('hgst_space_group'))]
params += ['--mode', six.text_type(
self.configuration.safe_get('hgst_space_mode'))]
params += self._make_server_list()
params += ['-A', self._hostname()] # Make it visible only here
try:
self._execute(*params, run_as_root=True)
except processutils.ProcessExecutionError as err:
msg = _("Error in space-create for %(space)s of size "
"%(size)d GB") % {'space': volname,
'size': int(volsize_g)}
self._handle_blocked(err, msg)
# Stash away the hashed name
provider = {}
provider['provider_id'] = volname
return provider
def update_volume_stats(self):
"""Parse the JSON output of vgc-cluster to find space available."""
params = [self.VGCCLUSTER, "host-storage", "--json"]
try:
out, unused = self._execute(*params, run_as_root=True)
ret = json.loads(out)
cap = ret["totalCapacityBytes"] // units.Gi
used = ret["totalUsedBytes"] // units.Gi
avail = cap - used
if int(self.configuration.safe_get('hgst_redundancy')) == 1:
cap = cap // 2
avail = avail // 2
# Reduce both by 1 GB due to BZ 28320
if cap > 0:
cap = cap - 1
if avail > 0:
avail = avail - 1
except processutils.ProcessExecutionError as err:
# Could be cluster still starting up, return unknown for now
LOG.warning(_LW("Unable to poll cluster free space."))
self._log_cli_err(err)
cap = 'unknown'
avail = 'unknown'
self._stats['free_capacity_gb'] = avail
self._stats['total_capacity_gb'] = cap
self._stats['reserved_percentage'] = 0
def get_volume_stats(self, refresh=False):
"""Return Volume statistics, potentially cached copy."""
if refresh:
self.update_volume_stats()
return self._stats
def create_cloned_volume(self, volume, src_vref):
"""Create a cloned volume from an existing one.
No cloning operation in the current release so simply copy using
DD to a new space. This could be a lengthy operation.
"""
# Connect to source volume
volname = self._get_space_name(src_vref)
self._add_cinder_apphost(volname)
# Make new volume
provider = self.create_volume(volume)
self._add_cinder_apphost(provider['provider_id'])
# And copy original into it...
info = self._get_space_size_redundancy(volname)
volutils.copy_volume(
self.local_path(src_vref),
"/dev/" + provider['provider_id'],
info['sizeBytes'] // units.Mi,
self.configuration.volume_dd_blocksize,
execute=self._execute)
# That's all, folks!
return provider
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'])
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def delete_volume(self, volume):
"""Delete a Volume's underlying space."""
volname = self._get_space_name(volume)
if volname:
params = [self.VGCCLUSTER, 'space-delete']
params += ['-n', six.text_type(volname)]
# This can fail benignly when we are deleting a snapshot
try:
self._execute(*params, run_as_root=True)
except processutils.ProcessExecutionError as err:
LOG.warning(_LW("Unable to delete space %(space)s"),
{'space': volname})
self._log_cli_err(err)
else:
# This can be benign when we are deleting a snapshot
LOG.warning(_LW("Attempted to delete a space that's not there."))
def _check_host_storage(self, server):
if ":" not in server:
msg = _("hgst_storage server %(svr)s not of format "
"<host>:<dev>") % {'svr': server}
raise exception.VolumeDriverException(message=msg)
h, b = server.split(":")
try:
params = [self.VGCCLUSTER, 'host-storage', '-h', h]
self._execute(*params, run_as_root=True)
except processutils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = _("Storage host %(svr)s not detected, verify "
"name") % {'svr': six.text_type(server)}
raise exception.VolumeDriverException(message=msg)
def check_for_setup_error(self):
"""Throw an exception if configuration values/setup isn't okay."""
# Verify vgc-cluster exists and is executable by cinder user
try:
params = [self.VGCCLUSTER, '--version']
self._execute(*params, run_as_root=True)
except processutils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = _("Cannot run vgc-cluster command, please ensure software "
"is installed and permissions are set properly.")
raise exception.VolumeDriverException(message=msg)
# Checks the host is identified with the HGST domain, as well as
# that vgcnode and vgcclustermgr services are running.
self._vgc_host = None
self._hostname()
# Redundancy better be 0 or 1, otherwise no comprendo
r = six.text_type(self.configuration.safe_get('hgst_redundancy'))
if r not in ["0", "1"]:
msg = _("hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in "
"cinder.conf.")
raise exception.VolumeDriverException(message=msg)
# Verify user and group exist or we can't connect volumes
try:
pwd.getpwnam(self.configuration.safe_get('hgst_space_user'))
grp.getgrnam(self.configuration.safe_get('hgst_space_group'))
except KeyError as err:
msg = _("hgst_group %(grp)s and hgst_user %(usr)s must map to "
"valid users/groups in cinder.conf") % {
'grp': self.configuration.safe_get('hgst_space_group'),
'usr': self.configuration.safe_get('hgst_space_user')}
raise exception.VolumeDriverException(message=msg)
# Verify mode is a nicely formed octal or integer
try:
int(self.configuration.safe_get('hgst_space_mode'))
except Exception as err:
msg = _("hgst_space_mode must be an octal/int in cinder.conf")
raise exception.VolumeDriverException(message=msg)
# Validate network maps to something we know about
try:
params = [self.VGCCLUSTER, 'network-list']
params += ['-N', self.configuration.safe_get('hgst_net')]
self._execute(*params, run_as_root=True)
except processutils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = _("hgst_net %(net)s specified in cinder.conf not found "
"in cluster") % {
'net': self.configuration.safe_get('hgst_net')}
raise exception.VolumeDriverException(message=msg)
        # Storage servers require us to split them up and check each one
sl = self.configuration.safe_get('hgst_storage_servers')
if (sl is None) or (six.text_type(sl) == ""):
msg = _("hgst_storage_servers must be defined in cinder.conf")
raise exception.VolumeDriverException(message=msg)
servers = sl.split(",")
# Each server must be of the format <host>:<storage> w/host in domain
for server in servers:
self._check_host_storage(server)
# We made it here, we should be good to go!
return True
def create_snapshot(self, snapshot):
"""Create a snapshot volume.
We don't yet support snaps in SW so make a new volume and dd the
source one into it. This could be a lengthy operation.
"""
origvol = {}
origvol['name'] = snapshot['volume_name']
origvol['size'] = snapshot['volume_size']
origvol['id'] = snapshot['volume_id']
origvol['provider_id'] = snapshot.get('volume').get('provider_id')
# Add me to the apphosts so I can see the volume
self._add_cinder_apphost(self._get_space_name(origvol))
# Make snapshot volume
snapvol = {}
snapvol['display_name'] = snapshot['display_name']
snapvol['name'] = snapshot['name']
snapvol['size'] = snapshot['volume_size']
snapvol['id'] = snapshot['id']
provider = self.create_volume(snapvol)
# Create_volume attaches the volume to this host, ready to snapshot.
# Copy it using dd for now, we don't have real snapshots
# We need to copy the entire allocated volume space, Nova will allow
# full access, even beyond requested size (when our volume is larger
# due to our ~1B byte alignment or cluster makeup)
info = self._get_space_size_redundancy(origvol['provider_id'])
volutils.copy_volume(
self.local_path(origvol),
"/dev/" + provider['provider_id'],
info['sizeBytes'] // units.Mi,
self.configuration.volume_dd_blocksize,
execute=self._execute)
return provider
def delete_snapshot(self, snapshot):
"""Delete a snapshot. For now, snapshots are full volumes."""
self.delete_volume(snapshot)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create volume from a snapshot, but snaps still full volumes."""
return self.create_cloned_volume(volume, snapshot)
def extend_volume(self, volume, new_size):
"""Extend an existing volume.
        We may not actually need to resize the space because its size is
always rounded up to a function of the GiB/GB and number of storage
nodes.
"""
volname = self._get_space_name(volume)
info = self._get_space_size_redundancy(volname)
volnewbytes = new_size * units.Gi
new_size_g = math.ceil(float(volnewbytes) / float(self.SPACEGB))
wantedsize_g = self._adjust_size_g(new_size_g)
havesize_g = (info['sizeBytes'] // self.SPACEGB)
if havesize_g >= wantedsize_g:
return # Already big enough, happens with redundancy
else:
# Have to extend it
delta = int(wantedsize_g - havesize_g)
params = [self.VGCCLUSTER, 'space-extend']
params += ['-n', six.text_type(volname)]
params += ['-s', six.text_type(delta)]
params += self._make_server_list()
try:
self._execute(*params, run_as_root=True)
except processutils.ProcessExecutionError as err:
msg = _("Error in space-extend for volume %(space)s with "
"%(size)d additional GB") % {'space': volname,
'size': delta}
self._handle_blocked(err, msg)
def initialize_connection(self, volume, connector):
"""Return connection information.
Need to return noremovehost so that the Nova host
doesn't accidentally remove us from the apphost list if it is
running on the same host (like in devstack testing).
"""
hgst_properties = {'name': volume['provider_id'],
'noremovehost': self._hostname()}
return {'driver_volume_type': 'hgst',
'data': hgst_properties}
def local_path(self, volume):
"""Query the provider_id to figure out the proper devnode."""
return "/dev/" + self._get_space_name(volume)
def create_export(self, context, volume, connector):
# Not needed for spaces
pass
def remove_export(self, context, volume):
# Not needed for spaces
pass
def terminate_connection(self, volume, connector, **kwargs):
# Not needed for spaces
pass
def ensure_export(self, context, volume):
# Not needed for spaces
pass
|
|
#
# File: utilities.py
#
# File: data_acquisition.py
# Class for monitoring data acquisition
#
import time
import json
class Data_Acquisition(object):
def __init__(self):
pass
    def process_fifteen_second_data(self, chainFlowHandle, chainObj, parameters, event):
print ("received 15 second tick")
self.common_process( self.fifteen_list, self.fifteen_store )
return "CONTINUE"
    def process_minute_data(self, chainFlowHandle, chainObj, parameters, event):
print ("received minute_tick")
self.common_process( self.minute_list, self.minute_store )
return "CONTINUE"
    def process_hour_data(self, chainFlowHandle, chainObj, parameters, event):
print ("received hour tick")
self.common_process( self.hour_list , self.hour_store)
return "CONTINUE"
    def process_daily_data(self, chainFlowHandle, chainObj, parameters, event):
print ("received day tick")
self.common_process( self.daily_list , self.daily_store )
return "CONTINUE"
def common_process( self, data_list , store_element ):
if len(data_list) == 0:
return
#print( "data_list", store_element['measurement'] )
data_dict = {}
for i in data_list:
#print(i)
temp_data = self.slave_interface( i)
data_dict[i["name"]] = temp_data
data_dict["namespace"] = store_element["namespace"]
data_dict["time_stamp"] = time.strftime( "%b %d %Y %H:%M:%S",time.localtime(time.time()))
data_json = json.dumps(data_dict)
redis_key = store_element["measurement"]
redis_array_length = store_element["length"]
#print( "redis_key",redis_key,redis_array_length)
self.redis_handle.lpush(redis_key,data_json)
self.redis_handle.ltrim(redis_key,0,redis_array_length)
#print( "print array length", self.redis_handle.llen(redis_key))
def execute_init_tags( self, data_list ):
for i in data_list:
if "init_tag" in i == True:
self.gm.execute_cb_handlers( i["init_tag"][0], None , i["init_tag"])
def slave_interface( self, element_descriptor ):
action_function = self.load_slave_element( element_descriptor )
if action_function != None:
# find modbus address
modbus_address = self.slave_dict[element_descriptor["modbus_remote"]]["modbus_address"]
return_value = action_function( modbus_address, element_descriptor["parameters"])
else:
return_value = None
if "exec_tag" in element_descriptor:
exec_tag = element_descriptor["exec_tag"]
return_value = self.gm.execute_cb_handlers( exec_tag[0], return_value,exec_tag )
else:
pass
return return_value
def load_slave_element(self, list_item):
return_value = None
remote = list_item["modbus_remote"]
if remote in self.slave_dict:
slave_element = self.slave_dict[remote]
slave_class = slave_element["class"]
m_tags = slave_class.m_tags
if list_item["m_tag"] in m_tags:
return_value = m_tags[list_item["m_tag"]]
return return_value
def verify_slave_tags( self):
for i in self.daily_list:
self.verify_slave_element(i)
for i in self.hour_list:
self.verify_slave_element(i)
for i in self.minute_list:
self.verify_slave_element(i)
for i in self.fifteen_list:
self.verify_slave_element(i)
def verify_slave_element(self, list_item):
try:
remote = list_item["modbus_remote"]
if remote != "skip_controller":
slave_element = self.slave_dict[remote]
slave_class = slave_element["class"]
m_tags = slave_class.m_tags
m_tag_function = m_tags[list_item["m_tag"]]
if "init_tag" in list_item:
init_tag = list_item["init_tag"][0]
if self.gm.verify_handler( init_tag ) == False:
raise ValueError("Bad init tag "+list_item["init_tag"])
if "exec_tag" in list_item:
exec_tag = list_item["exec_tag"][0]
if self.gm.verify_handler( exec_tag ) == False:
raise ValueError("Bad exec tag "+list_item["exec_tag"] )
except:
print( "list_item",list_item)
raise
def add_chains( cf,data_acquisition ):
cf.define_chain("fifteen_second_list",True)
cf.insert.one_step( data_acquisition.process_fifteen_second_data )
cf.insert.wait_event_count( event ="TIME_TICK", count = 15 )
cf.insert.reset()
cf.define_chain("minute_list",True)
cf.insert.wait_event_count( event ="MINUTE_TICK" )
cf.insert.one_step( data_acquisition.process_minute_data)
cf.insert.reset()
cf.define_chain("hour_list",True)
cf.insert.wait_event_count( event ="HOUR_TICK" )
cf.insert.one_step( data_acquisition.process_hour_data)
cf.insert.reset()
cf.define_chain("daily_list",True)
cf.insert.wait_event_count( event ="DAY_TICK" )
cf.insert.one_step( data_acquisition.process_daily_data)
cf.insert.reset()
def construct_class( redis_handle,
gm,instrument,
remote_classes,
fifteen_store,
minute_store,
hour_store,
daily_store,
fifteen_list,
minute_list,
hour_list,
daily_list ):
#
# Adding in graph call back handlers
#
#
#
#queue_name = status_stores[0]["queue_name"]
slave_nodes = list(gm.match_terminal_relationship( "REMOTE_UNIT"))
slave_dict = {}
for i in slave_nodes:
class_inst = remote_classes.find_class( i["type"] )
slave_dict[i["name"]] = { "modbus_address": i["modbus_address"], "type":i["type"], "class":class_inst }
data_acquisition = Data_Acquisition( )
data_acquisition.redis_handle = redis_handle
data_acquisition.gm = gm
data_acquisition.minute_list = minute_list
data_acquisition.hour_list = hour_list
data_acquisition.daily_list = daily_list
data_acquisition.minute_store = minute_store
data_acquisition.hour_store = hour_store
data_acquisition.daily_store = daily_store
data_acquisition.instrument = instrument
data_acquisition.remote_classes = remote_classes
#data_acquisition.status_queue_class = status_queue_class
data_acquisition.slave_dict = slave_dict
data_acquisition.fifteen_store = fifteen_store
data_acquisition.fifteen_list = fifteen_list
#
#
# Verifying graph vs slave nodes
#
#
#
#
data_acquisition.verify_slave_tags()
data_acquisition.execute_init_tags( minute_list )
data_acquisition.execute_init_tags( hour_list )
data_acquisition.execute_init_tags( daily_list )
#data_acquisition.execute_init_tags( fifteen_list )
#data_acquisition.process_hour_data( None, None,None,None )
#data_acquisition.process_minute_data( None, None,None,None )
#data_acquisition.process_daily_data( None,None,None,None )
#data_acquisition.process_hour_data( None, None,None,None )
#data_acquisition.process_minute_data( None, None,None,None )
#data_acquisition.process_daily_data( None,None,None,None )
return data_acquisition
|
|
# -*- coding: utf-8 -*-
# Copyright 2017 DST Controls
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
osisoftpy.tests.test_attribute_insert.py
~~~~~~~~~~~~
Tests for the update_value and update_values functions
in the `osisoftpy.attribute` module.
"""
#Copied and pasted from points insert tests
import osisoftpy
import pytest
import time
from osisoftpy.exceptions import MismatchEntriesError
from datetime import datetime
# https://techsupport.osisoft.com/Troubleshooting/Known-Issues/176830
piserverissue = True
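# The element lookup below is repeated verbatim in every test; a small helper
# along these lines (a sketch, not part of the original suite) would remove
# the duplication:
#
#   def _lookup_insert_attribute(webapi, query, ci, pythonversion):
#       elementname = 'Attributes' if ci == 'test' else (pythonversion or ci)
#       elements = webapi.elements(query='{} AND name:{}'.format(query, elementname))
#       if len(elements) > 1:
#           element = [ele for ele in elements if ci in ele.paths[0]][0]
#       else:
#           element = elements[0]
#       return element['PythonAFInserted']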
# Testing values
# timestamp = *-1h
@pytest.mark.parametrize('query', ['attributename:PythonAFInserted'])
@pytest.mark.parametrize('value', [618])
def test_attribute_update_value_single(webapi, query, now, value, ci, pythonversion):
timestamp = now.shift(hours=-1).format('YYYY-MM-DD HH:mm:ss ZZ')
if ci == 'test':
elementname = 'Attributes'
else:
elementname = pythonversion or ci
parent = ci
elements = webapi.elements(query='{} AND name:{}'.format(query, elementname))
if len(elements) > 1:
element = [ele for ele in elements if parent in ele.paths[0]][0]
else:
element = elements[0]
insertAttribute = element['PythonAFInserted']
insertAttribute.update_value(timestamp, value)
time.sleep(0.5)
v = insertAttribute.recordedattime(time=timestamp)
assert v.value == value
# Testing "good"
# @pytest.mark.skipif(piserverissue, reason='PI Server times out when retrieving archived values')
# timestamp = *-2h
@pytest.mark.parametrize('query', ['attributename:PythonAFInserted'])
@pytest.mark.parametrize('value', [2017])
@pytest.mark.parametrize('good', [True, False])
def test_attribute_update_good_flag(webapi, query, now, value, good, ci, pythonversion):
timestamp = now.shift(hours=-2).format('YYYY-MM-DD HH:mm:ss ZZ')
if ci == 'test':
elementname = 'Attributes'
else:
elementname = pythonversion or ci
parent = ci
elements = webapi.elements(query='{} AND name:{}'.format(query, elementname))
if len(elements) > 1:
element = [ele for ele in elements if parent in ele.paths[0]][0]
else:
element = elements[0]
insertAttribute = element['PythonAFInserted']
insertAttribute.update_value(timestamp, value, good=good)
time.sleep(0.5)
v = insertAttribute.recordedattime(time=timestamp)
assert v.good == good
# Testing "questionable"
# @pytest.mark.skipif(piserverissue, reason='PI Server times out when retrieving archived values')
@pytest.mark.parametrize('query', ['attributename:PythonAFInserted'])
# @pytest.mark.parametrize('timestamp', ['2017-02-01 08:00'])
@pytest.mark.parametrize('value', [2018])
@pytest.mark.parametrize('questionable', [True, False])
def test_attribute_update_questionable_flag(webapi, query, now, value, questionable, ci, pythonversion):
timestamp = now.shift(hours=-3).format('YYYY-MM-DD HH:mm:ss ZZ')
if ci == 'test':
elementname = 'Attributes'
else:
elementname = pythonversion or ci
parent = ci
elements = webapi.elements(query='{} AND name:{}'.format(query, elementname))
if len(elements) > 1:
element = [ele for ele in elements if parent in ele.paths[0]][0]
else:
element = elements[0]
insertAttribute = element['PythonAFInserted']
insertAttribute.update_value(timestamp, value, questionable=questionable)
time.sleep(0.5)
v = insertAttribute.recordedattime(time=timestamp)
assert v.questionable == questionable
# Testing "unitsabbreviation"
# @pytest.mark.skipif(piserverissue, reason='PI Server times out when retrieving archived values')
@pytest.mark.skipif(True, reason="units of measure aren't being written")
@pytest.mark.parametrize('query', ['attributename:PythonAFInserted'])
# @pytest.mark.parametrize('timestamp', ['2017-02-01 06:00'])
@pytest.mark.parametrize('value', [2017])
@pytest.mark.parametrize('unitsabbreviation', ['m', 's', 'm/s', 'A', 'K'])
def test_attribute_update_unitsabbreviation(webapi, query, now, value, unitsabbreviation, ci, pythonversion):
pass
# Testing "updateoption" Replace
# @pytest.mark.skipif(piserverissue, reason='PI Server times out when retrieving archived values')
@pytest.mark.parametrize('query', ['attributename:PythonAFInserted'])
# @pytest.mark.parametrize('timestamp', ['2017-03-01 06:00'])
@pytest.mark.parametrize('value', [289])
@pytest.mark.parametrize('updateoption', ['Replace'])
def test_attribute_update_updatereplace(webapi, query, now, value, updateoption, ci, pythonversion):
timestamp = now.shift(hours=-5).format('YYYY-MM-DD HH:mm:ss ZZ')
if ci == 'test':
elementname = 'Attributes'
else:
elementname = pythonversion or ci
parent = ci
elements = webapi.elements(query='{} AND name:{}'.format(query, elementname))
if len(elements) > 1:
element = [ele for ele in elements if parent in ele.paths[0]][0]
else:
element = elements[0]
insertAttribute = element['PythonAFInserted']
insertAttribute.update_value(timestamp, 0, updateoption='Replace')
time.sleep(0.5)
insertAttribute.update_value(timestamp, value, updateoption=updateoption)
time.sleep(0.5)
v = insertAttribute.recordedattime(time=timestamp)
assert v.value == value
# Testing "updateoption" Insert
# @pytest.mark.skipif(piserverissue, reason='PI Server times out when retrieving archived values')
@pytest.mark.parametrize('query', ['attributename:PythonAFInserted'])
# @pytest.mark.parametrize('timestamp', ['2017-03-02 07:00'])
@pytest.mark.parametrize('value', [345])
@pytest.mark.parametrize('updateoption', ['Insert'])
def test_attribute_update_updateinsert(webapi, query, now, value, updateoption, ci, pythonversion):
timestamp = now.shift(hours=-6).format('YYYY-MM-DD HH:mm:ss ZZ')
if ci == 'test':
elementname = 'Attributes'
else:
elementname = pythonversion or ci
parent = ci
elements = webapi.elements(query='{} AND name:{}'.format(query, elementname))
if len(elements) > 1:
element = [ele for ele in elements if parent in ele.paths[0]][0]
else:
element = elements[0]
insertAttribute = element['PythonAFInserted']
insertAttribute.update_value(timestamp, 0, updateoption='Replace')
time.sleep(0.5)
insertAttribute.update_value(timestamp, value, updateoption=updateoption)
time.sleep(0.5)
v = insertAttribute.recordedattime(time=timestamp)
assert v.value == value
# Testing "updateoption" NoReplace
# @pytest.mark.skipif(piserverissue, reason='PI Server times out when retrieving archived values')
@pytest.mark.parametrize('query', ['attributename:PythonAFInserted'])
# @pytest.mark.parametrize('timestamp', ['2017-03-03 09:00'])
@pytest.mark.parametrize('value', [2000])
@pytest.mark.parametrize('updateoption', ['NoReplace'])
def test_attribute_update_updatenoreplace(webapi, query, now, value, updateoption, ci, pythonversion):
timestamp = now.shift(hours=-7).format('YYYY-MM-DD HH:mm:ss ZZ')
if ci == 'test':
elementname = 'Attributes'
else:
elementname = pythonversion or ci
parent = ci
elements = webapi.elements(query='{} AND name:{}'.format(query, elementname))
if len(elements) > 1:
element = [ele for ele in elements if parent in ele.paths[0]][0]
else:
element = elements[0]
insertAttribute = element['PythonAFInserted']
insertAttribute.update_value(timestamp, 0, updateoption='Replace')
time.sleep(0.5)
insertAttribute.update_value(timestamp, value, updateoption=updateoption)
time.sleep(0.5)
v = insertAttribute.recordedattime(time=timestamp)
assert v.value == 0
# Testing "updateoption" ReplaceOnly
# @pytest.mark.skipif(piserverissue, reason='PI Server times out when retrieving archived values')
@pytest.mark.parametrize('query', ['attributename:PythonAFInserted'])
# @pytest.mark.parametrize('timestamp', ['2017-03-04 10:00'])
@pytest.mark.parametrize('value', [65])
@pytest.mark.parametrize('updateoption', ['ReplaceOnly'])
def test_attribute_update_updatereplaceonly(webapi, query, now, value, updateoption, ci, pythonversion):
timestamp = now.shift(hours=-8).format('YYYY-MM-DD HH:mm:ss ZZ')
if ci == 'test':
elementname = 'Attributes'
else:
elementname = pythonversion or ci
parent = ci
elements = webapi.elements(query='{} AND name:{}'.format(query, elementname))
if len(elements) > 1:
element = [ele for ele in elements if parent in ele.paths[0]][0]
else:
element = elements[0]
insertAttribute = element['PythonAFInserted']
insertAttribute.update_value(timestamp, 0, updateoption='Replace')
time.sleep(0.5)
insertAttribute.update_value(timestamp, value, updateoption=updateoption)
time.sleep(0.5)
v = insertAttribute.recordedattime(time=timestamp)
assert v.value == value
# Testing "updateoption" InsertNoCompression
# @pytest.mark.skipif(piserverissue, reason='PI Server times out when retrieving archived values')
@pytest.mark.parametrize('query', ['attributename:PythonAFInserted'])
# @pytest.mark.parametrize('timestamp', ['2017-03-05 11:00'])
@pytest.mark.parametrize('value', [1])
@pytest.mark.parametrize('updateoption', ['InsertNoCompression'])
def test_attribute_update_updateinsertnocomp(webapi, query, now, value, updateoption, ci, pythonversion):
timestamp = now.shift(hours=-13).format('YYYY-MM-DD HH:mm:ss ZZ')
if ci == 'test':
elementname = 'Attributes'
else:
elementname = pythonversion or ci
parent = ci
elements = webapi.elements(query='{} AND name:{}'.format(query, elementname))
if len(elements) > 1:
element = [ele for ele in elements if parent in ele.paths[0]][0]
else:
element = elements[0]
insertAttribute = element['PythonAFInserted']
insertAttribute.update_value(timestamp, 0, updateoption='Replace')
time.sleep(0.5)
insertAttribute.update_value(timestamp, value, updateoption=updateoption)
time.sleep(0.5)
v = insertAttribute.recordedattime(time=timestamp)
assert v.value == value
# Testing "updateoption" Remove
# @pytest.mark.skipif(piserverissue, reason='PI Server times out when retrieving archived values')
@pytest.mark.parametrize('query', ['attributename:PythonAFInserted'])
# @pytest.mark.parametrize('timestamp', ['2017-03-06 12:00'])
@pytest.mark.parametrize('value', [908])
@pytest.mark.parametrize('updateoption', ['Remove'])
def test_attribute_update_updateremove(webapi, query, now, value, updateoption, ci, pythonversion):
timestamp = now.shift(hours=-9).format('YYYY-MM-DD HH:mm:ss ZZ')
if ci == 'test':
elementname = 'Attributes'
else:
elementname = pythonversion or ci
parent = ci
elements = webapi.elements(query='{} AND name:{}'.format(query, elementname))
if len(elements) > 1:
element = [ele for ele in elements if parent in ele.paths[0]][0]
else:
element = elements[0]
insertAttribute = element['PythonAFInserted']
insertAttribute.update_value(timestamp, 0, updateoption='Replace')
time.sleep(0.5)
insertAttribute.update_value(timestamp, value, updateoption=updateoption)
time.sleep(0.5)
v = insertAttribute.recordedattime(time=timestamp)
assert v.value == 0
#update_values
# Test Multiple Inputs
# @pytest.mark.skipif(piserverissue, reason='PI Server times out when retrieving archived values')
@pytest.mark.parametrize('query', ['attributename:PythonAFInserted'])
# @pytest.mark.parametrize('timestamps', [['2017-03-07 06:00','2017-03-07 07:00','2017-03-07 08:00','2017-03-07 09:00','2017-03-07 10:00']])
@pytest.mark.parametrize('values', [[217,218,216]])
def test_attribute_multiple_update(webapi, query, now, values, ci, pythonversion):
timestamps = [now.shift(hours=-10).format('YYYY-MM-DD HH:mm:ss ZZ'), now.shift(hours=-11).format('YYYY-MM-DD HH:mm:ss ZZ'), now.shift(hours=-12).format('YYYY-MM-DD HH:mm:ss ZZ')]
if ci == 'test':
elementname = 'Attributes'
else:
elementname = pythonversion or ci
parent = ci
elements = webapi.elements(query='{} AND name:{}'.format(query, elementname))
if len(elements) > 1:
element = [ele for ele in elements if parent in ele.paths[0]][0]
else:
element = elements[0]
insertAttribute = element['PythonAFInserted']
insertAttribute.update_values(timestamps, values)
time.sleep(0.5)
for timestamp, value in zip(timestamps, values):
v = insertAttribute.recordedattime(time=timestamp)
assert v.value == value
# Test Mismatched arrays (Timestamps and Values)
@pytest.mark.parametrize('query', ['attributename:PythonAFInserted'])
@pytest.mark.parametrize('timestamps', [['2017-02-01 06:00','2017-02-01 07:00','2017-02-01 08:00','2017-02-01 09:00','2017-02-01 10:00']])
@pytest.mark.parametrize('values', [[2017,2018,2019,2020]])
def test_attribute_multiple_mismatch(webapi, query, timestamps, values, ci, pythonversion):
if ci == 'test':
elementname = 'Attributes'
else:
elementname = pythonversion or ci
parent = ci
elements = webapi.elements(query='{} AND name:{}'.format(query, elementname))
if len(elements) > 1:
element = [ele for ele in elements if parent in ele.paths[0]][0]
else:
element = elements[0]
with pytest.raises(MismatchEntriesError) as err:
insertAttribute = element['PythonAFInserted']
insertAttribute.update_values(timestamps, values)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitAuthorizationsOperations(object):
"""ExpressRouteCircuitAuthorizationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
authorization_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
circuit_name, # type: str
authorization_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified authorization from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
authorization_name=authorization_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
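    # Hypothetical usage sketch: assuming a configured NetworkManagementClient instance
    # named ``network_client``, the LROPoller returned by begin_delete can simply be
    # waited on; the resource names below are illustrative only.
    #
    #     poller = network_client.express_route_circuit_authorizations.begin_delete(
    #         resource_group_name="my-rg",
    #         circuit_name="my-circuit",
    #         authorization_name="my-auth",
    #     )
    #     poller.result()  # blocks until the long-running delete completes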
def get(
self,
resource_group_name, # type: str
circuit_name, # type: str
authorization_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuitAuthorization"
"""Gets the specified authorization from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitAuthorization, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.ExpressRouteCircuitAuthorization
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitAuthorization"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
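    # Hypothetical usage sketch: with the same assumed ``network_client``, get() returns
    # the deserialized model directly rather than a poller.
    #
    #     auth = network_client.express_route_circuit_authorizations.get(
    #         "my-rg", "my-circuit", "my-auth")
    #     print(auth.name)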
def _create_or_update_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
authorization_name, # type: str
authorization_parameters, # type: "_models.ExpressRouteCircuitAuthorization"
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuitAuthorization"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitAuthorization"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(authorization_parameters, 'ExpressRouteCircuitAuthorization')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
circuit_name, # type: str
authorization_name, # type: str
authorization_parameters, # type: "_models.ExpressRouteCircuitAuthorization"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRouteCircuitAuthorization"]
"""Creates or updates an authorization in the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:param authorization_parameters: Parameters supplied to the create or update express route
circuit authorization operation.
:type authorization_parameters: ~azure.mgmt.network.v2019_11_01.models.ExpressRouteCircuitAuthorization
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteCircuitAuthorization or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_11_01.models.ExpressRouteCircuitAuthorization]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitAuthorization"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
authorization_name=authorization_name,
authorization_parameters=authorization_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
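    # Hypothetical usage sketch: assuming the ExpressRouteCircuitAuthorization model from
    # this package's models module, a minimal create call could look like this.
    #
    #     from azure.mgmt.network.v2019_11_01.models import ExpressRouteCircuitAuthorization
    #     poller = network_client.express_route_circuit_authorizations.begin_create_or_update(
    #         "my-rg", "my-circuit", "my-auth", ExpressRouteCircuitAuthorization())
    #     auth = poller.result()  # deserialized ExpressRouteCircuitAuthorization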
def list(
self,
resource_group_name, # type: str
circuit_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AuthorizationListResult"]
"""Gets all authorizations in an express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AuthorizationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_11_01.models.AuthorizationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AuthorizationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AuthorizationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations'} # type: ignore
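    # Hypothetical usage sketch: list() returns an ItemPaged iterator that follows
    # ``next_link`` transparently, so every page can be consumed in one loop.
    #
    #     for auth in network_client.express_route_circuit_authorizations.list(
    #             "my-rg", "my-circuit"):
    #         print(auth.name)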
|
|
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the organization applications process."""
import json
import os
from soc.models import org_app_survey
from tests import profile_utils
from tests import test_utils
from tests import timeline_utils
ORG_APP_SCHEMA = ([
["frm-t1359271954246-item", "frm-t1359347613687-item",
"frm-t1359347873346-item", "frm-t1359347876071-item"],
{
"frm-t1359271954246-item": {
"field_type": "input_text",
"required": True,
"label": "Text field sample question?",
},
"frm-t1359347613687-item": {
"field_type": "textarea",
"required": False,
"label": "Paragraph field sample question?",
},
"frm-t1359347873346-item": {
"field_type": "checkbox",
"required": True,
"other": False,
"values": [{
"value": "Ckbx 1",
"checked": False,
},
{
"value": "Ckbx 2",
"checked": False,
},
{
"value": "Ckbx 3",
"checked": False,
},
{
"value": "Ckbx 4",
"checked": False,
}],
"label": "Checkbox field sample question?",
},
"frm-t1359347876071-item": {
"field_type": "radio",
"required": False,
"other": True,
"values": [{
"value": "Radio1",
"checked": False,
},
{
"value": "Radio2",
"checked": False,
},
{
"value": "Radio3",
"checked": False,
},
{
"value": "Radio4",
"checked": False
}],
"label": "Radio field sample question?",
}
}
])
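# ORG_APP_SCHEMA mirrors the JSON schema stored on an org app survey: the first element
# lists the field ids in display order, the second maps each id to its definition
# (field type, label, required flag and, for choice fields, the selectable values).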
class OrgAppTest(test_utils.GSoCDjangoTestCase):
"""Tests for organization applications to be submitted by organizations.
"""
def setUp(self):
self.init()
def assertOrgAppCreateOrEditTemplatesUsed(self, response):
"""Asserts that all the templates from the org app create were used.
"""
self.assertGSoCTemplatesUsed(response)
self.assertTemplateUsed(response, 'modules/gsoc/org_app/edit.html')
self.assertTemplateUsed(response, 'modules/gsoc/_form.html')
def getOrgAppCreatePostData(self):
"""Returns the post data dictionary for creating or editing org app."""
time_fmt = '%Y-%m-%d %H:%M:%S'
return {
'title': 'GSoC Org App',
'short_name': 'GSoCOA',
'content': 'Organization application for GSoC',
'survey_start': timeline_utils.past().strftime(time_fmt),
'survey_end': timeline_utils.future().strftime(time_fmt),
'schema': json.dumps(ORG_APP_SCHEMA),
}
def testOrgAppCreateOrEditByProgramAdmin(self):
"""Tests that program admin can create an organization application.
"""
# Make sure we do not have an org app for this test.
self.org_app.delete()
user = profile_utils.seedNDBUser(host_for=[self.program])
profile_utils.loginNDB(user)
url = '/gsoc/org/application/edit/' + self.gsoc.key().name()
response = self.get(url)
self.assertResponseOK(response)
self.assertOrgAppCreateOrEditTemplatesUsed(response)
org_app_key_name = 'gsoc_program/%s/orgapp' % (self.gsoc.key().name(),)
org_app = org_app_survey.OrgAppSurvey.get_by_key_name(org_app_key_name)
self.assertIsNone(org_app)
response = self.post(url, self.getOrgAppCreatePostData())
print response.content
self.assertResponseRedirect(response, url + '?validated')
org_app = org_app_survey.OrgAppSurvey.get_by_key_name(org_app_key_name)
self.assertNotEqual(org_app, None)
def testOrgAppCreateOrEditByNonUser(self):
"""Tests that a non-user cannot create an organization application.
"""
# Make sure we do not have an org app for this test.
self.org_app.delete()
current_logged_in_account = os.environ.get('USER_EMAIL', None)
try:
os.environ['USER_EMAIL'] = ''
url = '/gsoc/org/application/edit/' + self.gsoc.key().name()
response = self.get(url)
self.assertResponseRedirect(response)
expected_redirect_address = ('https://www.google.com/accounts/Login?'
+ 'continue=http%3A//some.testing.host.tld' + url)
actual_redirect_address = response.get('location', None)
self.assertEqual(expected_redirect_address, actual_redirect_address)
response = self.post(url, self.getOrgAppCreatePostData())
actual_redirect_address = response.get('location', None)
self.assertEqual(expected_redirect_address, actual_redirect_address)
finally:
if current_logged_in_account is None:
del os.environ['USER_EMAIL']
else:
os.environ['USER_EMAIL'] = current_logged_in_account
def testOrgAppCreateOrEditByUserNoRole(self):
"""Tests that a user with no role cannot create an organization application.
"""
# Make sure we do not have an org app for this test.
self.org_app.delete()
url = '/gsoc/org/application/edit/' + self.gsoc.key().name()
response = self.get(url)
self.assertResponseForbidden(response)
org_app_key_name = 'gsoc_program/%s/orgapp' % (self.gsoc.key().name(),)
org_app = org_app_survey.OrgAppSurvey.get_by_key_name(org_app_key_name)
self.assertIsNone(org_app)
response = self.post(url, self.getOrgAppCreatePostData())
self.assertResponseForbidden(response)
org_app = org_app_survey.OrgAppSurvey.get_by_key_name(org_app_key_name)
self.assertIsNone(org_app)
def testOrgAppCreateOrEditByOrgAdmin(self):
"""Tests that an org admin cannot create an organization application.
"""
# Make sure we do not have an org app for this test.
self.org_app.delete()
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
url = '/gsoc/org/application/edit/' + self.gsoc.key().name()
response = self.get(url)
self.assertResponseForbidden(response)
org_app_key_name = 'gsoc_program/%s/orgapp' % (self.gsoc.key().name(),)
org_app = org_app_survey.OrgAppSurvey.get_by_key_name(org_app_key_name)
self.assertIsNone(org_app)
response = self.post(url, self.getOrgAppCreatePostData())
self.assertResponseForbidden(response)
org_app = org_app_survey.OrgAppSurvey.get_by_key_name(org_app_key_name)
self.assertIsNone(org_app)
def testOrgAppCreateOrEditByMentor(self):
"""Tests that a mentor cannot create an organization application.
"""
# Make sure we do not have an org app for this test.
self.org_app.delete()
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, mentor_for=[self.org.key])
url = '/gsoc/org/application/edit/' + self.gsoc.key().name()
response = self.get(url)
self.assertResponseForbidden(response)
org_app_key_name = 'gsoc_program/%s/orgapp' % (self.gsoc.key().name(),)
org_app = org_app_survey.OrgAppSurvey.get_by_key_name(org_app_key_name)
self.assertIsNone(org_app)
response = self.post(url, self.getOrgAppCreatePostData())
self.assertResponseForbidden(response)
org_app = org_app_survey.OrgAppSurvey.get_by_key_name(org_app_key_name)
self.assertIsNone(org_app)
def testOrgAppCreateOrEditByStudent(self):
"""Tests that a student cannot create an organization application."""
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBStudent(self.program, user=user)
# Make sure we do not have an org app for this test.
self.org_app.delete()
url = '/gsoc/org/application/edit/' + self.gsoc.key().name()
response = self.get(url)
self.assertResponseForbidden(response)
org_app_key_name = 'gsoc_program/%s/orgapp' % (self.gsoc.key().name(),)
org_app = org_app_survey.OrgAppSurvey.get_by_key_name(org_app_key_name)
self.assertIsNone(org_app)
response = self.post(url, self.getOrgAppCreatePostData())
self.assertResponseForbidden(response)
org_app = org_app_survey.OrgAppSurvey.get_by_key_name(org_app_key_name)
self.assertIsNone(org_app)
|
|
from __future__ import print_function
from __future__ import unicode_literals
from inspect import getdoc
from operator import attrgetter
import logging
import re
import signal
import sys
from docker.errors import APIError
import dockerpty
from .. import __version__
from .. import legacy
from ..const import DEFAULT_TIMEOUT
from ..project import NoSuchService, ConfigurationError
from ..service import BuildError, NeedsBuildError
from ..config import parse_environment
from .command import Command
from .docopt_command import NoSuchCommand
from .errors import UserError
from .formatter import Formatter
from .log_printer import LogPrinter
from .utils import yesno, get_version_info
log = logging.getLogger(__name__)
def main():
setup_logging()
try:
command = TopLevelCommand()
command.sys_dispatch()
except KeyboardInterrupt:
log.error("\nAborting.")
sys.exit(1)
except (UserError, NoSuchService, ConfigurationError, legacy.LegacyError) as e:
log.error(e.msg)
sys.exit(1)
except NoSuchCommand as e:
log.error("No such command: %s", e.command)
log.error("")
log.error("\n".join(parse_doc_section("commands:", getdoc(e.supercommand))))
sys.exit(1)
except APIError as e:
log.error(e.explanation)
sys.exit(1)
except BuildError as e:
log.error("Service '%s' failed to build: %s" % (e.service.name, e.reason))
sys.exit(1)
except NeedsBuildError as e:
log.error("Service '%s' needs to be built, but --no-build was passed." % e.service.name)
sys.exit(1)
def setup_logging():
console_handler = logging.StreamHandler(sys.stderr)
console_handler.setFormatter(logging.Formatter())
console_handler.setLevel(logging.INFO)
root_logger = logging.getLogger()
root_logger.addHandler(console_handler)
root_logger.setLevel(logging.DEBUG)
# Disable requests logging
logging.getLogger("requests").propagate = False
# stolen from docopt master
def parse_doc_section(name, source):
pattern = re.compile('^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)',
re.IGNORECASE | re.MULTILINE)
return [s.strip() for s in pattern.findall(source)]
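# For example, parse_doc_section("commands:", getdoc(TopLevelCommand)) returns the
# "Commands:" block of the docstring below (the matching line plus its indented
# continuation lines); main() relies on this to reprint the command list when an
# unknown command is given.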
class TopLevelCommand(Command):
"""Define and run multi-container applications with Docker.
Usage:
docker-compose [options] [COMMAND] [ARGS...]
docker-compose -h|--help
Options:
-f, --file FILE Specify an alternate compose file (default: docker-compose.yml)
-p, --project-name NAME Specify an alternate project name (default: directory name)
--verbose Show more output
-v, --version Print version and exit
Commands:
build Build or rebuild services
help Get help on a command
kill Kill containers
logs View output from containers
port Print the public port for a port binding
ps List containers
pull Pulls service images
restart Restart services
rm Remove stopped containers
run Run a one-off command
scale Set number of containers for a service
start Start services
stop Stop services
up Create and start containers
migrate-to-labels Recreate containers to add labels
version Show the Docker-Compose version information
"""
def docopt_options(self):
options = super(TopLevelCommand, self).docopt_options()
options['version'] = get_version_info('compose')
return options
def build(self, project, options):
"""
Build or rebuild services.
Services are built once and then tagged as `project_service`,
e.g. `composetest_db`. If you change a service's `Dockerfile` or the
contents of its build directory, you can run `docker-compose build` to rebuild it.
Usage: build [options] [SERVICE...]
Options:
--no-cache Do not use cache when building the image.
"""
no_cache = bool(options.get('--no-cache', False))
project.build(service_names=options['SERVICE'], no_cache=no_cache)
def help(self, project, options):
"""
Get help on a command.
Usage: help COMMAND
"""
handler = self.get_handler(options['COMMAND'])
raise SystemExit(getdoc(handler))
def kill(self, project, options):
"""
Force stop service containers.
Usage: kill [options] [SERVICE...]
Options:
-s SIGNAL SIGNAL to send to the container.
Default signal is SIGKILL.
"""
signal = options.get('-s', 'SIGKILL')
project.kill(service_names=options['SERVICE'], signal=signal)
def logs(self, project, options):
"""
View output from containers.
Usage: logs [options] [SERVICE...]
Options:
--no-color Produce monochrome output.
"""
containers = project.containers(service_names=options['SERVICE'], stopped=True)
monochrome = options['--no-color']
print("Attaching to", list_containers(containers))
LogPrinter(containers, attach_params={'logs': True}, monochrome=monochrome).run()
def port(self, project, options):
"""
Print the public port for a port binding.
Usage: port [options] SERVICE PRIVATE_PORT
Options:
--protocol=proto tcp or udp [default: tcp]
--index=index index of the container if there are multiple
instances of a service [default: 1]
"""
index = int(options.get('--index'))
service = project.get_service(options['SERVICE'])
try:
container = service.get_container(number=index)
except ValueError as e:
raise UserError(str(e))
print(container.get_local_port(
options['PRIVATE_PORT'],
protocol=options.get('--protocol') or 'tcp') or '')
def ps(self, project, options):
"""
List containers.
Usage: ps [options] [SERVICE...]
Options:
-q Only display IDs
"""
containers = sorted(
project.containers(service_names=options['SERVICE'], stopped=True) +
project.containers(service_names=options['SERVICE'], one_off=True),
key=attrgetter('name'))
if options['-q']:
for container in containers:
print(container.id)
else:
headers = [
'Name',
'Command',
'State',
'Ports',
]
rows = []
for container in containers:
command = container.human_readable_command
if len(command) > 30:
command = '%s ...' % command[:26]
rows.append([
container.name,
command,
container.human_readable_state,
container.human_readable_ports,
])
print(Formatter().table(headers, rows))
def pull(self, project, options):
"""
Pulls images for services.
Usage: pull [options] [SERVICE...]
Options:
--allow-insecure-ssl Allow insecure connections to the docker
registry
"""
insecure_registry = options['--allow-insecure-ssl']
project.pull(
service_names=options['SERVICE'],
insecure_registry=insecure_registry
)
def rm(self, project, options):
"""
Remove stopped service containers.
Usage: rm [options] [SERVICE...]
Options:
-f, --force Don't ask to confirm removal
-v Remove volumes associated with containers
"""
all_containers = project.containers(service_names=options['SERVICE'], stopped=True)
stopped_containers = [c for c in all_containers if not c.is_running]
if len(stopped_containers) > 0:
print("Going to remove", list_containers(stopped_containers))
if options.get('--force') \
or yesno("Are you sure? [yN] ", default=False):
project.remove_stopped(
service_names=options['SERVICE'],
v=options.get('-v', False)
)
else:
print("No stopped containers")
def run(self, project, options):
"""
Run a one-off command on a service.
For example:
$ docker-compose run web python manage.py shell
By default, linked services will be started, unless they are already
running. If you do not want to start linked services, use
`docker-compose run --no-deps SERVICE COMMAND [ARGS...]`.
Usage: run [options] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]
Options:
--allow-insecure-ssl Allow insecure connections to the docker
registry
-d Detached mode: Run container in the background, print
new container name.
--entrypoint CMD Override the entrypoint of the image.
-e KEY=VAL Set an environment variable (can be used multiple times)
-u, --user="" Run as specified username or uid
--no-deps Don't start linked services.
--rm Remove container after run. Ignored in detached mode.
--service-ports Run command with the service's ports enabled and mapped
to the host.
-T Disable pseudo-tty allocation. By default `docker-compose run`
allocates a TTY.
"""
service = project.get_service(options['SERVICE'])
insecure_registry = options['--allow-insecure-ssl']
if not options['--no-deps']:
deps = service.get_linked_names()
if len(deps) > 0:
project.up(
service_names=deps,
start_deps=True,
allow_recreate=False,
insecure_registry=insecure_registry,
)
tty = True
if options['-d'] or options['-T'] or not sys.stdin.isatty():
tty = False
if options['COMMAND']:
command = [options['COMMAND']] + options['ARGS']
else:
command = service.options.get('command')
container_options = {
'command': command,
'tty': tty,
'stdin_open': not options['-d'],
'detach': options['-d'],
}
if options['-e']:
container_options['environment'] = parse_environment(options['-e'])
if options['--entrypoint']:
container_options['entrypoint'] = options.get('--entrypoint')
if options['--rm']:
container_options['restart'] = None
if options['--user']:
container_options['user'] = options.get('--user')
if not options['--service-ports']:
container_options['ports'] = []
try:
container = service.create_container(
quiet=True,
one_off=True,
insecure_registry=insecure_registry,
**container_options
)
except APIError as e:
legacy.check_for_legacy_containers(
project.client,
project.name,
[service.name],
allow_one_off=False,
)
raise e
if options['-d']:
service.start_container(container)
print(container.name)
else:
dockerpty.start(project.client, container.id, interactive=not options['-T'])
exit_code = container.wait()
if options['--rm']:
project.client.remove_container(container.id)
sys.exit(exit_code)
def scale(self, project, options):
"""
Set number of containers to run for a service.
Numbers are specified in the form `service=num` as arguments.
For example:
$ docker-compose scale web=2 worker=3
Usage: scale [options] [SERVICE=NUM...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
for s in options['SERVICE=NUM']:
if '=' not in s:
raise UserError('Arguments to scale should be in the form service=num')
service_name, num = s.split('=', 1)
try:
num = int(num)
except ValueError:
raise UserError('Number of containers for service "%s" is not a '
'number' % service_name)
project.get_service(service_name).scale(num, timeout=timeout)
def start(self, project, options):
"""
Start existing containers.
Usage: start [SERVICE...]
"""
project.start(service_names=options['SERVICE'])
def stop(self, project, options):
"""
Stop running containers without removing them.
They can be started again with `docker-compose start`.
Usage: stop [options] [SERVICE...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
project.stop(service_names=options['SERVICE'], timeout=timeout)
def restart(self, project, options):
"""
Restart running containers.
Usage: restart [options] [SERVICE...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
project.restart(service_names=options['SERVICE'], timeout=timeout)
def up(self, project, options):
"""
Build, (re)create, start and attach to containers for a service.
By default, `docker-compose up` will aggregate the output of each container, and
when it exits, all containers will be stopped. If you run `docker-compose up -d`,
it'll start the containers in the background and leave them running.
If there are existing containers for a service, `docker-compose up` will stop
and recreate them (preserving mounted volumes with volumes-from),
so that changes in `docker-compose.yml` are picked up. If you do not want existing
containers to be recreated, `docker-compose up --no-recreate` will re-use existing
containers.
Usage: up [options] [SERVICE...]
Options:
--allow-insecure-ssl Allow insecure connections to the docker
registry
-d Detached mode: Run containers in the background,
print new container names.
--no-color Produce monochrome output.
--no-deps Don't start linked services.
--x-smart-recreate Only recreate containers whose configuration or
image needs to be updated. (EXPERIMENTAL)
--no-recreate If containers already exist, don't recreate them.
--no-build Don't build an image, even if it's missing
-t, --timeout TIMEOUT Use this timeout in seconds for container shutdown
when attached or when containers are already
running. (default: 10)
"""
insecure_registry = options['--allow-insecure-ssl']
detached = options['-d']
monochrome = options['--no-color']
start_deps = not options['--no-deps']
allow_recreate = not options['--no-recreate']
smart_recreate = options['--x-smart-recreate']
service_names = options['SERVICE']
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
to_attach = project.up(
service_names=service_names,
start_deps=start_deps,
allow_recreate=allow_recreate,
smart_recreate=smart_recreate,
insecure_registry=insecure_registry,
do_build=not options['--no-build'],
timeout=timeout
)
if not detached:
print("Attaching to", list_containers(to_attach))
log_printer = LogPrinter(to_attach, attach_params={"logs": True}, monochrome=monochrome)
try:
log_printer.run()
finally:
def handler(signal, frame):
project.kill(service_names=service_names)
sys.exit(0)
signal.signal(signal.SIGINT, handler)
print("Gracefully stopping... (press Ctrl+C again to force)")
project.stop(service_names=service_names, timeout=timeout)
def migrate_to_labels(self, project, _options):
"""
Recreate containers to add labels
If you're coming from Compose 1.2 or earlier, you'll need to remove or
migrate your existing containers after upgrading Compose. This is
because, as of version 1.3, Compose uses Docker labels to keep track
of containers, and so they need to be recreated with labels added.
If Compose detects containers that were created without labels, it
will refuse to run so that you don't end up with two sets of them. If
you want to keep using your existing containers (for example, because
they have data volumes you want to preserve) you can migrate them with
the following command:
docker-compose migrate-to-labels
Alternatively, if you're not worried about keeping them, you can
remove them - Compose will just create new ones.
docker rm -f myapp_web_1 myapp_db_1 ...
Usage: migrate-to-labels
"""
legacy.migrate_project_to_labels(project)
def version(self, project, options):
"""
        Show version information
Usage: version [--short]
Options:
--short Shows only Compose's version number.
"""
if options['--short']:
print(__version__)
else:
print(get_version_info('full'))
def list_containers(containers):
return ", ".join(c.name for c in containers)
|
|
from ctypes import *
import datetime
import traceback
import sys
import os.path
from time import sleep
def errorCheck(stat):
"""Check the error status and print if error"""
if stat != 0:
print stat
print c_char_p(veristandInterOp.GetLastErrorMessage())
quit()
#print os.path.abspath(file)
print "TestScript - CVI dll - test functions 1.8.py"
veristandInterOp = cdll.LoadLibrary(r"..\VeristandPythonInterop\Binaries\VeriStandCviDll.dll")
print veristandInterOp
print "LaunchVeriStand"
errorCheck(veristandInterOp.LaunchVeriStand())
print "OpenProject"
errorCheck(veristandInterOp.OpenProject("d:\\NI Projects\\EXAM & cPython\\Python development\\cPython-Interface-for-NI-VeriStand\\VS\\Sinewave UnitTest.nivsproj"))
print "ShowProjectWindow"
errorCheck(veristandInterOp.ShowProjectWindow())
print "DeployProject"
errorCheck(veristandInterOp.DeployProject())
print "OpenWorkspace"
errorCheck(veristandInterOp.OpenWorkspace())
sleep(2)
print "CloseWorkspace"
errorCheck(veristandInterOp.CloseWorkspace())
sleep(4)
print "OpenWorkspace"
errorCheck(veristandInterOp.OpenWorkspace())
print "GetChannelValue"
hValue = c_double(0.0)
errorCheck(veristandInterOp.GetChannelValue("Aliases/UnitTest", byref(hValue)))
print "value Aliases/UnitTest is: "
print hValue
print "SetChannelValue"
cValue = c_double(-4.0)
errorCheck(veristandInterOp.SetChannelValue("Aliases/UnitTest", cValue))
print "GetChannelValue"
errorCheck(veristandInterOp.GetChannelValue("Aliases/UnitTest", byref(hValue)))
print "value Aliases/UnitTest is: "
print hValue
print "SetChannelValueSynch"
cValue = c_double(-4.0)
cTime = c_double(0.0)
#cpTime = pointer(cTime)
errorCheck(veristandInterOp.SetChannelValueSynch("Aliases/UnitTest", cValue, byref(cTime)))
print cTime
print "GetChannelValue"
errorCheck(veristandInterOp.GetChannelValue("Aliases/UnitTest", byref(hValue)))
print "value Aliases/UnitTest is: "
print hValue
hNumOfChannels = 2
hNumOfCharacters = 255
hChannels = ((c_char * hNumOfCharacters) * hNumOfChannels)() #must match longest channel
hChannels[0].value = "Aliases/UnitTest"
hChannels[1].value = "Aliases/UnitTest1"
hValues = (c_double * hNumOfChannels)()
for i in range(hNumOfChannels):
hValues[i]=i+7
print hValues[i]
print hChannels[i].value
print hValues
hReturnValues = (c_double * hNumOfChannels)()
hpChannels = pointer(hChannels)
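# Layout note: hChannels is one contiguous 2 x 255 block of c_char, one fixed-width row
# per channel path; the DLL receives a pointer to that block together with the row count
# (hNumOfChannels) and row width (hNumOfCharacters), presumably so it can index the
# individual names. hValues and hReturnValues are plain c_double arrays of the same length.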
print "GetMultipleChannels"
errorCheck(veristandInterOp.GetMultipleChannelValues(byref(hpChannels),hReturnValues, hNumOfChannels,hNumOfCharacters))
print "value Aliases/UnitTest is: "
print hReturnValues[0]
print "value Aliases/UnitTest1 is: "
print hReturnValues[1]
print "SetMultipleChannels"
errorCheck(veristandInterOp.SetMultipleChannelValues(byref(hpChannels),hValues, hNumOfChannels,hNumOfCharacters))
print "GetMultipleChannels"
errorCheck(veristandInterOp.GetMultipleChannelValues(byref(hpChannels),hReturnValues, hNumOfChannels,hNumOfCharacters))
print "value Aliases/UnitTest is: "
print hReturnValues[0]
print "value Aliases/UnitTest1 is: "
print hReturnValues[1]
print "SetMultipleChannelsSynch"
for i in range(hNumOfChannels):
hValues[i]=i+2
errorCheck(veristandInterOp.SetMultipleChannelValuesSynch(byref(hpChannels),hValues, hNumOfChannels,hNumOfCharacters,byref(cTime)))
print cTime
print "GetMultipleChannels"
errorCheck(veristandInterOp.GetMultipleChannelValues(byref(hpChannels),hReturnValues, hNumOfChannels,hNumOfCharacters))
print "value Aliases/UnitTest is: "
print hReturnValues[0]
print "value Aliases/UnitTest1 is: "
print hReturnValues[1]
print "GetActiveProjects"
vNumberOfProjects=c_int(0)
vProject = (c_char * hNumOfCharacters)()
errorCheck(veristandInterOp.GetActiveProject(byref(vProject),0, byref(vNumberOfProjects)))
print "Active projects are: "
print vProject.value
print "Is Open Project Running?"
vRunning=c_int(0)
errorCheck(veristandInterOp.IsOpenProjectRunning(byref(vRunning)))
print vRunning
print "ConfigureDatalogging"
lNumOfChannels = 2
lNumOfCharacters = 255
lRate = c_double(100.0)
lRateString = "100.0"
lFilePropertiesNamesLineLength = 255
lFilePropertiesValuesLineLength = 255
lChannelsToLogLineLength = 255
lFilePropertiesNamesLength = 3
lFilePropertiesValuesLength = 3
lChannelsToLogLength = 4
taskName = "testLog"
taskName2 = "testLog2"
taskName3 = "testLog3"
logPath = "C:\\Temp\\cPython\\pyTest.tdms"
logPath2 = "C:\\Temp\\cPython\\pyTest2.tdms"
logPath2T = "C:\\Temp\\cPython\\pyTest2T.tdms"
lTriggerHighLimit = c_double(0.5)
lTriggerLowLimit = c_double(-0.5)
lReplaceFile = c_int(1)
lTriggerType = c_int(0)
lTriggerChannel = " "
fTriggerLevel = c_double(60)
iTriggerSlope = c_int(1) #Rising
fDuration = c_double(-1)
fPreTriggerDuration = c_double(0)
lChannelsToLog = ((c_char * lChannelsToLogLineLength) * lChannelsToLogLength)() #must match longest channel
lChannelsToLog[0].value = "Targets/Controller/System Channels/System Time"
lChannelsToLog[1].value = "Targets/Controller/System Channels/HP Count"
lChannelsToLog[2].value = "Targets/Controller/System Channels/LP Count"
lChannelsToLog[3].value = "Targets/Controller/System Channels/Model Count"
plChannelsToLog = pointer(lChannelsToLog)
lFilePropertiesNames = ((c_char * lFilePropertiesNamesLineLength) * lFilePropertiesNamesLength)() #must match longest channel
lFilePropertiesNames[0].value = "LogPath"
lFilePropertiesNames[1].value = "LRate"
lFilePropertiesNames[2].value = "TaskName"
plFilePropertiesNames = pointer(lFilePropertiesNames)
lFilePropertiesValues = ((c_char * lFilePropertiesValuesLineLength) * lFilePropertiesValuesLength)() #must match longest channel
lFilePropertiesValues[0].value = logPath
lFilePropertiesValues[1].value = lRateString
lFilePropertiesValues[2].value = taskName
plFilePropertiesValues = pointer(lFilePropertiesValues)
lShortNames = ((c_char * lChannelsToLogLineLength) * lChannelsToLogLength)() #must match longest channel
lShortNames[0].value = "System Time"
lShortNames[1].value = "HP Count"
lShortNames[2].value = "LP Count"
lShortNames[3].value = "Model Count"
plShortNames = pointer(lShortNames)
print "StartDatalogging"
errorCheck(veristandInterOp.StartDataLogging(taskName,taskName, logPath, lTriggerHighLimit,lTriggerLowLimit, lReplaceFile, lTriggerType,
lTriggerChannel,lRate, byref(plFilePropertiesNames),lFilePropertiesNamesLength,
lFilePropertiesNamesLineLength,byref(plFilePropertiesValues),
lFilePropertiesValuesLength,lFilePropertiesValuesLineLength,
byref(plChannelsToLog),lChannelsToLogLength,lChannelsToLogLineLength))
sleep(5)
print "StopDatalogging"
errorCheck(veristandInterOp.StopDataLogging(taskName))
print "StartDatalogging2"
errorCheck(veristandInterOp.StartDataLogging2(taskName2,taskName2, logPath2, fTriggerLevel, lReplaceFile,iTriggerSlope,
lTriggerChannel,lRate, byref(plFilePropertiesNames),lFilePropertiesNamesLength,
lFilePropertiesNamesLineLength,byref(plFilePropertiesValues),
lFilePropertiesValuesLength,lFilePropertiesValuesLineLength,
byref(plChannelsToLog),lChannelsToLogLength,lChannelsToLogLineLength,fDuration,fPreTriggerDuration,
byref(plShortNames),lChannelsToLogLength,lChannelsToLogLineLength))
sleep(5)
print "StopDatalogging2"
errorCheck(veristandInterOp.StopDataLogging2(taskName2))
lTriggerChannel = "Targets/Controller/System Channels/System Time"
fTriggerLevel = c_double(30)
iTriggerSlope = c_int(1) #Rising
fDuration = c_double(9)
fPreTriggerDuration = c_double(3)
print "StartDatalogging2Triggered"
errorCheck(veristandInterOp.StartDataLogging2(taskName3,taskName3, logPath2T, fTriggerLevel, lReplaceFile,iTriggerSlope,
lTriggerChannel,lRate, byref(plFilePropertiesNames),lFilePropertiesNamesLength,
lFilePropertiesNamesLineLength,byref(plFilePropertiesValues),
lFilePropertiesValuesLength,lFilePropertiesValuesLineLength,
byref(plChannelsToLog),lChannelsToLogLength,lChannelsToLogLineLength,fDuration,fPreTriggerDuration,
byref(plShortNames),lChannelsToLogLength,lChannelsToLogLineLength))
sLogStatus = (c_char * hNumOfCharacters)()
iLogStatus=c_int(0)
errorCheck(veristandInterOp.GetDataLogging2SessionState(taskName3,byref(sLogStatus)))
errorCheck(veristandInterOp.GetDataLogging2State(taskName3,byref(iLogStatus)))
print sLogStatus.value
print iLogStatus
sleep(15)
errorCheck(veristandInterOp.GetDataLogging2SessionState(taskName3,byref(sLogStatus)))
errorCheck(veristandInterOp.GetDataLogging2State(taskName3,byref(iLogStatus)))
print sLogStatus.value
print iLogStatus
sleep(30)
errorCheck(veristandInterOp.GetDataLogging2SessionState(taskName3,byref(sLogStatus)))
errorCheck(veristandInterOp.GetDataLogging2State(taskName3,byref(iLogStatus)))
print sLogStatus.value
print iLogStatus
print "StopDatalogging2Triggered"
errorCheck(veristandInterOp.StopDataLogging2(taskName3))
RTsequencePath = "d:\\NI Projects\\EXAM & cPython\\Python development\\cPython-Interface-for-NI-VeriStand\\VS\\Stimulus Profiles\\RT Sequence and Stimulus profile\\Test RT seq.nivsseq"
stimulusPath = "d:\\NI Projects\\EXAM & cPython\\Python development\\cPython-Interface-for-NI-VeriStand\\VS\\Stimulus Profiles\\RT Sequence and Stimulus profile\\Test Stimulus.nivsstimprof"
UUTnumber = "010"
iStimulusState=c_int(0)
iRTSequenceState=c_int(0)
fRTSequenceReturnValue=c_double(0)
lParamNamesLineLength = 255
lParamValuesLineLength = 255
lParamTypesLineLength = 255
lParamNamesLength = 2
lParamValuesLength = 2
lParamTypesLength = 2
lParamNames = ((c_char * lParamNamesLineLength) * lParamNamesLength)() #must match longest channel
lParamNames[0].value = "UnitTest"
lParamNames[1].value = "WaitParam"
plParamNames = pointer(lParamNames)
lParamValues = ((c_char * lParamValuesLineLength) * lParamValuesLength)() #must match longest channel
lParamValues[0].value = "Aliases/UnitTest1"
lParamValues[1].value = "5"
plParamValues = pointer(lParamValues)
lParamTypes = ((c_char * lParamTypesLineLength) * lParamTypesLength)() #must match longest channel
lParamTypes[0].value = "Path"
lParamTypes[1].value = "Double"
plParamTypes = pointer(lParamTypes)
#StimulusExecuteAsynch
errorCheck(veristandInterOp.StimulusExecuteAsynch(stimulusPath,UUTnumber))
print "Stimulus started"
sleep(2)
errorCheck(veristandInterOp.GetStimulusState(byref(iStimulusState)))
print iStimulusState
sleep(10)
errorCheck(veristandInterOp.GetStimulusState(byref(iStimulusState)))
print iStimulusState
#RTSequenceExecuteAsynch
errorCheck(veristandInterOp.RTSequenceExecuteAsynch(RTsequencePath,byref(plParamNames),lParamNamesLength, lParamNamesLineLength,
byref(plParamValues),lParamValuesLength, lParamValuesLineLength,
byref(plParamTypes),lParamTypesLength,lParamTypesLineLength))
print "RT Sequence started"
sleep(2)
errorCheck(veristandInterOp.GetRTSequenceState(byref(iRTSequenceState)))
print iRTSequenceState
sleep(15)
errorCheck(veristandInterOp.GetRTSequenceState(byref(iRTSequenceState)))
print iRTSequenceState
errorCheck(veristandInterOp.GetRTSequenceReturnValue(byref(fRTSequenceReturnValue)))
print fRTSequenceReturnValue
#mandatory
print "RT Sequence undeploy"
errorCheck(veristandInterOp.RTSequenceUndeploy())
print "CloseWorkspace"
errorCheck(veristandInterOp.CloseWorkspace())
print "UndeployProject"
errorCheck(veristandInterOp.UndeployProject())
print "CloseProject"
errorCheck(veristandInterOp.CloseProject())
print "CloseVeriStand"
errorCheck(veristandInterOp.CloseVeriStand())
sleep(10)
print "LaunchVeriStandRunAs"
errorCheck(veristandInterOp.LaunchVeriStandRunAs())
sleep(10)
print "CloseVeriStand"
errorCheck(veristandInterOp.CloseVeriStand())
print "Exit"
|
|
"""
Support for Osram Lightify.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.osramlightify/
"""
import logging
import socket
import random
from datetime import timedelta
import voluptuous as vol
from homeassistant import util
from homeassistant.const import CONF_HOST
from homeassistant.components.light import (
Light, ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_RGB_COLOR,
ATTR_TRANSITION, EFFECT_RANDOM, SUPPORT_BRIGHTNESS, SUPPORT_EFFECT,
SUPPORT_COLOR_TEMP, SUPPORT_RGB_COLOR, SUPPORT_TRANSITION, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['https://github.com/tfriedel/python-lightify/archive/'
'd6eadcf311e6e21746182d1480e97b350dda2b3e.zip#lightify==1.0.4']
_LOGGER = logging.getLogger(__name__)
TEMP_MIN = 2000 # lightify minimum temperature
TEMP_MAX = 6500 # lightify maximum temperature
TEMP_MIN_HASS = 154 # home assistant minimum temperature
TEMP_MAX_HASS = 500 # home assistant maximum temperature
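# turn_on() and update() rescale linearly between the Home Assistant mired range
# [TEMP_MIN_HASS, TEMP_MAX_HASS] and the Lightify kelvin range [TEMP_MIN, TEMP_MAX];
# for example, a requested color_temp of 327 mireds (the midpoint) maps to 4250 K.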
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(milliseconds=100)
SUPPORT_OSRAMLIGHTIFY = (SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP |
SUPPORT_EFFECT | SUPPORT_RGB_COLOR |
SUPPORT_TRANSITION)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Osram Lightify lights."""
import lightify
host = config.get(CONF_HOST)
if host:
try:
bridge = lightify.Lightify(host)
except socket.error as err:
msg = 'Error connecting to bridge: {} due to: {}'.format(host,
str(err))
_LOGGER.exception(msg)
return False
setup_bridge(bridge, add_devices)
else:
_LOGGER.error('No host found in configuration')
return False
def setup_bridge(bridge, add_devices_callback):
"""Setup the Lightify bridge."""
lights = {}
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update_lights():
"""Update the lights objects with latest info from bridge."""
bridge.update_all_light_status()
new_lights = []
for (light_id, light) in bridge.lights().items():
if light_id not in lights:
osram_light = OsramLightifyLight(light_id, light,
update_lights)
lights[light_id] = osram_light
new_lights.append(osram_light)
else:
lights[light_id].light = light
if new_lights:
add_devices_callback(new_lights)
update_lights()
class OsramLightifyLight(Light):
"""Representation of an Osram Lightify Light."""
def __init__(self, light_id, light, update_lights):
"""Initialize the light."""
self._light = light
self._light_id = light_id
self.update_lights = update_lights
self._brightness = 0
self._rgb = (0, 0, 0)
self._name = ""
self._temperature = TEMP_MIN
self._state = False
self.update()
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def rgb_color(self):
"""Last RGB color value set."""
_LOGGER.debug("rgb_color light state for light: %s is: %s %s %s ",
self._name, self._rgb[0], self._rgb[1], self._rgb[2])
return self._rgb
@property
def color_temp(self):
"""Return the color temperature."""
return self._temperature
@property
def brightness(self):
"""Brightness of this light between 0..255."""
_LOGGER.debug("brightness for light %s is: %s",
self._name, self._brightness)
return self._brightness
@property
def is_on(self):
"""Update Status to True if device is on."""
_LOGGER.debug("is_on light state for light: %s is: %s",
self._name, self._state)
return self._state
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OSRAMLIGHTIFY
def turn_on(self, **kwargs):
"""Turn the device on."""
_LOGGER.debug("turn_on Attempting to turn on light: %s ",
self._name)
self._light.set_onoff(1)
self._state = self._light.on()
if ATTR_TRANSITION in kwargs:
transition = kwargs[ATTR_TRANSITION] * 10
_LOGGER.debug("turn_on requested transition time for light:"
" %s is: %s ",
self._name, transition)
else:
transition = 0
_LOGGER.debug("turn_on requested transition time for light:"
" %s is: %s ",
self._name, transition)
if ATTR_RGB_COLOR in kwargs:
red, green, blue = kwargs[ATTR_RGB_COLOR]
_LOGGER.debug("turn_on requested ATTR_RGB_COLOR for light:"
" %s is: %s %s %s ",
self._name, red, green, blue)
self._light.set_rgb(red, green, blue, transition)
if ATTR_COLOR_TEMP in kwargs:
color_t = kwargs[ATTR_COLOR_TEMP]
kelvin = int(((TEMP_MAX - TEMP_MIN) * (color_t - TEMP_MIN_HASS) /
(TEMP_MAX_HASS - TEMP_MIN_HASS)) + TEMP_MIN)
_LOGGER.debug("turn_on requested set_temperature for light:"
" %s: %s ", self._name, kelvin)
self._light.set_temperature(kelvin, transition)
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
_LOGGER.debug("turn_on requested brightness for light: %s is: %s ",
self._name, self._brightness)
self._brightness = self._light.set_luminance(
int(self._brightness / 2.55),
transition)
if ATTR_EFFECT in kwargs:
effect = kwargs.get(ATTR_EFFECT)
if effect == EFFECT_RANDOM:
self._light.set_rgb(random.randrange(0, 255),
random.randrange(0, 255),
random.randrange(0, 255),
transition)
_LOGGER.debug("turn_on requested random effect for light:"
" %s with transition %s ",
self._name, transition)
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
_LOGGER.debug("turn_off Attempting to turn off light: %s ",
self._name)
if ATTR_TRANSITION in kwargs:
transition = kwargs[ATTR_TRANSITION] * 10
_LOGGER.debug("turn_off requested transition time for light:"
" %s is: %s ",
self._name, transition)
self._light.set_luminance(0, transition)
else:
transition = 0
_LOGGER.debug("turn_off requested transition time for light:"
" %s is: %s ",
self._name, transition)
self._light.set_onoff(0)
self._state = self._light.on()
self.schedule_update_ha_state()
def update(self):
"""Synchronize state with bridge."""
self.update_lights(no_throttle=True)
self._brightness = int(self._light.lum() * 2.55)
self._name = self._light.name()
self._rgb = self._light.rgb()
o_temp = self._light.temp()
self._temperature = int(TEMP_MIN_HASS + (TEMP_MAX_HASS - TEMP_MIN_HASS)
* (o_temp - TEMP_MIN) / (TEMP_MAX - TEMP_MIN))
self._state = self._light.on()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import unittest
from unittest import mock
from unittest.mock import call
from parameterized import parameterized
from airflow import models, settings
from airflow.models import DAG, DagBag, DagModel, TaskInstance as TI, clear_task_instances
from airflow.models.dagrun import DagRun
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python import ShortCircuitOperator
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.callback_requests import DagCallbackRequest
from airflow.utils.dates import days_ago
from airflow.utils.state import State
from airflow.utils.trigger_rule import TriggerRule
from airflow.utils.types import DagRunType
from tests.models import DEFAULT_DATE
from tests.test_utils.db import clear_db_pools, clear_db_runs
class TestDagRun(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dagbag = DagBag(include_examples=True)
def setUp(self):
clear_db_runs()
clear_db_pools()
def create_dag_run(
self,
dag,
state=State.RUNNING,
task_states=None,
execution_date=None,
is_backfill=False,
):
now = timezone.utcnow()
if execution_date is None:
execution_date = now
if is_backfill:
run_type = DagRunType.BACKFILL_JOB
else:
run_type = DagRunType.MANUAL
dag_run = dag.create_dagrun(
run_type=run_type,
execution_date=execution_date,
start_date=now,
state=state,
external_trigger=False,
)
if task_states is not None:
session = settings.Session()
for task_id, task_state in task_states.items():
ti = dag_run.get_task_instance(task_id)
ti.set_state(task_state, session)
session.commit()
session.close()
return dag_run
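    # create_dag_run is the shared helper for the tests below: it creates a MANUAL (or
    # BACKFILL_JOB) run for the given DAG and, when task_states is supplied, pins each
    # named task instance to the requested state before the test asserts on update_state().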
def test_clear_task_instances_for_backfill_dagrun(self):
now = timezone.utcnow()
session = settings.Session()
dag_id = 'test_clear_task_instances_for_backfill_dagrun'
dag = DAG(dag_id=dag_id, start_date=now)
self.create_dag_run(dag, execution_date=now, is_backfill=True)
task0 = DummyOperator(task_id='backfill_task_0', owner='test', dag=dag)
ti0 = TI(task=task0, execution_date=now)
ti0.run()
qry = session.query(TI).filter(TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session)
session.commit()
ti0.refresh_from_db()
dr0 = session.query(DagRun).filter(DagRun.dag_id == dag_id, DagRun.execution_date == now).first()
self.assertEqual(dr0.state, State.RUNNING)
def test_dagrun_find(self):
session = settings.Session()
now = timezone.utcnow()
dag_id1 = "test_dagrun_find_externally_triggered"
dag_run = models.DagRun(
dag_id=dag_id1,
run_type=DagRunType.MANUAL,
execution_date=now,
start_date=now,
state=State.RUNNING,
external_trigger=True,
)
session.add(dag_run)
dag_id2 = "test_dagrun_find_not_externally_triggered"
dag_run = models.DagRun(
dag_id=dag_id2,
run_type=DagRunType.MANUAL,
execution_date=now,
start_date=now,
state=State.RUNNING,
external_trigger=False,
)
session.add(dag_run)
session.commit()
self.assertEqual(1, len(models.DagRun.find(dag_id=dag_id1, external_trigger=True)))
self.assertEqual(0, len(models.DagRun.find(dag_id=dag_id1, external_trigger=False)))
self.assertEqual(0, len(models.DagRun.find(dag_id=dag_id2, external_trigger=True)))
self.assertEqual(1, len(models.DagRun.find(dag_id=dag_id2, external_trigger=False)))
def test_dagrun_success_when_all_skipped(self):
"""
Tests that a DAG run succeeds when all tasks are skipped
"""
dag = DAG(dag_id='test_dagrun_success_when_all_skipped', start_date=timezone.datetime(2017, 1, 1))
dag_task1 = ShortCircuitOperator(
task_id='test_short_circuit_false', dag=dag, python_callable=lambda: False
)
dag_task2 = DummyOperator(task_id='test_state_skipped1', dag=dag)
dag_task3 = DummyOperator(task_id='test_state_skipped2', dag=dag)
dag_task1.set_downstream(dag_task2)
dag_task2.set_downstream(dag_task3)
initial_task_states = {
'test_short_circuit_false': State.SUCCESS,
'test_state_skipped1': State.SKIPPED,
'test_state_skipped2': State.SKIPPED,
}
dag_run = self.create_dag_run(dag=dag, state=State.RUNNING, task_states=initial_task_states)
dag_run.update_state()
self.assertEqual(State.SUCCESS, dag_run.state)
def test_dagrun_success_conditions(self):
session = settings.Session()
dag = DAG('test_dagrun_success_conditions', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})
# A -> B
# A -> C -> D
# ordered: B, D, C, A or D, B, C, A or D, C, B, A
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op1.set_upstream([op2, op3])
op3.set_upstream(op4)
dag.clear()
now = timezone.utcnow()
dr = dag.create_dagrun(
run_id='test_dagrun_success_conditions', state=State.RUNNING, execution_date=now, start_date=now
)
# op1 = root
ti_op1 = dr.get_task_instance(task_id=op1.task_id)
ti_op1.set_state(state=State.SUCCESS, session=session)
ti_op2 = dr.get_task_instance(task_id=op2.task_id)
ti_op3 = dr.get_task_instance(task_id=op3.task_id)
ti_op4 = dr.get_task_instance(task_id=op4.task_id)
# root is successful, but unfinished tasks
dr.update_state()
self.assertEqual(State.RUNNING, dr.state)
# one has failed, but root is successful
ti_op2.set_state(state=State.FAILED, session=session)
ti_op3.set_state(state=State.SUCCESS, session=session)
ti_op4.set_state(state=State.SUCCESS, session=session)
dr.update_state()
self.assertEqual(State.SUCCESS, dr.state)
def test_dagrun_deadlock(self):
session = settings.Session()
dag = DAG('test_dagrun_deadlock', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op2.trigger_rule = TriggerRule.ONE_FAILED
op2.set_upstream(op1)
dag.clear()
now = timezone.utcnow()
dr = dag.create_dagrun(
run_id='test_dagrun_deadlock', state=State.RUNNING, execution_date=now, start_date=now
)
ti_op1 = dr.get_task_instance(task_id=op1.task_id)
ti_op1.set_state(state=State.SUCCESS, session=session)
ti_op2 = dr.get_task_instance(task_id=op2.task_id)
ti_op2.set_state(state=State.NONE, session=session)
dr.update_state()
self.assertEqual(dr.state, State.RUNNING)
ti_op2.set_state(state=State.NONE, session=session)
op2.trigger_rule = 'invalid'
dr.update_state()
self.assertEqual(dr.state, State.FAILED)
def test_dagrun_no_deadlock_with_shutdown(self):
session = settings.Session()
dag = DAG('test_dagrun_no_deadlock_with_shutdown', start_date=DEFAULT_DATE)
with dag:
op1 = DummyOperator(task_id='upstream_task')
op2 = DummyOperator(task_id='downstream_task')
op2.set_upstream(op1)
dr = dag.create_dagrun(
run_id='test_dagrun_no_deadlock_with_shutdown',
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
)
upstream_ti = dr.get_task_instance(task_id='upstream_task')
upstream_ti.set_state(State.SHUTDOWN, session=session)
dr.update_state()
self.assertEqual(dr.state, State.RUNNING)
def test_dagrun_no_deadlock_with_depends_on_past(self):
session = settings.Session()
dag = DAG('test_dagrun_no_deadlock', start_date=DEFAULT_DATE)
with dag:
DummyOperator(task_id='dop', depends_on_past=True)
DummyOperator(task_id='tc', task_concurrency=1)
dag.clear()
dr = dag.create_dagrun(
run_id='test_dagrun_no_deadlock_1',
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
)
dr2 = dag.create_dagrun(
run_id='test_dagrun_no_deadlock_2',
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(days=1),
start_date=DEFAULT_DATE + datetime.timedelta(days=1),
)
ti1_op1 = dr.get_task_instance(task_id='dop')
dr2.get_task_instance(task_id='dop')
ti2_op1 = dr.get_task_instance(task_id='tc')
dr.get_task_instance(task_id='tc')
ti1_op1.set_state(state=State.RUNNING, session=session)
dr.update_state()
dr2.update_state()
self.assertEqual(dr.state, State.RUNNING)
self.assertEqual(dr2.state, State.RUNNING)
ti2_op1.set_state(state=State.RUNNING, session=session)
dr.update_state()
dr2.update_state()
self.assertEqual(dr.state, State.RUNNING)
self.assertEqual(dr2.state, State.RUNNING)
def test_dagrun_success_callback(self):
def on_success_callable(context):
self.assertEqual(context['dag_run'].dag_id, 'test_dagrun_success_callback')
dag = DAG(
dag_id='test_dagrun_success_callback',
start_date=datetime.datetime(2017, 1, 1),
on_success_callback=on_success_callable,
)
dag_task1 = DummyOperator(task_id='test_state_succeeded1', dag=dag)
dag_task2 = DummyOperator(task_id='test_state_succeeded2', dag=dag)
dag_task1.set_downstream(dag_task2)
initial_task_states = {
'test_state_succeeded1': State.SUCCESS,
'test_state_succeeded2': State.SUCCESS,
}
dag_run = self.create_dag_run(dag=dag, state=State.RUNNING, task_states=initial_task_states)
_, callback = dag_run.update_state()
self.assertEqual(State.SUCCESS, dag_run.state)
# A callback request is only returned when execute_callbacks=False is passed to dag_run.update_state()
self.assertIsNone(callback)
def test_dagrun_failure_callback(self):
def on_failure_callable(context):
self.assertEqual(context['dag_run'].dag_id, 'test_dagrun_failure_callback')
dag = DAG(
dag_id='test_dagrun_failure_callback',
start_date=datetime.datetime(2017, 1, 1),
on_failure_callback=on_failure_callable,
)
dag_task1 = DummyOperator(task_id='test_state_succeeded1', dag=dag)
dag_task2 = DummyOperator(task_id='test_state_failed2', dag=dag)
initial_task_states = {
'test_state_succeeded1': State.SUCCESS,
'test_state_failed2': State.FAILED,
}
dag_task1.set_downstream(dag_task2)
dag_run = self.create_dag_run(dag=dag, state=State.RUNNING, task_states=initial_task_states)
_, callback = dag_run.update_state()
self.assertEqual(State.FAILED, dag_run.state)
# A callback request is only returned when execute_callbacks=False is passed to dag_run.update_state()
self.assertIsNone(callback)
def test_dagrun_update_state_with_handle_callback_success(self):
def on_success_callable(context):
self.assertEqual(
context['dag_run'].dag_id, 'test_dagrun_update_state_with_handle_callback_success'
)
dag = DAG(
dag_id='test_dagrun_update_state_with_handle_callback_success',
start_date=datetime.datetime(2017, 1, 1),
on_success_callback=on_success_callable,
)
dag_task1 = DummyOperator(task_id='test_state_succeeded1', dag=dag)
dag_task2 = DummyOperator(task_id='test_state_succeeded2', dag=dag)
dag_task1.set_downstream(dag_task2)
initial_task_states = {
'test_state_succeeded1': State.SUCCESS,
'test_state_succeeded2': State.SUCCESS,
}
dag_run = self.create_dag_run(dag=dag, state=State.RUNNING, task_states=initial_task_states)
_, callback = dag_run.update_state(execute_callbacks=False)
self.assertEqual(State.SUCCESS, dag_run.state)
# With execute_callbacks=False, update_state() returns the callback request instead of running it
assert callback == DagCallbackRequest(
full_filepath=dag_run.dag.fileloc,
dag_id="test_dagrun_update_state_with_handle_callback_success",
execution_date=dag_run.execution_date,
is_failure_callback=False,
msg="success",
)
def test_dagrun_update_state_with_handle_callback_failure(self):
def on_failure_callable(context):
self.assertEqual(
context['dag_run'].dag_id, 'test_dagrun_update_state_with_handle_callback_failure'
)
dag = DAG(
dag_id='test_dagrun_update_state_with_handle_callback_failure',
start_date=datetime.datetime(2017, 1, 1),
on_failure_callback=on_failure_callable,
)
dag_task1 = DummyOperator(task_id='test_state_succeeded1', dag=dag)
dag_task2 = DummyOperator(task_id='test_state_failed2', dag=dag)
dag_task1.set_downstream(dag_task2)
initial_task_states = {
'test_state_succeeded1': State.SUCCESS,
'test_state_failed2': State.FAILED,
}
dag_run = self.create_dag_run(dag=dag, state=State.RUNNING, task_states=initial_task_states)
_, callback = dag_run.update_state(execute_callbacks=False)
self.assertEqual(State.FAILED, dag_run.state)
# With execute_callbacks=False, update_state() returns the callback request instead of running it
assert callback == DagCallbackRequest(
full_filepath=dag_run.dag.fileloc,
dag_id="test_dagrun_update_state_with_handle_callback_failure",
execution_date=dag_run.execution_date,
is_failure_callback=True,
msg="task_failure",
)
def test_dagrun_set_state_end_date(self):
session = settings.Session()
dag = DAG('test_dagrun_set_state_end_date', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})
dag.clear()
now = timezone.utcnow()
dr = dag.create_dagrun(
run_id='test_dagrun_set_state_end_date', state=State.RUNNING, execution_date=now, start_date=now
)
# Initial end_date should be NULL
# State.SUCCESS and State.FAILED are both terminal states and should set end_date
# State.RUNNING sets end_date back to NULL
session.add(dr)
session.commit()
self.assertIsNone(dr.end_date)
dr.set_state(State.SUCCESS)
session.merge(dr)
session.commit()
dr_database = session.query(DagRun).filter(DagRun.run_id == 'test_dagrun_set_state_end_date').one()
self.assertIsNotNone(dr_database.end_date)
self.assertEqual(dr.end_date, dr_database.end_date)
dr.set_state(State.RUNNING)
session.merge(dr)
session.commit()
dr_database = session.query(DagRun).filter(DagRun.run_id == 'test_dagrun_set_state_end_date').one()
self.assertIsNone(dr_database.end_date)
dr.set_state(State.FAILED)
session.merge(dr)
session.commit()
dr_database = session.query(DagRun).filter(DagRun.run_id == 'test_dagrun_set_state_end_date').one()
self.assertIsNotNone(dr_database.end_date)
self.assertEqual(dr.end_date, dr_database.end_date)
def test_dagrun_update_state_end_date(self):
session = settings.Session()
dag = DAG(
'test_dagrun_update_state_end_date', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}
)
# A -> B
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op1.set_upstream(op2)
dag.clear()
now = timezone.utcnow()
dr = dag.create_dagrun(
run_id='test_dagrun_update_state_end_date',
state=State.RUNNING,
execution_date=now,
start_date=now,
)
# Initial end_date should be NULL
# State.SUCCESS and State.FAILED are both terminal states and should set end_date
# State.RUNNING sets end_date back to NULL
session.merge(dr)
session.commit()
self.assertIsNone(dr.end_date)
ti_op1 = dr.get_task_instance(task_id=op1.task_id)
ti_op1.set_state(state=State.SUCCESS, session=session)
ti_op2 = dr.get_task_instance(task_id=op2.task_id)
ti_op2.set_state(state=State.SUCCESS, session=session)
dr.update_state()
dr_database = session.query(DagRun).filter(DagRun.run_id == 'test_dagrun_update_state_end_date').one()
self.assertIsNotNone(dr_database.end_date)
self.assertEqual(dr.end_date, dr_database.end_date)
ti_op1.set_state(state=State.RUNNING, session=session)
ti_op2.set_state(state=State.RUNNING, session=session)
dr.update_state()
dr_database = session.query(DagRun).filter(DagRun.run_id == 'test_dagrun_update_state_end_date').one()
self.assertEqual(dr._state, State.RUNNING)
self.assertIsNone(dr.end_date)
self.assertIsNone(dr_database.end_date)
ti_op1.set_state(state=State.FAILED, session=session)
ti_op2.set_state(state=State.FAILED, session=session)
dr.update_state()
dr_database = session.query(DagRun).filter(DagRun.run_id == 'test_dagrun_update_state_end_date').one()
self.assertIsNotNone(dr_database.end_date)
self.assertEqual(dr.end_date, dr_database.end_date)
def test_get_task_instance_on_empty_dagrun(self):
"""
Make sure that a proper value is returned when a dagrun has no task instances
"""
dag = DAG(dag_id='test_get_task_instance_on_empty_dagrun', start_date=timezone.datetime(2017, 1, 1))
ShortCircuitOperator(task_id='test_short_circuit_false', dag=dag, python_callable=lambda: False)
session = settings.Session()
now = timezone.utcnow()
# Don't use create_dagrun since it will create the task instances too, which we
# don't want
dag_run = models.DagRun(
dag_id=dag.dag_id,
run_type=DagRunType.MANUAL,
execution_date=now,
start_date=now,
state=State.RUNNING,
external_trigger=False,
)
session.add(dag_run)
session.commit()
ti = dag_run.get_task_instance('test_short_circuit_false')
self.assertEqual(None, ti)
def test_get_latest_runs(self):
session = settings.Session()
dag = DAG(dag_id='test_latest_runs_1', start_date=DEFAULT_DATE)
self.create_dag_run(dag, execution_date=timezone.datetime(2015, 1, 1))
self.create_dag_run(dag, execution_date=timezone.datetime(2015, 1, 2))
dagruns = models.DagRun.get_latest_runs(session)
session.close()
for dagrun in dagruns:
if dagrun.dag_id == 'test_latest_runs_1':
self.assertEqual(dagrun.execution_date, timezone.datetime(2015, 1, 2))
def test_is_backfill(self):
dag = DAG(dag_id='test_is_backfill', start_date=DEFAULT_DATE)
dagrun = self.create_dag_run(dag, execution_date=DEFAULT_DATE)
dagrun.run_type = DagRunType.BACKFILL_JOB
dagrun2 = self.create_dag_run(dag, execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
dagrun3 = self.create_dag_run(dag, execution_date=DEFAULT_DATE + datetime.timedelta(days=2))
dagrun3.run_id = None
self.assertTrue(dagrun.is_backfill)
self.assertFalse(dagrun2.is_backfill)
self.assertFalse(dagrun3.is_backfill)
def test_removed_task_instances_can_be_restored(self):
def with_all_tasks_removed(dag):
return DAG(dag_id=dag.dag_id, start_date=dag.start_date)
dag = DAG('test_task_restoration', start_date=DEFAULT_DATE)
dag.add_task(DummyOperator(task_id='flaky_task', owner='test'))
dagrun = self.create_dag_run(dag)
flaky_ti = dagrun.get_task_instances()[0]
self.assertEqual('flaky_task', flaky_ti.task_id)
self.assertEqual(State.NONE, flaky_ti.state)
dagrun.dag = with_all_tasks_removed(dag)
dagrun.verify_integrity()
flaky_ti.refresh_from_db()
self.assertEqual(State.NONE, flaky_ti.state)
dagrun.dag.add_task(DummyOperator(task_id='flaky_task', owner='test'))
dagrun.verify_integrity()
flaky_ti.refresh_from_db()
self.assertEqual(State.NONE, flaky_ti.state)
def test_already_added_task_instances_can_be_ignored(self):
dag = DAG('triggered_dag', start_date=DEFAULT_DATE)
dag.add_task(DummyOperator(task_id='first_task', owner='test'))
dagrun = self.create_dag_run(dag)
first_ti = dagrun.get_task_instances()[0]
self.assertEqual('first_task', first_ti.task_id)
self.assertEqual(State.NONE, first_ti.state)
# Let's assume that the above TI was added to the DB by the webserver, but if the scheduler
# is running the same method at the same time it would find 0 TIs for this dag
# and proceed further to create TIs. Hence we mock DagRun.get_task_instances
# to return an empty list of TIs.
with mock.patch.object(DagRun, 'get_task_instances') as mock_gtis:
mock_gtis.return_value = []
dagrun.verify_integrity()
first_ti.refresh_from_db()
self.assertEqual(State.NONE, first_ti.state)
@parameterized.expand([(state,) for state in State.task_states])
@mock.patch('airflow.models.dagrun.task_instance_mutation_hook')
def test_task_instance_mutation_hook(self, state, mock_hook):
def mutate_task_instance(task_instance):
if task_instance.queue == 'queue1':
task_instance.queue = 'queue2'
else:
task_instance.queue = 'queue1'
mock_hook.side_effect = mutate_task_instance
dag = DAG('test_task_instance_mutation_hook', start_date=DEFAULT_DATE)
dag.add_task(DummyOperator(task_id='task_to_mutate', owner='test', queue='queue1'))
dagrun = self.create_dag_run(dag)
task = dagrun.get_task_instances()[0]
session = settings.Session()
task.state = state
session.merge(task)
session.commit()
assert task.queue == 'queue2'
dagrun.verify_integrity()
task = dagrun.get_task_instances()[0]
assert task.queue == 'queue1'
@parameterized.expand(
[
(State.SUCCESS, True),
(State.SKIPPED, True),
(State.RUNNING, False),
(State.FAILED, False),
(State.NONE, False),
]
)
def test_depends_on_past(self, prev_ti_state, is_ti_success):
dag_id = 'test_depends_on_past'
dag = self.dagbag.get_dag(dag_id)
task = dag.tasks[0]
self.create_dag_run(dag, execution_date=timezone.datetime(2016, 1, 1, 0, 0, 0))
self.create_dag_run(dag, execution_date=timezone.datetime(2016, 1, 2, 0, 0, 0))
prev_ti = TI(task, timezone.datetime(2016, 1, 1, 0, 0, 0))
ti = TI(task, timezone.datetime(2016, 1, 2, 0, 0, 0))
prev_ti.set_state(prev_ti_state)
ti.set_state(State.QUEUED)
ti.run()
self.assertEqual(ti.state == State.SUCCESS, is_ti_success)
@parameterized.expand(
[
(State.SUCCESS, True),
(State.SKIPPED, True),
(State.RUNNING, False),
(State.FAILED, False),
(State.NONE, False),
]
)
def test_wait_for_downstream(self, prev_ti_state, is_ti_success):
dag_id = 'test_wait_for_downstream'
dag = self.dagbag.get_dag(dag_id)
upstream, downstream = dag.tasks
# For ti.set_state() to work, the DagRun has to exist;
# otherwise ti.previous_ti returns an unpersisted TI
self.create_dag_run(dag, execution_date=timezone.datetime(2016, 1, 1, 0, 0, 0))
self.create_dag_run(dag, execution_date=timezone.datetime(2016, 1, 2, 0, 0, 0))
prev_ti_downstream = TI(task=downstream, execution_date=timezone.datetime(2016, 1, 1, 0, 0, 0))
ti = TI(task=upstream, execution_date=timezone.datetime(2016, 1, 2, 0, 0, 0))
prev_ti = ti.get_previous_ti()
prev_ti.set_state(State.SUCCESS)
self.assertEqual(prev_ti.state, State.SUCCESS)
prev_ti_downstream.set_state(prev_ti_state)
ti.set_state(State.QUEUED)
ti.run()
self.assertEqual(ti.state == State.SUCCESS, is_ti_success)
def test_next_dagruns_to_examine_only_unpaused(self):
"""
Check that "next_dagruns_to_examine" ignores runs from paused/inactive DAGs
"""
dag = DAG(dag_id='test_dags', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag, owner='airflow')
session = settings.Session()
orm_dag = DagModel(
dag_id=dag.dag_id,
has_task_concurrency_limits=False,
next_dagrun=dag.start_date,
next_dagrun_create_after=dag.following_schedule(DEFAULT_DATE),
is_active=True,
)
session.add(orm_dag)
session.flush()
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
runs = DagRun.next_dagruns_to_examine(session).all()
assert runs == [dr]
orm_dag.is_paused = True
session.flush()
runs = DagRun.next_dagruns_to_examine(session).all()
assert runs == []
session.rollback()
session.close()
@mock.patch.object(Stats, 'timing')
def test_no_scheduling_delay_for_nonscheduled_runs(self, stats_mock):
"""
Tests that dag scheduling delay stat is not called if the dagrun is not a scheduled run.
This case is a manual run. Simple test for sanity check.
"""
dag = DAG(dag_id='test_dagrun_stats', start_date=days_ago(1))
dag_task = DummyOperator(task_id='dummy', dag=dag)
initial_task_states = {
dag_task.task_id: State.SUCCESS,
}
dag_run = self.create_dag_run(dag=dag, state=State.RUNNING, task_states=initial_task_states)
dag_run.update_state()
self.assertNotIn(call(f'dagrun.{dag.dag_id}.first_task_scheduling_delay'), stats_mock.mock_calls)
@mock.patch.object(Stats, 'timing')
def test_emit_scheduling_delay(self, stats_mock):
"""
Tests that the dag scheduling delay stat is set properly when running a scheduled dag.
dag_run.update_state() invokes the _emit_true_scheduling_delay_stats_for_finished_state method.
"""
dag = DAG(dag_id='test_emit_dag_stats', start_date=days_ago(1))
dag_task = DummyOperator(task_id='dummy', dag=dag, owner='airflow')
session = settings.Session()
orm_dag = DagModel(
dag_id=dag.dag_id,
has_task_concurrency_limits=False,
next_dagrun=dag.start_date,
next_dagrun_create_after=dag.following_schedule(dag.start_date),
is_active=True,
)
session.add(orm_dag)
session.flush()
dag_run = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
state=State.SUCCESS,
execution_date=dag.start_date,
start_date=dag.start_date,
session=session,
)
ti = dag_run.get_task_instance(dag_task.task_id)
ti.set_state(State.SUCCESS, session)
session.commit()
session.close()
dag_run.update_state()
true_delay = (ti.start_date - dag.following_schedule(dag_run.execution_date)).total_seconds()
stats_mock.assert_called()
sched_delay_stat_call = call(f'dagrun.{dag.dag_id}.first_task_scheduling_delay', true_delay)
self.assertIn(sched_delay_stat_call, stats_mock.mock_calls)
def test_states_sets(self):
"""
Tests that State.failed_states and State.success_states work as expected.
"""
dag = DAG(dag_id='test_dagrun_states', start_date=days_ago(1))
dag_task_success = DummyOperator(task_id='dummy', dag=dag)
dag_task_failed = DummyOperator(task_id='dummy2', dag=dag)
initial_task_states = {
dag_task_success.task_id: State.SUCCESS,
dag_task_failed.task_id: State.FAILED,
}
dag_run = self.create_dag_run(dag=dag, state=State.RUNNING, task_states=initial_task_states)
ti_success = dag_run.get_task_instance(dag_task_success.task_id)
ti_failed = dag_run.get_task_instance(dag_task_failed.task_id)
self.assertIn(ti_success.state, State.success_states)
self.assertIn(ti_failed.state, State.failed_states)
def test_delete_dag_run(self):
dag = DAG(dag_id='test_delete_dag_run', start_date=days_ago(1))
dag_run = self.create_dag_run(dag=dag)
session = settings.Session()
session.delete(dag_run)
session.commit()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
location: str,
publisher_name: str,
type: str,
version: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}')
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str'),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
"type": _SERIALIZER.url("type", type, 'str'),
"version": _SERIALIZER.url("version", version, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_types_request(
location: str,
publisher_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types')
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str'),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_versions_request(
location: str,
publisher_name: str,
type: str,
subscription_id: str,
*,
filter: Optional[str] = None,
top: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions')
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str'),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
"type": _SERIALIZER.url("type", type, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
if orderby is not None:
query_parameters['$orderby'] = _SERIALIZER.query("orderby", orderby, 'str')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class VirtualMachineExtensionImagesOperations(object):
"""VirtualMachineExtensionImagesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
location: str,
publisher_name: str,
type: str,
version: str,
**kwargs: Any
) -> "_models.VirtualMachineExtensionImage":
"""Gets a virtual machine extension image.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name:
:type publisher_name: str
:param type:
:type type: str
:param version:
:type version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineExtensionImage, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_06_01.models.VirtualMachineExtensionImage
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineExtensionImage"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
location=location,
publisher_name=publisher_name,
type=type,
version=version,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineExtensionImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}'} # type: ignore
@distributed_trace
def list_types(
self,
location: str,
publisher_name: str,
**kwargs: Any
) -> List["_models.VirtualMachineExtensionImage"]:
"""Gets a list of virtual machine extension image types.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name:
:type publisher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineExtensionImage, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2020_06_01.models.VirtualMachineExtensionImage]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineExtensionImage"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_types_request(
location=location,
publisher_name=publisher_name,
subscription_id=self._config.subscription_id,
template_url=self.list_types.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineExtensionImage]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_types.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types'} # type: ignore
@distributed_trace
def list_versions(
self,
location: str,
publisher_name: str,
type: str,
filter: Optional[str] = None,
top: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> List["_models.VirtualMachineExtensionImage"]:
"""Gets a list of virtual machine extension image versions.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name:
:type publisher_name: str
:param type:
:type type: str
:param filter: The filter to apply on the operation.
:type filter: str
:param top:
:type top: int
:param orderby:
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineExtensionImage, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2020_06_01.models.VirtualMachineExtensionImage]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineExtensionImage"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_versions_request(
location=location,
publisher_name=publisher_name,
type=type,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
orderby=orderby,
template_url=self.list_versions.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineExtensionImage]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_versions.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions'} # type: ignore
|
|
# Copyright (c) 2006-2009 The Trustees of Indiana University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the Indiana University nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__doc__="""
Dummy versions of the SPRE classes for the SPU. These can be used to
develop SPU code on platforms without direct SPU support.
"""
import array
import math
import corepy.spre.spe as spe
class ExecParams(object):
def __init__(self):
# $r3
self.addr = None # address of syn code
self.p1 = None
self.p2 = None
self.p3 = None
# $r4
self.size = None # size of syn code
self.p4 = None
self.p5 = None
self.p6 = None
# $r5
self.p7 = None
self.p8 = None
self.p9 = None
self.p10 = None
return
import corepy.arch.spu.isa as spu
import corepy.arch.spu.lib.util as util
# ------------------------------
# Registers
# ------------------------------
class SPURegister(spe.Register): pass
# ------------------------------
# Constants
# ------------------------------
WORD_TYPE = 'I' # array type that corresponds to 1 word
WORD_SIZE = 4 # size in bytes of one word
WORD_BITS = WORD_SIZE * 8 # number of bits in a word
INT_SIZES = {'b':1, 'c':1, 'h':2, 'i':4, 'B':1, 'H':2, 'I':4}
# ------------------------------
# Constants
# ------------------------------
# Parameters - (register, slot)
REG, SLOT = (0, 1)
spu_param_1 = (3, 1)
spu_param_2 = (3, 2)
spu_param_3 = (3, 3)
spu_param_4 = (4, 1)
spu_param_5 = (4, 2)
spu_param_6 = (4, 3)
spu_param_7 = (5, 0)
spu_param_8 = (5, 1)
spu_param_9 = (5, 2)
spu_param_10 = (5, 3)
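# These (register, slot) pairs mirror the ExecParams layout above: p1..p3 share $r3 with
# the code address, p4..p6 share $r4 with the code size, and p7..p10 fill the four slots
# of $r5.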
N_SPUS = 6
# ------------------------------------------------------------
# Aligned Memory
# ------------------------------------------------------------
class aligned_memory(object):
def __init__(self, size, alignment = 128, typecode = 'B'):
print 'Using dummy aligned memory'
self.data = array.array(typecode, range(size))
self.typecode = typecode
return
def __str__(self): return '<aligned_memory typecode = %s addr = 0x%X size = %d ' % (
self.data.typecode, self.get_addr(), self.get_size())
def get_addr(self): return self.data.buffer_info()[0]
def get_size(self): return len(self.data) * INT_SIZES[self.typecode]
def __len__(self):
return self.get_size() / INT_SIZES[self.typecode]
def buffer_info(self):
return (self.get_addr(), self.get_size())
def copy_to(self, source, size):
return
def copy_from(self, dest, size):
return
def word_at(self, index, signed = False):
"""
Minor hack to give fast access to data...
TODO: full array-type interface?
"""
return 0
# ------------------------------------------------------------
# Dummy spe_exec
# ------------------------------------------------------------
class DummyExec(object):
ExecParams = ExecParams
def _make_executable(addr, size): return 0
make_executable = staticmethod(_make_executable)
def _cancel_async(spe_id): return 0
cancel_async = staticmethod(_cancel_async)
def _suspend_async(spe_id): return 0
suspend_async = staticmethod(_suspend_async)
def _resume_async(spe_id): return 0
resume_async = staticmethod(_resume_async)
def _wait_async(spe_id, result): return 0
wait_async = staticmethod(_wait_async)
def _join_async(spe_id): return 0
join_async = staticmethod(_join_async)
def _execute_param_async(addr,params): return 0
execute_param_async = staticmethod(_execute_param_async)
def _execute_async(addr): return 0
execute_async = staticmethod(_execute_async)
def _execute_int(addr): return 0
execute_int = staticmethod(_execute_int)
def _execute_param_int(addr, params): return 0
execute_param_int = staticmethod(_execute_param_int)
def _execute_void(addr): return
execute_void = staticmethod(_execute_void)
def _execute_param_void(addr, params): return
execute_param_void = staticmethod(_execute_param_void)
def _execute_fp(addr): return 0.0
execute_fp = staticmethod(_execute_fp)
def _read_out_mbox(spe_id): return 0
read_out_mbox = staticmethod(_read_out_mbox)
def _stat_out_mbox(spe_id): return 0
stat_out_mbox = staticmethod(_stat_out_mbox)
def _write_in_mbox(spe_id, data): return 0
write_in_mbox = staticmethod(_write_in_mbox)
def _stat_in_mbox(spe_id): return 0
stat_in_mbox = staticmethod(_stat_in_mbox)
def _write_signal(spe_id, signal_reg, data): return 0
write_signal = staticmethod(_write_signal)
def _wait_stop_event(spe_id): return 0
wait_stop_event = staticmethod(_wait_stop_event)
def _spu_putb(speid, ls, ea, size, tag, tid, rid): return 0
spu_putb = staticmethod(_spu_putb)
def _spu_getb(speid, ls, ea, size, tag, tid, rid): return 0
spu_getb = staticmethod(_spu_getb)
def _read_tag_status_all(speid, mask): return 0
read_tag_status_all = staticmethod(_read_tag_status_all)
# ------------------------------------------------------------
# Helpers
# ------------------------------------------------------------
def copy_param(code, target, source):
"""
Copy a parameter from source reg to preferred slot in the target reg.
For params in slot 0, this is just an add immediate.
For params in other slots, the source is rotated.
Note that other values in the source are copied, too.
"""
if source[SLOT] != 0:
code.add(spu.rotqbyi(target, source[REG], source[SLOT] * 4))
else:
code.add(spu.ai(target, source[REG], 0))
return
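# Illustration using the parameter slots defined above: spu_param_7 = (5, 0) already sits
# in the preferred slot, so copy_param emits `ai target, $5, 0`; spu_param_1 = (3, 1) sits
# in slot 1, so copy_param emits `rotqbyi target, $3, 4` to rotate it into place.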
ALIGN_UP = 0
ALIGN_DOWN = 1
def align_addr(addr, align = 16, dir = ALIGN_DOWN):
"""
Round an address to the nearest aligned address based on align.
Round up or down based on dir.
"""
if dir == ALIGN_DOWN:
return addr - (addr % align)
else:
return addr + (align - addr % align)
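# Example: align_addr(0x1003, 16, ALIGN_DOWN) returns 0x1000 and
# align_addr(0x1003, 16, ALIGN_UP) returns 0x1010; note that an already-aligned address
# is still pushed up a full `align` step when rounding up.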
# ------------------------------------------------------------
# InstructionStream
# ------------------------------------------------------------
class InstructionStream(spe.InstructionStream):
"""
SPU Instruction Stream.
Two assumptions:
o We have the processor until we're done
o If we're preempted, the whole state is saved automagically
Based on these and the fact that we are a leaf node, no register
saves are attempted and only the raw instruction stream (no
prologue/epilogue) is used.
"""
# Class attributes
RegisterFiles = (('gp', SPURegister, range(0,128)),)
default_register_type = SPURegister
exec_module = DummyExec
align = 16 # 128 is max efficiency, 16 is what array currently does
instruction_type = WORD_TYPE
def __init__(self, optimize=False):
spe.InstructionStream.__init__(self)
self._optimize = optimize
return
# ------------------------------
# Execute/ABI support
# ------------------------------
def _synthesize_prologue(self):
"""
Setup register 0.
"""
self._prologue = InstructionStream()
# Reserve register r0 for the value zero
self.acquire_register(reg = 0)
util.load_word(self._prologue, 0, 0, zero = False)
return
def _synthesize_epilogue(self):
"""
Do nothing.
"""
return
def cache_code(self):
"""
Add a stop signal with return type 0x2000 (EXIT_SUCCESS) to the
end of the instruction stream. (BE Handbook, p. 422).
"""
# Generate the prologue
self._synthesize_prologue()
# Don't have a real epilogue.
self.add(spu.stop(0x2000))
# self._check_alignment(self._code, 'spu code')
# self.exec_module.make_executable(self._code.buffer_info()[0], len(self._code))
# Append our instructions to the prologue's, first making sure the alignment is correct.
if len(self._prologue._code) % 2 == 1: # Odd number of instructions
self._prologue.add(spu.lnop(0))
self._prologue._code.extend(self._code)
self._prologue._check_alignment(self._prologue._code, 'spu prologue')
self._epilogue = self
self._cached = True
return
def add_return(self):
"""
Do nothing.
"""
return
def add_jump(self, addr):
"""
Do nothing.
"""
return
def align_code(self, boundary):
"""
Insert the appropriate nop/lnops to align the next instruction
on the byte boundary. boundary must be a multiple of four.
"""
word_align = boundary / 4
while len(self._code) % word_align:
if len(self._code) % 2 == 0:
self.add(spu.nop(0), True)
else:
self.add(spu.lnop(0), True)
return
def add(self, inst, optimize_override = False):
if not optimize_override and self._optimize:
# binary_string_inst = spu.DecToBin(inst)
op = 'nop'
# if binary_string_inst[0:3] in spu.inst_opcodes:
# op = spu.inst_opcodes[binary_string_inst[0:3]]
# elif binary_string_inst[0:6] in spu.inst_opcodes:
# op = spu.inst_opcodes[binary_string_inst[0:6]]
# elif binary_string_inst[0:7] in spu.inst_opcodes:
# op = spu.inst_opcodes[binary_string_inst[0:7]]
# elif binary_string_inst[0:8] in spu.inst_opcodes:
# op = spu.inst_opcodes[binary_string_inst[0:8]]
# elif binary_string_inst[0:9] in spu.inst_opcodes:
# op = spu.inst_opcodes[binary_string_inst[0:9]]
# elif binary_string_inst[0:10] in spu.inst_opcodes:
# op = spu.inst_opcodes[binary_string_inst[0:10]]
pipeline = inst.cycles[0]
if (len(self._code) % 2 == 0) and pipeline == 0:
InstructionStream.add(self, inst)
elif (len(self._code) % 2 == 1) and pipeline == 1:
InstructionStream.add(self, inst)
elif (len(self._code) % 2 == 0) and pipeline == 1:
InstructionStream.add(self, spu.nop(0))
InstructionStream.add(self, inst)
elif (len(self._code) % 2 == 1) and pipeline == 0:
InstructionStream.add(self, spu.lnop(0))
InstructionStream.add(self, inst)
else:
spe.InstructionStream.add(self, inst)
# Invalidate the cache
self._cached = False
return len(self._code)
class ParallelInstructionStream(InstructionStream):
def __init__(self, optimize=False):
InstructionStream.__init__(self, optimize)
self.r_rank = self.acquire_register()
self.r_size = self.acquire_register()
self.r_block_size = None
self.r_offset = None
# All the params are stored in r_rank
self.r_params = self.r_rank
# User/library supplied data size, used by processor to determine
# block and offset for an execution run. This value is in bytes.
self.raw_data_size = None
return
def _synthesize_prologue(self):
"""
Add raw_data_size/offset support code.
"""
InstructionStream._synthesize_prologue(self)
# Parallel parameters are passed in the preferred slot and the next
# slot of the user argument.
self._prologue.add(spu.shlqbyi(self.r_rank, SPURegister(3, None), 4))
self._prologue.add(spu.shlqbyi(self.r_size, SPURegister(3, None), 8))
if self.raw_data_size is not None:
self.acquire_block_registers()
self._prologue.add(spu.shlqbyi(self.r_block_size, SPURegister(4, None), 4))
self._prologue.add(spu.shlqbyi(self.r_offset, SPURegister(4, None), 8))
else:
print 'no raw data'
return
def acquire_block_registers(self):
if self.r_block_size is None:
self.r_block_size = self.acquire_register()
if self.r_offset is None:
self.r_offset = self.acquire_register()
# print 'offset/block_size', self.r_offset, self.r_block_size
return
def release_parallel_registers(self):
self.release_register(self.r_rank)
self.release_register(self.r_size)
if self.r_block_size is not None:
self.release_register(self.r_block_size)
if self.r_offset is not None:
self.release_register(self.r_offset)
return
def _copy_params(params, rank, size):
"""
Copy params.
"""
ret = ExecParams()
ret.addr = params.addr
ret.p1 = rank
ret.p2 = size
ret.p3 = params.p3
ret.size = params.size
ret.p4 = params.p4
ret.p5 = params.p5
ret.p6 = params.p6
ret.p7 = params.p7
ret.p8 = params.p8
ret.p9 = params.p9
ret.p10 = params.p10
return ret
class Processor(spe.Processor):
exec_module = DummyExec
ExecParams = ExecParams
def execute(self, code, mode = 'int', debug = False, params = None, n_spus = 1):
"""
Execute the instruction stream in the code object.
Execution modes are:
'int' - return the integer value in register gp_return when
execution is complete
'fp' - return the floating point value in register fp_return
when execution is complete
'void' - return None
'async'- execute the code in a new thread and return the thread
id immediately
If debug is True, the buffer address and code length are printed
to stdout before execution.
ParallelInstructionStream execution:
If code is a ParallelInstructionStream, n_spus threads are
created and the parameter structure is set up with world_size=n_spus
and rank values for each thread. A list containing the speids is
returned.
If raw_data_size is present and set on the code object, set the
block_size and offset parameters.
The parameters for parallel execution are:
p1 = rank ($r3.2)
p2 = size ($r3.3)
p4 = block_size ($r4.2)
p5 = offset ($r4.3)
"""
if len(code._code) == 0:
return None
# Cache the code here
if not code._cached:
code.cache_code()
# Setup the parameter structure
if params is None:
params = ExecParams()
addr = code._prologue.inst_addr()
params.addr = addr
params.size = len(code._prologue._code) * 4 # size in bytes
retval = None
if type(code) is ParallelInstructionStream:
# Parallel SPU execution
speids = []
if n_spus > 8:
raise Exception("Too many SPUs requests (%d > 8)" % n_spus)
# print 'Regs:', code.r_rank, code.r_size, code.r_block_size, code.r_offset
# Set up the parameters and execute each spu thread
for i in range(n_spus):
pi = _copy_params(params, i, n_spus)
if hasattr(code, "raw_data_size") and code.raw_data_size is not None:
pi.p4 = int(code.raw_data_size / n_spus) # block_size
pi.p5 = pi.p4 * i # offset
# print 'Executing: 0x%x %d %d %d %d' % (pi.addr, pi.p1, pi.p2, pi.p4, pi.p5)
speids.append(spe.Processor.execute(self, code, debug=debug, params=pi, mode='async'))
# Handle blocking execution modes
if mode != 'async':
reterrs = [self.join(speid) for speid in speids]
retval = reterrs
else:
retval = speids
else:
# Single SPU execution
retval = spe.Processor.execute(self, code, mode, debug, params)
return retval
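# A minimal usage sketch of the execution modes documented above (assumption: `code` is a
# ParallelInstructionStream that already holds instructions and sets raw_data_size, as in
# TestParallel below):
#
#   proc = Processor()
#   speids = proc.execute(code, mode='async', n_spus=4)   # one SPU thread per rank
#   results = [proc.join(speid) for speid in speids]      # block until every thread exits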
spu_exec = DummyExec
# ------------------------------------------------------------
# Unit tests
# ------------------------------------------------------------
def TestInt():
code = InstructionStream()
proc = Processor()
spu.set_active_code(code)
r13 = code.acquire_register(reg = 13)
r20 = code.acquire_register(reg = 20)
spu.ai(r20, r20, 13)
spu.ai(r13, r13, 13)
spu.ai(r13, r13, 13)
spu.ai(r13, r13, 13)
spu.ai(r13, r13, 13)
spu.ai(r13, r13, 13)
spu.stop(0x200D)
code.print_code()
r = proc.execute(code) # , debug = True)
print 'int result:', r
# while True:
# pass
return
def TestParams():
# Run this with a stop instruction and examine the registers
code = InstructionStream()
proc = Processor()
# code.add(spu.stop(0xA))
code.add(spu.stop(0x200D))
params = ExecParams()
params.p1 = 1
params.p2 = 2
params.p3 = 3
params.p4 = 4
params.p5 = 5
params.p6 = 6
params.p7 = 7
params.p8 = 8
params.p9 = 9
params.p10 = 10
r = proc.execute(code, params = params)
# print 'int result:', r
# while True:
# pass
return
def TestAlignedMemory():
import spuiter
n = 10000
a = array.array('I', range(n))
aa = aligned_memory(len(a), typecode='I')
aa.copy_to(a.buffer_info()[0], len(a))
# aa.print_memory()
print str(aa), '0x%X, %d' % a.buffer_info()
code = InstructionStream()
proc = Processor()
md = spuiter.memory_desc('I')
md.from_array(aa)
print str(md)
md.get(code, 0)
ls = spuiter.memory_desc('I', 0, n)
seq_iter = spuiter.spu_vec_iter(code, ls)
for i in seq_iter:
i.v = i + i
print str(md)
md.put(code, 0)
r = proc.execute(code, mode = 'int')
# print a
aa.copy_from(a.buffer_info()[0], len(a))
# aa.print_memory()
print a[:20]
print a[4090:4105]
print a[8188:8200]
print a[-20:]
return
def TestParallel():
# Run this with a stop instruction and examine the registers and memory
code = ParallelInstructionStream()
proc = Processor()
code.raw_data_size = 128*8
r = code.acquire_register()
code.add(spu.ai(r, r, 0xCAFE))
code.add(spu.ai(r, r, 0xBABE))
code.add(spu.stop(0x2000))
r = proc.execute(code, mode='async', n_spus = 6)
for speid in r:
proc.join(speid)
assert(True)
return
def TestOptimization():
import time
import spuiter
import spuvar
code1 = InstructionStream(optimize=False)
code2 = InstructionStream(optimize=True)
proc = Processor()
for code in [code1, code2]:
x = spuvar.spu_int_var(code, 0)
y = spuvar.spu_int_var(code, 0)
for i in spuiter.syn_iter(code, pow(2, 14)):
x.v = x + x
y.v = y + y
s = time.time()
proc.execute(code)
e = time.time()
print "Total time: ", e - s
print "(First time is withOUT optimization.)"
def TestInt2(i0 = 0, i1 = 1):
i2 = i0 + i1
i3 = i1 + i2
code = InstructionStream()
proc = Processor()
r_loop = 4
r_address = 5
r0 = 6
r1 = 7
r2 = 8
r3 = 9
# Load arguments into a quadword
#################
# Pack quadword #
#################
def load_value_int32(code, reg, value, clear = False):
# obviously, value should be 32 bit integer
code.add(spu.ilhu(reg, value / pow(2, 16))) # immediate load halfword upper
code.add(spu.iohl(reg, value % pow(2, 16))) # immediate or halfword lower
if clear:
code.add(spu.shlqbyi(reg, reg, 12)) # shift left qw by bytes, clears right bytes
return
load_value_int32(code, r0, i0, True)
load_value_int32(code, r1, i1, True)
code.add(spu.rotqbyi(r1, r1, 12)) # rotate qw by bytes
load_value_int32(code, r2, i2, True)
code.add(spu.rotqbyi(r2, r2, 8))
load_value_int32(code, r3, i3, True)
code.add(spu.rotqbyi(r3, r3, 4))
code.add(spu.a(r0, r0, r1))
code.add(spu.a(r0, r0, r2))
code.add(spu.a(r0, r0, r3))
##########
# Main loop to calculate the Fibonacci sequence
load_value_int32(code, r_address, pow(2, 16), clear = False) # start at 64K
load_value_int32(code, r_loop, 0, clear = False)
start_label = code.size() + 1
code.add(spu.sfi(r_loop, r_loop, 1))
next = code.size() + 1 # index of the branch instruction, computed the same way as start_label
code.add(spu.brnz(r_loop, (-(next - start_label) * spu.WORD_SIZE)))
#
code.add(spu.stop(0x2005))
r = proc.execute(code)
# assert(r == 12)
# print 'int result:', r
return
if __name__ == '__main__':
TestInt()
TestParams()
TestParallel()
# TestOptimization()
# TestAlignedMemory()
|
|
import unittest
import string
from tests import *
from binding import Context
from contenthandling import ContentHandler
import generators
class TestsTest(unittest.TestCase):
""" Testing for basic rest test methods """
def test_parse_test(self):
""" Test basic ways of creating test objects from input object structure """
#Most basic case
input = {"url": "/ping", "method": "DELETE", "NAME":"foo", "group":"bar", "body":"<xml>input</xml>","headers":{"Accept":"Application/json"}}
test = Test.parse_test('',input)
self.assertTrue(test.url == input['url'])
self.assertTrue(test.method == input['method'])
self.assertTrue(test.name == input['NAME'])
self.assertTrue(test.group == input['group'])
self.assertTrue(test.body == input['body'])
#Test headers match
self.assertFalse( set(test.headers.values()) ^ set(input['headers'].values()) )
#Happy path, only gotcha is that it's a POST, so it must accept a 200, 201, or 204 response code
input = {"url": "/ping", "meThod": "POST"}
test = Test.parse_test('',input)
self.assertTrue(test.url == input['url'])
self.assertTrue(test.method == input['meThod'])
self.assertTrue(test.expected_status == [200,201,204])
# Authentication
input = {"url": "/ping", "method": "GET", "auth_username" : "foo", "auth_password": "bar"}
test = Test.parse_test('',input)
self.assertTrue(test.auth_username == input['auth_username'])
self.assertTrue(test.auth_password == input['auth_password'])
self.assertTrue(test.expected_status == [200])
#Test that headers propagate
input = {"url": "/ping", "method": "GET", "headers" : [{"Accept":"application/json"},{"Accept-Encoding":"gzip"}] }
test = Test.parse_test('',input)
expected_headers = {"Accept":"application/json","Accept-Encoding":"gzip"}
self.assertTrue(test.url == input['url'])
self.assertTrue(test.method == 'GET')
self.assertTrue(test.expected_status == [200])
self.assertTrue(isinstance(test.headers,dict))
#Test no header mappings differ
self.assertFalse( set(test.headers.values()) ^ set(expected_headers.values()) )
#Test expected status propagates and handles conversion to integer
input = [{"url": "/ping"},{"name": "cheese"},{"expected_status":["200",204,"202"]}]
test = Test.parse_test('',input)
self.assertTrue(test.name == "cheese")
self.assertTrue(test.expected_status == [200,204,202])
self.assertFalse(test.is_context_modifier())
def test_parse_test_templated_headers(self):
""" Test parsing with templated headers """
heads = {"Accept":"Application/json", "$AuthHeader":"$AuthString"}
templated_heads = {"Accept":"Application/json", "apikey":"magic_passWord"}
context = Context()
context.bind_variables({'AuthHeader': 'apikey', 'AuthString':'magic_passWord'})
# If this doesn't throw errors we have silent failures
input_invalid = {"url": "/ping", "method": "DELETE", "NAME":"foo", "group":"bar", "body":"<xml>input</xml>","headers": 'goat'}
try:
test = Test.parse_test('', input_invalid)
self.fail("Expected error not thrown")
except TypeError:
pass
def assert_dict_eq(dict1, dict2):
""" Test dicts are equal """
self.assertEqual(2, len(set(dict1.items()) & set(dict2.items())))
# Before templating is used
input = {"url": "/ping", "method": "DELETE", "NAME":"foo", "group":"bar", "body":"<xml>input</xml>","headers": heads}
test = Test.parse_test('', input)
assert_dict_eq(heads, test.headers)
assert_dict_eq(heads, test.get_headers(context=context))
# After templating applied
input_templated = {"url": "/ping", "method": "DELETE", "NAME":"foo", "group":"bar", "body":"<xml>input</xml>","headers": {'tEmplate': heads}}
test2 = Test.parse_test('', input_templated)
assert_dict_eq(heads, test2.get_headers())
assert_dict_eq(templated_heads, test2.get_headers(context=context))
def test_parse_test_validators(self):
""" Test that for a test it can parse the validators section correctly """
input = {"url": '/test', 'validators' : [
{'comparator': {
'jsonpath_mini': 'key.val',
'comparator': 'eq',
'expected': 3
}},
{'extract_test': {'jsonpath_mini': 'key.val', 'test':'exists'}}
]}
test = Test.parse_test('',input)
self.assertTrue(test.validators)
self.assertEqual(2, len(test.validators))
self.assertTrue(isinstance(test.validators[0], validators.ComparatorValidator))
self.assertTrue(isinstance(test.validators[1], validators.ExtractTestValidator))
# Check the validators really work
self.assertTrue(test.validators[0].validate('{"id": 3, "key": {"val": 3}}'))
def test_parse_validators_fail(self):
""" Test an invalid validator syntax throws exception """
input = {"url": '/test', 'validators' : ['comparator']}
try:
test = Test.parse_test('', input)
self.fail("Should throw exception if not giving a dictionary-type comparator")
except TypeError:
pass
def test_parse_extractor_bind(self):
""" Test parsing of extractors """
test_config = {
"url": '/api',
'extract_binds': {
'id': {'jsonpath_mini': 'idfield'},
'name': {'jsonpath_mini': 'firstname'}
}
}
test = Test.parse_test('', test_config)
self.assertTrue(test.extract_binds)
self.assertEqual(2, len(test.extract_binds))
self.assertTrue('id' in test.extract_binds)
self.assertTrue('name' in test.extract_binds)
# Test extractors config'd correctly for extraction
myjson = '{"idfield": 3, "firstname": "bob"}'
extracted = test.extract_binds['id'].extract(myjson)
self.assertEqual(3, extracted)
extracted = test.extract_binds['name'].extract(myjson)
self.assertEqual('bob', extracted)
def test_parse_extractor_errors(self):
""" Test that expected errors are thrown on parsing """
test_config = {
"url": '/api',
'extract_binds': {'id': {}}
}
try:
test = Test.parse_test('', test_config)
self.fail("Should throw an error when doing empty mapping")
except TypeError:
pass
test_config['extract_binds']['id'] = {
'jsonpath_mini': 'query',
'test':'anotherquery'
}
try:
test = Test.parse_test('', test_config)
self.fail("Should throw an error when given multiple extractors")
except ValueError as te:
pass
def test_parse_validator_comparator(self):
""" Test parsing a comparator validator """
test_config = {
'name': 'Default',
'url': '/api',
'validators': [
{'comparator':{'jsonpath_mini': 'id',
'comparator': 'eq',
'expected': {'template': '$id'}}}
]
}
test = Test.parse_test('', test_config)
self.assertTrue(test.validators)
self.assertEqual(1, len(test.validators))
context = Context()
context.bind_variable('id', 3)
myjson = '{"id": "3"}'
failure = test.validators[0].validate(myjson, context=context)
self.assertTrue(test.validators[0].validate(myjson, context=context))
self.assertFalse(test.validators[0].validate(myjson))
def test_parse_validator_extract_test(self):
""" Tests parsing extract-test validator """
test_config = {
'name': 'Default',
'url': '/api',
'validators': [
{'extract_test':{'jsonpath_mini': 'login',
'test': 'exists'}}
]
}
test = Test.parse_test('', test_config)
self.assertTrue(test.validators)
self.assertEqual(1, len(test.validators))
myjson = '{"login": "testval"}'
self.assertTrue(test.validators[0].validate(myjson))
def test_variable_binding(self):
""" Test that tests successfully bind variables """
element = 3
input = [{"url": "/ping"},{"name": "cheese"},{"expected_status":["200",204,"202"]}]
input.append({"variable_binds":{'var':'value'}})
test = Test.parse_test('', input)
binds = test.variable_binds
self.assertEqual(1, len(binds))
self.assertEqual('value', binds['var'])
# Test that updates context correctly
context = Context()
test.update_context_before(context)
self.assertEqual('value', context.get_value('var'))
self.assertTrue(test.is_context_modifier())
def test_test_url_templating(self):
test = Test()
test.set_url('$cheese', isTemplate=True)
self.assertTrue(test.is_dynamic())
self.assertEqual('$cheese', test.get_url())
self.assertTrue(test.templates['url'])
context = Context()
context.bind_variable('cheese', 'stilton')
self.assertEqual('stilton', test.get_url(context=context))
realized = test.realize(context)
self.assertEqual('stilton', realized.url)
def test_test_content_templating(self):
test = Test()
handler = ContentHandler()
handler.is_template_content = True
handler.content = '{"first_name": "Gaius","id": "$id","last_name": "Baltar","login": "$login"}'
context = Context()
context.bind_variables({'id':9, 'login':'kvothe'})
test.set_body(handler)
templated = test.realize(context=context)
self.assertEqual(string.Template(handler.content).safe_substitute(context.get_values()),
templated.body)
def test_header_templating(self):
test = Test()
head_templated = {'$key': "$val"}
context = Context()
context.bind_variables({'key': 'cheese', 'val':'gouda'})
# No templating applied
test.headers = head_templated
head = test.get_headers()
self.assertEqual(1, len(head))
self.assertEqual('$val', head['$key'])
test.set_headers(head_templated, isTemplate=True)
self.assertTrue(test.templates)
self.assertTrue(test.NAME_HEADERS in test.templates)
# No context, no templating
head = test.headers
self.assertEqual(1, len(head))
self.assertEqual('$val', head['$key'])
# Templated with context
head = test.get_headers(context=context)
self.assertEqual(1, len(head))
self.assertEqual('gouda', head['cheese'])
def test_update_context_variables(self):
test = Test()
context = Context()
context.bind_variable('foo','broken')
test.variable_binds = {'foo':'correct', 'test':'value'}
test.update_context_before(context)
self.assertEqual('correct', context.get_value('foo'))
self.assertEqual('value', context.get_value('test'))
def test_update_context_generators(self):
""" Test updating context variables using generator """
test = Test()
context = Context()
context.bind_variable('foo','broken')
test.variable_binds = {'foo': 'initial_value'}
test.generator_binds = {'foo': 'gen'}
context.add_generator('gen', generators.generator_basic_ids())
test.update_context_before(context)
self.assertEqual(1, context.get_value('foo'))
test.update_context_before(context)
self.assertEqual(2, context.get_value('foo'))
if __name__ == '__main__':
unittest.main()
|
|
import cgi
import urllib
from google.appengine.api import users
from google.appengine.ext import ndb
import webapp2
from myadmin import *
from utils import *
from chichar import *
from sentence import *
from word import *
from viewstat import *
from chartest import *
from drawing import *
from book import *
from modelutils import *
from formatutils import *
from maintemplates import *
# [BEGIN main_page]
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.write('<html><body>')
user = users.get_current_user()
if user:
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
else:
url = users.create_login_url(self.request.uri)
url_linktext = 'Login'
        if user is not None and user.email() == ADMIN_ID:
self.response.write(MAIN_PAGE_ADMIN_TEMPLATE % (url, url_linktext))
else:
self.response.write(MAIN_PAGE_USER_TEMPLATE % (url, url_linktext))
self.response.write('</body></html>')
# [END main_page]
# [BEGIN main_page]
class MainSearch(webapp2.RequestHandler):
def post(self):
self.response.write('<html><body>')
        string = self.request.get('searchquery')
        if string == "":
            chars = ""
            words = ""
            sentences = ""
        else:
            # try to match the query as a character, then as a translation,
            # then as a pronunciation
            qresult = Chichar.query(Chichar.chichar == string).fetch()
            if not qresult:
                qresult = Chichar.query(Chichar.translation == string).fetch()
            if not qresult:
                qresult = Chichar.query(Chichar.pronunciation == string).fetch()
            if qresult:
                chars = tableclickchichars(qresult)
            elif len(string) == 1:
                chars = "char unknown, to be added"
            else:
                chars = "no result"
            words = "TODO"
            sentences = "TODO"
self.response.write( SEARCH_GENERAL % (string, chars, words, sentences ) )
self.response.write('</body></html>')
# [END main_page]
# [BEGIN main_page]
class MainLoad(webapp2.RequestHandler):
def get(self):
self.response.write('<html><body>')
self.response.write(LOAD_GENERAL)
self.response.write('</body></html>')
# [END main_page]
# [BEGIN main_page]
class DoMainLoad(webapp2.RequestHandler):
def post(self):
self.response.write('<html><body>')
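        # Each non-empty line of the 'dataentry' field is expected to be
        # semicolon-separated: "<characters>;<translation>;<pronunciation>",
        # e.g. (hypothetical entry) "好;good;hao3". A single character is stored
        # as a Chichar, a multi-word translation as a sentence, anything else
        # as a word.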
for dataline in self.request.get('dataentry').split("\n"):
if len(dataline) > 0:
parts = dataline.split(";")
chichar = parts[0].strip()
translation = parts[1].strip()
pronunciation = parts[2].strip()
if len(chichar) == 1:
# this is a new char
checkaddchar(self,chichar,translation,pronunciation)
else:
if len(translation.split(" ")) > 1:
# this is a sentence
checkaddsentence(self,chichar,translation,pronunciation)
else:
checkaddword(self,chichar,translation,pronunciation)
self.response.write('</body></html>')
# [END main_page]
# [BEGIN main_page]
class MainClear(webapp2.RequestHandler):
def post(self):
clearchichars(self)
clearwords(self)
clearsentences(self)
user = users.get_current_user()
if user:
clearbooks(self)
clearviewstats(self)
clearchartests(self)
self.redirect("/")
# [END main_page]
app = webapp2.WSGIApplication([
('/', MainPage),
('/mainsearch', MainSearch),
('/mainsearch/(.*)', MainSearch),
('/mainload', MainLoad),
('/domainload', DoMainLoad),
('/mainclear', MainClear),
('/listchichars', ListChiChar),
('/addchichar', AddChiChar),
('/doaddchichar', DoAddChiChar),
('/viewchichar/(.*)', ViewChiChar),
('/editchichar/(.*)', EditChiChar),
('/savechichar/(.*)', SaveChiChar),
('/deletechichar/(.*)', DeleteChiChar),
('/clearchichars', ClearChiChars),
('/statchichars', StatChiChars),
('/chicharsentences/(.*)', ChiCharSentences),
('/strokechichar/(.*)', StrokeChiChar),
('/savestrokechichar/(.*)', SaveStrokeChiChar),
('/clearstrokechichar/(.*)', ClearStrokeChiChar),
('/exportchichars', ExportChiChars),
('/listsentences', ListSentences),
('/addsentence', AddSentence),
('/doaddsentence', DoAddSentence),
('/viewsentence/(.*)', ViewSentence),
('/editsentence/(.*)', EditSentence),
('/savesentence/(.*)', SaveSentence),
('/deletesentence/(.*)', DeleteSentence),
('/clearsentences', ClearSentences),
('/statsentences', StatSentences),
('/listwords', ListWords),
('/addword', AddWord),
('/doaddword', DoAddWord),
('/viewword/(.*)', ViewWord),
('/editword/(.*)', EditWord),
('/saveword/(.*)', SaveWord),
('/deleteword/(.*)', DeleteWord),
('/clearwords', ClearWords),
('/statwords', StatWords),
('/char2pinyintest', Char2PinyinTest),
('/def2chartest', Def2CharTest),
('/checkchar2pinyintest', CheckChar2PinyinTest),
('/checkdef2chartest', CheckDef2CharTest),
('/answerdef2chartest/(.*)', AnswerDef2CharTest),
('/charteststats', CharTestStats),
('/listviewstats', ListViewStats),
('/clearviewstats', ClearViewStats),
('/listbooks', ListBooks),
('/clearbooks', ClearBooks),
('/loadbookpage', LoadBookPage),
('/loadbook', LoadBook),
('/deletebook/(.*)', DeleteBook),
('/viewbook/(.*)', ViewBook),
('/learnbook/(.*)', LearnBook),
('/statbooks', StatBooks),
('/exportbook/(.*)', ExportBook),
], debug=True)
|
|
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2014 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import functools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.7.2"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
# This is a bit ugly, but it avoids running this again.
try:
delattr(obj.__class__, self.name)
except AttributeError:
# probably deleted in multiple threads
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "xmlrpclib", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
else:
def iterkeys(d, **kw):
return iter(d.iterkeys(**kw))
def itervalues(d, **kw):
return iter(d.itervalues(**kw))
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def iterlists(d, **kw):
return iter(d.iterlists(**kw))
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
def iterbytes(buf):
return (ord(byte) for byte in buf)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
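# Usage sketch (not part of six itself): b() and u() stand in for the b"" and
# u"" literal prefixes, since u"" literals are rejected by Python 3.0-3.2 and
# b"" literals by Python 2.5 and earlier, e.g.
#   raw = b("\x00\x01")    # binary_type on both versions
#   text = u("caf\u00e9")  # text_type on both versions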
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
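# Usage sketch (not part of six itself): reraise re-raises an exception with
# its original traceback on both major versions, typically from an except
# block after some intermediate work, e.g.
#   try:
#       risky_operation()          # hypothetical callable
#   except Exception:
#       exc_type, exc_value, exc_tb = sys.exc_info()
#       cleanup()                  # hypothetical callable
#       reraise(exc_type, exc_value, exc_tb)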
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped):
def wrapper(f):
f = functools.wraps(wrapped)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a
# dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
# __init__ comes back from type etc.).
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
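# Usage sketch (not part of six itself): declare a metaclass portably by
# inheriting from the temporary class returned above, e.g.
#   class Meta(type):
#       pass
#   class MyClass(with_metaclass(Meta, object)):
#       pass
#   assert type(MyClass) is Meta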
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
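# Usage sketch (not part of six itself): the decorator form achieves the same
# result without introducing an artificial base class, e.g.
#   @add_metaclass(Meta)
#   class MyOtherClass(object):
#       pass
#   assert type(MyOtherClass) is Meta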
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
try:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
except NameError:
pass
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
|
|
import argparse
import numpy as np
import pyx
import suspect
import os
from cubric_mrs import voxel, table
# define some useful global parameters
voxel_outline_color = pyx.color.rgb(1, 1, 0)
voxel_outline_width = pyx.style.linewidth(1.0)
plot_linewidth = pyx.style.linewidth(0.01)
simple_plot_color = pyx.color.rgb(0, 0, 1)
sr_plot_color = pyx.color.rgb(1, 0, 0)
plot_linewidth_gaba = pyx.style.linewidth(0.03)
plot_color_gaba = pyx.color.rgb(0, 1, 0)
plot_color_residual = pyx.color.cmyk.Gray
plot_color_fit = pyx.color.rgb.red
plot_color_data = pyx.color.rgb.black
min_ppm = 0.2
max_ppm = 5.0
def analyse_press(data_path, t1_path=None, wref_path=None, out_path=None, out_csv=None, subject_id=None):
data = suspect.io.load_twix(data_path)[:, 1]
if subject_id is None:
subject_id = data.metadata["patient_id"]
num_channels = data.shape[-2]
noise = np.moveaxis(data[:, :, -250:], 2, 0).reshape(num_channels, -1)
white_data = suspect.processing.channel_combination.whiten(data, noise)
if wref_path is not None:
wref = suspect.io.load_twix(wref_path)[:, 1]
white_wref = suspect.processing.channel_combination.whiten(wref, noise)
channel_weights = suspect.processing.channel_combination.svd_weighting(np.mean(white_wref, axis=0))
cc_wref = suspect.processing.channel_combination.combine_channels(white_wref, channel_weights)
# assume no frequency drift over water signal, and doesn't matter for singlet peak anyway
wref_final = np.mean(cc_wref, axis=0)
# calculate the eddy current from the water signal
ec = np.unwrap(np.angle(wref_final))
# smooth the eddy current signal
ec_smooth = suspect.processing.denoising.sliding_gaussian(ec, 32)
ecc = np.exp(-1j * ec_smooth)
wref_final *= ecc
white_data *= ecc
else:
channel_weights = suspect.processing.channel_combination.svd_weighting(np.mean(white_data, axis=0))
cc_data = suspect.processing.channel_combination.combine_channels(white_data, channel_weights)
# re-bin into phase cycles and average over each cycle
pc_data = np.mean(cc_data.reshape(cc_data.shape[0] // 16, 16, -1), axis=1)
def correct_frequency_sr(target):
def correct_fid(fid):
frequency_shift, phase_shift = suspect.processing.frequency_correction.spectral_registration(fid, target)
return fid.adjust_frequency(-frequency_shift).adjust_phase(-phase_shift)
return correct_fid
sr_data = np.mean(np.apply_along_axis(correct_frequency_sr(pc_data[0]), 1, pc_data), axis=0)
one_page_canvas = pyx.canvas.canvas()
files_dict = {
"PRESS": os.path.basename(data_path)
}
if wref_path is not None:
files_dict["WREF"] = os.path.basename(wref_path)
if t1_path is not None:
files_dict["T1"] = os.path.basename(t1_path)
file_table = table.file_table(files_dict)
one_page_canvas.insert(file_table,
[pyx.trafo.translate(1, 28)])
if t1_path is not None:
t1_path = os.path.abspath(t1_path)
if os.path.splitext(t1_path)[1].upper() in [".IMA", ".DCM"]:
t1 = suspect.image.load_dicom_volume(t1_path)
elif os.path.splitext(t1_path)[1].upper() in [".NII"]:
t1 = suspect.image.load_nifti(t1_path)
elif t1_path.upper().endswith(".NII.GZ"):
t1 = suspect.image.load_nifti(t1_path)
else:
print("could not load t1 from {}".format(os.path.splitext(t1_path)[1].upper()))
exit(-1)
voxel_canvases = voxel.get_voxel_slices(t1, data)
segmentation = voxel.segment_voxel(t1_path, suspect.image.create_mask(data, t1))
one_page_canvas.insert(voxel_canvases,
[pyx.trafo.translate(1, 28 - 0.5 - voxel_canvases.bbox().height())])
water = wref_final if wref_path is not None else None
tarquin_results = suspect.io.tarquin.process(sr_data, water, options={
"w_conc": segmentation["water_conc"],
"w_att": segmentation["water_att"]
})
tarquin_plot = plot_fitted_spectrum(tarquin_results["plots"]["data"],
tarquin_results["plots"]["fit"] + tarquin_results["plots"]["baseline"])
one_page_canvas.insert(tarquin_plot,
[pyx.trafo.translate(1, 28 - 1 - voxel_canvases.bbox().height() - tarquin_plot.height)])
conc_table = table.metabolite_table(tarquin_results["metabolite_fits"],
float(tarquin_results["metabolite_fits"]["TCr"]["concentration"]),
"mM" if wref_path is not None else "A.U.")
one_page_canvas.insert(conc_table,
[pyx.trafo.translate(11, 28 - 1 - voxel_canvases.bbox().height())])
quality_table = table.quality_table(tarquin_results["quality"])
#print(conc_table.bbox())
one_page_canvas.insert(quality_table,
[pyx.trafo.translate(11, 28 - conc_table.bbox().height() - 1.5 - voxel_canvases.bbox().height())])
properties_table = table.voxel_properties_table(segmentation)
one_page_canvas.insert(properties_table,
[pyx.trafo.translate(1, 28 - 2 - voxel_canvases.bbox().height() - tarquin_plot.height)])
output_sheet = pyx.document.page(one_page_canvas,
paperformat=pyx.document.paperformat.A4,
centered=False)
output_document = pyx.document.document([output_sheet])
output_document.writePDFfile(out_path)
if out_csv is not None:
output_csv(out_csv, subject_id, tarquin_results["metabolite_fits"])
def plot_fitted_spectrum(data, fit):
data_range = np.amax(data.real) - np.amin(data.real)
gaba_plot = pyx.graph.graphxy(width=9, height=6,
x=pyx.graph.axis.linear(min=max_ppm, max=min_ppm),
y=pyx.graph.axis.linear(
min=np.amin(data.real) - 0.3 * data_range,
max=np.amax(data.real) + 0.1 * data_range,
parter=None
),
key=pyx.graph.key.key(pos="tr")
)
gaba_plot.plot(pyx.graph.data.values(
x=data.frequency_axis_ppm(),
y=data.spectrum().real,
title="Data"
),
[pyx.graph.style.line([plot_linewidth, plot_color_data])]
)
gaba_plot.plot(pyx.graph.data.values(
x=data.frequency_axis_ppm(),
y=fit.real,
title="Fit"
),
[pyx.graph.style.line([plot_linewidth, plot_color_fit])]
)
gaba_plot.plot(pyx.graph.data.values(
x=data.frequency_axis_ppm(),
y=(data.real - fit.real - data_range * 0.15),
title="Residual"
),
[pyx.graph.style.line([plot_linewidth, plot_color_residual])]
)
return gaba_plot
def output_csv(filename, subject_id, concentrations):
with open(filename, 'a') as fout:
if fout.tell() == 0:
# this is a new file, have to output the header line
fout.write("SubjectID")
for metabolite_name in sorted(concentrations.keys()):
fout.write(", {0}, {0}_SD".format(metabolite_name))
fout.write("\n")
# now write the actual concentration values
fout.write("{}".format(subject_id))
        for name, data in sorted(concentrations.items()):
            fout.write(", {}, {}".format(data["concentration"], data["sd"]))
        # terminate the row so rows appended for later subjects start on a new line
        fout.write("\n")
def press_script():
parser = argparse.ArgumentParser()
parser.add_argument("--t1",
help="path to the T1 structural image in Nifti or DICOM format",
default=None)
parser.add_argument("--press",
help="path to the press twix file",
required=True)
parser.add_argument("--wref",
help="path to the water reference twix file",
default=None)
parser.add_argument("--out_pdf",
help="path to save the one-page pdf output",
default="out.pdf")
parser.add_argument("--out_csv",
help="path to save the concentration csv file",
default=None)
parser.add_argument("--id",
help="override the patient id in output files",
default=None)
args = parser.parse_args()
analyse_press(args.press,
t1_path=args.t1,
wref_path=args.wref,
out_path=args.out_pdf,
out_csv=args.out_csv,
subject_id=args.id)
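# Example invocation sketch (file paths are hypothetical), assuming press_script
# is wired up as a console entry point or called from a __main__ guard:
#   --press subj01_press.dat --wref subj01_wref.dat --t1 subj01_T1.nii.gz \
#   --out_pdf subj01_report.pdf --out_csv cohort.csv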
|
|
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid as stdlib_uuid
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.compute import views
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
from nova import wsgi
NS = {
'atom': 'http://www.w3.org/2005/Atom',
'ns': 'http://docs.openstack.org/common/api/v1.0'
}
EXP_LINKS = {
'v2.0': {
'html': 'http://docs.openstack.org/',
},
'v2.1': {
'html': 'http://docs.openstack.org/'
},
}
EXP_VERSIONS = {
"v2.0": {
"id": "v2.0",
"status": "SUPPORTED",
"version": "",
"min_version": "",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.0']['html'],
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=2",
},
],
},
"v2.1": {
"id": "v2.1",
"status": "CURRENT",
"version": "2.11",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2.1/",
},
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.1']['html'],
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=2.1",
}
],
}
}
def _get_self_href(response):
"""Extract the URL to self from response data."""
data = jsonutils.loads(response.body)
for link in data['versions'][0]['links']:
if link['rel'] == 'self':
return link['href']
return ''
class VersionsTestV20(test.NoDBTestCase):
def test_get_version_list(self):
req = webob.Request.blank('/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
versions = jsonutils.loads(res.body)["versions"]
expected = [
{
"id": "v2.0",
"status": "SUPPORTED",
"version": "",
"min_version": "",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/",
}],
},
{
"id": "v2.1",
"status": "CURRENT",
"version": "2.11",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2.1/",
}],
},
]
self.assertEqual(versions, expected)
def test_get_version_list_302(self):
req = webob.Request.blank('/v2')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 302)
redirect_req = webob.Request.blank('/v2/')
self.assertEqual(res.location, redirect_req.url)
def _test_get_version_2_detail(self, url, accept=None):
if accept is None:
accept = "application/json"
req = webob.Request.blank(url)
req.accept = accept
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {
"version": {
"id": "v2.0",
"status": "SUPPORTED",
"version": "",
"min_version": "",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/",
},
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.0']['html'],
},
],
"media-types": [
{
"base": "application/json",
"type": "application/"
"vnd.openstack.compute+json;version=2",
},
],
},
}
self.assertEqual(expected, version)
def test_get_version_2_detail(self):
self._test_get_version_2_detail('/v2/')
def test_get_version_2_detail_content_type(self):
accept = "application/json;version=2"
self._test_get_version_2_detail('/', accept=accept)
def test_get_version_2_versions_invalid(self):
req = webob.Request.blank('/v2/versions/1234')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(404, res.status_int)
def test_multi_choice_image(self):
req = webob.Request.blank('/images/1')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/json")
expected = {
"choices": [
{
"id": "v2.0",
"status": "SUPPORTED",
"links": [
{
"href": "http://localhost/v2/images/1",
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json"
";version=2"
},
],
},
{
"id": "v2.1",
"status": "CURRENT",
"links": [
{
"href": "http://localhost/v2.1/images/1",
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type":
"application/vnd.openstack.compute+json;version=2.1",
}
],
},
], }
self.assertThat(jsonutils.loads(res.body),
matchers.DictMatches(expected))
def test_multi_choice_server_atom(self):
"""Make sure multi choice responses do not have content-type
application/atom+xml (should use default of json)
"""
req = webob.Request.blank('/servers')
req.accept = "application/atom+xml"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/json")
def test_multi_choice_server(self):
uuid = str(stdlib_uuid.uuid4())
req = webob.Request.blank('/servers/' + uuid)
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/json")
expected = {
"choices": [
{
"id": "v2.0",
"status": "SUPPORTED",
"links": [
{
"href": "http://localhost/v2/servers/" + uuid,
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json"
";version=2"
},
],
},
{
"id": "v2.1",
"status": "CURRENT",
"links": [
{
"href": "http://localhost/v2.1/servers/" + uuid,
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type":
"application/vnd.openstack.compute+json;version=2.1",
}
],
},
], }
self.assertThat(jsonutils.loads(res.body),
matchers.DictMatches(expected))
class VersionsViewBuilderTests(test.NoDBTestCase):
def test_view_builder(self):
base_url = "http://example.org/"
version_data = {
"v3.2.1": {
"id": "3.2.1",
"status": "CURRENT",
"version": "2.3",
"min_version": "2.1",
"updated": "2011-07-18T11:30:00Z",
}
}
expected = {
"versions": [
{
"id": "3.2.1",
"status": "CURRENT",
"version": "2.3",
"min_version": "2.1",
"updated": "2011-07-18T11:30:00Z",
"links": [
{
"rel": "self",
"href": "http://example.org/v2/",
},
],
}
]
}
builder = views.versions.ViewBuilder(base_url)
output = builder.build_versions(version_data)
self.assertEqual(output, expected)
def test_generate_href(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href('v2')
self.assertEqual(actual, expected)
def test_generate_href_v21(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2.1/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href('v2.1')
self.assertEqual(actual, expected)
def test_generate_href_unknown(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href('foo')
self.assertEqual(actual, expected)
# NOTE(oomichi): Now version API of v2.0 covers "/"(root).
# So this class tests "/v2.1" only for v2.1 API.
class VersionsTestV21(test.NoDBTestCase):
exp_versions = copy.deepcopy(EXP_VERSIONS)
exp_versions['v2.0']['links'].insert(0,
{'href': 'http://localhost/v2.1/', 'rel': 'self'},
)
def test_get_version_list_302(self):
req = webob.Request.blank('/v2.1')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 302)
redirect_req = webob.Request.blank('/v2.1/')
self.assertEqual(res.location, redirect_req.url)
def test_get_version_21_detail(self):
req = webob.Request.blank('/v2.1/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.1']}
self.assertEqual(expected, version)
def test_get_version_21_versions_v21_detail(self):
req = webob.Request.blank('/v2.1/fake/versions/v2.1')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.1']}
self.assertEqual(expected, version)
def test_get_version_21_versions_v20_detail(self):
req = webob.Request.blank('/v2.1/fake/versions/v2.0')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.0']}
self.assertEqual(expected, version)
def test_get_version_21_versions_invalid(self):
req = webob.Request.blank('/v2.1/versions/1234')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 404)
def test_get_version_21_detail_content_type(self):
req = webob.Request.blank('/')
req.accept = "application/json;version=2.1"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.1']}
self.assertEqual(expected, version)
class VersionBehindSslTestCase(test.NoDBTestCase):
def setUp(self):
super(VersionBehindSslTestCase, self).setUp()
self.flags(secure_proxy_ssl_header='HTTP_X_FORWARDED_PROTO')
def test_versions_without_headers(self):
req = wsgi.Request.blank('/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
href = _get_self_href(res)
self.assertTrue(href.startswith('http://'))
def test_versions_with_header(self):
req = wsgi.Request.blank('/')
req.accept = "application/json"
req.headers['X-Forwarded-Proto'] = 'https'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
href = _get_self_href(res)
self.assertTrue(href.startswith('https://'))
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.convolution.utils import discretize_model
from astropy.modeling.functional_models import (
Gaussian1D, Box1D, RickerWavelet1D, Gaussian2D, Box2D, RickerWavelet2D)
from astropy.modeling.tests.example_models import models_1D, models_2D
from astropy.modeling.tests.test_models import create_model
try:
import scipy # pylint: disable=W0611
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
modes = ['center', 'linear_interp', 'oversample']
test_models_1D = [Gaussian1D, Box1D, RickerWavelet1D]
test_models_2D = [Gaussian2D, Box2D, RickerWavelet2D]
@pytest.mark.parametrize(('model_class', 'mode'), list(itertools.product(test_models_1D, modes)))
def test_pixel_sum_1D(model_class, mode):
"""
Test if the sum of all pixels corresponds nearly to the integral.
"""
if model_class == Box1D and mode == "center":
pytest.skip("Non integrating mode. Skip integral test.")
parameters = models_1D[model_class]
model = create_model(model_class, parameters)
values = discretize_model(model, models_1D[model_class]['x_lim'], mode=mode)
assert_allclose(values.sum(), models_1D[model_class]['integral'], atol=0.0001)
@pytest.mark.parametrize('mode', modes)
def test_gaussian_eval_1D(mode):
"""
Discretize Gaussian with different modes and check
if result is at least similar to Gaussian1D.eval().
"""
model = Gaussian1D(1, 0, 20)
x = np.arange(-100, 101)
values = model(x)
disc_values = discretize_model(model, (-100, 101), mode=mode)
assert_allclose(values, disc_values, atol=0.001)
@pytest.mark.parametrize(('model_class', 'mode'), list(itertools.product(test_models_2D, modes)))
def test_pixel_sum_2D(model_class, mode):
"""
Test if the sum of all pixels corresponds nearly to the integral.
"""
if model_class == Box2D and mode == "center":
pytest.skip("Non integrating mode. Skip integral test.")
parameters = models_2D[model_class]
model = create_model(model_class, parameters)
values = discretize_model(model, models_2D[model_class]['x_lim'],
models_2D[model_class]['y_lim'], mode=mode)
assert_allclose(values.sum(), models_2D[model_class]['integral'], atol=0.0001)
@pytest.mark.parametrize('mode', modes)
def test_gaussian_eval_2D(mode):
"""
Discretize Gaussian with different modes and check
if result is at least similar to Gaussian2D.eval()
"""
model = Gaussian2D(0.01, 0, 0, 1, 1)
x = np.arange(-2, 3)
y = np.arange(-2, 3)
x, y = np.meshgrid(x, y)
values = model(x, y)
disc_values = discretize_model(model, (-2, 3), (-2, 3), mode=mode)
assert_allclose(values, disc_values, atol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_gaussian_eval_2D_integrate_mode():
"""
Discretize Gaussian with integrate mode
"""
model_list = [Gaussian2D(.01, 0, 0, 2, 2),
Gaussian2D(.01, 0, 0, 1, 2),
Gaussian2D(.01, 0, 0, 2, 1)]
x = np.arange(-2, 3)
y = np.arange(-2, 3)
x, y = np.meshgrid(x, y)
for model in model_list:
values = model(x, y)
disc_values = discretize_model(model, (-2, 3), (-2, 3), mode='integrate')
assert_allclose(values, disc_values, atol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_subpixel_gauss_1D():
"""
Test subpixel accuracy of the integrate mode with gaussian 1D model.
"""
gauss_1D = Gaussian1D(1, 0, 0.1)
values = discretize_model(gauss_1D, (-1, 2), mode='integrate', factor=100)
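    # analytic integral of Gaussian1D(amplitude=1, stddev=0.1):
    # amplitude * stddev * sqrt(2 * pi)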
assert_allclose(values.sum(), np.sqrt(2 * np.pi) * 0.1, atol=0.00001)
@pytest.mark.skipif('not HAS_SCIPY')
def test_subpixel_gauss_2D():
"""
Test subpixel accuracy of the integrate mode with gaussian 2D model.
"""
gauss_2D = Gaussian2D(1, 0, 0, 0.1, 0.1)
values = discretize_model(gauss_2D, (-1, 2), (-1, 2), mode='integrate', factor=100)
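    # analytic integral of Gaussian2D(amplitude=1, x_stddev=y_stddev=0.1):
    # 2 * pi * amplitude * x_stddev * y_stddev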
assert_allclose(values.sum(), 2 * np.pi * 0.01, atol=0.00001)
def test_discretize_callable_1d():
"""
Test discretize when a 1d function is passed.
"""
def f(x):
return x ** 2
y = discretize_model(f, (-5, 6))
assert_allclose(y, np.arange(-5, 6) ** 2)
def test_discretize_callable_2d():
"""
Test discretize when a 2d function is passed.
"""
def f(x, y):
return x ** 2 + y ** 2
actual = discretize_model(f, (-5, 6), (-5, 6))
y, x = (np.indices((11, 11)) - 5)
desired = x ** 2 + y ** 2
assert_allclose(actual, desired)
def test_type_exception():
"""
Test type exception.
"""
with pytest.raises(TypeError) as exc:
discretize_model(float(0), (-10, 11))
assert exc.value.args[0] == 'Model must be callable.'
def test_dim_exception_1d():
"""
Test dimension exception 1d.
"""
def f(x):
return x ** 2
with pytest.raises(ValueError) as exc:
discretize_model(f, (-10, 11), (-10, 11))
assert exc.value.args[0] == "y range specified, but model is only 1-d."
def test_dim_exception_2d():
"""
Test dimension exception 2d.
"""
def f(x, y):
return x ** 2 + y ** 2
with pytest.raises(ValueError) as exc:
discretize_model(f, (-10, 11))
assert exc.value.args[0] == "y range not specified, but model is 2-d"
def test_float_x_range_exception():
def f(x, y):
return x ** 2 + y ** 2
with pytest.raises(ValueError) as exc:
discretize_model(f, (-10.002, 11.23))
assert exc.value.args[0] == ("The difference between the upper and lower"
" limit of 'x_range' must be a whole number.")
def test_float_y_range_exception():
def f(x, y):
return x ** 2 + y ** 2
with pytest.raises(ValueError) as exc:
discretize_model(f, (-10, 11), (-10.002, 11.23))
assert exc.value.args[0] == ("The difference between the upper and lower"
" limit of 'y_range' must be a whole number.")
def test_discretize_oversample():
gauss_2D = Gaussian2D(amplitude=1.0, x_mean=5.,
y_mean=125., x_stddev=0.75, y_stddev=3)
values = discretize_model(gauss_2D,
x_range=[0, 10],
y_range=[100, 135],
mode='oversample', factor=10)
vmax = np.max(values)
vmax_yx = np.unravel_index(values.argmax(), values.shape)
values_osf1 = discretize_model(gauss_2D,
x_range=[0, 10],
y_range=[100, 135],
mode='oversample', factor=1)
values_center = discretize_model(gauss_2D,
x_range=[0, 10],
y_range=[100, 135],
                                     mode='center')
assert values.shape == (35, 10)
assert_allclose(vmax, 0.927, atol=1e-3)
assert vmax_yx == (25, 5)
assert_allclose(values_center, values_osf1)
|
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import struct
import unittest
try:
from cStringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
import time
import random
import math
from roslib.message import SerializationError
class TestGenmsgPy(unittest.TestCase):
def test_PythonKeyword(self):
from test_rospy.msg import PythonKeyword
# the md5sum is pulled from the c++ message generator. The
# test here is that the Python msg generator didn't
        # accidentally mutate an md5sum based on a message that has its
# fieldname remapped.
self.assertEquals(PythonKeyword._md5sum, "1330d6bbfad8e75334346fec949d5133")
    ## Utility for testing roundtrip serialization.
    ## @param orig Message to test roundtrip serialization of
    ## @param blank Uninitialized instance of message to deserialize into
    ## @param float bool: if True, use assertAlmostEquals instead of
    ## assertEquals for the comparison. This variant assumes the message's
    ## only data field is named 'data'.
def _test_ser_deser(self, orig, blank, float=False):
b = StringIO()
orig.serialize(b)
blank.deserialize(b.getvalue())
if not float:
self.assertEquals(orig, blank, str(orig)+" != "+str(blank))
else:
self.assertAlmostEquals(orig.data, blank.data, 5)
## #2133/2152
def test_test_rospy_TransitiveImport(self):
from test_rospy.msg import TransitiveImport
m = TransitiveImport()
        # invoking serialize should be enough to expose the issue. The bug
# was that genmsg_py was failing to include the imports of
# embedded messages. Because messages are flattened, this
# causes ImportErrors.
self._test_ser_deser(m, TransitiveImport())
def test_test_rospy_TestFixedArray(self):
from test_rospy.msg import TestFixedArray
m = TestFixedArray()
self.assertEquals([0.], m.f32_1)
self.assertEquals([0., 0., 0.], m.f32_3)
self.assertEquals([0.], m.f64_1)
self.assertEquals([0., 0., 0.], m.f64_3)
self.assertEquals([0], m.i8_1)
self.assertEquals([0, 0, 0], m.i8_3)
self.assertEquals(chr(0), m.u8_1)
self.assertEquals(chr(0)*3, m.u8_3)
self.assertEquals([0], m.i32_1)
self.assertEquals([0, 0, 0], m.i32_3)
self.assertEquals([0], m.u32_1)
self.assertEquals([0, 0, 0], m.u32_3)
self.assertEquals([''], m.s_1)
self.assertEquals(['', '', ''], m.s_3)
self._test_ser_deser(m, TestFixedArray())
m = TestFixedArray(i32_1 = [1])
c = TestFixedArray()
self._test_ser_deser(m, c)
self.assertEquals((1,), c.i32_1)
m = TestFixedArray(i32_3 = [-3, 2, 10])
c = TestFixedArray()
self._test_ser_deser(m, c)
self.assertEquals((-3, 2, 10), c.i32_3)
m = TestFixedArray(u32_1 = [1234])
c = TestFixedArray()
self._test_ser_deser(m, c)
self.assertEquals((1234,), c.u32_1)
m = TestFixedArray(u32_3 = [3, 2, 10])
c = TestFixedArray()
self._test_ser_deser(m, c)
self.assertEquals((3, 2, 10), c.u32_3)
# this could potentially fail due to floating point lossiness
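        # Illustrative note: 0.1 has no exact float32 representation, so a
        # round trip through struct.pack('<f', 0.1) / struct.unpack('<f', ...)
        # yields roughly 0.10000000149011612; that is why _test_ser_deser
        # offers the float=True almost-equal path. The literals used below
        # (1., 2., 3.) are exactly representable in float32, so strict
        # equality still holds for them.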
m,c = TestFixedArray(f32_1 = [2.]), TestFixedArray()
self._test_ser_deser(m, c)
self.assertEquals((2.,), c.f32_1)
m,c = TestFixedArray(f32_3 = [1., 2., 3.]), TestFixedArray()
self._test_ser_deser(m, c)
self.assertEquals((1., 2., 3.), c.f32_3)
m,c = TestFixedArray(u8_1 = 'x'), TestFixedArray()
self._test_ser_deser(m, c)
self.assertEquals('x', c.u8_1)
m,c = TestFixedArray(u8_3 = 'xyz'), TestFixedArray()
self._test_ser_deser(m, c)
self.assertEquals('xyz', c.u8_3)
m,c = TestFixedArray(s_1 = ['']), TestFixedArray()
self._test_ser_deser(m, c)
self.assertEquals([''], c.s_1)
m,c = TestFixedArray(s_1 = ['blah blah blah']), TestFixedArray()
self._test_ser_deser(m, c)
self.assertEquals(['blah blah blah',], c.s_1)
m = TestFixedArray(s_3 = ['', 'x', 'xyz'])
c = TestFixedArray()
self._test_ser_deser(m, c)
self.assertEquals(['', 'x', 'xyz'], c.s_3)
for v in [True, False]:
m = TestFixedArray(b_1 = [v])
c = TestFixedArray()
self._test_ser_deser(m, c)
self.assertEquals([v], c.b_1)
m = TestFixedArray(b_3 = [True, False, True])
c = TestFixedArray()
self._test_ser_deser(m, c)
self.assertEquals([True, False, True], c.b_3)
#TODO: enable tests for auto-convert of uint8[] to string
def test_test_rospy_TestConstants(self):
from test_rospy.msg import TestConstants
self.assertEquals(-123.0, TestConstants.A)
self.assertEquals(124.0, TestConstants.B)
self.assertEquals(125.0, TestConstants.C)
self.assertEquals(123, TestConstants.X)
self.assertEquals(-123, TestConstants.Y)
self.assertEquals(124, TestConstants.Z)
self.assertEquals("'hi", TestConstants.SINGLEQUOTE)
self.assertEquals('"hello" there', TestConstants.DOUBLEQUOTE)
self.assertEquals('"hello" \'goodbye\'', TestConstants.MULTIQUOTE)
self.assertEquals('foo', TestConstants.FOO)
self.assertEquals('"#comments" are ignored, and leading and trailing whitespace removed',TestConstants.EXAMPLE)
self.assertEquals('strip', TestConstants.WHITESPACE)
self.assertEquals('', TestConstants.EMPTY)
self.assertEquals(True, TestConstants.TRUE)
self.assertEquals(False, TestConstants.FALSE)
def test_std_msgs_empty(self):
from std_msgs.msg import Empty
self.assertEquals(Empty(), Empty())
self._test_ser_deser(Empty(), Empty())
def test_std_msgs_Bool(self):
from std_msgs.msg import Bool
self.assertEquals(Bool(), Bool())
self._test_ser_deser(Bool(), Bool())
# default value should be False
self.assertEquals(False, Bool().data)
# test various constructor permutations
for v in [True, False]:
self.assertEquals(Bool(v), Bool(v))
self.assertEquals(Bool(v), Bool(data=v))
self.assertEquals(Bool(data=v), Bool(data=v))
self.assertNotEquals(Bool(True), Bool(False))
self._test_ser_deser(Bool(True), Bool())
self._test_ser_deser(Bool(False), Bool())
# validate type cast to bool
blank = Bool()
b = StringIO()
Bool(True).serialize(b)
blank.deserialize(b.getvalue())
self.assert_(blank.data)
self.assert_(type(blank.data) == bool)
        b = StringIO()
        Bool(False).serialize(b)
        blank.deserialize(b.getvalue())
        self.assert_(not blank.data)
        self.assert_(type(blank.data) == bool)
def test_std_msgs_String(self):
from std_msgs.msg import String
self.assertEquals(String(), String())
self.assertEquals('', String().data)
# default value should be empty string
self.assertEquals(String(''), String())
self.assertEquals(String(''), String(''))
self.assertEquals(String('foo'), String('foo'))
self.assertEquals(String('foo'), String(data='foo'))
self.assertEquals(String(data='foo'), String(data='foo'))
self.assertNotEquals(String('foo'), String('bar'))
self.assertNotEquals(String('foo'), String(data='bar'))
self.assertNotEquals(String(data='foo'), String(data='bar'))
self._test_ser_deser(String(''), String())
self._test_ser_deser(String('a man a plan a canal panama'), String())
def test_std_msgs_SignedInt(self):
from std_msgs.msg import Int8, Int16, Int32, Int64
for cls in [Int8, Int16, Int32, Int64]:
v = random.randint(1, 127)
self.assertEquals(cls(), cls())
self.assertEquals(0, cls().data)
self.assertEquals(cls(), cls(0))
self.assertEquals(cls(0), cls(0))
self.assertEquals(cls(v), cls(v))
self.assertEquals(cls(-v), cls(-v))
self.assertEquals(cls(v), cls(data=v))
self.assertEquals(cls(data=v), cls(data=v))
self.assertNotEquals(cls(v), cls())
self.assertNotEquals(cls(data=v), cls(data=-v))
self.assertNotEquals(cls(data=v), cls(data=v-1))
self.assertNotEquals(cls(data=v), cls(v-1))
self.assertNotEquals(cls(v), cls(v-1))
self._test_ser_deser(cls(), cls())
self._test_ser_deser(cls(0), cls())
self._test_ser_deser(cls(-v), cls())
self._test_ser_deser(cls(v), cls())
        # rospy currently does not spot negative overflow because Python's struct module doesn't either
widths = [(8, Int8), (16, Int16), (32, Int32), (64, Int64)]
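        # For reference: an N-bit signed integer can represent values in
        # [-2**(N-1), 2**(N-1) - 1]; e.g. Int8 covers [-128, 127].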
for w, cls in widths:
maxp = long(math.pow(2, w-1)) - 1
maxn = -long(math.pow(2, w-1)) + 1
self._test_ser_deser(cls(maxp), cls())
self._test_ser_deser(cls(maxn), cls())
try:
cls(maxp+1)._check_types()
self.fail("check_types should have noted width error[%s]: %s, %s"%(w, maxp+1, cls.__name__))
except SerializationError: pass
try:
cls(maxn-1)._check_types()
self.fail("check_types should have noted width error[%s]: %s, %s"%(w, maxn-1, cls.__name__))
except SerializationError: pass
def test_std_msgs_UnsignedInt(self):
from std_msgs.msg import UInt8, UInt16, UInt32, UInt64
for cls in [UInt8, UInt16, UInt32, UInt64]:
v = random.randint(1, 127)
self.assertEquals(cls(), cls())
self.assertEquals(0, cls().data)
self.assertEquals(cls(), cls(0))
self.assertEquals(cls(0), cls(0))
self.assertEquals(cls(v), cls(v))
self.assertEquals(cls(v), cls(data=v))
self.assertEquals(cls(data=v), cls(data=v))
self.assertNotEquals(cls(v), cls())
self.assertNotEquals(cls(data=v), cls(data=-v))
self.assertNotEquals(cls(data=v), cls(data=v-1))
self.assertNotEquals(cls(data=v), cls(v-1))
self.assertNotEquals(cls(v), cls(v-1))
self._test_ser_deser(cls(), cls())
self._test_ser_deser(cls(0), cls())
self._test_ser_deser(cls(v), cls())
try:
cls(-1)._check_types()
self.fail("check_types should have noted sign error[%s]: %s"%(w, cls.__name__))
except SerializationError: pass
        # rospy currently does not spot negative overflow because Python's struct module doesn't either
widths = [(8, UInt8), (16, UInt16), (32, UInt32), (64, UInt64)]
for w, cls in widths:
maxp = long(math.pow(2, w)) - 1
self._test_ser_deser(cls(maxp), cls())
try:
cls(maxp+1)._check_types()
self.fail("check_types should have noted width error[%s]: %s, %s"%(w, maxp+1, cls.__name__))
except SerializationError: pass
def test_std_msgs_Float(self):
from std_msgs.msg import Float32, Float64
for cls in [Float32, Float64]:
self.assertEquals(cls(), cls())
self.assertEquals(0., cls().data)
self.assertEquals(cls(), cls(0.))
self.assertEquals(cls(0.), cls(0.))
self.assertEquals(cls(1.), cls(1.))
self.assertEquals(cls(1.), cls(data=1.))
self.assertEquals(cls(data=1.), cls(data=1.))
self.assertEquals(cls(math.pi), cls(math.pi))
self.assertEquals(cls(math.pi), cls(data=math.pi))
self.assertEquals(cls(data=math.pi), cls(data=math.pi))
self.assertNotEquals(cls(1.), cls())
self.assertNotEquals(cls(math.pi), cls())
self.assertNotEquals(cls(data=math.pi), cls(data=-math.pi))
self.assertNotEquals(cls(data=math.pi), cls(data=math.pi-1))
self.assertNotEquals(cls(data=math.pi), cls(math.pi-1))
self.assertNotEquals(cls(math.pi), cls(math.pi-1))
self._test_ser_deser(cls(), cls())
self._test_ser_deser(cls(0.), cls())
self._test_ser_deser(cls(1.), cls(), float=True)
self._test_ser_deser(cls(math.pi), cls(), float=True)
def test_std_msgs_MultiArray(self):
        # multiarray is a good test of embedded message plus array types
from std_msgs.msg import Int32MultiArray, MultiArrayDimension, MultiArrayLayout, UInt8MultiArray
dims = [MultiArrayDimension('foo', 1, 2), MultiArrayDimension('bar', 3, 4),\
MultiArrayDimension('foo2', 5, 6), MultiArrayDimension('bar2', 7, 8)]
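        # Rough sketch of the layout convention (based on the documented
        # std_msgs/MultiArrayLayout semantics, not exercised by this test):
        # a row-major 2x3 array could be described as
        #   MultiArrayLayout(dim=[MultiArrayDimension('rows', 2, 6),
        #                         MultiArrayDimension('cols', 3, 3)],
        #                    data_offset=0)
        # with element (i, j) stored at data[data_offset + dim[1].stride * i + j].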
for d in dims:
self.assertEquals(d, d)
# there was a bug with UInt8 arrays, so this is a regression
        # test. The bug was with uint8[] type consistency.
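        # Note: rospy's Python 2 message classes represent uint8[] fields as
        # str rather than list, which is why UInt8MultiArray().data defaults
        # to '' and the payload below is assembled with chr().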
buff = StringIO()
self.assertEquals(UInt8MultiArray(),UInt8MultiArray())
self.assertEquals('',UInt8MultiArray().data)
UInt8MultiArray().serialize(buff)
self.assertEquals(UInt8MultiArray(layout=MultiArrayLayout()),UInt8MultiArray())
UInt8MultiArray(layout=MultiArrayLayout()).serialize(buff)
data = ''.join([chr(i) for i in range(0, 100)])
v = UInt8MultiArray(data=data)
self._test_ser_deser(UInt8MultiArray(data=data),UInt8MultiArray())
self.assertEquals(Int32MultiArray(),Int32MultiArray())
self.assertEquals(Int32MultiArray(layout=MultiArrayLayout()),Int32MultiArray())
self.assertEquals(Int32MultiArray(layout=MultiArrayLayout(), data=[1, 2, 3]),Int32MultiArray(data=[1, 2, 3]))
self.assertEquals(Int32MultiArray(layout=MultiArrayLayout(), data=[1, 2, 3]),\
Int32MultiArray(layout=MultiArrayLayout(),data=[1, 2, 3]))
self.assertEquals(Int32MultiArray(layout=MultiArrayLayout(dim=[]), data=[1, 2, 3]),\
Int32MultiArray(layout=MultiArrayLayout(),data=[1, 2, 3]))
self.assertEquals(Int32MultiArray(layout=MultiArrayLayout([], 0), data=[1, 2, 3]),\
Int32MultiArray(layout=MultiArrayLayout(),data=[1, 2, 3]))
self.assertEquals(Int32MultiArray(layout=MultiArrayLayout(dim=[], data_offset=0), data=[1, 2, 3]),\
Int32MultiArray(layout=MultiArrayLayout(),data=[1, 2, 3]))
self.assertEquals(Int32MultiArray(layout=MultiArrayLayout(dim=dims, data_offset=0), data=[1, 2, 3]),\
Int32MultiArray(layout=MultiArrayLayout(dim=dims),data=[1, 2, 3]))
self.assertEquals(Int32MultiArray(layout=MultiArrayLayout(dims, 10), data=[1, 2, 3]),\
Int32MultiArray(layout=MultiArrayLayout(dim=dims,data_offset=10),data=[1, 2, 3]))
self.assertNotEquals(Int32MultiArray(data=[1, 2, 3]),Int32MultiArray(data=[4,5,6]))
self.assertNotEquals(Int32MultiArray(layout=MultiArrayLayout([], 1), data=[1, 2, 3]),\
Int32MultiArray(layout=MultiArrayLayout([], 0),data=[1, 2, 3]))
self.assertNotEquals(Int32MultiArray(layout=MultiArrayLayout([], 1), data=[1, 2, 3]),\
Int32MultiArray(layout=MultiArrayLayout(dim=[]),data=[1, 2, 3]))
self.assertNotEquals(Int32MultiArray(layout=MultiArrayLayout(dims, 10), data=[1, 2, 3]),\
Int32MultiArray(layout=MultiArrayLayout(dim=dims,data_offset=11),data=[1, 2, 3]))
self.assertNotEquals(Int32MultiArray(layout=MultiArrayLayout(dim=dims, data_offset=10), data=[1, 2, 3]),\
Int32MultiArray(layout=MultiArrayLayout(dim=dims[1:],data_offset=10),data=[1, 2, 3]))
self._test_ser_deser(Int32MultiArray(),Int32MultiArray())
self._test_ser_deser(Int32MultiArray(layout=MultiArrayLayout()),Int32MultiArray())
self._test_ser_deser(Int32MultiArray(data=[1, 2, 3]),Int32MultiArray())