gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import datetime
import iso8601
import netaddr
import six
from nova.network import model as network_model
from nova.openstack.common.gettextutils import _
from nova.openstack.common import timeutils
class KeyTypeError(TypeError):
    """Raised when a mapping key is not of the required type."""

    def __init__(self, expected, value):
        # Build the translated message first, then hand it to TypeError.
        message = _('Key %(key)s must be of type %(expected)s not %(actual)s'
                    ) % {'key': repr(value),
                         'expected': expected.__name__,
                         'actual': value.__class__.__name__,
                         }
        super(KeyTypeError, self).__init__(message)
class ElementTypeError(TypeError):
    """Raised when a container element is not of the required type."""

    def __init__(self, expected, key, value):
        # Build the translated message first, then hand it to TypeError.
        message = _('Element %(key)s:%(val)s must be of type %(expected)s'
                    ' not %(actual)s'
                    ) % {'key': key,
                         'val': repr(value),
                         'expected': expected,
                         'actual': value.__class__.__name__,
                         }
        super(ElementTypeError, self).__init__(message)
class AbstractFieldType(six.with_metaclass(abc.ABCMeta, object)):
    """Interface that every field type must implement.

    A field type knows how to coerce assigned values and how to convert
    values to and from their serialized ("primitive") form.
    """

    @abc.abstractmethod
    def coerce(self, obj, attr, value):
        """This is called to coerce (if possible) a value on assignment.

        This method should convert the value given into the designated type,
        or throw an exception if this is not possible.

        :param:obj: The NovaObject on which an attribute is being set
        :param:attr: The name of the attribute being set
        :param:value: The value being set
        :returns: A properly-typed value
        """
        pass

    @abc.abstractmethod
    def from_primitive(self, obj, attr, value):
        """This is called to deserialize a value.

        This method should deserialize a value from the form given by
        to_primitive() to the designated type.

        :param:obj: The NovaObject on which the value is to be set
        :param:attr: The name of the attribute which will hold the value
        :param:value: The serialized form of the value
        :returns: The natural form of the value
        """
        pass

    @abc.abstractmethod
    def to_primitive(self, obj, attr, value):
        """This is called to serialize a value.

        This method should serialize a value to the form expected by
        from_primitive().

        :param:obj: The NovaObject on which the value is set
        :param:attr: The name of the attribute holding the value
        :param:value: The natural form of the value
        :returns: The serialized form of the value
        """
        pass

    @abc.abstractmethod
    def describe(self):
        """Returns a string describing the type of the field."""
        pass
class FieldType(AbstractFieldType):
    """Default field type: a pass-through that performs no conversion."""

    @staticmethod
    def coerce(obj, attr, value):
        """Accept any value as-is."""
        return value

    @staticmethod
    def from_primitive(obj, attr, value):
        """The primitive form is already the natural form."""
        return value

    @staticmethod
    def to_primitive(obj, attr, value):
        """The natural form is already primitive."""
        return value

    def describe(self):
        """Describe this type by its class name."""
        return type(self).__name__
class UnspecifiedDefault(object):
    # Sentinel used as the Field(default=...) marker so that None can
    # itself be a legitimate default value.
    pass
class Field(object):
    """Wraps a FieldType, adding nullability and default-value handling."""

    def __init__(self, field_type, nullable=False, default=UnspecifiedDefault):
        self._type = field_type
        self._nullable = nullable
        self._default = default

    @property
    def nullable(self):
        # Whether None is an acceptable value for this field.
        return self._nullable

    @property
    def default(self):
        # The configured default, or the UnspecifiedDefault sentinel.
        return self._default

    def _null(self, obj, attr):
        """Return the value to store when the field is set to None.

        :raises: ValueError if the field is neither nullable nor has a
                 default.
        """
        if self.nullable:
            return None
        elif self._default is not UnspecifiedDefault:
            # NOTE: use identity, not equality, to test for the sentinel —
            # equality could be fooled by a default with a custom __eq__.
            # NOTE(danms): We coerce the default value each time the field
            # is set to None as our contract states that we'll let the type
            # examine the object and attribute name at that time.
            return self._type.coerce(obj, attr, self._default)
        else:
            raise ValueError(_("Field `%s' cannot be None") % attr)

    def coerce(self, obj, attr, value):
        """Coerce a value to a suitable type.

        This is called any time you set a value on an object, like:

          foo.myint = 1

        and is responsible for making sure that the value (1 here) is of
        the proper type, or can be sanely converted.

        This also handles the potentially nullable or defaultable
        nature of the field and calls the coerce() method on a
        FieldType to actually do the coercion.

        :param:obj: The object being acted upon
        :param:attr: The name of the attribute/field being set
        :param:value: The value being set
        :returns: The properly-typed value
        """
        if value is None:
            return self._null(obj, attr)
        else:
            return self._type.coerce(obj, attr, value)

    def from_primitive(self, obj, attr, value):
        """Deserialize a value from primitive form.

        This is responsible for deserializing a value from primitive
        into regular form. It calls the from_primitive() method on a
        FieldType to do the actual deserialization.

        :param:obj: The object being acted upon
        :param:attr: The name of the attribute/field being deserialized
        :param:value: The value to be deserialized
        :returns: The deserialized value
        """
        if value is None:
            return None
        else:
            return self._type.from_primitive(obj, attr, value)

    def to_primitive(self, obj, attr, value):
        """Serialize a value to primitive form.

        This is responsible for serializing a value to primitive
        form. It calls to_primitive() on a FieldType to do the actual
        serialization.

        :param:obj: The object being acted upon
        :param:attr: The name of the attribute/field being serialized
        :param:value: The value to be serialized
        :returns: The serialized value
        """
        if value is None:
            return None
        else:
            return self._type.to_primitive(obj, attr, value)

    def describe(self):
        """Return a short string describing the type of this field."""
        name = self._type.describe()
        # Conditional expression instead of the fragile `and/or` idiom.
        prefix = 'Nullable' if self.nullable else ''
        return prefix + name
class String(FieldType):
    """A field type holding a unicode string."""

    @staticmethod
    def coerce(obj, attr, value):
        # FIXME(danms): We should really try to avoid the need to do this
        # six.integer_types and six.text_type keep this working on both
        # Python 2 (where ``long`` and ``unicode`` exist) and Python 3,
        # matching the six-based compatibility style used elsewhere in
        # this module.
        if isinstance(value, six.string_types + six.integer_types +
                      (float, datetime.datetime)):
            return six.text_type(value)
        else:
            raise ValueError(_('A string is required here, not %s') %
                             value.__class__.__name__)
class UUID(FieldType):
    """A field type holding a UUID, stored as its string form."""

    @staticmethod
    def coerce(obj, attr, value):
        # FIXME(danms): We should actually verify the UUIDness here
        return str(value)
class Integer(FieldType):
    """A field type holding an integer; int() raises on bad input."""

    @staticmethod
    def coerce(obj, attr, value):
        return int(value)
class Float(FieldType):
    """A field type holding a float; float() raises on bad input."""

    # @staticmethod for consistency with the other simple field types
    # (Integer, Boolean, String, ...) in this module; coerce uses no
    # instance state.
    @staticmethod
    def coerce(obj, attr, value):
        return float(value)
class Boolean(FieldType):
    """A field type holding a boolean; any value is truth-tested."""

    @staticmethod
    def coerce(obj, attr, value):
        return bool(value)
class DateTime(FieldType):
    """A field type holding a timezone-aware datetime.

    Serialized (primitive) form is an ISO8601 string.
    """

    @staticmethod
    def coerce(obj, attr, value):
        # Accept either an ISO8601 string or a datetime object.
        if isinstance(value, six.string_types):
            value = timeutils.parse_isotime(value)
        elif not isinstance(value, datetime.datetime):
            raise ValueError(_('A datetime.datetime is required here'))
        # Normalize naive datetimes to UTC so aware/naive comparisons
        # can't blow up later.
        if value.utcoffset() is None:
            value = value.replace(tzinfo=iso8601.iso8601.Utc())
        return value

    def from_primitive(self, obj, attr, value):
        # Parse the ISO8601 string, then run the normal coercion.
        return self.coerce(obj, attr, timeutils.parse_isotime(value))

    @staticmethod
    def to_primitive(obj, attr, value):
        return timeutils.isotime(value)
class IPAddress(FieldType):
    """A field type holding a netaddr.IPAddress (v4 or v6)."""

    @staticmethod
    def coerce(obj, attr, value):
        try:
            return netaddr.IPAddress(value)
        except netaddr.AddrFormatError as e:
            # Re-raise as ValueError for consistency with the other types.
            raise ValueError(str(e))

    def from_primitive(self, obj, attr, value):
        # Primitive form is the string representation; coerce parses it.
        return self.coerce(obj, attr, value)

    @staticmethod
    def to_primitive(obj, attr, value):
        return str(value)
class IPV4Address(IPAddress):
    """An IPAddress restricted to version 4."""

    @staticmethod
    def coerce(obj, attr, value):
        result = IPAddress.coerce(obj, attr, value)
        if result.version != 4:
            # NOTE(review): the message says "Network" although this
            # validates an address — confirm before changing the
            # user-visible text.
            raise ValueError(_('Network "%s" is not valid') % value)
        return result
class IPV6Address(IPAddress):
    """An IPAddress restricted to version 6."""

    @staticmethod
    def coerce(obj, attr, value):
        result = IPAddress.coerce(obj, attr, value)
        if result.version != 6:
            raise ValueError(_('Network "%s" is not valid') % value)
        return result
class IPV4AndV6Address(IPAddress):
    """An IPAddress accepting either version 4 or version 6."""

    @staticmethod
    def coerce(obj, attr, value):
        result = IPAddress.coerce(obj, attr, value)
        # Reject anything netaddr parsed but that is neither v4 nor v6.
        if result.version != 4 and result.version != 6:
            raise ValueError(_('Network "%s" is not valid') % value)
        return result
class IPNetwork(IPAddress):
    """A field type holding a netaddr.IPNetwork (CIDR), v4 or v6."""

    @staticmethod
    def coerce(obj, attr, value):
        try:
            return netaddr.IPNetwork(value)
        except netaddr.AddrFormatError as e:
            # Re-raise as ValueError for consistency with the other types.
            raise ValueError(str(e))
class IPV4Network(IPNetwork):
    """An IPNetwork restricted to version 4."""

    @staticmethod
    def coerce(obj, attr, value):
        try:
            # version=4 makes netaddr reject non-v4 input itself.
            return netaddr.IPNetwork(value, version=4)
        except netaddr.AddrFormatError as e:
            raise ValueError(str(e))
class IPV6Network(IPNetwork):
    """An IPNetwork restricted to version 6."""

    @staticmethod
    def coerce(obj, attr, value):
        try:
            # version=6 makes netaddr reject non-v6 input itself.
            return netaddr.IPNetwork(value, version=6)
        except netaddr.AddrFormatError as e:
            raise ValueError(str(e))
class CompoundFieldType(FieldType):
    """Base for container field types (List, Dict).

    Wraps the element type in a Field so element-level nullability and
    defaults work uniformly.
    """

    def __init__(self, element_type, **field_args):
        self._element_type = Field(element_type, **field_args)
class List(CompoundFieldType):
    """A field type holding a list of homogeneous elements."""

    def coerce(self, obj, attr, value):
        """Coerce every element in place and return the same list object."""
        if not isinstance(value, list):
            raise ValueError(_('A list is required here'))
        # Iterate a snapshot so in-place reassignment is safe.
        index = 0
        for element in list(value):
            value[index] = self._element_type.coerce(
                obj, '%s[%i]' % (attr, index), element)
            index += 1
        return value

    def to_primitive(self, obj, attr, value):
        """Serialize each element via the element type."""
        primitive = []
        for item in value:
            primitive.append(self._element_type.to_primitive(obj, attr, item))
        return primitive

    def from_primitive(self, obj, attr, value):
        """Deserialize each element via the element type."""
        natural = []
        for item in value:
            natural.append(self._element_type.from_primitive(obj, attr, item))
        return natural
class Dict(CompoundFieldType):
    """A field type holding a dict with string keys and homogeneous values."""

    def coerce(self, obj, attr, value):
        # Coerce every value in place; keys must be strings.
        if not isinstance(value, dict):
            raise ValueError(_('A dict is required here'))
        for key, element in value.items():
            if not isinstance(key, six.string_types):
                #NOTE(guohliu) In order to keep compatibility with python3
                #we need to use six.string_types rather than basestring here,
                #since six.string_types is a tuple, so we need to pass the
                #real type in.
                raise KeyTypeError(six.string_types[0], key)
            value[key] = self._element_type.coerce(
                obj, '%s["%s"]' % (attr, key), element)
        return value

    def to_primitive(self, obj, attr, value):
        # Serialize each value; keys pass through unchanged.
        primitive = {}
        for key, element in value.items():
            primitive[key] = self._element_type.to_primitive(
                obj, '%s["%s"]' % (attr, key), element)
        return primitive

    def from_primitive(self, obj, attr, value):
        # Deserialize each value; keys pass through unchanged.
        concrete = {}
        for key, element in value.items():
            concrete[key] = self._element_type.from_primitive(
                obj, '%s["%s"]' % (attr, key), element)
        return concrete
class Object(FieldType):
    """A field type holding a NovaObject of a specific named type."""

    def __init__(self, obj_name, **kwargs):
        # Name of the NovaObject class this field accepts.
        self._obj_name = obj_name
        super(Object, self).__init__(**kwargs)

    def coerce(self, obj, attr, value):
        try:
            obj_name = value.obj_name()
        except AttributeError:
            # Not a NovaObject at all; will fail the name check below.
            obj_name = ""

        if obj_name != self._obj_name:
            raise ValueError(_('An object of type %s is required here') %
                             self._obj_name)
        return value

    @staticmethod
    def to_primitive(obj, attr, value):
        return value.obj_to_primitive()

    @staticmethod
    def from_primitive(obj, attr, value):
        # FIXME(danms): Avoid circular import from base.py
        from nova.objects import base as obj_base
        # Rehydrate with the owning object's context.
        return obj_base.NovaObject.obj_from_primitive(value, obj._context)

    def describe(self):
        return "Object<%s>" % self._obj_name
class NetworkModel(FieldType):
    """A field type holding a nova NetworkInfo model.

    Primitive form is the model's JSON string.
    """

    @staticmethod
    def coerce(obj, attr, value):
        if isinstance(value, network_model.NetworkInfo):
            return value
        elif isinstance(value, six.string_types):
            # Hmm, do we need this?
            return network_model.NetworkInfo.hydrate(value)
        else:
            raise ValueError(_('A NetworkModel is required here'))

    @staticmethod
    def to_primitive(obj, attr, value):
        return value.json()

    @staticmethod
    def from_primitive(obj, attr, value):
        return network_model.NetworkInfo.hydrate(value)
class AutoTypedField(Field):
    """A Field whose type is fixed by the subclass via AUTO_TYPE."""

    # Subclasses set this to a FieldType instance.
    AUTO_TYPE = None

    def __init__(self, **kwargs):
        super(AutoTypedField, self).__init__(self.AUTO_TYPE, **kwargs)
class StringField(AutoTypedField):
    # Field storing a unicode string.
    AUTO_TYPE = String()
class UUIDField(AutoTypedField):
    # Field storing a UUID string.
    AUTO_TYPE = UUID()
class IntegerField(AutoTypedField):
    # Field storing an integer.
    AUTO_TYPE = Integer()
class FloatField(AutoTypedField):
    # Field storing a float.
    AUTO_TYPE = Float()
class BooleanField(AutoTypedField):
    # Field storing a boolean.
    AUTO_TYPE = Boolean()
class DateTimeField(AutoTypedField):
    # Field storing a timezone-aware datetime.
    AUTO_TYPE = DateTime()
class IPAddressField(AutoTypedField):
    # Field storing an IP address (v4 or v6).
    AUTO_TYPE = IPAddress()
class IPV4AddressField(AutoTypedField):
    # Field storing an IPv4 address.
    AUTO_TYPE = IPV4Address()
class IPV6AddressField(AutoTypedField):
    # Field storing an IPv6 address.
    AUTO_TYPE = IPV6Address()
class IPNetworkField(AutoTypedField):
    # Field storing an IP network (v4 or v6 CIDR).
    AUTO_TYPE = IPNetwork()
class IPV4NetworkField(AutoTypedField):
    # Field storing an IPv4 network.
    AUTO_TYPE = IPV4Network()
class IPV6NetworkField(AutoTypedField):
    # Field storing an IPv6 network.
    AUTO_TYPE = IPV6Network()
class DictOfStringsField(AutoTypedField):
    # Field storing a dict of string keys to string values.
    AUTO_TYPE = Dict(String())
class DictOfNullableStringsField(AutoTypedField):
    # Field storing a dict of string keys to string-or-None values.
    AUTO_TYPE = Dict(String(), nullable=True)
class ListOfStringsField(AutoTypedField):
    # Field storing a list of strings.
    AUTO_TYPE = List(String())
class ObjectField(AutoTypedField):
    def __init__(self, objtype, **kwargs):
        # AUTO_TYPE is set per-instance here (not at class level) because
        # the target object type is only known at construction time.
        self.AUTO_TYPE = Object(objtype)
        super(ObjectField, self).__init__(**kwargs)
class ListOfObjectsField(AutoTypedField):
    def __init__(self, objtype, **kwargs):
        # Per-instance AUTO_TYPE, as in ObjectField: the element object
        # type is only known at construction time.
        self.AUTO_TYPE = List(Object(objtype))
        super(ListOfObjectsField, self).__init__(**kwargs)
| |
'''
Given a hypotheses graph and weights, this script tries to find split points where there are not many mergers,
splits the graph into N parts, and tracks them independently.
'''
from __future__ import print_function, absolute_import, nested_scopes, generators, division, with_statement, unicode_literals
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# standard imports
try:
import commentjson as json
except ImportError:
import json
import logging
import copy
import configargparse as argparse
import numpy as np
import networkx as nx
import time
import hytra.core.jsongraph
import concurrent.futures
def _getLogger():
''' logger to be used in this module '''
return logging.getLogger("split-track-stitch")
def track(model, weights, solver='flow'):
    ''' solver may be flow or ilp '''
    # Solver backends are imported lazily so the script still works when
    # only one of them is installed.
    if solver == 'flow':
        import dpct
        return dpct.trackFlowBased(model, weights)
    else:
        # ILP solver: prefer CPLEX, fall back to Gurobi.
        try:
            import multiHypoTracking_with_cplex as mht
        except ImportError:
            try:
                import multiHypoTracking_with_gurobi as mht
            except ImportError:
                raise ImportError("Could not find multi hypotheses tracking ilp solver")
        return mht.track(model, weights)
def trackAndContractSubmodel(submodel, weights, modelIdx, solver):
    """Track one submodel and contract its tracks into tracklet nodes.

    Runs the solver on ``submodel``, then merges every run of detections
    whose object count agrees along the links (and that contains no
    division) into a single contracted node, keeping the stitching graph
    small.

    :param submodel: json hypotheses (sub)graph to track
    :param weights: tracking weights as loaded from json
    :param modelIdx: index of this submodel; returned so the caller can
        identify results that arrive out of order from the process pool
    :param solver: 'flow' or 'ilp', passed through to ``track``
    :returns: tuple (modelIdx, result, links, tracklets, nodeIdRemapping,
        valuePerDetection, numDivisions)
    """
    try:
        _getLogger().info("Tracking submodel {}".format(modelIdx))
        result = track(submodel, weights, solver)

        # index links and detections by id for the lookups below
        linksByIdTuple = {}
        for l in submodel['linkingHypotheses']:
            linksByIdTuple[(l['src'], l['dest'])] = l
        detectionsById = {}
        for d in submodel['segmentationHypotheses']:
            detectionsById[d['id']] = d

        tracklets = []
        links = []
        nodeIdRemapping = {}
        valuePerDetection = {}
        divisionsPerDetection = {}

        # find connected components of graph where edges are only inserted
        # if the value of the nodes agrees with the value along the link.
        # At divisions we do not insert the links so that lineage trees are
        # not connected components.
        g = nx.Graph()
        for d in result['detectionResults']:
            valuePerDetection[d['id']] = d['value']
            # initialize; overwritten below if a division result is given
            divisionsPerDetection[d['id']] = False
            if d['value'] > 0:
                g.add_node(d['id'])
        for d in result['divisionResults']:
            divisionsPerDetection[d['id']] = d['value']
        for l in result['linkingResults']:
            s, d = l['src'], l['dest']
            if l['value'] > 0 and divisionsPerDetection[s] is False and valuePerDetection[s] == l['value'] and valuePerDetection[d] == l['value']:
                g.add_edge(s, d)

        # for every connected component, insert a node into the stitching graph
        connectedComponents = nx.connected_components(g)
        _getLogger().info("Contracting tracks of submodel {}".format(modelIdx))
        for c in connectedComponents:
            # sum over features of dets + links
            linksInTracklet = [idTuple for idTuple in linksByIdTuple.keys() if idTuple[0] in c and idTuple[1] in c]
            linkFeatures = [linksByIdTuple[idTuple]['features'] for idTuple in linksInTracklet]
            detFeatures = [detectionsById[i]['features'] for i in c]
            accumulatedFeatures = np.sum([hytra.core.jsongraph.delistify(f) for f in linkFeatures + detFeatures], axis=0)

            # Get tracklet ids from nodes at start and end times of tracklets
            minTime = None
            maxTime = None
            for n in c:
                if maxTime is None or detectionsById[n]['nid'][0] > maxTime:
                    maxTime = detectionsById[n]['nid'][0]
                    maxTrackletId = n
                if minTime is None or detectionsById[n]['nid'][0] < minTime:
                    minTime = detectionsById[n]['nid'][0]
                    minTrackletId = n

            contractedNode = {
                'id' : minTrackletId,
                'contains' : set(c),
                'links' : linksInTracklet,
                'nid' : detectionsById[minTrackletId]['nid'],
                'minUid' : minTrackletId,
                'maxUid' : maxTrackletId,
                'features' : hytra.core.jsongraph.listify(accumulatedFeatures),
                'timestep' : [minTime, maxTime]
            }
            # appearance lives on the first node of the tracklet,
            # disappearance and division on the last one
            if 'appearanceFeatures' in detectionsById[minTrackletId]:
                contractedNode['appearanceFeatures'] = detectionsById[minTrackletId]['appearanceFeatures']
            if 'disappearanceFeatures' in detectionsById[maxTrackletId]:
                contractedNode['disappearanceFeatures'] = detectionsById[maxTrackletId]['disappearanceFeatures']
            if 'divisionFeatures' in detectionsById[maxTrackletId]:
                contractedNode['divisionFeatures'] = detectionsById[maxTrackletId]['divisionFeatures']
            tracklets.append(contractedNode)

            for n in c:
                nodeIdRemapping[n] = minTrackletId

        # add the remaining links to the stitching graph with adjusted source and destination
        for l in result['linkingResults']:
            s, d = l['src'], l['dest']
            if l['value'] > 0 and (valuePerDetection[s] != l['value'] or valuePerDetection[d] != l['value'] or divisionsPerDetection[s]):
                newL = {
                    'src' : nodeIdRemapping[s],
                    'dest' : nodeIdRemapping[d],
                    'features' : linksByIdTuple[(s, d)]['features']
                }
                links.append(newL)

        _getLogger().info("Found divisions at {}".format([k for k, v in divisionsPerDetection.items() if v is True]))
        return modelIdx, result, links, tracklets, nodeIdRemapping, valuePerDetection, sum(1 for v in divisionsPerDetection.values() if v is True)
    except Exception:
        # Log inside the worker (so the full traceback is visible there),
        # then re-raise so job.result() in the parent fails loudly instead
        # of receiving an implicit None and crashing on tuple unpacking.
        _getLogger().exception('Exception while processing submodel')
        raise
def main(args):
    """Split the model at low-merger frames, track the parts in parallel,
    then stitch the partial results back together and save them.

    :param args: parsed command-line arguments (model_filename,
        weights_filename, results_filename, num_splits, solver)
    """
    assert args.solver in ['flow', 'ilp'], "Invalid Solver selected"
    with open(args.model_filename, 'r') as f:
        model = json.load(f)
    with open(args.weights_filename, 'r') as f:
        weights = json.load(f)
    _getLogger().info("Done loading model and weights")
    traxelIdPerTimestepToUniqueIdMap, uuidToTraxelMap = hytra.core.jsongraph.getMappingsBetweenUUIDsAndTraxels(model)
    assert not any(len(u2t) > 1 for u2t in uuidToTraxelMap.values()), "Doesn't work with tracklets yet!"
    # group detections by the timestep they belong to
    detectionTimestepTuples = [(timestepIdTuple, entry) for entry in model['segmentationHypotheses'] for timestepIdTuple in uuidToTraxelMap[int(entry['id'])]]
    detectionsPerTimestep = {}
    for timestep_id, detection in detectionTimestepTuples:
        detectionsPerTimestep.setdefault(int(timestep_id[0]), []).append(detection)

    nonSingletonCostsPerFrame = []
    detectionsById = {}
    linksByIdTuple = {}
    _getLogger().info("Setup done. Searching for good split locations...")
    # per frame, collect the cheapest cost of any non-single-object state
    # (feature index 1 — the singleton state — is removed below)
    for t in detectionsPerTimestep.keys():
        nonSingletonCosts = []
        for d in detectionsPerTimestep[t]:
            detectionsById[d['id']] = d
            d['nid'] = uuidToTraxelMap[d['id']][0]
            f = d['features'][:]
            del f[1]
            nonSingletonCosts.extend(f)
        nonSingletonCostsPerFrame.append(min(nonSingletonCosts)[0])

    for l in model['linkingHypotheses']:
        linksByIdTuple[(l['src'], l['dest'])] = l

    # create a list of the sum of 2 neighboring elements (has len = len(nonSingletonCostsPerFrame) - 1)
    nonSingletonCostsPerFrameGap = [i + j for i, j in zip(nonSingletonCostsPerFrame[:-1], nonSingletonCostsPerFrame[1:])]

    # for debugging: show which frames could be interesting. The higher the value, the more are all objects in the frame true detections.
    # import matplotlib.pyplot as plt
    # plt.figure()
    # plt.plot(nonSingletonCostsPerFrame)
    # plt.show()

    firstFrame = min(detectionsPerTimestep.keys())
    lastFrame = max(detectionsPerTimestep.keys())
    numSplits = args.num_splits
    numFramesPerSplit = (lastFrame - firstFrame) // numSplits

    # find points where TWO consecutive frames have a low merger score together!
    # find split points in a range of 10 frames before/after the desired split location
    # TODO: also consider divisions!
    splitPoints = []
    border = 10
    # shrink the search window when splits are very close together
    if numFramesPerSplit < border*2:
        border = 1

    for s in range(1, numSplits):
        desiredSplitPoint = s * numFramesPerSplit
        subrange = np.array(nonSingletonCostsPerFrameGap[desiredSplitPoint - border : desiredSplitPoint + border])
        splitPoints.append(desiredSplitPoint - border + np.argmax(subrange))

    _getLogger().info("Going to split hypotheses graph at frames {}".format(splitPoints))

    # for debugging: show chosen frames
    # import matplotlib.pyplot as plt
    # plt.figure()
    # plt.plot(nonSingletonCostsPerFrame)
    # plt.scatter(splitPoints, [nonSingletonCostsPerFrame[s] for s in splitPoints])
    # plt.show()

    # split graph
    def getSubmodel(startTime, endTime):
        """Build the submodel covering frames [startTime, endTime).

        Takes detections from detectionsPerTimestep, then adds the links
        whose both endpoints lie inside the submodel. Appearance costs at
        the first frame and disappearance costs at the last frame are
        overwritten with near-zero values so tracks may start/end freely
        at the cut.
        """
        # for each split: take detections from detectionsPerTimestep, store a list of the uuids, then add links by filtering for the uuids
        # also make sure that appearance/disappearance costs are zero at the beginning/end of each submodel
        # TODO: tracklets that reach over the gap must be split, or put into just one submodel but connected to the other side?
        submodel = {}
        segmentationHypotheses = []
        for f in range(startTime, endTime):
            if f == startTime:
                for d in detectionsPerTimestep[f]:
                    newD = copy.deepcopy(d)
                    newD['appearanceFeatures'] = [[0.0000001 * sum(range(i+1))] for i in range(len(d['features']))]
                    segmentationHypotheses.append(newD)
            elif f+1 == endTime:
                for d in detectionsPerTimestep[f]:
                    newD = copy.deepcopy(d)
                    newD['disappearanceFeatures'] = [[0.0000001 * sum(range(i+1))] for i in range(len(d['features']))]
                    segmentationHypotheses.append(newD)
            else:
                segmentationHypotheses.extend(detectionsPerTimestep[f])
        submodel['segmentationHypotheses'] = segmentationHypotheses
        uuidsInSubmodel = set([d['id'] for f in range(startTime, endTime) for d in detectionsPerTimestep[f]])
        submodel['linkingHypotheses'] = [l for l in model['linkingHypotheses'] if (l['src'] in uuidsInSubmodel) and (l['dest'] in uuidsInSubmodel)]
        submodel['divisionHypotheses'] = []
        submodel['settings'] = model['settings']
        return submodel

    submodels = []
    lastSplit = 0
    splitPoints.append(lastFrame) # so that we get the last split as well
    for splitPoint in splitPoints:
        _getLogger().info("Creating submodel from t={} to t={}...".format(lastSplit, splitPoint + 1))
        submodels.append(getSubmodel(lastSplit, splitPoint + 1))
        _getLogger().info("\t contains {} nodes and {} edges".format(len(submodels[-1]['segmentationHypotheses']), len(submodels[-1]['linkingHypotheses'])))
        lastSplit = splitPoint + 1

    # We will track in parallel now.
    # how to merge results?
    # make detection weight higher, or accumulate energy over tracks (but what to do with mergers then?),
    # or contract everything where source-node, link and destination have the same number of objects?
    # We choose the last option.
    _getLogger().info("Tracking in parallel and contracting tracks for stitching")
    results = []
    tracklets = []
    links = []
    # NOTE: stitchingModel aliases the tracklets/links lists, so they are
    # filled in as results arrive below.
    stitchingModel = {'segmentationHypotheses': tracklets, 'linkingHypotheses': links, 'divisionHypotheses' : [], 'settings' : model['settings']}
    nodeIdRemapping = {}
    valuePerDetection = {}
    numDivisions = 0

    with concurrent.futures.ProcessPoolExecutor() as executor:
        # submit one tracking job per submodel
        jobs = []
        for i, submodel in enumerate(submodels):
            jobs.append(executor.submit(trackAndContractSubmodel,
                                        submodel,
                                        weights,
                                        i,
                                        args.solver
                                        ))
        # collect results as they finish (order is not deterministic)
        for job in concurrent.futures.as_completed(jobs):
            idx, r, l, t, n, v, nd = job.result()
            _getLogger().info("Finished tracking submodel {}".format(idx))
            results.append(r) # can be randomly ordered!
            links.extend(l)
            tracklets.extend(t)
            nodeIdRemapping.update(n)
            valuePerDetection.update(v)
            numDivisions += nd

    _getLogger().info("\tgot {} links from within the submodels".format(len(links)))
    _getLogger().info("\tfound {} divisions within the submodels".format(numDivisions))

    # insert all edges crossing the splits that connect active detections
    detectionIdsPerTimestep = dict( [(k, [d['id'] for d in v]) for k, v in detectionsPerTimestep.items()])
    for idTuple, link in linksByIdTuple.items():
        s, d = idTuple
        for splitPoint in splitPoints[:-1]:
            if s in detectionIdsPerTimestep[splitPoint] and d in detectionIdsPerTimestep[splitPoint + 1] and valuePerDetection[s] > 0 and valuePerDetection[d] > 0:
                newL = copy.deepcopy(link)
                newL['src'] = nodeIdRemapping[s]
                newL['dest'] = nodeIdRemapping[d]
                links.append(newL)

    _getLogger().info("\tcontains {} nodes and {} edges".format(len(tracklets), len(links)))

    # hytra.core.jsongraph.writeToFormattedJSON('/Users/chaubold/Desktop/stitchingGraph.json', stitchingModel)

    # contracted tracklets may be single nodes; allow them in the solver
    stitchingModel['settings']['allowLengthOneTracks'] = True
    stitchingResult = track(stitchingModel, weights, args.solver)

    # hytra.core.jsongraph.writeToFormattedJSON('/Users/chaubold/Desktop/stitchingResult.json', stitchingResult)

    _getLogger().info("Extracting stitched result...")
    # extract full result: expand contracted tracklets back to the
    # original detections and links
    trackletsById = dict([(t['id'], t) for t in tracklets])
    fullResult = {'detectionResults' : [], 'linkingResults' : [], 'divisionResults' : []}
    t0 = time.time()
    for dr in stitchingResult['detectionResults']:
        v = dr['value']
        t = trackletsById[dr['id']]
        if v > 0:
            for originalUuid in t['contains']:
                fullResult['detectionResults'].append({'id': originalUuid, 'value': v})
            for s, d in t['links']:
                fullResult['linkingResults'].append({'src': s, 'dest' : d, 'value': v})
        else:
            _getLogger().debug("Skipped detection {} while stitching!".format(t))

    for lr in stitchingResult['linkingResults']:
        v = lr['value']
        st = trackletsById[lr['src']]
        dt = trackletsById[lr['dest']]
        if v > 0:
            # a stitching link connects the end of one tracklet to the
            # start of the next
            fullResult['linkingResults'].append({'src': st['maxUid'], 'dest' : dt['minUid'], 'value': v})

    for dr in stitchingResult['divisionResults']:
        v = dr['value']
        t = trackletsById[dr['id']]
        # divisions always happen at the last node of a tracklet
        fullResult['divisionResults'].append({'id': t['maxUid'], 'value': v})
    t1 = time.time()
    _getLogger().info("Extracting result took {} secs".format(t1-t0))

    _getLogger().info("Saving stitched result to {}".format(args.results_filename))
    hytra.core.jsongraph.writeToFormattedJSON(args.results_filename, fullResult)
if __name__ == "__main__":
    # Command-line interface; configargparse also accepts a config file.
    parser = argparse.ArgumentParser(description='Take a json file containing a result to a set of HDF5 events files',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--config', is_config_file=True, help='config file path')
    parser.add_argument('--graph-json-file', required=True, type=str, dest='model_filename',
                        help='Filename of the json model description')
    parser.add_argument('--weights-json-file', required=True, type=str, dest='weights_filename',
                        help='Filename of the json file containing weights')
    parser.add_argument('--out-json-file', required=True, type=str, dest='results_filename',
                        help='Filename where to store the results after tracking as JSON')
    parser.add_argument('--num-splits', required=True, type=int, dest='num_splits',
                        help='Into how many pieces the tracking problem should be split')
    parser.add_argument('--solver', default='flow', type=str, dest='solver',
                        help='Solver to use, may be "flow" or "ilp"')
    parser.add_argument("--verbose", dest='verbose', action='store_true', default=False)

    # tolerate extra arguments so shared config files can be reused
    args, unknown = parser.parse_known_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    _getLogger().debug("Ignoring unknown parameters: {}".format(unknown))

    # normalize so 'FLOW'/'ILP' are accepted too
    args.solver = args.solver.lower()

    main(args)
| |
from __future__ import annotations
import asyncio
from enum import Enum
from inspect import isawaitable
from typing import Any, Dict, List, Optional, Tuple, Union, cast
from sanic_routing import BaseRouter, Route, RouteGroup # type: ignore
from sanic_routing.exceptions import NotFound # type: ignore
from sanic_routing.utils import path_to_parts # type: ignore
from sanic.exceptions import InvalidSignal
from sanic.log import error_logger, logger
from sanic.models.handler_types import SignalHandler
class Event(Enum):
    """Canonical names of the built-in signal events."""

    SERVER_INIT_AFTER = "server.init.after"
    SERVER_INIT_BEFORE = "server.init.before"
    SERVER_SHUTDOWN_AFTER = "server.shutdown.after"
    SERVER_SHUTDOWN_BEFORE = "server.shutdown.before"
    HTTP_LIFECYCLE_BEGIN = "http.lifecycle.begin"
    HTTP_LIFECYCLE_COMPLETE = "http.lifecycle.complete"
    HTTP_LIFECYCLE_EXCEPTION = "http.lifecycle.exception"
    HTTP_LIFECYCLE_HANDLE = "http.lifecycle.handle"
    HTTP_LIFECYCLE_READ_BODY = "http.lifecycle.read_body"
    HTTP_LIFECYCLE_READ_HEAD = "http.lifecycle.read_head"
    HTTP_LIFECYCLE_REQUEST = "http.lifecycle.request"
    HTTP_LIFECYCLE_RESPONSE = "http.lifecycle.response"
    HTTP_ROUTING_AFTER = "http.routing.after"
    HTTP_ROUTING_BEFORE = "http.routing.before"
    HTTP_LIFECYCLE_SEND = "http.lifecycle.send"
    HTTP_MIDDLEWARE_AFTER = "http.middleware.after"
    HTTP_MIDDLEWARE_BEFORE = "http.middleware.before"
# Namespaces whose event names are reserved for the framework itself;
# _build_event_parts() rejects user-declared signals in these namespaces
# unless the final part is a dynamic "<...>" segment.
RESERVED_NAMESPACES = {
    "server": (
        Event.SERVER_INIT_AFTER.value,
        Event.SERVER_INIT_BEFORE.value,
        Event.SERVER_SHUTDOWN_AFTER.value,
        Event.SERVER_SHUTDOWN_BEFORE.value,
    ),
    "http": (
        Event.HTTP_LIFECYCLE_BEGIN.value,
        Event.HTTP_LIFECYCLE_COMPLETE.value,
        Event.HTTP_LIFECYCLE_EXCEPTION.value,
        Event.HTTP_LIFECYCLE_HANDLE.value,
        Event.HTTP_LIFECYCLE_READ_BODY.value,
        Event.HTTP_LIFECYCLE_READ_HEAD.value,
        Event.HTTP_LIFECYCLE_REQUEST.value,
        Event.HTTP_LIFECYCLE_RESPONSE.value,
        Event.HTTP_ROUTING_AFTER.value,
        Event.HTTP_ROUTING_BEFORE.value,
        Event.HTTP_LIFECYCLE_SEND.value,
        Event.HTTP_MIDDLEWARE_AFTER.value,
        Event.HTTP_MIDDLEWARE_BEFORE.value,
    ),
}
def _blank():
...
class Signal(Route):
    """A registered signal; behaves exactly like a router Route."""

    ...
class SignalGroup(RouteGroup):
    """A group of signals sharing one event path."""

    ...
class SignalRouter(BaseRouter):
def __init__(self) -> None:
super().__init__(
delimiter=".",
route_class=Signal,
group_class=SignalGroup,
stacking=True,
)
self.ctx.loop = None
def get( # type: ignore
self,
event: str,
condition: Optional[Dict[str, str]] = None,
):
extra = condition or {}
try:
group, param_basket = self.find_route(
f".{event}",
self.DEFAULT_METHOD,
self,
{"__params__": {}, "__matches__": {}},
extra=extra,
)
except NotFound:
message = "Could not find signal %s"
terms: List[Union[str, Optional[Dict[str, str]]]] = [event]
if extra:
message += " with %s"
terms.append(extra)
raise NotFound(message % tuple(terms))
# Regex routes evaluate and can extract params directly. They are set
# on param_basket["__params__"]
params = param_basket["__params__"]
if not params:
# If param_basket["__params__"] does not exist, we might have
# param_basket["__matches__"], which are indexed based matches
# on path segments. They should already be cast types.
params = {
param.name: param_basket["__matches__"][idx]
for idx, param in group.params.items()
}
return group, [route.handler for route in group], params
async def _dispatch(
self,
event: str,
context: Optional[Dict[str, Any]] = None,
condition: Optional[Dict[str, str]] = None,
fail_not_found: bool = True,
reverse: bool = False,
) -> Any:
try:
group, handlers, params = self.get(event, condition=condition)
except NotFound as e:
if fail_not_found:
raise e
else:
if self.ctx.app.debug and self.ctx.app.state.verbosity >= 1:
error_logger.warning(str(e))
return None
events = [signal.ctx.event for signal in group]
for signal_event in events:
signal_event.set()
if context:
params.update(context)
signals = group.routes
if not reverse:
signals = signals[::-1]
try:
for signal in signals:
params.pop("__trigger__", None)
if (
(condition is None and signal.ctx.exclusive is False)
or (
condition is None
and not signal.handler.__requirements__
)
or (condition == signal.handler.__requirements__)
) and (signal.ctx.trigger or event == signal.ctx.definition):
maybe_coroutine = signal.handler(**params)
if isawaitable(maybe_coroutine):
retval = await maybe_coroutine
if retval:
return retval
elif maybe_coroutine:
return maybe_coroutine
return None
finally:
for signal_event in events:
signal_event.clear()
async def dispatch(
self,
event: str,
*,
context: Optional[Dict[str, Any]] = None,
condition: Optional[Dict[str, str]] = None,
fail_not_found: bool = True,
inline: bool = False,
reverse: bool = False,
) -> Union[asyncio.Task, Any]:
dispatch = self._dispatch(
event,
context=context,
condition=condition,
fail_not_found=fail_not_found and inline,
reverse=reverse,
)
logger.debug(f"Dispatching signal: {event}")
if inline:
return await dispatch
task = asyncio.get_running_loop().create_task(dispatch)
await asyncio.sleep(0)
return task
    def add(  # type: ignore
        self,
        handler: SignalHandler,
        event: str,
        condition: Optional[Dict[str, Any]] = None,
        exclusive: bool = True,
    ) -> Signal:
        """Register ``handler`` for ``event`` and return the new Signal.

        Events whose third part is dynamic (``"<...>"``) are routed under a
        ``*`` name and keep the cleaned parameter name as their trigger;
        static events are rewritten to carry a ``<__trigger__>`` segment so
        both kinds share the same route shape.
        """
        event_definition = event
        parts = self._build_event_parts(event)
        if parts[2].startswith("<"):
            # Dynamic action: route as "ns.name.*" and remember the trigger.
            name = ".".join([*parts[:-1], "*"])
            trigger = self._clean_trigger(parts[2])
        else:
            name = event
            trigger = ""
        if not trigger:
            # Give static events the dynamic route shape used for matching.
            event = ".".join([*parts[:2], "<__trigger__>"])
        # Stash matching metadata on the handler for _dispatch's checks.
        handler.__requirements__ = condition  # type: ignore
        handler.__trigger__ = trigger  # type: ignore
        signal = super().add(
            event,
            handler,
            name=name,
            append=True,
        )  # type: ignore
        signal.ctx.exclusive = exclusive
        signal.ctx.trigger = trigger
        signal.ctx.definition = event_definition
        return cast(Signal, signal)
    def finalize(self, do_compile: bool = True, do_optimize: bool = False):
        """Finish signal registration and build the underlying router.

        Adds the built-in init signal, captures the running loop (raises
        ``RuntimeError`` if called outside one) and gives every signal an
        ``asyncio.Event`` used by dispatch to signal "fired".
        """
        self.add(_blank, "sanic.__signal__.__init__")
        try:
            self.ctx.loop = asyncio.get_running_loop()
        except RuntimeError:
            raise RuntimeError("Cannot finalize signals outside of event loop")
        for signal in self.routes:
            signal.ctx.event = asyncio.Event()
        return super().finalize(do_compile=do_compile, do_optimize=do_optimize)
def _build_event_parts(self, event: str) -> Tuple[str, str, str]:
parts = path_to_parts(event, self.delimiter)
if (
len(parts) != 3
or parts[0].startswith("<")
or parts[1].startswith("<")
):
raise InvalidSignal("Invalid signal event: %s" % event)
if (
parts[0] in RESERVED_NAMESPACES
and event not in RESERVED_NAMESPACES[parts[0]]
and not (parts[2].startswith("<") and parts[2].endswith(">"))
):
raise InvalidSignal(
"Cannot declare reserved signal event: %s" % event
)
return parts
def _clean_trigger(self, trigger: str) -> str:
trigger = trigger[1:-1]
if ":" in trigger:
trigger, _ = trigger.split(":")
return trigger
| |
import datetime
import decimal
import os
import StringIO
from django.conf import settings
from django.utils import six
import commonware.log
import defusedxml.ElementTree as etree
from jinja2 import Environment, FileSystemLoader
from rest_framework.exceptions import ParseError
from rest_framework.parsers import JSONParser, BaseParser
import mkt.constants.iarc_mappings as mappings
from mkt.constants import ratingsbodies
from mkt.site.helpers import strip_controls
from mkt.translations.utils import no_translation
log = commonware.log.getLogger('z.iarc')
# Base directory holding the IARC XML templates shipped with this app.
root = os.path.join(settings.ROOT, 'lib', 'iarc')
env = Environment(loader=FileSystemLoader(os.path.join(root, 'templates')))
# Strip control characters from every value rendered into the templates.
env.finalize = lambda x: strip_controls(x)
def render_xml(template, context):
    """
    Render an XML template from a context dict.

    Control characters are stripped from rendered values (via the
    environment's ``finalize`` hook) before encoding.  Note that
    ``context`` is mutated: the IARC password and platform are injected.
    """
    # Every XML request to IARC must carry credentials.
    context['password'] = settings.IARC_PASSWORD
    context['platform'] = settings.IARC_PLATFORM
    tmpl = env.get_template(template)
    return tmpl.render(context)
def get_iarc_app_title(app):
    """Return the app's name delocalized to its default locale.

    Re-fetches the webapp (including deleted ones) inside a
    ``no_translation`` context so the returned name is not the
    request-locale translation.
    """
    # Imported here to avoid a circular import with mkt.webapps.models.
    from mkt.webapps.models import Webapp
    with no_translation(app.default_locale):
        delocalized_app = Webapp.with_deleted.get(pk=app.pk)
    return unicode(delocalized_app.name)
class IARC_Parser(object):
    """
    Base class for IARC XML and JSON parsers.
    """
    def _process_iarc_items(self, data):
        """
        Looks for IARC keys ('interactive_elements' or keys starting with
        'rating_' or 'descriptors_') and trades them for a 'ratings' dictionary
        or descriptor and interactive lists.

        ``data`` is a list of flat dicts (one per IARC row); returns a new
        list of dicts with 'ratings'/'descriptors'/'interactives' keys added
        only when non-empty.  Unknown descriptor/interactive names are
        logged and skipped rather than raising.
        """
        rows = []  # New data object we'll return.
        for row in data:
            d = {}
            ratings = {}
            descriptors = []
            interactives = []
            for k, v in row.items():
                # Get ratings body constant from the key suffix
                # (e.g. 'rating_esrb' -> ESRB), defaulting to GENERIC.
                body = mappings.BODIES.get(k.split('_')[-1].lower(),
                                           ratingsbodies.GENERIC)
                if k == 'rating_system':
                    # This key is used in the Get_Rating_Changes API.
                    d[k] = mappings.BODIES.get(v.lower(),
                                               ratingsbodies.GENERIC)
                elif k == 'interactive_elements':
                    # Comma-separated list of interactive element names.
                    for interact in [s.strip() for s in v.split(',') if s]:
                        key = mappings.INTERACTIVES.get(interact)
                        if key:
                            interactives.append(key)
                        else:
                            log.error('Rating interactive %s DNE' % interact)
                elif k.startswith('rating_'):
                    # Unknown rating values fall back to the body default.
                    ratings[body] = mappings.RATINGS[body.id].get(
                        v, mappings.RATINGS[body.id]['default'])
                elif k.startswith('descriptors_'):
                    for desc in [s.strip() for s in v.split(',') if s]:
                        key = mappings.DESCS[body.id].get(desc)
                        if key:
                            descriptors.append(key)
                        else:
                            log.error('Rating descriptor %s DNE' % desc)
                else:
                    # Pass every other key through untouched.
                    d[k] = v
            if ratings:
                d['ratings'] = ratings
            if descriptors:
                d['descriptors'] = descriptors
            if interactives:
                d['interactives'] = interactives
            rows.append(d)
        return rows
# From django-rest-framework 2.x.
class XMLParser(BaseParser):
    """
    XML parser.

    Parses ``application/xml`` request bodies into nested dicts/lists,
    type-converting leaf text to datetime/int/Decimal where possible.
    """
    media_type = 'application/xml'
    def parse(self, stream, media_type=None, parser_context=None):
        """
        Parses the incoming bytestream as XML and returns the resulting data.

        Raises rest_framework's ParseError on malformed XML; DTDs are
        forbidden (defusedxml) to block entity-expansion attacks.
        """
        assert etree, 'XMLParser requires defusedxml to be installed'
        parser_context = parser_context or {}
        encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
        parser = etree.DefusedXMLParser(encoding=encoding)
        try:
            tree = etree.parse(stream, parser=parser, forbid_dtd=True)
        except (etree.ParseError, ValueError) as exc:
            raise ParseError('XML parse error - %s' % six.text_type(exc))
        data = self._xml_convert(tree.getroot())
        return data
    def _xml_convert(self, element):
        """
        convert the xml `element` into the corresponding python object

        Leaf elements become type-converted scalars; a parent whose first
        child is tagged "list-item" becomes a list, otherwise a dict keyed
        by child tag (later duplicate tags overwrite earlier ones).
        """
        children = list(element)
        if len(children) == 0:
            return self._type_convert(element.text)
        else:
            # if the 1st child tag is list-item means all children are list-itm
            if children[0].tag == "list-item":
                data = []
                for child in children:
                    data.append(self._xml_convert(child))
            else:
                data = {}
                for child in children:
                    data[child.tag] = self._xml_convert(child)
            return data
    def _type_convert(self, value):
        """
        Converts the value returned by the XMl parse into the equivalent
        Python type

        Tried in order: datetime ('%Y-%m-%d %H:%M:%S'), int, Decimal;
        anything else (including None) is returned unchanged.
        """
        if value is None:
            return value
        try:
            return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
        except ValueError:
            pass
        try:
            return int(value)
        except ValueError:
            pass
        try:
            return decimal.Decimal(value)
        except decimal.InvalidOperation:
            pass
        return value
class IARC_XML_Parser(XMLParser, IARC_Parser):
    """
    Custom XML processor for IARC whack XML that defines all content in XML
    attributes with no tag content and all tags are named the same. This builds
    a dict using the "NAME" and "VALUE" attributes.
    """
    def parse(self, stream, media_type=None, parser_context=None):
        """
        Parses the incoming bytestream as XML and returns the resulting data.

        Rows are post-processed into ratings/descriptors/interactives and,
        when multiple "ROW" tags were present, wrapped as {'rows': [...]}.
        """
        data = super(IARC_XML_Parser, self).parse(stream, media_type,
                                                  parser_context)
        # Process ratings, descriptors, interactives.
        data = self._process_iarc_items(data)
        # If it's a list, it had one or more "ROW" tags.
        if isinstance(data, list):
            data = {'rows': data}
        return data
    def parse_string(self, string):
        """Parse an IARC XML payload given as a string."""
        # WARNING: Ugly hack.
        #
        # IARC XML is utf-8 encoded yet the XML has a utf-16 header. Python
        # correctly reports the encoding mismatch and raises an error. So we
        # replace it here to make things work.
        string = string.replace('encoding="utf-16"', 'encoding="utf-8"')
        return self.parse(StringIO.StringIO(string))
    def _xml_convert(self, element):
        """
        Convert the xml `element` into the corresponding Python object.

        Unlike the base XMLParser, values live in the "VALUE" attribute and
        dict keys come from the "NAME" attribute (falling back to the tag);
        a parent whose first child is "ROW" becomes a list of rows.
        """
        children = list(element)
        if len(children) == 0:
            return self._type_convert(element.get('VALUE', ''))
        else:
            if children[0].tag == 'ROW':
                data = []
                for child in children:
                    data.append(self._xml_convert(child))
            else:
                data = {}
                for child in children:
                    data[child.get('NAME',
                                   child.tag)] = self._xml_convert(child)
            return data
class IARC_JSON_Parser(JSONParser, IARC_Parser):
    """
    JSON Parser to handle IARC's JSON format.
    """
    def parse(self, stream, media_type=None, parser_context=None):
        # Parse standard JSON first, then flatten IARC's NAME/VALUE field
        # layout and resolve ratings/descriptors/interactives constants.
        data = super(IARC_JSON_Parser, self).parse(stream, media_type,
                                                   parser_context)
        data = self._convert(data)
        data = self._process_iarc_items(data)
        return data
    def _convert(self, data):
        """
        Converts JSON that looks like::
            {
                "NAME": "token",
                "TYPE": "string",
                "VALUE": "AB12CD3"
            }
        Into something more normal that looks like this::
            {
                "token": "AB12CD3"
            }
        """
        d = {}
        for f in data['ROW']['FIELD']:
            d[f['NAME']] = f['VALUE']
        # Return a list to match the parsed XML.
        return [d]
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from .constants import *
from numba import jit
import numpy as np
class DependencySelector(object):
    """
    Fast method for identifying dependencies among labeling functions.
    """
    def __init__(self):
        pass
    def select(self, L, higher_order=False, propensity=False, threshold=0.05, truncation=10):
        """
        Identifies a dependency structure among labeling functions for a given data set.
        By default searches for correlations, i.e., the DEP_SIMILAR dependency type.
        :param L: labeling function output matrix
        :param higher_order: bool indicating whether to additionally search for higher order
            fixing and reinforcing dependencies (DEP_FIXING and DEP_REINFORCING)
        :param propensity: bool indicating whether to include LF propensity dependencies during learning
        :param threshold: minimum magnitude weight a dependency must have to be returned (in log scale), also
            regularization strength
        :param truncation: number of iterations between truncation step for regularization
        :return: collection of tuples of the format (LF 1 index, LF 2 index, dependency type),
            see snorkel.learning.constants
        """
        # Accept either a scipy sparse matrix (densified here) or ndarray.
        try:
            L = L.todense()
        except AttributeError:
            pass
        m, n = L.shape
        # Initializes data structures
        deps = set()
        # Weight layout: [0:n] accuracy, [n:2n] similarity,
        # [2n:6n] reinforcing/fixing (if higher_order), [-1] propensity.
        n_weights = 2 * n
        if higher_order:
            n_weights += 4 * n
        if propensity:
            n_weights += 1
        weights = np.zeros((n_weights,))
        joint = np.zeros((6,))
        # joint[0] = P(Y = -1, L_j = -1)
        # joint[1] = P(Y = -1, L_j = 0)
        # joint[2] = P(Y = -1, L_j = 1)
        # joint[3] = P(Y = 1, L_j = -1)
        # joint[4] = P(Y = 1, L_j = 0)
        # joint[5] = P(Y = 1, L_j = 1)
        for j in range(n):
            # Initializes weights: accuracies start at 1, dependencies at 0.
            for k in range(n):
                weights[k] = 1.0
            for k in range(n, len(weights)):
                weights[k] = 0.0
            if propensity:
                weights[len(weights) - 1] = 0.0
            # SGD fit of LF j's pairwise model; updates `weights` in place.
            _fit_deps(m, n, j, L, weights, joint, higher_order, propensity, threshold, truncation)
            # Keep only dependencies whose learned weight clears `threshold`.
            for k in range(n):
                if abs(weights[n + k]) > threshold:
                    deps.add((j, k, DEP_SIMILAR) if j < k else (k, j, DEP_SIMILAR))
                if higher_order:
                    if abs(weights[2 * n + k]) > threshold:
                        deps.add((j, k, DEP_REINFORCING))
                    if abs(weights[3 * n + k]) > threshold:
                        deps.add((k, j, DEP_REINFORCING))
                    if abs(weights[4 * n + k]) > threshold:
                        deps.add((j, k, DEP_FIXING))
                    if abs(weights[5 * n + k]) > threshold:
                        deps.add((k, j, DEP_FIXING))
        return deps
@jit(nopython=True, cache=True, nogil=True)
def _fit_deps(m, n, j, L, weights, joint, higher_order, propensity, regularization, truncation):
    # SGD fit of a pairwise model for LF j against all other LFs; updates
    # `weights` in place. Layout (see DependencySelector.select):
    #   weights[0:n]    accuracy terms
    #   weights[n:2n]   similarity (DEP_SIMILAR)
    #   weights[2n:4n]  reinforcement in/out (when higher_order)
    #   weights[4n:6n]  fixing in/out (when higher_order)
    #   weights[-1]     propensity (when enabled)
    # `joint` is scratch space for the 6-entry joint distribution over
    # (Y in {-1,1}, L_j in {-1,0,1}); L1 regularization is applied via
    # truncated gradient every `truncation` steps.
    # NOTE(review): compiled with numba nopython mode - keep the body to
    # plain loops and ndarray ops.
    step_size = 1.0 / m
    epochs = 10
    l1delta = regularization * step_size * truncation
    last_weight = len(weights) - 1
    for t in range(epochs):
        for i in range(m):
            # Processes a training example
            # First, computes joint and conditional distributions
            joint[:] = 0, 0, 0, 0, 0, 0
            for k in range(n):
                if j == k:
                    # Accuracy
                    joint[0] += weights[j]
                    joint[5] += weights[j]
                    joint[2] -= weights[j]
                    joint[3] -= weights[j]
                else:
                    if L[i, k] == 1:
                        # Accuracy
                        joint[0] -= weights[k]
                        joint[1] -= weights[k]
                        joint[2] -= weights[k]
                        joint[3] += weights[k]
                        joint[4] += weights[k]
                        joint[5] += weights[k]
                        # Similar
                        joint[2] += weights[n + k]
                        joint[5] += weights[n + k]
                        if higher_order:
                            # Reinforcement
                            joint[5] += weights[2 * n + k] + weights[3 * n + k]
                            joint[1] -= weights[2 * n + k]
                            joint[4] -= weights[2 * n + k]
                            # Fixing
                            joint[3] += weights[4 * n + k]
                            joint[1] -= weights[4 * n + k]
                            joint[4] -= weights[4 * n + k]
                            joint[0] += weights[5 * n + k]
                    elif L[i, k] == -1:
                        # Accuracy
                        joint[0] += weights[k]
                        joint[1] += weights[k]
                        joint[2] += weights[k]
                        joint[3] -= weights[k]
                        joint[4] -= weights[k]
                        joint[5] -= weights[k]
                        # Similar
                        joint[0] += weights[n + k]
                        joint[3] += weights[n + k]
                        if higher_order:
                            # Reinforcement
                            joint[0] += weights[2 * n + k] + weights[3 * n + k]
                            joint[1] -= weights[2 * n + k]
                            joint[4] -= weights[2 * n + k]
                            # Fixing
                            joint[2] += weights[4 * n + k]
                            joint[1] -= weights[4 * n + k]
                            joint[4] -= weights[4 * n + k]
                            joint[5] += weights[5 * n + k]
                    else:
                        # Similar
                        joint[1] += weights[n + k]
                        joint[4] += weights[n + k]
                        if higher_order:
                            # Reinforcement
                            joint[0] -= weights[3 * n + k]
                            joint[2] -= weights[3 * n + k]
                            joint[3] -= weights[3 * n + k]
                            joint[5] -= weights[3 * n + k]
                            # Fixing
                            joint[0] -= weights[5 * n + k]
                            joint[2] -= weights[5 * n + k]
                            joint[3] -= weights[5 * n + k]
                            joint[5] -= weights[5 * n + k]
            if propensity:
                joint[0] += weights[last_weight]
                joint[2] += weights[last_weight]
                joint[3] += weights[last_weight]
                joint[5] += weights[last_weight]
            # Softmax: log-potentials -> normalized joint probabilities.
            # (Rebinds `joint` locally; the caller's array is scratch only.)
            joint = np.exp(joint)
            joint /= np.sum(joint)
            marginal_pos = np.sum(joint[3:6])
            marginal_neg = np.sum(joint[0:3])
            # Conditionals P(Y | L_j = observed value).
            if L[i, j] == 1:
                conditional_pos = joint[5] / (joint[2] + joint[5])
                conditional_neg = joint[2] / (joint[2] + joint[5])
            elif L[i, j] == -1:
                conditional_pos = joint[3] / (joint[0] + joint[3])
                conditional_neg = joint[0] / (joint[0] + joint[3])
            else:
                conditional_pos = joint[4] / (joint[1] + joint[4])
                conditional_neg = joint[1] / (joint[1] + joint[4])
            # Second, takes likelihood gradient step
            for k in range(n):
                if j == k:
                    # Accuracy
                    weights[j] -= step_size * (joint[5] + joint[0] - joint[2] - joint[3])
                    if L[i, j] == 1:
                        weights[j] += step_size * (conditional_pos - conditional_neg)
                    elif L[i, j] == -1:
                        weights[j] += step_size * (conditional_neg - conditional_pos)
                else:
                    if L[i, k] == 1:
                        # Accuracy
                        weights[k] -= step_size * (marginal_pos - marginal_neg - conditional_pos + conditional_neg)
                        # Similar
                        weights[n + k] -= step_size * (joint[2] + joint[5])
                        if L[i, j] == 1:
                            weights[n + k] += step_size
                        if higher_order:
                            # Incoming reinforcement
                            weights[2 * n + k] -= step_size * (joint[5] - joint[1] - joint[4])
                            if L[i, j] == 1:
                                weights[2 * n + k] += step_size * conditional_pos
                            elif L[i, j] == 0:
                                weights[2 * n + k] += step_size * -1
                            # Outgoing reinforcement
                            weights[3 * n + k] -= step_size * joint[5]
                            if L[i, j] == 1:
                                weights[3 * n + k] += step_size * conditional_pos
                            # Incoming fixing
                            weights[4 * n + k] -= step_size * (joint[3] - joint[1] - joint[4])
                            if L[i, j] == -1:
                                weights[4 * n + k] += step_size * conditional_pos
                            elif L[i, j] == 0:
                                weights[4 * n + k] += step_size * -1
                            # Outgoing fixing
                            weights[5 * n + k] -= step_size * joint[0]
                            if L[i, j] == -1:
                                weights[5 * n + k] += step_size * conditional_neg
                    elif L[i, k] == -1:
                        # Accuracy
                        weights[k] -= step_size * (marginal_neg - marginal_pos - conditional_neg + conditional_pos)
                        # Similar
                        weights[n + k] -= step_size * (joint[0] + joint[3])
                        if L[i, j] == -1:
                            weights[n + k] += step_size
                        if higher_order:
                            # Incoming reinforcement
                            weights[2 * n + k] -= step_size * (joint[0] - joint[1] - joint[4])
                            if L[i, j] == -1:
                                weights[2 * n + k] += step_size * conditional_neg
                            elif L[i, j] == 0:
                                weights[2 * n + k] += step_size * -1
                            # Outgoing reinforcement
                            weights[3 * n + k] -= step_size * joint[0]
                            if L[i, j] == -1:
                                weights[3 * n + k] += step_size * conditional_neg
                            # Incoming fixing
                            weights[4 * n + k] -= step_size * (joint[2] - joint[1] - joint[4])
                            if L[i, j] == 1:
                                weights[4 * n + k] += step_size * conditional_neg
                            elif L[i, j] == 0:
                                weights[4 * n + k] += step_size * -1
                            # Outgoing fixing
                            weights[5 * n + k] -= step_size * joint[5]
                            if L[i, j] == 1:
                                weights[5 * n + k] += step_size * conditional_pos
                    else:
                        # Similar
                        weights[n + k] -= step_size * (joint[1] + joint[4])
                        if L[i, j] == 0:
                            weights[n + k] += step_size
                        if higher_order:
                            # No effect of incoming reinforcement
                            # Outgoing reinforcement
                            weights[3 * n + k] -= step_size * (-1 * joint[0] - joint[2] - joint[3] - joint[5])
                            if L[i, j] != 0:
                                weights[3 * n + k] += step_size * -1
                            # No effect of incoming fixing
                            # Outgoing fixing
                            weights[5 * n + k] -= step_size * (-1 * joint[0] - joint[2] - joint[3] - joint[5])
                            if L[i, j] != 0:
                                weights[5 * n + k] += step_size * -1
            if propensity:
                weights[last_weight] -= step_size * (joint[0] + joint[2] + joint[3] + joint[5])
                if L[i, j] != 0:
                    weights[last_weight] += step_size
            # Third, takes regularization gradient step
            # (truncated-gradient L1: shrink toward zero, never cross it)
            if (t * m + i) % truncation == 0:
                for k in range(len(weights)):
                    weights[k] = max(0, weights[k] - l1delta) if weights[k] > 0 else min(0, weights[k] + l1delta)
| |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from os import listdir, unlink
from os.path import join as path_join
from unittest import main
from uuid import uuid4
from swiftclient import client
from swift.common import direct_client
from swift.common.exceptions import ClientException
from swift.common.utils import hash_path, readconf
from swift.obj.diskfile import write_metadata, read_metadata, get_data_dir
from test.probe.common import ReplProbeTest
RETRIES = 5

def get_data_file_path(obj_dir):
    """Return the path of the last (reverse-sorted first) file in obj_dir.

    Returns ``None`` when the directory is empty.  Because a PUT can
    report success while a replica is still settling (e.g. only 2 of 3
    nodes done), the directory listing is retried up to ``RETRIES`` times
    with a one-second pause before the error is re-raised.
    """
    entries = []
    attempt = 0
    while True:
        try:
            entries = sorted(listdir(obj_dir), reverse=True)
            break
        except Exception:
            if attempt >= RETRIES:
                raise
            attempt += 1
            time.sleep(1)
    if entries:
        return path_join(obj_dir, entries[0])
    return None
class TestObjectFailures(ReplProbeTest):
    """Probe tests: corrupted on-disk objects must be quarantined (404)."""
    def _setup_data_file(self, container, obj, data):
        """Create container/obj with ``data`` and locate its on-disk file.

        Returns (primary node, partition, path to the object's data file).
        """
        client.put_container(self.url, self.token, container,
                             headers={'X-Storage-Policy':
                                      self.policy.name})
        client.put_object(self.url, self.token, container, obj, data)
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        self.assertEqual(odata, data)
        opart, onodes = self.object_ring.get_nodes(
            self.account, container, obj)
        onode = onodes[0]
        # Map the node's port to its config index (ports 60x0 by convention).
        # NOTE(review): py2-era "/" division; under py3 this yields a float
        # key - appears to rely on dict lookup treating 1.0 == 1. Confirm.
        node_id = (onode['port'] - 6000) / 10
        device = onode['device']
        hash_str = hash_path(self.account, container, obj)
        obj_server_conf = readconf(self.configs['object-server'][node_id])
        devices = obj_server_conf['app:object-server']['devices']
        obj_dir = '%s/%s/%s/%s/%s/%s/' % (devices, device,
                                          get_data_dir(self.policy),
                                          opart, hash_str[-3:], hash_str)
        data_file = get_data_file_path(obj_dir)
        return onode, opart, data_file
    def run_quarantine(self):
        """Corrupt the stored ETag; a direct GET must quarantine (404)."""
        container = 'container-%s' % uuid4()
        obj = 'object-%s' % uuid4()
        onode, opart, data_file = self._setup_data_file(container, obj,
                                                        'VERIFY')
        # Stash the on disk data for future comparison - this may not equal
        # 'VERIFY' if for example the proxy has crypto enabled
        backend_data = direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        metadata = read_metadata(data_file)
        metadata['ETag'] = 'badetag'
        write_metadata(data_file, metadata)
        # First GET still succeeds but detects the mismatch and quarantines.
        odata = direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        self.assertEqual(odata, backend_data)
        try:
            direct_client.direct_get_object(
                onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
            raise Exception("Did not quarantine object")
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
    def run_quarantine_range_etag(self):
        """Ranged GETs succeed despite a bad ETag, then a full GET 404s."""
        container = 'container-range-%s' % uuid4()
        obj = 'object-range-%s' % uuid4()
        onode, opart, data_file = self._setup_data_file(container, obj,
                                                        'RANGE')
        # Stash the on disk data for future comparison - this may not equal
        # 'VERIFY' if for example the proxy has crypto enabled
        backend_data = direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        metadata = read_metadata(data_file)
        metadata['ETag'] = 'badetag'
        write_metadata(data_file, metadata)
        base_headers = {'X-Backend-Storage-Policy-Index': self.policy.idx}
        for header, result in [({'Range': 'bytes=0-2'}, backend_data[0:3]),
                               ({'Range': 'bytes=1-11'}, backend_data[1:]),
                               ({'Range': 'bytes=0-11'}, backend_data)]:
            req_headers = base_headers.copy()
            req_headers.update(header)
            odata = direct_client.direct_get_object(
                onode, opart, self.account, container, obj,
                headers=req_headers)[-1]
            self.assertEqual(odata, result)
        try:
            direct_client.direct_get_object(
                onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
            raise Exception("Did not quarantine object")
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
    def run_quarantine_zero_byte_get(self):
        """A metadata-only (zero-byte) data file must 404 on direct GET."""
        container = 'container-zbyte-%s' % uuid4()
        obj = 'object-zbyte-%s' % uuid4()
        onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
        metadata = read_metadata(data_file)
        # Replace the data file with an empty one carrying the old metadata.
        unlink(data_file)
        with open(data_file, 'w') as fpointer:
            write_metadata(fpointer, metadata)
        try:
            direct_client.direct_get_object(
                onode, opart, self.account, container, obj, conn_timeout=1,
                response_timeout=1, headers={'X-Backend-Storage-Policy-Index':
                                             self.policy.idx})
            raise Exception("Did not quarantine object")
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
    def run_quarantine_zero_byte_head(self):
        """Same as the zero-byte GET case, but via direct HEAD."""
        container = 'container-zbyte-%s' % uuid4()
        obj = 'object-zbyte-%s' % uuid4()
        onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
        metadata = read_metadata(data_file)
        unlink(data_file)
        with open(data_file, 'w') as fpointer:
            write_metadata(fpointer, metadata)
        try:
            direct_client.direct_head_object(
                onode, opart, self.account, container, obj, conn_timeout=1,
                response_timeout=1, headers={'X-Backend-Storage-Policy-Index':
                                             self.policy.idx})
            raise Exception("Did not quarantine object")
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
    def run_quarantine_zero_byte_post(self):
        """Same as the zero-byte GET case, but via direct POST."""
        container = 'container-zbyte-%s' % uuid4()
        obj = 'object-zbyte-%s' % uuid4()
        onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
        metadata = read_metadata(data_file)
        unlink(data_file)
        with open(data_file, 'w') as fpointer:
            write_metadata(fpointer, metadata)
        try:
            headers = {'X-Object-Meta-1': 'One', 'X-Object-Meta-Two': 'Two',
                       'X-Backend-Storage-Policy-Index': self.policy.idx}
            direct_client.direct_post_object(
                onode, opart, self.account,
                container, obj,
                headers=headers,
                conn_timeout=1,
                response_timeout=1)
            raise Exception("Did not quarantine object")
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
    def test_runner(self):
        # Run all scenarios sequentially in one test for probe efficiency.
        self.run_quarantine()
        self.run_quarantine_range_etag()
        self.run_quarantine_zero_byte_get()
        self.run_quarantine_zero_byte_head()
        self.run_quarantine_zero_byte_post()
# Allow running this probe test module directly via the unittest runner.
if __name__ == '__main__':
    main()
| |
import numpy
from chainer import backend
from chainer import function_node
import chainer.functions
import chainer.utils
from chainer.utils import type_check
import chainerx
class SelectorBase(function_node.FunctionNode):
    """Select an array element from a given axis or set of axes.

    Base for Max/Min: subclasses supply ``_fwd``; the backward pass routes
    the gradient only to positions equal to the selected value.
    """
    def __init__(self, axis=None, keepdims=False):
        self.keepdims = keepdims
        # Normalize axis to None or a tuple of unique ints.
        if axis is None:
            self.axis = None
        elif isinstance(axis, int):
            self.axis = (axis,)
        elif isinstance(axis, tuple) and all(isinstance(a, int) for a in axis):
            if len(set(axis)) != len(axis):
                raise ValueError('duplicate value in axis: ({})'.format(
                    ', '.join(map(str, axis))))
            self.axis = axis
        else:
            raise TypeError('None, int or tuple of int are required')
    def _fwd(self, x, xp):
        # Subclass hook: compute the reduction with the given array module.
        raise NotImplementedError('_fwd should be implemented in sub-class.')
    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('x',))
        type_check.expect(in_types[0].dtype.kind == 'f')
        # Each axis (positive or negative) must be within the input's rank.
        if self.axis is not None:
            for axis in self.axis:
                if axis >= 0:
                    type_check.expect(
                        axis < in_types[0].ndim,
                    )
                else:
                    type_check.expect(
                        -axis - 1 < in_types[0].ndim,
                    )
    def forward(self, x):
        # Input and output are both needed by backward (equality mask).
        self.retain_inputs((0,))
        self.retain_outputs((0,))
        xp = backend.get_array_module(*x)
        return xp.asarray(self._fwd(x[0], xp)),
    def backward(self, indexes, gy):
        x = self.get_retained_inputs()[0]
        y = self.get_retained_outputs()[0]
        if self.axis is None:
            axis = range(x.ndim)
        else:
            # Normalize negative axes to their positive equivalents.
            axis = [ax % x.ndim for ax in self.axis]
        # Add broadcastable dimensions to y and gy
        # for each one that was reduced in the forward operation
        shape = [s if ax not in axis else 1 for ax, s in enumerate(x.shape)]
        gy = gy[0].reshape(shape)
        y = y.reshape(shape)
        # Compute the gradient: route gy to every element that equals the
        # selected extremum (ties all receive the gradient).
        cond = (x.data == y.data)
        gy = chainer.functions.broadcast_to(gy, cond.shape)
        return gy * cond,
class Max(SelectorBase):
    """Max reduction; gradients are handled by SelectorBase.backward."""
    def forward_chainerx(self, x):
        return chainerx.amax(x[0], axis=self.axis, keepdims=self.keepdims),
    def _fwd(self, x, xp):
        return xp.amax(x, axis=self.axis, keepdims=self.keepdims)
class Min(SelectorBase):
    """Min reduction; gradients are handled by SelectorBase.backward."""
    def forward_chainerx(self, x):
        return chainerx.amin(x[0], axis=self.axis, keepdims=self.keepdims),
    def _fwd(self, x, xp):
        return xp.amin(x, axis=self.axis, keepdims=self.keepdims)
class IndexSelectorBase(function_node.FunctionNode):
    """Select index of an array element from a given axis.

    Base for ArgMax/ArgMin.  The output is an integer index, so the
    backward pass returns no gradient.
    """
    def __init__(self, axis=None):
        # Unlike SelectorBase, only a single int axis (or None) is allowed.
        if axis is None:
            self.axis = None
        elif isinstance(axis, int):
            self.axis = axis
        else:
            raise TypeError('None or int are required')
    def _fwd(self, x, xp):
        # Subclass hook: compute the arg-reduction with the array module.
        raise NotImplementedError('_fwd should be implemented in sub-class.')
    def check_type_forward(self, in_types):
        type_check.expect(
            in_types.size() == 1,
            in_types[0].dtype.kind == 'f'
        )
        # The axis (positive or negative) must be within the input's rank.
        if self.axis is not None:
            if self.axis >= 0:
                type_check.expect(
                    self.axis < in_types[0].ndim,
                )
            else:
                type_check.expect(
                    -self.axis - 1 < in_types[0].ndim,
                )
    def forward(self, x):
        xp = backend.get_array_module(*x)
        return xp.asarray(self._fwd(x[0], xp)),
    def backward(self, indexes, grad_outputs):
        # Indices are not differentiable with respect to the input.
        return None,
class ArgMin(IndexSelectorBase):
    """Argmin reduction; returns int32 indices, no gradient."""
    def forward_chainerx(self, x):
        return chainerx.argmin(x[0], axis=self.axis).astype(numpy.int32),
    def _fwd(self, x, xp):
        return xp.argmin(x, axis=self.axis).astype(numpy.int32)
class ArgMax(IndexSelectorBase):
    """Argmax reduction; returns int32 indices, no gradient."""
    def forward_chainerx(self, x):
        return chainerx.argmax(x[0], axis=self.axis).astype(numpy.int32),
    def _fwd(self, x, xp):
        return xp.argmax(x, axis=self.axis).astype(numpy.int32)
def max(x, axis=None, keepdims=False):
    """Maximum of array elements over a given axis.

    .. note:: Shadows the built-in ``max`` within this module.

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Array to be maximized.
        axis (None, int, or tuple of int): Axis over which a max is performed.
            The default (``axis = None``) performs a max over all the
            dimensions of the input array.
        keepdims (bool): If ``True``, the reduced axes are kept in the
            result as dimensions of size one.
    Returns:
        ~chainer.Variable: Output variable.
    """
    return Max(axis, keepdims).apply((x,))[0]
def min(x, axis=None, keepdims=False):
    """Minimum of array elements over a given axis.

    .. note:: Shadows the built-in ``min`` within this module.

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Array to be minimized.
        axis (None, int, or tuple of int): Axis over which a min is performed.
            The default (``axis = None``) performs a min over all the
            dimensions of the input array.
        keepdims (bool): If ``True``, the reduced axes are kept in the
            result as dimensions of size one.
    Returns:
        ~chainer.Variable: Output variable.
    """
    return Min(axis, keepdims).apply((x,))[0]
def argmax(x, axis=None):
    """Returns index which holds maximum of array elements over a given axis.

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Array to find maximum elements.
        axis (None or int): Axis over which a max is performed.
            The default (``axis = None``) performs a max over all the
            dimensions of the input array.
    Returns:
        ~chainer.Variable: Output variable (int32 indices; not
        differentiable with respect to ``x``).
    """
    return ArgMax(axis).apply((x,))[0]
def argmin(x, axis=None):
    """Returns index which holds minimum of array elements over a given axis.

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Array to find minimum elements.
        axis (None or int): Axis over which a min is performed.
            The default (``axis = None``) performs a min over all the
            dimensions of the input array.
    Returns:
        ~chainer.Variable: Output variable (int32 indices; not
        differentiable with respect to ``x``).
    """
    return ArgMin(axis).apply((x,))[0]
| |
"""
paper_experiments.py
Uses other modules to re-produce the results and paper graphs contained
in the paper. Authors wanting to reproduce or compare to our algorithm can
run the experiments by executing:
python paper_experiments.py
from the command line
Paper:
Mark Fuge, Josh Stroud, Alice Agogino. "Automatically Inferring Metrics for Design Creativity," in Proceedings of ASME 2013 International Design Engineering Technical Conferences & Computers and Information in Engineering Conference, August 4-2, 2013, Portland, USA
http://www.markfuge.com/papers/Fuge_DETC2013-12620.pdf
Authors: Josh Stroud and Mark Fuge
"""
import os
import numpy as np
import pylab as pl
from variety_model import *
# Where to save plots
plot_path = "plots/"
# For output plots
def setfont():
    """Set a 20-point, normal-weight serif font for all plot text."""
    pl.matplotlib.rc('font', family='serif', weight='normal', size=20)
def genConvergencePlots(metric="shah", numLevels = 4, cover_type = None):
    ''' Generates the sensitivity/convergence plots presented in the paper
        (Figs. 3-5): prediction accuracy versus number of A/B training
        comparisons, at several rater-noise coefficients, for the given
        variety metric ("shah" or "verhaegen") and cover type.

        Saves the training and ground-truth convergence figures as PDFs
        and returns (xmat, ymat): the training-set sizes and the matching
        noisy-label accuracies (rows: sample sizes, cols: noise coeffs).
    '''
    x = []
    y = []
    xplots = []
    yplots = []
    ### SCRIPT PARAMETERS
    errorCoeffs = [0, 1, 2, 3, 5]
    plotFlag = 1
    numRepeat = 3 # Amount of resampling - increasing this will improve the
                  # statistical reliability of the resulting accuracy estimates,
                  # at the cost of additional computation time.
    numSamples = 50 # Sets the x-scale fidelity of the convergence plots (Figs. 3-5)
                    # Increasing this will increase the number of experiments conducted
                    # thus increasing run time
    # Init some array storage
    xmat = np.zeros([numSamples])
    ymat = np.zeros([numSamples,len(errorCoeffs)])
    yerr = np.zeros([numSamples,len(errorCoeffs)])
    ytmat = np.zeros([numSamples,len(errorCoeffs)])
    yterr = np.zeros([numSamples,len(errorCoeffs)])
    # close any open plot windows
    pl.close('all')
    # Pick the paper's default cover type for each metric when unspecified.
    if not cover_type:
        if metric == 'shah':
            cover_type = 'set'
        elif metric == 'verhaegen':
            cover_type = 'prob'
    print "generating convergence plots"
    print "using",metric,"variety metric, numLevels =",numLevels
    if(cover_type):
        print "Cover Type:",cover_type
    else:
        print "Cover Type: Default"
    # One-time generation of all the random tree samples.
    # We'll then partion up the dataset such that we use only the required
    # fraction of training samples for the model.
    max_comparisons = 10000
    numConceptsPerTree = 10
    # Generates the data
    print "Generating Data Samples..."
    X,Y,Ytrue = generate_comparison_data(numConceptsPerTree = numConceptsPerTree,
                                         numComparisons = max_comparisons,
                                         metric = metric,
                                         cover_type = cover_type,
                                         E = errorCoeffs,
                                         numLevels = numLevels)
    # Now we have generated all of the simulated concept sets, as well as
    # All of the noisy ratings and true ratings. We can now run the experiments
    print "Running Experiments..."
    # This will determine the range of comparisons we will test over.
    xmat = np.round(np.linspace(0 , 1500, numSamples+1))
    xmat = xmat[1:]
    # Runs the model Training and Evaluation
    for j, numTraining in enumerate(xmat):
        numTraining = int(numTraining)
        if(j % 10 == 0):
            print "Processing sample",j,"/",numSamples
        # Run the model
        errScores,gterrScores = runSklearn(X,Y,Ytrue, numTraining,numRetest=numRepeat)
        # errScores now contains an array of tuples (mean, std) of the scores across
        # numRetest runs of the data
        if(plotFlag == 1):
            for i,e in enumerate(errorCoeffs):
                ymat[j,i] = errScores[i][0]
                yerr[j,i] = errScores[i][1]
                ytmat[j,i] = gterrScores[i][0]
                yterr[j,i] = gterrScores[i][1]
    # Print out a sample of accuracy point estimates
    print "Final accuracy for metric: "+metric
    for i in range(0,numSamples,numSamples/10)[1:]:
        print "n: %d\tacc: %.1f"%(xmat[i],100*ymat[i,0])
    # Now do the plotting
    if(plotFlag == 1):
        # Figure 1: accuracy against the noisy training labels.
        method_fig = pl.figure(metric+' Training Convergence')
        pl.hold(True)
        x = xmat
        for i,e in enumerate(errorCoeffs):
            #pl.plot(x,ymat[:,i],'-',label='E = ' + str(e))
            # uncomment for 95% confidence interval
            pl.errorbar(x,ymat[:,i],yerr[:,i]*1.96,label=r'$\sigma$'+': ' + str(e))
        pl.hold(False)
        pl.xlabel("Number of A/B Comparisons used in training")
        pl.ylabel("Noisy Label Prediction accuracy")
        pl.title(metric+" Training, levels:"+ str(numLevels)+", cover:"+cover_type)
        pl.ylim((.5,1.0))
        pl.xlim((0,x[-1]))
        pl.legend(loc=4,prop={'size':14})
        # uncomment below if you want interactive plotting
        #pl.show()
        pl.savefig(plot_path +
                   "metric=" + metric +
                   "_numLevels=" + str(numLevels) +
                   "_cover=" + cover_type +
                   "_training_convergence.pdf")
        # Figure 2: accuracy against the noise-free ground-truth labels.
        method_fig = pl.figure(metric+' Ground Truth Convergence')
        pl.hold(True)
        for i,e in enumerate(errorCoeffs):
            pl.plot(x,ytmat[:,i],label=r'$\sigma$'+': ' + str(e))
            # uncomment for 95% confidence interval
            #pl.errorbar(x,ytmat[:,i],yterr[:,i]*1.96,label=r'$\sigma$'+': ' + str(e))
        pl.hold(False)
        pl.xlabel("Number of A/B Comparisons used in training")
        pl.ylabel("Ground Truth Prediction accuracy")
        pl.title(metric+" Truth, levels:"+ str(numLevels)+", cover:"+cover_type)
        pl.ylim((.5,1.0))
        pl.xlim((0,x[-1]))
        pl.legend(loc=4,prop={'size':14})
        # uncomment below if you want interactive plotting
        #pl.show()
        pl.savefig(plot_path +
                   "metric=" + metric +
                   "_numLevels=" + str(numLevels) +
                   "_cover=" + cover_type +
                   "_groundtruth_convergence.pdf")
    print "Completed Convergence Experiment!\n"
    return xmat,ymat
def genExperimentalResults():
    ''' Generates the main experimental results and figures used in the paper
    '''
    # Run the convergence experiment for each metric / cover-type pairing.
    shah_set_x, shah_set_y = genConvergencePlots("shah",cover_type="set")
    shah_prob_x, shah_prob_y = genConvergencePlots("shah",cover_type="prob")
    verh_prob_x, verh_prob_y = genConvergencePlots("verhaegen",cover_type="prob")
    verh_set_x, verh_set_y = genConvergencePlots("verhaegen",cover_type="set")
    # Overlay all four convergence curves on one comparison figure.
    pl.figure('Convergence of different metrics')
    pl.hold(True)
    curves = [
        (shah_set_x, shah_set_y, 'k-', "Shah (Set)", 3),
        (shah_prob_x, shah_prob_y, 'k-', "Shah (Prob)", 1),
        (verh_set_x, verh_set_y, 'b--', "Verhaegen (Set)", 3),
        (verh_prob_x, verh_prob_y, 'b--', "Verhaegen (Prob)", 1),
    ]
    for cx, cy, fmt, lbl, width in curves:
        pl.plot(cx, cy[:,0], fmt, label=lbl, linewidth=width)
    pl.hold(False)
    pl.xlabel("Number of A/B Comparisons used in training")
    pl.ylabel("Prediction accuracy")
    pl.title("Comparison of various metrics")
    pl.ylim((.5,1.0))
    pl.xlim((0, shah_set_x[-1]))
    pl.legend(loc=4)
    # Uncomment if you want interactive plotting
    #pl.show()
    pl.savefig(plot_path+"metric_convergence_comparison.pdf")
def genSensitivityResults():
    ''' Generates sensitivity results regarding number of tree levels and how
    increasing the number of estimation parameters affects convergence.
    Didn't have space to include these figures in the conference paper.
    '''
    # Sweep over tree depths; each run returns (x, y) convergence arrays.
    level_settings = (4, 10, 25, 50)
    results = [genConvergencePlots("shah", numLevels=lvl)
               for lvl in level_settings]
    pl.figure('Convergence of different metrics')
    pl.hold(True)
    # BUG FIX: the legend previously claimed levels 2/4/10/50 while the data
    # was generated with numLevels = 4/10/25/50.  Derive each label from the
    # setting actually used so the legend matches the plotted data.
    styles = ('k-', 'k-', 'b--', 'b--')
    widths = (3, 1, 3, 1)
    for (sx, sy), lvl, fmt, width in zip(results, level_settings,
                                         styles, widths):
        pl.plot(sx, sy[:,0], fmt, label="# Shah Levels = " + str(lvl),
                linewidth=width)
    pl.hold(False)
    pl.xlabel("Number of A/B Comparisons used in training")
    pl.ylabel("Prediction accuracy")
    pl.title("Comparison of various metrics")
    pl.ylim((.5,1.0))
    pl.xlim((0, results[0][0][-1]))
    pl.legend(loc=4)
    # Uncomment if you want interactive plotting
    #pl.show()
    pl.savefig(plot_path+"sensitivity_convergence_comparison.pdf")
# script below to generate plots
if __name__ == "__main__":
    setfont()
    # Ensure the output directory for the generated PDF figures exists.
    if not os.path.exists(plot_path):
        os.makedirs(plot_path)
    # Generate the paper figures, then the supplementary sensitivity figures.
    genExperimentalResults()
    genSensitivityResults()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A driver for XenServer or Xen Cloud Platform.
**Related Flags**
:xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform.
:xenapi_connection_username: Username for connection to XenServer/Xen Cloud
Platform (default: root).
:xenapi_connection_password: Password for connection to XenServer/Xen Cloud
Platform.
:target_host: the iSCSI Target Host IP address, i.e. the IP
address for the nova-volume host
:target_port: iSCSI Target Port, 3260 Default
:iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack'
**Variable Naming Scheme**
- suffix "_ref" for opaque references
- suffix "_uuid" for UUIDs
- suffix "_rec" for record objects
"""
import contextlib
import cPickle as pickle
import urlparse
import xmlrpclib
from eventlet import queue
from eventlet import timeout
from oslo.config import cfg
from nova import context
from nova import exception
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.xenapi import host
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)
# Configuration options for the XenAPI compute driver; registered on the
# global CONF object below.
xenapi_opts = [
    cfg.StrOpt('xenapi_connection_url',
               default=None,
               help='URL for connection to XenServer/Xen Cloud Platform. '
                    'Required if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('xenapi_connection_username',
               default='root',
               help='Username for connection to XenServer/Xen Cloud Platform. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('xenapi_connection_password',
               default=None,
               help='Password for connection to XenServer/Xen Cloud Platform. '
                    'Used only if compute_driver=xenapi.XenAPIDriver',
               secret=True),
    cfg.IntOpt('xenapi_connection_concurrent',
               default=5,
               help='Maximum number of concurrent XenAPI connections. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.FloatOpt('xenapi_vhd_coalesce_poll_interval',
                 default=5.0,
                 help='The interval used for polling of coalescing vhds. '
                      'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.BoolOpt('xenapi_check_host',
                default=True,
                help='Ensure compute service is running on host XenAPI '
                     'connects to.'),
    cfg.IntOpt('xenapi_vhd_coalesce_max_attempts',
               default=5,
               help='Max number of times to poll for VHD to coalesce. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('xenapi_sr_base_path',
               default='/var/run/sr-mount',
               help='Base path to the storage repository'),
    cfg.StrOpt('target_host',
               default=None,
               help='iSCSI Target Host'),
    cfg.StrOpt('target_port',
               default='3260',
               help='iSCSI Target Port, 3260 Default'),
    cfg.StrOpt('iqn_prefix',
               default='iqn.2010-10.org.openstack',
               help='IQN Prefix'),
    # NOTE(sirp): This is a work-around for a bug in Ubuntu Maverick,
    # when we pull support for it, we should remove this
    cfg.BoolOpt('xenapi_remap_vbd_dev',
                default=False,
                help='Used to enable the remapping of VBD dev '
                     '(Works around an issue in Ubuntu Maverick)'),
    cfg.StrOpt('xenapi_remap_vbd_dev_prefix',
               default='sd',
               help='Specify prefix to remap VBD dev to '
                    '(ex. /dev/xvdb -> /dev/sdb)'),
    cfg.IntOpt('xenapi_login_timeout',
               default=10,
               help='Timeout in seconds for XenAPI login.'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_opts)
CONF.import_opt('host', 'nova.netconf')
class XenAPIDriver(driver.ComputeDriver):
    """A connection to XenServer or Xen Cloud Platform."""
    def __init__(self, virtapi, read_only=False):
        # NOTE: ``read_only`` is part of the driver-interface signature but is
        # not used by this driver.
        super(XenAPIDriver, self).__init__(virtapi)
        url = CONF.xenapi_connection_url
        username = CONF.xenapi_connection_username
        password = CONF.xenapi_connection_password
        # The username option has a default ('root'), so only the URL and the
        # password are validated here.
        if not url or password is None:
            raise Exception(_('Must specify xenapi_connection_url, '
                              'xenapi_connection_username (optionally), and '
                              'xenapi_connection_password to use '
                              'compute_driver=xenapi.XenAPIDriver'))
        self._session = XenAPISession(url, username, password, self.virtapi)
        self._volumeops = volumeops.VolumeOps(self._session)
        self._host_state = None
        self._host = host.Host(self._session, self.virtapi)
        self._vmops = vmops.VMOps(self._session, self.virtapi)
        # Volume-connector data is resolved lazily in get_volume_connector().
        self._initiator = None
        self._hypervisor_hostname = None
        self._pool = pool.ResourcePool(self._session, self.virtapi)
    @property
    def host_state(self):
        """Lazily create and cache the HostState helper for this session."""
        if not self._host_state:
            self._host_state = host.HostState(self._session)
        return self._host_state
    def init_host(self, host):
        """Run host start-up checks and clean up leftover attached VDIs."""
        if CONF.xenapi_check_host:
            vm_utils.ensure_correct_host(self._session)
        try:
            vm_utils.cleanup_attached_vdis(self._session)
        except Exception:
            # Best-effort cleanup: log the failure and let host init proceed.
            LOG.exception(_('Failure while cleaning up attached VDIs'))
    def list_instances(self):
        """List VM instances."""
        return self._vmops.list_instances()
    def list_instance_uuids(self):
        """Get the list of nova instance uuids for VMs found on the
        hypervisor.
        """
        return self._vmops.list_instance_uuids()
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        """Create VM instance."""
        self._vmops.spawn(context, instance, image_meta, injected_files,
                          admin_password, network_info, block_device_info)
    def confirm_migration(self, migration, instance, network_info):
        """Confirms a resize, destroying the source VM."""
        # TODO(Vek): Need to pass context in for access to auth_token
        self._vmops.confirm_migration(migration, instance, network_info)
    def finish_revert_migration(self, instance, network_info,
                                block_device_info=None, power_on=True):
        """Finish reverting a resize."""
        # NOTE(vish): Xen currently does not use network info.
        self._vmops.finish_revert_migration(instance, block_device_info,
                                            power_on)
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance=False,
                         block_device_info=None, power_on=True):
        """Completes a resize, turning on the migrated instance."""
        self._vmops.finish_migration(context, migration, instance, disk_info,
                                     network_info, image_meta, resize_instance,
                                     block_device_info, power_on)
    def snapshot(self, context, instance, image_id, update_task_state):
        """Create snapshot from a running VM instance."""
        self._vmops.snapshot(context, instance, image_id, update_task_state)
    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        """Reboot VM instance."""
        # context, network_info and block_device_info are accepted for
        # interface compatibility but not needed by the XenAPI reboot path.
        self._vmops.reboot(instance, reboot_type,
                           bad_volumes_callback=bad_volumes_callback)
    def set_admin_password(self, instance, new_pass):
        """Set the root/admin password on the VM instance."""
        self._vmops.set_admin_password(instance, new_pass)
    def inject_file(self, instance, b64_path, b64_contents):
        """Create a file on the VM instance. The file path and contents
        should be base64-encoded.
        """
        self._vmops.inject_file(instance, b64_path, b64_contents)
    def change_instance_metadata(self, context, instance, diff):
        """Apply a diff to the instance metadata."""
        self._vmops.change_instance_metadata(instance, diff)
    def destroy(self, instance, network_info, block_device_info=None,
                destroy_disks=True):
        """Destroy VM instance."""
        self._vmops.destroy(instance, network_info, block_device_info,
                            destroy_disks)
    def pause(self, instance):
        """Pause VM instance."""
        self._vmops.pause(instance)
    def unpause(self, instance):
        """Unpause paused VM instance."""
        self._vmops.unpause(instance)
    def migrate_disk_and_power_off(self, context, instance, dest,
                                   instance_type, network_info,
                                   block_device_info=None):
        """Transfers the VHD of a running instance to another host, then shuts
        off the instance copies over the COW disk
        """
        # NOTE(vish): Xen currently does not use network info.
        return self._vmops.migrate_disk_and_power_off(context, instance,
                    dest, instance_type, block_device_info)
    def suspend(self, instance):
        """suspend the specified instance."""
        self._vmops.suspend(instance)
    def resume(self, instance, network_info, block_device_info=None):
        """resume the specified instance."""
        self._vmops.resume(instance)
    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        """Rescue the specified instance."""
        self._vmops.rescue(context, instance, network_info, image_meta,
                           rescue_password)
    def unrescue(self, instance, network_info):
        """Unrescue the specified instance."""
        self._vmops.unrescue(instance)
    def power_off(self, instance):
        """Power off the specified instance."""
        self._vmops.power_off(instance)
    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        """Power on the specified instance."""
        self._vmops.power_on(instance)
    def soft_delete(self, instance):
        """Soft delete the specified instance."""
        self._vmops.soft_delete(instance)
    def restore(self, instance):
        """Restore the specified instance."""
        self._vmops.restore(instance)
    def poll_rebooting_instances(self, timeout, instances):
        """Poll for rebooting instances."""
        self._vmops.poll_rebooting_instances(timeout, instances)
    def reset_network(self, instance):
        """reset networking for specified instance."""
        self._vmops.reset_network(instance)
    def inject_network_info(self, instance, network_info):
        """inject network info for specified instance."""
        self._vmops.inject_network_info(instance, network_info)
    def plug_vifs(self, instance_ref, network_info):
        """Plug VIFs into networks."""
        self._vmops.plug_vifs(instance_ref, network_info)
    def unplug_vifs(self, instance_ref, network_info):
        """Unplug VIFs from networks."""
        self._vmops.unplug_vifs(instance_ref, network_info)
    def get_info(self, instance):
        """Return data about VM instance."""
        return self._vmops.get_info(instance)
    def get_diagnostics(self, instance):
        """Return data about VM diagnostics."""
        return self._vmops.get_diagnostics(instance)
    def get_all_bw_counters(self, instances):
        """Return bandwidth usage counters for each interface on each
        running VM.
        """
        # we only care about VMs that correspond to a nova-managed
        # instance:
        imap = dict([(inst['name'], inst['uuid']) for inst in instances])
        bwcounters = []
        # get a dictionary of instance names.  values are dictionaries
        # of mac addresses with values that are the bw counters:
        # e.g. {'instance-001' : { 12:34:56:78:90:12 : {'bw_in': 0, ....}}
        all_counters = self._vmops.get_all_bw_counters()
        for instance_name, counters in all_counters.iteritems():
            if instance_name in imap:
                # yes these are stats for a nova-managed vm
                # correlate the stats with the nova instance uuid:
                for vif_counter in counters.values():
                    vif_counter['uuid'] = imap[instance_name]
                    bwcounters.append(vif_counter)
        return bwcounters
    def get_console_output(self, instance):
        """Return snapshot of console."""
        return self._vmops.get_console_output(instance)
    def get_vnc_console(self, instance):
        """Return link to instance's VNC console."""
        return self._vmops.get_vnc_console(instance)
    def get_volume_connector(self, instance):
        """Return volume connector information."""
        # The initiator IQN and hypervisor hostname are looked up once from
        # the host stats and cached on the driver for subsequent calls.
        if not self._initiator or not self._hypervisor_hostname:
            stats = self.get_host_stats(refresh=True)
            try:
                self._initiator = stats['host_other-config']['iscsi_iqn']
                self._hypervisor_hostname = stats['host_hostname']
            except (TypeError, KeyError) as err:
                LOG.warn(_('Could not determine key: %s') % err,
                         instance=instance)
                self._initiator = None
        return {
            'ip': self.get_host_ip_addr(),
            'initiator': self._initiator,
            'host': self._hypervisor_hostname
        }
    @staticmethod
    def get_host_ip_addr():
        # NOTE(review): urlparse netloc may include a ":port" suffix if the
        # configured connection URL carries one — confirm callers expect that.
        xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
        return xs_url.netloc
    def attach_volume(self, connection_info, instance, mountpoint):
        """Attach volume storage to VM instance."""
        return self._volumeops.attach_volume(connection_info,
                                             instance['name'],
                                             mountpoint)
    def detach_volume(self, connection_info, instance, mountpoint):
        """Detach volume storage from VM instance."""
        return self._volumeops.detach_volume(connection_info,
                                             instance['name'],
                                             mountpoint)
    def get_console_pool_info(self, console_type):
        """Return connection details for the console proxy."""
        xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
        return {'address': xs_url.netloc,
                'username': CONF.xenapi_connection_username,
                'password': CONF.xenapi_connection_password}
    def get_available_resource(self, nodename):
        """Retrieve resource info.
        This method is called when nova-compute launches, and
        as part of a periodic task.
        :param nodename: ignored in this driver
        :returns: dictionary describing resources
        """
        host_stats = self.get_host_stats(refresh=True)
        # Updating host information
        total_ram_mb = host_stats['host_memory_total'] / (1024 * 1024)
        # NOTE(belliott) memory-free-computed is a value provided by XenServer
        # for gauging free memory more conservatively than memory-free.
        free_ram_mb = host_stats['host_memory_free_computed'] / (1024 * 1024)
        total_disk_gb = host_stats['disk_total'] / (1024 * 1024 * 1024)
        used_disk_gb = host_stats['disk_used'] / (1024 * 1024 * 1024)
        # NOTE(review): vcpus/vcpus_used are reported as 0 here — presumably
        # filled in or ignored by the resource tracker; confirm upstream.
        dic = {'vcpus': 0,
               'memory_mb': total_ram_mb,
               'local_gb': total_disk_gb,
               'vcpus_used': 0,
               'memory_mb_used': total_ram_mb - free_ram_mb,
               'local_gb_used': used_disk_gb,
               'hypervisor_type': 'xen',
               'hypervisor_version': 0,
               'hypervisor_hostname': host_stats['host_hostname'],
               'cpu_info': host_stats['host_cpu_info']['cpu_count']}
        return dic
    def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
        # NOTE(salvatore-orlando): it enforces security groups on
        # host initialization and live migration.
        # In XenAPI we do not assume instances running upon host initialization
        return
    def check_can_live_migrate_destination(self, ctxt, instance_ref,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False, disk_over_commit=False):
        """Check if it is possible to execute live migration.
        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param block_migration: if true, prepare for block migration
        :param disk_over_commit: if true, allow disk over commit
        """
        return self._vmops.check_can_live_migrate_destination(ctxt,
                                                              instance_ref,
                                                              block_migration,
                                                              disk_over_commit)
    def check_can_live_migrate_destination_cleanup(self, ctxt,
                                                   dest_check_data):
        """Do required cleanup on dest host after check_can_live_migrate calls
        :param ctxt: security context
        :param disk_over_commit: if true, allow disk over commit
        """
        pass
    def check_can_live_migrate_source(self, ctxt, instance_ref,
                                      dest_check_data):
        """Check if it is possible to execute live migration.
        This checks if the live migration can succeed, based on the
        results from check_can_live_migrate_destination.
        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance
        :param dest_check_data: result of check_can_live_migrate_destination
                                includes the block_migration flag
        """
        return self._vmops.check_can_live_migrate_source(ctxt, instance_ref,
                                                         dest_check_data)
    def get_instance_disk_info(self, instance_name):
        """Used by libvirt for live migration. We rely on xenapi
        checks to do this for us.
        """
        pass
    def pre_block_migration(self, ctxt, instance_ref, disk_info_json):
        """Used by libvirt for live migration. We rely on xenapi
        checks to do this for us. May be used in the future to
        populate the vdi/vif maps.
        """
        pass
    def live_migration(self, ctxt, instance_ref, dest,
                       post_method, recover_method, block_migration=False,
                       migrate_data=None):
        """Performs the live migration of the specified instance.
        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params dest: destination host
        :params post_method:
            post operation method.
            expected nova.compute.manager.post_live_migration.
        :params recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager.recover_live_migration.
        :params block_migration: if true, migrate VM disk.
        :params migrate_data: implementation specific params
        """
        self._vmops.live_migrate(ctxt, instance_ref, dest, post_method,
                                 recover_method, block_migration, migrate_data)
    def pre_live_migration(self, context, instance_ref, block_device_info,
                           network_info, migrate_data=None):
        """Preparation live migration.
        :params block_device_info:
            It must be the result of _get_instance_volume_bdms()
            at compute manager.
        """
        # TODO(JohnGarbutt) look again when boot-from-volume hits trunk
        pre_live_migration_result = {}
        pre_live_migration_result['sr_uuid_map'] = \
            self._vmops.attach_block_device_volumes(block_device_info)
        return pre_live_migration_result
    def post_live_migration_at_destination(self, ctxt, instance_ref,
                                           network_info, block_migration,
                                           block_device_info=None):
        """Post operation of live migration at destination host.
        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params network_info: instance network information
        :params : block_migration: if true, post operation of block_migraiton.
        """
        # TODO(JohnGarbutt) look at moving/downloading ramdisk and kernel
        pass
    def unfilter_instance(self, instance_ref, network_info):
        """Removes security groups configured for an instance."""
        return self._vmops.unfilter_instance(instance_ref, network_info)
    def refresh_security_group_rules(self, security_group_id):
        """Updates security group rules for all instances associated with a
        given security group.
        Invoked when security group rules are updated.
        """
        return self._vmops.refresh_security_group_rules(security_group_id)
    def refresh_security_group_members(self, security_group_id):
        """Updates security group rules for all instances associated with a
        given security group.
        Invoked when instances are added/removed to a security group.
        """
        return self._vmops.refresh_security_group_members(security_group_id)
    def refresh_instance_security_rules(self, instance):
        """Updates security group rules for specified instance.
        Invoked when instances are added/removed to a security group
        or when a rule is added/removed to a security group.
        """
        return self._vmops.refresh_instance_security_rules(instance)
    def refresh_provider_fw_rules(self):
        return self._vmops.refresh_provider_fw_rules()
    def get_host_stats(self, refresh=False):
        """Return the current state of the host.
        If 'refresh' is True, run the update first.
        """
        return self.host_state.get_host_stats(refresh=refresh)
    def host_power_action(self, host, action):
        """The only valid values for 'action' on XenServer are 'reboot' or
        'shutdown', even though the API also accepts 'startup'. As this is
        not technically possible on XenServer, since the host is the same
        physical machine as the hypervisor, if this is requested, we need to
        raise an exception.
        """
        if action in ("reboot", "shutdown"):
            return self._host.host_power_action(host, action)
        else:
            msg = _("Host startup on XenServer is not supported.")
            raise NotImplementedError(msg)
    def set_host_enabled(self, host, enabled):
        """Sets the specified host's ability to accept new instances."""
        return self._host.set_host_enabled(host, enabled)
    def get_host_uptime(self, host):
        """Returns the result of calling "uptime" on the target host."""
        return self._host.get_host_uptime(host)
    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation.
        """
        return self._host.host_maintenance_mode(host, mode)
    def add_to_aggregate(self, context, aggregate, host, **kwargs):
        """Add a compute host to an aggregate."""
        return self._pool.add_to_aggregate(context, aggregate, host, **kwargs)
    def remove_from_aggregate(self, context, aggregate, host, **kwargs):
        """Remove a compute host from an aggregate."""
        return self._pool.remove_from_aggregate(context,
                                                aggregate, host, **kwargs)
    def undo_aggregate_operation(self, context, op, aggregate,
                                 host, set_error=True):
        """Undo aggregate operation when pool error raised."""
        return self._pool.undo_aggregate_operation(context, op,
                                                   aggregate, host, set_error)
    def legacy_nwinfo(self):
        """
        Indicate if the driver requires the legacy network_info format.
        """
        # TODO(tr3buchet): remove this function once all virts return false
        return False
    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        """resume guest state when a host is booted."""
        self._vmops.power_on(instance)
    def get_per_instance_usage(self):
        """Get information about instance resource usage.
        :returns: dict of  nova uuid => dict of usage
        info
        """
        return self._vmops.get_per_instance_usage()
class XenAPISession(object):
    """The session to invoke XenAPI SDK calls."""
    def __init__(self, url, user, pw, virtapi):
        """Log in to XenAPI and build a pool of authenticated sessions."""
        import XenAPI
        self.XenAPI = XenAPI
        # Pool of authenticated sessions shared by all greenthreads; see
        # _get_session() for the checkout/check-in protocol.
        self._sessions = queue.Queue()
        self.is_slave = False
        exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
                                          "(is the Dom0 disk full?)"))
        # The first login may redirect us to the pool master (HOST_IS_SLAVE),
        # in which case the returned URL differs from the configured one.
        url = self._create_first_session(url, user, pw, exception)
        self._populate_session_pool(url, user, pw, exception)
        self.host_uuid = self._get_host_uuid()
        self.product_version, self.product_brand = \
            self._get_product_version_and_brand()
        self._virtapi = virtapi
    def _create_first_session(self, url, user, pw, exception):
        """Log in once, handling redirection to the pool master.
        Returns the (possibly rewritten) URL the remaining pool sessions
        should connect to.
        """
        try:
            session = self._create_session(url)
            with timeout.Timeout(CONF.xenapi_login_timeout, exception):
                session.login_with_password(user, pw)
        except self.XenAPI.Failure as e:
            # if user and pw of the master are different, we're doomed!
            if e.details[0] == 'HOST_IS_SLAVE':
                master = e.details[1]
                url = pool.swap_xapi_host(url, master)
                session = self.XenAPI.Session(url)
                session.login_with_password(user, pw)
                self.is_slave = True
            else:
                raise
        self._sessions.put(session)
        return url
    def _populate_session_pool(self, url, user, pw, exception):
        """Fill the session pool up to xenapi_connection_concurrent sessions
        (one was already created by _create_first_session).
        """
        for i in xrange(CONF.xenapi_connection_concurrent - 1):
            session = self._create_session(url)
            with timeout.Timeout(CONF.xenapi_login_timeout, exception):
                session.login_with_password(user, pw)
            self._sessions.put(session)
    def _get_host_uuid(self):
        """Return the UUID of the host nova-compute runs on."""
        if self.is_slave:
            # When connected to the pool master, the local host's UUID must be
            # looked up via the aggregate metadata kept in the nova DB.
            aggr = self._virtapi.aggregate_get_by_host(
                context.get_admin_context(),
                CONF.host, key=pool_states.POOL_FLAG)[0]
            if not aggr:
                LOG.error(_('Host is member of a pool, but DB '
                            'says otherwise'))
                raise exception.AggregateHostNotFound()
            return aggr.metadetails[CONF.host]
        else:
            with self._get_session() as session:
                host_ref = session.xenapi.session.get_this_host(session.handle)
                return session.xenapi.host.get_uuid(host_ref)
    def _get_product_version_and_brand(self):
        """Return a tuple of (major, minor, rev) for the host version and
        a string of the product brand.
        """
        # NOTE(review): returns (None, None) — i.e. no version tuple and no
        # brand — when either value is missing from the software version.
        software_version = self._get_software_version()
        product_version_str = software_version.get('product_version')
        product_brand = software_version.get('product_brand')
        if None in (product_version_str, product_brand):
            return (None, None)
        product_version = tuple(int(part) for part in
                                product_version_str.split('.'))
        return product_version, product_brand
    def _get_software_version(self):
        """Return the host's software version dict from XenAPI."""
        host = self.get_xenapi_host()
        return self.call_xenapi('host.get_software_version', host)
    def get_session_id(self):
        """Return a string session_id.  Used for vnc consoles."""
        with self._get_session() as session:
            return str(session._session)
    @contextlib.contextmanager
    def _get_session(self):
        """Return exclusive session for scope of with statement."""
        session = self._sessions.get()
        try:
            yield session
        finally:
            # Always return the session to the pool, even on error.
            self._sessions.put(session)
    def get_xenapi_host(self):
        """Return the xenapi host on which nova-compute runs on."""
        with self._get_session() as session:
            return session.xenapi.host.get_by_uuid(self.host_uuid)
    def call_xenapi(self, method, *args):
        """Call the specified XenAPI method on a background thread."""
        with self._get_session() as session:
            return session.xenapi_request(method, args)
    def call_plugin(self, plugin, fn, args):
        """Call host.call_plugin on a background thread."""
        # NOTE(johannes): Fetch host before we acquire a session. Since
        # get_xenapi_host() acquires a session too, it can result in a
        # deadlock if multiple greenthreads race with each other. See
        # bug 924918
        host = self.get_xenapi_host()
        # NOTE(armando): pass the host uuid along with the args so that
        # the plugin gets executed on the right host when using XS pools
        args['host_uuid'] = self.host_uuid
        with self._get_session() as session:
            return self._unwrap_plugin_exceptions(
                                 session.xenapi.host.call_plugin,
                                 host, plugin, fn, args)
    def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
        """Call a plugin, pickling args/kwargs and unpickling the result."""
        params = {'params': pickle.dumps(dict(args=args, kwargs=kwargs))}
        rv = self.call_plugin(plugin, fn, params)
        return pickle.loads(rv)
    def _create_session(self, url):
        """Stubout point. This can be replaced with a mock session."""
        return self.XenAPI.Session(url)
    def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
        """Parse exception details."""
        try:
            return func(*args, **kwargs)
        except self.XenAPI.Failure as exc:
            LOG.debug(_("Got exception: %s"), exc)
            if (len(exc.details) == 4 and
                exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
                exc.details[2] == 'Failure'):
                params = None
                try:
                    # FIXME(comstud): eval is evil.
                    params = eval(exc.details[3])
                except Exception:
                    raise exc
                raise self.XenAPI.Failure(params)
            else:
                raise
        except xmlrpclib.ProtocolError as exc:
            LOG.debug(_("Got exception: %s"), exc)
            raise
    def get_rec(self, record_type, ref):
        """Return the record for ``ref``, or None if the handle is invalid
        (e.g. the record was deleted out from under us).
        """
        try:
            return self.call_xenapi('%s.get_record' % record_type, ref)
        except self.XenAPI.Failure as e:
            if e.details[0] != 'HANDLE_INVALID':
                raise
        return None
    def get_all_refs_and_recs(self, record_type):
        """Retrieve all refs and recs for a Xen record type.
        Handles race-conditions where the record may be deleted between
        the `get_all` call and the `get_record` call.
        """
        for ref in self.call_xenapi('%s.get_all' % record_type):
            rec = self.get_rec(record_type, ref)
            # Check to make sure the record still exists. It may have
            # been deleted between the get_all call and get_record call
            if rec:
                yield ref, rec
| |
#!/usr/bin/env python
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import argparse
import os.path
from basic_modules.workflow import Workflow
from utils import logger
from utils import remap
from tool.forge_bsgenome import bsgenomeTool
from tool.bwa_mem_aligner import bwaAlignerMEMTool
from tool.biobambam_filter import biobambam
from tool.idear import idearTool
# ------------------------------------------------------------------------------
class process_damidseq(Workflow):
    """
    Functions for processing DamID-Seq FastQ files. Files are aligned,
    filtered and analysed for peak calling.
    """

    def __init__(self, configuration=None):
        """
        Initialise the class

        Parameters
        ----------
        configuration : dict
            a dictionary containing parameters that define how the operation
            should be carried out, which are specific to each Tool.
        """
        logger.info("Processing DamID-Seq")
        if configuration is None:
            configuration = {}
        # The Workflow base class is not guaranteed to provide a
        # ``configuration`` attribute, so create it before updating to
        # avoid an AttributeError on a bare instance.
        if not hasattr(self, "configuration"):
            self.configuration = {}
        self.configuration.update(configuration)

    @staticmethod
    def _set_tool_provenance(meta_obj):
        """Record that this workflow produced a Metadata object.

        Moves the originating tool name into ``tool_description`` and sets
        ``tool`` to this workflow's name, mirroring the convention used by
        the other MuG processes.
        """
        meta_obj.meta_data["tool_description"] = meta_obj.meta_data["tool"]
        meta_obj.meta_data["tool"] = "process_damidseq"

    def _align_filter(self, align_input_files, align_input_file_meta, output_files):
        """
        Align a FASTQ file with BWA MEM then filter the resulting bam with
        BioBamBam.

        Parameters
        ----------
        align_input_files : dict
            genome, index and FASTQ ("loc" [, "fastq_2"]) file locations
        align_input_file_meta : dict
            Metadata matching align_input_files
        output_files : dict
            Locations for the "bam", "bai", "bam_filtered" and
            "bai_filtered" outputs

        Returns
        -------
        (dict, dict)
            Generated file locations and their Metadata; two empty dicts
            if either tool fails.
        """
        output_files_generated = {}
        output_metadata_generated = {}

        bwa = bwaAlignerMEMTool(self.configuration)
        logger.progress("BWA MEM Aligner - " + align_input_files["loc"], status="RUNNING")
        bwa_files, bwa_meta = bwa.run(
            align_input_files, align_input_file_meta,
            {"output": output_files["bam"], "bai": output_files["bai"]}
        )
        logger.progress("BWA MEM Aligner - " + align_input_files["loc"], status="DONE")

        try:
            for key in ("bam", "bai"):
                output_files_generated[key] = bwa_files[key]
                output_metadata_generated[key] = bwa_meta[key]
                self._set_tool_provenance(output_metadata_generated[key])
        except KeyError as msg:
            # Use .get() here: if "bam" itself was the missing key it is not
            # in output_files_generated and indexing would raise again.
            logger.fatal(
                "KeyError error - BWA aligner failed: {0}\n{1}\n{2}\n{3}".format(
                    msg, output_files_generated.get("bam"),
                    "Available file keys: " + ", ".join(bwa_files.keys()),
                    "Available mets keys: " + ", ".join(bwa_meta.keys())
                )
            )
            return {}, {}

        # Filter the bams
        b3f = biobambam(self.configuration)
        logger.progress("BioBamBam Filtering - " + align_input_files["loc"], status="RUNNING")
        b3f_files, b3f_meta = b3f.run(
            {"input": bwa_files["bam"]},
            {"input": bwa_meta["bam"]},
            {"output": output_files["bam_filtered"], "bai": output_files["bai_filtered"]}
        )
        logger.progress("BioBamBam Filtering - " + align_input_files["loc"], status="DONE")

        try:
            # biobambam returns plain "bam"/"bai" keys; store them under the
            # *_filtered names expected by run().
            for in_key, out_key in (("bam", "bam_filtered"), ("bai", "bai_filtered")):
                output_files_generated[out_key] = b3f_files[in_key]
                output_metadata_generated[out_key] = b3f_meta[in_key]
                self._set_tool_provenance(output_metadata_generated[out_key])
        except KeyError as msg:
            logger.fatal("KeyError error - BioBamBam filtering failed: {0}\n{1}".format(
                msg, output_files_generated.get("bam_filtered")))
            return {}, {}

        return (output_files_generated, output_metadata_generated)

    def run(self, input_files, metadata, output_files):
        """
        Main run function for processing DamID-seq FastQ data. Pipeline aligns
        the FASTQ files to the genome using BWA. iDEAR is then used for peak
        calling to identify transcription factor binding sites within the
        genome.

        Currently this can only handle a single data file and a single
        background file.

        Parameters
        ----------
        input_files : dict
            Location of the initial input files required by the workflow

            genome : str
                Genome FASTA file
            index : str
                Location of the BWA archived index files
            fastq_1 : str
                Location of the FASTQ reads files
            fastq_2 : str
                Location of the FASTQ repeat reads files
            bg_fastq_1 : str
                Location of the background FASTQ reads files
            bg_fastq_2 : str
                Location of the background FASTQ repeat reads files
        metadata : dict
            Input file meta data associated with their roles

            genome : str
            index : str
            fastq_1 : str
            fastq_2 : str
            bg_fastq_1 : str
            bg_fastq_2 : str
        output_files : dict
            Output file locations

            bam [, "bam_bg"] : str
            filtered [, "filtered_bg"] : str

        Returns
        -------
        output_files : dict
            Output file locations associated with their roles, for the output

            bam [, "bam_bg"] : str
                Aligned FASTQ short read file [ and aligned background file]
                locations
            filtered [, "filtered_bg"] : str
                Filtered versions of the respective bam files
            bigwig : str
                Location of the bigwig peaks
        output_metadata : dict
            Output metadata for the associated files in output_files

            bam [, "bam_bg"] : Metadata
            filtered [, "filtered_bg"] : Metadata
            bigwig : Metadata
        """
        # Aligned/filtered outputs exist for both the signal ("") and the
        # background ("bg_") FASTQ sets; initialise every list key up front
        # so the append calls in the alignment loop cannot raise KeyError.
        output_files_generated = {}
        output_metadata = {}
        for prefix in ("", "bg_"):
            for key in ("bam", "bam_filtered", "bai", "bai_filtered"):
                output_files_generated[prefix + key] = []
                output_metadata[prefix + key] = []

        # BSgenome - build the R genome package needed by iDEAR
        logger.info("Generating BSgenome")
        if "genome_public" in input_files:
            genome_input_file = {"genome": input_files["genome_public"]}
            genome_input_meta = {"genome": metadata["genome_public"]}
        else:
            genome_input_file = {"genome": input_files["genome"]}
            genome_input_meta = {"genome": metadata["genome"]}

        bsg = bsgenomeTool(self.configuration)
        logger.progress("BSgenome Indexer", status="RUNNING")
        bsgi, bsgm = bsg.run(
            genome_input_file,
            genome_input_meta,
            {
                "bsgenome": output_files["bsgenome"],
                "chrom_size": output_files["chrom_size"],
                "genome_2bit": output_files["genome_2bit"],
                "seed_file": output_files["seed_file"]
            }
        )
        logger.progress("BSgenome Indexer", status="DONE")

        try:
            for file_key in ("bsgenome", "chrom_size", "genome_2bit", "seed_file"):
                output_files_generated[file_key] = bsgi[file_key]
                output_metadata[file_key] = bsgm[file_key]
                self._set_tool_provenance(output_metadata[file_key])
        except KeyError:
            logger.fatal("BSgenome indexer failed")
            return {}, {}

        # Align and filter reads - signal first, then background
        for prefix in ["", "bg_"]:
            for i, aln in enumerate(input_files[prefix + "fastq_1"]):
                logger.info("BWA MEM Aligning and filtering of " + aln)
                if "genome_public" in input_files:
                    align_input_files = remap(
                        input_files, genome="genome_public", index="index_public",
                        loc=input_files[prefix + "fastq_1"][i])
                    align_input_file_meta = remap(
                        metadata, genome="genome_public", index="index_public",
                        loc=input_files[prefix + "fastq_1"][i])
                else:
                    align_input_files = remap(
                        input_files, genome="genome", index="index",
                        loc=input_files[prefix + "fastq_1"][i])
                    align_input_file_meta = remap(
                        metadata, genome="genome", index="index",
                        loc=input_files[prefix + "fastq_1"][i])

                # Paired-end data carries a matching fastq_2 entry
                if prefix + "fastq_2" in input_files:
                    align_input_files["fastq_2"] = input_files[prefix + "fastq_2"][i]
                    align_input_file_meta["fastq_2"] = metadata[prefix + "fastq_2"][i]

                fastq_in = os.path.split(input_files[prefix + "fastq_1"][i])
                fastq_suffix = fastq_in[1].split(".")[-1]

                # Intermediate outputs live in the execution directory and
                # are named after the input FASTQ with a swapped suffix.
                align_output_files = {
                    "bam": os.path.join(
                        self.configuration["execution"],
                        fastq_in[1].replace(fastq_suffix, "bam")
                    ),
                    "bam_filtered": os.path.join(
                        self.configuration["execution"],
                        fastq_in[1].replace(fastq_suffix, "filtered.bam")
                    ),
                    "bai": os.path.join(
                        self.configuration["execution"],
                        fastq_in[1].replace(fastq_suffix, "bai")
                    ),
                    "bai_filtered": os.path.join(
                        self.configuration["execution"],
                        fastq_in[1].replace(fastq_suffix, "filtered.bai")
                    )
                }

                bwa_files, bwa_meta = self._align_filter(
                    align_input_files, align_input_file_meta, align_output_files)

                try:
                    # Each file is paired with its own metadata (the
                    # original code reused the unfiltered "bam"/"bai"
                    # metadata for the filtered outputs).
                    for key in ("bam", "bam_filtered", "bai", "bai_filtered"):
                        output_files_generated[prefix + key].append(bwa_files[key])
                        output_metadata[prefix + key].append(bwa_meta[key])
                except KeyError as msg:
                    logger.fatal(
                        "Error aligning and filtering input FASTQ files: {0}".format(msg))
                    return {}, {}

        # iDEAR to call peaks
        idear_caller = idearTool(self.configuration)
        logger.progress("iDEAR Peak Caller", status="RUNNING")
        idear_files, idear_meta = idear_caller.run(
            {
                "bam": output_files_generated["bam_filtered"],
                "bg_bam": output_files_generated["bg_bam_filtered"],
                # Use the BSgenome package generated above rather than
                # expecting one to arrive as a workflow input
                "bsgenome": output_files_generated["bsgenome"]
            }, {
                "bam": output_metadata["bam_filtered"],
                "bg_bam": output_metadata["bg_bam_filtered"],
                "bsgenome": output_metadata["bsgenome"]
            }, {
                "bigwig": output_files["bigwig"],
            }
        )
        logger.progress("iDEAR Peak Caller", status="DONE")

        try:
            output_files_generated["bigwig"] = idear_files["bigwig"]
            output_metadata["bigwig"] = idear_meta["bigwig"]
            self._set_tool_provenance(output_metadata["bigwig"])
        except KeyError as msg:
            logger.fatal("KeyError error - iDEAR filtering failed: {0}\n{1}".format(
                msg, "bigwig"))
            return {}, {}

        return output_files_generated, output_metadata
# ------------------------------------------------------------------------------
def main_json(config, in_metadata, out_metadata):
    """
    Alternative main function
    -------------
    This function launches the app using configuration written in
    two json files: config.json and input_metadata.json.
    """
    # 1. Instantiate and launch the App
    print("1. Instantiate and launch the App")
    from apps.jsonapp import JSONApp
    json_app = JSONApp()
    launch_result = json_app.launch(
        process_damidseq, config, in_metadata, out_metadata)

    # 2. The App has finished
    print("2. Execution finished; see " + out_metadata)
    print(launch_result)

    return launch_result
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    # Set up the command line parameters
    PARSER = argparse.ArgumentParser(description="iDamID-seq peak calling")
    PARSER.add_argument("--config", help="Configuration file")
    PARSER.add_argument("--in_metadata", help="Location of input metadata file")
    PARSER.add_argument("--out_metadata", help="Location of output metadata file")
    PARSER.add_argument("--local", action="store_const", const=True, default=False)

    # Get the matching parameters from the command line
    ARGS = PARSER.parse_args()

    if ARGS.local:
        # Flag used by the pyCOMPSs dummy API when running locally
        import sys
        sys._run_from_cmdl = True  # pylint: disable=protected-access

    RESULTS = main_json(ARGS.config, ARGS.in_metadata, ARGS.out_metadata)
    print(RESULTS)
| |
# coding=utf-8
# Copyright 2017 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reversible Residual Transformer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import rev_block
from tensor2tensor.models import transformer
from tensor2tensor.utils import registry
import tensorflow as tf
@registry.register_model
class TransformerRevnet(transformer.Transformer):
  """Reversible Residual Transformer.

  Layers are reversible and are recomputed on the backward pass.

  y1 = x1 + f(x2)
  y2 = x2 + g(y1)

  f: Attention
  g: Feed-forward
  """

  def model_fn_body(self, features):
    """Build the reversible encoder/decoder body.

    Args:
      features: dict with "inputs", "targets" and "target_space_id"
        Tensors (inputs/targets are flattened from 4-D to 3-D below).

    Returns:
      decoder output with an axis of size 1 re-inserted at position 2.
    """
    hparams = self._hparams
    targets = features["targets"]
    inputs = features["inputs"]
    target_space = features["target_space_id"]

    inputs = common_layers.flatten4d3d(inputs)
    targets = common_layers.flatten4d3d(targets)

    (encoder_input, encoder_self_attention_bias,
     encoder_decoder_attention_bias) = (transformer.transformer_prepare_encoder(
         inputs, target_space, hparams))
    (decoder_input,
     decoder_self_attention_bias) = transformer.transformer_prepare_decoder(
         targets, hparams)

    # Input dropout before entering the reversible stacks
    encoder_input = tf.nn.dropout(encoder_input,
                                  1.0 - hparams.layer_prepostprocess_dropout)
    decoder_input = tf.nn.dropout(decoder_input,
                                  1.0 - hparams.layer_prepostprocess_dropout)
    encoder_output = transformer_revnet_encoder(
        encoder_input, encoder_self_attention_bias, hparams)

    decoder_output = transformer_revnet_decoder(
        decoder_input, encoder_output, decoder_self_attention_bias,
        encoder_decoder_attention_bias, hparams)
    decoder_output = tf.expand_dims(decoder_output, 2)

    return decoder_output
def transformer_revnet_encoder(encoder_input,
                               encoder_self_attention_bias,
                               hparams,
                               name="encoder"):
  """A stack of transformer layers.

  The input is split in half along the channel axis and fed through
  rev_block, so f and g each operate on hparams.hidden_size // 2 channels.

  Args:
    encoder_input: a Tensor
    encoder_self_attention_bias: bias Tensor for self-attention
       (see common_attention.attention_bias())
    hparams: hyperparameters for model
    name: a string

  Returns:
    y: a Tensors
  """

  def f(x, side_input):
    """f(x) for reversible layer, self-attention layer."""
    encoder_self_attention_bias = side_input[0]

    # x is one half of the channel-split input, so temporarily halve
    # hparams.hidden_size; the original value is restored before returning.
    old_hid_size = hparams.hidden_size
    hparams.hidden_size = old_hid_size // 2

    with tf.variable_scope("self_attention"):
      y = common_attention.multihead_attention(
          common_layers.layer_preprocess(
              x, hparams), None, encoder_self_attention_bias,
          hparams.attention_key_channels or hparams.hidden_size,
          hparams.attention_value_channels or hparams.hidden_size,
          hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)
      y = common_layers.layer_postprocess(x, y, hparams)
    hparams.hidden_size = old_hid_size
    return y

  def g(x):
    """g(x) for reversible layer, feed-forward layer."""
    # Same halving trick as in f; hparams is mutated and restored.
    old_hid_size = hparams.hidden_size
    hparams.hidden_size = old_hid_size // 2

    with tf.variable_scope("ffn"):
      y = transformer.transformer_ffn_layer(
          common_layers.layer_preprocess(x, hparams), hparams)
      y = common_layers.layer_postprocess(x, y, hparams)
    hparams.hidden_size = old_hid_size
    return y

  x1, x2 = tf.split(encoder_input, 2, axis=-1)

  with tf.variable_scope(name):
    y1, y2 = rev_block.rev_block(
        x1,
        x2,
        f,
        g,
        num_layers=hparams.num_hidden_layers,
        f_side_input=[encoder_self_attention_bias],
        is_training=hparams.mode == tf.estimator.ModeKeys.TRAIN)
    y = tf.concat([y1, y2], axis=-1)
    return common_layers.layer_preprocess(y, hparams)
def transformer_revnet_decoder(decoder_input,
                               encoder_output,
                               decoder_self_attention_bias,
                               encoder_decoder_attention_bias,
                               hparams,
                               name="decoder"):
  """A stack of transformer layers.

  The input is split in half along the channel axis and fed through
  rev_block, so f and g each operate on hparams.hidden_size // 2 channels.

  Args:
    decoder_input: a Tensor
    encoder_output: a Tensor
    decoder_self_attention_bias: bias Tensor for self-attention
      (see common_attention.attention_bias())
    encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention
      (see common_attention.attention_bias())
    hparams: hyperparameters for model
    name: a string

  Returns:
    y: a Tensors
  """

  def f(x, side_input):
    """f(x) for reversible layer, self-attention and enc-dec attention."""
    decoder_self_attention_bias = side_input[0]
    encoder_decoder_attention_bias = side_input[1]
    encoder_output = side_input[2]

    # x is one half of the channel-split input, so temporarily halve
    # hparams.hidden_size; the original value is restored before returning.
    old_hid_size = hparams.hidden_size
    hparams.hidden_size = old_hid_size // 2

    with tf.variable_scope("self_attention"):
      y = common_attention.multihead_attention(
          common_layers.layer_preprocess(
              x, hparams), None, decoder_self_attention_bias,
          hparams.attention_key_channels or hparams.hidden_size,
          hparams.attention_value_channels or hparams.hidden_size,
          hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)
      y = common_layers.layer_postprocess(x, y, hparams)
      # Cross-attention only when an encoder output is provided
      if encoder_output is not None:
        with tf.variable_scope("encdec_attention"):
          y = common_attention.multihead_attention(
              common_layers.layer_preprocess(
                  x, hparams), encoder_output, encoder_decoder_attention_bias,
              hparams.attention_key_channels or hparams.hidden_size,
              hparams.attention_value_channels or hparams.hidden_size,
              hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)
          y = common_layers.layer_postprocess(x, y, hparams)
    hparams.hidden_size = old_hid_size
    return y

  def g(x):
    """g(x) for reversible layer, feed-forward layer."""
    # Same halving trick as in f; hparams is mutated and restored.
    old_hid_size = hparams.hidden_size
    hparams.hidden_size = old_hid_size // 2

    with tf.variable_scope("ffn"):
      y = transformer.transformer_ffn_layer(
          common_layers.layer_preprocess(x, hparams), hparams)
      y = common_layers.layer_postprocess(x, y, hparams)
    hparams.hidden_size = old_hid_size
    return y

  x1, x2 = tf.split(decoder_input, 2, axis=-1)

  with tf.variable_scope(name):
    y1, y2 = rev_block.rev_block(
        x1,
        x2,
        f,
        g,
        num_layers=hparams.num_hidden_layers,
        f_side_input=[
            decoder_self_attention_bias, encoder_decoder_attention_bias,
            encoder_output
        ],
        is_training=hparams.mode == tf.estimator.ModeKeys.TRAIN)
    y = tf.concat([y1, y2], axis=-1)
    return common_layers.layer_preprocess(y, hparams)
@registry.register_hparams
def transformer_revnet_base():
  """Base hparams for TransformerRevnet."""
  # Start from the big Transformer configuration, then switch the layer
  # pre/post-processing to the "n"/"da" scheme used by transformer_n_da.
  hp = transformer.transformer_big()
  hp.layer_preprocess_sequence = "n"
  hp.layer_postprocess_sequence = "da"
  hp.learning_rate = 0.4
  return hp
@registry.register_hparams
def transformer_revnet_big():
  """Base hparams for TransformerRevnet."""
  # The TransformerRevnet uses significantly less memory than the
  # Transformer, so scale up both the batch size and the model size.
  hp = transformer_revnet_base()
  hp.batch_size *= 2
  hp.hidden_size *= 2
  hp.num_heads *= 2
  hp.num_hidden_layers += 1
  return hp
| |
import os
import glob
import shutil
from setup_app import paths
from setup_app.static import PersistenceType
from setup_app.static import AppType, InstallOption
from setup_app.config import Config
from setup_app.utils import base
from setup_app.installers.jetty import JettyInstaller
class SamlInstaller(JettyInstaller):
    """Installer for the Shibboleth SAML IDP v3 service hosted under Jetty.

    Collects the oxShibboleth artefacts, generates key material, renders the
    IDP3 configuration/metadata templates and registers the service with the
    configured persistence backend.
    """

    def __init__(self):
        # Identity of this installer within the setup framework
        self.service_name = 'idp'
        self.app_type = AppType.SERVICE
        self.install_type = InstallOption.OPTONAL
        self.install_var = 'installSaml'
        self.register_progess()
        # This installer needs a live database connection
        self.needdb = True

        # (local path, download URL) pairs for the artefacts used below
        self.source_files = [
                (os.path.join(Config.distGluuFolder,'idp.war'), Config.maven_root + '/maven/org/gluu/oxshibbolethIdp/{0}/oxshibbolethIdp-{0}.war'.format(Config.oxVersion)),
                (os.path.join(Config.distGluuFolder,'idp3_cml_keygenerator.jar'), Config.maven_root + '/maven/org/gluu/oxShibbolethKeyGenerator/{0}/oxShibbolethKeyGenerator-{0}.jar'.format(Config.oxVersion)),
                (os.path.join(Config.distGluuFolder,'shibboleth-idp.jar'), Config.maven_root + '/maven/org/gluu/oxShibbolethStatic/{0}/oxShibbolethStatic-{0}.jar'.format(Config.oxVersion)),
                ]

        # Template sources and rendered-output locations
        self.templates_folder = os.path.join(Config.templateFolder, 'idp')
        self.output_folder = os.path.join(Config.outputFolder, 'idp')
        self.ldif_config = os.path.join(self.output_folder, 'configuration.ldif')
        self.ldif_clients = os.path.join(self.output_folder, 'clients.ldif')
        self.ldif_oxidp = os.path.join(self.output_folder, 'oxidp.ldif')
        self.oxidp_config_json = os.path.join(self.output_folder, 'oxidp-config.json')
        self.shibJksFn = os.path.join(Config.certFolder, 'shibIDP.jks')
        self.shibboleth_version = 'v3'
        self.data_source_properties = os.path.join(self.output_folder, 'datasource.properties')
        self.staticIDP3FolderConf = os.path.join(Config.install_dir, 'static/idp3/conf')
        self.staticIDP3FolderMetadata = os.path.join(Config.install_dir, 'static/idp3/metadata')
        self.oxtrust_conf_fn = os.path.join(self.output_folder, 'oxtrust_conf.json')

        # Names of the IDP3 configuration templates rendered into idp3ConfFolder
        self.idp3_configuration_properties = 'idp.properties'
        self.idp3_configuration_ldap_properties = 'ldap.properties'
        self.idp3_configuration_saml_nameid = 'saml-nameid.properties'
        self.idp3_configuration_services = 'services.properties'
        self.idp3_configuration_password_authn = 'authn/password-authn-config.xml'
        self.idp3_metadata = 'idp-metadata.xml'

        # Shibboleth IDP v3 on-disk layout
        self.idp3Folder = '/opt/shibboleth-idp'
        self.idp3MetadataFolder = os.path.join(self.idp3Folder, 'metadata')
        self.idp3MetadataCredentialsFolder = os.path.join(self.idp3MetadataFolder, 'credentials')
        self.idp3LogsFolder = os.path.join(self.idp3Folder, 'logs')
        self.idp3LibFolder = os.path.join(self.idp3Folder, 'lib')
        self.idp3ConfFolder = os.path.join(self.idp3Folder, 'conf')
        self.idp3ConfAuthnFolder = os.path.join(self.idp3Folder, 'conf/authn')
        self.idp3CredentialsFolder = os.path.join(self.idp3Folder, 'credentials')
        self.idp3WebappFolder = os.path.join(self.idp3Folder, 'webapp')

        # Certificates and keys generated for the IDP
        self.shib_key_file = os.path.join(Config.certFolder, 'shibIDP.key')
        self.shib_crt_file = os.path.join(Config.certFolder, 'shibIDP.crt')
        self.idp_encryption_crt_file = os.path.join(Config.certFolder, 'idp-encryption.crt')
        self.idp_signing_crt_file = os.path.join(Config.certFolder, 'idp-signing.crt')

    def install(self):
        """Install and configure the Shibboleth IDP v3 service."""
        self.logIt("Install SAML Shibboleth IDP v3...")

        self.unpack_idp3()

        if not base.argsp.dummy:
            if not Config.get('shibJksPass'):
                Config.shibJksPass = self.getPW()
                Config.encoded_shib_jks_pw = self.obscure(Config.shibJksPass)

            # generate crypto
            self.gen_cert('shibIDP', Config.shibJksPass, 'jetty')
            self.gen_cert('idp-encryption', Config.shibJksPass, 'jetty')
            self.gen_cert('idp-signing', Config.shibJksPass, 'jetty')

            self.gen_keystore('shibIDP',
                                self.shibJksFn,
                                Config.shibJksPass,
                                self.shib_key_file,
                                self.shib_crt_file
                                )

        if Config.mappingLocations['user'] == 'couchbase':
            # Couchbase has no lower-cased index on uid, so adjust the filter
            Config.templateRenderingDict['idp_attribute_resolver_ldap.search_filter'] = '(&(|(lower(uid)=$requestContext.principalName)(mail=$requestContext.principalName))(objectClass=gluuPerson))'

        # Process templates
        self.renderTemplateInOut(self.idp3_configuration_properties, self.staticIDP3FolderConf, self.idp3ConfFolder)
        self.renderTemplateInOut(self.idp3_configuration_ldap_properties, self.staticIDP3FolderConf, self.idp3ConfFolder)
        self.renderTemplateInOut(self.idp3_configuration_saml_nameid, self.staticIDP3FolderConf, self.idp3ConfFolder)
        self.renderTemplateInOut(self.idp3_configuration_services, self.staticIDP3FolderConf, self.idp3ConfFolder)
        self.renderTemplateInOut(
                self.idp3_configuration_password_authn,
                os.path.join(self.staticIDP3FolderConf, 'authn'),
                os.path.join(self.idp3ConfFolder, 'authn')
                )

        # load certificates to update metadata
        Config.templateRenderingDict['idp3EncryptionCertificateText'] = self.load_certificate_text(self.idp_encryption_crt_file)
        Config.templateRenderingDict['idp3SigningCertificateText'] = self.load_certificate_text(self.idp_signing_crt_file)

        # update IDP3 metadata
        self.renderTemplateInOut(self.idp3_metadata, self.staticIDP3FolderMetadata, self.idp3MetadataFolder)

        self.installJettyService(self.jetty_app_configuration[self.service_name], True)

        # Deploy the idp.war into the Jetty webapps directory
        jettyServiceWebapps = os.path.join(self.jetty_base, self.service_name, 'webapps')
        self.copyFile(self.source_files[0][0], jettyServiceWebapps)
        self.war_for_jetty10(os.path.join(jettyServiceWebapps, os.path.basename(self.source_files[0][0])))

        # Prepare libraries needed to for command line IDP3 utilities
        self.install_saml_libraries()

        if not base.argsp.dummy:
            # Generate a new keystore holding the AES symmetric key.
            # NOTE: Shibboleth IDP 3.x does not load keystores from
            # /etc/certs; it only accepts the
            # %{idp.home}/credentials/sealer.jks and
            # %{idp.home}/credentials/sealer.kver path format.
            cmd = [Config.cmd_java,'-classpath', '"{}"'.format(os.path.join(self.idp3Folder,'webapp/WEB-INF/lib/*')),
                    'net.shibboleth.utilities.java.support.security.BasicKeystoreKeyStrategyTool',
                    '--storefile', os.path.join(self.idp3Folder,'credentials/sealer.jks'),
                    '--versionfile', os.path.join(self.idp3Folder, 'credentials/sealer.kver'),
                    '--alias secret',
                    '--storepass', Config.shibJksPass]

            self.run(' '.join(cmd), shell=True)

            couchbase_mappings = self.getMappingType('couchbase')
            if 'user' in couchbase_mappings:
                self.saml_couchbase_settings()

        self.saml_persist_configurations()

        # The Jetty user owns the whole IDP3 tree
        self.run([paths.cmd_chown, '-R', 'jetty:jetty', self.idp3Folder])

        self.enable()

    def unpack_idp3(self):
        """Unpack the IDP3 static-configuration JAR into /opt/shibboleth-idp."""
        # unpack IDP3 JAR with static configs
        tmpShibpDir = os.path.join('/tmp', os.urandom(5).hex())
        self.logIt("Unpacking %s..." % self.source_files[2][0])
        self.createDirs(tmpShibpDir)
        self.run([Config.cmd_jar, 'xf', self.source_files[2][0]], tmpShibpDir)
        self.copyTree(os.path.join(tmpShibpDir, 'shibboleth-idp'), '/opt/shibboleth-idp')
        self.removeDirs(tmpShibpDir)

    def generate_configuration(self):
        """Ensure the IDP OIDC client exists and has a (possibly new) password."""
        self.check_clients([('idp_client_id', '1101.')])

        if not Config.get('idpClient_pw'):
            Config.idpClient_pw = self.getPW()
            Config.idpClient_encoded_pw = self.obscure(Config.idpClient_pw)

    def render_import_templates(self):
        """Render the LDIF/JSON templates and import the LDIF entries."""
        self.renderTemplateInOut(self.oxidp_config_json, self.templates_folder, self.output_folder)
        Config.templateRenderingDict['oxidp_config_base64'] = self.generate_base64_ldap_file(self.oxidp_config_json)

        for tmp in (self.ldif_config, self.ldif_oxidp, self.oxtrust_conf_fn, self.ldif_clients):
            self.renderTemplateInOut(tmp, self.templates_folder, self.output_folder)

        self.dbUtils.import_ldif([self.ldif_config, self.ldif_oxidp, self.ldif_clients])

    def update_backend(self):
        """Enable SAML in the backend and push the oxTrust configuration."""
        self.dbUtils.enable_service('gluuSamlEnabled')
        oxtrust_conf = base.readJsonFile(self.oxtrust_conf_fn)
        self.dbUtils.set_oxTrustConfApplication(oxtrust_conf)

    def install_saml_libraries(self):
        """Copy the idp.war libraries into the IDP3 webapp lib folder."""
        # Unpack oxauth.war to get bcprov-jdk16.jar
        tmpIdpDir = os.path.join('/tmp', os.urandom(5).hex())
        self.logIt("Unpacking %s..." % self.source_files[0][0])
        self.createDirs(tmpIdpDir)

        self.run([Config.cmd_jar, 'xf', self.source_files[0][0]], tmpIdpDir)

        # Copy libraries into webapp
        idp3WebappLibFolder = os.path.join(self.idp3WebappFolder, 'WEB-INF/lib')
        self.createDirs(idp3WebappLibFolder)
        self.copyTree(os.path.join(tmpIdpDir, 'WEB-INF/lib'), idp3WebappLibFolder)

        self.removeDirs(tmpIdpDir)

    def saml_couchbase_settings(self):
        """Create (or request creation of) the read-only Couchbase shib user."""
        if not Config.get('couchbaseShibUserPassword'):
            Config.couchbaseShibUserPassword = self.getPW()

        shib_user = 'couchbaseShibUser'
        shib_user_roles = 'query_select[*]'
        if Config.get('isCouchbaseUserAdmin'):
            self.logIt("Creating couchbase readonly user for shib")
            self.dbUtils.cbm.create_user(shib_user, Config.couchbaseShibUserPassword, 'Shibboleth IDP', shib_user_roles)
        else:
            # Without admin rights we can only tell the operator what to create
            Config.post_messages.append('Please create a user on Couchbase Server with the following credidentals and roles')
            Config.post_messages.append('Username: {}'.format(shib_user))
            Config.post_messages.append('Password: {}'.format(Config.couchbaseShibUserPassword))
            Config.post_messages.append('Roles: {}'.format(shib_user_roles))

    def saml_persist_configurations(self):
        """Wire the IDP's datasource to the Couchbase/SQL persistence backend."""
        if Config.persistence_type in (PersistenceType.couchbase, PersistenceType.sql):
            # Add datasource.properties to idp.properties
            idp3_configuration_properties_fn = os.path.join(self.idp3ConfFolder, self.idp3_configuration_properties)

            with open(idp3_configuration_properties_fn) as f:
                idp3_properties = f.readlines()

            for i,l in enumerate(idp3_properties[:]):
                if l.strip().startswith('idp.additionalProperties'):
                    idp3_properties[i] = l.strip() + ', /conf/datasource.properties\n'

            new_idp3_props = ''.join(idp3_properties)
            self.writeFile(idp3_configuration_properties_fn, new_idp3_props, backup=False)

            # NOTE(review): compared against the literal 'sql' here but
            # against PersistenceType.sql above — assumes PersistenceType is
            # a str-like enum; verify.
            if Config.persistence_type == 'sql':
                self.data_source_properties = self.data_source_properties + '.sql'
                bean_formatter = 'rdbm'
            else:
                bean_formatter = 'couchbase'

            self.renderTemplateInOut(self.data_source_properties, self.templates_folder, self.output_folder)

            idp_data_source_fn = os.path.join(self.idp3ConfFolder, 'datasource.properties')
            self.copyFile(self.data_source_properties, idp_data_source_fn)
            # datasource.properties contains credentials: owner-only access
            self.run([paths.cmd_chmod, '0600', idp_data_source_fn])

            bean_xml = os.path.join(self.staticIDP3FolderConf, 'gluu-{}-bean.xml'.format(bean_formatter))
            self.copyFile(bean_xml, self.idp3ConfFolder)

    def create_folders(self):
        """Create the Shibboleth-related directory trees."""
        self.createDirs(os.path.join(Config.gluuBaseFolder, 'conf/shibboleth3'))
        self.createDirs(os.path.join(self.jetty_base, 'identity/conf/shibboleth3/idp'))
        self.createDirs(os.path.join(self.jetty_base, 'identity/conf/shibboleth3/sp'))

        for folder in (self.idp3Folder, self.idp3MetadataFolder, self.idp3MetadataCredentialsFolder,
                        self.idp3LogsFolder, self.idp3LibFolder, self.idp3ConfFolder,
                        self.idp3ConfAuthnFolder, self.idp3CredentialsFolder, self.idp3WebappFolder):
            self.run([paths.cmd_mkdir, '-p', folder])
| |
#!/usr/bin/env python
import commands
import os
import popen2
import quopri
import base64
import re
import shutil
import string
import sys
import time
# Fix for older versions of Python
# (interpreters predating the True/False builtins raise NameError here,
#  in which case plain integers are substituted)
try:
    True
except NameError:
    True,False = 1,0
# Singleton-like design pattern
# See: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66531
# class Constants:
# __shared_state = {}
# def __init__(self):
# self.__dict__ = self.__shared_state
# Import platform_specific data
import platform_specific
def printable(s):
    """Strip a string down to printable characters only.

    The last two characters of string.printable (vertical tab and form
    feed) are not XML-friendly, so they are excluded as well.  This could
    be problematic if string.printable's order varies by machine.
    """
    allowed = string.printable[:98]
    return ''.join(c for c in s if c in allowed)
def run(prog, args):
    """Run a program and return true if it succeeds, false if it fails.

    prog: Name of the program (path passed straight to os.spawnv)
    args: List of command-line arguments"""
    # spawnv wants argv[0] to be the program name itself
    status = os.spawnv(os.P_WAIT, prog, [prog] + args)
    return os.WIFEXITED(status) and os.WEXITSTATUS(status) == 0
def getlogin():
    """Return the name of the login.

    We try several things until something works: the USER and LOGNAME
    environment variables, the password database, then os.getlogin()."""
    for envvar in ('USER', 'LOGNAME'):
        if envvar in os.environ:
            return os.environ[envvar]
    try:
        import pwd
        return pwd.getpwuid(os.getuid())[0]
    except:
        return os.getlogin()
def hasextension(fname):
    """Check if a filename has an extension (i.e. contains a dot)."""
    return '.' in fname
def issourcefile(fname):
    """Check if the file name corresponds to a source file.

    For now, all files that have an extension that isn't .exe or .a or .o"""
    binary_exts = ['.o', '.exe', '.a']
    is_binary = True in [fname.endswith(x) for x in binary_exts]
    return hasextension(fname) and not is_binary
def getlogfilepath(logfiledir):
    """Return the full path of the logfile (named after the login)."""
    logname = getlogin() + '.log'
    return os.path.join(logfiledir, logname)
def getcommand(argv):
    """Retrieve a string version of the command that was invoked at the shell.

    We can't get it exactly because the shell does substitutions on the
    command-line arguments."""
    command_line = ' '.join(argv)
    return command_line
# Parse through source code and retrieve headers
# (only quoted, i.e. local, includes are matched - not <...> system headers)
headerpat = re.compile(r'#include[ \t]+"(.*)"')

def parseheaders(source):
    """Extract the names of local header files from source code.

    Not smart enough to deal with comments."""
    return headerpat.findall(source)
def unique(alist):
    """Return unique elements from a list, keeping first-seen order.

    Taken from comments in http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560"""
    seen = {}
    result = []
    for element in alist:
        if element not in seen:
            seen[element] = element
            result.append(element)
    return result
def flatten(alist):
    """Flatten a list by one level. Each element in the list must itself be a list"""
    result = []
    for sublist in alist:
        result.extend(sublist)
    return result
def encode(string, encoding):
    """Encode a string using the named encoding.

    Supported encodings are 'quopri' (quoted-printable) and 'base64';
    'raw' and any unrecognised encoding return the string unchanged."""
    if encoding == 'quopri':
        return quopri.encodestring(string)
    if encoding == 'base64':
        return base64.encodestring(string)
    # 'raw' and unknown encodings pass straight through
    return string
class CVSUtils:
    """Interacts with CVS.

    Mirrors captured files into a CVS sandbox directory (rooted under a
    synthetic /_HOME path) and commits them via shell `cvs` commands.
    """
    def __init__(self,sandbox,commitmsg):
        """sandbox - CVS sandbox directory which will be used for commits
        commitmsg - message passed to `cvs commit -m`"""
        self.sandbox = sandbox
        self.commitmsg = commitmsg
    def commit_files_to_cvs(self,files):
        """ Commit the sourcefiles and headerfiles to CVS"""
        for f in files:
            self._copy_to_sandbox(f)
        self._commit_sandbox_files()
    def _copy_to_sandbox(self,fname):
        """ Copy a file to the sandbox, creating directories and adding to CVS when necessary.
        Does not do a commit"""
        dest = self._change_base_directory(os.path.abspath(fname),self.sandbox)
        self._check_and_create_dir(os.path.dirname(dest))
        shutil.copy(fname,dest)
        # We don't always need to add the file, but it's easier to try and add it every time
        (status,output) = commands.getstatusoutput("cd %s ; cvs add %s" % (self.sandbox,dest))
        if status!=0:
            # Only complain if it's not an "already exists" problem
            if output.find('already exists')==-1:
                raise ValueError, "Could not add file %s: %s" % (dest,output)
    def _check_and_create_dir(self,dirname):
        """Check if a directory exists, and if not, create it in the sandbox and and commit it.
        The directory must be within the sandbox"""
        if not os.path.exists(dirname):
            # If it's not there, check the parent directory
            # (recurses until an existing ancestor is found)
            self._check_and_create_dir(os.path.dirname(dirname))
            os.mkdir(dirname)
            rel_dirname = self._relative_path(dirname,self.sandbox)
            # NOTE(review): "% s" (with a space) is the space-flag form of %s;
            # it formats strings the same as %s, so this works as intended.
            (status,output) = commands.getstatusoutput("cd % s ; cvs add %s " % (self.sandbox,rel_dirname))
            if status!=0:
                raise ValueError, "Could not add directory %s: %s" % (dirname,output)
    def _commit_sandbox_files(self):
        """Commits all of the files currently in the sandbox.
        Returns the output of the CVS commit command"""
        #return commands.getoutput("cd %s ; cvs commit -f -R -m ' ' ." % self.sandbox)
        return commands.getoutput("cd %s ; cvs commit -m '%s' ." % (self.sandbox, self.commitmsg))
    def gethomepaths(self):
        """ Get the list of home directory paths. Some environments have
        a number of different absolute paths mapped to the use home directory,
        which becomes an issue when capturing the code to cvs.
        It typically happens when the value of $HOME is different from
        the standard naming convention on the filesystem.
        The method used here is a little hackyish.
        """
        cwd = os.getcwd()
        home_dir = os.path.expanduser('~')
        # Resolve the home directory as the filesystem reports it by
        # chdir-ing into it and asking for the absolute path
        os.chdir(home_dir)
        fs_dir = os.path.abspath('.')
        os.chdir(cwd)  # I hope this will always get you back to the original place...
        if home_dir!= fs_dir:
            return [home_dir, fs_dir]
        else:
            return [home_dir]
    def _change_base_directory(self,fname,basename):
        """ Change the base directory of fname from oldbase to newbase
        Absolute path of files must be used!"""
        # Compensate the possible problem with incompatible HOME paths
        bases = self.gethomepaths()
        for base in bases:
            if os.path.commonprefix([fname,base]) == base:
                fname = fname.replace(base, '/_HOME', 1)
        # Drop leading delimiter
        # FIXME: the following line is not portable...
        return os.path.join(basename, fname[1:])
    def _relative_path(self,fname,base):
        """Create a relative path from an absolute and a base"""
        if os.path.commonprefix([fname,base])!=base:
            raise ValueError, "Unexpected base in file" + fname
        # Make sure base ends in a slash, or the following will fail
        if base[-1] != '/':
            base = base + '/'
        return fname.replace(base,'')
class CompileData:
    """Holds data associated with a compile"""
    # Records one compiler invocation: timing, user, command line, the source
    # files involved and any local headers they #include, and renders it all
    # as an XML fragment (toxml) or a CVS commit message (getcvscomment).
    def __init__(self,starttime,endtime,sourcefiles,subject,command,success,path,debugging=None,cvs=None):
        """starttime/endtime -- wall-clock times (seconds since epoch)
        sourcefiles -- paths of the source files passed to the compiler
        subject     -- user login name
        command     -- the full compiler command line as a string
        success     -- whether the compile succeeded
        path        -- working directory of the compile
        debugging   -- optional flag: was the user debugging? (None = not asked)
        cvs         -- optional CVS-related data (stored, not used here)"""
        self.timestamp = starttime
        self.timestr = time.ctime(self.timestamp)
        self.sourcefiles = sourcefiles
        self.subject = subject
        self.command = command
        self.time_interval = endtime-starttime
        self.success = success
        self.path = path
        self.debugging = debugging
        self.cvs = cvs
        self.encoding = platform_specific.encoding # 'base64', 'quopri' or 'raw'
        self.headerfiles = []
        # Determine the headerfiles: scan each source file's #includes and
        # keep those that resolve to files next to the source.
        for sourcefile in self.sourcefiles:
            srcdir = os.path.dirname(sourcefile)
            candidates = parseheaders(open(sourcefile).read())
            for h in candidates:
                headerpath = os.path.join(srcdir,h)
                if os.path.exists(headerpath):
                    self.headerfiles.append(headerpath)
    def getcvscomment(self):
        # Build the short XML fragment used as the CVS commit message.
        # Note: the template only consumes timestr/time_interval/success/debug;
        # 'command' is supplied in the mapping but unused by this template.
        cxml_cvs = '''<compile success="%(success)d" %(debug)s>
<timestr>%(timestr)s</timestr>
<time_interval>%(time_interval).2f</time_interval>
</compile>
'''
        if self.debugging is not None:
            debug = 'debugging="%d"' % self.debugging
        else:
            debug = ''
        return cxml_cvs % {
            'timestr' : self.timestr,
            'command' : self.command,
            'success' : self.success,
            'debug' : debug,
            'time_interval' : self.time_interval
            }
    def addtocvs(self,sandbox):
        """Add the files to the CVS repository.
        sandbox - location of CVS sandbox where files need to be copied and committed."""
        commitmsg = self.getcvscomment()
        cvs = CVSUtils(sandbox, commitmsg)
        cvs.commit_files_to_cvs(self.sourcefiles+self.headerfiles)
    def toxml(self):
        # Render a <compile> element followed by one <sourcefile> element per
        # source file and one <headerfile> element per discovered header; the
        # file contents are embedded encoded per self.encoding.
        sfxml = '''<sourcefile success="%(success)d">
<name>%(name)s</name>
<time>%(timestamp)d</time>
<timestr>%(timestr)s</timestr>
<subject>%(subject)s</subject>
<command><![CDATA[%(command)s]]></command>
<time_interval>%(time_interval).2f</time_interval>
<path>%(path)s</path>
<source encode="%(encoding)s"><![CDATA[%(source)s]]></source>
</sourcefile>
'''
        hfxml = '''<headerfile>
<name>%(name)s</name>
<time>%(timestamp)d</time>
<timestr>%(timestr)s</timestr>
<subject>%(subject)s</subject>
<command><![CDATA[%(command)s]]></command>
<time_interval>%(time_interval).2f</time_interval>
<path>%(path)s</path>
<source encode="%(encoding)s"><![CDATA[%(source)s]]></source>
</headerfile>
'''
        cxml = '''<compile success="%(success)d" %(debug)s>
<time>%(timestamp)d</time>
<timestr>%(timestr)s</timestr>
<subject>%(subject)s</subject>
<command><![CDATA[%(command)s]]></command>
<time_interval>%(time_interval).2f</time_interval>
</compile>
'''
        if self.debugging is not None:
            debug = 'debugging="%d"' % self.debugging
        else:
            debug = ''
        return '\n'.join([cxml % {
            'timestamp' : self.timestamp,
            'timestr' : self.timestr,
            'subject' : self.subject,
            'command' : self.command,
            'success' : self.success,
            'debug' : debug,
            'time_interval' : self.time_interval}] +
                         [sfxml % {'name' : name,
                                   'success' : self.success,
                                   'timestamp' : self.timestamp,
                                   'timestr' : self.timestr,
                                   'subject' : self.subject,
                                   'command' : self.command,
                                   'time_interval' : self.time_interval,
                                   'path' : self.path,
                                   'encoding' : self.encoding,
                                   'source' : encode(open(name).read(), self.encoding)}
                          for name in self.sourcefiles] +
                         [hfxml % {'name' : name,
                                   'timestamp' : self.timestamp,
                                   'timestr' : self.timestr,
                                   'subject' : self.subject,
                                   'command' : self.command,
                                   'time_interval' : self.time_interval,
                                   'path' : self.path,
                                   'encoding' : self.encoding,
                                   'source' : encode(open(name).read(), self.encoding)}
                          for name in self.headerfiles])
def set_compiler_invoked():
    """Mark, via an environment variable, that the compiler wrapper has run."""
    os.environ['UMDINST_COMPILER_INVOKED'] = "1"
def compiler_already_invoked():
    """Return True if the compiler-invoked marker is present in the environment.

    Replaces a try/except KeyError around an unused lookup with a plain
    membership test; the observable True/False result is unchanged.
    """
    return 'UMDINST_COMPILER_INVOKED' in os.environ
def ask_if_debugging():
    """Prompt until the user answers y or n; return True iff they answered y."""
    answer = ''
    while answer != 'y' and answer != 'n':
        # NOTE: raw_input is Python-2-only; this module predates Python 3.
        answer = raw_input("Are you debugging? [y/n]: ").lower()
    return answer == 'y'
def is_in_whitelist(subject, whitelistfile):
    """True when *subject* appears (one entry per line) in *whitelistfile*.

    A whitelistfile of None makes open() raise TypeError, which is taken to
    mean "no whitelist configured" and yields False.
    """
    try:
        lines = open(whitelistfile).readlines()
    except TypeError:  # whitelistfile==None
        return False
    approved = [line.rstrip() for line in lines]
    return subject in approved
def identify_sourcefiles(args):
    """Identify source files from a list of command-line arguments.

    args -- Command-line arguments (does not include name of program).
    Returns every argument that does not start with '-', looks like a source
    file, and is readable.  The '-o <outfile>' pair is excluded.  Unlike the
    original, the caller's list is no longer mutated.
    """
    # Work on a copy so the '-o <outfile>' removal below does not mutate the
    # caller's argument list.
    args = list(args)
    # If there's a -o and a filename, remove it.
    try:
        ind = args.index('-o')
        del args[ind + 1]
    except ValueError:
        pass  # no '-o' option present
    except IndexError:
        pass  # '-o' was the last argument; nothing follows it
    # Return all arguments that don't start with -, that are sourcefiles, and that are accessible
    return [fname for fname in args
            if fname[0] != '-' and issourcefile(fname) and os.access(fname, os.R_OK)]
def capture_compile(compiler,argv=sys.argv,logex=None):
    """Capture information associated with a compile.

    compiler -- path of the real compiler to run
    argv     -- full argument vector (argv[0] is the wrapper's own name)
    logex    -- optional extra logfile path to append the XML record to
    Return true if compile succeeded, else false"""
    sandbox=os.path.expanduser(platform_specific.sandbox)
    whitelistfile=platform_specific.whitelistfile #os.path.expanduser(platform_specific.whitelistfile)
    starttime = time.time()
    args = argv[1:]
    # Run the real compiler first; instrumentation happens afterwards.
    success = run(compiler,args)
    # If compile succeeded, ask if debugging
    subject = getlogin()
    if success and is_in_whitelist(subject,whitelistfile):
        is_debugging = ask_if_debugging()
    else:
        is_debugging = None
    endtime = time.time()
    c = CompileData(starttime=starttime,
                    endtime=endtime,
                    sourcefiles=identify_sourcefiles(args),
                    subject=subject,
                    command=' '.join(argv),
                    success=success,
                    path = os.getcwd(),
                    debugging=is_debugging)
    if platform_specific.with_privatecvs:
        #print "Writing to CVS..."
        if sandbox is not None:
            c.addtocvs(sandbox) # Add the files to CVS
    if platform_specific.with_privatelog:
        #print "Writing to Private logfile..."
        logfile = getlogfilepath(os.path.expanduser(platform_specific.privatelogfiledir))
        f = open(logfile,'a')
        f.write(c.toxml())
        f.close()
    if platform_specific.with_pooledlog:
        #print "Writing to Pooled logfile..."
        logfile = getlogfilepath(platform_specific.logfiledir)
        f = open(logfile,'a')
        f.write(c.toxml())
        f.close()
        # NOTE(review): 0644 is Python-2 octal syntax (0o644 in Python 3).
        os.chmod(logfile,0644)
    if logex is not None:
        logfile = logex
        f = open(logfile,'a')
        f.write(c.toxml())
        f.close()
        os.chmod(logfile,0644)
    if platform_specific.with_workflow:
        print "Invoking the online workflow tool..."
    return success
def capture_interactive_run(runprog, argv=sys.argv, logex=None):
    """Capture information associated with an interactive run.
    Return true if the run succeeded, else false."""
    started = time.time()
    succeeded = run(runprog, argv[1:])
    finished = time.time()
    record = InteractiveRunData(starttime=started,
                                endtime=finished,
                                subject=getlogin(),
                                command=getcommand(argv),
                                success=succeeded,
                                path=os.getcwd())
    xml = record.toxml()
    if platform_specific.with_privatelog:
        private_log = getlogfilepath(os.path.expanduser(platform_specific.privatelogfiledir))
        log = open(private_log, 'a')
        log.write(xml)
        log.close()
    if platform_specific.with_pooledlog:
        pooled_log = getlogfilepath(platform_specific.logfiledir)
        log = open(pooled_log, 'a')
        log.write(xml)
        log.close()
        os.chmod(pooled_log, 0o755)
    if logex is not None:
        log = open(logex, 'a')
        log.write(xml)
        log.close()
        os.chmod(logex, 0o755)
    return succeeded
def capture_batch_run(runprog,argv=sys.argv,logex=None):
    """Capture information associated with a batch run.

    runprog -- path of the real batch-submission program to run
    argv    -- full argument vector (argv[0] is the wrapper's own name)
    logex   -- optional extra logfile path to append the XML record to
    Return true if run succeeded, else false"""
    starttime = time.time()
    args = argv[1:]
    success = run(runprog,args)
    endtime = time.time()
    # Identify which file is the script file
    fnames = [fname for fname in args if os.access(fname,os.R_OK)]
    # There should only be either 1 or 0 args
    # if there are more than one, just take the first
    if len(fnames)>0:
        fname = fnames[0]
        script = open(fname).read()
    else:
        script = ''
    br = BatchRunData(starttime=starttime,
                      endtime=endtime,
                      script=script,
                      subject=getlogin(),
                      command=getcommand(argv),
                      success=success,
                      path=os.getcwd())
    if platform_specific.with_privatelog:
        logfile = getlogfilepath(os.path.expanduser(platform_specific.privatelogfiledir))
        f = open(logfile,'a')
        f.write(br.toxml())
        f.close()
    if platform_specific.with_pooledlog:
        logfile = getlogfilepath(platform_specific.logfiledir)
        f = open(logfile,'a')
        f.write(br.toxml())
        f.close()
        # NOTE(review): 0755 is Python-2 octal syntax (0o755 in Python 3).
        os.chmod(logfile,0755)
    if logex is not None:
        logfile = logex
        f = open(logfile,'a')
        f.write(br.toxml())
        f.close()
        os.chmod(logfile,0755)
    return success
def capture_profiled_run(runprog,outfile,argv=sys.argv,logex=None):
    """Capture information associated with a profiled run.

    runprog -- path of the real program to run under the profiler
    outfile -- file where the profiler writes its output
    argv    -- full argument vector (argv[0] is the wrapper's own name)
    logex   -- optional extra logfile path to append the XML record to
    Return true if run succeeded, else false"""
    starttime = time.time()
    args = argv[1:]
    success = run(runprog,args)
    endtime = time.time()
    # If the file can't be read, just keep the field blank
    try:
        profiledata=open(outfile).read()
    except:
        profiledata = ' '
    pr = ProfileRunData(starttime=starttime,
                        endtime=endtime,
                        subject=getlogin(),
                        command=getcommand(argv),
                        success=success,
                        path=os.getcwd(),
                        profiledata=profiledata)
    if platform_specific.with_privatelog:
        logfile = getlogfilepath(os.path.expanduser(platform_specific.privatelogfiledir))
        f = open(logfile,'a')
        f.write(pr.toxml())
        f.close()
    if platform_specific.with_pooledlog:
        logfile = getlogfilepath(platform_specific.logfiledir)
        f = open(logfile,'a')
        f.write(pr.toxml())
        f.close()
        os.chmod(logfile,0755)
    if logex is not None:
        logfile = logex
        f = open(logfile,'a')
        f.write(pr.toxml())
        f.close()
        os.chmod(logfile,0755)
    return success
def capture_profile_report(runprog,argv=sys.argv,logex=None):
    """Capture information associated with a profile report generation program.

    runprog -- path of the report-generation program to run
    argv    -- full argument vector (argv[0] is the wrapper's own name)
    logex   -- optional extra logfile path to append the XML record to
    Return true if run succeeded, else false"""
    starttime = time.time()
    args = argv[1:]
    # Run via the shell so the report's stdout can be captured and logged.
    (status, output) = commands.getstatusoutput(' '.join([runprog]+args))
    endtime = time.time()
    # Send the output to standard out
    print output
    if status==0:
        success = True
    else:
        success = False
    subject = getlogin()
    rep = ProfilerReporterData(starttime=starttime,
                               endtime=endtime,
                               subject=subject,
                               command=' '.join(argv),
                               success=success,
                               path= os.getcwd(),
                               reportdata = output)
    if platform_specific.with_privatelog:
        logfile = getlogfilepath(os.path.expanduser(platform_specific.privatelogfiledir))
        f = open(logfile,'a')
        f.write(rep.toxml())
        f.close()
    if platform_specific.with_pooledlog:
        logfile = getlogfilepath(platform_specific.logfiledir)
        f = open(logfile,'a')
        f.write(rep.toxml())
        f.close()
        os.chmod(logfile,0755)
    if logex is not None:
        logfile = logex
        f = open(logfile,'a')
        f.write(rep.toxml())
        f.close()
        os.chmod(logfile,0755)
    return success
def capture_debugger(debuggerprog, argv=sys.argv, logex=None):
    """Capture information associated with a debugger invocation.

    debuggerprog -- path of the real debugger to run
    argv         -- full argument vector (argv[0] is the wrapper's own name)
    logex        -- optional extra logfile path, as in the other capture_* helpers
    Return true if debugger succeeded, else false.
    """
    starttime = time.time()
    args = argv[1:]
    success = run(debuggerprog, args)
    endtime = time.time()
    subject = getlogin()
    deb = DebuggerData(starttime=starttime,
                       endtime=endtime,
                       subject=subject,
                       command=' '.join(argv),
                       success=success,
                       path=os.getcwd())
    if platform_specific.with_privatelog:
        logfile = getlogfilepath(os.path.expanduser(platform_specific.privatelogfiledir))
        f = open(logfile, 'a')
        f.write(deb.toxml())
        f.close()
    if platform_specific.with_pooledlog:
        logfile = getlogfilepath(platform_specific.logfiledir)
        f = open(logfile, 'a')
        f.write(deb.toxml())
        f.close()
        os.chmod(logfile, 0o755)
    # BUGFIX: logex used to be accepted but silently ignored; honor it the
    # same way every other capture_* function in this module does.
    if logex is not None:
        f = open(logex, 'a')
        f.write(deb.toxml())
        f.close()
        os.chmod(logex, 0o755)
    return success
def capture_make(makeprog, logex=None):
    """Capture information associated with a make invocation.

    NOTE(review): logging of the MakeData record is not implemented; this
    function always raises after running make, as the original did.
    """
    starttime = time.time()
    args = sys.argv[1:]
    success = run(makeprog, args)
    endtime = time.time()
    c = MakeData(starttime,
                 endtime,
                 get_makefilename(args),
                 getlogin(),
                 ' '.join(sys.argv),
                 success)
    # BUGFIX: parenthesized raise -- 'raise ValueError, msg' is Python-2-only
    # syntax; this form works in both Python 2 and 3.
    raise ValueError("This function has not been implemented properly yet!")
class AbstractRunData:
    """Parent class for the *RunData record classes.

    Subclasses must provide a type() method (the job-type tag string) and an
    extrafields() method (extra XML spliced into the <job> element).
    """
    def __init__(self, starttime, endtime, subject, command, success, path):
        self.timestamp = starttime
        self.timestr = time.ctime(self.timestamp)
        self.time_interval = endtime - starttime
        self.subject = subject
        self.command = command
        self.success = success
        self.path = path
    def toxml(self):
        """Render this record as a <job> XML fragment."""
        template = '''<job type="%(type)s" success="%(success)d">
<subject>%(subject)s</subject>
<time>%(timestamp)d</time>
<timestr>%(timestr)s</timestr>
<command><![CDATA[%(command)s]]></command>
<path>%(path)s</path>
<time_interval>%(time_interval).2f</time_interval>
%(extra)s
</job>
'''
        values = {'type': self.type(),
                  'success': self.success,
                  'subject': self.subject,
                  'timestamp': self.timestamp,
                  'timestr': self.timestr,
                  'time_interval': self.time_interval,
                  'path': self.path,
                  'command': self.command,
                  'extra': self.extrafields()}
        return template % values
class InteractiveRunData(AbstractRunData):
    """Run record for an interactive (non-batch) program run."""
    def __init__(self, starttime, endtime, subject, command, success, path):
        AbstractRunData.__init__(self, starttime=starttime, endtime=endtime,
                                 subject=subject, command=command,
                                 success=success, path=path)
    def type(self):
        """Job-type tag used in the XML output."""
        return "interactive"
    def extrafields(self):
        """Interactive runs carry no extra XML fields."""
        return ""
class BatchRunData(AbstractRunData):
    """Run record for a batch run; additionally stores the submitted script."""
    def __init__(self, starttime, endtime, script, subject, command, success, path):
        AbstractRunData.__init__(self, starttime=starttime, endtime=endtime,
                                 subject=subject, command=command,
                                 success=success, path=path)
        self.script = script
        # 'base64', 'quopri' or 'raw'
        self.encoding = platform_specific.encoding
    def type(self):
        """Job-type tag used in the XML output."""
        return "batch"
    def extrafields(self):
        """Embed the (encoded) batch script in the XML record."""
        return '<script encode="%s"><![CDATA[%s]]></script>' % (self.encoding, encode(self.script, self.encoding))
class ProfileRunData(AbstractRunData):
    """Run record for a profiled run; additionally stores the profiler output."""
    def __init__(self, starttime, endtime, subject, command, success, path, profiledata):
        AbstractRunData.__init__(self, starttime=starttime, endtime=endtime,
                                 subject=subject, command=command,
                                 success=success, path=path)
        self.profiledata = profiledata
    def type(self):
        """Job-type tag used in the XML output."""
        return "profiled"
    def extrafields(self):
        """Embed the raw profiler output in the XML record."""
        return "<profiledata><![CDATA[" + self.profiledata + "]]></profiledata>"
class ProfilerReporterData:
    """Record for a run of the profiler's report-generation tool."""
    def __init__(self, starttime, endtime, subject, command, success, path, reportdata):
        # NOTE: 'success' is accepted for interface parity with the other
        # record classes but is not stored (and not used by toxml).
        self.timestamp = starttime
        self.timestr = time.ctime(self.timestamp)
        self.subject = subject
        self.command = command
        self.path = path
        self.time_interval = endtime - starttime
        self.reportdata = reportdata
    def toxml(self):
        """Render the record as a <profile_report> XML fragment."""
        values = {'subject': self.subject,
                  'timestamp': self.timestamp,
                  'timestr': self.timestr,
                  'command': self.command,
                  'path': self.path,
                  'time_interval': self.time_interval,
                  'reportdata': self.reportdata}
        return """<profile_report>
<subject>%(subject)s</subject>
<time>%(timestamp)d</time>
<timestr>%(timestr)s</timestr>
<command><![CDATA[%(command)s]]></command>
<path>%(path)s</path>
<time_interval>%(time_interval).2f</time_interval>
<contents><![CDATA[%(reportdata)s]]></contents>
</profile_report>""" % values
class DebuggerData:
    """Data associated with a single invocation of a debugger."""
    def __init__(self, starttime, endtime, subject, command, success, path):
        self.timestamp = starttime
        self.timestr = time.ctime(self.timestamp)
        self.subject = subject
        self.command = command
        self.success = success
        self.path = path
        self.time_interval = endtime - starttime
    def toxml(self):
        """Render the record as a <debug> XML fragment."""
        values = {'success': self.success,
                  'subject': self.subject,
                  'timestamp': self.timestamp,
                  'timestr': self.timestr,
                  'command': self.command,
                  'time_interval': self.time_interval,
                  'path': self.path}
        return """<debug success='%(success)d'>
<subject>%(subject)s</subject>
<time>%(timestamp)d</time>
<timestr>%(timestr)s</timestr>
<command><![CDATA[%(command)s]]></command>
<path>%(path)s</path>
<time_interval>%(time_interval).2f</time_interval>
</debug>""" % values
class MakeData:
    """Placeholder record for make invocations; not yet implemented."""
    def __init__(self, starttime, endtime, subject, command, success, path):
        # Arguments are accepted for interface parity but not stored yet.
        pass
    def toxml(self):
        """Return a stub XML fragment for the make record."""
        return "<make></make>"
# Manual test entry point: wrap a real gcc invocation when this module is
# executed directly.
if __name__=='__main__':
    compiler = '/usr/bin/gcc'
    #capture_compile(compiler,platform_specific.logfiledir)
    capture_compile(compiler)
| |
from unittest import TestCase
import networkx
from six import StringIO
from gtfspy.routing.connection import Connection
from gtfspy.routing.label import min_arrival_time_target, LabelTimeWithBoardingsCount, LabelTime
from gtfspy.routing.multi_objective_pseudo_connection_scan_profiler import MultiObjectivePseudoCSAProfiler
from gtfspy.routing.node_profile_multiobjective import NodeProfileMultiObjective
import pyximport
pyximport.install()
class TestMultiObjectivePseudoCSAProfiler(TestCase):
    # noinspection PyAttributeOutsideInit
    def setUp(self):
        """Build the shared fixture: a small transit event list plus two walk edges."""
        # Each tuple is splatted into Connection(*el); from the assertions in the
        # tests below, the first four fields are (dep_stop, arr_stop, dep_time,
        # arr_time) -- the trip-id string and trailing int follow Connection's
        # remaining parameters (see gtfspy.routing.connection.Connection).
        event_list_raw_data = [
            (2, 4, 40, 50, "trip_6", 1),
            (1, 3, 32, 40, "trip_5", 1),
            (3, 4, 32, 35, "trip_4", 1),
            (2, 3, 25, 30, "trip_3", 1),
            (1, 2, 10, 20, "trip_2", 1),
            (0, 1, 0, 10, "trip_1", 1)
        ]
        self.transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        self.walk_network = networkx.Graph()
        self.walk_network.add_edge(1, 2, d_walk=20)
        self.walk_network.add_edge(3, 4, d_walk=15)
        self.walk_speed = 1
        self.target_stop = 4
        self.transfer_margin = 0
        self.start_time = 0
        self.end_time = 50
    def test_pseudo_connections(self):
        """Walk edges should be expanded into pseudo-connections that feed the
        next transit departure, with consistent stop-profile indexing."""
        event_list_raw_data = [
            (0, 1, 10, 20, "trip_6", 1),
            (2, 3, 42, 50, "trip_5", 1)
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_network = networkx.Graph()
        walk_network.add_edge(1, 2, d_walk=20)
        walk_speed = 1
        target_stop = 3
        transfer_margin = 0
        start_time = 0
        end_time = 50
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        # 2 transit connections + 1 walk pseudo-connection between stops 1 and 2.
        self.assertEqual(len(csa_profile._all_connections), 3)
        pseudo_connection = csa_profile._all_connections[1]
        self.assertTrue(pseudo_connection.is_walk)
        # The walk departs just in time (walk takes 20 s) for the 42 s departure.
        self.assertEqual(pseudo_connection.departure_time, 42 - 20)
        self.assertEqual(pseudo_connection.arrival_time, 42)
        self.assertEqual(pseudo_connection.departure_stop, 1)
        self.assertEqual(pseudo_connection.arrival_stop, 2)
        node_to_connection_dep_times = {
            0: [10],
            1: [42 - 20],
            2: [42],
            3: [],
        }
        # Each stop profile must index exactly the departure times listed above.
        for node, dep_times in node_to_connection_dep_times.items():
            profile = csa_profile._stop_profiles[node]
            for dep_time in dep_times:
                self.assertIn(dep_time, profile.dep_times_to_index, "Node: " + str(node))
            for dep_time in profile.dep_times_to_index:
                self.assertIn(dep_time, dep_times, "Node: " + str(node))
        for connection in csa_profile._all_connections:
            arrival_stop_profile = csa_profile._stop_profiles[connection.arrival_stop]
            departure_stop_profile = csa_profile._stop_profiles[connection.departure_stop]
            self.assertIsInstance(arrival_stop_profile, NodeProfileMultiObjective)
            self.assertIsInstance(departure_stop_profile, NodeProfileMultiObjective)
            self.assertIn(connection.departure_time, departure_stop_profile.dep_times_to_index)
            if connection.arrival_stop_next_departure_time != float('inf'):
                self.assertIn(connection.arrival_stop_next_departure_time, arrival_stop_profile.dep_times_to_index)
    def test_pseudo_connections_with_transfer_margin(self):
        """A walk pseudo-connection should be scheduled so the walk (10 s)
        finishes before the 42 s departure; the transfer margin itself is
        handled at the arrival stop, not folded into the walk times."""
        event_list_raw_data = [
            (0, 1, 10, 20, "trip_6", 1),
            (2, 3, 42, 50, "trip_5", 1)
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_network = networkx.Graph()
        walk_network.add_edge(1, 2, d_walk=10)
        walk_speed = 1
        target_stop = 3
        transfer_margin = 5
        start_time = 0
        end_time = 50
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        transfer_connection = csa_profile._all_connections[1]
        self.assertEqual(transfer_connection.arrival_stop, 2)
        self.assertEqual(transfer_connection.arrival_stop_next_departure_time, 42)
        self.assertEqual(transfer_connection.departure_stop, 1)
        self.assertEqual(transfer_connection.departure_time, 42 - 10)
        self.assertEqual(transfer_connection.is_walk, True)
        self.assertEqual(transfer_connection.arrival_time, 42)
def test_basics(self):
csa_profile = MultiObjectivePseudoCSAProfiler(self.transit_connections, self.target_stop,
self.start_time, self.end_time, self.transfer_margin,
self.walk_network, self.walk_speed)
csa_profile.run()
stop_3_labels = csa_profile.stop_profiles[3].get_final_optimal_labels()
self.assertEqual(len(stop_3_labels), 1)
self.assertIn(LabelTimeWithBoardingsCount(32, 35, n_boardings=1, first_leg_is_walk=False), stop_3_labels)
stop_2_labels = csa_profile.stop_profiles[2].get_final_optimal_labels()
self.assertEqual(len(stop_2_labels), 3)
self.assertIn(LabelTimeWithBoardingsCount(40, 50, n_boardings=1, first_leg_is_walk=False), stop_2_labels)
self.assertIn(LabelTimeWithBoardingsCount(25, 35, n_boardings=2, first_leg_is_walk=False), stop_2_labels)
self.assertIn(LabelTimeWithBoardingsCount(25, 45, n_boardings=1, first_leg_is_walk=False), stop_2_labels)
stop_one_profile = csa_profile.stop_profiles[1]
stop_one_pareto_labels = stop_one_profile.get_final_optimal_labels()
labels = list()
# these should exist at least:
labels.append(LabelTimeWithBoardingsCount(departure_time=10, arrival_time_target=35, n_boardings=3, first_leg_is_walk=False))
labels.append(LabelTimeWithBoardingsCount(departure_time=20, arrival_time_target=50, n_boardings=1, first_leg_is_walk=False))
labels.append(LabelTimeWithBoardingsCount(departure_time=32, arrival_time_target=55, n_boardings=1, first_leg_is_walk=False))
    def test_multiple_targets(self):
        """Profiling towards a list of target stops should keep one optimal
        label per distinct journey from the source."""
        event_list_raw_data = [
            (1, 4, 40, 50, "trip", 1),
            (1, 5, 30, 40, "trip", 1),
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_network = networkx.Graph()
        walk_speed = 1
        source_stop = 1
        targets = [4, 5]
        transfer_margin = 0
        start_time = 0
        end_time = 60
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, targets,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        csa_profile.run()
        source_stop_profile = csa_profile.stop_profiles[source_stop]
        final_labels = source_stop_profile.get_final_optimal_labels()
        self.assertEqual(2, len(final_labels))
    def test_simple(self):
        """A walk (stop 1 -> 2), one trip (2 -> 4): the source should get a
        single optimal label whose first leg is the walk."""
        event_list_raw_data = [
            (2, 4, 40, 50, "trip_5", 1),
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_network = networkx.Graph()
        walk_network.add_edge(1, 2, d_walk=20)
        walk_network.add_edge(3, 4, d_walk=15)
        walk_speed = 1
        source_stop = 1
        target_stop = 4
        transfer_margin = 0
        start_time = 0
        end_time = 50
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        csa_profile.run()
        source_stop_profile = csa_profile.stop_profiles[source_stop]
        self.assertTrue(source_stop_profile._finalized)
        self.assertTrue(source_stop_profile._closed)
        source_stop_labels = source_stop_profile.get_final_optimal_labels()
        labels = list()
        labels.append(LabelTimeWithBoardingsCount(departure_time=20,
                                                  arrival_time_target=50,
                                                  n_boardings=1,
                                                  first_leg_is_walk=True))
        self._assert_label_sets_equal(
            labels,
            source_stop_labels
        )
    def test_last_leg_is_walk(self):
        """Trip 0 -> 1 followed by a 20 s walk to the target: arrival at the
        target should include the final walk leg (10 + 20 = 30)."""
        event_list_raw_data = [
            (0, 1, 0, 10, "trip_1", 1)
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_network = networkx.Graph()
        walk_network.add_edge(1, 2, d_walk=20)
        walk_speed = 1
        source_stop = 0
        target_stop = 2
        transfer_margin = 0
        start_time = 0
        end_time = 50
        labels = list()
        labels.append(LabelTimeWithBoardingsCount(departure_time=0, arrival_time_target=30, n_boardings=1, first_leg_is_walk=False))
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        csa_profile.run()
        found_tuples = csa_profile.stop_profiles[source_stop].get_final_optimal_labels()
        self._assert_label_sets_equal(found_tuples, labels)
    def test_walk_is_faster_than_by_trip(self):
        """When walking straight to the target (2 s) beats the trip (10 s),
        the walk dominates and no trip-based labels remain."""
        event_list_raw_data = [
            (0, 1, 0, 10, "trip_1", 1)
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_speed = 0.5
        source_stop = 0
        target_stop = 1
        transfer_margin = 0
        start_time = 0
        end_time = 50
        walk_network = networkx.Graph()
        walk_network.add_edge(0, 1, d_walk=1)
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        csa_profile.run()
        source_profile = csa_profile.stop_profiles[source_stop]
        # 1 m at 0.5 m/s = 2 s direct walk.
        self.assertEqual(min_arrival_time_target(source_profile.evaluate(0, first_leg_can_be_walk=True)), 2)
        found_tuples = source_profile.get_final_optimal_labels()
        self.assertEqual(len(found_tuples), 0)
def test_no_multiple_walks(self):
event_list_raw_data = [
(0, 1, 0, 1, "trip_1", 1),
(1, 0, 0, 1, "trip_2", 1),
(0, 1, 2, 3, "trip_3", 1),
(1, 0, 2, 3, "trip_4", 1),
(0, 1, 4, 5, "trip_5", 1),
(1, 0, 4, 5, "trip_6", 1),
(1, 2, 5, 6, "trip_7", 1),
(2, 1, 5, 6, "trip_8", 1),
(1, 2, 2, 3, "trip_7", 2),
(2, 1, 2, 3, "trip_8", 2)
]
transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
walk_network = networkx.Graph()
walk_network.add_edge(0, 1, d_walk=1)
walk_network.add_edge(2, 1, d_walk=1)
walk_speed = 10
transfer_margin = 0
start_time = 0
end_time = 50
csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, 2,
start_time, end_time, transfer_margin,
walk_network, walk_speed)
csa_profile.run()
source_profile = csa_profile.stop_profiles[0]
print(source_profile.get_final_optimal_labels())
for label in source_profile.get_final_optimal_labels():
self.assertGreater(label.n_boardings, 0)
    def test_target_node_not_in_walk_network(self):
        """Profiling must still work when the target stop has no walk edges."""
        event_list_raw_data = [
            (0, 1, 0, 10, "trip_1", 1)
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_speed = 2
        source_stop = 0
        target_stop = 1
        transfer_margin = 0
        start_time = 0
        end_time = 50
        walk_network = networkx.Graph()
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        csa_profile.run()
        source_profile = csa_profile.stop_profiles[source_stop]
        self.assertEqual(min_arrival_time_target(source_profile.evaluate(0, 0)), 10)
        found_tuples = source_profile.get_final_optimal_labels()
        self.assertEqual(len(found_tuples), 1)
    def test_pareto_optimality(self):
        """Both the direct trip (later arrival, 1 boarding) and the two-trip
        journey (earlier arrival, 2 boardings) are Pareto-optimal."""
        event_list_raw_data = [
            (0, 2, 0, 10, "trip_1", 1),
            (0, 1, 2, 5, "trip_2", 1),
            (1, 2, 5, 8, "trip_3", 1)
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_speed = 2
        source_stop = 0
        target_stop = 2
        transfer_margin = 0
        start_time = 0
        end_time = 20
        walk_network = networkx.Graph()
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        csa_profile.run()
        source_profile = csa_profile.stop_profiles[source_stop]
        self.assertEqual(min_arrival_time_target(source_profile.evaluate(0, 0)), 8)
        found_labels = source_profile.get_final_optimal_labels()
        labels_should_be = list()
        labels_should_be.append(LabelTimeWithBoardingsCount(0, 10, n_boardings=1, first_leg_is_walk=False))
        labels_should_be.append(LabelTimeWithBoardingsCount(2, 8, n_boardings=2, first_leg_is_walk=False))
        self._assert_label_sets_equal(found_labels, labels_should_be)
    def test_transfer_margin(self):
        """A zero-slack transfer at stop 1 (arrive 50, depart 50) must survive
        with margin 0 but be rejected with margin 1; the same-trip continuation
        from stop 1 is unaffected by the margin."""
        walk_speed = 1
        target_stop = 2
        start_time = 0
        end_time = 60
        transit_connections = [
            Connection(0, 1, 40, 50, "trip_1", 1),
            Connection(1, 2, 50, 60, "trip_1", 2),
            Connection(3, 1, 40, 50, "trip_2", 1),
        ]
        # case without any transfer margin
        transfer_margin = 0
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      networkx.Graph(), walk_speed)
        csa_profile.run()
        stop_profile_1 = csa_profile.stop_profiles[1]
        stop_profile_3 = csa_profile.stop_profiles[3]
        self.assertEqual(1, len(stop_profile_1.get_final_optimal_labels()))
        self.assertEqual(1, len(stop_profile_3.get_final_optimal_labels()))
        # case with transfer margin
        transfer_margin = 1
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      networkx.Graph(), walk_speed)
        csa_profile.run()
        stop_profile_3 = csa_profile.stop_profiles[3]
        stop_profile_1 = csa_profile.stop_profiles[1]
        self.assertEqual(0, len(stop_profile_3.get_final_optimal_labels()))
        self.assertEqual(1, len(stop_profile_1.get_final_optimal_labels()))
    def test_possible_transfer_margin_bug_with_multiple_arrivals(self):
        """Regression test: with two arrivals at stop 1 and a walk to stop 2,
        only the arrival that leaves enough transfer margin (from stop 0,
        arriving 101) may reach the 106 departure; the 104 arrival from
        stop 4 must not (104 + 1 walk + 2 margin > 106)."""
        walk_speed = 1
        target_stop = 3
        start_time = 0
        end_time = 200
        transfer_margin = 2
        transit_connections = [
            Connection(0, 1, 100, 101, "trip_0", 1),
            Connection(4, 1, 102, 104, "trip_1", 1),
            Connection(2, 3, 106, 108, "trip_2", 1)
        ]
        walk_network = networkx.Graph()
        walk_network.add_edge(1, 2, d_walk=1)
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed)
        csa_profile.run()
        profile = csa_profile.stop_profiles[4]
        self.assertEqual(len(profile.get_final_optimal_labels()), 0)
        profile = csa_profile.stop_profiles[0]
        self.assertEqual(len(profile.get_final_optimal_labels()), 1)
    def test_transfer_margin_with_walk(self):
        """Larger transfer margins should force progressively earlier
        departures from stop 0 to still catch the 1060 connection after the
        5 s walk from stop 1 to stop 2."""
        walk_speed = 1
        target_stop = 3
        start_time = 0
        end_time = 2000
        transit_connections = [
            Connection(0, 1, 1000, 1010, "trip__2", 1),
            Connection(0, 1, 1010, 1020, "trip__1", 1),
            Connection(0, 1, 1020, 1030, "trip_0", 1),
            Connection(0, 1, 1000, 1010, "trip_1", 1),
            Connection(0, 1, 1010, 1020, "trip_2", 1),
            Connection(0, 1, 1020, 1030, "trip_3", 1),
            Connection(0, 1, 1030, 1040, "trip_4", 1),
            Connection(2, 3, 1060, 1070, "trip_6", 1),
        ]
        walk_network = networkx.Graph()
        walk_network.add_edge(1, 2, d_walk=5)
        # Each margin is paired with the latest feasible departure time.
        transfer_margins = [10, 20, 30, 40, 0]
        journey_dep_times = [1030, 1020, 1010, 1000, 1030]
        for transfer_margin, dep_time in zip(transfer_margins, journey_dep_times):
            csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                          start_time, end_time, transfer_margin,
                                                          walk_network, walk_speed)
            csa_profile.run()
            profile = csa_profile.stop_profiles[0]
            self.assertEqual(len(profile.get_final_optimal_labels()), 1, "transfer_margin=" + str(transfer_margin))
            label = profile.get_final_optimal_labels()[0]
            self.assertEqual(label.departure_time, dep_time, "transfer_margin=" + str(transfer_margin))
    def test_basics_no_transfer_tracking(self):
        """Same fixture as test_basics, but with track_vehicle_legs=False the
        labels are plain LabelTime objects (no boarding counts)."""
        csa_profile = MultiObjectivePseudoCSAProfiler(
            self.transit_connections, self.target_stop,
            self.start_time, self.end_time, self.transfer_margin,
            self.walk_network, self.walk_speed, track_vehicle_legs=False
        )
        csa_profile.run()
        stop_3_pareto_tuples = csa_profile.stop_profiles[3].get_final_optimal_labels()
        self.assertEqual(len(stop_3_pareto_tuples), 1)
        self.assertIn(LabelTime(32., 35.), stop_3_pareto_tuples)
        stop_2_pareto_tuples = csa_profile.stop_profiles[2].get_final_optimal_labels()
        self.assertEqual(len(stop_2_pareto_tuples), 2)
        self.assertIn(LabelTime(40., 50.), stop_2_pareto_tuples)
        self.assertIn(LabelTime(25., 35.), stop_2_pareto_tuples)
        source_stop_profile = csa_profile.stop_profiles[1]
        source_stop_pareto_optimal_tuples = source_stop_profile.get_final_optimal_labels()
        pareto_tuples = list()
        pareto_tuples.append(LabelTime(departure_time=10, arrival_time_target=35))
        pareto_tuples.append(LabelTime(departure_time=20, arrival_time_target=50))
        pareto_tuples.append(LabelTime(departure_time=32, arrival_time_target=55))
        self._assert_label_sets_equal(
            pareto_tuples,
            source_stop_pareto_optimal_tuples
        )
    def test_transfers_only(self):
        """With track_time=False, only the number of boardings is optimized.

        Each listed stop should end up with exactly one optimal label whose
        n_boardings matches the expected minimal vehicle-leg count.
        """
        event_list_raw_data = [
            (7, 2, 20, 30, "trip_6", 1),
            (2, 4, 40, 50, "trip_5", 1),
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_network = networkx.Graph()
        walk_network.add_edge(1, 2, d_walk=20)
        walk_network.add_edge(3, 4, d_walk=15)
        walk_speed = 1
        target_stop = 4
        transfer_margin = 0
        start_time = 0
        end_time = 50
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed, track_time=False)
        csa_profile.run()
        # Expected minimal number of boardings from each origin stop.
        stop_to_n_boardings = {
            2: 1,
            7: 2,
            3: 0
        }
        for stop, n_veh_legs in stop_to_n_boardings.items():
            labels = csa_profile.stop_profiles[stop].get_final_optimal_labels()
            self.assertEqual(len(labels), 1)
            self.assertEqual(labels[0].n_boardings, n_veh_legs)
    def test_reset(self):
        """reset() with a new target list should rerun cleanly.

        After resetting the target from stop 2 to stop 1, the per-node
        optimal-label counts must reflect the new target.
        """
        walk_speed = 1
        target_stop = 2
        start_time = 0
        end_time = 60
        transfer_margin = 0
        transit_connections = [
            Connection(0, 1, 40, 50, "trip_1", 1),
            Connection(1, 2, 55, 60, "trip_1", 1),
            Connection(3, 1, 40, 60, "trip_2", 1)
        ]
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      networkx.Graph(), walk_speed)
        csa_profile.run()
        nodes = [0, 1, 2, 3]
        label_counts = [1, 1, 0, 0]
        for node, count in zip(nodes, label_counts):
            n_labels = len(csa_profile.stop_profiles[node].get_final_optimal_labels())
            self.assertEqual(n_labels, count)
        # Re-target the same profiler at stop 1 and rerun.
        target_stops = [1]
        csa_profile.reset(target_stops)
        csa_profile.run()
        label_counts = [1, 0, 0, 1]
        for node, count in zip(nodes, label_counts):
            n_labels = len(csa_profile.stop_profiles[node].get_final_optimal_labels())
            self.assertEqual(n_labels, count)
        # TODO: perform a check for the reinitialization of trip_labels
        # THIS IS NOT YET TESTED but should work at the moment
        # RK 9.1.2017
    def test_550_problem(self):
        """Regression test for real unixtimes.

        There used to be a problem when working with real unixtimes
        (c-side floating point number problems); this test is one check
        for that: label durations must exactly equal the integer
        arrival - departure differences.
        """
        event_data = StringIO(
            "from_stop_I,to_stop_I,dep_time_ut,arr_time_ut,route_type,route_id,trip_I,seq\n" +
            "2198,2247,1475530740,1475530860,3,2550,158249,36\n" +
            "2247,2177,1475530860,1475530980,3,2550,158249,37\n")
        # Local import: pandas is only needed by this one test.
        import pandas as pd
        events = pd.read_csv(event_data)
        events.sort_values("dep_time_ut", ascending=False, inplace=True)
        connections = [
            Connection(int(e.from_stop_I), int(e.to_stop_I), int(e.dep_time_ut), int(e.arr_time_ut),
                       int(e.trip_I),
                       int(e.seq))
            for e in events.itertuples()
        ]
        csa_profiler = MultiObjectivePseudoCSAProfiler(connections, 2177,
                                                       0, 1475530860*10, 0,
                                                       networkx.Graph(), 0)
        csa_profiler.run()
        profiles = csa_profiler.stop_profiles
        labels_2198 = profiles[2198].get_final_optimal_labels()
        self.assertEqual(len(labels_2198), 1)
        self.assertEqual(labels_2198[0].duration(), 1475530980 - 1475530740)
        labels_2247 = profiles[2247].get_final_optimal_labels()
        self.assertEqual(len(labels_2247), 1)
        self.assertEqual(labels_2247[0].duration(), 1475530980 - 1475530860)
def test_transfer_on_same_stop_with_multiple_departures(self):
walk_speed = 1000
target_stop = 5
start_time = 0
end_time = 60
transfer_margin = 0
transit_connections = [
Connection(0, 4, 30, 40, "trip_1", 1),
Connection(4, 1, 50, 60, "trip_2", 1),
Connection(4, 2, 50, 60, "trip_3", 1),
Connection(4, 3, 50, 60, "trip_4", 1),
Connection(4, target_stop, 70, 100, "trip_5", 1)
]
csa_profiler = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
start_time, end_time, transfer_margin,
networkx.Graph(), walk_speed)
csa_profiler.run()
profiles = csa_profiler.stop_profiles
assert(profiles[0].get_final_optimal_labels()[0])
assert(len(profiles[0].get_final_optimal_labels()) > 0)
def test_transfer_connections_do_not_affect_transfers(self):
walk_speed = 1000
target_stop = 1233412
start_time = 0
end_time = 60
transfer_margin = 0
transit_connections = [
Connection(0, 1, 30, 40, "trip_1", 1),
Connection(3, 4, 45, 50, "trip_2", 1),
Connection(4, 3, 45, 50, "trip_3", 1),
Connection(5, 3, 45, 50, "trip_4", 1),
Connection(1, target_stop, 70, 100, "trip_5", 1)
]
walk_network = networkx.Graph()
walk_network.add_edge(1, 3, d_walk=1)
walk_network.add_edge(1, 4, d_walk=1)
walk_network.add_edge(1, 5, d_walk=1)
csa_profiler = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
start_time, end_time, transfer_margin,
walk_network, walk_speed)
csa_profiler.run()
profiles = csa_profiler.stop_profiles
assert(profiles[0].get_final_optimal_labels()[0])
assert(len(profiles[0].get_final_optimal_labels()) > 0)
    def test_transfer_connections_do_not_affect_transfers2(self):
        """Walking legs interleaved with rides: stop 4 should reach the
        target with exactly one optimal label (2 boardings, arrive at 7).
        """
        walk_speed = 1
        target_stop = 0
        start_time = 0
        end_time = 60
        transfer_margin = 0
        transit_connections = [
            Connection(3, 0, 10, 11, "trip_1", 1),
            Connection(2, 1, 5, 6, "trip_2", 1),
            Connection(4, 3, 0, 1, "trip_3", 1)
        ]
        walk_network = networkx.Graph()
        walk_network.add_edge(2, 3, d_walk=1)
        walk_network.add_edge(1, 0, d_walk=1)
        csa_profiler = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                       start_time, end_time, transfer_margin,
                                                       walk_network, walk_speed)
        csa_profiler.run()
        profiles = csa_profiler.stop_profiles
        assert(len(profiles[4].get_final_optimal_labels()) == 1)
        optimal_label = profiles[4].get_final_optimal_labels()[0]
        self.assertEqual(optimal_label.departure_time, 0)
        self.assertEqual(optimal_label.arrival_time_target, 7)
        self.assertEqual(optimal_label.n_boardings, 2)
def test_transfer_connections_do_not_affect_transfers3(self):
walk_speed = 1
target_stop = 0
start_time = 0
end_time = 60
transfer_margin = 0
transit_connections = [
Connection(3, 0, 10, 11, "t1", 1),
Connection(2, 1, 5, 6, "t2", 1),
Connection(7, 2, 3, 4, "tX", 1),
Connection(5, 6, 2, 3, "--", 1),
Connection(4, 3, 0, 1, "t3", 1)
]
walk_network = networkx.Graph()
walk_network.add_edge(7, 3, d_walk=1)
walk_network.add_edge(1, 0, d_walk=1)
walk_network.add_edge(5, 3, d_walk=1)
csa_profiler = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
start_time, end_time, transfer_margin,
walk_network, walk_speed)
csa_profiler.run()
profiles = csa_profiler.stop_profiles
print(profiles[4].get_final_optimal_labels()[0])
optimal_labels = profiles[4].get_final_optimal_labels()
assert(len(optimal_labels) == 2)
boardings_to_arr_time = {}
for label in optimal_labels:
boardings_to_arr_time[label.n_boardings] = label.arrival_time_target
self.assertEqual(boardings_to_arr_time[2], 11)
self.assertEqual(boardings_to_arr_time[3], 7)
def _assert_label_sets_equal(self, found_tuples, should_be_tuples):
self.assertEqual(len(found_tuples), len(should_be_tuples))
for found_tuple in found_tuples:
self.assertIn(found_tuple, should_be_tuples)
for should_be_tuple in should_be_tuples:
self.assertIn(should_be_tuple, found_tuples)
    def test_stored_route(self):
        """With track_route=True every stored label chain must describe a
        valid journey: unique vehicle legs forming a continuous path.
        """
        # TODO:
        # - test with multiple targets
        # - test with continuing route
        # - test that timestamps for label and the connection objects match
        csa_profile = MultiObjectivePseudoCSAProfiler(self.transit_connections, self.target_stop,
                                                      self.start_time, self.end_time, self.transfer_margin,
                                                      self.walk_network, self.walk_speed, track_route=True)
        csa_profile.run()
        for stop, profile in csa_profile.stop_profiles.items():
            for bag in profile._label_bags:
                for label in bag:
                    # Walk the backwards-linked label chain, collecting the
                    # Connection legs of the reconstructed journey.
                    cur_label = label
                    journey_legs = []
                    while True:
                        connection = cur_label.connection
                        if isinstance(connection, Connection):
                            journey_legs.append(connection)
                        if not cur_label.previous_label:
                            break
                        cur_label = cur_label.previous_label
                    route_tuples_list = [(x.departure_stop, x.arrival_stop) for x in journey_legs]
                    # test that all legs are unique
                    self.assertEqual(len(route_tuples_list), len(set(route_tuples_list)))
                    prev_arr_node = None
                    for route_tuple in route_tuples_list:
                        dep_node = route_tuple[0]
                        arr_node = route_tuple[1]
                        # test that all legs have unique departure and arrival nodes
                        self.assertNotEqual(dep_node, arr_node)
                        if prev_arr_node:
                            # test that legs form a continuous path
                            self.assertEqual(prev_arr_node, dep_node)
                        prev_arr_node = arr_node
def test_target_self_loops(self):
event_list_raw_data = [
(3, 1, 30, 40, "trip_3", 1)
]
transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
walk_network = networkx.Graph()
walk_network.add_edge(1, 3, d_walk=11)
walk_speed = 1
target_stop = 1
transfer_margin = 0
start_time = 0
end_time = 50
print(walk_network.edges())
print(transit_connections)
csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
start_time, end_time, transfer_margin,
walk_network, walk_speed, track_vehicle_legs=True,
track_time=True, track_route=True)
csa_profile.run()
for stop, profile in csa_profile.stop_profiles.items():
if stop == target_stop:
self.assertEqual(len(profile.get_final_optimal_labels()), 0)
    def test_journeys_using_movement_duration(self):
        """Smoke test for track_vehicle_legs=False with route tracking:
        unpacks and prints routes from label chains (no assertions yet).
        """
        def unpack_route_from_labels(cur_label):
            # Follow the backwards-linked labels and collect departure stops;
            # the final connection's arrival stop terminates the route.
            route = []
            last_arrival_stop = None
            while True:
                connection = cur_label.connection
                if isinstance(connection, Connection):
                    route.append(connection.departure_stop)
                if not cur_label.previous_label:
                    break
                cur_label = cur_label.previous_label
            if isinstance(connection, Connection):
                last_arrival_stop = connection.arrival_stop
            route.append(last_arrival_stop)
            return route

        event_list_raw_data = [
            (1, 2, 0, 10, "trip_1", 1),
            (2, 3, 10, 20, "trip_1", 1),
            (4, 5, 30, 40, "trip_2", 1),
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_network = networkx.Graph()
        walk_network.add_edge(2, 4, d_walk=10)
        walk_network.add_edge(3, 4, d_walk=10)
        walk_speed = 1
        target_stop = 5
        transfer_margin = 0
        start_time = 0
        end_time = 50
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed, track_vehicle_legs=False,
                                                      track_time=True, track_route=True)
        csa_profile.run()
        for stop, profile in csa_profile.stop_profiles.items():
            for label_bag in profile._label_bags:
                for label in label_bag:
                    print('origin:', stop, 'n_boardings/movement_duration:', label.movement_duration, 'route:', unpack_route_from_labels(label))
        print('optimal labels:')
        for stop, profile in csa_profile.stop_profiles.items():
            for label in profile.get_final_optimal_labels():
                print('origin:', stop, 'n_boardings/movement_duration:', label.movement_duration, 'route:', unpack_route_from_labels(label))
                #if stop == 1:
                    #assert 3 not in unpack_route_from_labels(label)
                # print('origin:', stop, 'n_boardings:', label.n_boardings, 'route:', unpack_route_from_labels(label))
    def test_journeys_using_movement_duration_last_stop_walk(self):
        """Smoke test like test_journeys_using_movement_duration, but with a
        trailing walk edge (5-6) past the target (no assertions yet).
        """
        def unpack_route_from_labels(cur_label):
            # Follow the backwards-linked labels and collect departure stops;
            # the final connection's arrival stop terminates the route.
            route = []
            last_arrival_stop = None
            print(cur_label)
            while True:
                print(cur_label.previous_label)
                connection = cur_label.connection
                if isinstance(connection, Connection):
                    route.append(connection.departure_stop)
                if not cur_label.previous_label:
                    break
                cur_label = cur_label.previous_label
            if isinstance(connection, Connection):
                last_arrival_stop = connection.arrival_stop
            route.append(last_arrival_stop)
            return route

        event_list_raw_data = [
            (1, 2, 0, 10, "trip_1", 1),
            (2, 3, 10, 20, "trip_2", 1),
            (4, 5, 30, 40, "trip_3", 1),
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_network = networkx.Graph()
        walk_network.add_edge(2, 4, d_walk=10)
        walk_network.add_edge(3, 4, d_walk=10)
        walk_network.add_edge(5, 6, d_walk=10)
        walk_speed = 1
        target_stop = 5
        transfer_margin = 0
        start_time = 0
        end_time = 50
        csa_profile = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                      start_time, end_time, transfer_margin,
                                                      walk_network, walk_speed, track_vehicle_legs=False,
                                                      track_time=True, track_route=True)
        csa_profile.run()
        for stop, profile in csa_profile.stop_profiles.items():
            for label_bag in profile._label_bags:
                for label in label_bag:
                    print('origin:', stop,
                          'n_boardings/movement_duration:', label.movement_duration,
                          'route:', unpack_route_from_labels(label))
        print('optimal labels:')
        for stop, profile in csa_profile.stop_profiles.items():
            for label in profile.get_final_optimal_labels():
                print('origin:', stop,
                      'n_boardings/movement_duration:', label.movement_duration,
                      'route:', unpack_route_from_labels(label))
                #if stop == 1:
                    #assert 3 not in unpack_route_from_labels(label)
                # print('origin:', stop, 'n_boardings:', label.n_boardings, 'route:', unpack_route_from_labels(label))
def test_zero_length_journeys_potential_bug_1(self):
event_list_raw_data = [
(0, 1, 0, 0, "trip_1", 0),
(1, 2, 0, 0, "trip_1", 1)
]
transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
walk_network = networkx.Graph()
walk_network.add_edge(10, 1, d_walk=20)
walk_network.add_edge(1, 11, d_walk=20)
walk_speed = 1
target_stop = 11
transfer_margin = 0
start_time = 0
end_time = 50
csa_profiler = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
start_time, end_time, transfer_margin,
walk_network, walk_speed,
track_vehicle_legs=True,
track_time=True,
track_route=True)
csa_profiler.run()
stop_profile_1 = csa_profiler._stop_profiles[1]
all_labels_stop_profile_1 = [label for label_bag in stop_profile_1._label_bags for label in label_bag]
for label in all_labels_stop_profile_1:
self.assertLess(label.n_boardings, 1, "There should at most a walking label when going from 11 to 1 at any "
"point in time, now one label has " + str(label.n_boardings) +
" boardings"
)
    def test_zero_length_journeys_potential_bug(self):
        """Chained zero-duration connections (s->a->b at time 0) followed by
        a real ride (b->t) should still yield exactly one optimal label at
        both the source s and the intermediate stop a.
        """
        s = 0
        a = 1
        b = 2
        t = 3
        event_list_raw_data = [
            (s, a, 0, 0, "trip_1", 1),
            (a, b, 0, 0, "trip_1", 2),
            (b, t, 1, 2, "trip_2", 0)
        ]
        transit_connections = list(map(lambda el: Connection(*el), event_list_raw_data))
        walk_network = networkx.Graph()
        walk_speed = 1
        target_stop = t
        transfer_margin = 0
        start_time = 0
        end_time = 50
        csa_profiler = MultiObjectivePseudoCSAProfiler(transit_connections, target_stop,
                                                       start_time, end_time, transfer_margin,
                                                       walk_network, walk_speed,
                                                       track_vehicle_legs=True,
                                                       track_time=True,
                                                       track_route=True)
        csa_profiler.run()
        stop_profile_a_labels = csa_profiler.stop_profiles[a].get_final_optimal_labels()
        stop_profile_s_labels = csa_profiler.stop_profiles[s].get_final_optimal_labels()
        self.assertEqual(len(stop_profile_a_labels), 1)
        self.assertEqual(len(stop_profile_s_labels), 1)
| |
"""Helpful utilities for building analysis pipelines.
"""
import gzip
import os
import tempfile
import time
import shutil
import contextlib
import itertools
import functools
import random
import ConfigParser
import collections
import fnmatch
import subprocess
import toolz as tz
import yaml
try:
from concurrent import futures
except ImportError:
try:
import futures
except ImportError:
futures = None
@contextlib.contextmanager
def cpmap(cores=1):
    """Configurable parallel map context manager.

    Returns appropriate map compatible function based on configuration:
    - Local single core (the default)
    - Multiple local cores

    Fix: shut the process pool down in a finally clause so worker
    processes are reaped even when the managed block raises.
    """
    if int(cores) == 1:
        yield itertools.imap
    else:
        if futures is None:
            raise ImportError("concurrent.futures not available")
        pool = futures.ProcessPoolExecutor(cores)
        try:
            yield pool.map
        finally:
            pool.shutdown()
def map_wrap(f):
    """Wrap standard function to easily pass into 'map' processing.

    The wrapper forwards its arguments through the Python 2 ``apply``
    builtin, so a call like ``wrapper(args_tuple)`` presumably becomes
    ``f(*args_tuple)`` -- confirm against callers.
    NOTE(review): ``apply`` was removed in Python 3; this is Python 2 only.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        return apply(f, *args, **kwargs)
    return wrapper
def transform_to(ext):
    """
    Decorator to create an output filename from an output filename with
    the specified extension. Changes the extension, in_file is transformed
    to a new type.

    Takes functions like this to decorate:
    f(in_file, out_dir=None, out_file=None) or,
    f(in_file=in_file, out_dir=None, out_file=None)

    examples:
    @transform(".bam")
    f("the/input/path/file.sam") ->
        f("the/input/path/file.sam", out_file="the/input/path/file.bam")

    @transform(".bam")
    f("the/input/path/file.sam", out_dir="results") ->
        f("the/input/path/file.sam", out_file="results/file.bam")
    """
    def decor(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            out_file = kwargs.get("out_file", None)
            if not out_file:
                # Derive out_file: default directory is the input file's,
                # with the extension swapped for `ext`.
                in_path = kwargs.get("in_file", args[0])
                out_dir = kwargs.get("out_dir", os.path.dirname(in_path))
                safe_makedir(out_dir)
                out_name = replace_suffix(os.path.basename(in_path), ext)
                out_file = os.path.join(out_dir, out_name)
            kwargs["out_file"] = out_file
            # Memoization: skip the wrapped function when output exists.
            if not file_exists(out_file):
                out_file = f(*args, **kwargs)
            return out_file
        return wrapper
    return decor
def filter_to(word):
    """
    Decorator to create an output filename from an input filename by
    adding a word onto the stem. in_file is filtered by the function
    and the results are written to out_file. You would want to use
    this over transform_to if you don't know the extension of the file
    going in. This also memoizes the output file.

    Takes functions like this to decorate:
    f(in_file, out_dir=None, out_file=None) or,
    f(in_file=in_file, out_dir=None, out_file=None)

    examples:
    @filter_to(".foo")
    f("the/input/path/file.sam") ->
        f("the/input/path/file.sam", out_file="the/input/path/file.foo.bam")

    @filter_to(".foo")
    f("the/input/path/file.sam", out_dir="results") ->
        f("the/input/path/file.sam", out_file="results/file.foo.bam")
    """
    def decor(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            out_file = kwargs.get("out_file", None)
            if not out_file:
                # Derive out_file: default directory is the input file's,
                # with `word` appended to the filename stem.
                in_path = kwargs.get("in_file", args[0])
                out_dir = kwargs.get("out_dir", os.path.dirname(in_path))
                safe_makedir(out_dir)
                out_name = append_stem(os.path.basename(in_path), word)
                out_file = os.path.join(out_dir, out_name)
            kwargs["out_file"] = out_file
            # Memoization: skip the wrapped function when output exists.
            if not file_exists(out_file):
                out_file = f(*args, **kwargs)
            return out_file
        return wrapper
    return decor
def memoize_outfile(ext=None, stem=None):
    """
    Memoization decorator dispatching on the requested naming scheme.
    See docstring for transform_to (extension swap) and filter_to (stem
    suffix) for details. Returns None if neither argument is supplied.
    """
    if ext:
        return transform_to(ext)
    return filter_to(stem) if stem else None
def safe_makedir(dname):
    """Make a directory if it doesn't exist, handling concurrent race conditions.
    """
    if not dname:
        return dname
    attempt = 0
    retry_limit = 5
    while not os.path.exists(dname):
        # Several processes may try to create the same directory at once;
        # retry a bounded number of times before propagating the error.
        try:
            os.makedirs(dname)
        except OSError:
            if attempt > retry_limit:
                raise
            attempt += 1
            time.sleep(2)
    return dname
@contextlib.contextmanager
def chdir(new_dir):
    """Context manager to temporarily change to a new directory.

    Creates new_dir if needed; always restores the original working
    directory, even on error.
    http://lucentbeing.com/blog/context-managers-and-the-with-statement-in-python/
    """
    original_dir = os.getcwd()
    safe_makedir(new_dir)
    os.chdir(new_dir)
    try:
        yield
    finally:
        os.chdir(original_dir)
@contextlib.contextmanager
def tmpfile(*args, **kwargs):
    """Make a tempfile, safely cleaning up file descriptors on completion.
    """
    descriptor, path = tempfile.mkstemp(*args, **kwargs)
    try:
        yield path
    finally:
        os.close(descriptor)
        if os.path.exists(path):
            os.remove(path)
def file_exists(fname):
    """Check if a file exists and is non-empty.
    """
    try:
        if not fname:
            # Preserve falsy inputs (None / "") as the return value.
            return fname
        return os.path.exists(fname) and os.path.getsize(fname) > 0
    except OSError:
        return False
def file_uptodate(fname, cmp_fname):
    """Check if a file exists, is non-empty and is more recent than cmp_fname.
    """
    try:
        if not (file_exists(fname) and file_exists(cmp_fname)):
            return False
        return os.path.getmtime(fname) >= os.path.getmtime(cmp_fname)
    except OSError:
        return False
def create_dirs(config, names=None):
    """Create the directories listed under config["dir"] (all by default)."""
    dir_map = config["dir"]
    if names is None:
        names = dir_map.keys()
    for name in names:
        safe_makedir(dir_map[name])
def save_diskspace(fname, reason, config):
    """Overwrite a file in place with a short message to save disk.

    This keeps files as a sanity check on processes working, but saves
    disk by replacing them with a short message.
    """
    if not config["algorithm"].get("save_diskspace", False):
        return
    with open(fname, "w") as out_handle:
        out_handle.write("File removed to save disk space: %s" % reason)
def read_galaxy_amqp_config(galaxy_config, base_dir):
    """Read connection information on the RabbitMQ server from Galaxy config.

    Returns a dict mapping option name -> value for every option in the
    [galaxy_amqp] section of the ini-style config file.
    NOTE(review): uses the Python 2 ``ConfigParser`` module name
    (``configparser`` on Python 3).
    """
    galaxy_config = add_full_path(galaxy_config, base_dir)
    config = ConfigParser.ConfigParser()
    config.read(galaxy_config)
    amqp_config = {}
    for option in config.options("galaxy_amqp"):
        amqp_config[option] = config.get("galaxy_amqp", option)
    return amqp_config
def add_full_path(dirname, basedir=None):
    """Return dirname anchored at basedir (cwd by default) unless it is
    already absolute (POSIX-style leading slash)."""
    if basedir is None:
        basedir = os.getcwd()
    return dirname if dirname.startswith("/") else os.path.join(basedir, dirname)
def splitext_plus(f):
    """Split on file extensions, allowing for zipped extensions.
    """
    base, ext = os.path.splitext(f)
    if ext in (".gz", ".bz2", ".zip"):
        # Peel off the compression suffix and keep it attached to the
        # underlying extension, e.g. "x.vcf.gz" -> ("x", ".vcf.gz").
        base, inner = os.path.splitext(base)
        ext = inner + ext
    return base, ext
def remove_safe(f):
    """Remove a file or directory tree, silently ignoring OS errors
    (e.g. the path not existing)."""
    try:
        if os.path.isdir(f):
            shutil.rmtree(f)
            return
        os.remove(f)
    except OSError:
        pass
def file_plus_index(fname):
    """Convert a file name into the file plus required indexes.
    """
    index_for_ext = {".vcf": ".idx", ".bam": ".bai", ".vcf.gz": ".tbi",
                     ".bed.gz": ".tbi", ".fq.gz": ".gbi"}
    ext = splitext_plus(fname)[-1]
    index_ext = index_for_ext.get(ext)
    return [fname, fname + index_ext] if index_ext else [fname]
def symlink_plus(orig, new):
    """Create relative symlinks and handle associated biological index files.

    For each known index extension present next to ``orig`` (.idx, .gbi,
    .tbi, .bai), creates a matching relative symlink next to ``new``,
    falling back to a plain copy when symlinking fails.
    """
    for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]:
        # Only (re)link when the source exists and the destination is
        # missing or a dangling link.
        if os.path.exists(orig + ext) and (not os.path.lexists(new + ext) or not os.path.exists(new + ext)):
            with chdir(os.path.dirname(new)):
                remove_safe(new + ext)
                # Work around symlink issues on some filesystems. Randomly
                # fail to symlink.
                try:
                    os.symlink(os.path.relpath(orig + ext), os.path.basename(new + ext))
                except OSError:
                    if not os.path.exists(new + ext) or not os.path.lexists(new + ext):
                        remove_safe(new + ext)
                        shutil.copyfile(orig + ext, new + ext)
    orig_noext = splitext_plus(orig)[0]
    new_noext = splitext_plus(new)[0]
    for sub_ext in [".bai"]:
        # Also link secondary indexes named without the primary extension
        # (e.g. "x.bai" alongside "x.bam").
        if os.path.exists(orig_noext + sub_ext) and not os.path.lexists(new_noext + sub_ext):
            with chdir(os.path.dirname(new_noext)):
                os.symlink(os.path.relpath(orig_noext + sub_ext), os.path.basename(new_noext + sub_ext))
def open_gzipsafe(f):
    """Open a path for reading, using gzip when it ends in .gz."""
    if f.endswith(".gz"):
        return gzip.open(f)
    return open(f)
def append_stem(to_transform, word):
    """
    renames a filename or list of filenames with 'word' appended to the stem
    of each one:
    example: append_stem("/path/to/test.sam", "_filtered") ->
    "/path/to/test_filtered.sam"
    """
    if is_sequence(to_transform):
        return [append_stem(fname, word) for fname in to_transform]
    if is_string(to_transform):
        base, ext = splitext_plus(to_transform)
        return base + word + ext
    raise ValueError("append_stem takes a single filename as a string or "
                     "a list of filenames to transform.")
def replace_suffix(to_transform, suffix):
    """
    replaces the suffix on a filename or list of filenames
    example: replace_suffix("/path/to/test.sam", ".bam") ->
    "/path/to/test.bam"
    """
    if is_sequence(to_transform):
        return [os.path.splitext(fname)[0] + suffix for fname in to_transform]
    if is_string(to_transform):
        return os.path.splitext(to_transform)[0] + suffix
    raise ValueError("replace_suffix takes a single filename as a string or "
                     "a list of filenames to transform.")
# ## Functional programming
def partition_all(n, iterable):
    """Partition a list into equally sized pieces, including last smaller parts
    http://stackoverflow.com/questions/5129102/python-equivalent-to-clojures-partition-all
    """
    iterator = iter(iterable)
    chunk = list(itertools.islice(iterator, n))
    while chunk:
        yield chunk
        chunk = list(itertools.islice(iterator, n))
def robust_partition_all(n, iterable):
    """
    replaces partition_all with a more robust version.
    Workaround for a segfault in pybedtools when using a BedTool as an iterator:
    https://github.com/daler/pybedtools/issues/88 for the discussion

    NOTE(review): Python 2 only -- relies on ``it.next()`` and on
    ``raise StopIteration`` cleanly terminating a generator, which
    PEP 479 turned into a RuntimeError from Python 3.7 onward.
    """
    it = iter(iterable)
    while True:
        x = []
        for _ in range(n):
            try:
                x.append(it.next())
            except StopIteration:
                # Flush the final partial chunk, then end the generator.
                yield x
                # Omitting this StopIteration results in a segfault!
                raise StopIteration
        yield x
def partition(pred, iterable):
    """Use a predicate to partition entries into false entries and true entries.

    Returns two lazy iterators: (items failing pred, items passing pred).
    NOTE(review): Python 2 only -- ``itertools.ifilterfalse``/``ifilter``
    became ``filterfalse``/builtin ``filter`` on Python 3.
    """
    # partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
    t1, t2 = itertools.tee(iterable)
    return itertools.ifilterfalse(pred, t1), itertools.ifilter(pred, t2)
# ## Dealing with configuration files
def merge_config_files(fnames):
    """Merge configuration files, preferring definitions in latter files.

    Top-level dict values are merged key-by-key; any other value is
    replaced wholesale by the later file.
    NOTE(review): Python 2 ``iteritems``; also ``yaml.load`` without an
    explicit Loader is unsafe on untrusted input -- prefer
    ``yaml.safe_load`` if these configs can come from outside.
    """
    def _load_yaml(fname):
        with open(fname) as in_handle:
            config = yaml.load(in_handle)
        return config
    out = _load_yaml(fnames[0])
    for fname in fnames[1:]:
        cur = _load_yaml(fname)
        for k, v in cur.iteritems():
            if k in out and isinstance(out[k], dict):
                out[k].update(v)
            else:
                out[k] = v
    return out
def deepish_copy(org):
    """Improved speed deep copy for dictionaries of simple python types.
    Thanks to Gregg Lind:
    http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/

    NOTE(review): Python 2 ``iteritems`` (use ``items()`` on Python 3).
    """
    out = dict().fromkeys(org)
    for k, v in org.iteritems():
        if isinstance(v, dict):
            out[k] = deepish_copy(v)
        else:
            try:
                out[k] = v.copy()   # dicts, sets
            except AttributeError:
                try:
                    out[k] = v[:]   # lists, tuples, strings, unicode
                except TypeError:
                    out[k] = v      # ints
    return out
def get_in(d, t, default=None):
    """
    look up if you can get a tuple of values from a nested dictionary,
    each item in the tuple a deeper layer
    example: get_in({1: {2: 3}}, (1, 2)) -> 3
    example: get_in({1: {2: 3}}, (2, 3)) -> None (the `default` value)
    """
    return tz.get_in(t, d, default)
def flatten(l):
    """
    flatten an irregular list of lists
    example: flatten([[[1, 2, 3], [4, 5]], 6]) -> [1, 2, 3, 4, 5, 6]
    lifted from: http://stackoverflow.com/questions/2158395/

    NOTE(review): Python 2 only -- ``basestring`` is gone in Python 3 and
    ``collections.Iterable`` moved to ``collections.abc``.
    """
    for el in l:
        if isinstance(el, collections.Iterable) and not isinstance(el,
                                                                   basestring):
            for sub in flatten(el):
                yield sub
        else:
            yield el
def is_sequence(arg):
    """
    check if 'arg' is a sequence
    example: is_sequence([]) -> True
    example: is_sequence("lol") -> False

    NOTE(review): `and` binds tighter than `or`, so strings are only
    excluded by the final __iter__ test; Python 3 ``str`` defines
    __iter__, so this would return True for strings there -- the falsy
    result relies on Python 2 string semantics.
    """
    return (not hasattr(arg, "strip") and
            hasattr(arg, "__getitem__") or
            hasattr(arg, "__iter__"))
def is_pair(arg):
    """
    check if 'arg' is a two-item sequence
    """
    if not is_sequence(arg):
        return False
    return len(arg) == 2
def is_string(arg):
    """Check whether *arg* is a (byte or unicode) string.

    NOTE(review): Python 2 only -- ``basestring`` was removed in Python 3.
    """
    return isinstance(arg, basestring)
def locate(pattern, root=os.curdir):
    '''Locate all files matching supplied filename pattern in and below
    supplied root directory.'''
    top = os.path.abspath(root)
    for dirpath, _, filenames in os.walk(top):
        for match in fnmatch.filter(filenames, pattern):
            yield os.path.join(dirpath, match)
def itersubclasses(cls, _seen=None):
"""
snagged from: http://code.activestate.com/recipes/576949/
itersubclasses(cls)
Generator over all subclasses of a given class, in depth first order.
>>> list(itersubclasses(int)) == [bool]
True
>>> class A(object): pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL (new-style) classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)] #doctest: +ELLIPSIS
['type', ...'tuple', ...]
"""
if not isinstance(cls, type):
raise TypeError('itersubclasses must be called with '
'new-style classes, not %.100r' % cls)
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in subs:
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
def replace_directory(out_files, dest_dir):
    """
    change the output directory to dest_dir
    can take a string (single file) or a list of files
    """
    if is_sequence(out_files):
        return [os.path.join(dest_dir, os.path.basename(f)) for f in out_files]
    if is_string(out_files):
        return os.path.join(dest_dir, os.path.basename(out_files))
    raise ValueError("in_files must either be a sequence of filenames "
                     "or a string")
def which(program):
    """ returns the path to an executable or None if it can't be found"""
    def _executable(path):
        return os.path.isfile(path) and os.access(path, os.X_OK)

    head, _ = os.path.split(program)
    if head:
        # An explicit path was given: accept it only if executable.
        return program if _executable(program) else None
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, program)
        if _executable(candidate):
            return candidate
    return None
def reservoir_sample(stream, num_items, item_parser=lambda x: x):
    """
    samples num_items from the stream keeping each with equal probability
    (classic reservoir sampling)
    """
    reservoir = []
    for seen, item in enumerate(stream):
        if len(reservoir) < num_items:
            reservoir.append(item_parser(item))
        else:
            slot = random.randint(0, seen)
            if slot < num_items:
                reservoir[slot] = item_parser(item)
    return reservoir
def compose(f, g):
    """Return the function composition f(g(x))."""
    def composed(x):
        return f(g(x))
    return composed
def dictapply(d, fn):
    """
    apply a function to all non-dict values in a dictionary,
    recursing into nested dictionaries; mutates and returns d
    """
    for key in d:
        value = d[key]
        if isinstance(value, dict):
            d[key] = dictapply(value, fn)
        else:
            d[key] = fn(value)
    return d
def R_sitelib():
    """Retrieve the R site-library installed with the bcbio installer.
    """
    # Function-local import -- presumably to avoid an import cycle with
    # bcbio.install; confirm before hoisting to module level.
    from bcbio import install
    return os.path.join(install.get_defaults().get("tooldir", "/usr/local"),
                        "lib", "R", "site-library")
def R_package_path(package):
    """
    return the path to an installed R package, or None when the Rscript
    call fails or the reported directory does not exist

    NOTE(review): Python 2 only -- uses the ``except E, e`` syntax, which
    is a SyntaxError on Python 3.
    """
    local_sitelib = R_sitelib()
    cmd = """Rscript -e '.libPaths(c("{local_sitelib}")); find.package("{package}")'"""
    try:
        output = subprocess.check_output(cmd.format(**locals()), shell=True)
    except subprocess.CalledProcessError, e:
        return None
    # find.package prints its result on a line prefixed with "[1]".
    for line in output.split("\n"):
        if "[1]" not in line:
            continue
        dirname = line.split("[1]")[1].replace("\"", "").strip()
        if os.path.exists(dirname):
            return dirname
    return None
def is_gzipped(fname):
    """Check whether *fname* carries a gzip extension (.gz or .gzip).

    Fix: os.path.splitext returns the extension with its leading dot, so
    the previous comparison against "gzip" (no dot) never matched ".gzip"
    files.
    """
    _, ext = os.path.splitext(fname)
    return ext in [".gz", ".gzip"]
def is_bzipped(fname):
    """Check whether *fname* carries a bzip2 extension (.bz2 or .bzip2).

    Fix: os.path.splitext returns the extension with its leading dot, so
    the previous comparison against "bzip2" (no dot) never matched
    ".bzip2" files.
    """
    _, ext = os.path.splitext(fname)
    return ext in [".bz2", ".bzip2"]
def open_possible_gzip(fname, flag="r"):
    """Open *fname* with the given mode, via gzip (binary mode enforced)
    when the file name looks gzip-compressed."""
    if not is_gzipped(fname):
        return open(fname, flag)
    if "b" not in flag:
        flag += "b"
    return gzip.open(fname, flag)
def filter_missing(xs):
    """
    remove items from a list if they evaluate to False
    """
    # filter with None keeps exactly the truthy items.
    return filter(None, xs)
def rbind(dfs):
    """
    acts like rbind for pandas dataframes: concatenates rows of all
    frames in `dfs`, returning the single frame unchanged for len 1

    NOTE(review): relies on DataFrame.append, deprecated and removed in
    pandas 2.0 -- ``pd.concat(dfs)`` is the modern equivalent; confirm
    callers only pass append-compatible objects before changing.
    """
    if len(dfs) == 1:
        return dfs[0]
    df = dfs[0]
    for d in dfs[1:]:
        df = df.append(d)
    return df
def max_command_length():
    """
    get the maximum length of the command line, in bytes, defaulting
    to a conservative number if not set
    """
    fallback = 150000  # lowest seen so far is 200k
    try:
        limit = os.sysconf('SC_ARG_MAX')
    except ValueError:
        return fallback
    return limit if limit > 0 else fallback
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A runner implementation that submits a job for remote execution.
The runner will create a JSON description of the job graph and then submit it
to the Dataflow Service for remote execution by a worker.
"""
import logging
import threading
import time
import traceback
import urllib
from collections import defaultdict
import apache_beam as beam
from apache_beam import coders
from apache_beam import error
from apache_beam import pvalue
from apache_beam.internal import pickler
from apache_beam.internal.gcp import json_value
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TestOptions
from apache_beam.pvalue import AsSideInput
from apache_beam.runners.dataflow.dataflow_metrics import DataflowMetrics
from apache_beam.runners.dataflow.internal import names
from apache_beam.runners.dataflow.internal.clients import dataflow as dataflow_api
from apache_beam.runners.dataflow.internal.names import PropertyNames
from apache_beam.runners.dataflow.internal.names import TransformNames
from apache_beam.runners.runner import PipelineResult
from apache_beam.runners.runner import PipelineRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.runners.runner import PValueCache
from apache_beam.transforms.display import DisplayData
from apache_beam.typehints import typehints
from apache_beam.utils.plugin import BeamPlugin
__all__ = ['DataflowRunner']
class DataflowRunner(PipelineRunner):
  """A runner that creates job graphs and submits them for remote execution.

  Every execution of the run() method will submit an independent job for
  remote execution that consists of the nodes reachable from the passed in
  node argument or entire graph if node is None. The run() method returns
  after the service created the job and will not wait for the job to finish
  if blocking is set to False.
  """

  # A list of PTransformOverride objects to be applied before running a
  # pipeline using DataflowRunner.
  # Currently this only works for overrides where the input and output types
  # do not change.
  # For internal SDK use only. This should not be updated by Beam pipeline
  # authors.
  # Imported here to avoid circular dependencies.
  # TODO: Remove the apache_beam.pipeline dependency in CreatePTransformOverride
  from apache_beam.runners.dataflow.ptransform_overrides import CreatePTransformOverride

  _PTRANSFORM_OVERRIDES = [
      CreatePTransformOverride(),
  ]
def __init__(self, cache=None):
  """Initializes the runner.

  Args:
    cache: Optional PValueCache instance; a fresh cache is created when
      omitted.
  """
  # Cache of CloudWorkflowStep protos generated while the runner
  # "executes" a pipeline.
  self._cache = PValueCache() if cache is None else cache
  self._unique_step_id = 0
def _get_unique_step_name(self):
  """Returns a fresh step name of the form 's<N>', incrementing N per call."""
  self._unique_step_id += 1
  return 's{}'.format(self._unique_step_id)
@staticmethod
def poll_for_job_completion(runner, result, duration):
  """Polls for the specified job to finish running (successfully or not).

  Updates the result with the new job information before returning.

  Args:
    runner: DataflowRunner instance to use for polling job state.
    result: DataflowPipelineResult instance used for job information.
    duration (int): The time to wait (in milliseconds) for job to finish.
      If it is set to :data:`None`, it will wait indefinitely until the job
      is finished.
  """
  last_message_time = None
  last_message_hash = None

  last_error_rank = float('-inf')
  last_error_msg = None
  last_job_state = None
  # How long to wait after pipeline failure for the error
  # message to show up giving the reason for the failure.
  # It typically takes about 30 seconds.
  final_countdown_timer_secs = 50.0
  sleep_secs = 5.0

  # Try to prioritize the user-level traceback, if any.
  def rank_error(msg):
    if 'work item was attempted' in msg:
      return -1
    elif 'Traceback' in msg:
      return 1
    return 0

  if duration:
    start_secs = time.time()
    duration_secs = duration / 1000

  job_id = result.job_id()
  while True:
    response = runner.dataflow_client.get_job(job_id)
    # If get() is called very soon after Create() the response may not
    # contain an initialized 'currentState' field.
    if response.currentState is not None:
      if response.currentState != last_job_state:
        logging.info('Job %s is in state %s', job_id, response.currentState)
        last_job_state = response.currentState
      if str(response.currentState) != 'JOB_STATE_RUNNING':
        # Stop checking for new messages on timeout, explanatory
        # message received, success, or a terminal job state caused
        # by the user that therefore doesn't require explanation.
        if (final_countdown_timer_secs <= 0.0
            or last_error_msg is not None
            or str(response.currentState) == 'JOB_STATE_DONE'
            or str(response.currentState) == 'JOB_STATE_CANCELLED'
            or str(response.currentState) == 'JOB_STATE_UPDATED'
            or str(response.currentState) == 'JOB_STATE_DRAINED'):
          break

        # The job has failed; ensure we see any final error messages.
        sleep_secs = 1.0      # poll faster during the final countdown
        final_countdown_timer_secs -= sleep_secs

    time.sleep(sleep_secs)

    # Get all messages since beginning of the job run or since last message.
    page_token = None
    while True:
      messages, page_token = runner.dataflow_client.list_messages(
          job_id, page_token=page_token, start_time=last_message_time)
      for m in messages:
        message = '%s: %s: %s' % (m.time, m.messageImportance, m.messageText)
        m_hash = hash(message)

        if last_message_hash is not None and m_hash == last_message_hash:
          # Skip the first message if it is the last message we got in the
          # previous round. This can happen because we use the
          # last_message_time as a parameter of the query for new messages.
          continue
        last_message_time = m.time
        last_message_hash = m_hash
        # Skip empty messages.
        if m.messageImportance is None:
          continue
        logging.info(message)
        if str(m.messageImportance) == 'JOB_MESSAGE_ERROR':
          if rank_error(m.messageText) >= last_error_rank:
            last_error_rank = rank_error(m.messageText)
            last_error_msg = m.messageText
      if not page_token:
        break

    if duration:
      passed_secs = time.time() - start_secs
      # BUGFIX: time out once the elapsed time EXCEEDS the requested
      # duration. The original comparison was inverted
      # (duration_secs > passed_secs), which made the poll give up on the
      # very first iteration of any job shorter than the timeout.
      if passed_secs > duration_secs:
        logging.warning('Timing out on waiting for job %s after %d seconds',
                        job_id, passed_secs)
        break

  result._job = response
  runner.last_error_msg = last_error_msg
@staticmethod
def group_by_key_input_visitor():
  """Returns a visitor that enforces a KV element type on GroupByKey inputs."""
  # Imported here to avoid circular dependencies.
  from apache_beam.pipeline import PipelineVisitor

  class GroupByKeyInputVisitor(PipelineVisitor):
    """A visitor that replaces `Any` element type for input `PCollection` of
    a `GroupByKey` or `_GroupByKeyOnly` with a `KV` type.

    TODO(BEAM-115): Once Python SDk is compatible with the new Runner API,
    we could directly replace the coder instead of mutating the element type.
    """

    def visit_transform(self, transform_node):
      # Imported here to avoid circular dependencies.
      # pylint: disable=wrong-import-order, wrong-import-position
      from apache_beam.transforms.core import GroupByKey, _GroupByKeyOnly
      if isinstance(transform_node.transform, (GroupByKey, _GroupByKeyOnly)):
        pcoll = transform_node.inputs[0]
        input_type = pcoll.element_type
        # If input_type is not specified, then treat it as `Any`.
        if not input_type:
          input_type = typehints.Any

        def coerce_to_kv_type(element_type):
          if isinstance(element_type, typehints.TupleHint.TupleConstraint):
            if len(element_type.tuple_types) == 2:
              return element_type
            else:
              raise ValueError(
                  "Tuple input to GroupByKey must be have two components. "
                  "Found %s for %s" % (element_type, pcoll))
          # BUGFIX: test the element_type argument rather than the enclosing
          # input_type. The original tested input_type, so recursive calls
          # (e.g. for the members of a Union) checked the wrong value and an
          # Any member inside a Union fell through to the error branch.
          elif isinstance(element_type, typehints.AnyTypeConstraint):
            # `Any` type needs to be replaced with a KV[Any, Any] to
            # force a KV coder as the main output coder for the pcollection
            # preceding a GroupByKey.
            return typehints.KV[typehints.Any, typehints.Any]
          elif isinstance(element_type, typehints.UnionConstraint):
            union_types = [
                coerce_to_kv_type(t) for t in element_type.union_types]
            return typehints.KV[
                typehints.Union[tuple(t.tuple_types[0] for t in union_types)],
                typehints.Union[tuple(t.tuple_types[1] for t in union_types)]]
          else:
            # TODO: Possibly handle other valid types.
            raise ValueError(
                "Input to GroupByKey must be of Tuple or Any type. "
                "Found %s for %s" % (element_type, pcoll))
        pcoll.element_type = coerce_to_kv_type(input_type)
  return GroupByKeyInputVisitor()
@staticmethod
def flatten_input_visitor():
  """Returns a visitor that aligns Flatten input types with its output type."""
  # Imported here to avoid circular dependencies.
  from apache_beam.pipeline import PipelineVisitor

  class FlattenInputVisitor(PipelineVisitor):
    """A visitor that replaces the element type for input ``PCollections``s of
    a ``Flatten`` transform with that of the output ``PCollection``.
    """

    def visit_transform(self, transform_node):
      # Imported here to avoid circular dependencies.
      # pylint: disable=wrong-import-order, wrong-import-position
      from apache_beam import Flatten
      if not isinstance(transform_node.transform, Flatten):
        return
      result_element_type = transform_node.outputs[None].element_type
      for flatten_input in transform_node.inputs:
        flatten_input.element_type = result_element_type

  return FlattenInputVisitor()
def run(self, pipeline):
  """Remotely executes entire pipeline or parts reachable from node.

  Translates the pipeline into a Dataflow job description and submits it
  to the service, unless TestOptions.dry_run is set.

  Args:
    pipeline: The Pipeline instance to translate and submit.

  Returns:
    A DataflowPipelineResult for the created job, or None on a dry run.

  Raises:
    ImportError: If the GCP extras required by this runner are missing.
  """
  # Import here to avoid adding the dependency for local running scenarios.
  try:
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam.runners.dataflow.internal import apiclient
  except ImportError:
    raise ImportError(
        'Google Cloud Dataflow runner not available, '
        'please install apache_beam[gcp]')

  # Performing configured PTransform overrides.
  pipeline.replace_all(DataflowRunner._PTRANSFORM_OVERRIDES)

  # Add setup_options for all the BeamPlugin imports
  setup_options = pipeline._options.view_as(SetupOptions)
  plugins = BeamPlugin.get_all_plugin_paths()
  if setup_options.beam_plugins is not None:
    # De-duplicate while merging user-supplied plugins with discovered ones.
    plugins = list(set(plugins + setup_options.beam_plugins))
  setup_options.beam_plugins = plugins

  self.job = apiclient.Job(pipeline._options)

  # Dataflow runner requires a KV type for GBK inputs, hence we enforce that
  # here.
  pipeline.visit(self.group_by_key_input_visitor())

  # Dataflow runner requires output type of the Flatten to be the same as the
  # inputs, hence we enforce that here.
  pipeline.visit(self.flatten_input_visitor())

  # The superclass's run will trigger a traversal of all reachable nodes.
  super(DataflowRunner, self).run(pipeline)

  test_options = pipeline._options.view_as(TestOptions)
  # If it is a dry run, return without submitting the job.
  if test_options.dry_run:
    return None

  # Get a Dataflow API client and set its options
  self.dataflow_client = apiclient.DataflowApplicationClient(
      pipeline._options)

  # Create the job description and send a request to the service. The result
  # can be None if there is no need to send a request to the service (e.g.
  # template creation). If a request was sent and failed then the call will
  # raise an exception.
  result = DataflowPipelineResult(
      self.dataflow_client.create_job(self.job), self)

  self._metrics = DataflowMetrics(self.dataflow_client, result, self.job)
  result.metric_results = self._metrics
  return result
def _get_typehint_based_encoding(self, typehint, window_coder):
  """Returns an encoding based on a typehint object."""
  coder = self._get_coder(typehint, window_coder=window_coder)
  return self._get_cloud_encoding(coder)
@staticmethod
def _get_coder(typehint, window_coder):
  """Returns a coder based on a typehint object."""
  element_coder = coders.registry.get_coder(typehint)
  if not window_coder:
    return element_coder
  return coders.WindowedValueCoder(element_coder, window_coder=window_coder)
def _get_cloud_encoding(self, coder):
  """Returns an encoding based on a coder object."""
  if isinstance(coder, coders.Coder):
    return coder.as_cloud_object()
  raise TypeError('Coder object must inherit from coders.Coder: %s.' %
                  str(coder))
def _get_side_input_encoding(self, input_encoding):
  """Returns an encoding for the output of a view transform.

  Args:
    input_encoding: encoding of current transform's input. Side inputs need
      this because the service will check that input and output types match.

  Returns:
    An encoding that matches the output and input encoding. This is essential
    for the View transforms introduced to produce side inputs to a ParDo.
  """
  view_encoding = {
      '@type': input_encoding['@type'],
      'component_encodings': [input_encoding],
  }
  return view_encoding
def _get_encoded_output_coder(self, transform_node, window_value=True):
  """Returns the cloud encoding of the coder for the output of a transform."""
  if (len(transform_node.outputs) == 1
      and transform_node.outputs[None].element_type is not None):
    # TODO(robertwb): Handle type hints for multi-output transforms.
    element_type = transform_node.outputs[None].element_type
  else:
    # No usable type hint: fall back to 'Any', which triggers the fallback
    # coder (i.e., cPickler).
    element_type = typehints.Any

  window_coder = None
  if window_value:
    window_coder = (
        transform_node.outputs[None].windowing.windowfn.get_window_coder())

  return self._get_typehint_based_encoding(element_type, window_coder)
def _add_step(self, step_kind, step_label, transform_node, side_tags=()):
  """Creates a Step object and adds it to the cache."""
  # Import here to avoid adding the dependency for local running scenarios.
  # pylint: disable=wrong-import-order, wrong-import-position
  from apache_beam.runners.dataflow.internal import apiclient
  step = apiclient.Step(step_kind, self._get_unique_step_name())
  self.job.proto.steps.append(step.proto)
  step.add_property(PropertyNames.USER_NAME, step_label)
  # Cache the step under the main output (tag None), and — for multi-output
  # transforms — under every side tag as well, since cache lookups always
  # include the tag.
  for tag in (None,) + tuple(side_tags):
    self._cache.cache_output(transform_node, tag, step)
  # Attach display data; an empty list is added when the transform declares
  # no display data.
  display_items = DisplayData.create_from(transform_node.transform).items
  step.add_property(
      PropertyNames.DISPLAY_DATA,
      [item.get_dict() for item in display_items])
  return step
def _add_singleton_step(self, label, full_label, tag, input_step):
  """Creates a CollectionToSingleton step used to handle ParDo side inputs."""
  # Import here to avoid adding the dependency for local running scenarios.
  from apache_beam.runners.dataflow.internal import apiclient
  step = apiclient.Step(TransformNames.COLLECTION_TO_SINGLETON, label)
  self.job.proto.steps.append(step.proto)
  step.add_property(PropertyNames.USER_NAME, full_label)
  input_reference = {
      '@type': 'OutputReference',
      PropertyNames.STEP_NAME: input_step.proto.name,
      PropertyNames.OUTPUT_NAME: input_step.get_output(tag),
  }
  step.add_property(PropertyNames.PARALLEL_INPUT, input_reference)
  # The view's encoding must be derived from its input's encoding, since the
  # service checks that input and output types match.
  step.encoding = self._get_side_input_encoding(input_step.encoding)
  output_info = {
      PropertyNames.USER_NAME: '%s.%s' % (full_label, PropertyNames.OUTPUT),
      PropertyNames.ENCODING: step.encoding,
      PropertyNames.OUTPUT_NAME: PropertyNames.OUT,
  }
  step.add_property(PropertyNames.OUTPUT_INFO, [output_info])
  return step
def run_Impulse(self, transform_node):
  """Adds the step that produces the initial impulse (streaming only).

  Raises:
    ValueError: If the pipeline is not streaming; an Impulse source for
      batch pipelines has not been defined.
  """
  standard_options = (
      transform_node.outputs[None].pipeline._options.view_as(StandardOptions))
  if standard_options.streaming:
    step = self._add_step(
        TransformNames.READ, transform_node.full_label, transform_node)
    step.add_property(PropertyNames.FORMAT, 'pubsub')
    step.add_property(PropertyNames.PUBSUB_SUBSCRIPTION, '_starting_signal/')

    step.encoding = self._get_encoded_output_coder(transform_node)
    step.add_property(
        PropertyNames.OUTPUT_INFO,
        [{PropertyNames.USER_NAME: (
            '%s.%s' % (
                transform_node.full_label, PropertyNames.OUT)),
          PropertyNames.ENCODING: step.encoding,
          PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
  else:
    # BUGFIX: the original constructed the ValueError but never raised it
    # (a no-op expression statement), so batch pipelines silently produced
    # no step for Impulse.
    raise ValueError('Impulse source for batch pipelines has not been defined.')
def run_Flatten(self, transform_node):
  """Adds a Flatten step that merges all inputs into a single output."""
  step = self._add_step(TransformNames.FLATTEN,
                        transform_node.full_label, transform_node)
  flatten_inputs = []
  for one_input in transform_node.inputs:
    input_step = self._cache.get_pvalue(one_input)
    flatten_inputs.append({
        '@type': 'OutputReference',
        PropertyNames.STEP_NAME: input_step.proto.name,
        PropertyNames.OUTPUT_NAME: input_step.get_output(one_input.tag),
    })
  step.add_property(PropertyNames.INPUTS, flatten_inputs)
  step.encoding = self._get_encoded_output_coder(transform_node)
  output_info = {
      PropertyNames.USER_NAME: (
          '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
      PropertyNames.ENCODING: step.encoding,
      PropertyNames.OUTPUT_NAME: PropertyNames.OUT,
  }
  step.add_property(PropertyNames.OUTPUT_INFO, [output_info])
def apply_WriteToBigQuery(self, transform, pcoll):
  """Applies WriteToBigQuery, swapping in the native sink for batch mode.

  Streaming pipelines apply the transform as-is (after rejecting
  WRITE_TRUNCATE, which the streaming service does not support). Batch
  pipelines replace it with a Write to the native BigQuerySink.

  Raises:
    RuntimeError: If WRITE_TRUNCATE disposition is used while streaming.
  """
  # Make sure this is the WriteToBigQuery class that we expected
  if not isinstance(transform, beam.io.WriteToBigQuery):
    return self.apply_PTransform(transform, pcoll)
  standard_options = pcoll.pipeline._options.view_as(StandardOptions)
  if standard_options.streaming:
    if (transform.write_disposition ==
        beam.io.BigQueryDisposition.WRITE_TRUNCATE):
      raise RuntimeError('Can not use write truncation mode in streaming')
    return self.apply_PTransform(transform, pcoll)
  else:
    return pcoll | 'WriteToBigQuery' >> beam.io.Write(
        beam.io.BigQuerySink(
            transform.table_reference.tableId,
            transform.table_reference.datasetId,
            transform.table_reference.projectId,
            transform.schema,
            transform.create_disposition,
            transform.write_disposition))
def apply_GroupByKey(self, transform, pcoll):
  """Validates the GroupByKey input coder and returns the output PCollection.

  The coder is taken from the producing transform when it can be inferred,
  falling back to the input element type hint; it must be a key-value
  coder with a deterministic key coder for the Dataflow shuffle.

  Raises:
    ValueError: If the resolved coder is not a key-value coder.
  """
  # Infer coder of parent.
  #
  # TODO(ccy): make Coder inference and checking less specialized and more
  # comprehensive.
  parent = pcoll.producer
  if parent:
    coder = parent.transform._infer_output_coder()  # pylint: disable=protected-access
  # NOTE(review): assumes every pcoll has a producer so that `coder` is
  # always bound before this check — confirm against Pipeline semantics.
  if not coder:
    coder = self._get_coder(pcoll.element_type or typehints.Any, None)
  if not coder.is_kv_coder():
    raise ValueError(('Coder for the GroupByKey operation "%s" is not a '
                      'key-value coder: %s.') % (transform.label,
                                                 coder))
  # TODO(robertwb): Update the coder itself if it changed.
  coders.registry.verify_deterministic(
      coder.key_coder(), 'GroupByKey operation "%s"' % transform.label)

  return pvalue.PCollection(pcoll.pipeline)
def run_GroupByKey(self, transform_node):
  """Adds a Group step wired to the transform's single input."""
  input_tag = transform_node.inputs[0].tag
  input_step = self._cache.get_pvalue(transform_node.inputs[0])
  step = self._add_step(
      TransformNames.GROUP, transform_node.full_label, transform_node)
  parallel_input = {
      '@type': 'OutputReference',
      PropertyNames.STEP_NAME: input_step.proto.name,
      PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag),
  }
  step.add_property(PropertyNames.PARALLEL_INPUT, parallel_input)
  step.encoding = self._get_encoded_output_coder(transform_node)
  output_info = {
      PropertyNames.USER_NAME: (
          '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
      PropertyNames.ENCODING: step.encoding,
      PropertyNames.OUTPUT_NAME: PropertyNames.OUT,
  }
  step.add_property(PropertyNames.OUTPUT_INFO, [output_info])
  # The windowing strategy travels to the service as a serialized proto.
  windowing = transform_node.transform.get_windowing(transform_node.inputs)
  step.add_property(
      PropertyNames.SERIALIZED_FN,
      self.serialize_windowing_strategy(windowing))
def run_ParDo(self, transform_node):
  """Adds a Do step for a ParDo, wiring side inputs and tagged outputs.

  Each side input gets its own CollectionToSingleton step; the resulting
  view steps are attached as non-parallel inputs of the Do step.
  """
  transform = transform_node.transform
  input_tag = transform_node.inputs[0].tag
  input_step = self._cache.get_pvalue(transform_node.inputs[0])

  # Attach side inputs.
  si_dict = {}
  # We must call self._cache.get_pvalue exactly once due to refcounting.
  si_labels = {}
  full_label_counts = defaultdict(int)
  lookup_label = lambda side_pval: si_labels[side_pval]
  for side_pval in transform_node.side_inputs:
    assert isinstance(side_pval, AsSideInput)
    step_number = self._get_unique_step_name()
    si_label = 'SideInput-' + step_number
    pcollection_label = '%s.%s' % (
        side_pval.pvalue.producer.full_label.split('/')[-1],
        side_pval.pvalue.tag if side_pval.pvalue.tag else 'out')
    si_full_label = '%s/%s(%s.%s)' % (transform_node.full_label,
                                      side_pval.__class__.__name__,
                                      pcollection_label,
                                      full_label_counts[pcollection_label])

    # Count the number of times the same PCollection is a side input
    # to the same ParDo.
    full_label_counts[pcollection_label] += 1

    self._add_singleton_step(
        si_label, si_full_label, side_pval.pvalue.tag,
        self._cache.get_pvalue(side_pval.pvalue))
    si_dict[si_label] = {
        '@type': 'OutputReference',
        PropertyNames.STEP_NAME: si_label,
        PropertyNames.OUTPUT_NAME: PropertyNames.OUT}
    si_labels[side_pval] = si_label

  # Now create the step for the ParDo transform being handled.
  # The user name gets a trailing '/<name>' component only when side inputs
  # are present, to distinguish it from the generated side-input steps.
  transform_name = transform_node.full_label.rsplit('/', 1)[-1]
  step = self._add_step(
      TransformNames.DO,
      transform_node.full_label + (
          '/{}'.format(transform_name)
          if transform_node.side_inputs else ''),
      transform_node,
      transform_node.transform.output_tags)
  fn_data = self._pardo_fn_data(transform_node, lookup_label)
  step.add_property(PropertyNames.SERIALIZED_FN, pickler.dumps(fn_data))
  step.add_property(
      PropertyNames.PARALLEL_INPUT,
      {'@type': 'OutputReference',
       PropertyNames.STEP_NAME: input_step.proto.name,
       PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
  # Add side inputs if any.
  step.add_property(PropertyNames.NON_PARALLEL_INPUTS, si_dict)

  # Generate description for the outputs. The output names
  # will be 'out' for main output and 'out_<tag>' for a tagged output.
  # Using 'out' as a tag will not clash with the name for main since it will
  # be transformed into 'out_out' internally.
  outputs = []
  step.encoding = self._get_encoded_output_coder(transform_node)

  # Add the main output to the description.
  outputs.append(
      {PropertyNames.USER_NAME: (
          '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
       PropertyNames.ENCODING: step.encoding,
       PropertyNames.OUTPUT_NAME: PropertyNames.OUT})
  for side_tag in transform.output_tags:
    # The assumption here is that all outputs will have the same typehint
    # and coder as the main output. This is certainly the case right now
    # but conceivably it could change in the future.
    outputs.append(
        {PropertyNames.USER_NAME: (
            '%s.%s' % (transform_node.full_label, side_tag)),
         PropertyNames.ENCODING: step.encoding,
         PropertyNames.OUTPUT_NAME: (
             '%s_%s' % (PropertyNames.OUT, side_tag))})

  step.add_property(PropertyNames.OUTPUT_INFO, outputs)
@staticmethod
def _pardo_fn_data(transform_node, get_label):
  """Returns the tuple of fn data to be serialized for a ParDo step."""
  transform = transform_node.transform
  si_tags_and_types = []
  for side_pval in transform_node.side_inputs:
    # pylint: disable=protected-access
    si_tags_and_types.append(
        (get_label(side_pval), side_pval.__class__,
         side_pval._view_options()))
  return (transform.fn, transform.args, transform.kwargs, si_tags_and_types,
          transform_node.inputs[0].windowing)
def apply_CombineValues(self, transform, pcoll):
  """Returns a fresh PCollection; the actual step is built in run_CombineValues."""
  return pvalue.PCollection(pcoll.pipeline)
def run_CombineValues(self, transform_node):
  """Adds a Combine step for a CombineValues transform."""
  transform = transform_node.transform
  input_tag = transform_node.inputs[0].tag
  input_step = self._cache.get_pvalue(transform_node.inputs[0])
  step = self._add_step(
      TransformNames.COMBINE, transform_node.full_label, transform_node)
  # Combiner functions do not take deferred side-inputs (i.e. PValues) and
  # therefore the code to handle extra args/kwargs is simpler than for the
  # DoFn's of the ParDo transform. In the last, empty argument is where
  # side inputs information would go.
  fn_data = (transform.fn, transform.args, transform.kwargs, ())
  step.add_property(PropertyNames.SERIALIZED_FN,
                    pickler.dumps(fn_data))
  step.add_property(
      PropertyNames.PARALLEL_INPUT,
      {'@type': 'OutputReference',
       PropertyNames.STEP_NAME: input_step.proto.name,
       PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
  # Note that the accumulator must not have a WindowedValue encoding, while
  # the output of this step does in fact have a WindowedValue encoding.
  accumulator_encoding = self._get_cloud_encoding(
      transform_node.transform.fn.get_accumulator_coder())
  output_encoding = self._get_encoded_output_coder(transform_node)

  step.encoding = output_encoding
  step.add_property(PropertyNames.ENCODING, accumulator_encoding)
  # Generate description for main output 'out.'
  outputs = []
  # Add the main output to the description.
  outputs.append(
      {PropertyNames.USER_NAME: (
          '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
       PropertyNames.ENCODING: step.encoding,
       PropertyNames.OUTPUT_NAME: PropertyNames.OUT})
  step.add_property(PropertyNames.OUTPUT_INFO, outputs)
def run_Read(self, transform_node):
  """Adds a Read step for the source wrapped by a Read transform.

  Raises:
    ValueError: If a BigQuery source specifies neither a table nor a query,
      if a pubsub source is used outside a streaming pipeline, or if the
      source has an unexpected format.
  """
  transform = transform_node.transform
  step = self._add_step(
      TransformNames.READ, transform_node.full_label, transform_node)
  # TODO(mairbek): refactor if-else tree to use registerable functions.
  # Initialize the source specific properties.

  if not hasattr(transform.source, 'format'):
    # If a format is not set, we assume the source to be a custom source.
    source_dict = {}

    source_dict['spec'] = {
        '@type': names.SOURCE_TYPE,
        names.SERIALIZED_SOURCE_KEY: pickler.dumps(transform.source)
    }

    try:
      source_dict['metadata'] = {
          'estimated_size_bytes': json_value.get_typed_value_descriptor(
              transform.source.estimate_size())
      }
    except error.RuntimeValueProviderError:
      # Size estimation is best effort, and this error is by value provider.
      logging.info(
          'Could not estimate size of source %r due to ' + \
          'RuntimeValueProviderError', transform.source)
    except Exception:  # pylint: disable=broad-except
      # Size estimation is best effort. So we log the error and continue.
      logging.info(
          'Could not estimate size of source %r due to an exception: %s',
          transform.source, traceback.format_exc())

    step.add_property(PropertyNames.SOURCE_STEP_INPUT,
                      source_dict)
  elif transform.source.format == 'text':
    step.add_property(PropertyNames.FILE_PATTERN, transform.source.path)
  elif transform.source.format == 'bigquery':
    step.add_property(PropertyNames.BIGQUERY_EXPORT_FORMAT, 'FORMAT_AVRO')
    # TODO(silviuc): Add table validation if transform.source.validate.
    if transform.source.table_reference is not None:
      step.add_property(PropertyNames.BIGQUERY_DATASET,
                        transform.source.table_reference.datasetId)
      step.add_property(PropertyNames.BIGQUERY_TABLE,
                        transform.source.table_reference.tableId)
      # If project owning the table was not specified then the project owning
      # the workflow (current project) will be used.
      if transform.source.table_reference.projectId is not None:
        step.add_property(PropertyNames.BIGQUERY_PROJECT,
                          transform.source.table_reference.projectId)
    elif transform.source.query is not None:
      step.add_property(PropertyNames.BIGQUERY_QUERY, transform.source.query)
      step.add_property(PropertyNames.BIGQUERY_USE_LEGACY_SQL,
                        transform.source.use_legacy_sql)
      step.add_property(PropertyNames.BIGQUERY_FLATTEN_RESULTS,
                        transform.source.flatten_results)
    else:
      # BUGFIX: interpolate the source into the message with %. The original
      # passed transform.source as a second argument to ValueError, which
      # left the %r placeholder unfilled and rendered the message as a tuple.
      raise ValueError('BigQuery source %r must specify either a table or'
                       ' a query' % transform.source)
  elif transform.source.format == 'pubsub':
    # NOTE(review): other methods here read pipeline._options; confirm that
    # the public `.options` accessor is intended in this code path.
    standard_options = (
        transform_node.inputs[0].pipeline.options.view_as(StandardOptions))
    if not standard_options.streaming:
      raise ValueError('PubSubPayloadSource is currently available for use '
                       'only in streaming pipelines.')
    # Only one of topic or subscription should be set.
    if transform.source.full_subscription:
      step.add_property(PropertyNames.PUBSUB_SUBSCRIPTION,
                        transform.source.full_subscription)
    elif transform.source.full_topic:
      step.add_property(PropertyNames.PUBSUB_TOPIC,
                        transform.source.full_topic)
    if transform.source.id_label:
      step.add_property(PropertyNames.PUBSUB_ID_LABEL,
                        transform.source.id_label)
  else:
    raise ValueError(
        'Source %r has unexpected format %s.' % (
            transform.source, transform.source.format))

  if not hasattr(transform.source, 'format'):
    step.add_property(PropertyNames.FORMAT, names.SOURCE_FORMAT)
  else:
    step.add_property(PropertyNames.FORMAT, transform.source.format)

  # Wrap coder in WindowedValueCoder: this is necessary as the encoding of a
  # step should be the type of value outputted by each step. Read steps
  # automatically wrap output values in a WindowedValue wrapper, if necessary.
  # This is also necessary for proper encoding for size estimation.
  # Using a GlobalWindowCoder as a place holder instead of the default
  # PickleCoder because GlobalWindowCoder is known coder.
  # TODO(robertwb): Query the collection for the windowfn to extract the
  # correct coder.
  coder = coders.WindowedValueCoder(transform._infer_output_coder(),
                                    coders.coders.GlobalWindowCoder())  # pylint: disable=protected-access

  step.encoding = self._get_cloud_encoding(coder)
  step.add_property(
      PropertyNames.OUTPUT_INFO,
      [{PropertyNames.USER_NAME: (
          '%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
        PropertyNames.ENCODING: step.encoding,
        PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
def run__NativeWrite(self, transform_node):
  """Adds a Write step for a native sink (text, BigQuery, or pubsub).

  Raises:
    ValueError: If a pubsub sink is used outside a streaming pipeline, or
      if the sink has an unexpected format.
  """
  transform = transform_node.transform
  input_tag = transform_node.inputs[0].tag
  input_step = self._cache.get_pvalue(transform_node.inputs[0])
  step = self._add_step(
      TransformNames.WRITE, transform_node.full_label, transform_node)
  # TODO(mairbek): refactor if-else tree to use registerable functions.
  # Initialize the sink specific properties.
  if transform.sink.format == 'text':
    # Note that it is important to use typed properties (@type/value dicts)
    # for non-string properties and also for empty strings. For example,
    # in the code below the num_shards must have type and also
    # file_name_suffix and shard_name_template (could be empty strings).
    step.add_property(
        PropertyNames.FILE_NAME_PREFIX, transform.sink.file_name_prefix,
        with_type=True)
    step.add_property(
        PropertyNames.FILE_NAME_SUFFIX, transform.sink.file_name_suffix,
        with_type=True)
    step.add_property(
        PropertyNames.SHARD_NAME_TEMPLATE, transform.sink.shard_name_template,
        with_type=True)
    if transform.sink.num_shards > 0:
      step.add_property(
          PropertyNames.NUM_SHARDS, transform.sink.num_shards, with_type=True)
    # TODO(silviuc): Implement sink validation.
    step.add_property(PropertyNames.VALIDATE_SINK, False, with_type=True)
  elif transform.sink.format == 'bigquery':
    # TODO(silviuc): Add table validation if transform.sink.validate.
    step.add_property(PropertyNames.BIGQUERY_DATASET,
                      transform.sink.table_reference.datasetId)
    step.add_property(PropertyNames.BIGQUERY_TABLE,
                      transform.sink.table_reference.tableId)
    # If project owning the table was not specified then the project owning
    # the workflow (current project) will be used.
    if transform.sink.table_reference.projectId is not None:
      step.add_property(PropertyNames.BIGQUERY_PROJECT,
                        transform.sink.table_reference.projectId)
    step.add_property(PropertyNames.BIGQUERY_CREATE_DISPOSITION,
                      transform.sink.create_disposition)
    step.add_property(PropertyNames.BIGQUERY_WRITE_DISPOSITION,
                      transform.sink.write_disposition)
    if transform.sink.table_schema is not None:
      step.add_property(
          PropertyNames.BIGQUERY_SCHEMA, transform.sink.schema_as_json())
  elif transform.sink.format == 'pubsub':
    # NOTE(review): other methods here read pipeline._options; confirm that
    # the public `.options` accessor is intended in this code path.
    standard_options = (
        transform_node.inputs[0].pipeline.options.view_as(StandardOptions))
    if not standard_options.streaming:
      raise ValueError('PubSubPayloadSink is currently available for use '
                       'only in streaming pipelines.')
    step.add_property(PropertyNames.PUBSUB_TOPIC, transform.sink.full_topic)
  else:
    raise ValueError(
        'Sink %r has unexpected format %s.' % (
            transform.sink, transform.sink.format))
  step.add_property(PropertyNames.FORMAT, transform.sink.format)

  # Wrap coder in WindowedValueCoder: this is necessary for proper encoding
  # for size estimation. Using a GlobalWindowCoder as a place holder instead
  # of the default PickleCoder because GlobalWindowCoder is known coder.
  # TODO(robertwb): Query the collection for the windowfn to extract the
  # correct coder.
  coder = coders.WindowedValueCoder(transform.sink.coder,
                                    coders.coders.GlobalWindowCoder())
  step.encoding = self._get_cloud_encoding(coder)
  step.add_property(PropertyNames.ENCODING, step.encoding)
  step.add_property(
      PropertyNames.PARALLEL_INPUT,
      {'@type': 'OutputReference',
       PropertyNames.STEP_NAME: input_step.proto.name,
       PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
@classmethod
def serialize_windowing_strategy(cls, windowing):
  """Serializes a Windowing into the JSON-string form used by the service."""
  from apache_beam.runners import pipeline_context
  from apache_beam.portability.api import beam_runner_api_pb2

  context = pipeline_context.PipelineContext()
  windowing_proto = windowing.to_runner_api(context)
  message = beam_runner_api_pb2.MessageWithComponents(
      components=context.to_runner_api(),
      windowing_strategy=windowing_proto)
  return cls.byte_array_to_json_string(message.SerializeToString())
@classmethod
def deserialize_windowing_strategy(cls, serialized_data):
  """Reconstructs a Windowing from serialize_windowing_strategy output."""
  # Imported here to avoid circular dependencies.
  # pylint: disable=wrong-import-order, wrong-import-position
  from apache_beam.runners import pipeline_context
  from apache_beam.portability.api import beam_runner_api_pb2
  from apache_beam.transforms.core import Windowing

  proto = beam_runner_api_pb2.MessageWithComponents()
  proto.ParseFromString(cls.json_string_to_byte_array(serialized_data))
  context = pipeline_context.PipelineContext(proto.components)
  return Windowing.from_runner_api(proto.windowing_strategy, context)
@staticmethod
def byte_array_to_json_string(raw_bytes):
  """Implements org.apache.beam.sdk.util.StringUtils.byteArrayToJsonString."""
  # NOTE(review): urllib.quote exists only on Python 2; under Python 3 this
  # would need urllib.parse.quote — confirm the supported interpreter.
  return urllib.quote(raw_bytes)
@staticmethod
def json_string_to_byte_array(encoded_string):
  """Implements org.apache.beam.sdk.util.StringUtils.jsonStringToByteArray."""
  # NOTE(review): urllib.unquote exists only on Python 2; under Python 3 this
  # would need urllib.parse.unquote — confirm the supported interpreter.
  return urllib.unquote(encoded_string)
class DataflowPipelineResult(PipelineResult):
"""Represents the state of a pipeline run on the Dataflow service."""
def __init__(self, job, runner):
  """Initialize a new DataflowPipelineResult instance.

  Args:
    job: Job message from the Dataflow API. Could be :data:`None` if a job
      request was not sent to Dataflow service (e.g. template jobs).
    runner: DataflowRunner instance.
  """
  self._runner = runner
  self._job = job
  # Populated by the runner after the job has been submitted.
  self.metric_results = None
def _update_job(self):
  """Refreshes the cached job message from the service when worthwhile."""
  # We need the job id to be able to update job information. There is no need
  # to update the job if we are in a known terminal state.
  if not self.has_job or self._is_in_terminal_state():
    return
  self._job = self._runner.dataflow_client.get_job(self.job_id())
def job_id(self):
return self._job.id
def metrics(self):
return self.metric_results
@property
def has_job(self):
return self._job is not None
@property
def state(self):
"""Return the current state of the remote job.
Returns:
A PipelineState object.
"""
if not self.has_job:
return PipelineState.UNKNOWN
self._update_job()
values_enum = dataflow_api.Job.CurrentStateValueValuesEnum
# TODO: Move this table to a another location.
# Ordered by the enum values.
api_jobstate_map = {
values_enum.JOB_STATE_UNKNOWN: PipelineState.UNKNOWN,
values_enum.JOB_STATE_STOPPED: PipelineState.STOPPED,
values_enum.JOB_STATE_RUNNING: PipelineState.RUNNING,
values_enum.JOB_STATE_DONE: PipelineState.DONE,
values_enum.JOB_STATE_FAILED: PipelineState.FAILED,
values_enum.JOB_STATE_CANCELLED: PipelineState.CANCELLED,
values_enum.JOB_STATE_UPDATED: PipelineState.UPDATED,
values_enum.JOB_STATE_DRAINING: PipelineState.DRAINING,
values_enum.JOB_STATE_DRAINED: PipelineState.DRAINED,
values_enum.JOB_STATE_PENDING: PipelineState.PENDING,
values_enum.JOB_STATE_CANCELLING: PipelineState.CANCELLING,
}
return (api_jobstate_map[self._job.currentState] if self._job.currentState
else PipelineState.UNKNOWN)
def _is_in_terminal_state(self):
if not self.has_job:
return True
values_enum = dataflow_api.Job.CurrentStateValueValuesEnum
return self._job.currentState in [
values_enum.JOB_STATE_STOPPED, values_enum.JOB_STATE_DONE,
values_enum.JOB_STATE_FAILED, values_enum.JOB_STATE_CANCELLED,
values_enum.JOB_STATE_DRAINED]
def wait_until_finish(self, duration=None):
if not self._is_in_terminal_state():
if not self.has_job:
raise IOError('Failed to get the Dataflow job id.')
thread = threading.Thread(
target=DataflowRunner.poll_for_job_completion,
args=(self._runner, self, duration))
# Mark the thread as a daemon thread so a keyboard interrupt on the main
# thread will terminate everything. This is also the reason we will not
# use thread.join() to wait for the polling thread.
thread.daemon = True
thread.start()
while thread.isAlive():
time.sleep(5.0)
# TODO: Merge the termination code in poll_for_job_completion and
# _is_in_terminal_state.
terminated = (str(self._job.currentState) != 'JOB_STATE_RUNNING')
assert duration or terminated, (
'Job did not reach to a terminal state after waiting indefinitely.')
if terminated and self.state != PipelineState.DONE:
# TODO(BEAM-1290): Consider converting this to an error log based on
# theresolution of the issue.
raise DataflowRuntimeException(
'Dataflow pipeline failed. State: %s, Error:\n%s' %
(self.state, getattr(self._runner, 'last_error_msg', None)), self)
return self.state
def cancel(self):
if not self.has_job:
raise IOError('Failed to get the Dataflow job id.')
self._update_job()
if self._is_in_terminal_state():
logging.warning(
'Cancel failed because job %s is already terminated in state %s.',
self.job_id(), self.state)
else:
if not self._runner.dataflow_client.modify_job_state(
self.job_id(), 'JOB_STATE_CANCELLED'):
cancel_failed_message = (
'Failed to cancel job %s, please go to the Developers Console to '
'cancel it manually.') % self.job_id()
logging.error(cancel_failed_message)
raise DataflowRuntimeException(cancel_failed_message, self)
return self.state
def __str__(self):
return '<%s %s %s>' % (
self.__class__.__name__,
self.job_id(),
self.state)
def __repr__(self):
return '<%s %s at %s>' % (self.__class__.__name__, self._job, hex(id(self)))
class DataflowRuntimeException(Exception):
  """Indicates an error has occurred in running this pipeline."""

  def __init__(self, msg, result):
    # Keep a reference to the originating pipeline result so callers can
    # inspect the failed job from the raised exception.
    self.result = result
    super(DataflowRuntimeException, self).__init__(msg)
| |
import copy
import threading
import time
import warnings
from collections import deque
from contextlib import contextmanager
import _thread
import pytz
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS, DatabaseError
from django.db.backends import utils
from django.db.backends.base.validation import BaseDatabaseValidation
from django.db.backends.signals import connection_created
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseErrorWrapper
from django.utils import timezone
from django.utils.asyncio import async_unsafe
from django.utils.functional import cached_property
# Alias for connections opened without selecting a database (used by
# _nodb_cursor(), which copies settings with NAME=None under this alias).
NO_DB_ALIAS = '__no_db__'
class BaseDatabaseWrapper:
    """Represent a database connection."""
    # Mapping of Field objects to their column types.
    data_types = {}
    # Mapping of Field objects to their SQL suffix such as AUTOINCREMENT.
    data_types_suffix = {}
    # Mapping of Field objects to their SQL for CHECK constraints.
    data_type_check_constraints = {}
    ops = None
    vendor = 'unknown'
    display_name = 'unknown'
    SchemaEditorClass = None
    # Classes instantiated in __init__().
    client_class = None
    creation_class = None
    features_class = None
    introspection_class = None
    ops_class = None
    validation_class = BaseDatabaseValidation

    # Maximum number of executed queries kept in self.queries_log.
    queries_limit = 9000

    def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS):
        """Set up connection, transaction, and thread-safety state; instantiate
        the backend helper classes declared as *_class attributes."""
        # Connection related attributes.
        # The underlying database connection.
        self.connection = None
        # `settings_dict` should be a dictionary containing keys such as
        # NAME, USER, etc. It's called `settings_dict` instead of `settings`
        # to disambiguate it from Django settings modules.
        self.settings_dict = settings_dict
        self.alias = alias
        # Query logging in debug mode or when explicitly enabled.
        self.queries_log = deque(maxlen=self.queries_limit)
        self.force_debug_cursor = False

        # Transaction related attributes.
        # Tracks if the connection is in autocommit mode. Per PEP 249, by
        # default, it isn't.
        self.autocommit = False
        # Tracks if the connection is in a transaction managed by 'atomic'.
        self.in_atomic_block = False
        # Increment to generate unique savepoint ids.
        self.savepoint_state = 0
        # List of savepoints created by 'atomic'.
        self.savepoint_ids = []
        # Tracks if the outermost 'atomic' block should commit on exit,
        # ie. if autocommit was active on entry.
        self.commit_on_exit = True
        # Tracks if the transaction should be rolled back to the next
        # available savepoint because of an exception in an inner block.
        self.needs_rollback = False

        # Connection termination related attributes.
        self.close_at = None
        self.closed_in_transaction = False
        self.errors_occurred = False

        # Thread-safety related attributes.
        self._thread_sharing_lock = threading.Lock()
        self._thread_sharing_count = 0
        self._thread_ident = _thread.get_ident()

        # A list of no-argument functions to run when the transaction commits.
        # Each entry is an (sids, func) tuple, where sids is a set of the
        # active savepoint IDs when this function was registered.
        self.run_on_commit = []

        # Should we run the on-commit hooks the next time set_autocommit(True)
        # is called?
        self.run_commit_hooks_on_set_autocommit_on = False

        # A stack of wrappers to be invoked around execute()/executemany()
        # calls. Each entry is a function taking five arguments: execute, sql,
        # params, many, and context. It's the function's responsibility to
        # call execute(sql, params, many, context).
        self.execute_wrappers = []

        self.client = self.client_class(self)
        self.creation = self.creation_class(self)
        self.features = self.features_class(self)
        self.introspection = self.introspection_class(self)
        self.ops = self.ops_class(self)
        self.validation = self.validation_class(self)

    def ensure_timezone(self):
        """
        Ensure the connection's timezone is set to `self.timezone_name` and
        return whether it changed or not.
        """
        # Base implementation is a no-op; backends with server-side time zone
        # support override this.
        return False

    @cached_property
    def timezone(self):
        """
        Return a tzinfo of the database connection time zone.

        This is only used when time zone support is enabled. When a datetime is
        read from the database, it is always returned in this time zone.

        When the database backend supports time zones, it doesn't matter which
        time zone Django uses, as long as aware datetimes are used everywhere.
        Other users connecting to the database can choose their own time zone.

        When the database backend doesn't support time zones, the time zone
        Django uses may be constrained by the requirements of other users of
        the database.
        """
        if not settings.USE_TZ:
            return None
        elif self.settings_dict['TIME_ZONE'] is None:
            return timezone.utc
        else:
            return pytz.timezone(self.settings_dict['TIME_ZONE'])

    @cached_property
    def timezone_name(self):
        """
        Name of the time zone of the database connection.
        """
        if not settings.USE_TZ:
            return settings.TIME_ZONE
        elif self.settings_dict['TIME_ZONE'] is None:
            return 'UTC'
        else:
            return self.settings_dict['TIME_ZONE']

    @property
    def queries_logged(self):
        """Whether executed queries should be recorded (debug cursor)."""
        return self.force_debug_cursor or settings.DEBUG

    @property
    def queries(self):
        """Return the recorded queries, warning when the log limit was hit."""
        if len(self.queries_log) == self.queries_log.maxlen:
            warnings.warn(
                "Limit for query logging exceeded, only the last {} queries "
                "will be returned.".format(self.queries_log.maxlen))
        return list(self.queries_log)

    # ##### Backend-specific methods for creating connections and cursors #####

    def get_connection_params(self):
        """Return a dict of parameters suitable for get_new_connection."""
        raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_connection_params() method')

    def get_new_connection(self, conn_params):
        """Open a connection to the database."""
        raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_new_connection() method')

    def init_connection_state(self):
        """Initialize the database connection settings."""
        raise NotImplementedError('subclasses of BaseDatabaseWrapper may require an init_connection_state() method')

    def create_cursor(self, name=None):
        """Create a cursor. Assume that a connection is established."""
        raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a create_cursor() method')

    # ##### Backend-specific methods for creating connections #####

    @async_unsafe
    def connect(self):
        """Connect to the database. Assume that the connection is closed."""
        # Check for invalid configurations.
        self.check_settings()
        # In case the previous connection was closed while in an atomic block
        self.in_atomic_block = False
        self.savepoint_ids = []
        self.needs_rollback = False
        # Reset parameters defining when to close the connection
        max_age = self.settings_dict['CONN_MAX_AGE']
        self.close_at = None if max_age is None else time.monotonic() + max_age
        self.closed_in_transaction = False
        self.errors_occurred = False
        # Establish the connection
        conn_params = self.get_connection_params()
        self.connection = self.get_new_connection(conn_params)
        self.set_autocommit(self.settings_dict['AUTOCOMMIT'])
        self.init_connection_state()
        connection_created.send(sender=self.__class__, connection=self)

        self.run_on_commit = []

    def check_settings(self):
        """Validate settings combinations that cannot work together."""
        if self.settings_dict['TIME_ZONE'] is not None and not settings.USE_TZ:
            raise ImproperlyConfigured(
                "Connection '%s' cannot set TIME_ZONE because USE_TZ is False."
                % self.alias
            )

    @async_unsafe
    def ensure_connection(self):
        """Guarantee that a connection to the database is established."""
        if self.connection is None:
            with self.wrap_database_errors:
                self.connect()

    # ##### Backend-specific wrappers for PEP-249 connection methods #####

    def _prepare_cursor(self, cursor):
        """
        Validate the connection is usable and perform database cursor wrapping.
        """
        self.validate_thread_sharing()
        if self.queries_logged:
            wrapped_cursor = self.make_debug_cursor(cursor)
        else:
            wrapped_cursor = self.make_cursor(cursor)
        return wrapped_cursor

    def _cursor(self, name=None):
        # Open a connection if needed and hand back a wrapped cursor.
        self.ensure_connection()
        with self.wrap_database_errors:
            return self._prepare_cursor(self.create_cursor(name))

    def _commit(self):
        # Low-level commit on the underlying PEP-249 connection.
        if self.connection is not None:
            with self.wrap_database_errors:
                return self.connection.commit()

    def _rollback(self):
        # Low-level rollback on the underlying PEP-249 connection.
        if self.connection is not None:
            with self.wrap_database_errors:
                return self.connection.rollback()

    def _close(self):
        # Low-level close on the underlying PEP-249 connection.
        if self.connection is not None:
            with self.wrap_database_errors:
                return self.connection.close()

    # ##### Generic wrappers for PEP-249 connection methods #####

    @async_unsafe
    def cursor(self):
        """Create a cursor, opening a connection if necessary."""
        return self._cursor()

    @async_unsafe
    def commit(self):
        """Commit a transaction and reset the dirty flag."""
        self.validate_thread_sharing()
        self.validate_no_atomic_block()
        self._commit()
        # A successful commit means that the database connection works.
        self.errors_occurred = False
        self.run_commit_hooks_on_set_autocommit_on = True

    @async_unsafe
    def rollback(self):
        """Roll back a transaction and reset the dirty flag."""
        self.validate_thread_sharing()
        self.validate_no_atomic_block()
        self._rollback()
        # A successful rollback means that the database connection works.
        self.errors_occurred = False
        self.needs_rollback = False
        self.run_on_commit = []

    @async_unsafe
    def close(self):
        """Close the connection to the database."""
        self.validate_thread_sharing()
        self.run_on_commit = []

        # Don't call validate_no_atomic_block() to avoid making it difficult
        # to get rid of a connection in an invalid state. The next connect()
        # will reset the transaction state anyway.
        if self.closed_in_transaction or self.connection is None:
            return
        try:
            self._close()
        finally:
            if self.in_atomic_block:
                self.closed_in_transaction = True
                self.needs_rollback = True
            else:
                self.connection = None

    # ##### Backend-specific savepoint management methods #####

    def _savepoint(self, sid):
        # Issue the backend's SAVEPOINT statement.
        with self.cursor() as cursor:
            cursor.execute(self.ops.savepoint_create_sql(sid))

    def _savepoint_rollback(self, sid):
        # Issue the backend's ROLLBACK TO SAVEPOINT statement.
        with self.cursor() as cursor:
            cursor.execute(self.ops.savepoint_rollback_sql(sid))

    def _savepoint_commit(self, sid):
        # Issue the backend's RELEASE SAVEPOINT statement.
        with self.cursor() as cursor:
            cursor.execute(self.ops.savepoint_commit_sql(sid))

    def _savepoint_allowed(self):
        # Savepoints cannot be created outside a transaction
        return self.features.uses_savepoints and not self.get_autocommit()

    # ##### Generic savepoint management methods #####

    @async_unsafe
    def savepoint(self):
        """
        Create a savepoint inside the current transaction. Return an
        identifier for the savepoint that will be used for the subsequent
        rollback or commit. Do nothing if savepoints are not supported.
        """
        if not self._savepoint_allowed():
            return

        thread_ident = _thread.get_ident()
        tid = str(thread_ident).replace('-', '')

        self.savepoint_state += 1
        sid = "s%s_x%d" % (tid, self.savepoint_state)

        self.validate_thread_sharing()
        self._savepoint(sid)

        return sid

    @async_unsafe
    def savepoint_rollback(self, sid):
        """
        Roll back to a savepoint. Do nothing if savepoints are not supported.
        """
        if not self._savepoint_allowed():
            return

        self.validate_thread_sharing()
        self._savepoint_rollback(sid)

        # Remove any callbacks registered while this savepoint was active.
        self.run_on_commit = [
            (sids, func) for (sids, func) in self.run_on_commit if sid not in sids
        ]

    @async_unsafe
    def savepoint_commit(self, sid):
        """
        Release a savepoint. Do nothing if savepoints are not supported.
        """
        if not self._savepoint_allowed():
            return

        self.validate_thread_sharing()
        self._savepoint_commit(sid)

    @async_unsafe
    def clean_savepoints(self):
        """
        Reset the counter used to generate unique savepoint ids in this thread.
        """
        self.savepoint_state = 0

    # ##### Backend-specific transaction management methods #####

    def _set_autocommit(self, autocommit):
        """
        Backend-specific implementation to enable or disable autocommit.
        """
        raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a _set_autocommit() method')

    # ##### Generic transaction management methods #####

    def get_autocommit(self):
        """Get the autocommit state."""
        self.ensure_connection()
        return self.autocommit

    def set_autocommit(self, autocommit, force_begin_transaction_with_broken_autocommit=False):
        """
        Enable or disable autocommit.

        The usual way to start a transaction is to turn autocommit off.
        SQLite does not properly start a transaction when disabling
        autocommit. To avoid this buggy behavior and to actually enter a new
        transaction, an explicit BEGIN is required. Using
        force_begin_transaction_with_broken_autocommit=True will issue an
        explicit BEGIN with SQLite. This option will be ignored for other
        backends.
        """
        self.validate_no_atomic_block()
        self.ensure_connection()

        start_transaction_under_autocommit = (
            force_begin_transaction_with_broken_autocommit and not autocommit and
            hasattr(self, '_start_transaction_under_autocommit')
        )

        if start_transaction_under_autocommit:
            self._start_transaction_under_autocommit()
        else:
            self._set_autocommit(autocommit)

        self.autocommit = autocommit

        if autocommit and self.run_commit_hooks_on_set_autocommit_on:
            self.run_and_clear_commit_hooks()
            self.run_commit_hooks_on_set_autocommit_on = False

    def get_rollback(self):
        """Get the "needs rollback" flag -- for *advanced use* only."""
        if not self.in_atomic_block:
            raise TransactionManagementError(
                "The rollback flag doesn't work outside of an 'atomic' block.")
        return self.needs_rollback

    def set_rollback(self, rollback):
        """
        Set or unset the "needs rollback" flag -- for *advanced use* only.
        """
        if not self.in_atomic_block:
            raise TransactionManagementError(
                "The rollback flag doesn't work outside of an 'atomic' block.")
        self.needs_rollback = rollback

    def validate_no_atomic_block(self):
        """Raise an error if an atomic block is active."""
        if self.in_atomic_block:
            raise TransactionManagementError(
                "This is forbidden when an 'atomic' block is active.")

    def validate_no_broken_transaction(self):
        """Raise an error if the transaction is marked as needing rollback."""
        if self.needs_rollback:
            raise TransactionManagementError(
                "An error occurred in the current transaction. You can't "
                "execute queries until the end of the 'atomic' block.")

    # ##### Foreign key constraints checks handling #####

    @contextmanager
    def constraint_checks_disabled(self):
        """
        Disable foreign key constraint checking.
        """
        disabled = self.disable_constraint_checking()
        try:
            yield
        finally:
            if disabled:
                self.enable_constraint_checking()

    def disable_constraint_checking(self):
        """
        Backends can implement as needed to temporarily disable foreign key
        constraint checking. Should return True if the constraints were
        disabled and will need to be reenabled.
        """
        return False

    def enable_constraint_checking(self):
        """
        Backends can implement as needed to re-enable foreign key constraint
        checking.
        """
        pass

    def check_constraints(self, table_names=None):
        """
        Backends can override this method if they can apply constraint
        checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an
        IntegrityError if any invalid foreign key references are encountered.
        """
        pass

    # ##### Connection termination handling #####

    def is_usable(self):
        """
        Test if the database connection is usable.

        This method may assume that self.connection is not None.

        Actual implementations should take care not to raise exceptions
        as that may prevent Django from recycling unusable connections.
        """
        raise NotImplementedError(
            "subclasses of BaseDatabaseWrapper may require an is_usable() method")

    def close_if_unusable_or_obsolete(self):
        """
        Close the current connection if unrecoverable errors have occurred
        or if it outlived its maximum age.
        """
        if self.connection is not None:
            # If the application didn't restore the original autocommit setting,
            # don't take chances, drop the connection.
            if self.get_autocommit() != self.settings_dict['AUTOCOMMIT']:
                self.close()
                return

            # If an exception other than DataError or IntegrityError occurred
            # since the last commit / rollback, check if the connection works.
            if self.errors_occurred:
                if self.is_usable():
                    self.errors_occurred = False
                else:
                    self.close()
                    return

            if self.close_at is not None and time.monotonic() >= self.close_at:
                self.close()
                return

    # ##### Thread safety handling #####

    @property
    def allow_thread_sharing(self):
        """Whether this connection may be used from threads other than the
        one that created it (true while the sharing count is positive)."""
        with self._thread_sharing_lock:
            return self._thread_sharing_count > 0

    def inc_thread_sharing(self):
        """Allow one more nested scope to share this connection across threads."""
        with self._thread_sharing_lock:
            self._thread_sharing_count += 1

    def dec_thread_sharing(self):
        """Undo one inc_thread_sharing() call; never drops below zero."""
        with self._thread_sharing_lock:
            if self._thread_sharing_count <= 0:
                raise RuntimeError('Cannot decrement the thread sharing count below zero.')
            self._thread_sharing_count -= 1

    def validate_thread_sharing(self):
        """
        Validate that the connection isn't accessed by another thread than the
        one which originally created it, unless the connection was explicitly
        authorized to be shared between threads (via the `inc_thread_sharing()`
        method). Raise an exception if the validation fails.
        """
        if not (self.allow_thread_sharing or self._thread_ident == _thread.get_ident()):
            raise DatabaseError(
                "DatabaseWrapper objects created in a "
                "thread can only be used in that same thread. The object "
                "with alias '%s' was created in thread id %s and this is "
                "thread id %s."
                % (self.alias, self._thread_ident, _thread.get_ident())
            )

    # ##### Miscellaneous #####

    def prepare_database(self):
        """
        Hook to do any database check or preparation, generally called before
        migrating a project or an app.
        """
        pass

    @cached_property
    def wrap_database_errors(self):
        """
        Context manager and decorator that re-throws backend-specific database
        exceptions using Django's common wrappers.
        """
        return DatabaseErrorWrapper(self)

    def chunked_cursor(self):
        """
        Return a cursor that tries to avoid caching in the database (if
        supported by the database), otherwise return a regular cursor.
        """
        return self.cursor()

    def make_debug_cursor(self, cursor):
        """Create a cursor that logs all queries in self.queries_log."""
        return utils.CursorDebugWrapper(cursor, self)

    def make_cursor(self, cursor):
        """Create a cursor without debug logging."""
        return utils.CursorWrapper(cursor, self)

    @contextmanager
    def temporary_connection(self):
        """
        Context manager that ensures that a connection is established, and
        if it opened one, closes it to avoid leaving a dangling connection.
        This is useful for operations outside of the request-response cycle.

        Provide a cursor: with self.temporary_connection() as cursor: ...
        """
        must_close = self.connection is None
        try:
            with self.cursor() as cursor:
                yield cursor
        finally:
            if must_close:
                self.close()

    @contextmanager
    def _nodb_cursor(self):
        """
        Return a cursor from an alternative connection to be used when there is
        no need to access the main database, specifically for test db
        creation/deletion. This also prevents the production database from
        being exposed to potential child threads while (or after) the test
        database is destroyed. Refs #10868, #17786, #16969.
        """
        conn = self.__class__({**self.settings_dict, 'NAME': None}, alias=NO_DB_ALIAS)
        try:
            with conn.cursor() as cursor:
                yield cursor
        finally:
            conn.close()

    def schema_editor(self, *args, **kwargs):
        """
        Return a new instance of this backend's SchemaEditor.
        """
        if self.SchemaEditorClass is None:
            raise NotImplementedError(
                'The SchemaEditorClass attribute of this database wrapper is still None')
        return self.SchemaEditorClass(self, *args, **kwargs)

    def on_commit(self, func):
        """Register *func* to run when the current transaction commits, or run
        it immediately when in autocommit mode with no transaction active."""
        if self.in_atomic_block:
            # Transaction in progress; save for execution on commit.
            self.run_on_commit.append((set(self.savepoint_ids), func))
        elif not self.get_autocommit():
            raise TransactionManagementError('on_commit() cannot be used in manual transaction management')
        else:
            # No transaction in progress and in autocommit mode; execute
            # immediately.
            func()

    def run_and_clear_commit_hooks(self):
        """Execute and drain the registered on-commit callbacks (FIFO)."""
        self.validate_no_atomic_block()
        current_run_on_commit = self.run_on_commit
        self.run_on_commit = []
        while current_run_on_commit:
            sids, func = current_run_on_commit.pop(0)
            func()

    @contextmanager
    def execute_wrapper(self, wrapper):
        """
        Return a context manager under which the wrapper is applied to suitable
        database query executions.
        """
        self.execute_wrappers.append(wrapper)
        try:
            yield
        finally:
            self.execute_wrappers.pop()

    def copy(self, alias=None):
        """
        Return a copy of this connection.

        For tests that require two connections to the same database.
        """
        settings_dict = copy.deepcopy(self.settings_dict)
        if alias is None:
            alias = self.alias
        return type(self)(settings_dict, alias)
| |
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises, assert_array_equal
from nose.tools import assert_true, assert_equal
from mne import io, read_evokeds, read_proj
from mne.io.constants import FIFF
from mne.channels import read_layout, make_eeg_layout
from mne.datasets import testing
from mne.time_frequency.tfr import AverageTFR
from mne.utils import slow_test
from mne.viz import plot_evoked_topomap, plot_projs_topomap
from mne.viz.topomap import (_check_outlines, _onselect, plot_topomap,
_find_peaks)
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg')  # for testing don't use X server
warnings.simplefilter('always')  # enable b/c these tests throw warnings
# Paths into the (optional, downloadable) testing dataset ...
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
# ... and into the small fixture files shipped alongside the io tests.
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
layout = read_layout('Vectorview-all')
def _get_raw():
    """Return the fixture Raw object without preloading its data."""
    preload = False
    return io.Raw(raw_fname, preload=preload)
@slow_test
@testing.requires_testing_data
def test_plot_topomap():
    """Test topomap plotting
    """
    import matplotlib.pyplot as plt
    from matplotlib.patches import Circle
    # evoked
    warnings.simplefilter('always')
    res = 16
    evoked = read_evokeds(evoked_fname, 'Left Auditory',
                          baseline=(None, 0))
    # A copy reduced to two EEG channels exercises argument validation.
    ev_bad = evoked.pick_types(meg=False, eeg=True, copy=True)
    ev_bad.pick_channels(ev_bad.ch_names[:2])
    ev_bad.plot_topomap(times=ev_bad.times[:2] - 1e-6)  # auto, should plot EEG
    assert_raises(ValueError, ev_bad.plot_topomap, ch_type='mag')
    assert_raises(TypeError, ev_bad.plot_topomap, head_pos='foo')
    assert_raises(KeyError, ev_bad.plot_topomap, head_pos=dict(foo='bar'))
    assert_raises(ValueError, ev_bad.plot_topomap, head_pos=dict(center=0))
    assert_raises(ValueError, ev_bad.plot_topomap, times=[-100])  # bad time
    assert_raises(ValueError, ev_bad.plot_topomap, times=[[0]])  # bad time

    evoked.plot_topomap(0.1, layout=layout, scale=dict(mag=0.1))
    plt.close('all')
    axes = [plt.subplot(221), plt.subplot(222)]
    evoked.plot_topomap(axes=axes, colorbar=False)
    plt.close('all')
    evoked.plot_topomap(times=[-0.1, 0.2])
    plt.close('all')
    # Plot each channel type with and without a mask.
    mask = np.zeros_like(evoked.data, dtype=bool)
    mask[[1, 5], :] = True
    evoked.plot_topomap(ch_type='mag', outlines=None)
    times = [0.1]
    evoked.plot_topomap(times, ch_type='eeg', res=res, scale=1)
    evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res)
    evoked.plot_topomap(times, ch_type='planar1', res=res)
    evoked.plot_topomap(times, ch_type='planar2', res=res)
    evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res,
                        show_names=True, mask_params={'marker': 'x'})
    plt.close('all')
    assert_raises(ValueError, evoked.plot_topomap, times, ch_type='eeg',
                  res=res, average=-1000)
    assert_raises(ValueError, evoked.plot_topomap, times, ch_type='eeg',
                  res=res, average='hahahahah')

    # show_names callable should strip 'MEG' from all displayed labels.
    p = evoked.plot_topomap(times, ch_type='grad', res=res,
                            show_names=lambda x: x.replace('MEG', ''),
                            image_interp='bilinear')
    subplot = [x for x in p.get_children() if
               isinstance(x, matplotlib.axes.Subplot)][0]
    assert_true(all('MEG' not in x.get_text()
                    for x in subplot.get_children()
                    if isinstance(x, matplotlib.text.Text)))

    # Test title
    def get_texts(p):
        # Collect all Text children of the figure.
        return [x.get_text() for x in p.get_children() if
                isinstance(x, matplotlib.text.Text)]

    p = evoked.plot_topomap(times, ch_type='eeg', res=res, average=0.01)
    assert_equal(len(get_texts(p)), 0)
    p = evoked.plot_topomap(times, ch_type='eeg', title='Custom', res=res)
    texts = get_texts(p)
    assert_equal(len(texts), 1)
    assert_equal(texts[0], 'Custom')
    plt.close('all')

    # delaunay triangulation warning
    with warnings.catch_warnings(record=True):  # can't show
        warnings.simplefilter('always')
        evoked.plot_topomap(times, ch_type='mag', layout=None, res=res)
    assert_raises(RuntimeError, plot_evoked_topomap, evoked, 0.1, 'mag',
                  proj='interactive')  # projs have already been applied

    # change to no-proj mode
    evoked = read_evokeds(evoked_fname, 'Left Auditory',
                          baseline=(None, 0), proj=False)
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('always')
        evoked.plot_topomap(0.1, 'mag', proj='interactive', res=res)
    assert_raises(RuntimeError, plot_evoked_topomap, evoked,
                  np.repeat(.1, 50))
    assert_raises(ValueError, plot_evoked_topomap, evoked, [-3e12, 15e6])

    with warnings.catch_warnings(record=True):  # file conventions
        warnings.simplefilter('always')
        projs = read_proj(ecg_fname)
    # Keep only non-EEG projectors for the projector topomap plots.
    projs = [pp for pp in projs if pp['desc'].lower().find('eeg') < 0]
    plot_projs_topomap(projs, res=res)
    plt.close('all')
    ax = plt.subplot(111)
    plot_projs_topomap([projs[0]], res=res, axes=ax)  # test axes param
    plt.close('all')

    # Zero out EEG locations so positions come from digitization instead.
    for ch in evoked.info['chs']:
        if ch['coil_type'] == FIFF.FIFFV_COIL_EEG:
            ch['loc'].fill(0)

    # Remove extra digitization point, so EEG digitization points
    # correspond with the EEG electrodes
    del evoked.info['dig'][85]

    # Check both 'head' and 'skirt' outline variants and their clip radii.
    pos = make_eeg_layout(evoked.info).pos[:, :2]
    pos, outlines = _check_outlines(pos, 'head')
    assert_true('head' in outlines.keys())
    assert_true('nose' in outlines.keys())
    assert_true('ear_left' in outlines.keys())
    assert_true('ear_right' in outlines.keys())
    assert_true('autoshrink' in outlines.keys())
    assert_true(outlines['autoshrink'])
    assert_true('clip_radius' in outlines.keys())
    assert_array_equal(outlines['clip_radius'], 0.5)

    pos, outlines = _check_outlines(pos, 'skirt')
    assert_true('head' in outlines.keys())
    assert_true('nose' in outlines.keys())
    assert_true('ear_left' in outlines.keys())
    assert_true('ear_right' in outlines.keys())
    assert_true('autoshrink' in outlines.keys())
    assert_true(not outlines['autoshrink'])
    assert_true('clip_radius' in outlines.keys())
    assert_array_equal(outlines['clip_radius'], 0.625)

    pos, outlines = _check_outlines(pos, 'skirt',
                                    head_pos={'scale': [1.2, 1.2]})
    assert_array_equal(outlines['clip_radius'], 0.75)

    # Plot skirt
    evoked.plot_topomap(times, ch_type='eeg', outlines='skirt')

    # Pass custom outlines without patch
    evoked.plot_topomap(times, ch_type='eeg', outlines=outlines)
    plt.close('all')

    # Pass custom outlines with patch callable
    def patch():
        # Circle in axes coordinates used as the clip patch for the topomap.
        return Circle((0.5, 0.4687), radius=.46,
                      clip_on=True, transform=plt.gca().transAxes)
    outlines['patch'] = patch
    plot_evoked_topomap(evoked, times, ch_type='eeg', outlines=outlines)

    # Remove digitization points. Now topomap should fail
    evoked.info['dig'] = None
    assert_raises(RuntimeError, plot_evoked_topomap, evoked,
                  times, ch_type='eeg')
    plt.close('all')

    # Test error messages for invalid pos parameter
    n_channels = len(pos)
    data = np.ones(n_channels)
    pos_1d = np.zeros(n_channels)
    pos_3d = np.zeros((n_channels, 2, 2))
    assert_raises(ValueError, plot_topomap, data, pos_1d)
    assert_raises(ValueError, plot_topomap, data, pos_3d)
    assert_raises(ValueError, plot_topomap, data, pos[:3, :])

    pos_x = pos[:, :1]
    pos_xyz = np.c_[pos, np.zeros(n_channels)[:, np.newaxis]]
    assert_raises(ValueError, plot_topomap, data, pos_x)
    assert_raises(ValueError, plot_topomap, data, pos_xyz)

    # An #channels x 4 matrix should work though. In this case (x, y, width,
    # height) is assumed.
    pos_xywh = np.c_[pos, np.zeros((n_channels, 2))]
    plot_topomap(data, pos_xywh)
    plt.close('all')

    # Test peak finder
    axes = [plt.subplot(131), plt.subplot(132)]
    evoked.plot_topomap(times='peaks', axes=axes)
    plt.close('all')
    # Plant single peaks in zeroed data and check _find_peaks locates them.
    evoked.data = np.zeros(evoked.data.shape)
    evoked.data[50][1] = 1
    assert_array_equal(_find_peaks(evoked, 10), evoked.times[1])
    evoked.data[80][100] = 1
    assert_array_equal(_find_peaks(evoked, 10), evoked.times[[1, 100]])
    evoked.data[2][95] = 2
    assert_array_equal(_find_peaks(evoked, 10), evoked.times[[1, 95]])
    assert_array_equal(_find_peaks(evoked, 1), evoked.times[95])
def test_plot_tfr_topomap():
    """Test plotting of TFR data
    """
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    raw = _get_raw()
    # Build a small random AverageTFR (channels x freqs x times).
    times = np.linspace(-0.1, 0.1, 200)
    n_freqs = 3
    nave = 1
    rng = np.random.RandomState(42)
    data = rng.randn(len(raw.ch_names), n_freqs, len(times))
    tfr = AverageTFR(raw.info, data, times, np.arange(n_freqs), nave)
    tfr.plot_topomap(ch_type='mag', tmin=0.05, tmax=0.150, fmin=0, fmax=10,
                     res=16)

    # Fake a rectangle selection by constructing synthetic press/release
    # mouse events and feeding them to the onselect handlers.
    eclick = mpl.backend_bases.MouseEvent('button_press_event',
                                          plt.gcf().canvas, 0, 0, 1)
    eclick.xdata = 0.1
    eclick.ydata = 0.1
    eclick.inaxes = plt.gca()
    erelease = mpl.backend_bases.MouseEvent('button_release_event',
                                            plt.gcf().canvas, 0.9, 0.9, 1)
    erelease.xdata = 0.3
    erelease.ydata = 0.2
    pos = [[0.11, 0.11], [0.25, 0.5], [0.0, 0.2], [0.2, 0.39]]
    _onselect(eclick, erelease, tfr, pos, 'mag', 1, 3, 1, 3, 'RdBu_r', list())
    tfr._onselect(eclick, erelease, None, 'mean', None)
    plt.close('all')
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import numpy as np
from .. import backend as K
from .. import activations
from .. import initializers
from .. import regularizers
from .. import constraints
from ..engine import Layer
from ..engine import InputSpec
from ..legacy import interfaces
def _time_distributed_dense(x, w, b=None, dropout=None,
                            input_dim=None, output_dim=None,
                            timesteps=None, training=None):
    """Apply `y . w + b` for every temporal slice y of x.

    # Arguments
        x: input tensor.
        w: weight matrix.
        b: optional bias vector.
        dropout: whether to apply dropout (same dropout mask
            for every temporal slice of the input).
        input_dim: integer; optional dimensionality of the input.
        output_dim: integer; optional dimensionality of the output.
        timesteps: integer; optional number of timesteps.
        training: training phase tensor or boolean.

    # Returns
        Output tensor.
    """
    # Fall back to symbolic shapes when the dimensions were not supplied.
    input_dim = input_dim or K.shape(x)[2]
    timesteps = timesteps or K.shape(x)[1]
    output_dim = output_dim or K.shape(w)[1]
    if dropout is not None and 0. < dropout < 1.:
        # Build one dropout mask and broadcast it across every timestep so
        # each temporal slice is dropped identically.
        mask_template = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
        tiled_mask = K.repeat(K.dropout(mask_template, dropout), timesteps)
        x = K.in_train_phase(x * tiled_mask, x, training=training)
    # Merge batch and time dimensions so one matmul covers all slices.
    x = K.reshape(x, (-1, input_dim))
    x = K.dot(x, w)
    if b is not None:
        x = K.bias_add(x, b)
    # Restore the 3D (batch, timesteps, output_dim) layout.
    if K.backend() == 'tensorflow':
        x = K.reshape(x, K.stack([-1, timesteps, output_dim]))
        x.set_shape([None, None, output_dim])
    else:
        x = K.reshape(x, (-1, timesteps, output_dim))
    return x
class Recurrent(Layer):
    """Abstract base class for recurrent layers.

    Do not use in a model -- it's not a valid layer!
    Use its children classes `LSTM`, `GRU` and `SimpleRNN` instead.

    All recurrent layers (`LSTM`, `GRU`, `SimpleRNN`) also
    follow the specifications of this class and accept
    the keyword arguments listed below.

    # Example

    ```python
        # as the first layer in a Sequential model
        model = Sequential()
        model.add(LSTM(32, input_shape=(10, 64)))
        # now model.output_shape == (None, 32)
        # note: `None` is the batch dimension.

        # for subsequent layers, no need to specify the input size:
        model.add(LSTM(16))

        # to stack recurrent layers, you must use return_sequences=True
        # on any recurrent layer that feeds into another recurrent layer.
        # note that you only need to specify the input size on the first layer.
        model = Sequential()
        model.add(LSTM(64, input_dim=64, input_length=10, return_sequences=True))
        model.add(LSTM(32, return_sequences=True))
        model.add(LSTM(10))
    ```

    # Arguments
        weights: list of Numpy arrays to set as initial weights.
            The list should have 3 elements, of shapes:
            `[(input_dim, output_dim), (output_dim, output_dim), (output_dim,)]`.
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence.
        return_state: Boolean. Whether to return the last state
            in addition to the output.
        go_backwards: Boolean (default False).
            If True, process the input sequence backwards and return the
            reversed sequence.
        stateful: Boolean (default False). If True, the last state
            for each sample at index i in a batch will be used as initial
            state for the sample of index i in the following batch.
        unroll: Boolean (default False).
            If True, the network will be unrolled,
            else a symbolic loop will be used.
            Unrolling can speed-up a RNN,
            although it tends to be more memory-intensive.
            Unrolling is only suitable for short sequences.
        implementation: one of {0, 1, or 2}.
            If set to 0, the RNN will use
            an implementation that uses fewer, larger matrix products,
            thus running faster on CPU but consuming more memory.
            If set to 1, the RNN will use more matrix products,
            but smaller ones, thus running slower
            (may actually be faster on GPU) while consuming less memory.
            If set to 2 (LSTM/GRU only),
            the RNN will combine the input gate,
            the forget gate and the output gate into a single matrix,
            enabling more time-efficient parallelization on the GPU.
            Note: RNN dropout must be shared for all gates,
            resulting in a slightly reduced regularization.
        input_dim: dimensionality of the input (integer).
            This argument (or alternatively, the keyword argument `input_shape`)
            is required when using this layer as the first layer in a model.
        input_length: Length of input sequences, to be specified
            when it is constant.
            This argument is required if you are going to connect
            `Flatten` then `Dense` layers upstream
            (without it, the shape of the dense outputs cannot be computed).
            Note that if the recurrent layer is not the first layer
            in your model, you would need to specify the input length
            at the level of the first layer
            (e.g. via the `input_shape` argument)

    # Input shapes
        3D tensor with shape `(batch_size, timesteps, input_dim)`,
        (Optional) 2D tensors with shape `(batch_size, output_dim)`.

    # Output shape
        - if `return_state`: a list of tensors. The first tensor is
            the output. The remaining tensors are the last states,
            each with shape `(batch_size, units)`.
        - if `return_sequences`: 3D tensor with shape
            `(batch_size, timesteps, units)`.
        - else, 2D tensor with shape `(batch_size, units)`.

    # Masking
        This layer supports masking for input data with a variable number
        of timesteps. To introduce masks to your data,
        use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
        set to `True`.

    # Note on using statefulness in RNNs
        You can set RNN layers to be 'stateful', which means that the states
        computed for the samples in one batch will be reused as initial states
        for the samples in the next batch. This assumes a one-to-one mapping
        between samples in different successive batches.

        To enable statefulness:
            - specify `stateful=True` in the layer constructor.
            - specify a fixed batch size for your model, by passing
                if sequential model:
                    `batch_input_shape=(...)` to the first layer in your model.
                else for functional model with 1 or more Input layers:
                    `batch_shape=(...)` to all the first layers in your model.
                This is the expected shape of your inputs
                *including the batch size*.
                It should be a tuple of integers, e.g. `(32, 10, 100)`.
            - specify `shuffle=False` when calling fit().

        To reset the states of your model, call `.reset_states()` on either
        a specific layer, or on your entire model.

    # Note on specifying the initial state of RNNs
        You can specify the initial state of RNN layers symbolically by
        calling them with the keyword argument `initial_state`. The value of
        `initial_state` should be a tensor or list of tensors representing
        the initial state of the RNN layer.

        You can specify the initial state of RNN layers numerically by
        calling `reset_states` with the keyword argument `states`. The value of
        `states` should be a numpy array or list of numpy arrays representing
        the initial state of the RNN layer.
    """
    def __init__(self, return_sequences=False,
                 return_state=False,
                 go_backwards=False,
                 stateful=False,
                 unroll=False,
                 implementation=0,
                 **kwargs):
        super(Recurrent, self).__init__(**kwargs)
        self.return_sequences = return_sequences
        self.return_state = return_state
        self.go_backwards = go_backwards
        if K.backend() == 'cntk' and stateful:
            raise ValueError('Stateful RNN is not currently supported with CNTK.')
        self.stateful = stateful
        self.unroll = unroll
        self.implementation = implementation
        self.supports_masking = True
        # Subclasses replace input_spec/state_spec in build() with
        # fully-specified shapes; here only the rank (3D input) is known.
        self.input_spec = [InputSpec(ndim=3)]
        self.state_spec = None
        # The base class carries no dropout; subclasses overwrite these so
        # that the learning-phase bookkeeping in call() becomes active.
        self.dropout = 0
        self.recurrent_dropout = 0
    def compute_output_shape(self, input_shape):
        # `input_shape` is a list when initial states are passed as extra
        # inputs; only the first entry describes the sequence input itself.
        if isinstance(input_shape, list):
            input_shape = input_shape[0]
        if self.return_sequences:
            output_shape = (input_shape[0], input_shape[1], self.units)
        else:
            output_shape = (input_shape[0], self.units)
        if self.return_state:
            # One (batch, units) shape per internal state tensor.
            state_shape = [(input_shape[0], self.units) for _ in self.states]
            return [output_shape] + state_shape
        else:
            return output_shape
    def compute_mask(self, inputs, mask):
        # Only the sequence input's mask matters; state inputs are unmasked.
        if isinstance(mask, list):
            mask = mask[0]
        # A time-dimension mask only makes sense when sequences are returned.
        output_mask = mask if self.return_sequences else None
        if self.return_state:
            state_mask = [None for _ in self.states]
            return [output_mask] + state_mask
        else:
            return output_mask
    def step(self, inputs, states):
        # Single-timestep transition; must be implemented by subclasses.
        raise NotImplementedError
    def get_constants(self, inputs, training=None):
        # Tensors passed unchanged to every step() call (e.g. dropout masks).
        return []
    def get_initial_state(self, inputs):
        # build an all-zero tensor of shape (samples, output_dim)
        initial_state = K.zeros_like(inputs)  # (samples, timesteps, input_dim)
        initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
        initial_state = K.expand_dims(initial_state)  # (samples, 1)
        initial_state = K.tile(initial_state, [1, self.units])  # (samples, output_dim)
        # One zero tensor per state slot expected by the subclass.
        initial_state = [initial_state for _ in range(len(self.states))]
        return initial_state
    def preprocess_input(self, inputs, training=None):
        # Hook for subclasses to pre-transform the full sequence (e.g. the
        # implementation=0 time-distributed projection). Identity by default.
        return inputs
    def __call__(self, inputs, initial_state=None, **kwargs):
        # If `initial_state` is specified,
        # and if it a Keras tensor,
        # then add it to the inputs and temporarily
        # modify the input spec to include the state.
        if initial_state is None:
            return super(Recurrent, self).__call__(inputs, **kwargs)
        if not isinstance(initial_state, (list, tuple)):
            initial_state = [initial_state]
        # All state tensors must be of the same kind (Keras or non-Keras);
        # mixing them would break graph bookkeeping.
        is_keras_tensor = hasattr(initial_state[0], '_keras_history')
        for tensor in initial_state:
            if hasattr(tensor, '_keras_history') != is_keras_tensor:
                raise ValueError('The initial state of an RNN layer cannot be'
                                 ' specified with a mix of Keras tensors and'
                                 ' non-Keras tensors')
        if is_keras_tensor:
            # Compute the full input spec, including state
            input_spec = self.input_spec
            state_spec = self.state_spec
            if not isinstance(input_spec, list):
                input_spec = [input_spec]
            if not isinstance(state_spec, list):
                state_spec = [state_spec]
            self.input_spec = input_spec + state_spec
            # Compute the full inputs, including state
            inputs = [inputs] + list(initial_state)
            # Perform the call
            output = super(Recurrent, self).__call__(inputs, **kwargs)
            # Restore original input spec
            self.input_spec = input_spec
            return output
        else:
            # Non-symbolic states are forwarded to call() as a plain kwarg.
            kwargs['initial_state'] = initial_state
            return super(Recurrent, self).__call__(inputs, **kwargs)
    def call(self, inputs, mask=None, training=None, initial_state=None):
        # input shape: `(samples, time (padded with zeros), input_dim)`
        # note that the .build() method of subclasses MUST define
        # self.input_spec and self.state_spec with complete input shapes.
        if isinstance(inputs, list):
            # Symbolic initial states arrive appended to the inputs list.
            initial_state = inputs[1:]
            inputs = inputs[0]
        elif initial_state is not None:
            pass
        elif self.stateful:
            initial_state = self.states
        else:
            initial_state = self.get_initial_state(inputs)
        if isinstance(mask, list):
            mask = mask[0]
        if len(initial_state) != len(self.states):
            raise ValueError('Layer has ' + str(len(self.states)) +
                             ' states but was passed ' +
                             str(len(initial_state)) +
                             ' initial states.')
        input_shape = K.int_shape(inputs)
        if self.unroll and input_shape[1] is None:
            raise ValueError('Cannot unroll a RNN if the '
                             'time dimension is undefined. \n'
                             '- If using a Sequential model, '
                             'specify the time dimension by passing '
                             'an `input_shape` or `batch_input_shape` '
                             'argument to your first layer. If your '
                             'first layer is an Embedding, you can '
                             'also use the `input_length` argument.\n'
                             '- If using the functional API, specify '
                             'the time dimension by passing a `shape` '
                             'or `batch_shape` argument to your Input layer.')
        # NOTE(review): `training` is not forwarded here (`training=None`),
        # so dropout masks fall back to the global learning phase --
        # confirm this is intended before changing it.
        constants = self.get_constants(inputs, training=None)
        preprocessed_input = self.preprocess_input(inputs, training=None)
        last_output, outputs, states = K.rnn(self.step,
                                             preprocessed_input,
                                             initial_state,
                                             go_backwards=self.go_backwards,
                                             mask=mask,
                                             constants=constants,
                                             unroll=self.unroll,
                                             input_length=input_shape[1])
        if self.stateful:
            # Schedule copying the final states back into the layer's state
            # variables so the next batch resumes from them.
            updates = []
            for i in range(len(states)):
                updates.append((self.states[i], states[i]))
            self.add_update(updates, inputs)
        # Properly set learning phase
        if 0 < self.dropout + self.recurrent_dropout:
            last_output._uses_learning_phase = True
            outputs._uses_learning_phase = True
        if self.return_sequences:
            output = outputs
        else:
            output = last_output
        if self.return_state:
            if not isinstance(states, (list, tuple)):
                states = [states]
            else:
                states = list(states)
            return [output] + states
        else:
            return output
    def reset_states(self, states=None):
        # Reset (or explicitly set, via `states`) the recorded batch states.
        # Only meaningful for stateful layers with a known batch size.
        if not self.stateful:
            raise AttributeError('Layer must be stateful.')
        batch_size = self.input_spec[0].shape[0]
        if not batch_size:
            raise ValueError('If a RNN is stateful, it needs to know '
                             'its batch size. Specify the batch size '
                             'of your input tensors: \n'
                             '- If using a Sequential model, '
                             'specify the batch size by passing '
                             'a `batch_input_shape` '
                             'argument to your first layer.\n'
                             '- If using the functional API, specify '
                             'the time dimension by passing a '
                             '`batch_shape` argument to your Input layer.')
        # initialize state if None
        if self.states[0] is None:
            self.states = [K.zeros((batch_size, self.units))
                           for _ in self.states]
        elif states is None:
            # No explicit values given: zero out the existing state variables.
            for state in self.states:
                K.set_value(state, np.zeros((batch_size, self.units)))
        else:
            if not isinstance(states, (list, tuple)):
                states = [states]
            if len(states) != len(self.states):
                raise ValueError('Layer ' + self.name + ' expects ' +
                                 str(len(self.states)) + ' states, '
                                 'but it received ' + str(len(states)) +
                                 ' state values. Input received: ' +
                                 str(states))
            for index, (value, state) in enumerate(zip(states, self.states)):
                if value.shape != (batch_size, self.units):
                    raise ValueError('State ' + str(index) +
                                     ' is incompatible with layer ' +
                                     self.name + ': expected shape=' +
                                     str((batch_size, self.units)) +
                                     ', found shape=' + str(value.shape))
                K.set_value(state, value)
    def get_config(self):
        # Serialize the base-class constructor arguments; subclasses merge
        # their own config on top of this.
        config = {'return_sequences': self.return_sequences,
                  'return_state': self.return_state,
                  'go_backwards': self.go_backwards,
                  'stateful': self.stateful,
                  'unroll': self.unroll,
                  'implementation': self.implementation}
        base_config = super(Recurrent, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class SimpleRNN(Recurrent):
    """Fully-connected RNN where the output is to be fed back to input.

    # Arguments
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use
            (see [activations](../activations.md)).
            If you pass None, no activation is applied
            (ie. "linear" activation: `a(x) = x`).
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs.
            (see [initializers](../initializers.md)).
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix,
            used for the linear transformation of the recurrent state.
            (see [initializers](../initializers.md)).
        bias_initializer: Initializer for the bias vector
            (see [initializers](../initializers.md)).
        kernel_regularizer: Regularizer function applied to
            the `kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        recurrent_regularizer: Regularizer function applied to
            the `recurrent_kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        bias_regularizer: Regularizer function applied to the bias vector
            (see [regularizer](../regularizers.md)).
        activity_regularizer: Regularizer function applied to
            the output of the layer (its "activation").
            (see [regularizer](../regularizers.md)).
        kernel_constraint: Constraint function applied to
            the `kernel` weights matrix
            (see [constraints](../constraints.md)).
        recurrent_constraint: Constraint function applied to
            the `recurrent_kernel` weights matrix
            (see [constraints](../constraints.md)).
        bias_constraint: Constraint function applied to the bias vector
            (see [constraints](../constraints.md)).
        dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the inputs.
        recurrent_dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the recurrent state.

    # References
        - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
    """
    @interfaces.legacy_recurrent_support
    def __init__(self, units,
                 activation='tanh',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 **kwargs):
        super(SimpleRNN, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # Clamp dropout rates into [0, 1].
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        # Single hidden state of shape (batch, units).
        self.state_spec = InputSpec(shape=(None, self.units))
    def build(self, input_shape):
        if isinstance(input_shape, list):
            input_shape = input_shape[0]
        # Stateful layers need a fixed batch size for their state variables.
        batch_size = input_shape[0] if self.stateful else None
        self.input_dim = input_shape[2]
        self.input_spec[0] = InputSpec(shape=(batch_size, None, self.input_dim))
        self.states = [None]
        if self.stateful:
            self.reset_states()
        self.kernel = self.add_weight(shape=(self.input_dim, self.units),
                                      name='kernel',
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units,),
                                        name='bias',
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.built = True
    def preprocess_input(self, inputs, training=None):
        # implementation 0 precomputes the input projection (x . W + b,
        # with input dropout) over all timesteps before the recurrence.
        if self.implementation > 0:
            return inputs
        else:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[2]
            timesteps = input_shape[1]
            return _time_distributed_dense(inputs,
                                           self.kernel,
                                           self.bias,
                                           self.dropout,
                                           input_dim,
                                           self.units,
                                           timesteps,
                                           training=training)
    def step(self, inputs, states):
        # states: [previous output, input dropout mask, recurrent dropout mask]
        # (the masks come from get_constants()).
        if self.implementation == 0:
            # Input projection was already applied in preprocess_input().
            h = inputs
        else:
            if 0 < self.dropout < 1:
                h = K.dot(inputs * states[1], self.kernel)
            else:
                h = K.dot(inputs, self.kernel)
            if self.bias is not None:
                h = K.bias_add(h, self.bias)
        prev_output = states[0]
        if 0 < self.recurrent_dropout < 1:
            prev_output *= states[2]
        output = h + K.dot(prev_output, self.recurrent_kernel)
        if self.activation is not None:
            output = self.activation(output)
        # Properly set learning phase on output tensor.
        if 0 < self.dropout + self.recurrent_dropout:
            output._uses_learning_phase = True
        return output, [output]
    def get_constants(self, inputs, training=None):
        # Build the per-sequence dropout masks shared across all timesteps;
        # a constant 1.0 is appended when a mask is not needed so that
        # step() can always index states[1] and states[2].
        constants = []
        if self.implementation != 0 and 0 < self.dropout < 1:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            def dropped_inputs():
                return K.dropout(ones, self.dropout)
            dp_mask = K.in_train_phase(dropped_inputs,
                                       ones,
                                       training=training)
            constants.append(dp_mask)
        else:
            constants.append(K.cast_to_floatx(1.))
        if 0 < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))
            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)
            rec_dp_mask = K.in_train_phase(dropped_inputs,
                                           ones,
                                           training=training)
            constants.append(rec_dp_mask)
        else:
            constants.append(K.cast_to_floatx(1.))
        return constants
    def get_config(self):
        # Serialize constructor arguments; merged with the Recurrent config.
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout}
        base_config = super(SimpleRNN, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class GRU(Recurrent):
    """Gated Recurrent Unit - Cho et al. 2014.

    # Arguments
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use
            (see [activations](../activations.md)).
            If you pass None, no activation is applied
            (ie. "linear" activation: `a(x) = x`).
        recurrent_activation: Activation function to use
            for the recurrent step
            (see [activations](../activations.md)).
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs.
            (see [initializers](../initializers.md)).
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix,
            used for the linear transformation of the recurrent state.
            (see [initializers](../initializers.md)).
        bias_initializer: Initializer for the bias vector
            (see [initializers](../initializers.md)).
        kernel_regularizer: Regularizer function applied to
            the `kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        recurrent_regularizer: Regularizer function applied to
            the `recurrent_kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        bias_regularizer: Regularizer function applied to the bias vector
            (see [regularizer](../regularizers.md)).
        activity_regularizer: Regularizer function applied to
            the output of the layer (its "activation").
            (see [regularizer](../regularizers.md)).
        kernel_constraint: Constraint function applied to
            the `kernel` weights matrix
            (see [constraints](../constraints.md)).
        recurrent_constraint: Constraint function applied to
            the `recurrent_kernel` weights matrix
            (see [constraints](../constraints.md)).
        bias_constraint: Constraint function applied to the bias vector
            (see [constraints](../constraints.md)).
        dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the inputs.
        recurrent_dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the recurrent state.

    # References
        - [On the Properties of Neural Machine Translation: Encoder-Decoder Approaches](https://arxiv.org/abs/1409.1259)
        - [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](http://arxiv.org/abs/1412.3555v1)
        - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
    """
    @interfaces.legacy_recurrent_support
    def __init__(self, units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 **kwargs):
        super(GRU, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # Clamp dropout rates into [0, 1].
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        # Single hidden state of shape (batch, units).
        self.state_spec = InputSpec(shape=(None, self.units))
    def build(self, input_shape):
        if isinstance(input_shape, list):
            input_shape = input_shape[0]
        # Stateful layers need a fixed batch size for their state variables.
        batch_size = input_shape[0] if self.stateful else None
        self.input_dim = input_shape[2]
        self.input_spec[0] = InputSpec(shape=(batch_size, None, self.input_dim))
        self.states = [None]
        if self.stateful:
            self.reset_states()
        # One fused weight matrix holds the z (update), r (reset) and
        # h (candidate) projections, concatenated along the last axis.
        self.kernel = self.add_weight(shape=(self.input_dim, self.units * 3),
                                      name='kernel',
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units * 3),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units * 3,),
                                        name='bias',
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Per-gate views into the fused matrices (no copies are made).
        self.kernel_z = self.kernel[:, :self.units]
        self.recurrent_kernel_z = self.recurrent_kernel[:, :self.units]
        self.kernel_r = self.kernel[:, self.units: self.units * 2]
        self.recurrent_kernel_r = self.recurrent_kernel[:,
                                                        self.units:
                                                        self.units * 2]
        self.kernel_h = self.kernel[:, self.units * 2:]
        self.recurrent_kernel_h = self.recurrent_kernel[:, self.units * 2:]
        if self.use_bias:
            self.bias_z = self.bias[:self.units]
            self.bias_r = self.bias[self.units: self.units * 2]
            self.bias_h = self.bias[self.units * 2:]
        else:
            self.bias_z = None
            self.bias_r = None
            self.bias_h = None
        self.built = True
    def preprocess_input(self, inputs, training=None):
        # implementation 0 precomputes the three input projections (with
        # input dropout) over all timesteps and concatenates them, so that
        # step() only has to slice.
        if self.implementation == 0:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[2]
            timesteps = input_shape[1]
            x_z = _time_distributed_dense(inputs, self.kernel_z, self.bias_z,
                                          self.dropout, input_dim, self.units,
                                          timesteps, training=training)
            x_r = _time_distributed_dense(inputs, self.kernel_r, self.bias_r,
                                          self.dropout, input_dim, self.units,
                                          timesteps, training=training)
            x_h = _time_distributed_dense(inputs, self.kernel_h, self.bias_h,
                                          self.dropout, input_dim, self.units,
                                          timesteps, training=training)
            return K.concatenate([x_z, x_r, x_h], axis=2)
        else:
            return inputs
    def get_constants(self, inputs, training=None):
        # Build per-sequence dropout masks, one per gate (3); constant 1.0
        # placeholders keep the states layout fixed for step().
        constants = []
        if self.implementation != 0 and 0 < self.dropout < 1:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            def dropped_inputs():
                return K.dropout(ones, self.dropout)
            dp_mask = [K.in_train_phase(dropped_inputs,
                                        ones,
                                        training=training) for _ in range(3)]
            constants.append(dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        if 0 < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))
            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)
            rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training) for _ in range(3)]
            constants.append(rec_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants
    def step(self, inputs, states):
        h_tm1 = states[0]  # previous memory
        dp_mask = states[1]  # dropout matrices for recurrent units
        rec_dp_mask = states[2]
        if self.implementation == 2:
            # Fused path: one big matmul for all three gates, then slice.
            matrix_x = K.dot(inputs * dp_mask[0], self.kernel)
            if self.use_bias:
                matrix_x = K.bias_add(matrix_x, self.bias)
            matrix_inner = K.dot(h_tm1 * rec_dp_mask[0],
                                 self.recurrent_kernel[:, :2 * self.units])
            x_z = matrix_x[:, :self.units]
            x_r = matrix_x[:, self.units: 2 * self.units]
            recurrent_z = matrix_inner[:, :self.units]
            recurrent_r = matrix_inner[:, self.units: 2 * self.units]
            z = self.recurrent_activation(x_z + recurrent_z)
            r = self.recurrent_activation(x_r + recurrent_r)
            x_h = matrix_x[:, 2 * self.units:]
            recurrent_h = K.dot(r * h_tm1 * rec_dp_mask[0],
                                self.recurrent_kernel[:, 2 * self.units:])
            hh = self.activation(x_h + recurrent_h)
        else:
            if self.implementation == 0:
                # Input projections were precomputed in preprocess_input().
                x_z = inputs[:, :self.units]
                x_r = inputs[:, self.units: 2 * self.units]
                x_h = inputs[:, 2 * self.units:]
            elif self.implementation == 1:
                x_z = K.dot(inputs * dp_mask[0], self.kernel_z)
                x_r = K.dot(inputs * dp_mask[1], self.kernel_r)
                x_h = K.dot(inputs * dp_mask[2], self.kernel_h)
                if self.use_bias:
                    x_z = K.bias_add(x_z, self.bias_z)
                    x_r = K.bias_add(x_r, self.bias_r)
                    x_h = K.bias_add(x_h, self.bias_h)
            else:
                raise ValueError('Unknown `implementation` mode.')
            # z: update gate, r: reset gate, hh: candidate state.
            z = self.recurrent_activation(x_z + K.dot(h_tm1 * rec_dp_mask[0],
                                                      self.recurrent_kernel_z))
            r = self.recurrent_activation(x_r + K.dot(h_tm1 * rec_dp_mask[1],
                                                      self.recurrent_kernel_r))
            hh = self.activation(x_h + K.dot(r * h_tm1 * rec_dp_mask[2],
                                             self.recurrent_kernel_h))
        # Interpolate between previous state and candidate via update gate.
        h = z * h_tm1 + (1 - z) * hh
        if 0 < self.dropout + self.recurrent_dropout:
            h._uses_learning_phase = True
        return h, [h]
    def get_config(self):
        # Serialize constructor arguments; merged with the Recurrent config.
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'recurrent_activation': activations.serialize(self.recurrent_activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout}
        base_config = super(GRU, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class LSTM(Recurrent):
    """Long-Short Term Memory unit - Hochreiter 1997.
    For a step-by-step description of the algorithm, see
    [this tutorial](http://deeplearning.net/tutorial/lstm.html).
    # Arguments
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use
            (see [activations](../activations.md)).
            If you pass None, no activation is applied
            (ie. "linear" activation: `a(x) = x`).
        recurrent_activation: Activation function to use
            for the recurrent step
            (see [activations](../activations.md)).
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs.
            (see [initializers](../initializers.md)).
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix,
            used for the linear transformation of the recurrent state.
            (see [initializers](../initializers.md)).
        bias_initializer: Initializer for the bias vector
            (see [initializers](../initializers.md)).
        unit_forget_bias: Boolean.
            If True, add 1 to the bias of the forget gate at initialization.
            Setting it to true will also force `bias_initializer="zeros"`.
            This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
        kernel_regularizer: Regularizer function applied to
            the `kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        recurrent_regularizer: Regularizer function applied to
            the `recurrent_kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        bias_regularizer: Regularizer function applied to the bias vector
            (see [regularizer](../regularizers.md)).
        activity_regularizer: Regularizer function applied to
            the output of the layer (its "activation").
            (see [regularizer](../regularizers.md)).
        kernel_constraint: Constraint function applied to
            the `kernel` weights matrix
            (see [constraints](../constraints.md)).
        recurrent_constraint: Constraint function applied to
            the `recurrent_kernel` weights matrix
            (see [constraints](../constraints.md)).
        bias_constraint: Constraint function applied to the bias vector
            (see [constraints](../constraints.md)).
        dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the inputs.
        recurrent_dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the recurrent state.
    # References
        - [Long short-term memory](http://www.bioinf.jku.at/publications/older/2604.pdf) (original 1997 paper)
        - [Learning to forget: Continual prediction with LSTM](http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)
        - [Supervised sequence labeling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf)
        - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
    """
    @interfaces.legacy_recurrent_support
    def __init__(self, units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 **kwargs):
        super(LSTM, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # Clamp dropout rates into [0, 1].
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        # Two states: hidden output h and cell state c, both (batch, units).
        self.state_spec = [InputSpec(shape=(None, self.units)),
                           InputSpec(shape=(None, self.units))]

    def build(self, input_shape):
        """Create the gate weights.

        `kernel` and `recurrent_kernel` pack the four gates (i, f, c, o)
        side by side along the last axis, hence the `units * 4` width;
        per-gate views are sliced out below.
        """
        if isinstance(input_shape, list):
            input_shape = input_shape[0]
        batch_size = input_shape[0] if self.stateful else None
        self.input_dim = input_shape[2]
        self.input_spec[0] = InputSpec(shape=(batch_size, None, self.input_dim))
        self.states = [None, None]
        if self.stateful:
            self.reset_states()
        self.kernel = self.add_weight(shape=(self.input_dim, self.units * 4),
                                      name='kernel',
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units * 4),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)
        if self.use_bias:
            if self.unit_forget_bias:
                # Forget-gate bias slice starts at ones (Jozefowicz et al.);
                # the other three gate slices use the configured initializer.
                def bias_initializer(shape, *args, **kwargs):
                    return K.concatenate([
                        self.bias_initializer((self.units,), *args, **kwargs),
                        initializers.Ones()((self.units,), *args, **kwargs),
                        self.bias_initializer((self.units * 2,), *args, **kwargs),
                    ])
            else:
                bias_initializer = self.bias_initializer
            self.bias = self.add_weight(shape=(self.units * 4,),
                                        name='bias',
                                        initializer=bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Per-gate views into the packed weights.
        self.kernel_i = self.kernel[:, :self.units]
        self.kernel_f = self.kernel[:, self.units: self.units * 2]
        self.kernel_c = self.kernel[:, self.units * 2: self.units * 3]
        self.kernel_o = self.kernel[:, self.units * 3:]
        self.recurrent_kernel_i = self.recurrent_kernel[:, :self.units]
        self.recurrent_kernel_f = self.recurrent_kernel[:, self.units: self.units * 2]
        self.recurrent_kernel_c = self.recurrent_kernel[:, self.units * 2: self.units * 3]
        self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3:]
        if self.use_bias:
            self.bias_i = self.bias[:self.units]
            self.bias_f = self.bias[self.units: self.units * 2]
            self.bias_c = self.bias[self.units * 2: self.units * 3]
            self.bias_o = self.bias[self.units * 3:]
        else:
            self.bias_i = None
            self.bias_f = None
            self.bias_c = None
            self.bias_o = None
        self.built = True

    def preprocess_input(self, inputs, training=None):
        """For implementation 0, precompute the input projections for all
        timesteps at once; otherwise pass inputs through unchanged."""
        if self.implementation == 0:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[2]
            timesteps = input_shape[1]
            x_i = _time_distributed_dense(inputs, self.kernel_i, self.bias_i,
                                          self.dropout, input_dim, self.units,
                                          timesteps, training=training)
            x_f = _time_distributed_dense(inputs, self.kernel_f, self.bias_f,
                                          self.dropout, input_dim, self.units,
                                          timesteps, training=training)
            x_c = _time_distributed_dense(inputs, self.kernel_c, self.bias_c,
                                          self.dropout, input_dim, self.units,
                                          timesteps, training=training)
            x_o = _time_distributed_dense(inputs, self.kernel_o, self.bias_o,
                                          self.dropout, input_dim, self.units,
                                          timesteps, training=training)
            return K.concatenate([x_i, x_f, x_c, x_o], axis=2)
        else:
            return inputs

    def get_constants(self, inputs, training=None):
        """Build the dropout masks (one per gate) passed to `step` as
        constants; a mask of 1.0 is used when dropout is disabled."""
        constants = []
        if self.implementation != 0 and 0 < self.dropout < 1:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))

            def dropped_inputs():
                return K.dropout(ones, self.dropout)

            dp_mask = [K.in_train_phase(dropped_inputs,
                                        ones,
                                        training=training) for _ in range(4)]
            constants.append(dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        if 0 < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)
            rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training) for _ in range(4)]
            constants.append(rec_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants

    def step(self, inputs, states):
        """Run one LSTM timestep; returns (h, [h, c])."""
        h_tm1 = states[0]
        c_tm1 = states[1]
        dp_mask = states[2]
        rec_dp_mask = states[3]
        if self.implementation == 2:
            # Single fused matmul over all four gates, then slice.
            z = K.dot(inputs * dp_mask[0], self.kernel)
            z += K.dot(h_tm1 * rec_dp_mask[0], self.recurrent_kernel)
            if self.use_bias:
                z = K.bias_add(z, self.bias)
            z0 = z[:, :self.units]
            z1 = z[:, self.units: 2 * self.units]
            z2 = z[:, 2 * self.units: 3 * self.units]
            z3 = z[:, 3 * self.units:]
            i = self.recurrent_activation(z0)
            f = self.recurrent_activation(z1)
            c = f * c_tm1 + i * self.activation(z2)
            o = self.recurrent_activation(z3)
        else:
            if self.implementation == 0:
                # Projections were precomputed in preprocess_input.
                x_i = inputs[:, :self.units]
                x_f = inputs[:, self.units: 2 * self.units]
                x_c = inputs[:, 2 * self.units: 3 * self.units]
                x_o = inputs[:, 3 * self.units:]
            elif self.implementation == 1:
                x_i = K.dot(inputs * dp_mask[0], self.kernel_i)
                x_f = K.dot(inputs * dp_mask[1], self.kernel_f)
                x_c = K.dot(inputs * dp_mask[2], self.kernel_c)
                x_o = K.dot(inputs * dp_mask[3], self.kernel_o)
                if self.use_bias:
                    # Bug fix: the bias add must be guarded by use_bias --
                    # with use_bias=False the bias slices are None and the
                    # previous unconditional `+ self.bias_i` would fail.
                    # This also mirrors the GRU implementation-1 branch.
                    x_i = K.bias_add(x_i, self.bias_i)
                    x_f = K.bias_add(x_f, self.bias_f)
                    x_c = K.bias_add(x_c, self.bias_c)
                    x_o = K.bias_add(x_o, self.bias_o)
            else:
                raise ValueError('Unknown `implementation` mode.')
            i = self.recurrent_activation(x_i + K.dot(h_tm1 * rec_dp_mask[0],
                                                      self.recurrent_kernel_i))
            f = self.recurrent_activation(x_f + K.dot(h_tm1 * rec_dp_mask[1],
                                                      self.recurrent_kernel_f))
            c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1 * rec_dp_mask[2],
                                                            self.recurrent_kernel_c))
            o = self.recurrent_activation(x_o + K.dot(h_tm1 * rec_dp_mask[3],
                                                      self.recurrent_kernel_o))
        h = o * self.activation(c)
        if 0 < self.dropout + self.recurrent_dropout:
            h._uses_learning_phase = True
        return h, [h, c]

    def get_config(self):
        """Return the layer configuration as a JSON-serializable dict."""
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'recurrent_activation': activations.serialize(self.recurrent_activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'unit_forget_bias': self.unit_forget_bias,
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout}
        base_config = super(LSTM, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import json
import uuid
import mock
from oslo_utils import timeutils
import six
from swiftclient import client as swiftclient_client
from swiftclient import exceptions as swiftclient_exceptions
from testtools import matchers
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import swift
from heat.engine import resource
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.engine import stack
from heat.engine import template as templatem
from heat.tests import common
from heat.tests import utils
swiftsignal_template = '''
heat_template_version: 2013-05-23
resources:
test_wait_condition:
type: "OS::Heat::SwiftSignal"
properties:
handle: { get_resource: test_wait_condition_handle }
timeout: 1
count: 2
test_wait_condition_handle:
type: "OS::Heat::SwiftSignalHandle"
'''
swiftsignalhandle_template = '''
heat_template_version: 2013-05-23
resources:
test_wait_condition_handle:
type: "OS::Heat::SwiftSignalHandle"
'''
container_header = {
'content-length': '2',
'x-container-object-count': '0',
'accept-ranges': 'bytes',
'date': 'Fri, 25 Jul 2014 16:02:03 GMT',
'x-timestamp': '1405019787.66969',
'x-trans-id': 'tx6651b005324341f685e71-0053d27f7bdfw1',
'x-container-bytes-used': '0',
'content-type': 'application/json; charset=utf-8',
'x-versions-location': 'test'
}
obj_header = {
'content-length': '5',
'accept-ranges': 'bytes',
'last-modified': 'Fri, 25 Jul 2014 16:05:26 GMT',
'etag': '5a105e8b9d40e1329780d62ea2265d8a',
'x-timestamp': '1406304325.40094',
'x-trans-id': 'tx2f40ff2b4daa4015917fc-0053d28045dfw1',
'date': 'Fri, 25 Jul 2014 16:05:25 GMT',
'content-type': 'application/octet-stream'
}
def create_stack(template, stack_id=None):
    """Parse *template* and return a stored Stack with a predictable ID.

    When *stack_id* is not given a fresh UUID is generated; either way the
    ID is stubbed in so tests can build deterministic resource URLs.
    """
    parsed = template_format.parse(template)
    heat_template = templatem.Template(parsed)
    context = utils.dummy_context(tenant_id='test_tenant')
    new_stack = stack.Stack(context, 'test_st', heat_template,
                            disable_rollback=True)
    if stack_id is None:
        stack_id = str(uuid.uuid4())
    # Pin the stack ID while storing so we have a known value.
    with utils.UUIDStub(stack_id):
        new_stack.store()
    new_stack.id = stack_id
    return new_stack
def cont_index(obj_name, num_version_hist):
    """Fake a Swift ``get_container`` result for a versioned container.

    Returns ``(container_header, objects)`` where the listing holds
    *num_version_hist* version-history entries followed by the current
    object named *obj_name*.
    """
    history_entry = {'bytes': 11,
                     'last_modified': '2014-07-03T19:42:03.281640',
                     'hash': '9214b4e4460fcdb9f3a369941400e71e',
                     'name': "02b" + obj_name + '/1404416326.51383',
                     'content_type': 'application/octet-stream'}
    listing = [history_entry] * num_version_hist
    current_entry = {'bytes': 8,
                     'last_modified': '2014-07-03T19:42:03.849870',
                     'hash': '9ab7c0738852d7dd6a2dc0b261edc300',
                     'name': obj_name,
                     'content_type': 'application/x-www-form-urlencoded'}
    listing.append(current_entry)
    return (container_header, listing)
class SwiftSignalHandleTest(common.HeatTestCase):
    """Tests for the OS::Heat::SwiftSignalHandle resource."""

    def setUp(self):
        super(SwiftSignalHandleTest, self).setUp()
        utils.setup_dummy_db()

    @mock.patch.object(swift.SwiftClientPlugin, '_create')
    @mock.patch.object(resource.Resource, 'physical_resource_name')
    def test_create(self, mock_name, mock_swift):
        """Create yields a TempURL RefId and a versioned container."""
        st = create_stack(swiftsignalhandle_template)
        handle = st['test_wait_condition_handle']
        mock_swift_object = mock.Mock()
        mock_swift.return_value = mock_swift_object
        mock_swift_object.head_account.return_value = {
            'x-account-meta-temp-url-key': "1234"
        }
        mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
        obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
        mock_name.return_value = obj_name
        mock_swift_object.get_container.return_value = cont_index(obj_name, 2)
        mock_swift_object.get_object.return_value = (obj_header, '{"id": "1"}')
        st.create()
        handle = st.resources['test_wait_condition_handle']
        obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
        # Raw string: `\?` is an invalid escape sequence in a plain literal
        # (DeprecationWarning; SyntaxWarning on newer Pythons).
        regexp = ("http://fake-host.com:8080/v1/AUTH_test_tenant/%s/test_st-"
                  "test_wait_condition_handle-abcdefghijkl"
                  r"\?temp_url_sig=[0-9a-f]{40}&temp_url_expires=[0-9]{10}"
                  % st.id)
        res_id = st.resources['test_wait_condition_handle'].resource_id
        self.assertEqual(res_id, handle.physical_resource_name())
        self.assertThat(handle.FnGetRefId(), matchers.MatchesRegex(regexp))
        # Since the account key is mocked out above
        self.assertFalse(mock_swift_object.post_account.called)
        header = {'x-versions-location': st.id}
        self.assertEqual({'headers': header},
                         mock_swift_object.put_container.call_args[1])

    @mock.patch.object(swift.SwiftClientPlugin, '_create')
    @mock.patch.object(resource.Resource, 'physical_resource_name')
    def test_delete_empty_container(self, mock_name, mock_swift):
        """404s on object and container delete are treated as already gone."""
        st = create_stack(swiftsignalhandle_template)
        handle = st['test_wait_condition_handle']
        mock_swift_object = mock.Mock()
        mock_swift.return_value = mock_swift_object
        mock_swift_object.head_account.return_value = {
            'x-account-meta-temp-url-key': "1234"
        }
        mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
        obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
        mock_name.return_value = obj_name
        st.create()
        exc = swiftclient_exceptions.ClientException("Object DELETE failed",
                                                     http_status=404)
        mock_swift_object.delete_object.side_effect = (None, None, None, exc)
        exc = swiftclient_exceptions.ClientException("Container DELETE failed",
                                                     http_status=404)
        mock_swift_object.delete_container.side_effect = exc
        rsrc = st.resources['test_wait_condition_handle']
        scheduler.TaskRunner(rsrc.delete)()
        self.assertEqual(('DELETE', 'COMPLETE'), rsrc.state)
        self.assertEqual(4, mock_swift_object.delete_object.call_count)

    @mock.patch.object(swift.SwiftClientPlugin, '_create')
    @mock.patch.object(resource.Resource, 'physical_resource_name')
    def test_delete_object_error(self, mock_name, mock_swift):
        """A non-404 failure deleting the object surfaces as ResourceFailure."""
        st = create_stack(swiftsignalhandle_template)
        handle = st['test_wait_condition_handle']
        mock_swift_object = mock.Mock()
        mock_swift.return_value = mock_swift_object
        mock_swift_object.head_account.return_value = {
            'x-account-meta-temp-url-key': "1234"
        }
        mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
        obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
        mock_name.return_value = obj_name
        st.create()
        exc = swiftclient_exceptions.ClientException("Overlimit",
                                                     http_status=413)
        mock_swift_object.delete_object.side_effect = (None, None, None, exc)
        rsrc = st.resources['test_wait_condition_handle']
        exc = self.assertRaises(exception.ResourceFailure,
                                scheduler.TaskRunner(rsrc.delete))
        self.assertEqual('ClientException: '
                         'resources.test_wait_condition_handle: '
                         'Overlimit: 413', six.text_type(exc))

    @mock.patch.object(swift.SwiftClientPlugin, '_create')
    @mock.patch.object(resource.Resource, 'physical_resource_name')
    def test_delete_container_error(self, mock_name, mock_swift):
        """A non-404 failure deleting the container surfaces as ResourceFailure."""
        st = create_stack(swiftsignalhandle_template)
        handle = st['test_wait_condition_handle']
        mock_swift_object = mock.Mock()
        mock_swift.return_value = mock_swift_object
        mock_swift_object.head_account.return_value = {
            'x-account-meta-temp-url-key': "1234"
        }
        mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
        obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
        mock_name.return_value = obj_name
        st.create()
        exc = swiftclient_exceptions.ClientException("Object DELETE failed",
                                                     http_status=404)
        mock_swift_object.delete_object.side_effect = (None, None, None, exc)
        exc = swiftclient_exceptions.ClientException("Overlimit",
                                                     http_status=413)
        mock_swift_object.delete_container.side_effect = (exc,)
        rsrc = st.resources['test_wait_condition_handle']
        exc = self.assertRaises(exception.ResourceFailure,
                                scheduler.TaskRunner(rsrc.delete))
        self.assertEqual('ClientException: '
                         'resources.test_wait_condition_handle: '
                         'Overlimit: 413', six.text_type(exc))

    @mock.patch.object(swift.SwiftClientPlugin, '_create')
    @mock.patch.object(resource.Resource, 'physical_resource_name')
    def test_delete_non_empty_container(self, mock_name, mock_swift):
        """A 409 (container not empty) on container delete is tolerated."""
        st = create_stack(swiftsignalhandle_template)
        handle = st['test_wait_condition_handle']
        mock_swift_object = mock.Mock()
        mock_swift.return_value = mock_swift_object
        mock_swift_object.head_account.return_value = {
            'x-account-meta-temp-url-key': "1234"
        }
        mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
        obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
        mock_name.return_value = obj_name
        st.create()
        exc = swiftclient_exceptions.ClientException("Object DELETE failed",
                                                     http_status=404)
        mock_swift_object.delete_object.side_effect = (None, None, None, exc)
        exc = swiftclient_exceptions.ClientException("Container DELETE failed",
                                                     http_status=409)
        mock_swift_object.delete_container.side_effect = exc
        rsrc = st.resources['test_wait_condition_handle']
        scheduler.TaskRunner(rsrc.delete)()
        self.assertEqual(('DELETE', 'COMPLETE'), rsrc.state)
        self.assertEqual(4, mock_swift_object.delete_object.call_count)

    @mock.patch.object(swift.SwiftClientPlugin, '_create')
    def test_handle_update(self, mock_swift):
        """An update with unchanged properties keeps the same TempURL."""
        st = create_stack(swiftsignalhandle_template)
        handle = st['test_wait_condition_handle']
        mock_swift_object = mock.Mock()
        mock_swift.return_value = mock_swift_object
        mock_swift_object.head_account.return_value = {
            'x-account-meta-temp-url-key': "1234"
        }
        mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
        st.create()
        rsrc = st.resources['test_wait_condition_handle']
        old_url = rsrc.FnGetRefId()
        update_snippet = rsrc_defn.ResourceDefinition(handle.name,
                                                      handle.type(),
                                                      handle.properties.data)
        scheduler.TaskRunner(handle.update, update_snippet)()
        self.assertEqual(old_url, rsrc.FnGetRefId())
class SwiftSignalTest(common.HeatTestCase):
    """Tests for the OS::Heat::SwiftSignal wait-condition resource."""
    def setUp(self):
        super(SwiftSignalTest, self).setUp()
        utils.setup_dummy_db()
    @mock.patch.object(swift.SwiftClientPlugin, '_create')
    @mock.patch.object(resource.Resource, 'physical_resource_name')
    def test_create(self, mock_name, mock_swift):
        """Stack create completes when the container holds the signals."""
        st = create_stack(swiftsignal_template)
        handle = st['test_wait_condition_handle']
        mock_swift_object = mock.Mock()
        mock_swift.return_value = mock_swift_object
        mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
        mock_swift_object.head_account.return_value = {
            'x-account-meta-temp-url-key': '123456'
        }
        obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
        mock_name.return_value = obj_name
        mock_swift_object.get_container.return_value = cont_index(obj_name, 2)
        mock_swift_object.get_object.return_value = (obj_header, '')
        st.create()
        self.assertEqual(('CREATE', 'COMPLETE'), st.state)
    @mock.patch.object(swift.SwiftClientPlugin, 'get_signal_url')
    def test_validate_handle_url_bad_tempurl(self, mock_handle_url):
        """A TempURL missing the AUTH_<tenant> path element is rejected."""
        mock_handle_url.return_value = (
            "http://fake-host.com:8080/v1/my-container/"
            "test_st-test_wait_condition_handle?temp_url_sig="
            "12d8f9f2c923fbeb555041d4ed63d83de6768e95&"
            "temp_url_expires=1404762741")
        st = create_stack(swiftsignal_template)
        st.create()
        self.assertIn('not a valid SwiftSignalHandle. The Swift TempURL path',
                      six.text_type(st.status_reason))
    @mock.patch.object(swift.SwiftClientPlugin, 'get_signal_url')
    def test_validate_handle_url_bad_container_name(self, mock_handle_url):
        """A TempURL whose container is not the stack ID is rejected."""
        mock_handle_url.return_value = (
            "http://fake-host.com:8080/v1/AUTH_test_tenant/my-container/"
            "test_st-test_wait_condition_handle?temp_url_sig="
            "12d8f9f2c923fbeb555041d4ed63d83de6768e95&"
            "temp_url_expires=1404762741")
        st = create_stack(swiftsignal_template)
        st.create()
        self.assertIn('not a valid SwiftSignalHandle. The container name',
                      six.text_type(st.status_reason))
    @mock.patch.object(swift.SwiftClientPlugin, '_create')
    @mock.patch.object(resource.Resource, 'physical_resource_name')
    def test_multiple_signals_same_id_complete(self, mock_name, mock_swift):
        """Repeated signals with the same ID only count once toward count."""
        st = create_stack(swiftsignal_template)
        handle = st['test_wait_condition_handle']
        mock_swift_object = mock.Mock()
        mock_swift.return_value = mock_swift_object
        mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
        mock_swift_object.head_account.return_value = {
            'x-account-meta-temp-url-key': '123456'
        }
        obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
        mock_name.return_value = obj_name
        mock_swift_object.get_container.return_value = cont_index(obj_name, 2)
        mock_swift_object.get_object.side_effect = (
            (obj_header, json.dumps({'id': 1})),
            (obj_header, json.dumps({'id': 1})),
            (obj_header, json.dumps({'id': 1})),
            (obj_header, json.dumps({'id': 1})),
            (obj_header, json.dumps({'id': 2})),
            (obj_header, json.dumps({'id': 3})),
        )
        st.create()
        self.assertEqual(('CREATE', 'COMPLETE'), st.state)
    @mock.patch.object(swift.SwiftClientPlugin, '_create')
    @mock.patch.object(resource.Resource, 'physical_resource_name')
    def test_multiple_signals_same_id_timeout(self, mock_name, mock_swift):
        """Duplicate signal IDs never reach count=2, so the wait times out."""
        st = create_stack(swiftsignal_template)
        handle = st['test_wait_condition_handle']
        mock_swift_object = mock.Mock()
        mock_swift.return_value = mock_swift_object
        mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
        mock_swift_object.head_account.return_value = {
            'x-account-meta-temp-url-key': '123456'
        }
        obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
        mock_name.return_value = obj_name
        mock_swift_object.get_container.return_value = cont_index(obj_name, 2)
        mock_swift_object.get_object.return_value = (obj_header,
                                                     json.dumps({'id': 1}))
        # Advance the clock one second per poll so the 1s timeout fires.
        time_now = timeutils.utcnow()
        time_series = [datetime.timedelta(0, t) + time_now
                       for t in six.moves.xrange(1, 100)]
        timeutils.set_time_override(time_series)
        self.addCleanup(timeutils.clear_time_override)
        st.create()
        self.assertIn("SwiftSignalTimeout: resources.test_wait_condition: "
                      "1 of 2 received - Signal 1 received",
                      st.status_reason)
        wc = st['test_wait_condition']
        self.assertEqual("SwiftSignalTimeout: resources.test_wait_condition: "
                         "1 of 2 received - Signal 1 received",
                         wc.status_reason)
    @mock.patch.object(swift.SwiftClientPlugin, '_create')
    @mock.patch.object(resource.Resource, 'physical_resource_name')
    def test_post_complete_to_handle(self, mock_name, mock_swift):
        """Two distinct SUCCESS signals complete the wait condition."""
        st = create_stack(swiftsignal_template)
        handle = st['test_wait_condition_handle']
        mock_swift_object = mock.Mock()
        mock_swift.return_value = mock_swift_object
        mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
        mock_swift_object.head_account.return_value = {
            'x-account-meta-temp-url-key': '123456'
        }
        obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
        mock_name.return_value = obj_name
        mock_swift_object.get_container.return_value = cont_index(obj_name, 2)
        mock_swift_object.get_object.side_effect = (
            (obj_header, json.dumps({'id': 1, 'status': "SUCCESS"})),
            (obj_header, json.dumps({'id': 1, 'status': "SUCCESS"})),
            (obj_header, json.dumps({'id': 2, 'status': "SUCCESS"})),
        )
        st.create()
        self.assertEqual(('CREATE', 'COMPLETE'), st.state)
    @mock.patch.object(swift.SwiftClientPlugin, '_create')
    @mock.patch.object(resource.Resource, 'physical_resource_name')
    def test_post_failed_to_handle(self, mock_name, mock_swift):
        """FAILURE signals fail the stack with the joined reasons."""
        st = create_stack(swiftsignal_template)
        handle = st['test_wait_condition_handle']
        mock_swift_object = mock.Mock()
        mock_swift.return_value = mock_swift_object
        mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
        mock_swift_object.head_account.return_value = {
            'x-account-meta-temp-url-key': '123456'
        }
        obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
        mock_name.return_value = obj_name
        mock_swift_object.get_container.return_value = cont_index(obj_name, 1)
        mock_swift_object.get_object.side_effect = (
            # Create
            (obj_header, json.dumps({'id': 1, 'status': "FAILURE",
                                     'reason': "foo"})),
            (obj_header, json.dumps({'id': 2, 'status': "FAILURE",
                                     'reason': "bar"})),
            # SwiftSignalFailure
            (obj_header, json.dumps({'id': 1, 'status': "FAILURE",
                                     'reason': "foo"})),
            (obj_header, json.dumps({'id': 2, 'status': "FAILURE",
                                     'reason': "bar"})),
        )
        st.create()
        self.assertEqual(('CREATE', 'FAILED'), st.state)
        wc = st['test_wait_condition']
        self.assertEqual("SwiftSignalFailure: resources.test_wait_condition: "
                         "foo;bar", wc.status_reason)
    @mock.patch.object(swift.SwiftClientPlugin, '_create')
    @mock.patch.object(resource.Resource, 'physical_resource_name')
    def test_data(self, mock_name, mock_swift):
        """FnGetAtt('data') maps signal IDs to their posted data."""
        st = create_stack(swiftsignal_template)
        handle = st['test_wait_condition_handle']
        mock_swift_object = mock.Mock()
        mock_swift.return_value = mock_swift_object
        mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
        mock_swift_object.head_account.return_value = {
            'x-account-meta-temp-url-key': '123456'
        }
        obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
        mock_name.return_value = obj_name
        mock_swift_object.get_container.return_value = cont_index(obj_name, 2)
        mock_swift_object.get_object.side_effect = (
            # st create
            (obj_header, json.dumps({'id': 1, 'data': "foo"})),
            (obj_header, json.dumps({'id': 2, 'data': "bar"})),
            (obj_header, json.dumps({'id': 3, 'data': "baz"})),
            # FnGetAtt call
            (obj_header, json.dumps({'id': 1, 'data': "foo"})),
            (obj_header, json.dumps({'id': 2, 'data': "bar"})),
            (obj_header, json.dumps({'id': 3, 'data': "baz"})),
        )
        st.create()
        self.assertEqual(('CREATE', 'COMPLETE'), st.state)
        wc = st['test_wait_condition']
        self.assertEqual(json.dumps({1: 'foo', 2: 'bar', 3: 'baz'}),
                         wc.FnGetAtt('data'))
    @mock.patch.object(swift.SwiftClientPlugin, '_create')
    @mock.patch.object(resource.Resource, 'physical_resource_name')
    def test_data_noid(self, mock_name, mock_swift):
        """Signals without an 'id' are auto-numbered in arrival order."""
        st = create_stack(swiftsignal_template)
        handle = st['test_wait_condition_handle']
        mock_swift_object = mock.Mock()
        mock_swift.return_value = mock_swift_object
        mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
        mock_swift_object.head_account.return_value = {
            'x-account-meta-temp-url-key': '123456'
        }
        obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
        mock_name.return_value = obj_name
        mock_swift_object.get_container.return_value = cont_index(obj_name, 1)
        mock_swift_object.get_object.side_effect = (
            # st create
            (obj_header, json.dumps({'data': "foo", 'reason': "bar",
                                     'status': "SUCCESS"})),
            (obj_header, json.dumps({'data': "dog", 'reason': "cat",
                                     'status': "SUCCESS"})),
            # FnGetAtt call
            (obj_header, json.dumps({'data': "foo", 'reason': "bar",
                                     'status': "SUCCESS"})),
            (obj_header, json.dumps({'data': "dog", 'reason': "cat",
                                     'status': "SUCCESS"})),
        )
        st.create()
        self.assertEqual(('CREATE', 'COMPLETE'), st.state)
        wc = st['test_wait_condition']
        self.assertEqual(json.dumps({1: 'foo', 2: 'dog'}), wc.FnGetAtt('data'))
    @mock.patch.object(swift.SwiftClientPlugin, '_create')
    @mock.patch.object(resource.Resource, 'physical_resource_name')
    def test_data_nodata(self, mock_name, mock_swift):
        """Empty signal bodies yield None data values per signal."""
        st = create_stack(swiftsignal_template)
        handle = st['test_wait_condition_handle']
        mock_swift_object = mock.Mock()
        mock_swift.return_value = mock_swift_object
        mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
        mock_swift_object.head_account.return_value = {
            'x-account-meta-temp-url-key': '123456'
        }
        obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
        mock_name.return_value = obj_name
        mock_swift_object.get_container.return_value = cont_index(obj_name, 1)
        mock_swift_object.get_object.side_effect = (
            # st create
            (obj_header, ''),
            (obj_header, ''),
            # FnGetAtt call
            (obj_header, ''),
            (obj_header, ''),
        )
        st.create()
        self.assertEqual(('CREATE', 'COMPLETE'), st.state)
        wc = st['test_wait_condition']
        self.assertEqual(json.dumps({1: None, 2: None}), wc.FnGetAtt('data'))
    @mock.patch.object(swift.SwiftClientPlugin, '_create')
    @mock.patch.object(resource.Resource, 'physical_resource_name')
    def test_data_partial_complete(self, mock_name, mock_swift):
        """Signals with only a status get default IDs and reasons."""
        st = create_stack(swiftsignal_template)
        handle = st['test_wait_condition_handle']
        wc = st['test_wait_condition']
        mock_swift_object = mock.Mock()
        mock_swift.return_value = mock_swift_object
        mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
        mock_swift_object.head_account.return_value = {
            'x-account-meta-temp-url-key': '123456'
        }
        obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
        mock_name.return_value = obj_name
        mock_swift_object.get_container.return_value = cont_index(obj_name, 1)
        mock_swift_object.get_object.return_value = (
            obj_header, json.dumps({'status': 'SUCCESS'}))
        st.create()
        self.assertEqual(['SUCCESS', 'SUCCESS'], wc.get_status())
        expected = [{'status': 'SUCCESS', 'reason': 'Signal 1 received',
                     'data': None, 'id': 1},
                    {'status': 'SUCCESS', 'reason': 'Signal 2 received',
                     'data': None, 'id': 2}]
        self.assertEqual(expected, wc.get_signals())
    @mock.patch.object(swift.SwiftClientPlugin, '_create')
    @mock.patch.object(resource.Resource, 'physical_resource_name')
    def test_get_status_none_complete(self, mock_name, mock_swift):
        """Empty signal bodies default to SUCCESS status."""
        st = create_stack(swiftsignal_template)
        handle = st['test_wait_condition_handle']
        wc = st['test_wait_condition']
        mock_swift_object = mock.Mock()
        mock_swift.return_value = mock_swift_object
        mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
        mock_swift_object.head_account.return_value = {
            'x-account-meta-temp-url-key': '123456'
        }
        obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
        mock_name.return_value = obj_name
        mock_swift_object.get_container.return_value = cont_index(obj_name, 1)
        mock_swift_object.get_object.return_value = (obj_header, '')
        st.create()
        self.assertEqual(['SUCCESS', 'SUCCESS'], wc.get_status())
        expected = [{'status': 'SUCCESS', 'reason': 'Signal 1 received',
                     'data': None, 'id': 1},
                    {'status': 'SUCCESS', 'reason': 'Signal 2 received',
                     'data': None, 'id': 2}]
        self.assertEqual(expected, wc.get_signals())
    @mock.patch.object(swift.SwiftClientPlugin, '_create')
    @mock.patch.object(resource.Resource, 'physical_resource_name')
    def test_get_status_partial_complete(self, mock_name, mock_swift):
        """A single SUCCESS signal reports one status entry."""
        st = create_stack(swiftsignal_template)
        handle = st['test_wait_condition_handle']
        wc = st['test_wait_condition']
        mock_swift_object = mock.Mock()
        mock_swift.return_value = mock_swift_object
        mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
        mock_swift_object.head_account.return_value = {
            'x-account-meta-temp-url-key': '123456'
        }
        obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
        mock_name.return_value = obj_name
        mock_swift_object.get_container.return_value = cont_index(obj_name, 1)
        mock_swift_object.get_object.return_value = (
            obj_header, json.dumps({'id': 1, 'status': "SUCCESS"}))
        st.create()
        self.assertEqual(['SUCCESS'], wc.get_status())
        expected = [{'status': 'SUCCESS', 'reason': 'Signal 1 received',
                     'data': None, 'id': 1}]
        self.assertEqual(expected, wc.get_signals())
    @mock.patch.object(swift.SwiftClientPlugin, '_create')
    @mock.patch.object(resource.Resource, 'physical_resource_name')
    def test_get_status_failure(self, mock_name, mock_swift):
        """A FAILURE signal fails the stack and is visible via get_status."""
        st = create_stack(swiftsignal_template)
        handle = st['test_wait_condition_handle']
        wc = st['test_wait_condition']
        mock_swift_object = mock.Mock()
        mock_swift.return_value = mock_swift_object
        mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
        mock_swift_object.head_account.return_value = {
            'x-account-meta-temp-url-key': '123456'
        }
        obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
        mock_name.return_value = obj_name
        mock_swift_object.get_container.return_value = cont_index(obj_name, 1)
        mock_swift_object.get_object.return_value = (
            obj_header, json.dumps({'id': 1, 'status': "FAILURE"}))
        st.create()
        self.assertEqual(('CREATE', 'FAILED'), st.state)
        self.assertEqual(['FAILURE'], wc.get_status())
        expected = [{'status': 'FAILURE', 'reason': 'Signal 1 received',
                     'data': None, 'id': 1}]
        self.assertEqual(expected, wc.get_signals())
@mock.patch.object(swift.SwiftClientPlugin, '_create')
@mock.patch.object(resource.Resource, 'physical_resource_name')
def test_getatt_token(self, mock_name, mock_swift):
st = create_stack(swiftsignalhandle_template)
handle = st['test_wait_condition_handle']
mock_swift_object = mock.Mock()
mock_swift.return_value = mock_swift_object
mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
mock_swift_object.head_account.return_value = {
'x-account-meta-temp-url-key': '123456'
}
obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
mock_name.return_value = obj_name
mock_swift_object.get_container.return_value = cont_index(obj_name, 1)
mock_swift_object.get_object.side_effect = (
# st create
(obj_header, ''),
(obj_header, ''),
)
st.create()
self.assertEqual(('CREATE', 'COMPLETE'), st.state)
self.assertEqual('', handle.FnGetAtt('token'))
@mock.patch.object(swift.SwiftClientPlugin, '_create')
@mock.patch.object(resource.Resource, 'physical_resource_name')
def test_getatt_endpoint(self, mock_name, mock_swift):
st = create_stack(swiftsignalhandle_template)
handle = st['test_wait_condition_handle']
mock_swift_object = mock.Mock()
mock_swift.return_value = mock_swift_object
mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
mock_swift_object.head_account.return_value = {
'x-account-meta-temp-url-key': '123456'
}
obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
mock_name.return_value = obj_name
mock_swift_object.get_container.return_value = cont_index(obj_name, 1)
mock_swift_object.get_object.side_effect = (
# st create
(obj_header, ''),
(obj_header, ''),
)
st.create()
self.assertEqual(('CREATE', 'COMPLETE'), st.state)
expected = ('http://fake-host.com:8080/v1/AUTH_test_tenant/%s/'
'test_st-test_wait_condition_handle-abcdefghijkl\?temp_'
'url_sig=[0-9a-f]{40}&temp_url_expires=[0-9]{10}') % st.id
self.assertThat(handle.FnGetAtt('endpoint'),
matchers.MatchesRegex(expected))
@mock.patch.object(swift.SwiftClientPlugin, '_create')
@mock.patch.object(resource.Resource, 'physical_resource_name')
def test_getatt_curl_cli(self, mock_name, mock_swift):
st = create_stack(swiftsignalhandle_template)
handle = st['test_wait_condition_handle']
mock_swift_object = mock.Mock()
mock_swift.return_value = mock_swift_object
mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
mock_swift_object.head_account.return_value = {
'x-account-meta-temp-url-key': '123456'
}
obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
mock_name.return_value = obj_name
mock_swift_object.get_container.return_value = cont_index(obj_name, 1)
mock_swift_object.get_object.side_effect = (
# st create
(obj_header, ''),
(obj_header, ''),
)
st.create()
self.assertEqual(('CREATE', 'COMPLETE'), st.state)
expected = ("curl -i -X PUT 'http://fake-host.com:8080/v1/"
"AUTH_test_tenant/%s/test_st-test_wait_condition_"
"handle-abcdefghijkl\?temp_url_sig=[0-9a-f]{40}&"
"temp_url_expires=[0-9]{10}'") % st.id
self.assertThat(handle.FnGetAtt('curl_cli'),
matchers.MatchesRegex(expected))
@mock.patch.object(swift.SwiftClientPlugin, '_create')
@mock.patch.object(resource.Resource, 'physical_resource_name')
def test_invalid_json_data(self, mock_name, mock_swift):
st = create_stack(swiftsignal_template)
handle = st['test_wait_condition_handle']
mock_swift_object = mock.Mock()
mock_swift.return_value = mock_swift_object
mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
mock_swift_object.head_account.return_value = {
'x-account-meta-temp-url-key': '123456'
}
obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
mock_name.return_value = obj_name
mock_swift_object.get_container.return_value = cont_index(obj_name, 1)
mock_swift_object.get_object.side_effect = (
# st create
(obj_header, '{"status": "SUCCESS"'),
(obj_header, '{"status": "FAI'),
)
st.create()
self.assertEqual(('CREATE', 'FAILED'), st.state)
wc = st['test_wait_condition']
self.assertEqual('Error: resources.test_wait_condition: '
'Failed to parse JSON data: {"status": '
'"SUCCESS"', wc.status_reason)
@mock.patch.object(swift.SwiftClientPlugin, '_create')
@mock.patch.object(resource.Resource, 'physical_resource_name')
def test_unknown_status(self, mock_name, mock_swift):
st = create_stack(swiftsignal_template)
handle = st['test_wait_condition_handle']
mock_swift_object = mock.Mock()
mock_swift.return_value = mock_swift_object
mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
mock_swift_object.head_account.return_value = {
'x-account-meta-temp-url-key': '123456'
}
obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
mock_name.return_value = obj_name
mock_swift_object.get_container.return_value = cont_index(obj_name, 1)
mock_swift_object.get_object.return_value = (
obj_header, '{"status": "BOO"}')
st.create()
self.assertEqual(('CREATE', 'FAILED'), st.state)
wc = st['test_wait_condition']
self.assertEqual('Error: resources.test_wait_condition: '
'Unknown status: BOO', wc.status_reason)
@mock.patch.object(swift.SwiftClientPlugin, '_create')
@mock.patch.object(resource.Resource, 'physical_resource_name')
def test_swift_objects_deleted(self, mock_name, mock_swift):
st = create_stack(swiftsignal_template)
handle = st['test_wait_condition_handle']
mock_swift_object = mock.Mock()
mock_swift.return_value = mock_swift_object
mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
mock_swift_object.head_account.return_value = {
'x-account-meta-temp-url-key': '123456'
}
obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
mock_name.return_value = obj_name
mock_swift_object.get_container.side_effect = (
cont_index(obj_name, 2), # Objects are there during create
(container_header, []), # The user deleted the objects
)
mock_swift_object.get_object.side_effect = (
(obj_header, json.dumps({'id': 1})), # Objects there during create
(obj_header, json.dumps({'id': 2})),
(obj_header, json.dumps({'id': 3})),
)
st.create()
self.assertEqual(('CREATE', 'COMPLETE'), st.state)
wc = st['test_wait_condition']
self.assertEqual("null", wc.FnGetAtt('data'))
@mock.patch.object(swift.SwiftClientPlugin, '_create')
@mock.patch.object(resource.Resource, 'physical_resource_name')
def test_swift_container_deleted(self, mock_name, mock_swift):
st = create_stack(swiftsignal_template)
handle = st['test_wait_condition_handle']
mock_swift_object = mock.Mock()
mock_swift.return_value = mock_swift_object
mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
mock_swift_object.head_account.return_value = {
'x-account-meta-temp-url-key': '123456'
}
obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
mock_name.return_value = obj_name
mock_swift_object.get_container.side_effect = [
cont_index(obj_name, 2), # Objects are there during create
swiftclient_client.ClientException("Container GET failed",
http_status=404) # User deleted
]
mock_swift_object.get_object.side_effect = (
(obj_header, json.dumps({'id': 1})), # Objects there during create
(obj_header, json.dumps({'id': 2})),
(obj_header, json.dumps({'id': 3})),
)
st.create()
self.assertEqual(('CREATE', 'COMPLETE'), st.state)
wc = st['test_wait_condition']
self.assertEqual("null", wc.FnGetAtt('data'))
@mock.patch.object(swift.SwiftClientPlugin, '_create')
@mock.patch.object(resource.Resource, 'physical_resource_name')
def test_swift_get_object_404(self, mock_name, mock_swift):
st = create_stack(swiftsignal_template)
handle = st['test_wait_condition_handle']
mock_swift_object = mock.Mock()
mock_swift.return_value = mock_swift_object
mock_swift_object.url = "http://fake-host.com:8080/v1/AUTH_1234"
mock_swift_object.head_account.return_value = {
'x-account-meta-temp-url-key': '123456'
}
obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
mock_name.return_value = obj_name
mock_swift_object.get_container.return_value = cont_index(obj_name, 2)
mock_swift_object.get_object.side_effect = (
(obj_header, ''),
swiftclient_client.ClientException(
"Object %s not found" % obj_name, http_status=404)
)
st.create()
self.assertEqual(('CREATE', 'COMPLETE'), st.state)
| |
"""
Custom manager for Objects.
"""
import re
from itertools import chain
from django.db.models import Q
from django.conf import settings
from django.db.models.fields import exceptions
from evennia.typeclasses.managers import TypedObjectManager, TypeclassManager
from evennia.typeclasses.managers import returns_typeclass, returns_typeclass_list
from evennia.utils.utils import to_unicode, is_iter, make_iter, string_partial_matching
from builtins import int
__all__ = ("ObjectManager",)
_GA = object.__getattribute__
# delayed import
_ATTR = None
_MULTIMATCH_REGEX = re.compile(settings.SEARCH_MULTIMATCH_REGEX, re.I + re.U)
# Compiled from settings.SEARCH_MULTIMATCH_REGEX; used by search_object to
# parse id-tagged multimatch queries (named groups "number" and "name").
class ObjectDBManager(TypedObjectManager):
"""
This ObjectManager implements methods for searching
and manipulating Objects directly from the database.
    Evennia-specific search methods will return Typeclasses or
    lists of Typeclasses, whereas Django-general methods will return
    Querysets or database objects.
dbref (converter)
get_id (alias: dbref_search)
get_dbref_range
object_totals
typeclass_search
get_object_with_player
get_objs_with_key_and_typeclass
get_objs_with_attr
get_objs_with_attr_match
get_objs_with_db_property
get_objs_with_db_property_match
get_objs_with_key_or_alias
get_contents
object_search (interface to many of the above methods,
equivalent to evennia.search_object)
copy_object
"""
#
# ObjectManager Get methods
#
# player related
    @returns_typeclass
    def get_object_with_player(self, ostring, exact=True, candidates=None):
        """
        Search for an object based on its player's name or dbref.

        Args:
            ostring (str or int): Search criterion or dbref. Searching
                for a player is sometimes initiated by appending an `*` to
                the beginning of the search criterion (e.g. in
                local_and_global_search). This is stripped here.
            exact (bool, optional): Require an exact player match.
            candidates (list, optional): Only search among this list of possible
                object candidates.

        Return:
            match (Object or list): One or more matching results.

        """
        ostring = to_unicode(ostring).lstrip('*')
        # simplest case - search by dbref
        dbref = self.dbref(ostring)
        if dbref:
            # NOTE(review): this returns the raw dbref conversion rather
            # than a database lookup -- presumably resolved by the
            # @returns_typeclass decorator; confirm.
            return dbref
        # not a dbref. Search by name.
        # limit the query to the supplied candidates, if any
        cand_restriction = candidates is not None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates)
                                                                if obj]) or Q()
        if exact:
            return self.filter(cand_restriction & Q(db_player__username__iexact=ostring))
        else:  # fuzzy matching
            # NOTE(review): this branch filters on `playerdb__username`
            # while the exact branch uses `db_player__username` -- confirm
            # both lookups resolve to the same relation.
            ply_cands = self.filter(cand_restriction & Q(playerdb__username__istartswith=ostring)
                                    ).values_list("db_key", flat=True)
            if candidates:
                # map the partial-match indices back onto the candidate list
                index_matches = string_partial_matching(ply_cands, ostring, ret_index=True)
                return [obj for ind, obj in enumerate(make_iter(candidates)) if ind in index_matches]
            else:
                return string_partial_matching(ply_cands, ostring, ret_index=False)
@returns_typeclass_list
def get_objs_with_key_and_typeclass(self, oname, otypeclass_path, candidates=None):
"""
Returns objects based on simultaneous key and typeclass match.
Args:
oname (str): Object key to search for
otypeclass_path (str): Full Python path to tyepclass to search for
candidates (list, optional): Only match among the given list of candidates.
Returns:
matches (list): The matching objects.
"""
cand_restriction = candidates is not None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates)
if obj]) or Q()
return self.filter(cand_restriction & Q(db_key__iexact=oname, db_typeclass_path__exact=otypeclass_path))
# attr/property related
@returns_typeclass_list
def get_objs_with_attr(self, attribute_name, candidates=None):
"""
Get objects based on having a certain Attribute defined.
Args:
attribute_name (str): Attribute name to search for.
candidates (list, optional): Only match among the given list of candidates.
Returns:
matches (list): All objects having the given attribute_name defined at all.
"""
cand_restriction = candidates is not None and Q(db_attributes__db_obj__pk__in=[_GA(obj, "id") for obj
in make_iter(candidates)
if obj]) or Q()
return list(self.filter(cand_restriction & Q(db_attributes__db_key=attribute_name)))
    @returns_typeclass_list
    def get_objs_with_attr_value(self, attribute_name, attribute_value, candidates=None, typeclasses=None):
        """
        Get all objects having the given attrname set to the given value.

        Args:
            attribute_name (str): Attribute key to search for.
            attribute_value (str): Attribute value to search for.
            candidates (list, optional): Candidate objects to limit search to.
            typeclasses (list, optional): Python paths to restrict matches with.

        Returns:
            matches (list): Objects fulfilling both the `attribute_name`
                and `attribute_value` criteria.

        Notes:
            This uses the Attribute's PickledField to transparently search the database by matching
            the internal representation. This is reasonably effective but since Attribute values
            cannot be indexed, searching by Attribute key is to be preferred whenever possible.

        """
        cand_restriction = candidates is not None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates)
                                                                if obj]) or Q()
        type_restriction = typeclasses and Q(db_typeclass_path__in=make_iter(typeclasses)) or Q()
        # This doesn't work if attribute_value is an object. Workaround below
        if isinstance(attribute_value, (basestring, int, float, bool)):
            return self.filter(cand_restriction & type_restriction & Q(db_attributes__db_key=attribute_name,
                                                                       db_attributes__db_value=attribute_value))
        else:
            # We must loop for safety since the referenced lookup gives deepcopy error if attribute value is an object.
            global _ATTR
            if not _ATTR:
                # delayed import, cached at module level, to avoid a
                # circular import at module load time
                from evennia.typeclasses.models import Attribute as _ATTR
            cands = list(self.filter(cand_restriction & type_restriction & Q(db_attributes__db_key=attribute_name)))
            results = [attr.objectdb_set.all() for attr in _ATTR.objects.filter(objectdb__in=cands,
                                                                                db_value=attribute_value)]
            # flatten the per-attribute querysets into one iterable
            return chain(*results)
@returns_typeclass_list
def get_objs_with_db_property(self, property_name, candidates=None):
"""
Get all objects having a given db field property.
Args:
property_name (str): The name of the field to match for.
candidates (list, optional): Only search among th egiven candidates.
Returns:
matches (list): The found matches.
"""
property_name = "db_%s" % property_name.lstrip('db_')
cand_restriction = candidates is not None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates)
if obj]) or Q()
querykwargs = {property_name: None}
try:
return list(self.filter(cand_restriction).exclude(Q(**querykwargs)))
except exceptions.FieldError:
return []
    @returns_typeclass_list
    def get_objs_with_db_property_value(self, property_name, property_value, candidates=None, typeclasses=None):
        """
        Get objects with a specific field name and value.

        Args:
            property_name (str): Field name to search for, with or without
                the "db_" prefix.
            property_value (any): Value required for field with `property_name` to have.
            candidates (list, optional): List of objects to limit search to.
            typeclasses (list, optional): List of typeclass-path strings to restrict matches with.

        Returns:
            matches (list): The matching objects; the empty list if the
                field does not exist or cannot be compared to the value.

        """
        if isinstance(property_value, basestring):
            property_value = to_unicode(property_value)
        if isinstance(property_name, basestring):
            # normalize to the real column name
            if not property_name.startswith('db_'):
                property_name = "db_%s" % property_name
        querykwargs = {property_name: property_value}
        cand_restriction = candidates is not None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates)
                                                                if obj]) or Q()
        type_restriction = typeclasses and Q(db_typeclass_path__in=make_iter(typeclasses)) or Q()
        try:
            return list(self.filter(cand_restriction & type_restriction & Q(**querykwargs)))
        except exceptions.FieldError:
            # no field of that name on the model
            return []
        except ValueError:
            # the field exists but cannot be compared against this value type
            from evennia.utils import logger
            logger.log_err("The property '%s' does not support search criteria of the type %s." %
                           (property_name, type(property_value)))
            return []
@returns_typeclass_list
def get_contents(self, location, excludeobj=None):
"""
Get all objects that has a location set to this one.
Args:
location (Object): Where to get contents from.
excludeobj (Object or list, optional): One or more objects
to exclude from the match.
Returns:
contents (list): Matching contents, without excludeobj, if given.
"""
exclude_restriction = Q(pk__in=[_GA(obj, "id") for obj in make_iter(excludeobj)]) if excludeobj else Q()
return self.filter(db_location=location).exclude(exclude_restriction)
    @returns_typeclass_list
    def get_objs_with_key_or_alias(self, ostring, exact=True,
                                   candidates=None, typeclasses=None):
        """
        Search objects by key or by alias tag.

        Args:
            ostring (str): A search criterion.
            exact (bool, optional): Require exact match of ostring
                (still case-insensitive). If `False`, will do fuzzy matching
                using `evennia.utils.utils.string_partial_matching` algorithm.
            candidates (list): Only match among these candidates.
            typeclasses (list): Only match objects with typeclasses having these path strings.

        Returns:
            matches (list): A list of matches of length 0, 1 or more.

        """
        if not isinstance(ostring, basestring):
            # allow passing an object as criterion - use its key
            if hasattr(ostring, "key"):
                ostring = ostring.key
            else:
                return []
        if is_iter(candidates) and not len(candidates):
            # if candidates is an empty iterable there can be no matches
            # Exit early.
            return []
        # build query objects
        candidates_id = [_GA(obj, "id") for obj in make_iter(candidates) if obj]
        cand_restriction = candidates is not None and Q(pk__in=candidates_id) or Q()
        type_restriction = typeclasses and Q(db_typeclass_path__in=make_iter(typeclasses)) or Q()
        if exact:
            # exact match - do direct search on key OR alias tag
            return self.filter(cand_restriction & type_restriction & (Q(db_key__iexact=ostring) |
                               Q(db_tags__db_key__iexact=ostring) & Q(db_tags__db_tagtype__iexact="alias"))).distinct()
        elif candidates:
            # fuzzy with candidates
            search_candidates = self.filter(cand_restriction & type_restriction)
        else:
            # fuzzy without supplied candidates - we select our own candidates
            search_candidates = self.filter(type_restriction & (Q(db_key__istartswith=ostring) |
                                                                Q(db_tags__db_key__istartswith=ostring))).distinct()
        # fuzzy matching
        key_strings = search_candidates.values_list("db_key", flat=True).order_by("id")
        index_matches = string_partial_matching(key_strings, ostring, ret_index=True)
        if index_matches:
            # a match by key
            return [obj for ind, obj in enumerate(search_candidates) if ind in index_matches]
        else:
            # match by alias rather than by key
            search_candidates = search_candidates.filter(db_tags__db_tagtype__iexact="alias",
                                                         db_tags__db_key__icontains=ostring)
            alias_strings = []
            alias_candidates = []
            # TODO create the alias_strings and alias_candidates lists more efficiently?
            # build parallel lists: the alias string and the object that carries it
            for candidate in search_candidates:
                for alias in candidate.aliases.all():
                    alias_strings.append(alias)
                    alias_candidates.append(candidate)
            index_matches = string_partial_matching(alias_strings, ostring, ret_index=True)
            if index_matches:
                return [alias_candidates[ind] for ind in index_matches]
            return []
# main search methods and helper functions
    @returns_typeclass_list
    def search_object(self, searchdata,
                      attribute_name=None,
                      typeclass=None,
                      candidates=None,
                      exact=True,
                      use_dbref=True):
        """
        Search as an object globally or in a list of candidates and
        return results. The result is always an Object. Always returns
        a list.

        Args:
            searchdata (str or Object): The entity to match for. This is
                usually a key string but may also be an object itself.
                By default (if no `attribute_name` is set), this will
                search `object.key` and `object.aliases` in order.
                Can also be on the form #dbref, which will (if
                `exact=True`) be matched against primary key.
            attribute_name (str): Use this named Attribute to
                match searchdata against, instead of the defaults. If
                this is the name of a database field (with or without
                the `db_` prefix), that will be matched too.
            typeclass (str or TypeClass): restrict matches to objects
                having this typeclass. This will help speed up global
                searches.
            candidates (list): If supplied, search will
                only be performed among the candidates in this list. A
                common list of candidates is the contents of the
                current location searched.
            exact (bool): Match names/aliases exactly or partially.
                Partial matching matches the beginning of words in the
                names/aliases, using a matching routine to separate
                multiple matches in names with multiple components (so
                "bi sw" will match "Big sword"). Since this is more
                expensive than exact matching, it is recommended to be
                used together with the `candidates` keyword to limit the
                number of possibilities. This value has no meaning if
                searching for attributes/properties.
            use_dbref (bool): If False, bypass direct lookup of a string
                on the form #dbref and treat it like any string.

        Returns:
            matches (list): Matching objects

        """
        def _searcher(searchdata, candidates, typeclass, exact=False):
            """
            Helper method for searching objects. `typeclass` is only used
            for global searching (no candidates)
            """
            if attribute_name:
                # attribute/property search (always exact).
                # try a database field first, fall back to Attributes
                matches = self.get_objs_with_db_property_value(attribute_name, searchdata,
                                                               candidates=candidates, typeclasses=typeclass)
                if matches:
                    return matches
                return self.get_objs_with_attr_value(attribute_name, searchdata,
                                                     candidates=candidates, typeclasses=typeclass)
            else:
                # normal key/alias search
                return self.get_objs_with_key_or_alias(searchdata, exact=exact,
                                                       candidates=candidates, typeclasses=typeclass)
        if not searchdata and searchdata != 0:
            return []
        if typeclass:
            # typeclass may also be a list
            # normalize every entry to a full python-path string
            typeclasses = make_iter(typeclass)
            for i, typeclass in enumerate(make_iter(typeclasses)):
                if callable(typeclass):
                    typeclasses[i] = u"%s.%s" % (typeclass.__module__, typeclass.__name__)
                else:
                    typeclasses[i] = u"%s" % typeclass
            typeclass = typeclasses
        if candidates is not None:
            if not candidates:
                # candidates is the empty list. This should mean no matches can ever be acquired.
                return []
            # Convenience check to make sure candidates are really dbobjs
            candidates = [cand for cand in make_iter(candidates) if cand]
            if typeclass:
                candidates = [cand for cand in candidates
                              if _GA(cand, "db_typeclass_path") in typeclass]
        # only attempt a #dbref interpretation for exact, non-attribute searches
        dbref = not attribute_name and exact and use_dbref and self.dbref(searchdata)
        if dbref:
            # Easiest case - dbref matching (always exact)
            dbref_match = self.dbref_search(dbref)
            if dbref_match:
                if not candidates or dbref_match in candidates:
                    return [dbref_match]
                else:
                    return []
        # Search through all possibilities.
        match_number = None
        # always run first check exact - we don't want partial matches
        # if on the form of 1-keyword etc.
        matches = _searcher(searchdata, candidates, typeclass, exact=True)
        if not matches:
            # no matches found - check if we are dealing with N-keyword
            # query - if so, strip it.
            match = _MULTIMATCH_REGEX.match(searchdata)
            match_number = None
            if match:
                # strips the number
                match_number, searchdata = match.group("number"), match.group("name")
                # convert to a 0-based index; negative means "no index given"
                match_number = int(match_number) - 1
                match_number = match_number if match_number >= 0 else None
            if match_number is not None or not exact:
                # run search again, with the exactness set by call
                matches = _searcher(searchdata, candidates, typeclass, exact=exact)
        # deal with result
        if len(matches) > 1 and match_number is not None:
            # multiple matches, but a number was given to separate them
            try:
                matches = [matches[match_number]]
            except IndexError:
                # match number not matching anything
                pass
        # return a list (possibly empty)
        return matches
    # alias for backwards compatibility
    object_search = search_object
#
# ObjectManager Copy method
def copy_object(self, original_object, new_key=None,
new_location=None, new_home=None,
new_permissions=None, new_locks=None,
new_aliases=None, new_destination=None):
"""
Create and return a new object as a copy of the original object. All
will be identical to the original except for the arguments given
specifically to this method.
Args:
original_object (Object): The object to make a copy from.
new_key (str, optional): Name of the copy, if different
from the original.
new_location (Object, optional): Alternate location.
new_home (Object, optional): Change the home location
new_aliases (list, optional): Give alternate object
aliases as a list of strings.
new_destination (Object, optional): Used only by exits.
Returns:
copy (Object or None): The copy of `original_object`,
optionally modified as per the ingoing keyword
arguments. `None` if an error was encountered.
"""
# get all the object's stats
typeclass_path = original_object.typeclass_path
if not new_key:
new_key = original_object.key
if not new_location:
new_location = original_object.location
if not new_home:
new_home = original_object.home
if not new_aliases:
new_aliases = original_object.aliases.all()
if not new_locks:
new_locks = original_object.db_lock_storage
if not new_permissions:
new_permissions = original_object.permissions.all()
if not new_destination:
new_destination = original_object.destination
# create new object
from evennia.utils import create
from evennia.scripts.models import ScriptDB
new_object = create.create_object(typeclass_path,
key=new_key,
location=new_location,
home=new_home,
permissions=new_permissions,
locks=new_locks,
aliases=new_aliases,
destination=new_destination)
if not new_object:
return None
# copy over all attributes from old to new.
for attr in original_object.attributes.all():
new_object.attributes.add(attr.key, attr.value)
# copy over all cmdsets, if any
for icmdset, cmdset in enumerate(original_object.cmdset.all()):
if icmdset == 0:
new_object.cmdset.add_default(cmdset)
else:
new_object.cmdset.add(cmdset)
# copy over all scripts, if any
for script in original_object.scripts.all():
ScriptDB.objects.copy_script(script, new_obj=new_object)
return new_object
    def clear_all_sessids(self):
        """
        Clear the db_sessid field on all objects that currently have one
        set (i.e. objects puppeted by a session).
        """
        # only rows with a non-null session id need updating
        self.filter(db_sessid__isnull=False).update(db_sessid=None)
class ObjectManager(ObjectDBManager, TypeclassManager):
    """
    Default Object manager, combining the database search methods of
    `ObjectDBManager` with the filtering of `TypeclassManager`.
    """
    pass
| |
# Copyright 2014 Big Switch Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import os
import mock
from oslo.config import cfg
import webob.exc
from neutron.openstack.common import log as logging
from neutron.tests.unit.bigswitch import fake_server
from neutron.tests.unit.bigswitch import test_base
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_db_plugin as test_plugin
LOG = logging.getLogger(__name__)
SERVERMANAGER = 'neutron.plugins.bigswitch.servermanager'
HTTPS = SERVERMANAGER + '.HTTPSConnectionWithValidation'
CERTCOMBINER = SERVERMANAGER + '.ServerPool._combine_certs_to_file'
FILEPUT = SERVERMANAGER + '.ServerPool._file_put_contents'
GETCACERTS = SERVERMANAGER + '.ServerPool._get_ca_cert_paths'
GETHOSTCERT = SERVERMANAGER + '.ServerPool._get_host_cert_path'
SSLGETCERT = SERVERMANAGER + '.ssl.get_server_certificate'
FAKECERTGET = 'neutron.tests.unit.bigswitch.fake_server.get_cert_contents'
class test_ssl_certificate_base(test_plugin.NeutronDbPluginV2TestCase,
                                test_base.BigSwitchTestBase):
    """Shared fixture for the backend-server SSL certificate tests.

    Subclasses set their config overrides and call `_setUp()`, which mocks
    out the SSL library call and all filesystem access used for
    certificate handling.
    """

    plugin_str = ('%s.NeutronRestProxyV2' %
                  test_base.RESTPROXY_PKG_PATH)
    servername = None  # randomized backend server name, set in _setUp
    cert_base = None  # configured ssl_cert_directory, set in _setUp

    def _setUp(self):
        # unique server name per test so certificate paths never collide
        self.servername = test_api_v2._uuid()
        self.cert_base = cfg.CONF.RESTPROXY.ssl_cert_directory
        self.host_cert_val = 'DUMMYCERTFORHOST%s' % self.servername
        self.host_cert_path = os.path.join(
            self.cert_base,
            'host_certs',
            '%s.pem' % self.servername
        )
        self.comb_cert_path = os.path.join(
            self.cert_base,
            'combined',
            '%s.pem' % self.servername
        )
        self.ca_certs_path = os.path.join(
            self.cert_base,
            'ca_certs'
        )
        cfg.CONF.set_override('servers', ["%s:443" % self.servername],
                              'RESTPROXY')
        self.setup_patches()

        # Mock method SSL lib uses to grab cert from server
        self.sslgetcert_m = mock.patch(SSLGETCERT, create=True).start()
        self.sslgetcert_m.return_value = self.host_cert_val

        # Mock methods that write and read certs from the file-system
        self.fileput_m = mock.patch(FILEPUT, create=True).start()
        self.certcomb_m = mock.patch(CERTCOMBINER, create=True).start()
        self.getcacerts_m = mock.patch(GETCACERTS, create=True).start()

        # this is used to configure what certificate contents the fake HTTPS
        # lib should expect to receive
        self.fake_certget_m = mock.patch(FAKECERTGET, create=True).start()

    def setUp(self):
        super(test_ssl_certificate_base, self).setUp(self.plugin_str)
class TestSslSticky(test_ssl_certificate_base):
    """With ssl_sticky=True an unknown host cert is fetched and cached."""

    def setUp(self):
        self.setup_config_files()
        cfg.CONF.set_override('server_ssl', True, 'RESTPROXY')
        cfg.CONF.set_override('ssl_sticky', True, 'RESTPROXY')
        self._setUp()
        # Set fake HTTPS connection's expectation
        self.fake_certget_m.return_value = self.host_cert_val
        # No CA certs for this test
        self.getcacerts_m.return_value = []
        super(TestSslSticky, self).setUp()

    def test_sticky_cert(self):
        # SSL connection should be successful and cert should be cached
        with contextlib.nested(
            mock.patch(HTTPS, new=fake_server.HTTPSHostValidation),
            self.network()
        ):
            # CA certs should have been checked for
            self.getcacerts_m.assert_has_calls([mock.call(self.ca_certs_path)])
            # cert should have been fetched via SSL lib
            self.sslgetcert_m.assert_has_calls(
                [mock.call((self.servername, 443))]
            )
            # cert should have been recorded
            self.fileput_m.assert_has_calls([mock.call(self.host_cert_path,
                                                       self.host_cert_val)])
            # no ca certs, so host cert only for this combined cert
            self.certcomb_m.assert_has_calls([mock.call([self.host_cert_path],
                                                        self.comb_cert_path)])
class TestSslHostCert(test_ssl_certificate_base):
    """With ssl_sticky=False a pre-installed host certificate is used."""

    def setUp(self):
        self.setup_config_files()
        cfg.CONF.set_override('server_ssl', True, 'RESTPROXY')
        cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY')
        self.httpsPatch = mock.patch(HTTPS, create=True,
                                     new=fake_server.HTTPSHostValidation)
        self.httpsPatch.start()
        self._setUp()
        # Set fake HTTPS connection's expectation
        self.fake_certget_m.return_value = self.host_cert_val
        # No CA certs for this test
        self.getcacerts_m.return_value = []
        # Pretend host cert exists
        self.hcertpath_p = mock.patch(GETHOSTCERT,
                                      return_value=(self.host_cert_path, True),
                                      create=True).start()
        super(TestSslHostCert, self).setUp()

    def test_host_cert(self):
        # SSL connection should be successful because of pre-configured cert
        with self.network():
            self.hcertpath_p.assert_has_calls([
                mock.call(os.path.join(self.cert_base, 'host_certs'),
                          self.servername)
            ])
            # sticky is disabled, no fetching allowed
            self.assertFalse(self.sslgetcert_m.call_count)
            # no ca certs, so host cert is only for this combined cert
            self.certcomb_m.assert_has_calls([mock.call([self.host_cert_path],
                                                        self.comb_cert_path)])
class TestSslCaCert(test_ssl_certificate_base):
    """Validates the server against pre-configured CA certificates only
    (no host certificate; ssl_sticky disabled)."""

    def setUp(self):
        self.setup_config_files()
        cfg.CONF.set_override('server_ssl', True, 'RESTPROXY')
        cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY')
        self.httpsPatch = mock.patch(HTTPS, create=True,
                                     new=fake_server.HTTPSCAValidation)
        self.httpsPatch.start()
        self._setUp()
        # pretend to have a few ca certs
        self.getcacerts_m.return_value = ['ca1.pem', 'ca2.pem']
        # Set fake HTTPS connection's expectation
        self.fake_certget_m.return_value = 'DUMMYCERTIFICATEAUTHORITY'
        super(TestSslCaCert, self).setUp()

    def test_ca_cert(self):
        # SSL connection should be successful because CA cert was present
        # If not, attempting to create a network would raise an exception
        with self.network():
            # sticky is disabled, no fetching allowed
            self.assertFalse(self.sslgetcert_m.call_count)
            # 2 CAs and no host cert so combined should only contain both CAs
            self.certcomb_m.assert_has_calls([mock.call(['ca1.pem', 'ca2.pem'],
                                                        self.comb_cert_path)])
class TestSslWrongHostCert(test_ssl_certificate_base):
    """A cached host certificate with wrong contents must make the request
    fail even with ssl_sticky enabled — sticky fetching only applies when
    no certificate has been cached yet."""

    def setUp(self):
        self.setup_config_files()
        cfg.CONF.set_override('server_ssl', True, 'RESTPROXY')
        cfg.CONF.set_override('ssl_sticky', True, 'RESTPROXY')
        self._setUp()
        # Set fake HTTPS connection's expectation to something wrong
        self.fake_certget_m.return_value = 'OTHERCERT'
        # No CA certs for this test
        self.getcacerts_m.return_value = []
        # Pretend host cert exists
        self.hcertpath_p = mock.patch(GETHOSTCERT,
                                      return_value=(self.host_cert_path, True),
                                      create=True).start()
        super(TestSslWrongHostCert, self).setUp()

    def test_error_no_cert(self):
        # since there will already be a host cert, sticky should not take
        # effect and there will be an error because the host cert's contents
        # will be incorrect
        tid = test_api_v2._uuid()
        data = {}
        data['network'] = {'tenant_id': tid, 'name': 'name',
                           'admin_state_up': True}
        with mock.patch(HTTPS, new=fake_server.HTTPSHostValidation):
            req = self.new_create_request('networks', data, 'json')
            res = req.get_response(self.api)
        # the failed SSL validation surfaces as a 500 from the API
        self.assertEqual(res.status_int,
                         webob.exc.HTTPInternalServerError.code)
        self.hcertpath_p.assert_has_calls([
            mock.call(os.path.join(self.cert_base, 'host_certs'),
                      self.servername)
        ])
        # sticky is enabled, but a host cert already exists so it should not
        # fetch a new one
        self.assertFalse(self.sslgetcert_m.call_count)
        # no ca certs, so host cert only for this combined cert
        self.certcomb_m.assert_has_calls([mock.call([self.host_cert_path],
                                                    self.comb_cert_path)])
class TestSslNoValidation(test_ssl_certificate_base):
    """With no_ssl_validation enabled, HTTPS must work without any
    certificates, and no cert fetching or combining may occur."""

    def setUp(self):
        self.setup_config_files()
        cfg.CONF.set_override('server_ssl', True, 'RESTPROXY')
        cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY')
        cfg.CONF.set_override('no_ssl_validation', True, 'RESTPROXY')
        self._setUp()
        super(TestSslNoValidation, self).setUp()

    def test_validation_disabled(self):
        # SSL connection should be successful without any certificates.
        # If not, attempting to create a network will raise an exception.
        # NOTE: contextlib.nested() is deprecated since Python 2.7 and was
        # removed in Python 3; the multi-manager 'with' statement below is
        # the supported equivalent.
        with mock.patch(HTTPS, new=fake_server.HTTPSNoValidation), \
                self.network():
            # no sticky grabbing and no cert combining with no enforcement
            self.assertFalse(self.sslgetcert_m.call_count)
            self.assertFalse(self.certcomb_m.call_count)
| |
import sys
import os
import numpy as np
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn import cross_validation
from sklearn.metrics import confusion_matrix
import pylab as plt
from sklearn.metrics import accuracy_score
import logging
import json
from collections import defaultdict
from sklearn.externals import joblib
import argparse
# Tunables for the training pipeline.
TOTAL_SIZE_OF_DATASET = 50000  # max samples kept when limit_dataset is used
TRAIN_SET_SIZE_PERCENTAGE = 80  # train split, in percent
TEST_SET_SIZE_PERCENTAGE = 20  # test split, in percent
# predictions whose top class probability falls below this are reported
# as the "unknown" class
CLASSIFICATION_PROBABILITY_THRESHOLD = 0.7
NUMBER_OF_CPUS_TO_USE = -1  # -1 means "all cores" (joblib convention)
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class Bunch(dict):
    """A dict whose items are also reachable as attributes.

    Mirrors sklearn's Bunch container: ``b['x']`` and ``b.x`` refer to the
    same entry, and attribute assignment stores a new key.
    """

    def __init__(self, **kwargs):
        super(Bunch, self).__init__(kwargs)
        # alias the instance dict to the mapping itself so attribute and
        # item access share storage
        self.__dict__ = self
def load_dataset(dataset_file_paths):
    """Read a TSV dataset file and group its samples by result class.

    The first row is a header: column 0 is the FEN, the last three columns
    are non-feature fields, and everything in between names a feature.
    Each data row contributes ``[fen, feature_values]`` to the bucket keyed
    by the game result (second-to-last column).

    Returns:
        (dataset, features_names): dataset maps result -> list of
        [fen, features] pairs; features_names is the header slice.
    """
    grouped = defaultdict(list)
    with open(dataset_file_paths, "r") as handle:
        header_cols = next(handle).split("\t")
        feature_names = header_cols[1:-3]
        for row in handle:
            cols = row.strip().split("\t")
            result = cols[-2]
            grouped[result].append([cols[0], cols[1:-3]])
    return (grouped, feature_names)
def limit_dataset(dataset):
    """Cap the dataset at TOTAL_SIZE_OF_DATASET samples, balancing classes.

    Classes are visited from smallest to largest; at each step the remaining
    budget is divided evenly among the classes not yet taken, so small
    classes keep everything and large classes get truncated.

    Returns:
        A new defaultdict(list); the input mapping is not modified.
    """
    new_dataset = defaultdict(list)
    samples_counter = 0
    sorted_classes_names = sorted(dataset, key=lambda k: len(dataset[k]))
    for indx, class_name in enumerate(sorted_classes_names):
        # Floor division is required here: under Python 3 the original '/'
        # produced a float and slicing with a float raises TypeError.
        # '//' is identical for positive ints on Python 2.
        batch_size = (TOTAL_SIZE_OF_DATASET - samples_counter) // (len(sorted_classes_names) - indx)
        new_dataset[class_name] = dataset[class_name][:batch_size]
        samples_counter += len(new_dataset[class_name])
    return new_dataset
def split_dataset(dataset, n_feature):
    """Shuffle-split each class of `dataset` into train/test partitions.

    Args:
        dataset: mapping of class name -> list of [term_name, features].
        n_feature: number of feature columns (width of the X arrays).

    Returns:
        Bunch with X_train/X_test, y_train/y_test and the matching
        train/test term-name arrays.

    NOTE(review): the accumulators are created with dtype=bool but receive
    string labels/term names; numpy silently promotes the dtype on append.
    Confirm whether bool was really intended for the X arrays only.
    """
    X_train = np.empty((0, n_feature), dtype=bool)
    X_test = np.empty((0, n_feature), dtype=bool)
    y_train = np.empty((0,), dtype=bool)
    y_test = np.empty((0,), dtype=bool)
    train_terms_name = np.empty((0,), dtype=bool)
    test_terms_name = np.empty((0,), dtype=bool)
    train_size = float(TRAIN_SET_SIZE_PERCENTAGE) / 100
    test_size = float(TEST_SET_SIZE_PERCENTAGE) / 100
    # .items() works on both Python 2 and 3; the original .iteritems() is
    # Python-2-only and raises AttributeError under Python 3.
    for k, v in dataset.items():
        data = [sample[1] for sample in v]
        target = [k] * len(data)
        terms_name = [sample[0] for sample in v]
        # Split arrays or matrices into random train and test subsets;
        # the return order is (data_tr, data_te, target_tr, target_te,
        # terms_tr, terms_te) matching the three inputs.
        splitted_lists = cross_validation.train_test_split(data, target, terms_name, train_size=train_size, test_size=test_size, random_state=None)
        X_train = np.append(X_train, splitted_lists[0], axis=0)
        X_test = np.append(X_test, splitted_lists[1], axis=0)
        y_train = np.append(y_train, splitted_lists[2], axis=0)
        y_test = np.append(y_test, splitted_lists[3], axis=0)
        train_terms_name = np.append(train_terms_name, splitted_lists[4], axis=0)
        test_terms_name = np.append(test_terms_name, splitted_lists[5], axis=0)
    return Bunch(X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test,
                 train_terms_name=train_terms_name, test_terms_name=test_terms_name)
def validation(y_test, y_pred, y_pred_with_unknown_cls, y_pred_fictive, labels):
    """Log accuracy metrics for the raw, thresholded and 'fictive' predictions.

    Args:
        y_test: true labels.
        y_pred: raw classifier predictions.
        y_pred_with_unknown_cls: predictions with low-confidence samples
            replaced by "unknown".
        y_pred_fictive: predictions with low-confidence samples replaced by
            the true label (an optimistic upper bound for accuracy).
        labels: label order for the confusion matrices; the last entry is
            expected to be "unknown".

    NOTE(review): the ratio logging divides by the true/false prediction
    counts; a test set with zero true (or zero false) predictions would
    raise ZeroDivisionError — confirm this is acceptable.
    """
    cm = confusion_matrix(y_test, y_pred, labels)
    cm_with_unknown = confusion_matrix(y_test, y_pred_with_unknown_cls, labels)
    # totals derived from the confusion matrix: diagonal = correct
    pred_counter = cm.sum()
    true_pred_counter = cm.diagonal().sum()
    false_pred_counter = pred_counter - true_pred_counter
    # the last column of the thresholded matrix holds "unknown" predictions
    unknown_pred_counter = cm_with_unknown[:, -1].sum()
    logger.info("classifier accuracy score (without a probability threshold): %f" % (accuracy_score(y_test, y_pred)))
    logger.info("classifier accuracy score (with a probability threshold of %s): %f" %
                (CLASSIFICATION_PROBABILITY_THRESHOLD, accuracy_score(y_test, y_pred_fictive)))
    logger.info("unknown class / all tested data ratio: %.2f%%" % ((float(unknown_pred_counter) / pred_counter) * 100))
    logger.info("unknown class / false predictions ratio: %.2f%%" % ((float(unknown_pred_counter) / false_pred_counter) * 100))
    logger.info("unknown class / true predictions ratio: %.2f%%" % ((float(unknown_pred_counter) / true_pred_counter) * 100))
def plot_cm(cm, labels, file_name):
    """Draw confusion matrix `cm` as an annotated heatmap and save it.

    Args:
        cm: 2-D array-like confusion matrix.
        labels: tick labels for both axes (true classes on y, predicted
            classes on x).
        file_name: output image path (saved at 100 dpi, 18.5x10.5 inches).
    """
    plt.clf()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(cm)
    fig.colorbar(cax)
    fig.set_size_inches(18.5, 10.5)
    ax.set_xticks(range(len(labels)))
    ax.set_yticks(range(len(labels)))
    ax.set_xticklabels(labels, rotation=45)
    ax.set_yticklabels(labels, rotation=0)
    plt.ylabel('True classes')
    plt.xlabel('Predicted classes')
    # write the numeric value into the center of each cell
    for i, row in enumerate(cm):
        for j, column in enumerate(row):
            ax.annotate(cm[i, j], xy=(j, i), va="center", ha="center")
    fig.savefig(file_name, dpi=100)
def plot_confusion_matrices(y_test, y_pred, labels, confusionMatricesDir, stage):
    """Render two confusion-matrix heatmaps into `confusionMatricesDir`:
    one with raw counts and one row-normalized to probabilities, both
    suffixed with `stage`."""
    cm = confusion_matrix(y_test, y_pred, labels)
    # plot with counters
    plot_cm(cm, labels, "%s/confusion_matrix_counters_%s.png" % (confusionMatricesDir, stage))
    # plot with probabilities
    np.set_printoptions(precision=2, suppress=True)
    probabilities_cm = np.empty((len(labels), len(labels)), dtype=float)
    for i, row in enumerate(cm):
        row_total = sum(row)  # hoisted: was computed twice per cell
        for j, column in enumerate(row):
            if row_total:
                # round() directly instead of the original's obscure
                # float -> "%.2f" string -> float round-trip through the
                # ndarray assignment
                probabilities_cm[i, j] = round(float(cm[i, j]) / row_total, 2)
            else:
                # empty row (class absent from both y_test and y_pred)
                probabilities_cm[i, j] = 0.0
    plot_cm(probabilities_cm, labels, "%s/confusion_matrix_probabilities_%s.png" % (confusionMatricesDir, stage))
def produce_output(y_test, y_pred, y_probs, test_terms_name, false_predictions_f, unknown_predictions_f):
    """Write two TSV reports: low-confidence ("unknown") predictions and
    confident-but-wrong predictions.

    A sample whose top probability is below the threshold goes only to the
    unknown report, even when its prediction is also wrong.
    """
    header = 'index\tterm_name\torigin\tprediction\tprobability\n'
    row_fmt = "%s\t%s\t%s\t%s\t%s\n"
    with open(false_predictions_f, "w") as false_pred_f, open(unknown_predictions_f, "w") as unknown_pred_f:
        false_pred_f.write(header)
        unknown_pred_f.write(header)
        for idx in range(len(y_pred)):
            row = row_fmt % (idx, test_terms_name[idx], y_test[idx],
                             y_pred[idx], y_probs[idx])
            if y_probs[idx] < CLASSIFICATION_PROBABILITY_THRESHOLD:
                unknown_pred_f.write(row)
            elif y_pred[idx] != y_test[idx]:
                false_pred_f.write(row)
def process_prediction_vector(y_test, y_pred, y_pred_probabilities):
    """Derive three per-sample vectors from the raw predictions.

    Returns a tuple of:
        - predictions with low-confidence samples replaced by "unknown";
        - "fictive" predictions with low-confidence samples replaced by the
          true label (optimistic upper bound for accuracy scoring);
        - the top class probability for each sample.
    """
    max_probs = []
    with_unknown = []
    fictive = []
    for idx, predicted in enumerate(y_pred):
        top = max(y_pred_probabilities[idx])
        max_probs.append(top)
        confident = not (top < CLASSIFICATION_PROBABILITY_THRESHOLD)
        with_unknown.append(predicted if confident else "unknown")
        fictive.append(predicted if confident else y_test[idx])
    return (with_unknown, fictive, max_probs)
def ml_train(datasetFilePath, falsePredictionsFilePath, unknownPredictionsFilePath, confusionMatricesDir, classifierFilePath):
    """End-to-end train/test run: load, split, fit, evaluate, and dump.

    Trains a one-vs-rest linear SVM with probability estimates, evaluates
    on the held-out split with and without the probability threshold,
    writes three confusion-matrix plot pairs and the false/unknown
    prediction reports, and serializes the fitted model with joblib.
    """
    logger.info("start of training and testing phase")
    classifier = OneVsRestClassifier(SVC(kernel='linear', probability=True), n_jobs=NUMBER_OF_CPUS_TO_USE)
    logger.info("loading data set")
    dataset, features_names = load_dataset(datasetFilePath)
    # dataset capping is currently disabled; re-enable the next line to
    # limit the run to TOTAL_SIZE_OF_DATASET samples
    #limited_dataset = limit_dataset(dataset)
    limited_dataset = dataset
    ml_dataset = split_dataset(limited_dataset, len(features_names))
    logger.info("fitting training set X_train - %s, y_train - %s" % (ml_dataset.X_train.shape, ml_dataset.y_train.shape))
    classifier.fit(ml_dataset.X_train, ml_dataset.y_train)
    logger.info("predicting test set X_test - %s, y_test - %s" % (ml_dataset.X_test.shape, ml_dataset.y_test.shape))
    y_pred = classifier.predict(ml_dataset.X_test)
    y_pred_probabilities = classifier.predict_proba(ml_dataset.X_test)
    y_pred_with_unknown_cls, y_pred_fictive, max_y_pred_probs = process_prediction_vector(ml_dataset.y_test, y_pred, y_pred_probabilities)
    # plot stages: "1" raw predictions, "2" with unknown class, "3" fictive
    validation(ml_dataset.y_test, y_pred, y_pred_with_unknown_cls, y_pred_fictive, list(classifier.classes_) + ["unknown"])
    plot_confusion_matrices(ml_dataset.y_test, y_pred, list(classifier.classes_) + ["unknown"], confusionMatricesDir, "1")
    plot_confusion_matrices(ml_dataset.y_test, y_pred_with_unknown_cls, list(classifier.classes_) + ["unknown"], confusionMatricesDir, "2")
    plot_confusion_matrices(ml_dataset.y_test, y_pred_fictive, list(classifier.classes_) + ["unknown"], confusionMatricesDir, "3")
    produce_output(ml_dataset.y_test, y_pred, max_y_pred_probs, ml_dataset.test_terms_name, falsePredictionsFilePath, unknownPredictionsFilePath)
    logger.info("exporting classifier model")
    joblib.dump(classifier, classifierFilePath)
    logger.info("end of training and testing phase")
def parseOptions():
    """Parse command-line options for the training pipeline.

    The historical misspelled flag ``--false-predifction-f`` is kept for
    backward compatibility; the correctly spelled ``--false-predictions-f``
    is now accepted as an alias for the same destination.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset-f", required=True, dest="datasetFilePath", help="The full path for the dataset file (.tsv)")
    parser.add_argument("--false-predifction-f", "--false-predictions-f", required=True, dest="falsePredictionsFilePath", help="The full path for the false predictions output file (.tsv)")
    parser.add_argument("--unknown-predictions-f", required=True, dest="unknownPredictionsFilePath", help="The full path for the unknown predictions output file (.tsv)")
    parser.add_argument("--confusion-matrices-dir", required=True, dest="confusionMatricesDir", help="The full path for the confusion matrices directory")
    parser.add_argument("--classifier-f", required=True, dest="classifierFilePath", help="The full path for the classifier main dump file (.pkl)")
    return parser.parse_args()
def main():
    # Entry point: wire the parsed CLI options into the train/test pipeline.
    options = parseOptions()
    ml_train(options.datasetFilePath, options.falsePredictionsFilePath, options.unknownPredictionsFilePath, options.confusionMatricesDir, options.classifierFilePath)
if __name__ == '__main__':
    # NOTE(review): hard-coded developer path and synthesized argv — this
    # bypasses real CLI usage and should probably be removed or made
    # configurable before the script is shared.
    path = "/home/taykey/code/learningchess/data/results/checkmateclassifier/results6/"
    sys.argv.append("--dataset-f="+path+"input_10k_features.tsv")
    sys.argv.append("--false-predifction-f="+path+"false_predictions_f.tsv")
    sys.argv.append("--unknown-predictions-f="+path+"unknown_predictions_f.tsv")
    sys.argv.append("--confusion-matrices-dir="+path+"classifier")
    sys.argv.append("--classifier-f="+path+"classifier.pkl")
    main()
| |
from __future__ import unicode_literals
from flask import Flask, render_template_string, Markup
from unittest import TestCase
import mock
import misaka
from misaka import (EXT_AUTOLINK, EXT_FENCED_CODE,
EXT_LAX_HTML_BLOCKS, EXT_NO_INTRA_EMPHASIS, EXT_SPACE_HEADERS,
EXT_STRIKETHROUGH, EXT_SUPERSCRIPT, EXT_TABLES, HTML_ESCAPE,
HTML_EXPAND_TABS, HTML_HARD_WRAP, HTML_SAFELINK, HTML_SKIP_HTML,
HTML_SKIP_IMAGES, HTML_SKIP_LINKS, HTML_SKIP_STYLE, HTML_SMARTYPANTS,
HTML_TOC, HTML_TOC_TREE, HTML_USE_XHTML, TABLE_ALIGNMASK, TABLE_ALIGN_C,
TABLE_ALIGN_L, TABLE_ALIGN_R, TABLE_HEADER)
from flask.ext.misaka import Misaka, markdown
# Sample markdown exercising several extension features (strikethrough,
# superscript, autolink, intra-word emphasis, smartypants apostrophe).
TEST_MD = "*This* ~~contains~~ ``some`` mark^(down) extensions: www.markdown.com foo_bar_baz it's"
# App with the default Misaka configuration (no extensions enabled).
app = Flask(__name__)
app.debug = True
Misaka(app)
### templating tests ###
@app.route('/a')
def view_render_inline():
    """Render markdown via the template filter applied to a passed-in string."""
    return render_template_string(
        '{{s|markdown}}', s="This is ~~restructuredtext~~ *markdown*")
def test_render_inline():
    """Default app: emphasis renders, strikethrough stays literal."""
    resp = app.test_client().open('/a')
    assert resp.data == b'<p>This is ~~restructuredtext~~ <em>markdown</em></p>\n'
@app.route('/b')
def view_render_var_block():
    """Render a template variable inside a {% filter markdown %} block."""
    return render_template_string(
        '''{% filter markdown %}{{s}}{% endfilter %}''',
        s="This is a *markdown* block")
def test_render_var_block():
    """The markdown filter block renders an interpolated variable."""
    resp = app.test_client().open('/b')
    assert resp.data == b'<p>This is a <em>markdown</em> block</p>\n'
@app.route('/c')
def view_render_in_block():
    """Render literal template text inside a {% filter markdown %} block."""
    template = '''{% filter markdown %}This is a *markdown* block{% endfilter %}'''
    return render_template_string(template)
def test_render_in_block():
    """The markdown filter block renders literal template text."""
    resp = app.test_client().open('/c')
    assert resp.data == b'<p>This is a <em>markdown</em> block</p>\n'
### markdown extensions in templates
# Second app, configured with the strikethrough extension enabled.
extapp = Flask(__name__)
extapp.debug = True
Misaka(extapp, strikethrough=True)
@extapp.route('/d')
def view_render_inline_ext():
    """Same template as /a, but served by the strikethrough-enabled app."""
    return render_template_string(
        '{{s|markdown}}', s="This is ~~restructuredtext~~ *markdown*")
def test_render_inline_ext():
    """With strikethrough enabled, ~~...~~ becomes <del>."""
    resp = extapp.test_client().open('/d')
    assert resp.data == b'<p>This is <del>restructuredtext</del> <em>markdown</em></p>\n'
# Note that the Markdown extension tests aren't actually testing that the
# Markdown is rendered correctly; that should be covered by the test suite of
# the misaka module. These tests should test that Flask-Misaka is calling
# the misaka module correctly, and returning the result unmodified
# (aside from being wrapped in a Markup class instance.)
@mock.patch("flask.ext.misaka.misaka.html", side_effect=misaka.html)
class MarkdownExtensionTests(TestCase):
    """Checks that Flask-Misaka maps keyword options onto the correct
    misaka extension/render-flag bitmasks and wraps the output in Markup.

    Markdown rendering itself is covered by misaka's own test suite; these
    tests only verify the arguments forwarded to misaka.html and that the
    result is returned unmodified.
    """

    def _check(self, html, result, ext, flags):
        # Shared assertion tail: misaka.html received the expected bitmasks,
        # and the wrapped result equals a direct misaka.html call.
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))

    def test_defaults(self, html):
        self._check(html, markdown(TEST_MD), 0, 0)

    def test_one_ext(self, html):
        self._check(html, markdown(TEST_MD, autolink=True), EXT_AUTOLINK, 0)

    def test_two_ext(self, html):
        self._check(html, markdown(TEST_MD, fenced_code=True, lax_html=True),
                    EXT_FENCED_CODE | EXT_LAX_HTML_BLOCKS, 0)

    def test_one_render(self, html):
        self._check(html, markdown(TEST_MD, escape=True), 0, HTML_ESCAPE)

    def test_two_render(self, html):
        self._check(html, markdown(TEST_MD, wrap=True, safelink=True),
                    0, HTML_HARD_WRAP | HTML_SAFELINK)

    def test_one_ext_one_render(self, html):
        self._check(html,
                    markdown(TEST_MD, no_intra_emphasis=True, no_html=True),
                    EXT_NO_INTRA_EMPHASIS, HTML_SKIP_HTML)

    def test_two_ext_two_render(self, html):
        self._check(html,
                    markdown(TEST_MD, strikethrough=True, superscript=True,
                             skip_links=True, no_style=True),
                    EXT_STRIKETHROUGH | EXT_SUPERSCRIPT,
                    HTML_SKIP_LINKS | HTML_SKIP_STYLE)

    def test_inverse_ext(self, html):
        # intra_emphasis=False must *set* the NO_INTRA_EMPHASIS bit
        self._check(html, markdown(TEST_MD, intra_emphasis=False),
                    EXT_NO_INTRA_EMPHASIS, 0)

    def test_inverse_render(self, html):
        # style=False must set the SKIP_STYLE render flag
        self._check(html, markdown(TEST_MD, style=False), 0, HTML_SKIP_STYLE)

    def test_undefined_option(self, html):
        # unknown options are silently ignored
        self._check(html, markdown(TEST_MD, fireworks=True), 0, 0)

    def test_defined_and_undefined_options(self, html):
        self._check(html,
                    markdown(TEST_MD, smartypants=True, stupidpants=False),
                    0, HTML_SMARTYPANTS)

    def test_set_defaults(self, html):
        # options given at construction time become the render defaults
        md = Misaka(smartypants=True, tables=True)
        self._check(html, md.render(TEST_MD), EXT_TABLES, HTML_SMARTYPANTS)

    def test_override_defaults(self, html):
        # per-call options override construction-time defaults
        md = Misaka(autolink=True)
        self._check(html, md.render(TEST_MD, autolink=False), 0, 0)
class FactoryPatternTests(TestCase):
    """The extension supports the Flask app-factory pattern via init_app()."""

    def test_init(self):
        extension = Misaka()
        fresh_app = Flask(__name__)
        extension.init_app(fresh_app)
        self.assertIn("markdown", fresh_app.jinja_env.filters)
| |
#!/usr/bin/env python
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script parses conformance test output to produce testgrid entries
#
# Assumptions:
# - there is one log file and one JUnit file (true for current conformance tests..)
# - the log file contains ginkgo's output (true for kubetest and sonobuoy..)
# - the ginkgo output will give us start / end time, and overall success
#
# - the start timestamp is suitable as a testgrid ID (unique, monotonic)
#
# - the test ran in the current year unless --year is provided
# - the timestamps are parsed on a machine with the same local time (zone)
# settings as the machine that produced the logs
#
# The log file is the source of truth for metadata, the JUnit will be consumed
# by testgrid / gubernator for individual test case results
#
# Usage: see README.md
import re
import sys
import time
import datetime
import argparse
import json
import subprocess
from os import path
# logs often contain ANSI escape sequences
# https://stackoverflow.com/a/14693789
ANSI_ESCAPE_RE = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
# NOTE e2e logs use go's time.StampMilli ("Jan _2 15:04:05.000")
# Example log line with a timestamp:
# Jan 26 06:38:46.284: INFO: Running AfterSuite actions on all node
# the third ':' separates the date from the rest
E2E_LOG_TIMESTAMP_RE = re.compile(r'(... .\d \d\d:\d\d:\d\d\.\d\d\d):.*')
# Ginkgo gives a line like the following at the end of successful runs:
# SUCCESS! -- 123 Passed | 0 Failed | 0 Pending | 587 Skipped PASS
# we match this to detect overall success
E2E_LOG_SUCCESS_RE = re.compile(r'SUCCESS! -- .* PASS')
def log_line_strip_escape_sequences(line):
    """Return `line` with any ANSI terminal escape sequences removed."""
    cleaned = ANSI_ESCAPE_RE.sub('', line)
    return cleaned
def parse_e2e_log_line_timestamp(line, year):
    """Extract the leading go-style timestamp from a ginkgo e2e log line.

    Args:
        line (str): the log line
        year (str): 'YYYY' — the year the log is from; the log timestamps
            themselves carry no year component

    Returns:
        datetime.datetime, or None when the line has no leading timestamp.
    """
    match = E2E_LOG_TIMESTAMP_RE.match(line)
    if not match:
        return None
    # prepend the year so strptime can build a complete datetime
    stamped = '%s %s' % (year, match.group(1))
    return datetime.datetime.strptime(stamped, '%Y %b %d %H:%M:%S.%f')
def parse_e2e_logfile(file_handle, year):
    """parse e2e logfile at path, assuming the log is from year

    Args:
        file_handle (file): the log file, iterated for lines
        year (str): YYYY year logfile is from

    Returns:
        started (datetime.datetime), finished (datetime.datetime), passed (boolean)

    NOTE(review): a log with fewer than two timestamped lines returns None
    for `finished` (or both values), which will break downstream timestamp
    conversion — confirm inputs always carry at least two timestamps.
    """
    started = finished = None
    passed = False
    for line in file_handle:
        line = log_line_strip_escape_sequences(line)
        # try to get a timestamp from each line, keep the first one as
        # start time, and the last one as finish time
        timestamp = parse_e2e_log_line_timestamp(line, year)
        if timestamp:
            if started:
                finished = timestamp
            else:
                started = timestamp
        # if we found the ginkgo success line then the run passed
        is_success = E2E_LOG_SUCCESS_RE.match(line)
        if is_success:
            passed = True
    return started, finished, passed
def datetime_to_unix(datetime_obj):
    """Convert a naive local-time datetime.datetime to a unix timestamp."""
    seconds = time.mktime(datetime_obj.timetuple())
    return int(seconds)
def testgrid_started_json_contents(start_time):
    """Serialize a testgrid started.json blob.

    Args:
        start_time (datetime.datetime)

    Returns:
        str: JSON object carrying the unix 'timestamp'.
    """
    return json.dumps({'timestamp': datetime_to_unix(start_time)})
def testgrid_finished_json_contents(finish_time, passed):
    """Serialize a testgrid finished.json blob.

    Args:
        finish_time (datetime.datetime)
        passed (bool)

    Returns:
        str: JSON object carrying the unix 'timestamp' and a
        'SUCCESS'/'FAILURE' result string.
    """
    return json.dumps({
        'timestamp': datetime_to_unix(finish_time),
        'result': 'SUCCESS' if passed else 'FAILURE'
    })
def upload_string(gcs_path, text):
    """Upload `text` to `gcs_path` by piping it to `gsutil cp -`.

    Raises:
        RuntimeError: if gsutil exits non-zero.
    """
    cmd = ['gsutil', '-q', '-h', 'Content-Type:text/plain', 'cp', '-', gcs_path]
    # sys.stderr.write is valid on both Python 2 and 3; the original
    # 'print >>sys.stderr' statement is Python-2-only syntax.
    sys.stderr.write('Run: %s stdin=%s\n' % (cmd, text))
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE)
    # NOTE(review): under Python 3, `text` must be bytes here (or the pipe
    # opened in text mode) — confirm before a full py3 port.
    proc.communicate(input=text)
    if proc.returncode != 0:
        raise RuntimeError("Failed to upload with exit code: %d" % proc.returncode)
def upload_file(gcs_path, file_path):
    """Upload the file at `file_path` to `gcs_path` via `gsutil cp`.

    Raises:
        RuntimeError: if gsutil exits non-zero.
    """
    cmd = ['gsutil', '-q', '-h', 'Content-Type:text/plain', 'cp', file_path, gcs_path]
    # sys.stderr.write is valid on both Python 2 and 3; the original
    # 'print >>sys.stderr' statement is Python-2-only syntax.
    sys.stderr.write('Run: %s\n' % (cmd,))
    proc = subprocess.Popen(cmd)
    proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError('Failed to upload with exit code: %d' % proc.returncode)
def parse_args(cli_args=None):
    """Build the CLI parser and apply it.

    Args:
        cli_args (list or None): argument vector; defaults to sys.argv[1:].

    Returns:
        argparse.Namespace with bucket, year, junit and log attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--bucket',
        required=True,
        help=('GCS bucket to upload the results to,'
              ' of the form \'gs://foo/bar\''),
    )
    parser.add_argument(
        '--year',
        default=str(datetime.datetime.now().year),
        help=('the year in which the log is from, defaults to the current year.'
              ' format: YYYY'),
    )
    parser.add_argument(
        '--junit',
        required=True,
        help='path to the junit xml results file',
    )
    parser.add_argument(
        '--log',
        required=True,
        help='path to the test log file, should contain the ginkgo output',
    )
    return parser.parse_args(
        args=cli_args if cli_args is not None else sys.argv[1:])
def main(cli_args):
    """Parse the e2e log, build testgrid metadata, and upload everything.

    Args:
        cli_args (list): CLI argument vector (e.g. sys.argv[1:])
    """
    args = parse_args(cli_args)
    log, junit, year, bucket = args.log, args.junit, args.year, args.bucket
    # parse the e2e.log for start time, finish time, and success
    with open(log) as file_handle:
        started, finished, passed = parse_e2e_logfile(file_handle, year)
    # convert parsed results to testgrid json metadata blobs
    started_json = testgrid_started_json_contents(started)
    finished_json = testgrid_finished_json_contents(finished, passed)
    # use timestamp as build ID
    gcs_dir = bucket + '/' + str(datetime_to_unix(started))
    # upload metadata, log, junit to testgrid.
    # Parenthesized single-argument print works identically on Python 2
    # (parens group the expression) and Python 3, unlike the original
    # Python-2-only print statements.
    print('Uploading entry to: %s' % gcs_dir)
    upload_string(gcs_dir+'/started.json', started_json)
    upload_string(gcs_dir+'/finished.json', finished_json)
    upload_file(gcs_dir+'/build-log.txt', log)
    upload_file(gcs_dir+'/artifacts/'+path.basename(junit), junit)
    print('Done.')
if __name__ == '__main__':
    # script entry point: forward CLI args (minus program name) to main()
    main(sys.argv[1:])
| |
# filtering regular expressions, used to strip out annoyances like ads,
# web bugs and the like from feeds
import re
import tembozapp.degunk as degunk
# uncomment this if you have made changes to the degunk module
# reload(degunk)
# Each entry is a degunk filter applied to every feed item: degunk.Re removes
# HTML matching the regex (or, with a replacement argument, rewrites it),
# degunk.ReUrl rewrites item URLs, and ReTitle/UseFirstLink/Dereference are
# special-case helpers -- see tembozapp.degunk for their semantics.
# NOTE(review): many patterns are non-raw strings containing backslash
# escapes such as '\s'; they work today but raw strings would be safer
# against future invalid-escape-sequence errors.
filter_list = [
  # don't mess with breaks
  degunk.Re('(<br\s+[^>]*>)', 0, '<br>'),
  # Blegs
  degunk.Re('<a href="http://www.bloglines.com/sub/.*?</a>'),
  degunk.Re('<a href="http://del.icio.us/post.*?</a>'),
  degunk.Re('<a href="http://digg.com/submit.*?</a>'),
  degunk.Re('<a href="http://www.furl.net/storeIt.jsp.*?</a>'),
  degunk.Re('<a href="http://ma.gnolia.com/bookmarklet/add.*?</a>'),
  degunk.Re('<a href="http://www.propeller.com/submit.*?</a>'),
  degunk.Re('<a href="http://reddit.com/submit.*?</a>'),
  degunk.Re('<a href="http://www.sphere.com/search\\?q=sphereit.*?</a>'),
  degunk.Re('<a href="http://www.stumbleupon.com/submit.*?</a>'),
  degunk.Re('<a href="http://tailrank.com/share/.*?</a>'),
  degunk.Re('<a href="http://technorati.com/faves\\?add.*?</a>'),
  degunk.Re('<a href="http://www.feedburner.com/fb/a/emailFlare.*?</a>'),
  degunk.Re('<a href="http://slashdot.org/bookmark.pl.*?</a>'),
  degunk.Re('<a href="http://www.facebook.com/sharer?.php.*?</a>'),
  degunk.Re('<a href="http://www.google.com/bookmarks/mark.*?</a>'),
  degunk.Re('<a href="http://blinklist.com.*?</a>'),
  degunk.Re('<a href="http://del.irio.us.*?</a>'),
  degunk.Re('<a href="http://www.kaboodle.com.*?</a>'),
  degunk.Re('<a href="http://www.newsvine.com.*?</a>'),
  degunk.Re('<p class="addtoany_.*?</p>', re.MULTILINE + re.DOTALL),
  degunk.Re('<a[^>]*href="[^"]*addtoany.com.*?</a>', re.MULTILINE + re.DOTALL),
  degunk.Re('<div class="social_bookmark">.*?</div>',
            re.MULTILINE + re.DOTALL),
  degunk.Re(r'<a href="http://www.pheedcontent.com.*?</a>\s*'),
  degunk.Re('<div class="zemanta.*?</div>', re.MULTILINE + re.DOTALL),
  degunk.Re('<p>Follow us on Twitter.*?</p>',
            re.MULTILINE + re.DOTALL + re.IGNORECASE),
  degunk.Re('<div class="tweetmeme_button".*?</div>',
            re.MULTILINE + re.DOTALL + re.IGNORECASE),
  degunk.Re('<p><a href="[^"]*sharethis.com.*?</p>',
            re.MULTILINE + re.DOTALL + re.IGNORECASE),
  degunk.Re('<a href="[^">]*.tweetmeme.com.*?</a>',
            re.MULTILINE + re.DOTALL + re.IGNORECASE),
  degunk.Re('<a [^>]* href="http://twitter.com/home/[?]status.*?</a>',
            re.MULTILINE + re.DOTALL + re.IGNORECASE),
  degunk.Re('<a [^>]*feedblitz.com.*?</a>',
            re.MULTILINE + re.DOTALL + re.IGNORECASE),
  # Feedburner annoyances
  degunk.Re('<a href[^>]*><img src="http://feeds.feedburner[^>]*></a>'),
  degunk.Re('<p><a href="(http://feeds\\.[^"/>]*/~./)[^"]*">'
            '<img src="\\1[^>]*></a></p>'),
  degunk.Re('<img src="http://feeds.feedburner.com.*?/>'),
  degunk.Re('<div>\\s*<a href="[^"]*/~ff/.*?</div>', re.IGNORECASE + re.DOTALL),
  # web bugs dumb enough to reveal themselves
  degunk.Re('<img[^>]*width="1"[^>]*height="1"[^>]*>'),
  degunk.Re('<img[^>]*height="1"[^>]*width="1"[^>]*>'),
  degunk.Re('<img[^>]*width="0"[^>]*height="0"[^>]*>'),
  degunk.Re('<img[^>]*height="0"[^>]*width="0"[^>]*>'),
  # Google ads
  degunk.Re('(<p>)?<a[^>]*href="http://[a-z]*ads.googleadservices[^>]*>'
            '[^<>]*<img [^<>]*></a>(</p>)?', re.MULTILINE),
  degunk.Re('<a[^>]*href="http://www.google.com/ads_by_google[^>]*>[^<>]*</a>',
            re.MULTILINE),
  degunk.Re('<p><map[^>]*><area[^>]*href="http://imageads.google.*?</p>',
            re.MULTILINE),
  # Wordpress stats
  degunk.Re('<img[^>]*src="http://feeds.wordpress[^>]*>'),
  # Falk AG ads
  degunk.Re('<div><br>\s*<strong>.*?<a href="[^"]*falkag.net[^>]*>.*?</strong>'
            '<br>.*?</div>', re.IGNORECASE + re.DOTALL),
  degunk.Re('<a href="[^"]*falkag.net[^>]*><img[^>]*></a>'),
  # Empty paragraphs used as spacers in front of ads
  degunk.Re('<p> </p>'),
  degunk.Re(r'<p><br />\s*</p>\s*', re.MULTILINE),
  degunk.Re(r'\s*(<br>)?<p>\s*<br>\s*</p>\s*', re.MULTILINE),
  # DoubleClick ads
  degunk.Re('<a[^>]*href="http://ad.doubleclick.net[^>]*>.*?</a>',
            re.MULTILINE),
  degunk.Re('<p>ADVERTISEMENT.*?</p>'),
  # Yahoo ads
  degunk.Re('<p class="adv">.*?</p>'),
  # Commindo ads
  degunk.Re('<div.*<img[^>]*commindo-media.*?</div>',
            re.MULTILINE + re.DOTALL),
  # annoying forms inside posts, e.g. Russell Beattie
  degunk.Re('<form.*?</form>', re.IGNORECASE + re.DOTALL),
  # Weblogs Inc, ads
  degunk.Re('<p><a[^>]*href="http://feeds.gawker.com[^>]*>[^<>]*'
            '<img [^>]*src="http://feeds.gawker.com[^<>]*></a></p>',
            re.MULTILINE),
  # annoying Weblogs Inc. footer
  degunk.Re('<h([0-9])></h\1>'),
  degunk.Re('<a href=[^>]*>Permalink</a>.*?<a [^>]*>'
            'Email this</a>.*?Comments</a>',
            re.IGNORECASE + re.DOTALL),
  degunk.Re('<p><font size="1"><hr />SPONSORED BY.*?</p>'),
  # Engadget ads
  degunk.Re('<hr /><p>SPONSORED BY.*?</p>\s*', re.MULTILINE),
  degunk.Re('<p.*?originally appeared on.*?terms for use of feeds.*?</p>',
            re.MULTILINE),
  # Gawker cross-shilling
  degunk.Re(' <br><a href=[^>]*>Comment on this post</a>\s*<br>Related.*',
            re.IGNORECASE + re.DOTALL),
  degunk.Re('<div class="feedflare">.*?</div>', re.IGNORECASE + re.DOTALL),
  # Le Figaro cross-shilling
  degunk.Re('<div class="mf-related"><p>Articles en rapport.*</div>',
            re.IGNORECASE + re.DOTALL),
  # Pheedo ads
  degunk.Re('<div style="font-size: xx-small; color: gray; padding-bottom:'
            '0.5em;">Presented By:</div>[^<>]*<div><a href="http://ads.pheedo'
            '.*?</div>.*?</div>',
            re.MULTILINE + re.DOTALL),
  degunk.Re('<a[^>]*href="http://[^">]*pheedo.com.*?</a>',
            re.MULTILINE + re.DOTALL),
  degunk.Re('<img[^>]*src="http://[^">]*pheedo.com.*?>',
            re.MULTILINE + re.DOTALL),
  # Broken Pheedo links for IEEE Spectrum
  degunk.ReUrl(url=r'http://pheedo.com\1',
               regex_url=r'http://www.pheedo.com(.*)'),
  # Triggit ads
  degunk.Re('(<br>)*<img[^>]*triggit.com.*?>', re.MULTILINE + re.DOTALL),
  # Web bugs
  degunk.Re('<img[^>]*quantserve.com.*?>', re.MULTILINE + re.DOTALL),
  degunk.Re('<img [^>]*invitemedia.com[^>]*>',
            re.MULTILINE + re.DOTALL + re.IGNORECASE),
  # Mediafed ads
  degunk.Re('<br><a[^>]* href="?http://[^"]*.feedsportal.com.*?</a>',
            re.MULTILINE + re.DOTALL),
  # IDFuel URLs should point to full article, not teaser
  degunk.ReUrl(url=r'http://www.idfuel.com/index.php?p=\1&more=1',
               regex_url=r'http://www.idfuel.com/index.php\?p=([0-9]*)'),
  # Strip The Register redirection that causes link_already() to fail
  degunk.ReUrl(
    url=r'\1', regex_url=r'http://go.theregister.com/feed/(http://.*)'),
  # Same for I Cringely
  degunk.ReUrl(
    url=r'http://www.pbs.org/cringely/\1',
    regex_url=r'http://www.pbs.org/cringely/rss1/redir/cringely/(.*)'),
  # Register ads
  degunk.Re('<strong>Advertisement</strong><br>'),
  degunk.Re('<p><a[^>]*href="http://whitepapers.theregister.co.uk.*?</p>',
            re.MULTILINE),
  # Inquirer blegging
  degunk.Re('<div class="mf-viral">.*</div>'),
  # Feediz ads
  degunk.Re('<p>.*?feediz.com.*?</p>', re.MULTILINE + re.DOTALL),
  degunk.Re('<a [^>]*feediz.com.*?</a>', re.MULTILINE + re.DOTALL),
  # Salon ads
  degunk.Re('<p><a href="http://feeds.salon.com/~a[^>]*><img '
            '[^>]*></a></p><img[^>]*>'),
  # RWW ads
  degunk.Re('<p align="right" class="ad">.*?</p>'),
  # bypass Digg
  degunk.Dereference('digg.com', '<h3 id="title1"><a href="([^"]*)"'),
  # DoubleClick ads
  degunk.Re('<a href="http://[^"]*doubleclick.*?</a>',
            re.MULTILINE + re.DOTALL),
  # If I want to share, I can do it myself, thanks
  degunk.Re('<p class="akst_link">.*?</p>', re.MULTILINE + re.DOTALL),
  # Daily Python URL should link to actual articles, not to itself
  degunk.UseFirstLink('http://www.pythonware.com/daily/'),
  degunk.ReTitle('\\1', '<div class="description">.*?<a href=.*?>(.*?)</a>',
                 re.MULTILINE + re.DOTALL),
  # also broken
  degunk.UseFirstLink('http://evanmiller.org/'),
  # Inquirer clutter
  degunk.Re('<p><small>[^<>]*<a href="http://www.theinquirer.net[^<>]*><i>'
            '[^<>]*Read the full article.*', re.MULTILINE + re.DOTALL),
  degunk.Re('<p><small>[^<>]*<a href="http://www.theinquirer.net.*?<i>',
            re.MULTILINE),
  # List apart T-shirt shilling
  degunk.Re('<p><em><strong>Hide Your Shame:</strong> The A List Apart Store'
            '.*?</p>', re.MULTILINE + re.DOTALL),
  # Other misc shilling
  degunk.Re('<p>.*<a href="http://www.beyondsecurity.com.*?</p>',
            re.MULTILINE + re.DOTALL),
  degunk.Re('<fieldset class="zemanta-related">.*?</ul>',
            re.MULTILINE + re.DOTALL),
  # possibly caused by bugs in feedparser
  degunk.Re('<br>[.>]<br>', 0, '<br>', iterate=True),
  # unwarranted multiple empty lines
  degunk.Re('<br>\s*(<br>\s*)+', 0, '<br>'),
  degunk.Re('<p> </p>'),
  degunk.Re('<p [^>]*></p>'),
  degunk.Re('<p>-</p>'),
  degunk.Re('<span[^>]*></span>', 0, '', iterate=True),
  # junk
  degunk.Re('<strong></strong>', 0, ''),
  # unwarranted final empty lines
  degunk.Re('(<br>\s*)+$'),
  # leftover from blegs or ads
  degunk.Re('-\s+(-\s+)+'),
  # GigaOM annoyances
  degunk.Re(r'<img[^>]*src="http://stats.wordpress.com.*?>'),
  degunk.Re(r'\s*<hr[^>]*>\s*<p>\s*<a href="http://t.gigaom.com/.*?</p>',
            re.MULTILINE + re.DOTALL),
  degunk.Re(r'<hr\s?/?>\s*<a href="http://events.gigaom.com/.*</a>',
            re.MULTILINE + re.DOTALL),
  degunk.Re(r'<hr\s?/?>\s*<a href="http://pro.gigaom.com/.*</a>',
            re.MULTILINE + re.DOTALL),
  degunk.Re(r'\s*<hr[^>]*>\s*<p>\s*<a href="http://gigaom.com/sponsor.*?</p>',
            re.MULTILINE + re.DOTALL),
  degunk.Re(r'\s*<hr[^>]*>\s*<p>\s*<a href="http://ads.gigaom.com.*?</p>',
            re.MULTILINE + re.DOTALL),
  # Guardian Related sidebar
  degunk.Re(r'<div class="related" style="float.*?</div>',
            re.MULTILINE + re.DOTALL),
  # PopSci Related sidebar
  degunk.Re(r'<div class="relatedinfo".*?</div>', re.MULTILINE + re.DOTALL),
  # Ars Technica
  degunk.Re(r'<a [^>]* title="Click here to continue reading.*?</a>',
            re.MULTILINE + re.DOTALL),
  degunk.Re('<a href="http://arstechnica.com[^>]*>[^<>]*'
            '<img [^>]*brief_icons.*?</a>',
            re.MULTILINE + re.DOTALL),
  # Coding Horror
  degunk.Re(r'<table>.*?\[advertisement\].*?</table>',
            re.MULTILINE + re.DOTALL),
  # Fooducate
  degunk.Re(r'<p><span[^>]*><strong>Get Fooducated.*?</p>',
            re.MULTILINE + re.DOTALL),
  degunk.Re(r'<p>[^>]*<a href="http://alpha.fooducate.com.*?</p>',
            re.MULTILINE + re.DOTALL),
  # ReadWriteWeb ads
  degunk.Re(r'<p align="right"><em>Sponsor</em><br>.*?</p>',
            re.MULTILINE + re.DOTALL),
  # Laughing Squid
  degunk.Re('<p><hr />\s*<p>\\s*<a href="http://laughingsquid.us/">'
            '.*?Laughing Squid Web Hosting</a>.</p></p>',
            re.MULTILINE + re.DOTALL),
  # FeedBlitz
  degunk.Re('<table.*?feedblitz.com.*?</table>',
            re.MULTILINE + re.DOTALL),
  # Use m.xkcd.com instead of desktop xkcd to get the alt text
  degunk.ReUrl(url=r'http://m.xkcd.com\1',
               regex_url=r'http://xkcd.com(.*)'),
  # Medium
  degunk.Re('<figure.*?https://cdn-images-1.medium.com/max/700/1*PZjwR1Nbluff5IMI6Y1T6g@2x.png.*?</figure>',
            re.MULTILINE + re.DOTALL),
  degunk.Re('<p>.*?on Medium, where people are continuing the conversation by highlighting and responding to this story.*?/p>',
            re.MULTILINE + re.DOTALL),
  # AnandTech
  degunk.Re('<p align=center>'
            '<a href="http://dynamic[^"]*.anandtech.com/www/delivery/'
            '.*?</[ap]>',
            re.MULTILINE + re.DOTALL),
  degunk.Re('<p align=center>'
            '<a href=\'http://dynamic[^\']*.anandtech.com/www/delivery/'
            '.*?</[ap]>',
            re.MULTILINE + re.DOTALL),
]
| |
# Copyright (c) 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Flat network GlusterFS Driver.
Manila shares are subdirectories within a GlusterFS volume. The backend,
a GlusterFS cluster, uses one of the two NFS servers, Gluster-NFS or
NFS-Ganesha, based on a configuration option, to mediate access to the shares.
NFS-Ganesha server supports NFSv3 and v4 protocols, while Gluster-NFS
server supports only NFSv3 protocol.
TODO(rraja): support SMB protocol.
"""
import errno
import os
import re
import sys
import xml.etree.cElementTree as etree
from oslo_config import cfg
from oslo_log import log
import six
from manila import exception
from manila.i18n import _
from manila.i18n import _LE
from manila.i18n import _LW
from manila.share import driver
from manila.share.drivers import ganesha
from manila.share.drivers.ganesha import utils as ganesha_utils
# Module-level logger for this driver.
LOG = log.getLogger(__name__)
# Configuration options consumed by this driver; registered globally below
# and also appended to the per-backend configuration in
# GlusterfsShareDriver.__init__.
GlusterfsManilaShare_opts = [
    cfg.StrOpt('glusterfs_target',
               help='Specifies the GlusterFS volume to be mounted on the '
                    'Manila host. It is of the form '
                    '[remoteuser@]<volserver>:<volid>.'),
    cfg.StrOpt('glusterfs_mount_point_base',
               default='$state_path/mnt',
               help='Base directory containing mount points for Gluster '
                    'volumes.'),
    cfg.StrOpt('glusterfs_nfs_server_type',
               default='Gluster',
               help='Type of NFS server that mediate access to the Gluster '
                    'volumes (Gluster or Ganesha).'),
    cfg.StrOpt('glusterfs_server_password',
               default=None,
               secret=True,
               help="Remote GlusterFS server node's login password. "
                    "This is not required if 'glusterfs_path_to_private_key'"
                    ' is configured.'),
    cfg.StrOpt('glusterfs_path_to_private_key',
               default=None,
               help='Path of Manila host\'s private SSH key file.'),
    cfg.StrOpt('glusterfs_ganesha_server_ip',
               default=None,
               help="Remote Ganesha server node's IP address."),
    cfg.StrOpt('glusterfs_ganesha_server_username',
               default='root',
               help="Remote Ganesha server node's username."),
    cfg.StrOpt('glusterfs_ganesha_server_password',
               default=None,
               secret=True,
               help="Remote Ganesha server node's login password. "
                    "This is not required if 'glusterfs_path_to_private_key'"
                    ' is configured.'),
]
CONF = cfg.CONF
CONF.register_opts(GlusterfsManilaShare_opts)
# Gluster volume option names used to control NFS exporting.
NFS_EXPORT_DIR = 'nfs.export-dir'
NFS_EXPORT_VOL = 'nfs.export-volumes'
# Minimum GlusterFS version supported by this driver.
GLUSTERFS_VERSION_MIN = (3, 5)
class GlusterManager(object):
    """Interface with a GlusterFS volume."""
    # Parses addresses of the form [<user>@]<host>[:/<vol>]; the volume part
    # is optional in the pattern so both forms can be validated against the
    # has_volume requirement in __init__.
    scheme = re.compile('\A(?:(?P<user>[^:@/]+)@)?'
                        '(?P<host>[^:@/]+)'
                        '(?::/(?P<vol>.+))?')
    def __init__(self, address, execf, path_to_private_key=None,
                 remote_server_password=None, has_volume=True):
        """Initialize a GlusterManager instance.
        :param address: the Gluster URI (in [<user>@]<host>:/<vol> format).
        :param execf: executor function for management commands.
        :param path_to_private_key: path to private ssh key of remote server.
        :param remote_server_password: ssh password for remote server.
        :param has_volume: instruction to uri parser regarding how to deal
                           with the optional volume part (True: require its
                           presence, False: require its absence, None: don't
                           require anything about volume).
        """
        m = self.scheme.search(address)
        if m:
            self.volume = m.group('vol')
            if (has_volume is True and not self.volume) or (
                    has_volume is False and self.volume):
                # Volume presence contradicts the has_volume requirement;
                # treat the address as invalid.
                m = None
        if not m:
            raise exception.GlusterfsException(
                _('Invalid gluster address %s.') % address)
        self.remote_user = m.group('user')
        self.host = m.group('host')
        # '<user>@<host>' when a remote user is given, else just '<host>'.
        self.management_address = '@'.join(
            filter(None, (self.remote_user, self.host)))
        self.qualified = address
        if self.volume:
            self.export = ':/'.join([self.host, self.volume])
        else:
            self.export = None
        self.path_to_private_key = path_to_private_key
        self.remote_server_password = remote_server_password
        # Bound callable that runs 'gluster <args>' via the right executor.
        self.gluster_call = self.make_gluster_call(execf)
    def make_gluster_call(self, execf):
        """Execute a Gluster command locally or remotely."""
        # With a remote user, management commands go over SSH; otherwise they
        # run locally as root.  The returned callable prefixes every call
        # with the 'gluster' binary.
        if self.remote_user:
            gluster_execf = ganesha_utils.SSHExecutor(
                self.host, 22, None, self.remote_user,
                password=self.remote_server_password,
                privatekey=self.path_to_private_key)
        else:
            gluster_execf = ganesha_utils.RootExecutor(execf)
        return lambda *args, **kwargs: gluster_execf(*(('gluster',) + args),
                                                     **kwargs)
    def get_gluster_vol_option(self, option):
        """Get the value of an option set on a GlusterFS volume.

        Returns None (implicitly) when the option is not set on the volume.
        """
        # Query volume info as XML for reliable machine parsing.
        args = ('--xml', 'volume', 'info', self.volume)
        try:
            out, err = self.gluster_call(*args)
        except exception.ProcessExecutionError as exc:
            LOG.error(_LE("Error retrieving volume info: %s"), exc.stderr)
            raise exception.GlusterfsException("gluster %s failed" %
                                               ' '.join(args))
        if not out:
            raise exception.GlusterfsException(
                'gluster volume info %s: no data received' %
                self.volume
            )
        vix = etree.fromstring(out)
        # Exactly one volume must match, else the name was ambiguous.
        if int(vix.find('./volInfo/volumes/count').text) != 1:
            raise exception.InvalidShare('Volume name ambiguity')
        for e in vix.findall(".//option"):
            o, v = (e.find(a).text for a in ('name', 'value'))
            if o == option:
                return v
    def get_gluster_version(self):
        """Retrieve GlusterFS version.
        :returns: version (as tuple of strings, example: ('3', '6', '0beta2'))
        """
        try:
            out, err = self.gluster_call('--version')
        except exception.ProcessExecutionError as exc:
            raise exception.GlusterfsException(
                _("'gluster version' failed on server "
                  "%(server)s: %(message)s") %
                {'server': self.host, 'message': six.text_type(exc)})
        try:
            owords = out.split()
            if owords[0] != 'glusterfs':
                raise RuntimeError
            vers = owords[1].split('.')
            # provoke an exception if vers does not start with two numerals
            int(vers[0])
            int(vers[1])
        except Exception:
            raise exception.GlusterfsException(
                _("Cannot parse version info obtained from server "
                  "%(server)s, version info: %(info)s") %
                {'server': self.host, 'info': out})
        return vers
    def check_gluster_version(self, minvers):
        """Retrieve and check GlusterFS version.
        :param minvers: minimum version to require
                        (given as tuple of integers, example: (3, 6))
        """
        vers = self.get_gluster_version()
        # Compare only the numeric prefix, e.g. ('3', '6', '0beta2') -> (3, 6).
        if self.numreduct(vers) < minvers:
            raise exception.GlusterfsException(_(
                "Unsupported GlusterFS version %(version)s on server "
                "%(server)s, minimum requirement: %(minvers)s") % {
                'server': self.host,
                'version': '.'.join(vers),
                'minvers': '.'.join(six.text_type(c) for c in minvers)})
    @staticmethod
    def numreduct(vers):
        """The numeric reduct of a tuple of strings.
        That is, applying an integer conversion map on the longest
        initial segment of vers which consists of numerals.
        """
        numvers = []
        for c in vers:
            try:
                numvers.append(int(c))
            except ValueError:
                # First non-numeric component ends the numeric prefix.
                break
        return tuple(numvers)
class GlusterfsShareDriver(driver.ExecuteMixin, driver.GaneshaMixin,
                           driver.ShareDriver,):
    """Execute commands relating to Shares."""
    def __init__(self, *args, **kwargs):
        # First positional False: this driver does not manage share servers.
        super(GlusterfsShareDriver, self).__init__(False, *args, **kwargs)
        self._helpers = {}  # protocol name -> NAS helper instance
        self.gluster_manager = None
        self.configuration.append_config_values(GlusterfsManilaShare_opts)
        self.backend_name = self.configuration.safe_get(
            'share_backend_name') or 'GlusterFS'
    def do_setup(self, context):
        """Prepares the backend and appropriate NAS helpers."""
        super(GlusterfsShareDriver, self).do_setup(context)
        if not self.configuration.glusterfs_target:
            raise exception.GlusterfsException(
                _('glusterfs_target configuration that specifies the GlusterFS'
                  ' volume to be mounted on the Manila host is not set.'))
        self.gluster_manager = GlusterManager(
            self.configuration.glusterfs_target,
            self._execute,
            self.configuration.glusterfs_path_to_private_key,
            self.configuration.glusterfs_server_password,
        )
        # Fail early on servers older than GLUSTERFS_VERSION_MIN.
        self.gluster_manager.check_gluster_version(GLUSTERFS_VERSION_MIN)
        # Probe for the mount.glusterfs helper binary on the Manila host.
        try:
            self._execute('mount.glusterfs', check_exit_code=False)
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                raise exception.GlusterfsException(
                    _('mount.glusterfs is not installed.'))
            else:
                raise
        # enable quota options of a GlusteFS volume to allow
        # creation of shares of specific size
        args = ('volume', 'quota', self.gluster_manager.volume, 'enable')
        try:
            self.gluster_manager.gluster_call(*args)
        except exception.ProcessExecutionError as exc:
            # 'quota enable' also fails when quota is already enabled; only
            # treat the failure as fatal if quota is actually off.
            if (self.gluster_manager.
                    get_gluster_vol_option('features.quota')) != 'on':
                LOG.error(_LE("Error in tuning GlusterFS volume to enable "
                              "creation of shares of specific size: %s"),
                          exc.stderr)
                raise exception.GlusterfsException(exc)
        self._setup_helpers()
        self._ensure_gluster_vol_mounted()
    def _setup_helpers(self):
        """Initializes protocol-specific NAS drivers."""
        # TODO(rraja): The below seems crude. Accommodate CIFS helper as well?
        # Resolves the configured server type ('Gluster' or 'Ganesha') to the
        # corresponding *NFSHelper class defined in this module.
        nfs_helper = getattr(
            sys.modules[__name__],
            self.configuration.glusterfs_nfs_server_type + 'NFSHelper')
        self._helpers['NFS'] = nfs_helper(self._execute,
                                          self.configuration,
                                          gluster_manager=self.gluster_manager)
        for helper in self._helpers.values():
            helper.init_helper()
    def check_for_setup_error(self):
        # Setup validation already happens in do_setup().
        pass
    def _get_mount_point_for_gluster_vol(self):
        """Return mount point for the GlusterFS volume."""
        return os.path.join(self.configuration.glusterfs_mount_point_base,
                            self.gluster_manager.volume)
    def _do_mount(self, cmd, ensure):
        """Execute the mount command based on 'ensure' parameter.
        :param cmd: command to do the actual mount
        :param ensure: boolean to allow remounting a volume with a warning
        """
        try:
            self._execute(*cmd, run_as_root=True)
        except exception.ProcessExecutionError as exc:
            # With ensure=True an already-mounted volume is only warned about.
            if ensure and 'already mounted' in exc.stderr:
                LOG.warn(_LW("%s is already mounted"),
                         self.gluster_manager.export)
            else:
                raise exception.GlusterfsException(
                    'Unable to mount Gluster volume'
                )
    def _mount_gluster_vol(self, mount_path, ensure=False):
        """Mount GlusterFS volume at the specified mount path."""
        self._execute('mkdir', '-p', mount_path)
        command = ['mount', '-t', 'glusterfs', self.gluster_manager.export,
                   mount_path]
        self._do_mount(command, ensure)
    def _ensure_gluster_vol_mounted(self):
        """Ensure GlusterFS volume is native-mounted on Manila host."""
        mount_path = self._get_mount_point_for_gluster_vol()
        try:
            self._mount_gluster_vol(mount_path, ensure=True)
        except exception.GlusterfsException:
            LOG.error(_LE('Could not mount the Gluster volume %s'),
                      self.gluster_manager.volume)
            raise
    def _get_local_share_path(self, share):
        """Determine mount path of the GlusterFS volume in the Manila host."""
        local_vol_path = self._get_mount_point_for_gluster_vol()
        if not os.access(local_vol_path, os.R_OK):
            raise exception.GlusterfsException('share path %s does not exist' %
                                               local_vol_path)
        return os.path.join(local_vol_path, share['name'])
    def _update_share_stats(self):
        """Retrieve stats info from the GlusterFS volume."""
        # sanity check for gluster ctl mount
        smpb = os.stat(self.configuration.glusterfs_mount_point_base)
        smp = os.stat(self._get_mount_point_for_gluster_vol())
        # Same device id means nothing is actually mounted at the mount point.
        if smpb.st_dev == smp.st_dev:
            raise exception.GlusterfsException(
                _("GlusterFS control mount is not available")
            )
        smpv = os.statvfs(self._get_mount_point_for_gluster_vol())
        # '>> 30' converts a byte count to whole GiB.
        data = dict(
            storage_protocol='NFS',
            vendor_name='Red Hat',
            share_backend_name=self.backend_name,
            reserved_percentage=self.configuration.reserved_share_percentage,
            total_capacity_gb=(smpv.f_blocks * smpv.f_frsize) >> 30,
            free_capacity_gb=(smpv.f_bavail * smpv.f_frsize) >> 30)
        super(GlusterfsShareDriver, self)._update_share_stats(data)
    def get_network_allocations_number(self):
        # Flat network driver: no dedicated network allocations needed.
        return 0
    def create_share(self, ctx, share, share_server=None):
        """Create a sub-directory/share in the GlusterFS volume."""
        # probe into getting a NAS protocol helper for the share in order
        # to facilitate early detection of unsupported protocol type
        self._get_helper(share)
        sizestr = six.text_type(share['size']) + 'GB'
        share_dir = '/' + share['name']
        local_share_path = self._get_local_share_path(share)
        cmd = ['mkdir', local_share_path]
        # set hard limit quota on the sub-directory/share
        args = ('volume', 'quota', self.gluster_manager.volume,
                'limit-usage', share_dir, sizestr)
        try:
            self._execute(*cmd, run_as_root=True)
            self.gluster_manager.gluster_call(*args)
        except exception.ProcessExecutionError as exc:
            # Remove the partially created directory before failing.
            self._cleanup_create_share(local_share_path, share['name'])
            LOG.error(_LE('Unable to create share %s'), share['name'])
            raise exception.GlusterfsException(exc)
        export_location = os.path.join(self.gluster_manager.qualified,
                                       share['name'])
        return export_location
    def _cleanup_create_share(self, share_path, share_name):
        """Cleanup share that errored out during its creation."""
        if os.path.exists(share_path):
            cmd = ['rm', '-rf', share_path]
            try:
                self._execute(*cmd, run_as_root=True)
            except exception.ProcessExecutionError as exc:
                LOG.error(_LE('Cannot cleanup share, %s, that errored out '
                              'during its creation, but exists in GlusterFS '
                              'volume.'), share_name)
                raise exception.GlusterfsException(exc)
    def delete_share(self, context, share, share_server=None):
        """Remove a sub-directory/share from the GlusterFS volume."""
        local_share_path = self._get_local_share_path(share)
        cmd = ['rm', '-rf', local_share_path]
        try:
            self._execute(*cmd, run_as_root=True)
        except exception.ProcessExecutionError:
            LOG.error(_LE('Unable to delete share %s'), share['name'])
            raise
    def create_snapshot(self, context, snapshot, share_server=None):
        """TBD: Is called to create snapshot."""
        raise NotImplementedError()
    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None):
        """Is called to create share from snapshot."""
        raise NotImplementedError()
    def delete_snapshot(self, context, snapshot, share_server=None):
        """TBD: Is called to remove snapshot."""
        raise NotImplementedError()
    def ensure_share(self, context, share, share_server=None):
        """Might not be needed?"""
        pass
    def _get_helper(self, share):
        """Choose a protocol specific helper class."""
        if share['share_proto'] == 'NFS':
            return self._helpers['NFS']
        else:
            raise exception.InvalidShare(
                reason=(_('Unsupported share type, %s.')
                        % share['share_proto']))
    def allow_access(self, context, share, access, share_server=None):
        """Allow access to the share."""
        self._get_helper(share).allow_access('/', share, access)
    def deny_access(self, context, share, access, share_server=None):
        """Deny access to the share."""
        self._get_helper(share).deny_access('/', share, access)
class GlusterNFSHelper(ganesha.NASHelperBase):
    """Manage shares with Gluster-NFS server."""
    def __init__(self, execute, config_object, **kwargs):
        # The GlusterManager is popped from kwargs so the base class does not
        # see an unexpected keyword argument.
        self.gluster_manager = kwargs.pop('gluster_manager')
        super(GlusterNFSHelper, self).__init__(execute, config_object,
                                               **kwargs)
    def init_helper(self):
        # exporting the whole volume must be prohibited
        # to not to defeat access control
        args = ('volume', 'set', self.gluster_manager.volume, NFS_EXPORT_VOL,
                'off')
        try:
            self.gluster_manager.gluster_call(*args)
        except exception.ProcessExecutionError as exc:
            LOG.error(_LE("Error in tuning GlusterFS volume to prevent "
                          "exporting the entire volume: %s"), exc.stderr)
            raise exception.GlusterfsException("gluster %s failed" %
                                               ' '.join(args))
    def _get_export_dir_dict(self):
        """Get the export entries of shares in the GlusterFS volume."""
        export_dir = self.gluster_manager.get_gluster_vol_option(
            NFS_EXPORT_DIR)
        edh = {}
        if export_dir:
            # see
            # https://github.com/gluster/glusterfs
            # /blob/aa19909/xlators/nfs/server/src/nfs.c#L1582
            # regarding the format of nfs.export-dir
            edl = export_dir.split(',')
            # parsing export_dir into a dict of {dir: [hostpec,..]..}
            # format
            r = re.compile('\A/(.*)\((.*)\)\Z')
            for ed in edl:
                d, e = r.match(ed).groups()
                edh[d] = e.split('|')
        return edh
    def _manage_access(self, share_name, access_type, access_to, cbk):
        """Manage share access with cbk.
        Adjust the exports of the Gluster-NFS server using cbk.
        :param share_name: name of the share
        :type share_name: string
        :param access_type: type of access allowed in Manila
        :type access_type: string
        :param access_to: ip of the guest whose share access is managed
        :type access_to: string
        :param cbk: callback to adjust the exports of NFS server
        Following is the description of cbk(ddict, edir, host).
        :param ddict: association of shares with ips that have access to them
        :type ddict: dict
        :param edir: name of share i.e. export directory
        :type edir: string
        :param host: ip address derived from the access object
        :type host: string
        :returns: bool (cbk leaves ddict intact) or None (cbk modifies ddict)
        """
        if access_type != 'ip':
            raise exception.InvalidShareAccess('only ip access type allowed')
        export_dir_dict = self._get_export_dir_dict()
        # A truthy return from cbk means the exports are already in the
        # desired state, so there is nothing to update.
        if cbk(export_dir_dict, share_name, access_to):
            return
        if export_dir_dict:
            # Serialize back into the '/dir1(host|host),/dir2(host)' format.
            export_dir_new = (",".join("/%s(%s)" % (d, "|".join(v))
                              for d, v in sorted(export_dir_dict.items())))
            args = ('volume', 'set', self.gluster_manager.volume,
                    NFS_EXPORT_DIR, export_dir_new)
        else:
            # No exports left: reset the volume option to its default.
            args = ('volume', 'reset', self.gluster_manager.volume,
                    NFS_EXPORT_DIR)
        try:
            self.gluster_manager.gluster_call(*args)
        except exception.ProcessExecutionError as exc:
            LOG.error(_LE("Error in gluster volume set: %s"), exc.stderr)
            raise
    def allow_access(self, base, share, access):
        """Allow access to a share."""
        def cbk(ddict, edir, host):
            # Returns True (no-op) when host already has access to edir.
            if edir not in ddict:
                ddict[edir] = []
            if host in ddict[edir]:
                return True
            ddict[edir].append(host)
        self._manage_access(share['name'], access['access_type'],
                            access['access_to'], cbk)
    def deny_access(self, base, share, access):
        """Deny access to a share."""
        def cbk(ddict, edir, host):
            # Returns True (no-op) when host has no access to revoke.
            if edir not in ddict or host not in ddict[edir]:
                return True
            ddict[edir].remove(host)
            if not ddict[edir]:
                ddict.pop(edir)
        self._manage_access(share['name'], access['access_type'],
                            access['access_to'], cbk)
class GaneshaNFSHelper(ganesha.GaneshaNASHelper):
    """NFS-Ganesha helper for shares backed by a GlusterFS volume."""

    def __init__(self, execute, config_object, **kwargs):
        """Select a local or SSH-based executor, then delegate to the base."""
        self.gluster_manager = kwargs.pop('gluster_manager')
        remote_ip = config_object.glusterfs_ganesha_server_ip
        if remote_ip:
            # Ganesha runs on a remote node: issue commands over SSH.
            executor = ganesha_utils.SSHExecutor(
                remote_ip, 22, None,
                config_object.glusterfs_ganesha_server_username,
                password=config_object.glusterfs_ganesha_server_password,
                privatekey=config_object.glusterfs_path_to_private_key)
        else:
            # Ganesha is local: wrap the supplied executor to run as root.
            executor = ganesha_utils.RootExecutor(execute)
        super(GaneshaNFSHelper, self).__init__(executor, config_object,
                                               **kwargs)

    def _default_config_hook(self):
        """Callback to provide default export block."""
        dconf = super(GaneshaNFSHelper, self)._default_config_hook()
        conf_dir = ganesha_utils.path_from(__file__, "glusterfs", "conf")
        ganesha_utils.patch(dconf, self._load_conf_dir(conf_dir))
        return dconf

    def _fsal_hook(self, base, share, access):
        """Callback to create FSAL subblock."""
        return {
            "Hostname": self.gluster_manager.host,
            "Volume": self.gluster_manager.volume,
            "Volpath": "/" + share['name'],
        }
| |
"""Tests for Philips Hue config flow."""
import asyncio
from unittest.mock import Mock, patch
import aiohue
import pytest
import voluptuous as vol
from homeassistant.components.hue import config_flow, const, errors
from tests.common import MockConfigEntry, mock_coro
async def test_flow_works(hass, aioclient_mock):
    """Happy path: one bridge discovered, linked, and a config entry created."""
    # NUPNP discovery returns a single bridge, so the flow proceeds to link.
    aioclient_mock.get(
        const.API_NUPNP, json=[{"internalipaddress": "1.2.3.4", "id": "bla"}]
    )
    flow = config_flow.HueFlowHandler()
    flow.hass = hass
    await flow.async_step_init()
    with patch("aiohue.Bridge") as mock_bridge:
        def mock_constructor(host, websession, username=None):
            """Fake the bridge constructor."""
            # Record the host so the test can assert which bridge was used.
            mock_bridge.host = host
            return mock_bridge
        mock_bridge.side_effect = mock_constructor
        mock_bridge.username = "username-abc"
        mock_bridge.config.name = "Mock Bridge"
        mock_bridge.config.bridgeid = "bridge-id-1234"
        mock_bridge.create_user.return_value = mock_coro()
        mock_bridge.initialize.return_value = mock_coro()
        result = await flow.async_step_link(user_input={})
    assert mock_bridge.host == "1.2.3.4"
    assert len(mock_bridge.create_user.mock_calls) == 1
    assert len(mock_bridge.initialize.mock_calls) == 1
    # The created entry carries the bridge details captured above.
    assert result["type"] == "create_entry"
    assert result["title"] == "Mock Bridge"
    assert result["data"] == {
        "host": "1.2.3.4",
        "bridge_id": "bridge-id-1234",
        "username": "username-abc",
    }
async def test_flow_no_discovered_bridges(hass, aioclient_mock):
    """Flow aborts when bridge discovery returns an empty list."""
    # Simulate the NUPNP discovery endpoint finding no bridges at all.
    aioclient_mock.get(const.API_NUPNP, json=[])

    handler = config_flow.HueFlowHandler()
    handler.hass = hass
    outcome = await handler.async_step_init()

    assert outcome["type"] == "abort"
async def test_flow_all_discovered_bridges_exist(hass, aioclient_mock):
    """Flow aborts when every discovered bridge is already configured."""
    # The only bridge discovery returns is already set up as an entry.
    MockConfigEntry(domain="hue", data={"host": "1.2.3.4"}).add_to_hass(hass)
    aioclient_mock.get(
        const.API_NUPNP, json=[{"internalipaddress": "1.2.3.4", "id": "bla"}]
    )

    handler = config_flow.HueFlowHandler()
    handler.hass = hass
    outcome = await handler.async_step_init()

    assert outcome["type"] == "abort"
async def test_flow_one_bridge_discovered(hass, aioclient_mock):
    """A single discovered bridge sends the flow straight to the link step."""
    discovered = [{"internalipaddress": "1.2.3.4", "id": "bla"}]
    aioclient_mock.get(const.API_NUPNP, json=discovered)

    handler = config_flow.HueFlowHandler()
    handler.hass = hass
    outcome = await handler.async_step_init()

    assert outcome["type"] == "form"
    assert outcome["step_id"] == "link"
async def test_flow_two_bridges_discovered(hass, aioclient_mock):
    """Show a bridge picker form when more than one bridge is discovered."""
    aioclient_mock.get(
        const.API_NUPNP,
        json=[
            {"internalipaddress": "1.2.3.4", "id": "bla"},
            {"internalipaddress": "5.6.7.8", "id": "beer"},
        ],
    )

    handler = config_flow.HueFlowHandler()
    handler.hass = hass

    result = await handler.async_step_init()
    assert result["type"] == "form"
    assert result["step_id"] == "init"

    schema = result["data_schema"]
    # Only the two discovered hosts pass schema validation.
    with pytest.raises(vol.Invalid):
        schema({"host": "0.0.0.0"})
    schema({"host": "1.2.3.4"})
    schema({"host": "5.6.7.8"})
async def test_flow_two_bridges_discovered_one_new(hass, aioclient_mock):
    """Skip the already-configured bridge and link the remaining new one."""
    aioclient_mock.get(
        const.API_NUPNP,
        json=[
            {"internalipaddress": "1.2.3.4", "id": "bla"},
            {"internalipaddress": "5.6.7.8", "id": "beer"},
        ],
    )
    entry = MockConfigEntry(domain="hue", data={"host": "1.2.3.4"})
    entry.add_to_hass(hass)

    handler = config_flow.HueFlowHandler()
    handler.hass = hass

    result = await handler.async_step_init()
    assert result["type"] == "form"
    assert result["step_id"] == "link"
    assert handler.host == "5.6.7.8"
async def test_flow_timeout_discovery(hass):
    """Abort the flow when nupnp discovery times out."""
    handler = config_flow.HueFlowHandler()
    handler.hass = hass

    with patch(
        "homeassistant.components.hue.config_flow.discover_nupnp",
        side_effect=asyncio.TimeoutError,
    ):
        result = await handler.async_step_init()

    assert result["type"] == "abort"
async def test_flow_link_timeout(hass):
    """Show the linking error when creating a bridge user times out."""
    handler = config_flow.HueFlowHandler()
    handler.hass = hass

    with patch("aiohue.Bridge.create_user", side_effect=asyncio.TimeoutError):
        result = await handler.async_step_link({})

    assert result["type"] == "form"
    assert result["step_id"] == "link"
    assert result["errors"] == {"base": "linking"}
async def test_flow_link_button_not_pressed(hass):
    """Show register_failed when the bridge link button was not pressed."""
    handler = config_flow.HueFlowHandler()
    handler.hass = hass

    with patch("aiohue.Bridge.create_user", side_effect=aiohue.LinkButtonNotPressed):
        result = await handler.async_step_link({})

    assert result["type"] == "form"
    assert result["step_id"] == "link"
    assert result["errors"] == {"base": "register_failed"}
async def test_flow_link_unknown_host(hass):
    """Show the linking error when the bridge cannot be reached."""
    handler = config_flow.HueFlowHandler()
    handler.hass = hass

    with patch("aiohue.Bridge.create_user", side_effect=aiohue.RequestError):
        result = await handler.async_step_link({})

    assert result["type"] == "form"
    assert result["step_id"] == "link"
    assert result["errors"] == {"base": "linking"}
async def test_bridge_ssdp(hass):
    """Start the link step for a Hue bridge discovered over SSDP."""
    handler = config_flow.HueFlowHandler()
    handler.hass = hass
    handler.context = {}

    discovery_info = {
        "host": "0.0.0.0",
        "serial": "1234",
        "manufacturerURL": config_flow.HUE_MANUFACTURERURL,
    }
    with patch.object(
        config_flow, "get_bridge", side_effect=errors.AuthenticationRequired
    ):
        result = await handler.async_step_ssdp(discovery_info)

    assert result["type"] == "form"
    assert result["step_id"] == "link"
async def test_bridge_ssdp_discover_other_bridge(hass):
    """Abort SSDP flows whose manufacturer URL is not Philips Hue."""
    handler = config_flow.HueFlowHandler()
    handler.hass = hass

    result = await handler.async_step_ssdp(
        {"manufacturerURL": "http://www.notphilips.com"}
    )

    assert result["type"] == "abort"
async def test_bridge_ssdp_emulated_hue(hass):
    """Abort when the SSDP announcement comes from an emulated_hue instance."""
    handler = config_flow.HueFlowHandler()
    handler.hass = hass
    handler.context = {}

    result = await handler.async_step_ssdp(
        {
            "name": "HASS Bridge",
            "host": "0.0.0.0",
            "serial": "1234",
            "manufacturerURL": config_flow.HUE_MANUFACTURERURL,
        }
    )

    assert result["type"] == "abort"
    assert result["reason"] == "not_hue_bridge"
async def test_bridge_ssdp_espalexa(hass):
    """Abort when the SSDP announcement comes from an Espalexa device."""
    handler = config_flow.HueFlowHandler()
    handler.hass = hass
    handler.context = {}

    result = await handler.async_step_ssdp(
        {
            "name": "Espalexa (0.0.0.0)",
            "host": "0.0.0.0",
            "serial": "1234",
            "manufacturerURL": config_flow.HUE_MANUFACTURERURL,
        }
    )

    assert result["type"] == "abort"
    assert result["reason"] == "not_hue_bridge"
async def test_bridge_ssdp_already_configured(hass):
    """Abort SSDP discovery of a bridge that is already set up."""
    entry = MockConfigEntry(domain="hue", data={"host": "0.0.0.0"})
    entry.add_to_hass(hass)

    handler = config_flow.HueFlowHandler()
    handler.hass = hass
    handler.context = {}

    result = await handler.async_step_ssdp(
        {
            "host": "0.0.0.0",
            "serial": "1234",
            "manufacturerURL": config_flow.HUE_MANUFACTURERURL,
        }
    )

    assert result["type"] == "abort"
async def test_import_with_existing_config(hass):
    """Create an entry when importing a host that has a config file."""
    handler = config_flow.HueFlowHandler()
    handler.hass = hass
    handler.context = {}

    bridge = Mock()
    bridge.username = "username-abc"
    bridge.config.bridgeid = "bridge-id-1234"
    bridge.config.name = "Mock Bridge"
    bridge.host = "0.0.0.0"

    with patch.object(
        config_flow, "_find_username_from_config", return_value="mock-user"
    ), patch.object(config_flow, "get_bridge", return_value=mock_coro(bridge)):
        result = await handler.async_step_import(
            {"host": "0.0.0.0", "path": "bla.conf"}
        )

    assert result["type"] == "create_entry"
    assert result["title"] == "Mock Bridge"
    assert result["data"] == {
        "host": "0.0.0.0",
        "bridge_id": "bridge-id-1234",
        "username": "username-abc",
    }
async def test_import_with_no_config(hass):
    """Fall back to the link step when importing a host with no config file."""
    handler = config_flow.HueFlowHandler()
    handler.hass = hass
    handler.context = {}

    with patch.object(
        config_flow, "get_bridge", side_effect=errors.AuthenticationRequired
    ):
        result = await handler.async_step_import({"host": "0.0.0.0"})

    assert result["type"] == "form"
    assert result["step_id"] == "link"
async def test_import_with_existing_but_invalid_config(hass):
    """Fall back to the link step when the config file username is invalid."""
    handler = config_flow.HueFlowHandler()
    handler.hass = hass
    handler.context = {}

    with patch.object(
        config_flow, "_find_username_from_config", return_value="mock-user"
    ), patch.object(
        config_flow, "get_bridge", side_effect=errors.AuthenticationRequired
    ):
        result = await handler.async_step_import(
            {"host": "0.0.0.0", "path": "bla.conf"}
        )

    assert result["type"] == "form"
    assert result["step_id"] == "link"
async def test_import_cannot_connect(hass):
    """Abort when importing a host that we cannot connect to."""
    handler = config_flow.HueFlowHandler()
    handler.hass = hass
    handler.context = {}

    with patch.object(config_flow, "get_bridge", side_effect=errors.CannotConnect):
        result = await handler.async_step_import({"host": "0.0.0.0"})

    assert result["type"] == "abort"
    assert result["reason"] == "cannot_connect"
async def test_creating_entry_removes_entries_for_same_host_or_bridge(hass):
    """Verify stale entries sharing a host or bridge id get removed.

    An IP can only hold a single bridge and a single bridge can only be
    accessible via a single IP. So when we create a new entry, we'll remove
    all existing entries that either have same IP or same bridge_id.
    """
    MockConfigEntry(
        domain="hue", data={"host": "0.0.0.0", "bridge_id": "id-1234"}
    ).add_to_hass(hass)
    MockConfigEntry(
        domain="hue", data={"host": "1.2.3.4", "bridge_id": "id-1234"}
    ).add_to_hass(hass)
    assert len(hass.config_entries.async_entries("hue")) == 2

    handler = config_flow.HueFlowHandler()
    handler.hass = hass
    handler.context = {}

    bridge = Mock()
    bridge.username = "username-abc"
    bridge.config.bridgeid = "id-1234"
    bridge.config.name = "Mock Bridge"
    bridge.host = "0.0.0.0"

    with patch.object(config_flow, "get_bridge", return_value=mock_coro(bridge)):
        result = await handler.async_step_import({"host": "0.0.0.0"})

    assert result["type"] == "create_entry"
    assert result["title"] == "Mock Bridge"
    assert result["data"] == {
        "host": "0.0.0.0",
        "bridge_id": "id-1234",
        "username": "username-abc",
    }
    # We did not process the result of this entry but already removed the old
    # ones. So we should have 0 entries.
    assert len(hass.config_entries.async_entries("hue")) == 0
async def test_bridge_homekit(hass):
    """Start the link step for a bridge discovered via HomeKit."""
    handler = config_flow.HueFlowHandler()
    handler.hass = hass
    handler.context = {}

    discovery_info = {
        "host": "0.0.0.0",
        "serial": "1234",
        "manufacturerURL": config_flow.HUE_MANUFACTURERURL,
    }
    with patch.object(
        config_flow, "get_bridge", side_effect=errors.AuthenticationRequired
    ):
        result = await handler.async_step_homekit(discovery_info)

    assert result["type"] == "form"
    assert result["step_id"] == "link"
async def test_bridge_homekit_already_configured(hass):
    """Abort HomeKit discovery of a bridge that is already configured."""
    entry = MockConfigEntry(domain="hue", data={"host": "0.0.0.0"})
    entry.add_to_hass(hass)

    handler = config_flow.HueFlowHandler()
    handler.hass = hass
    handler.context = {}

    result = await handler.async_step_homekit({"host": "0.0.0.0"})

    assert result["type"] == "abort"
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lucene import JArray, \
PythonSet, PythonList, PythonIterator, PythonListIterator, JavaError, \
NoSuchElementException, IllegalStateException, IndexOutOfBoundsException
class JavaSet(PythonSet):
    """
    This class implements java.util.Set around a Python set instance it wraps.

    Return values follow the java.util.Set contract: mutators return True
    only when the underlying set actually changed.
    """

    def __init__(self, _set):
        super(JavaSet, self).__init__()
        self._set = _set  # the wrapped Python set

    def __contains__(self, obj):
        return obj in self._set

    def __len__(self):
        return len(self._set)

    def __iter__(self):
        return iter(self._set)

    def add(self, obj):
        # True if the element was absent and has been added (Set.add contract).
        if obj not in self._set:
            self._set.add(obj)
            return True
        return False

    def addAll(self, collection):
        # True if at least one new element was added (set grew).
        size = len(self._set)
        self._set.update(collection)
        return len(self._set) > size

    def clear(self):
        self._set.clear()

    def contains(self, obj):
        return obj in self._set

    def containsAll(self, collection):
        for obj in collection:
            if obj not in self._set:
                return False
        return True

    def equals(self, collection):
        # Equal only to another JavaSet wrapping an equal Python set.
        if type(self) is type(collection):
            return self._set == collection._set
        return False

    def isEmpty(self):
        return len(self._set) == 0

    def iterator(self):
        # java.util.Iterator facade. Python (2.x) iterators cannot peek, so
        # hasNext() buffers one element in _self._next and next() consumes it.
        class _iterator(PythonIterator):
            def __init__(_self):
                super(_iterator, _self).__init__()
                _self._iterator = iter(self._set)
            def hasNext(_self):
                if hasattr(_self, '_next'):
                    return True
                try:
                    _self._next = _self._iterator.next()
                    return True
                except StopIteration:
                    return False
            def next(_self):
                if hasattr(_self, '_next'):
                    # consume the element buffered by hasNext()
                    next = _self._next
                    del _self._next
                else:
                    next = _self._iterator.next()
                return next
        return _iterator()

    def remove(self, obj):
        # True if the element was present and removed.
        try:
            self._set.remove(obj)
            return True
        except KeyError:
            return False

    def removeAll(self, collection):
        # True if any element was removed; missing elements are ignored.
        result = False
        for obj in collection:
            try:
                self._set.remove(obj)
                result = True
            except KeyError:
                pass
        return result

    def retainAll(self, collection):
        # Keep only elements also present in collection. Iterate over a copy
        # because the underlying set is mutated during the loop.
        result = False
        for obj in list(self._set):
            if obj not in collection:
                self._set.remove(obj)
                result = True
        return result

    def size(self):
        return len(self._set)

    def toArray(self):  # JavaSet
        return list(self._set)
class JavaListIterator(PythonListIterator):
"""
This class implements java.util.ListIterator for a Python list instance it
wraps. (simple bidirectional iterator)
"""
def __init__(self, _lst, index=0):
super(JavaListIterator, self).__init__()
self._lst = _lst
self._lastIndex = -1 # keep state for remove/set
self.index = index
def next(self):
if self.index >= len(self._lst):
raise JavaError, NoSuchElementException(str(self.index))
result = self._lst[self.index]
self._lastIndex = self.index
self.index += 1
return result
def previous(self):
if self.index <= 0:
raise JavaError, NoSuchElementException(str(self.index - 1))
self.index -= 1
self._lastIndex = self.index
return self._lst[self.index]
def hasPrevious(self):
return self.index > 0
def hasNext(self):
return self.index < len(self._lst)
def nextIndex(self):
return min(self.index, len(self._lst))
def previousIndex(self):
return max(-1, self.index - 1)
def add(self, element):
"""
Inserts the specified element into the list.
The element is inserted immediately before the next element
that would be returned by next, if any, and after the next
element that would be returned by previous, if any.
"""
if self._lastIndex < 0:
raise JavaError, IllegalStateException("add")
self._lst.insert(self.index, element)
self.index += 1
self._lastIndex = -1 # invalidate state
def remove(self):
"""
Removes from the list the last element that
was returned by next or previous.
"""
if self._lastIndex < 0:
raise JavaError, IllegalStateException("remove")
del self._lst[self._lastIndex]
self._lastIndex = -1 # invalidate state
def set(self, element):
"""
Replaces the last element returned by next or previous
with the specified element.
"""
if self._lastIndex < 0:
raise JavaError, IllegalStateException("set")
self._lst[self._lastIndex] = element
def __iter__(self):
return self
class JavaList(PythonList):
"""
This class implements java.util.List around a Python list instance it wraps.
"""
def __init__(self, _lst):
super(JavaList, self).__init__()
self._lst = _lst
def __contains__(self, obj):
return obj in self._lst
def __len__(self):
return len(self._lst)
def __iter__(self):
return iter(self._lst)
def add(self, index, obj):
self._lst.insert(index, obj)
def addAll(self, collection):
size = len(self._lst)
self._lst.extend(collection)
return len(self._lst) > size
def addAll(self, index, collection):
size = len(self._lst)
self._lst[index:index] = collection
return len(self._lst) > size
def clear(self):
del self._lst[:]
def contains(self, obj):
return obj in self._lst
def containsAll(self, collection):
for obj in collection:
if obj not in self._lst:
return False
return True
def equals(self, collection):
if type(self) is type(collection):
return self._lst == collection._lst
return False
def get(self, index):
if index < 0 or index >= self.size():
raise JavaError, IndexOutOfBoundsException(str(index))
return self._lst[index]
def indexOf(self, obj):
try:
return self._lst.index(obj)
except ValueError:
return -1
def isEmpty(self):
return len(self._lst) == 0
def iterator(self):
class _iterator(PythonIterator):
def __init__(_self):
super(_iterator, _self).__init__()
_self._iterator = iter(self._lst)
def hasNext(_self):
if hasattr(_self, '_next'):
return True
try:
_self._next = _self._iterator.next()
return True
except StopIteration:
return False
def next(_self):
if hasattr(_self, '_next'):
next = _self._next
del _self._next
else:
next = _self._iterator.next()
return next
return _iterator()
def lastIndexOf(self, obj):
i = len(self._lst)-1
while (i>=0):
if obj.equals(self._lst[i]):
break
i -= 1
return i
def listIterator(self, index=0):
return JavaListIterator(self._lst, index)
def remove(self, obj_or_index):
if type(obj_or_index) is type(1):
return removeAt(int(obj_or_index))
return removeElement(obj_or_index)
def removeAt(self, pos):
"""
Removes the element at the specified position in this list.
Note: private method called from Java via remove(int index)
index is already checked (or IndexOutOfBoundsException thrown)
"""
try:
el = self._lst[pos]
del self._lst[pos]
return el
except IndexError:
# should not happen
return None
def removeObject(self, obj):
"""
Removes the first occurrence of the specified object
from this list, if it is present
"""
try:
self._lst.remove(obj)
return True
except ValueError:
return False
def removeAll(self, collection):
result = False
for obj in collection:
if self.removeElement(obj):
result = True
return result
def retainAll(self, collection):
result = False
for obj in self._lst:
if obj not in collection and self.removeElement(obj):
result = True
return result
def size(self):
return len(self._lst)
def toArray(self):
return self._lst
def subListChecked(self, fromIndex, toIndex):
"""
Note: private method called from Java via subList()
from/to index are already checked (or IndexOutOfBoundsException thrown)
also IllegalArgumentException is thronw if the endpoint indices
are out of order (fromIndex > toIndex)
"""
sublst = self._lst[fromIndex:toIndex]
return JavaList(sublst)
def set(self, index, obj):
if index < 0 or index >= self.size():
raise JavaError, IndexOutOfBoundsException(str(index))
self._lst[index] = obj
| |
# Classes for scrAPI Harvesters
from __future__ import unicode_literals
import abc
import json
import logging
from datetime import timedelta, date
from lxml import etree
from scrapi import util
from scrapi import requests
from scrapi import registry
from scrapi import settings
from scrapi.base.schemas import OAISCHEMA
from scrapi.base.helpers import updated_schema, build_properties
from scrapi.linter.document import RawDocument, NormalizedDocument
from scrapi.base.transformer import XMLTransformer, JSONTransformer
# Emit INFO-level log records by default for all harvester modules.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Install a recovering XML parser globally so slightly malformed provider
# responses still yield a usable tree instead of raising a parse error.
etree.set_default_parser(etree.XMLParser(recover=True))
class HarvesterMeta(abc.ABCMeta):
    """Metaclass that auto-registers every concrete harvester class.

    A class is instantiated and stored in the scrapi registry under its
    short_name as soon as it is defined, unless it still has abstract
    methods or its short_name appears in settings.disabled.
    """

    def __init__(cls, name, bases, dct):
        super(HarvesterMeta, cls).__init__(name, bases, dct)
        is_concrete = not cls.__abstractmethods__
        if is_concrete and cls.short_name not in settings.disabled:
            registry[cls.short_name] = cls()
        else:
            logger.info('Class {} not added to registry'.format(cls.__name__))
class BaseHarvester(object):
    """Abstract base class that all harvesters should inherit from.

    Concrete subclasses are automatically instantiated and added to the
    scrapi registry at class-creation time by HarvesterMeta (unless their
    short_name is listed in settings.disabled).
    """
    # Python 2 style metaclass hook.
    __metaclass__ = HarvesterMeta

    @abc.abstractproperty
    def short_name(self):
        # Unique short identifier; used as the key in the scrapi registry.
        raise NotImplementedError

    @abc.abstractproperty
    def long_name(self):
        # Presumably the full provider name -- confirm against subclasses.
        raise NotImplementedError

    @abc.abstractproperty
    def url(self):
        # URL associated with the harvested provider.
        raise NotImplementedError

    @abc.abstractproperty
    def file_format(self):
        # Format of the raw documents produced, e.g. 'json' or 'xml'.
        raise NotImplementedError

    @abc.abstractmethod
    def harvest(self, start_date=None, end_date=None):
        # Fetch raw documents for the given date window.
        raise NotImplementedError

    @abc.abstractmethod
    def normalize(self, raw_doc):
        # Convert one raw document into a NormalizedDocument.
        raise NotImplementedError

    @property
    def run_at(self):
        # Default schedule dictionary for periodic harvesting.
        return {
            'hour': 22,
            'minute': 59,
            'day_of_week': 'mon-fri',
        }
class JSONHarvester(BaseHarvester, JSONTransformer):
    """Harvester base for providers that return JSON documents."""

    file_format = 'json'

    def normalize(self, raw_doc):
        """Parse, transform and tag one raw JSON document."""
        parsed = json.loads(raw_doc['doc'])
        document = self.transform(parsed, fail=settings.RAISE_IN_TRANSFORMER)
        document['shareProperties'] = {'source': self.short_name}
        return NormalizedDocument(document)
class XMLHarvester(BaseHarvester, XMLTransformer):
    """Harvester base for providers that return XML documents."""

    file_format = 'xml'

    def normalize(self, raw_doc):
        """Parse, transform and tag one raw XML document."""
        tree = etree.XML(raw_doc['doc'])
        document = self.transform(tree, fail=settings.RAISE_IN_TRANSFORMER)
        document['shareProperties'] = {'source': self.short_name}
        return NormalizedDocument(document)
class OAIHarvester(XMLHarvester):
    """ Create a harvester with a oai_dc namespace, that will harvest
    documents within a certain date range

    Contains functions for harvesting from an OAI provider, normalizing,
    and outputting in a way that scrapi can understand, in the most
    generic terms possible.

    For more information, see the OAI PMH specification:
    http://www.openarchives.org/OAI/openarchivesprotocol.html
    """
    record_encoding = None

    # URL fragments used to assemble OAI-PMH ListRecords requests.
    DEFAULT_ENCODING = 'UTF-8'
    RESUMPTION = '&resumptionToken='
    RECORDS_URL = '?verb=ListRecords'
    META_PREFIX_DATE = '&metadataPrefix=oai_dc&from={}&until={}'

    # Override these variables as required in concrete subclasses.
    namespaces = {
        'dc': 'http://purl.org/dc/elements/1.1/',
        'ns0': 'http://www.openarchives.org/OAI/2.0/',
        'oai_dc': 'http://www.openarchives.org/OAI/2.0/',
    }

    timeout = 0.5  # throttle passed to scrapi.requests.get
    approved_sets = None  # optional whitelist of OAI setSpec values
    timezone_granularity = False  # append T00:00:00Z to harvest dates if True
    property_list = ['date', 'type']  # extra dc/ns0 elements to collect

    @property
    def schema(self):
        # Extend the shared OAI schema with an XPath pair (dc and ns0
        # namespaces) for each property in property_list.
        properties = {
            'otherProperties': build_properties(*[(item, (
                '//dc:{}/node()'.format(item),
                '//ns0:{}/node()'.format(item),
                self.resolve_property)
            ) for item in self.property_list])
        }
        return updated_schema(OAISCHEMA, properties)

    def resolve_property(self, dc, ns0):
        # Merge the dc and ns0 matches; collapse a single match to a scalar.
        ret = dc + ns0
        return ret[0] if len(ret) == 1 else ret

    def harvest(self, start_date=None, end_date=None):
        """Fetch records in [start_date, end_date] and wrap them as RawDocuments."""
        # Default window: settings.DAYS_BACK days ago through today (ISO dates).
        start_date = (start_date or date.today() - timedelta(settings.DAYS_BACK)).isoformat()
        end_date = (end_date or date.today()).isoformat()

        if self.timezone_granularity:
            # Some providers require full UTC timestamps rather than bare dates.
            start_date += 'T00:00:00Z'
            end_date += 'T00:00:00Z'

        records_url = self.base_url + self.RECORDS_URL
        request_url = records_url + self.META_PREFIX_DATE.format(start_date, end_date)
        records = self.get_records(request_url, start_date, end_date)

        rawdoc_list = []
        for record in records:
            doc_id = record.xpath(
                'ns0:header/ns0:identifier', namespaces=self.namespaces)[0].text
            record = etree.tostring(record, encoding=self.record_encoding)
            rawdoc_list.append(RawDocument({
                'doc': record,
                'source': util.copy_to_unicode(self.short_name),
                'docID': util.copy_to_unicode(doc_id),
                'filetype': 'xml'
            }))

        return rawdoc_list

    def get_records(self, url, start_date, end_date, resump_token=''):
        """Recursively page through ListRecords responses via resumptionToken."""
        data = requests.get(url, throttle=self.timeout)
        doc = etree.XML(data.content)

        records = doc.xpath(
            '//ns0:record',
            namespaces=self.namespaces
        )
        token = doc.xpath(
            '//ns0:resumptionToken/node()',
            namespaces=self.namespaces
        )
        if len(token) == 1:
            # Per OAI-PMH, a resumption request carries only verb + token:
            # strip the date/prefix arguments and any previous token from the
            # URL before appending the new token.
            base_url = url.replace(self.META_PREFIX_DATE.format(start_date, end_date), '')
            base_url = base_url.replace(self.RESUMPTION + resump_token, '')
            url = base_url + self.RESUMPTION + token[0]
            records += self.get_records(url, start_date, end_date, resump_token=token[0])

        return records

    def normalize(self, raw_doc):
        """Normalize one OAI record; return None for deleted/unapproved records."""
        str_result = raw_doc.get('doc')
        result = etree.XML(str_result)

        if self.approved_sets:
            set_spec = result.xpath(
                'ns0:header/ns0:setSpec/node()',
                namespaces=self.namespaces
            )
            # check if there's an intersection between the approved sets and the
            # setSpec list provided in the record. If there isn't, don't normalize.
            if not {x.replace('publication:', '') for x in set_spec}.intersection(self.approved_sets):
                logger.info('Series {} not in approved list'.format(set_spec))
                return None

        status = result.xpath('ns0:header/@status', namespaces=self.namespaces)
        if status and status[0] == 'deleted':
            logger.info('Deleted record, not normalizing {}'.format(raw_doc['docID']))
            return None

        return super(OAIHarvester, self).normalize(raw_doc)
| |
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
from generate import generate
class Expression:
    """Metadata record describing one ExpressionType enum entry.

    kind      -- enum member name (e.g. "Add")
    type      -- expression tree node class (e.g. "BinaryExpression")
    interop   -- whether the node participates in interop binding
    reducible -- whether the node is reducible (compiled via reduction)
    doc       -- phrase used in the generated XML doc comment
    """

    def __init__(self, kind, type, interop = False, reducible = False, doc=""):
        # Stash every constructor argument verbatim on the instance.
        for attr, value in (('kind', kind), ('type', type),
                            ('interop', interop), ('reducible', reducible),
                            ('doc', doc)):
            setattr(self, attr, value)
# Master table of all expression node kinds. Each entry drives the generated
# ExpressionType enum member, the stack spiller switch and the compiler
# switch. Fix: corrected grammar typos in several doc strings ("an unary" ->
# "a unary", "a exact" -> "an exact", stray trailing space), which leak into
# the generated /// <summary> comments.
expressions = [
    #
    #          enum kind                  tree node type
    #

    #
    #  DO NOT REORDER THESE, THEY COME FROM THE LINQ V1 ENUM
    #

    #          Enum Value               Expression Class          Flags
    Expression("Add",                   "BinaryExpression",       interop = True, doc="arithmetic addition without overflow checking"),
    Expression("AddChecked",            "BinaryExpression",       doc="arithmetic addition with overflow checking"),
    Expression("And",                   "BinaryExpression",       interop = True, doc="a bitwise AND operation"),
    Expression("AndAlso",               "BinaryExpression",       doc="a short-circuiting conditional AND operation"),
    Expression("ArrayLength",           "UnaryExpression",        doc="getting the length of a one-dimensional array"),
    Expression("ArrayIndex",            "BinaryExpression",       doc="indexing into a one-dimensional array"),
    Expression("Call",                  "MethodCallExpression",   doc="represents a method call"),
    Expression("Coalesce",              "BinaryExpression",       doc="a null coalescing operation"),
    Expression("Conditional",           "ConditionalExpression",  doc="a conditional operation"),
    Expression("Constant",              "ConstantExpression",     doc="an expression that has a constant value"),
    Expression("Convert",               "UnaryExpression",        doc="a cast or conversion operation. If the operation is a numeric conversion, it overflows silently if the converted value does not fit the target type"),
    Expression("ConvertChecked",        "UnaryExpression",        doc="a cast or conversion operation. If the operation is a numeric conversion, an exception is thrown if the converted value does not fit the target type"),
    Expression("Divide",                "BinaryExpression",       interop = True, doc="arithmetic division"),
    Expression("Equal",                 "BinaryExpression",       interop = True, doc="an equality comparison"),
    Expression("ExclusiveOr",           "BinaryExpression",       interop = True, doc="a bitwise XOR operation"),
    Expression("GreaterThan",           "BinaryExpression",       interop = True, doc='a "greater than" numeric comparison'),
    Expression("GreaterThanOrEqual",    "BinaryExpression",       interop = True, doc='a "greater than or equal" numeric comparison'),
    Expression("Invoke",                "InvocationExpression",   doc="applying a delegate or lambda expression to a list of argument expressions"),
    Expression("Lambda",                "LambdaExpression",       doc="a lambda expression"),
    Expression("LeftShift",             "BinaryExpression",       interop = True, doc="a bitwise left-shift operation"),
    Expression("LessThan",              "BinaryExpression",       interop = True, doc='a "less than" numeric comparison'),
    Expression("LessThanOrEqual",       "BinaryExpression",       interop = True, doc='a "less than or equal" numeric comparison'),
    Expression("ListInit",              "ListInitExpression",     doc="creating a new IEnumerable object and initializing it from a list of elements"),
    Expression("MemberAccess",          "MemberExpression",       doc="reading from a field or property"),
    Expression("MemberInit",            "MemberInitExpression",   doc="creating a new object and initializing one or more of its members"),
    Expression("Modulo",                "BinaryExpression",       interop = True, doc="an arithmetic remainder operation"),
    Expression("Multiply",              "BinaryExpression",       interop = True, doc="arithmetic multiplication without overflow checking"),
    Expression("MultiplyChecked",       "BinaryExpression",       doc="arithmetic multiplication with overflow checking"),
    Expression("Negate",                "UnaryExpression",        interop = True, doc="an arithmetic negation operation"),
    Expression("UnaryPlus",             "UnaryExpression",        interop = True, doc="a unary plus operation. The result of a predefined unary plus operation is simply the value of the operand, but user-defined implementations may have non-trivial results"),
    Expression("NegateChecked",         "UnaryExpression",        doc="an arithmetic negation operation that has overflow checking"),
    Expression("New",                   "NewExpression",          doc="calling a constructor to create a new object"),
    Expression("NewArrayInit",          "NewArrayExpression",     doc="creating a new one-dimensional array and initializing it from a list of elements"),
    Expression("NewArrayBounds",        "NewArrayExpression",     doc="creating a new array where the bounds for each dimension are specified"),
    Expression("Not",                   "UnaryExpression",        interop = True, doc="a bitwise complement operation"),
    Expression("NotEqual",              "BinaryExpression",       interop = True, doc="an inequality comparison"),
    Expression("Or",                    "BinaryExpression",       interop = True, doc="a bitwise OR operation"),
    Expression("OrElse",                "BinaryExpression",       doc="a short-circuiting conditional OR operation"),
    Expression("Parameter",             "ParameterExpression",    doc="a reference to a parameter or variable defined in the context of the expression"),
    Expression("Power",                 "BinaryExpression",       interop = True, doc="raising a number to a power"),
    Expression("Quote",                 "UnaryExpression",        doc="an expression that has a constant value of type Expression. A Quote node can contain references to parameters defined in the context of the expression it represents"),
    Expression("RightShift",            "BinaryExpression",       interop = True, doc="a bitwise right-shift operation"),
    Expression("Subtract",              "BinaryExpression",       interop = True, doc="arithmetic subtraction without overflow checking"),
    Expression("SubtractChecked",       "BinaryExpression",       doc="arithmetic subtraction with overflow checking"),
    Expression("TypeAs",                "UnaryExpression",        doc="an explicit reference or boxing conversion where null reference (Nothing in Visual Basic) is supplied if the conversion fails"),
    Expression("TypeIs",                "TypeBinaryExpression",   doc="a type test"),

    # New types in LINQ V2

    Expression("Assign",                "BinaryExpression",       doc="an assignment"),
    Expression("Block",                 "BlockExpression",        doc="a block of expressions"),
    Expression("DebugInfo",             "DebugInfoExpression",    doc="a debugging information"),
    Expression("Decrement",             "UnaryExpression",        interop = True, doc="a unary decrement"),
    Expression("Dynamic",               "DynamicExpression",      doc="a dynamic operation"),
    Expression("Default",               "DefaultExpression",      doc="a default value"),
    Expression("Extension",             "ExtensionExpression",    doc="an extension expression"),
    Expression("Goto",                  "GotoExpression",         doc="a goto"),
    Expression("Increment",             "UnaryExpression",        interop = True, doc="a unary increment"),
    Expression("Index",                 "IndexExpression",        doc="an index operation"),
    Expression("Label",                 "LabelExpression",        doc="a label"),
    Expression("RuntimeVariables",      "RuntimeVariablesExpression", doc="a list of runtime variables"),
    Expression("Loop",                  "LoopExpression",         doc="a loop"),
    Expression("Switch",                "SwitchExpression",       doc="a switch operation"),
    Expression("Throw",                 "UnaryExpression",        doc="a throwing of an exception"),
    Expression("Try",                   "TryExpression",          doc="a try-catch expression"),
    Expression("Unbox",                 "UnaryExpression",        doc="an unbox value type operation"),
    Expression("AddAssign",             "BinaryExpression",       interop = True, reducible = True, doc="an arithmetic addition compound assignment without overflow checking"),
    Expression("AndAssign",             "BinaryExpression",       interop = True, reducible = True, doc="a bitwise AND compound assignment"),
    Expression("DivideAssign",          "BinaryExpression",       interop = True, reducible = True, doc="an arithmetic division compound assignment"),
    Expression("ExclusiveOrAssign",     "BinaryExpression",       interop = True, reducible = True, doc="a bitwise XOR compound assignment"),
    Expression("LeftShiftAssign",       "BinaryExpression",       interop = True, reducible = True, doc="a bitwise left-shift compound assignment"),
    Expression("ModuloAssign",          "BinaryExpression",       interop = True, reducible = True, doc="an arithmetic remainder compound assignment"),
    Expression("MultiplyAssign",        "BinaryExpression",       interop = True, reducible = True, doc="arithmetic multiplication compound assignment without overflow checking"),
    Expression("OrAssign",              "BinaryExpression",       interop = True, reducible = True, doc="a bitwise OR compound assignment"),
    Expression("PowerAssign",           "BinaryExpression",       interop = True, reducible = True, doc="raising a number to a power compound assignment"),
    Expression("RightShiftAssign",      "BinaryExpression",       interop = True, reducible = True, doc="a bitwise right-shift compound assignment"),
    Expression("SubtractAssign",        "BinaryExpression",       interop = True, reducible = True, doc="arithmetic subtraction compound assignment without overflow checking"),
    Expression("AddAssignChecked",      "BinaryExpression",       reducible = True, doc="an arithmetic addition compound assignment with overflow checking"),
    Expression("MultiplyAssignChecked", "BinaryExpression",       reducible = True, doc="arithmetic multiplication compound assignment with overflow checking"),
    Expression("SubtractAssignChecked", "BinaryExpression",       reducible = True, doc="arithmetic subtraction compound assignment with overflow checking"),
    Expression("PreIncrementAssign",    "UnaryExpression",        reducible = True, doc="a unary prefix increment"),
    Expression("PreDecrementAssign",    "UnaryExpression",        reducible = True, doc="a unary prefix decrement"),
    Expression("PostIncrementAssign",   "UnaryExpression",        reducible = True, doc="a unary postfix increment"),
    Expression("PostDecrementAssign",   "UnaryExpression",        reducible = True, doc="a unary postfix decrement"),
    Expression("TypeEqual",             "TypeBinaryExpression",   doc="an exact type test"),
    Expression("OnesComplement",        "UnaryExpression",        interop = True, doc="a ones complement"),
    Expression("IsTrue",                "UnaryExpression",        interop = True, doc="a true condition value"),
    Expression("IsFalse",               "UnaryExpression",        interop = True, doc="a false condition value"),
]
def get_unique_types():
    """Return the sorted, de-duplicated node type names of all expressions."""
    # filter out falsy (empty/None) types before de-duplicating
    return sorted({node.type for node in expressions if node.type})
def gen_tree_nodes(cw):
    """Write one documented ExpressionType enum entry per expression node."""
    for n in expressions:
        for line in ("/// <summary>",
                     "/// A node that represents " + n.doc + ".",
                     "/// </summary>",
                     n.kind + ","):
            cw.write(line)
def gen_stackspiller_switch(cw):
    """Emit the StackSpiller's dispatch switch over ExpressionType kinds."""
    no_spill_node_kinds = ["Quote", "Parameter", "Constant", "RuntimeVariables", "Default", "DebugInfo"]
    # non-reducible nodes that are not spill-free dispatch to a typed Rewrite
    for node in expressions:
        if not (node.kind in no_spill_node_kinds or node.reducible):
            method = "Rewrite"
            # a few kinds have a dedicated rewrite method
            if node.kind in ("Quote", "Throw", "Assign"):
                method += node.kind
            # short-circuiting operators share the "Logical" rewriter
            if node.kind in ("AndAlso", "OrElse", "Coalesce"):
                method += "Logical"
            cw.write("case ExpressionType." + node.kind + ":")
            cw.write("    result = " + method + node.type + "(node, stack);")
            cw.write("    break;")
    # reducible nodes all funnel through RewriteReducibleExpression
    for node in expressions:
        if node.reducible:
            cw.write("case ExpressionType." + node.kind + ":")
            cw.write("    result = RewriteReducibleExpression(node, stack);")
            cw.write("    break;")
    # kinds that never need spilling return the node unchanged
    for kind in no_spill_node_kinds:
        cw.write("case ExpressionType." + kind + ":")
        cw.write("    return new Result(RewriteAction.None, node);")
def gen_compiler(cw):
    """Emit the compiler's dispatch switch for all non-reducible nodes."""
    # kinds with a dedicated Emit<Kind> method
    special = ("AndAlso", "OrElse", "Quote", "Coalesce", "Unbox", "Throw", "Assign")
    # kinds whose emit method takes no flags argument
    no_flags = ("Coalesce", "Constant", "Lambda", "ListInit", "Loop", "MemberAccess", "MemberInit",
                "New", "NewArrayInit", "NewArrayBounds", "Parameter", "Quote", "TypeIs",
                "Assign", "DebugInfo", "Dynamic", "Default", "Extension", "Index", "RuntimeVariables",
                "Throw", "Try", "Unbox", "TypeEqual")
    for n in expressions:
        if n.reducible:
            continue
        method = "Emit"
        if n.kind in special:
            method += n.kind
        elif n.kind in ("Convert", "ConvertChecked"):
            method += "Convert"
        args = "(node);" if n.kind in no_flags else "(node, flags);"
        cw.write("case ExpressionType." + n.kind + ":")
        cw.write("    " + method + n.type + args)
        cw.write("    break;")
def gen_op_validator(type, cw):
    """Write a case label for each interop node of the given expression type."""
    for kind in (n.kind for n in expressions if n.interop and n.type == type):
        cw.write("case ExpressionType.%s:" % kind)
def gen_binop_validator(cw):
    """Emit case labels for all interop binary expression kinds."""
    gen_op_validator("BinaryExpression", cw)
def gen_unop_validator(cw):
    """Emit case labels for all interop unary expression kinds."""
    gen_op_validator("UnaryExpression", cw)
def gen_checked_ops(cw):
    """Write a case label for every overflow-checked node kind."""
    checked_kinds = [n.kind for n in expressions if n.kind.endswith("Checked")]
    for kind in checked_kinds:
        cw.write("case ExpressionType.%s:" % kind)
def get_type_name(t):
    """Render a .NET type as a C#-style name, expanding generic parameters."""
    if not t.IsGenericType:
        return t.Name
    # strip the CLR arity suffix (e.g. "Func`2") and list the type arguments
    base = t.Name[:t.Name.IndexOf("`")]
    args = ", ".join(g.Name for g in t.GetGenericArguments())
    return base + "<" + args + ">"
def gen_debug_proxy(cw, e):
    """Emit a C# debugger proxy class exposing every property of type e."""
    name = e.Name + "Proxy"
    cw.enter_block("internal class %(name)s", name=name)
    cw.write("""private readonly %(expression)s _node;
    public %(name)s(%(expression)s node) {
        _node = node;
    }
""", name = name, expression = e.Name)
    import System.Reflection
    bf = System.Reflection.BindingFlags
    # properties
    def get_properties(e):
        # Collect public and non-public instance properties walking up the
        # type hierarchy, keeping only the most derived one for each name.
        properties = []
        atom = set()
        def add(l):
            for p in l:
                if not p.Name in atom:
                    atom.add(p.Name)
                    properties.append(p)
        while e:
            add(e.GetProperties(bf.Instance | bf.Public))
            add(e.GetProperties(bf.Instance | bf.NonPublic))
            e = e.BaseType
        # NOTE: Python 2 list.sort(cmp, key) signature (IronPython 2 script).
        properties.sort(None, lambda p: p.Name)
        return properties
    properties = get_properties(e)
    for p in properties:
        # skip the Dump helper and any non-public getter except DebugView
        if p.Name == "Dump": continue
        get = p.GetGetMethod(True)
        if not get: continue
        if not get.IsPublic and p.Name != "DebugView": continue
        cw.write("public %(type)s %(name)s { get { return _node.%(name)s; } }", type = get_type_name(p.PropertyType), name = p.Name)
    cw.exit_block()
def gen_debug_proxies(cw):
    """Generate a debugger proxy class for every public AST expression type.
    Reflects over the Microsoft.Scripting.Core assembly (IronPython/DLR)
    and emits one proxy per public, non-generic Expression subclass.
    """
    import clr
    msc = clr.LoadAssemblyByPartialName("Microsoft.Scripting.Core")
    expr = msc.GetType("Microsoft.Scripting.Ast.Expression")
    # types that get proxies even though they are not Expression subclasses
    custom = [ 'SwitchCase', 'CatchBlock' ]
    ignore = [ 'Expression' ]
    def expression_filter(e):
        # keep only public, non-generic AST types (plus the custom list)
        if not e.IsPublic: return False
        if not e.Namespace.StartsWith("Microsoft.Scripting.Ast"): return False
        if e.IsGenericType: return False
        if e.Name in ignore: return False
        if expr.IsAssignableFrom(e): return True
        if e.Name in custom: return True
        return False
    expressions = filter(expression_filter, msc.GetTypes())
    # NOTE: Python 2 filter() returns a list; sort(cmp, key) is the py2 form.
    expressions.sort(None, lambda e: e.Name)
    first = True
    for e in expressions:
        # blank line between consecutive proxy classes
        if not first: cw.write("")
        else: first = False
        gen_debug_proxy(cw, e)
def main():
    """Run every code generator; CLR < 4 also gets debugger proxy output."""
    temp_list = [ ("Expression Tree Node Types", gen_tree_nodes),
        ("Checked Operations", gen_checked_ops),
        ("Binary Operation Binder Validator", gen_binop_validator),
        ("Unary Operation Binder Validator", gen_unop_validator),
        ("StackSpiller Switch", gen_stackspiller_switch),
        ("Expression Compiler", gen_compiler)
    ]
    import System
    # debugger proxies are only generated when running on CLR versions < 4
    if System.Environment.Version.Major<4:
        temp_list.append(("Expression Debugger Proxies", gen_debug_proxies))
    return generate(*temp_list)
# Allow the generator to be run directly as a script.
if __name__ == "__main__":
    main()
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import fixtures as fx
import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import sqlalchemy
import testtools
from nova.compute import rpcapi as compute_rpcapi
from nova import conductor
from nova import context
from nova.db.sqlalchemy import api as session
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import service as service_obj
from nova.tests import fixtures
from nova.tests.unit import conf_fixture
from nova import utils
CONF = cfg.CONF
class TestConfFixture(testtools.TestCase):
    """Test the Conf fixtures in Nova.
    This is a basic test that this fixture works like we expect.
    Expectations:
    1. before using the fixture, a default value (api_paste_config)
    comes through untouched.
    2. before using the fixture, a known default value that we
    override is correct.
    3. after using the fixture a known value that we override is the
    new value.
    4. after using the fixture we can set a default value to something
    random, and it will be reset once we are done.
    There are 2 copies of this test so that you can verify they do the
    right thing with:
    tox -e py27 test_fixtures -- --concurrency=1
    As regardless of run order, their initial asserts would be
    impacted if the reset behavior isn't working correctly.
    """
    def _test_override(self):
        """Shared body for both copies of the override test."""
        # defaults must be intact before the fixture is installed
        self.assertEqual('api-paste.ini', CONF.wsgi.api_paste_config)
        self.assertFalse(CONF.fake_network)
        self.useFixture(conf_fixture.ConfFixture())
        # mutate a default; the fixture's cleanup must undo this change
        CONF.set_default('api_paste_config', 'foo', group='wsgi')
        self.assertTrue(CONF.fake_network)
    def test_override1(self):
        self._test_override()
    def test_override2(self):
        self._test_override()
class TestOutputStream(testtools.TestCase):
    """Ensure Output Stream capture works as expected.
    This has the added benefit of providing a code example of how you
    can manipulate the output stream in your own tests.
    """
    def test_output(self):
        # opt in to stream capture before installing the capture fixture
        for var in ('OS_STDOUT_CAPTURE', 'OS_STDERR_CAPTURE'):
            self.useFixture(fx.EnvironmentVariable(var, '1'))
        captured = self.useFixture(fixtures.OutputStreamCapture())
        sys.stdout.write("foo")
        sys.stderr.write("bar")
        self.assertEqual("foo", captured.stdout)
        self.assertEqual("bar", captured.stderr)
        # TODO(sdague): nuke the out and err buffers so it doesn't
        # make it to testr
class TestLogging(testtools.TestCase):
    """Checks for the StandardLogging fixture's handler configuration."""
    def test_default_logging(self):
        """Without OS_DEBUG, INFO is captured but DEBUG is dropped."""
        stdlog = self.useFixture(fixtures.StandardLogging())
        root = logging.getLogger()
        # there should be a null handler as well at DEBUG
        self.assertEqual(2, len(root.handlers), root.handlers)
        log = logging.getLogger(__name__)
        log.info("at info")
        log.debug("at debug")
        self.assertIn("at info", stdlog.logger.output)
        self.assertNotIn("at debug", stdlog.logger.output)
        # broken debug messages should still explode, even though we
        # aren't logging them in the regular handler
        self.assertRaises(TypeError, log.debug, "this is broken %s %s", "foo")
        # and, ensure that one of the terrible log messages isn't
        # output at info
        warn_log = logging.getLogger('migrate.versioning.api')
        warn_log.info("warn_log at info, should be skipped")
        warn_log.error("warn_log at error")
        self.assertIn("warn_log at error", stdlog.logger.output)
        self.assertNotIn("warn_log at info", stdlog.logger.output)
    def test_debug_logging(self):
        """With OS_DEBUG=1, both INFO and DEBUG are captured."""
        self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1'))
        stdlog = self.useFixture(fixtures.StandardLogging())
        root = logging.getLogger()
        # there should no longer be a null handler
        self.assertEqual(1, len(root.handlers), root.handlers)
        log = logging.getLogger(__name__)
        log.info("at info")
        log.debug("at debug")
        self.assertIn("at info", stdlog.logger.output)
        self.assertIn("at debug", stdlog.logger.output)
class TestTimeout(testtools.TestCase):
    """Tests for our timeout fixture.
    Testing the actual timeout mechanism is beyond the scope of this
    test, because it's a pretty clear pass through to fixtures'
    timeout fixture, which tested in their tree.
    """
    def test_scaling(self):
        # a bad scaling factor is rejected outright
        self.assertRaises(ValueError, fixtures.Timeout, 1, 0.5)
        # int and string timeouts, with and without scaling, all work
        cases = ((10, None, 10), ("10", None, 10), ("10", 2, 20))
        for raw, factor, expected in cases:
            if factor is None:
                timeout = fixtures.Timeout(raw)
            else:
                timeout = fixtures.Timeout(raw, factor)
            self.assertEqual(expected, timeout.test_timeout)
class TestOSAPIFixture(testtools.TestCase):
    """Checks that the OSAPIFixture stands up a working API server."""
    @mock.patch('nova.objects.Service.get_by_host_and_binary')
    @mock.patch('nova.objects.Service.create')
    def test_responds_to_version(self, mock_service_create, mock_get):
        """Ensure the OSAPI server responds to calls sensibly."""
        self.useFixture(fixtures.OutputStreamCapture())
        self.useFixture(fixtures.StandardLogging())
        self.useFixture(conf_fixture.ConfFixture())
        self.useFixture(fixtures.RPCFixture('nova.test'))
        api = self.useFixture(fixtures.OSAPIFixture()).api
        # request the API root, which provides us the versions of the API
        resp = api.api_request('/', strip_version=True)
        self.assertEqual(200, resp.status_code, resp.content)
        # request a bad root url, should be a 404
        #
        # NOTE(sdague): this currently fails, as it falls into the 300
        # dispatcher instead. This is a bug. The test case is left in
        # here, commented out until we can address it.
        #
        # resp = api.api_request('/foo', strip_version=True)
        # self.assertEqual(resp.status_code, 400, resp.content)
        # request a known bad url, and we should get a 404
        resp = api.api_request('/foo')
        self.assertEqual(404, resp.status_code, resp.content)
class TestDatabaseFixture(testtools.TestCase):
    """Checks reset and cleanup behavior of the in-memory Database fixture."""
    def test_fixture_reset(self):
        """Re-applying the fixture must reset the main DB to empty."""
        # because this sets up reasonable db connection strings
        self.useFixture(conf_fixture.ConfFixture())
        self.useFixture(fixtures.Database())
        engine = session.get_engine()
        conn = engine.connect()
        result = conn.execute("select * from instance_types")
        rows = result.fetchall()
        self.assertEqual(0, len(rows), "Rows %s" % rows)
        # insert a 6th instance type, column 5 below is an int id
        # which has a constraint on it, so if new standard instance
        # types are added you have to bump it.
        conn.execute("insert into instance_types VALUES "
                     "(NULL, NULL, NULL, 't1.test', 6, 4096, 2, 0, NULL, '87'"
                     ", 1.0, 40, 0, 0, 1, 0)")
        result = conn.execute("select * from instance_types")
        rows = result.fetchall()
        self.assertEqual(1, len(rows), "Rows %s" % rows)
        # reset by invoking the fixture again
        #
        # NOTE(sdague): it's important to reestablish the db
        # connection because otherwise we have a reference to the old
        # in mem db.
        self.useFixture(fixtures.Database())
        conn = engine.connect()
        result = conn.execute("select * from instance_types")
        rows = result.fetchall()
        self.assertEqual(0, len(rows), "Rows %s" % rows)
    def test_api_fixture_reset(self):
        """Re-applying the fixture must reset the API DB to empty."""
        # This sets up reasonable db connection strings
        self.useFixture(conf_fixture.ConfFixture())
        self.useFixture(fixtures.Database(database='api'))
        engine = session.get_api_engine()
        conn = engine.connect()
        result = conn.execute("select * from cell_mappings")
        rows = result.fetchall()
        self.assertEqual(0, len(rows), "Rows %s" % rows)
        uuid = uuidutils.generate_uuid()
        conn.execute("insert into cell_mappings (uuid, name) VALUES "
                     "('%s', 'fake-cell')" % (uuid,))
        result = conn.execute("select * from cell_mappings")
        rows = result.fetchall()
        self.assertEqual(1, len(rows), "Rows %s" % rows)
        # reset by invoking the fixture again
        #
        # NOTE(sdague): it's important to reestablish the db
        # connection because otherwise we have a reference to the old
        # in mem db.
        self.useFixture(fixtures.Database(database='api'))
        conn = engine.connect()
        result = conn.execute("select * from cell_mappings")
        rows = result.fetchall()
        self.assertEqual(0, len(rows), "Rows %s" % rows)
    def test_fixture_cleanup(self):
        """cleanup() must leave the main DB with an empty schema."""
        # because this sets up reasonable db connection strings
        self.useFixture(conf_fixture.ConfFixture())
        fix = fixtures.Database()
        self.useFixture(fix)
        # manually do the cleanup that addCleanup will do
        fix.cleanup()
        # ensure the db contains nothing
        engine = session.get_engine()
        conn = engine.connect()
        schema = "".join(line for line in conn.connection.iterdump())
        self.assertEqual(schema, "BEGIN TRANSACTION;COMMIT;")
    def test_api_fixture_cleanup(self):
        """cleanup() must leave the API DB with an empty schema."""
        # This sets up reasonable db connection strings
        self.useFixture(conf_fixture.ConfFixture())
        fix = fixtures.Database(database='api')
        self.useFixture(fix)
        # No data inserted by migrations so we need to add a row
        engine = session.get_api_engine()
        conn = engine.connect()
        uuid = uuidutils.generate_uuid()
        conn.execute("insert into cell_mappings (uuid, name) VALUES "
                     "('%s', 'fake-cell')" % (uuid,))
        result = conn.execute("select * from cell_mappings")
        rows = result.fetchall()
        self.assertEqual(1, len(rows), "Rows %s" % rows)
        # Manually do the cleanup that addCleanup will do
        fix.cleanup()
        # Ensure the db contains nothing
        engine = session.get_api_engine()
        conn = engine.connect()
        schema = "".join(line for line in conn.connection.iterdump())
        self.assertEqual("BEGIN TRANSACTION;COMMIT;", schema)
class TestDatabaseAtVersionFixture(testtools.TestCase):
    """Checks the fixture that migrates the DB to a specific schema version."""
    def test_fixture_schema_version(self):
        """The schema must match the requested migration version."""
        self.useFixture(conf_fixture.ConfFixture())
        # In/after 317 aggregates did have uuid
        self.useFixture(fixtures.DatabaseAtVersion(318))
        engine = session.get_engine()
        engine.connect()
        meta = sqlalchemy.MetaData(engine)
        aggregate = sqlalchemy.Table('aggregates', meta, autoload=True)
        self.assertTrue(hasattr(aggregate.c, 'uuid'))
        # Before 317, aggregates had no uuid
        self.useFixture(fixtures.DatabaseAtVersion(316))
        engine = session.get_engine()
        engine.connect()
        meta = sqlalchemy.MetaData(engine)
        aggregate = sqlalchemy.Table('aggregates', meta, autoload=True)
        self.assertFalse(hasattr(aggregate.c, 'uuid'))
        engine.dispose()
    def test_fixture_after_database_fixture(self):
        """Using DatabaseAtVersion after Database must not blow up."""
        self.useFixture(conf_fixture.ConfFixture())
        self.useFixture(fixtures.Database())
        self.useFixture(fixtures.DatabaseAtVersion(318))
class TestDefaultFlavorsFixture(testtools.TestCase):
    """Checks that DefaultFlavorsFixture seeds the API DB flavors table."""
    @mock.patch("nova.objects.flavor.Flavor._send_notification")
    def test_flavors(self, mock_send_notification):
        """The fixture must populate the six default flavors."""
        self.useFixture(conf_fixture.ConfFixture())
        self.useFixture(fixtures.Database())
        self.useFixture(fixtures.Database(database='api'))
        engine = session.get_api_engine()
        conn = engine.connect()
        # flavors table starts empty
        result = conn.execute("select * from flavors")
        rows = result.fetchall()
        self.assertEqual(0, len(rows), "Rows %s" % rows)
        self.useFixture(fixtures.DefaultFlavorsFixture())
        # the fixture inserts the 6 default flavors
        result = conn.execute("select * from flavors")
        rows = result.fetchall()
        self.assertEqual(6, len(rows), "Rows %s" % rows)
class TestIndirectionAPIFixture(testtools.TestCase):
    """Checks install/restore of the NovaObject indirection API."""
    def test_indirection_api(self):
        """The fixture sets indirection_api and cleanup restores None."""
        # Should initially be None
        self.assertIsNone(obj_base.NovaObject.indirection_api)
        # make sure the fixture correctly sets the value
        fix = fixtures.IndirectionAPIFixture('foo')
        self.useFixture(fix)
        self.assertEqual('foo', obj_base.NovaObject.indirection_api)
        # manually do the cleanup that addCleanup will do
        fix.cleanup()
        # ensure the initial value is restored
        self.assertIsNone(obj_base.NovaObject.indirection_api)
class TestSpawnIsSynchronousFixture(testtools.TestCase):
    """Checks the fixture that makes utils.spawn/spawn_n run synchronously."""
    def test_spawn_patch(self):
        original = utils.spawn_n
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        # the fixture must have replaced spawn_n with something else
        self.assertNotEqual(original, utils.spawn_n)
    def test_spawn_passes_through(self):
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        tester = mock.MagicMock()
        utils.spawn_n(tester.function, 'foo', bar='bar')
        tester.function.assert_called_once_with('foo', bar='bar')
    def test_spawn_return_has_wait(self):
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        thread = utils.spawn(lambda x: '%s' % x, 'foo')
        self.assertEqual('foo', thread.wait())
    def test_spawn_n_return_has_wait(self):
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        thread = utils.spawn_n(lambda x: '%s' % x, 'foo')
        self.assertEqual('foo', thread.wait())
    def _check_link(self, spawner):
        # link() must invoke the callback synchronously, passing the
        # thread object plus any extra positional arguments.
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        thread = spawner(mock.MagicMock)
        marker = 'test'
        seen = []
        def callback(gt, param):
            self.assertEqual(thread, gt)
            self.assertEqual(marker, param)
            seen.append(1)
        thread.link(callback, marker)
        self.assertEqual(1, len(seen))
    def test_spawn_has_link(self):
        self._check_link(utils.spawn)
    def test_spawn_n_has_link(self):
        self._check_link(utils.spawn_n)
class TestBannedDBSchemaOperations(testtools.TestCase):
    """The BannedDBSchemaOperations fixture must block schema mutation."""
    def _assert_banned(self, resource, name):
        # Inside the fixture both drop() and alter() must raise.
        with fixtures.BannedDBSchemaOperations([name]):
            self.assertRaises(exception.DBNotAllowed, resource.drop)
            self.assertRaises(exception.DBNotAllowed, resource.alter)
    def test_column(self):
        self._assert_banned(sqlalchemy.Column(), 'Column')
    def test_table(self):
        self._assert_banned(sqlalchemy.Table(), 'Table')
class TestAllServicesCurrentFixture(testtools.TestCase):
    """Checks the fixture that pins all services at the current version."""
    @mock.patch('nova.objects.Service._db_service_get_minimum_version')
    def test_services_current(self, mock_db):
        """With the fixture in place the DB minimum-version lookup is skipped."""
        # without the fixture, the DB is consulted for the minimum version
        mock_db.return_value = {'nova-compute': 123}
        self.assertEqual(123, service_obj.Service.get_minimum_version(
            None, 'nova-compute'))
        mock_db.assert_called_once_with(None, ['nova-compute'],
                                        use_slave=False)
        mock_db.reset_mock()
        compute_rpcapi.LAST_VERSION = 123
        self.useFixture(fixtures.AllServicesCurrent())
        # the fixture clears the RPC pin and short-circuits the DB lookup
        self.assertIsNone(compute_rpcapi.LAST_VERSION)
        self.assertEqual(service_obj.SERVICE_VERSION,
                         service_obj.Service.get_minimum_version(
                             None, 'nova-compute'))
        self.assertFalse(mock_db.called)
class TestNoopConductorFixture(testtools.TestCase):
    """Checks that the noop conductor fixture stubs out conductor calls."""
    @mock.patch('nova.conductor.api.ComputeTaskAPI.resize_instance')
    def test_task_api_not_called(self, mock_resize):
        """The real compute task API must never be reached."""
        self.useFixture(fixtures.NoopConductorFixture())
        conductor.ComputeTaskAPI().resize_instance()
        self.assertFalse(mock_resize.called)
    @mock.patch('nova.conductor.api.API.wait_until_ready')
    def test_api_not_called(self, mock_wait):
        """The real conductor API must never be reached."""
        self.useFixture(fixtures.NoopConductorFixture())
        conductor.API().wait_until_ready()
        self.assertFalse(mock_wait.called)
class TestSingleCellSimpleFixture(testtools.TestCase):
    """Checks for the SingleCellSimple fixture."""
    def test_single_cell(self):
        # exactly one cell mapping is faked out by the fixture
        self.useFixture(fixtures.SingleCellSimple())
        mappings = objects.CellMappingList.get_all(None)
        self.assertEqual(1, len(mappings))
    def test_target_cell(self):
        # target_cell becomes a pass-through yielding the original context
        self.useFixture(fixtures.SingleCellSimple())
        with context.target_cell(mock.sentinel.context, None) as ctxt:
            self.assertIs(mock.sentinel.context, ctxt)
class TestPlacementFixture(testtools.TestCase):
    """Checks that the PlacementFixture serves a minimal placement API."""
    def test_responds_to_version(self):
        """Ensure the Placement server responds to calls sensibly."""
        placement_fixture = self.useFixture(fixtures.PlacementFixture())
        # request the API root, which provides us the versions of the API
        resp = placement_fixture._fake_get(None, '/')
        self.assertEqual(200, resp.status_code)
        # request a known bad url, and we should get a 404
        resp = placement_fixture._fake_get(None, '/foo')
        self.assertEqual(404, resp.status_code)
        # unsets the token so we fake missing it
        placement_fixture.token = None
        resp = placement_fixture._fake_get(None, '/foo')
        self.assertEqual(401, resp.status_code)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bounding Box List operations.
Example box operations that are supported:
* areas: compute bounding box areas
* iou: pairwise intersection-over-union scores
* sq_dist: pairwise distances between bounding boxes
Whenever box_list_ops functions output a BoxList, the fields of the incoming
BoxList are retained unless documented otherwise.
"""
import tensorflow as tf
from object_detection.core import box_list
from object_detection.utils import shape_utils
class SortOrder(object):
  """Enum class for sort order.
  Attributes:
    ascend: ascend order.
    descend: descend order.
  """
  # plain integer constants; callers compare against these values directly
  ascend = 1
  descend = 2
def area(boxlist, scope=None):
  """Computes area of boxes.
  Args:
    boxlist: BoxList holding N boxes
    scope: name scope.
  Returns:
    a tensor with shape [N] representing box areas.
  """
  with tf.name_scope(scope, 'Area'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    heights = y_max - y_min
    widths = x_max - x_min
    # drop the trailing singleton dimension left by tf.split
    return tf.squeeze(heights * widths, [1])
def height_width(boxlist, scope=None):
  """Computes height and width of boxes in boxlist.
  Args:
    boxlist: BoxList holding N boxes
    scope: name scope.
  Returns:
    Height: A tensor with shape [N] representing box heights.
    Width: A tensor with shape [N] representing box widths.
  """
  with tf.name_scope(scope, 'HeightWidth'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    heights = tf.squeeze(y_max - y_min, [1])
    widths = tf.squeeze(x_max - x_min, [1])
    return heights, widths
def scale(boxlist, y_scale, x_scale, scope=None):
  """scale box coordinates in x and y dimensions.
  Args:
    boxlist: BoxList holding N boxes
    y_scale: (float) scalar tensor
    x_scale: (float) scalar tensor
    scope: name scope.
  Returns:
    boxlist: BoxList holding N boxes
  """
  with tf.name_scope(scope, 'Scale'):
    y_scale = tf.cast(y_scale, tf.float32)
    x_scale = tf.cast(x_scale, tf.float32)
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    scaled_corners = tf.concat(
        [y_scale * y_min, x_scale * x_min, y_scale * y_max, x_scale * x_max],
        1)
    scaled_boxlist = box_list.BoxList(scaled_corners)
    # carry over any extra fields (scores, classes, ...) unchanged
    return _copy_extra_fields(scaled_boxlist, boxlist)
def clip_to_window(boxlist, window, filter_nonoverlapping=True, scope=None):
  """Clip bounding boxes to a window.
  This op clips any input bounding boxes (represented by bounding box
  corners) to a window, optionally filtering out boxes that do not
  overlap at all with the window.
  Args:
    boxlist: BoxList holding M_in boxes
    window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
      window to which the op should clip boxes.
    filter_nonoverlapping: whether to filter out boxes that do not overlap at
      all with the window.
    scope: name scope.
  Returns:
    a BoxList holding M_out boxes where M_out <= M_in
  """
  with tf.name_scope(scope, 'ClipToWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    def _clamp(coord, lo, hi):
      # restrict a coordinate tensor to the closed interval [lo, hi]
      return tf.maximum(tf.minimum(coord, hi), lo)
    clipped = box_list.BoxList(
        tf.concat([_clamp(y_min, win_y_min, win_y_max),
                   _clamp(x_min, win_x_min, win_x_max),
                   _clamp(y_max, win_y_min, win_y_max),
                   _clamp(x_max, win_x_min, win_x_max)], 1))
    clipped = _copy_extra_fields(clipped, boxlist)
    if filter_nonoverlapping:
      # keep only the boxes that still have positive area after clipping
      areas = area(clipped)
      keep_indices = tf.cast(
          tf.reshape(tf.where(tf.greater(areas, 0.0)), [-1]), tf.int32)
      clipped = gather(clipped, keep_indices)
    return clipped
def prune_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall outside a given window.
  This function prunes bounding boxes that even partially fall outside the given
  window. See also clip_to_window which only prunes bounding boxes that fall
  completely outside the window, and clips any bounding boxes that partially
  overflow.
  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.
  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    # a box violates the window if any corner crosses a window edge
    violations = tf.concat([
        tf.less(y_min, win_y_min), tf.less(x_min, win_x_min),
        tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max)
    ], 1)
    fully_inside = tf.logical_not(tf.reduce_any(violations, 1))
    valid_indices = tf.reshape(tf.where(fully_inside), [-1])
    return gather(boxlist, valid_indices), valid_indices
def prune_completely_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall completely outside of the given window.
  The function clip_to_window prunes bounding boxes that fall
  completely outside the window, but also clips any bounding boxes that
  partially overflow. This function does not clip partially overflowing boxes.
  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.
  Returns:
    pruned_boxlist: a new BoxList with all bounding boxes partially or fully in
      the window.
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
      in the input tensor.
  """
  # NOTE: fixed the scope-name typo ('PruneCompleteleyOutsideWindow'); only
  # the generated graph node label changes, no computation is affected.
  with tf.name_scope(scope, 'PruneCompletelyOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    # a box is completely outside iff it lies entirely past any window edge
    coordinate_violations = tf.concat([
        tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
        tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices
def intersection(boxlist1, boxlist2, scope=None):
  """Compute pairwise intersection areas between boxes.
  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes
    scope: name scope.
  Returns:
    a tensor with shape [N, M] representing pairwise intersections
  """
  with tf.name_scope(scope, 'Intersection'):
    y_min1, x_min1, y_max1, x_max1 = tf.split(
        value=boxlist1.get(), num_or_size_splits=4, axis=1)
    y_min2, x_min2, y_max2, x_max2 = tf.split(
        value=boxlist2.get(), num_or_size_splits=4, axis=1)
    # broadcast [N, 1] against [1, M] to get all-pairs overlap extents
    pairwise_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
    pairwise_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
    overlap_heights = tf.maximum(0.0, pairwise_min_ymax - pairwise_max_ymin)
    pairwise_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
    pairwise_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
    overlap_widths = tf.maximum(0.0, pairwise_min_xmax - pairwise_max_xmin)
    return overlap_heights * overlap_widths
def matched_intersection(boxlist1, boxlist2, scope=None):
  """Compute intersection areas between corresponding boxes in two boxlists.
  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding N boxes
    scope: name scope.
  Returns:
    a tensor with shape [N] representing pairwise intersections
  """
  with tf.name_scope(scope, 'MatchedIntersection'):
    y_min1, x_min1, y_max1, x_max1 = tf.split(
        value=boxlist1.get(), num_or_size_splits=4, axis=1)
    y_min2, x_min2, y_max2, x_max2 = tf.split(
        value=boxlist2.get(), num_or_size_splits=4, axis=1)
    # element-wise overlap extents (negative overlap clamps to zero)
    overlap_heights = tf.maximum(
        0.0, tf.minimum(y_max1, y_max2) - tf.maximum(y_min1, y_min2))
    overlap_widths = tf.maximum(
        0.0, tf.minimum(x_max1, x_max2) - tf.maximum(x_min1, x_min2))
    return tf.reshape(overlap_heights * overlap_widths, [-1])
def iou(boxlist1, boxlist2, scope=None):
  """Computes pairwise intersection-over-union between box collections.
  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes
    scope: name scope.
  Returns:
    a tensor with shape [N, M] representing pairwise iou scores.
  """
  with tf.name_scope(scope, 'IOU'):
    overlaps = intersection(boxlist1, boxlist2)
    unions = (tf.expand_dims(area(boxlist1), 1) +
              tf.expand_dims(area(boxlist2), 0) - overlaps)
    # report 0 where there is no overlap, avoiding a 0/0 division
    return tf.where(tf.equal(overlaps, 0.0),
                    tf.zeros_like(overlaps),
                    tf.truediv(overlaps, unions))
def matched_iou(boxlist1, boxlist2, scope=None):
  """Compute intersection-over-union between corresponding boxes in boxlists.
  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding N boxes
    scope: name scope.
  Returns:
    a tensor with shape [N] representing pairwise iou scores.
  """
  with tf.name_scope(scope, 'MatchedIOU'):
    overlaps = matched_intersection(boxlist1, boxlist2)
    unions = area(boxlist1) + area(boxlist2) - overlaps
    # report 0 where there is no overlap, avoiding a 0/0 division
    return tf.where(tf.equal(overlaps, 0.0),
                    tf.zeros_like(overlaps),
                    tf.truediv(overlaps, unions))
def ioa(boxlist1, boxlist2, scope=None):
  """Computes pairwise intersection-over-area between box collections.
  intersection-over-area (IOA) between two boxes box1 and box2 is defined as
  their intersection area over box2's area. Note that ioa is not symmetric,
  that is, ioa(box1, box2) != ioa(box2, box1).
  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes
    scope: name scope.
  Returns:
    a tensor with shape [N, M] representing pairwise ioa scores.
  """
  with tf.name_scope(scope, 'IOA'):
    overlaps = intersection(boxlist1, boxlist2)
    # denominator is boxlist2's area broadcast over every row
    denominators = tf.expand_dims(area(boxlist2), 0)
    return tf.truediv(overlaps, denominators)
def prune_non_overlapping_boxes(
    boxlist1, boxlist2, min_overlap=0.0, scope=None):
  """Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2.
  For each box in boxlist1, we want its IOA to be more than minoverlap with
  at least one of the boxes in boxlist2. If it does not, we remove it.
  Args:
    boxlist1: BoxList holding N boxes.
    boxlist2: BoxList holding M boxes.
    min_overlap: Minimum required overlap between boxes, to count them as
                 overlapping.
    scope: name scope.
  Returns:
    new_boxlist1: A pruned boxlist with size [N', 4].
    keep_inds: A tensor with shape [N'] indexing kept bounding boxes in the
      first input BoxList `boxlist1`.
  """
  with tf.name_scope(scope, 'PruneNonOverlappingBoxes'):
    ioa_ = ioa(boxlist2, boxlist1)  # [M, N] tensor
    # best IOA over all of boxlist2 for each box in boxlist1; `axis` replaces
    # the deprecated `reduction_indices` alias of tf.reduce_max.
    ioa_ = tf.reduce_max(ioa_, axis=[0])  # [N] tensor
    keep_bool = tf.greater_equal(ioa_, tf.constant(min_overlap))
    # `axis` replaces the deprecated `squeeze_dims` alias of tf.squeeze.
    keep_inds = tf.squeeze(tf.where(keep_bool), axis=[1])
    new_boxlist1 = gather(boxlist1, keep_inds)
    return new_boxlist1, keep_inds
def prune_small_boxes(boxlist, min_side, scope=None):
  """Prunes small boxes in the boxlist which have a side smaller than min_side.

  Args:
    boxlist: BoxList holding N boxes.
    min_side: Minimum width AND height of box to survive pruning.
    scope: name scope.

  Returns:
    A pruned boxlist.
  """
  with tf.name_scope(scope, 'PruneSmallBoxes'):
    box_height, box_width = height_width(boxlist)
    # A box survives only if both of its sides are at least min_side.
    large_enough = tf.logical_and(
        tf.greater_equal(box_width, min_side),
        tf.greater_equal(box_height, min_side))
    keep_inds = tf.reshape(tf.where(large_enough), [-1])
    return gather(boxlist, keep_inds)
def change_coordinate_frame(boxlist, window, scope=None):
  """Change coordinate frame of the boxlist to be relative to window's frame.

  Given a window of the form [ymin, xmin, ymax, xmax], rewrites each box so
  that the window's min corner maps to (0, 0) and its max corner to (1, 1).
  A typical use is data augmentation: after randomly cropping an image to
  some window, groundtruth boxes must be re-expressed in the crop's frame.

  Args:
    boxlist: A BoxList object holding N boxes.
    window: A rank 1 tensor [4].
    scope: name scope.

  Returns:
    Returns a BoxList object with N boxes.
  """
  with tf.name_scope(scope, 'ChangeCoordinateFrame'):
    win_height = window[2] - window[0]
    win_width = window[3] - window[1]
    # Translate so the window's min corner is the origin, then rescale so the
    # window spans the unit square.
    origin_shift = [window[0], window[1], window[0], window[1]]
    shifted = box_list.BoxList(boxlist.get() - origin_shift)
    boxlist_new = scale(shifted, 1.0 / win_height, 1.0 / win_width)
    return _copy_extra_fields(boxlist_new, boxlist)
def sq_dist(boxlist1, boxlist2, scope=None):
  """Computes the pairwise squared distances between box corners.

  Each box is treated as a point in 4-d Euclidean space, and the squared
  distance between every pair of points across the two collections is
  computed via the identity
    ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x'y.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes
    scope: name scope.

  Returns:
    a tensor with shape [N, M] representing pairwise distances
  """
  with tf.name_scope(scope, 'SqDist'):
    corners1 = boxlist1.get()
    corners2 = boxlist2.get()
    sqnorm1 = tf.reduce_sum(tf.square(corners1), 1, keep_dims=True)
    sqnorm2 = tf.reduce_sum(tf.square(corners2), 1, keep_dims=True)
    # Cross inner products: [N, 4] x [4, M] -> [N, M].
    innerprod = tf.matmul(corners1, corners2,
                          transpose_a=False, transpose_b=True)
    return sqnorm1 + tf.transpose(sqnorm2) - 2.0 * innerprod
def boolean_mask(boxlist, indicator, fields=None, scope=None):
  """Select boxes from BoxList according to indicator and return new BoxList.

  Returns the subset of boxes marked True by `indicator`, along with the
  corresponding rows of every extra field (or only the fields listed in
  `fields`, if given; an empty list keeps just the box coordinates).

  Args:
    boxlist: BoxList holding N boxes
    indicator: a rank-1 boolean tensor
    fields: (optional) list of fields to also gather from. If None (default),
      all fields are gathered from. Pass an empty fields list to only gather
      the box coordinates.
    scope: name scope.

  Returns:
    subboxlist: a BoxList corresponding to the subset of the input BoxList
      specified by indicator

  Raises:
    ValueError: if `indicator` is not a rank-1 boolean tensor.
  """
  with tf.name_scope(scope, 'BooleanMask'):
    if indicator.shape.ndims != 1:
      raise ValueError('indicator should have rank 1')
    if indicator.dtype != tf.bool:
      raise ValueError('indicator should be a boolean tensor')
    subboxlist = box_list.BoxList(tf.boolean_mask(boxlist.get(), indicator))
    fields_to_copy = boxlist.get_extra_fields() if fields is None else fields
    for field in fields_to_copy:
      if not boxlist.has_field(field):
        raise ValueError('boxlist must contain all specified fields')
      # Apply the same row mask to the field tensor as to the boxes.
      subboxlist.add_field(
          field, tf.boolean_mask(boxlist.get_field(field), indicator))
    return subboxlist
def gather(boxlist, indices, fields=None, scope=None):
  """Gather boxes from BoxList according to indices and return new BoxList.

  Returns the boxes at the given row indices, along with the corresponding
  rows of every extra field (or only the fields listed in `fields`, if
  given; an empty list keeps just the box coordinates).

  Args:
    boxlist: BoxList holding N boxes
    indices: a rank-1 tensor of type int32 / int64
    fields: (optional) list of fields to also gather from. If None (default),
      all fields are gathered from. Pass an empty fields list to only gather
      the box coordinates.
    scope: name scope.

  Returns:
    subboxlist: a BoxList corresponding to the subset of the input BoxList
      specified by indices

  Raises:
    ValueError: if specified field is not contained in boxlist or if the
      indices are not of type int32
  """
  with tf.name_scope(scope, 'Gather'):
    if len(indices.shape.as_list()) != 1:
      raise ValueError('indices should have rank 1')
    if indices.dtype != tf.int32 and indices.dtype != tf.int64:
      raise ValueError('indices should be an int32 / int64 tensor')
    subboxlist = box_list.BoxList(tf.gather(boxlist.get(), indices))
    fields_to_copy = boxlist.get_extra_fields() if fields is None else fields
    for field in fields_to_copy:
      if not boxlist.has_field(field):
        raise ValueError('boxlist must contain all specified fields')
      # Gather the same rows from the field tensor as from the boxes.
      subboxlist.add_field(field, tf.gather(boxlist.get_field(field), indices))
    return subboxlist
def concatenate(boxlists, fields=None, scope=None):
  """Concatenate list of BoxLists.

  This op concatenates a list of input BoxLists into a larger BoxList. It also
  handles concatenation of BoxList fields as long as the field tensor shapes
  are equal except for the first dimension.

  Args:
    boxlists: list of BoxList objects
    fields: optional list of fields to also concatenate. By default, all
      fields from the first BoxList in the list are included in the
      concatenation.
    scope: name scope.

  Returns:
    a BoxList with number of boxes equal to
      sum([boxlist.num_boxes() for boxlist in BoxList])

  Raises:
    ValueError: if boxlists is invalid (i.e., is not a list, is empty, or
      contains non BoxList objects), or if requested fields are not contained
      in all boxlists
  """
  with tf.name_scope(scope, 'Concatenate'):
    if not isinstance(boxlists, list):
      raise ValueError('boxlists should be a list')
    if not boxlists:
      raise ValueError('boxlists should have nonzero length')
    for boxlist in boxlists:
      if not isinstance(boxlist, box_list.BoxList):
        raise ValueError('all elements of boxlists should be BoxList objects')
    # Box coordinates are concatenated along dimension 0 (the box count).
    concatenated = box_list.BoxList(
        tf.concat([boxlist.get() for boxlist in boxlists], 0))
    if fields is None:
      fields = boxlists[0].get_extra_fields()
    for field in fields:
      # All boxlists must agree on the field's shape except along dimension 0
      # (the variable number of boxes); -1 marks that dimension as ignored in
      # the comparison below.
      first_field_shape = boxlists[0].get_field(field).get_shape().as_list()
      first_field_shape[0] = -1
      if None in first_field_shape:
        raise ValueError('field %s must have fully defined shape except for the'
                         ' 0th dimension.' % field)
      for boxlist in boxlists:
        if not boxlist.has_field(field):
          raise ValueError('boxlist must contain all requested fields')
        field_shape = boxlist.get_field(field).get_shape().as_list()
        field_shape[0] = -1
        if field_shape != first_field_shape:
          raise ValueError('field %s must have same shape for all boxlists '
                           'except for the 0th dimension.' % field)
      # Shapes agree: concatenate this field across all boxlists along dim 0.
      concatenated_field = tf.concat(
          [boxlist.get_field(field) for boxlist in boxlists], 0)
      concatenated.add_field(field, concatenated_field)
    return concatenated
def sort_by_field(boxlist, field, order=SortOrder.descend, scope=None):
  """Sort boxes and associated fields according to a scalar field.

  A common use case is reordering the boxes according to descending scores.

  Args:
    boxlist: BoxList holding N boxes.
    field: A BoxList field for sorting and reordering the BoxList.
    order: (Optional) descend or ascend. Default is descend.
    scope: name scope.

  Returns:
    sorted_boxlist: A sorted BoxList with the field in the specified order.

  Raises:
    ValueError: if specified field does not exist
    ValueError: if the order is not either descend or ascend
  """
  with tf.name_scope(scope, 'SortByField'):
    if order != SortOrder.descend and order != SortOrder.ascend:
      raise ValueError('Invalid sort order')
    field_to_sort = boxlist.get_field(field)
    if len(field_to_sort.shape.as_list()) != 1:
      raise ValueError('Field should have rank 1')
    num_boxes = boxlist.num_boxes()
    num_entries = tf.size(field_to_sort)
    # Runtime check that the sort key has exactly one entry per box.
    length_assert = tf.Assert(
        tf.equal(num_boxes, num_entries),
        ['Incorrect field size: actual vs expected.', num_entries, num_boxes])
    with tf.control_dependencies([length_assert]):
      # TODO(derekjchow): Remove with tf.device when top_k operation runs
      # correctly on GPU.
      with tf.device('/cpu:0'):
        # top_k with k == num_boxes produces a full descending sort.
        _, sorted_indices = tf.nn.top_k(field_to_sort, num_boxes, sorted=True)
      if order == SortOrder.ascend:
        # Ascending order is the reverse of top_k's descending output.
        sorted_indices = tf.reverse_v2(sorted_indices, [0])
      return gather(boxlist, sorted_indices)
def visualize_boxes_in_image(image, boxlist, normalized=False, scope=None):
  """Overlay bounding box list on image.

  Currently this visualization plots a 1 pixel thick red bounding box on top
  of the image. Note that tf.image.draw_bounding_boxes essentially is
  1 indexed.

  Args:
    image: an image tensor with shape [height, width, 3]
    boxlist: a BoxList
    normalized: (boolean) specify whether corners are to be interpreted
      as absolute coordinates in image space or normalized with respect to the
      image size.
    scope: name scope.

  Returns:
    image_and_boxes: an image tensor with shape [height, width, 3]
  """
  with tf.name_scope(scope, 'VisualizeBoxesInImage'):
    if not normalized:
      # draw_bounding_boxes expects normalized corners; rescale first.
      height, width, _ = tf.unstack(tf.shape(image))
      boxlist = scale(boxlist,
                      1.0 / tf.cast(height, tf.float32),
                      1.0 / tf.cast(width, tf.float32))
    # Add a batch dimension for the drawing op, then strip it again.
    batched_corners = tf.expand_dims(boxlist.get(), 0)
    batched_image = tf.expand_dims(image, 0)
    drawn = tf.image.draw_bounding_boxes(batched_image, batched_corners)
    return tf.squeeze(drawn, [0])
def filter_field_value_equals(boxlist, field, value, scope=None):
  """Filter to keep only boxes with field entries equal to the given value.

  Args:
    boxlist: BoxList holding N boxes.
    field: field name for filtering.
    value: scalar value.
    scope: name scope.

  Returns:
    a BoxList holding M boxes where M <= N

  Raises:
    ValueError: if boxlist not a BoxList object or if it does not have
      the specified field.
  """
  with tf.name_scope(scope, 'FilterFieldValueEquals'):
    if not isinstance(boxlist, box_list.BoxList):
      raise ValueError('boxlist must be a BoxList')
    if not boxlist.has_field(field):
      raise ValueError('boxlist must contain the specified field')
    # Indices of rows whose field entry matches `value` exactly.
    matches = tf.equal(boxlist.get_field(field), value)
    gather_index = tf.reshape(tf.where(matches), [-1])
    return gather(boxlist, gather_index)
def filter_greater_than(boxlist, thresh, scope=None):
  """Filter to keep only boxes with score exceeding a given threshold.

  This op keeps the collection of boxes whose corresponding scores are
  greater than the input threshold.

  TODO(jonathanhuang): Change function name to filter_scores_greater_than

  Args:
    boxlist: BoxList holding N boxes. Must contain a 'scores' field
      representing detection scores.
    thresh: scalar threshold
    scope: name scope.

  Returns:
    a BoxList holding M boxes where M <= N

  Raises:
    ValueError: if boxlist not a BoxList object or if it does not
      have a scores field
  """
  with tf.name_scope(scope, 'FilterGreaterThan'):
    if not isinstance(boxlist, box_list.BoxList):
      raise ValueError('boxlist must be a BoxList')
    if not boxlist.has_field('scores'):
      raise ValueError('input boxlist must have \'scores\' field')
    scores = boxlist.get_field('scores')
    scores_rank = len(scores.shape.as_list())
    if scores_rank > 2:
      raise ValueError('Scores should have rank 1 or 2')
    if scores_rank == 2 and scores.shape.as_list()[1] != 1:
      raise ValueError('Scores should have rank 1 or have shape '
                       'consistent with [None, 1]')
    # tf.where on a [N] (or [N, 1]) bool tensor yields int64 indices; cast to
    # int32 for gather.
    high_score_indices = tf.cast(
        tf.reshape(tf.where(tf.greater(scores, thresh)), [-1]), tf.int32)
    return gather(boxlist, high_score_indices)
def non_max_suppression(boxlist, thresh, max_output_size, scope=None):
  """Non maximum suppression.

  This op greedily selects a subset of detection bounding boxes, pruning
  away boxes that have high IOU (intersection over union) overlap (> thresh)
  with already selected boxes. Note that this only works for a single class ---
  to apply NMS to multi-class predictions, use MultiClassNonMaxSuppression.

  Args:
    boxlist: BoxList holding N boxes. Must contain a 'scores' field
      representing detection scores.
    thresh: scalar threshold
    max_output_size: maximum number of retained boxes
    scope: name scope.

  Returns:
    a BoxList holding M boxes where M <= max_output_size

  Raises:
    ValueError: if thresh is not in [0, 1]
  """
  with tf.name_scope(scope, 'NonMaxSuppression'):
    if not 0 <= thresh <= 1.0:
      raise ValueError('thresh must be between 0 and 1')
    if not isinstance(boxlist, box_list.BoxList):
      raise ValueError('boxlist must be a BoxList')
    if not boxlist.has_field('scores'):
      raise ValueError('input boxlist must have \'scores\' field')
    # Delegate the greedy selection to TF's built-in NMS kernel.
    selected_indices = tf.image.non_max_suppression(
        boxlist.get(),
        boxlist.get_field('scores'),
        max_output_size,
        iou_threshold=thresh)
    return gather(boxlist, selected_indices)
def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):
  """Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.

  Args:
    boxlist_to_copy_to: BoxList to which extra fields are copied.
    boxlist_to_copy_from: BoxList from which fields are copied.

  Returns:
    boxlist_to_copy_to with extra fields.
  """
  # Note: field tensors are shared, not duplicated, between the two boxlists.
  for extra_field in boxlist_to_copy_from.get_extra_fields():
    boxlist_to_copy_to.add_field(
        extra_field, boxlist_to_copy_from.get_field(extra_field))
  return boxlist_to_copy_to
def to_normalized_coordinates(boxlist, height, width,
                              check_range=True, scope=None):
  """Converts absolute box coordinates to normalized coordinates in [0, 1].

  Usually one uses the dynamic shape of the image or conv-layer tensor:
    boxlist = box_list_ops.to_normalized_coordinates(boxlist,
                                                     tf.shape(images)[1],
                                                     tf.shape(images)[2]),

  This function raises an assertion failed error at graph execution time when
  the maximum coordinate is smaller than 1.01 (which means that coordinates are
  already normalized). The value 1.01 is to deal with small rounding errors.

  Args:
    boxlist: BoxList with coordinates in terms of pixel-locations.
    height: Maximum value for height of absolute box coordinates.
    width: Maximum value for width of absolute box coordinates.
    check_range: If True, checks if the coordinates are normalized or not.
    scope: name scope.

  Returns:
    boxlist with normalized coordinates in [0, 1].
  """
  with tf.name_scope(scope, 'ToNormalizedCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)
    if check_range:
      # Guard against double-normalization: if all coordinates are already
      # <= 1.01, the input was (most likely) normalized and we fail loudly.
      coord_max = tf.reduce_max(boxlist.get())
      max_assert = tf.Assert(tf.greater(coord_max, 1.01),
                             ['max value is lower than 1.01: ', coord_max])
      with tf.control_dependencies([max_assert]):
        # Route `width` through identity so the assert runs before scaling.
        width = tf.identity(width)
    return scale(boxlist, 1 / height, 1 / width)
def to_absolute_coordinates(boxlist,
                            height,
                            width,
                            check_range=True,
                            maximum_normalized_coordinate=1.01,
                            scope=None):
  """Converts normalized box coordinates to absolute pixel coordinates.

  This function raises an assertion failed error when the maximum box coordinate
  value is larger than maximum_normalized_coordinate (in which case coordinates
  are already absolute).

  Args:
    boxlist: BoxList with coordinates in range [0, 1].
    height: Maximum value for height of absolute box coordinates.
    width: Maximum value for width of absolute box coordinates.
    check_range: If True, checks if the coordinates are normalized or not.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized, default to 1.01.
    scope: name scope.

  Returns:
    boxlist with absolute coordinates in terms of the image size.
  """
  with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)
    # Ensure range of input boxes is correct.
    if check_range:
      # Guard against double-conversion: coordinates above the tolerance are
      # (most likely) already absolute, so fail loudly at execution time.
      box_maximum = tf.reduce_max(boxlist.get())
      max_assert = tf.Assert(
          tf.greater_equal(maximum_normalized_coordinate, box_maximum),
          ['maximum box coordinate value is larger '
           'than %f: ' % maximum_normalized_coordinate, box_maximum])
      with tf.control_dependencies([max_assert]):
        # Route `width` through identity so the assert runs before scaling.
        width = tf.identity(width)
    return scale(boxlist, height, width)
def refine_boxes_multi_class(pool_boxes,
                             num_classes,
                             nms_iou_thresh,
                             nms_max_detections,
                             voting_iou_thresh=0.5):
  """Refines a pool of boxes using non max suppression and box voting.

  Box refinement is done independently for each class.

  Args:
    pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must
      have a rank 1 'scores' field and a rank 1 'classes' field.
    num_classes: (int scalar) Number of classes.
    nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS).
    nms_max_detections: (int scalar) maximum output size for NMS.
    voting_iou_thresh: (float scalar) iou threshold for box voting.

  Returns:
    BoxList of refined boxes.

  Raises:
    ValueError: if
      a) nms_iou_thresh or voting_iou_thresh is not in [0, 1].
      b) pool_boxes is not a BoxList.
      c) pool_boxes does not have a scores and classes field.
  """
  if not 0.0 <= nms_iou_thresh <= 1.0:
    raise ValueError('nms_iou_thresh must be between 0 and 1')
  if not 0.0 <= voting_iou_thresh <= 1.0:
    raise ValueError('voting_iou_thresh must be between 0 and 1')
  if not isinstance(pool_boxes, box_list.BoxList):
    raise ValueError('pool_boxes must be a BoxList')
  if not pool_boxes.has_field('scores'):
    raise ValueError('pool_boxes must have a \'scores\' field')
  if not pool_boxes.has_field('classes'):
    raise ValueError('pool_boxes must have a \'classes\' field')

  # Refine each class independently, then merge and re-sort by score.
  refined_per_class = []
  for class_id in range(num_classes):
    class_boxes = filter_field_value_equals(pool_boxes, 'classes', class_id)
    refined_per_class.append(
        refine_boxes(class_boxes, nms_iou_thresh, nms_max_detections,
                     voting_iou_thresh))
  return sort_by_field(concatenate(refined_per_class), 'scores')
def refine_boxes(pool_boxes,
                 nms_iou_thresh,
                 nms_max_detections,
                 voting_iou_thresh=0.5):
  """Refines a pool of boxes using non max suppression and box voting.

  Args:
    pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must
      have a rank 1 'scores' field.
    nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS).
    nms_max_detections: (int scalar) maximum output size for NMS.
    voting_iou_thresh: (float scalar) iou threshold for box voting.

  Returns:
    BoxList of refined boxes.

  Raises:
    ValueError: if
      a) nms_iou_thresh or voting_iou_thresh is not in [0, 1].
      b) pool_boxes is not a BoxList.
      c) pool_boxes does not have a scores field.
  """
  if not 0.0 <= nms_iou_thresh <= 1.0:
    raise ValueError('nms_iou_thresh must be between 0 and 1')
  if not 0.0 <= voting_iou_thresh <= 1.0:
    raise ValueError('voting_iou_thresh must be between 0 and 1')
  if not isinstance(pool_boxes, box_list.BoxList):
    raise ValueError('pool_boxes must be a BoxList')
  if not pool_boxes.has_field('scores'):
    raise ValueError('pool_boxes must have a \'scores\' field')

  # First thin the pool with NMS, then refine the survivors by voting over
  # the full (pre-NMS) pool.
  nms_boxes = non_max_suppression(
      pool_boxes, nms_iou_thresh, nms_max_detections)
  return box_voting(nms_boxes, pool_boxes, voting_iou_thresh)
def box_voting(selected_boxes, pool_boxes, iou_thresh=0.5):
  """Performs box voting as described in S. Gidaris and N. Komodakis, ICCV 2015.

  Performs box voting as described in 'Object detection via a multi-region &
  semantic segmentation-aware CNN model', Gidaris and Komodakis, ICCV 2015. For
  each box 'B' in selected_boxes, we find the set 'S' of boxes in pool_boxes
  with iou overlap >= iou_thresh. The location of B is set to the weighted
  average location of boxes in S (scores are used for weighting). And the score
  of B is set to the average score of boxes in S.

  Args:
    selected_boxes: BoxList containing a subset of boxes in pool_boxes. These
      boxes are usually selected from pool_boxes using non max suppression.
    pool_boxes: BoxList containing a set of (possibly redundant) boxes.
    iou_thresh: (float scalar) iou threshold for matching boxes in
      selected_boxes and pool_boxes.

  Returns:
    BoxList containing averaged locations and scores for each box in
    selected_boxes.

  Raises:
    ValueError: if
      a) selected_boxes or pool_boxes is not a BoxList.
      b) if iou_thresh is not in [0, 1].
      c) pool_boxes does not have a scores field.
  """
  if not 0.0 <= iou_thresh <= 1.0:
    raise ValueError('iou_thresh must be between 0 and 1')
  if not isinstance(selected_boxes, box_list.BoxList):
    raise ValueError('selected_boxes must be a BoxList')
  if not isinstance(pool_boxes, box_list.BoxList):
    raise ValueError('pool_boxes must be a BoxList')
  if not pool_boxes.has_field('scores'):
    raise ValueError('pool_boxes must have a \'scores\' field')
  # [num_selected, num_pool] 0/1 indicator of which pool boxes vote for each
  # selected box.
  # NOTE(review): tf.greater is a strict '>' while the docstring above says
  # 'iou overlap >= iou_thresh' — confirm which is intended.
  iou_ = iou(selected_boxes, pool_boxes)
  match_indicator = tf.to_float(tf.greater(iou_, iou_thresh))
  num_matches = tf.reduce_sum(match_indicator, 1)
  # TODO(kbanoop): Handle the case where some boxes in selected_boxes do not
  # match to any boxes in pool_boxes. For such boxes without any matches, we
  # should return the original boxes without voting.
  match_assert = tf.Assert(
      tf.reduce_all(tf.greater(num_matches, 0)),
      ['Each box in selected_boxes must match with at least one box '
       'in pool_boxes.'])
  scores = tf.expand_dims(pool_boxes.get_field('scores'), 1)
  scores_assert = tf.Assert(
      tf.reduce_all(tf.greater_equal(scores, 0)),
      ['Scores must be non negative.'])
  with tf.control_dependencies([scores_assert, match_assert]):
    # Total matched score per selected box: used both to normalize the
    # weighted location average and (divided by match count) as output score.
    sum_scores = tf.matmul(match_indicator, scores)
  averaged_scores = tf.reshape(sum_scores, [-1]) / num_matches
  # Score-weighted average of the matched pool boxes' corners.
  box_locations = tf.matmul(match_indicator,
                            pool_boxes.get() * scores) / sum_scores
  averaged_boxes = box_list.BoxList(box_locations)
  _copy_extra_fields(averaged_boxes, selected_boxes)
  averaged_boxes.add_field('scores', averaged_scores)
  return averaged_boxes
def pad_or_clip_box_list(boxlist, num_boxes, scope=None):
  """Pads or clips all fields of a BoxList.

  Args:
    boxlist: A BoxList with arbitrary of number of boxes.
    num_boxes: First num_boxes in boxlist are kept.
      The fields are zero-padded if num_boxes is bigger than the
      actual number of boxes.
    scope: name scope.

  Returns:
    BoxList with all fields padded or clipped.
  """
  with tf.name_scope(scope, 'PadOrClipBoxList'):
    # Pad/clip the coordinates and every extra field to exactly num_boxes
    # rows so downstream shapes are static.
    resized = box_list.BoxList(
        shape_utils.pad_or_clip_tensor(boxlist.get(), num_boxes))
    for field in boxlist.get_extra_fields():
      resized.add_field(
          field,
          shape_utils.pad_or_clip_tensor(boxlist.get_field(field), num_boxes))
    return resized
def select_random_box(boxlist,
                      default_box=None,
                      seed=None,
                      scope=None):
  """Selects a random bounding box from a `BoxList`.

  Args:
    boxlist: A BoxList.
    default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`,
      this default box will be returned. If None, will use a default box of
      [[-1., -1., -1., -1.]].
    seed: Random seed.
    scope: Name scope.

  Returns:
    bbox: A [1, 4] tensor with a random bounding box.
    valid: A bool tensor indicating whether a valid bounding box is returned
      (True) or whether the default box is returned (False).
  """
  with tf.name_scope(scope, 'SelectRandomBox'):
    bboxes = boxlist.get()
    combined_shape = shape_utils.combined_static_and_dynamic_shape(bboxes)
    number_of_boxes = combined_shape[0]
    # Bug fix: `default_box = default_box or tf.constant(...)` invokes
    # Tensor.__bool__ when the caller passes a tensor, which raises a
    # TypeError; test for None explicitly instead.
    if default_box is None:
      default_box = tf.constant([[-1., -1., -1., -1.]])

    def select_box():
      # Uniformly pick one row; only executed when the boxlist is non-empty.
      random_index = tf.random_uniform([],
                                       maxval=number_of_boxes,
                                       dtype=tf.int32,
                                       seed=seed)
      return tf.expand_dims(bboxes[random_index], axis=0), tf.constant(True)

    return tf.cond(
        tf.greater_equal(number_of_boxes, 1),
        true_fn=select_box,
        false_fn=lambda: (default_box, tf.constant(False)))
def get_minimal_coverage_box(boxlist,
                             default_box=None,
                             scope=None):
  """Creates a single bounding box which covers all boxes in the boxlist.

  Args:
    boxlist: A Boxlist.
    default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`,
      this default box will be returned. If None, will use a default box of
      [[0., 0., 1., 1.]].
    scope: Name scope.

  Returns:
    A [1, 4] float32 tensor with a bounding box that tightly covers all the
    boxes in the box list. If the boxlist does not contain any boxes, the
    default box is returned.
  """
  with tf.name_scope(scope, 'CreateCoverageBox'):
    num_boxes = boxlist.num_boxes()

    def coverage_box(bboxes):
      # Tight cover: min over all min-corners, max over all max-corners.
      y_min, x_min, y_max, x_max = tf.split(
          value=bboxes, num_or_size_splits=4, axis=1)
      y_min_coverage = tf.reduce_min(y_min, axis=0)
      x_min_coverage = tf.reduce_min(x_min, axis=0)
      y_max_coverage = tf.reduce_max(y_max, axis=0)
      x_max_coverage = tf.reduce_max(x_max, axis=0)
      return tf.stack(
          [y_min_coverage, x_min_coverage, y_max_coverage, x_max_coverage],
          axis=1)

    # Bug fix: `default_box or tf.constant(...)` invokes Tensor.__bool__ when
    # the caller passes a tensor, which raises a TypeError; test for None
    # explicitly instead.
    if default_box is None:
      default_box = tf.constant([[0., 0., 1., 1.]])
    return tf.cond(
        tf.greater_equal(num_boxes, 1),
        true_fn=lambda: coverage_box(boxlist.get()),
        false_fn=lambda: default_box)
| |
# Copyright (c) 2014 eBay Software Foundation
# Copyright 2015 HP Software, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from troveclient import common
from stashboard import api as trove_api
from stashboard.content.database_clusters \
import cluster_manager
from stashboard.content.database_clusters import tables
from stashboard.test import helpers as test
# Pre-resolved URLs / view names for the database-clusters panel, shared by
# all test cases below.
INDEX_URL = reverse('horizon:project:database_clusters:index')
LAUNCH_URL = reverse('horizon:project:database_clusters:launch')
DETAILS_URL = reverse('horizon:project:database_clusters:detail', args=['id'])
RESET_PASSWORD_VIEWNAME = 'horizon:project:database_clusters:reset_password'
class ClustersTests(test.TestCase):
@test.create_stubs({trove_api.trove: ('cluster_list',
'flavor_list')})
def test_index(self):
clusters = common.Paginated(self.trove_clusters.list())
trove_api.trove.cluster_list(IsA(http.HttpRequest), marker=None)\
.AndReturn(clusters)
trove_api.trove.flavor_list(IsA(http.HttpRequest))\
.AndReturn(self.flavors.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/database_clusters/index.html')
@test.create_stubs({trove_api.trove: ('cluster_list',
'flavor_list')})
def test_index_flavor_exception(self):
clusters = common.Paginated(self.trove_clusters.list())
trove_api.trove.cluster_list(IsA(http.HttpRequest), marker=None)\
.AndReturn(clusters)
trove_api.trove.flavor_list(IsA(http.HttpRequest))\
.AndRaise(self.exceptions.trove)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/database_clusters/index.html')
self.assertMessageCount(res, error=1)
@test.create_stubs({trove_api.trove: ('cluster_list',)})
def test_index_list_exception(self):
trove_api.trove.cluster_list(IsA(http.HttpRequest), marker=None)\
.AndRaise(self.exceptions.trove)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/database_clusters/index.html')
self.assertMessageCount(res, error=1)
@test.create_stubs({trove_api.trove: ('cluster_list',
'flavor_list')})
def test_index_pagination(self):
clusters = self.trove_clusters.list()
last_record = clusters[1]
clusters = common.Paginated(clusters, next_marker="foo")
trove_api.trove.cluster_list(IsA(http.HttpRequest), marker=None)\
.AndReturn(clusters)
trove_api.trove.flavor_list(IsA(http.HttpRequest))\
.AndReturn(self.flavors.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/database_clusters/index.html')
self.assertContains(
res, 'marker=' + last_record.id)
@test.create_stubs({trove_api.trove: ('cluster_list',
'flavor_list')})
def test_index_flavor_list_exception(self):
clusters = common.Paginated(self.trove_clusters.list())
trove_api.trove.cluster_list(IsA(http.HttpRequest), marker=None)\
.AndReturn(clusters)
trove_api.trove.flavor_list(IsA(http.HttpRequest))\
.AndRaise(self.exceptions.trove)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/database_clusters/index.html')
self.assertMessageCount(res, error=1)
@test.create_stubs({trove_api.trove: ('datastore_flavors',
'datastore_list',
'datastore_version_list'),
api.base: ['is_service_enabled']})
def test_launch_cluster(self):
api.base.is_service_enabled(IsA(http.HttpRequest), 'network')\
.AndReturn(False)
filtered_datastores = self._get_filtered_datastores('mongodb')
trove_api.trove.datastore_flavors(IsA(http.HttpRequest),
'mongodb', '2.6')\
.AndReturn(self.flavors.list())
trove_api.trove.datastore_list(IsA(http.HttpRequest))\
.AndReturn(filtered_datastores)
trove_api.trove.datastore_version_list(IsA(http.HttpRequest),
IsA(str))\
.AndReturn(
self._get_filtered_datastore_versions(filtered_datastores))
self.mox.ReplayAll()
res = self.client.get(LAUNCH_URL)
self.assertTemplateUsed(res, 'project/database_clusters/launch.html')
def test_launch_cluster_mongo_fields(self):
datastore = 'mongodb'
fields = self.launch_cluster_fields_setup(datastore, '2.6')
self.assertTrue(self._contains_datastore_in_attribute(
fields['flavor'], datastore))
self.assertTrue(self._contains_datastore_in_attribute(
fields['num_instances'], datastore))
self.assertTrue(self._contains_datastore_in_attribute(
fields['num_shards'], datastore))
self.assertFalse(self._contains_datastore_in_attribute(
fields['root_password'], datastore))
self.assertFalse(self._contains_datastore_in_attribute(
fields['num_instances_vertica'], datastore))
self.assertFalse(self._contains_datastore_in_attribute(
fields['vertica_flavor'], datastore))
def test_launch_cluster_redis_fields(self):
datastore = 'redis'
fields = self.launch_cluster_fields_setup(datastore, '3.0')
self.assertTrue(self._contains_datastore_in_attribute(
fields['flavor'], datastore))
self.assertTrue(self._contains_datastore_in_attribute(
fields['num_instances'], datastore))
self.assertFalse(self._contains_datastore_in_attribute(
fields['num_shards'], datastore))
self.assertFalse(self._contains_datastore_in_attribute(
fields['root_password'], datastore))
self.assertFalse(self._contains_datastore_in_attribute(
fields['num_instances_vertica'], datastore))
self.assertFalse(self._contains_datastore_in_attribute(
fields['vertica_flavor'], datastore))
def test_launch_cluster_vertica_fields(self):
datastore = 'vertica'
fields = self.launch_cluster_fields_setup(datastore, '7.1')
self.assertFalse(self._contains_datastore_in_attribute(
fields['flavor'], datastore))
self.assertFalse(self._contains_datastore_in_attribute(
fields['num_instances'], datastore))
self.assertFalse(self._contains_datastore_in_attribute(
fields['num_shards'], datastore))
self.assertTrue(self._contains_datastore_in_attribute(
fields['root_password'], datastore))
self.assertTrue(self._contains_datastore_in_attribute(
fields['num_instances_vertica'], datastore))
self.assertTrue(self._contains_datastore_in_attribute(
fields['vertica_flavor'], datastore))
@test.create_stubs({trove_api.trove: ('datastore_flavors',
'datastore_list',
'datastore_version_list'),
api.base: ['is_service_enabled']})
def launch_cluster_fields_setup(self, datastore, datastore_version):
api.base.is_service_enabled(IsA(http.HttpRequest), 'network')\
.AndReturn(False)
filtered_datastores = self._get_filtered_datastores(datastore)
trove_api.trove.datastore_flavors(IsA(http.HttpRequest),
datastore, datastore_version)\
.AndReturn(self.flavors.list())
trove_api.trove.datastore_list(IsA(http.HttpRequest))\
.AndReturn(filtered_datastores)
trove_api.trove.datastore_version_list(IsA(http.HttpRequest),
IsA(str))\
.AndReturn(
self._get_filtered_datastore_versions(filtered_datastores))
self.mox.ReplayAll()
res = self.client.get(LAUNCH_URL)
return res.context_data['form'].fields
    @test.create_stubs({trove_api.trove: ['datastore_flavors',
                                          'cluster_create',
                                          'datastore_list',
                                          'datastore_version_list'],
                        api.base: ['is_service_enabled']})
    def test_create_simple_cluster(self):
        """Launching a mongodb cluster succeeds with nova networking."""
        # Neutron disabled: the form submits an empty network value.
        api.base.is_service_enabled(IsA(http.HttpRequest), 'network')\
            .AndReturn(False)
        filtered_datastores = self._get_filtered_datastores('mongodb')
        trove_api.trove.datastore_flavors(IsA(http.HttpRequest),
                                          'mongodb', '2.6')\
            .AndReturn(self.flavors.list())
        trove_api.trove.datastore_list(IsA(http.HttpRequest))\
            .AndReturn(filtered_datastores)
        trove_api.trove.datastore_version_list(IsA(http.HttpRequest),
                                               IsA(str))\
            .AndReturn(
                self._get_filtered_datastore_versions(filtered_datastores))
        cluster_name = u'MyCluster'
        cluster_volume = 1
        cluster_flavor = u'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        cluster_instances = 3
        cluster_datastore = u'mongodb'
        cluster_datastore_version = u'2.6'
        cluster_network = u''
        # The view must pass exactly these arguments through to trove.
        trove_api.trove.cluster_create(
            IsA(http.HttpRequest),
            cluster_name,
            cluster_volume,
            cluster_flavor,
            cluster_instances,
            datastore=cluster_datastore,
            datastore_version=cluster_datastore_version,
            nics=cluster_network,
            root_password=None,
            locality=None).AndReturn(self.trove_clusters.first())
        self.mox.ReplayAll()
        # The form encodes datastore and version as 'name-version'.
        post = {
            'name': cluster_name,
            'volume': cluster_volume,
            'num_instances': cluster_instances,
            'num_shards': 1,
            'num_instances_per_shards': cluster_instances,
            'datastore': cluster_datastore + u'-' + cluster_datastore_version,
            'flavor': cluster_flavor,
            'network': cluster_network
        }
        res = self.client.post(LAUNCH_URL, post)
        self.assertNoFormErrors(res)
        self.assertMessageCount(success=1)
    @test.create_stubs({trove_api.trove: ['datastore_flavors',
                                          'cluster_create',
                                          'datastore_list',
                                          'datastore_version_list'],
                        api.neutron: ['network_list_for_tenant'],
                        api.base: ['is_service_enabled']})
    def test_create_simple_cluster_neutron(self):
        """Launching a mongodb cluster succeeds with neutron networking."""
        # Neutron enabled: the form offers the tenant's networks and the
        # selected network id is passed to trove as nics.
        api.base.is_service_enabled(IsA(http.HttpRequest), 'network')\
            .AndReturn(True)
        api.neutron.network_list_for_tenant(IsA(http.HttpRequest), '1')\
            .AndReturn(self.networks.list())
        filtered_datastores = self._get_filtered_datastores('mongodb')
        trove_api.trove.datastore_flavors(IsA(http.HttpRequest),
                                          'mongodb', '2.6')\
            .AndReturn(self.flavors.list())
        trove_api.trove.datastore_list(IsA(http.HttpRequest))\
            .AndReturn(filtered_datastores)
        trove_api.trove.datastore_version_list(IsA(http.HttpRequest),
                                               IsA(str))\
            .AndReturn(
                self._get_filtered_datastore_versions(filtered_datastores))
        cluster_name = u'MyCluster'
        cluster_volume = 1
        cluster_flavor = u'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        cluster_instances = 3
        cluster_datastore = u'mongodb'
        cluster_datastore_version = u'2.6'
        cluster_network = u'82288d84-e0a5-42ac-95be-e6af08727e42'
        trove_api.trove.cluster_create(
            IsA(http.HttpRequest),
            cluster_name,
            cluster_volume,
            cluster_flavor,
            cluster_instances,
            datastore=cluster_datastore,
            datastore_version=cluster_datastore_version,
            nics=cluster_network,
            root_password=None,
            locality=None).AndReturn(self.trove_clusters.first())
        self.mox.ReplayAll()
        post = {
            'name': cluster_name,
            'volume': cluster_volume,
            'num_instances': cluster_instances,
            'num_shards': 1,
            'num_instances_per_shards': cluster_instances,
            'datastore': cluster_datastore + u'-' + cluster_datastore_version,
            'flavor': cluster_flavor,
            'network': cluster_network
        }
        res = self.client.post(LAUNCH_URL, post)
        self.assertNoFormErrors(res)
        self.assertMessageCount(success=1)
@test.create_stubs({trove_api.trove: ['datastore_flavors',
'cluster_create',
'datastore_list',
'datastore_version_list'],
api.neutron: ['network_list_for_tenant']})
def test_create_simple_cluster_exception(self):
api.neutron.network_list_for_tenant(IsA(http.HttpRequest), '1')\
.AndReturn(self.networks.list())
filtered_datastores = self._get_filtered_datastores('mongodb')
trove_api.trove.datastore_flavors(IsA(http.HttpRequest),
'mongodb', '2.6')\
.AndReturn(self.flavors.list())
trove_api.trove.datastore_list(IsA(http.HttpRequest))\
.AndReturn(filtered_datastores)
trove_api.trove.datastore_version_list(IsA(http.HttpRequest),
IsA(str))\
.AndReturn(
self._get_filtered_datastore_versions(filtered_datastores))
cluster_name = u'MyCluster'
cluster_volume = 1
cluster_flavor = u'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
cluster_instances = 3
cluster_datastore = u'mongodb'
cluster_datastore_version = u'2.6'
cluster_network = u'82288d84-e0a5-42ac-95be-e6af08727e42'
trove_api.trove.cluster_create(
IsA(http.HttpRequest),
cluster_name,
cluster_volume,
cluster_flavor,
cluster_instances,
datastore=cluster_datastore,
datastore_version=cluster_datastore_version,
nics=cluster_network,
root_password=None,
locality=None).AndReturn(self.trove_clusters.first())
self.mox.ReplayAll()
post = {
'name': cluster_name,
'volume': cluster_volume,
'num_instances': cluster_instances,
'num_shards': 1,
'num_instances_per_shards': cluster_instances,
'datastore': cluster_datastore + u'-' + cluster_datastore_version,
'flavor': cluster_flavor,
'network': cluster_network
}
res = self.client.post(LAUNCH_URL, post)
self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({trove_api.trove: ('cluster_get',
                                          'instance_get',
                                          'flavor_get',)})
    def test_details(self):
        """The cluster detail page renders and shows the cluster's IP."""
        cluster = self.trove_clusters.first()
        # MultipleTimes(): the detail view looks these up once per
        # member instance.
        trove_api.trove.cluster_get(IsA(http.HttpRequest), cluster.id)\
            .MultipleTimes().AndReturn(cluster)
        trove_api.trove.instance_get(IsA(http.HttpRequest), IsA(str))\
            .MultipleTimes().AndReturn(self.databases.first())
        trove_api.trove.flavor_get(IsA(http.HttpRequest), IsA(str))\
            .MultipleTimes().AndReturn(self.flavors.first())
        self.mox.ReplayAll()
        details_url = reverse('horizon:project:database_clusters:detail',
                              args=[cluster.id])
        res = self.client.get(details_url)
        self.assertTemplateUsed(res, 'horizon/common/_detail.html')
        self.assertContains(res, cluster.ip[0])
    @test.create_stubs({trove_api.trove: ('cluster_get',
                                          'instance_get',
                                          'flavor_get',)})
    def test_details_without_locality(self):
        """Detail page omits the Locality row for clusters without one."""
        # The second fixture cluster has no locality set.
        cluster = self.trove_clusters.list()[1]
        trove_api.trove.cluster_get(IsA(http.HttpRequest), cluster.id) \
            .MultipleTimes().AndReturn(cluster)
        trove_api.trove.instance_get(IsA(http.HttpRequest), IsA(str)) \
            .MultipleTimes().AndReturn(self.databases.first())
        trove_api.trove.flavor_get(IsA(http.HttpRequest), IsA(str)) \
            .MultipleTimes().AndReturn(self.flavors.first())
        self.mox.ReplayAll()
        details_url = reverse('horizon:project:database_clusters:detail',
                              args=[cluster.id])
        res = self.client.get(details_url)
        self.assertTemplateUsed(res, 'horizon/common/_detail.html')
        self.assertNotContains(res, "Locality")
    @test.create_stubs({trove_api.trove: ('cluster_get',
                                          'instance_get',
                                          'flavor_get',)})
    def test_details_with_locality(self):
        """Detail page shows the Locality row when the cluster has one."""
        # The first fixture cluster has a locality set.
        cluster = self.trove_clusters.first()
        trove_api.trove.cluster_get(IsA(http.HttpRequest), cluster.id) \
            .MultipleTimes().AndReturn(cluster)
        trove_api.trove.instance_get(IsA(http.HttpRequest), IsA(str)) \
            .MultipleTimes().AndReturn(self.databases.first())
        trove_api.trove.flavor_get(IsA(http.HttpRequest), IsA(str)) \
            .MultipleTimes().AndReturn(self.flavors.first())
        self.mox.ReplayAll()
        details_url = reverse('horizon:project:database_clusters:detail',
                              args=[cluster.id])
        res = self.client.get(details_url)
        self.assertTemplateUsed(res, 'project/database_clusters/'
                                     '_detail_overview.html')
        self.assertContains(res, "Locality")
    @test.create_stubs(
        {trove_api.trove: ('cluster_get',
                           'cluster_grow'),
         cluster_manager: ('get',)})
    def test_grow_cluster(self):
        """Grow page lists staged instances; removing one and submitting
        sends the remaining instances to cluster_grow."""
        cluster = self.trove_clusters.first()
        trove_api.trove.cluster_get(IsA(http.HttpRequest), cluster.id)\
            .AndReturn(cluster)
        cluster_volume = 1
        flavor = self.flavors.first()
        cluster_flavor = flavor.id
        cluster_flavor_name = flavor.name
        # Instances staged in the grow manager: a master, a slave related
        # to it, and an unnamed instance.
        instances = [
            cluster_manager.ClusterInstance("id1", "name1", cluster_flavor,
                                            cluster_flavor_name,
                                            cluster_volume, "master", None,
                                            None),
            cluster_manager.ClusterInstance("id2", "name2", cluster_flavor,
                                            cluster_flavor_name,
                                            cluster_volume, "slave",
                                            "master", None),
            cluster_manager.ClusterInstance("id3", None, cluster_flavor,
                                            cluster_flavor_name,
                                            cluster_volume, None, None, None),
        ]
        manager = cluster_manager.ClusterInstanceManager(cluster.id)
        manager.instances = instances
        cluster_manager.get(cluster.id).MultipleTimes().AndReturn(manager)
        trove_api.trove.cluster_grow(IsA(http.HttpRequest),
                                     cluster.id,
                                     instances)
        self.mox.ReplayAll()
        url = reverse('horizon:project:database_clusters:cluster_grow_details',
                      args=[cluster.id])
        res = self.client.get(url)
        self.assertTemplateUsed(
            res, 'project/database_clusters/cluster_grow_details.html')
        table = res.context_data[
            "".join([tables.ClusterGrowInstancesTable.Meta.name, '_table'])]
        self.assertEqual(len(cluster.instances), len(table.data))
        # Remove one staged instance via its row action.
        action = "".join([tables.ClusterGrowInstancesTable.Meta.name, '__',
                          tables.ClusterGrowRemoveInstance.name, '__',
                          'id1'])
        self.client.post(url, {'action': action})
        self.assertEqual(len(cluster.instances) - 1, len(table.data))
        # Submit the grow action for the whole cluster.
        action = "".join([tables.ClusterGrowInstancesTable.Meta.name, '__',
                          tables.ClusterGrowAction.name, '__',
                          cluster.id])
        res = self.client.post(url, {'action': action})
        self.assertMessageCount(success=1)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({trove_api.trove: ('cluster_get',)})
    def test_grow_cluster_no_instances(self):
        """Submitting grow with nothing staged only shows an info message.

        cluster_grow is deliberately not stubbed: it must not be called.
        """
        cluster = self.trove_clusters.first()
        trove_api.trove.cluster_get(IsA(http.HttpRequest), cluster.id)\
            .AndReturn(cluster)
        self.mox.ReplayAll()
        url = reverse('horizon:project:database_clusters:cluster_grow_details',
                      args=[cluster.id])
        res = self.client.get(url)
        self.assertTemplateUsed(
            res, 'project/database_clusters/cluster_grow_details.html')
        action = "".join([tables.ClusterGrowInstancesTable.Meta.name, '__',
                          tables.ClusterGrowAction.name, '__',
                          cluster.id])
        self.client.post(url, {'action': action})
        self.assertMessageCount(info=1)
    @test.create_stubs(
        {trove_api.trove: ('cluster_get',
                           'cluster_grow',),
         cluster_manager: ('get',)})
    def test_grow_cluster_exception(self):
        """A trove failure during grow reports an error and redirects."""
        cluster = self.trove_clusters.first()
        trove_api.trove.cluster_get(IsA(http.HttpRequest), cluster.id)\
            .AndReturn(cluster)
        cluster_volume = 1
        flavor = self.flavors.first()
        cluster_flavor = flavor.id
        cluster_flavor_name = flavor.name
        # Same staged instances as the success case; only the trove
        # response differs (AndRaise below).
        instances = [
            cluster_manager.ClusterInstance("id1", "name1", cluster_flavor,
                                            cluster_flavor_name,
                                            cluster_volume, "master", None,
                                            None),
            cluster_manager.ClusterInstance("id2", "name2", cluster_flavor,
                                            cluster_flavor_name,
                                            cluster_volume, "slave",
                                            "master", None),
            cluster_manager.ClusterInstance("id3", None, cluster_flavor,
                                            cluster_flavor_name,
                                            cluster_volume, None, None, None),
        ]
        manager = cluster_manager.ClusterInstanceManager(cluster.id)
        manager.instances = instances
        cluster_manager.get(cluster.id).MultipleTimes().AndReturn(manager)
        trove_api.trove.cluster_grow(IsA(http.HttpRequest),
                                     cluster.id,
                                     instances).AndRaise(self.exceptions.trove)
        self.mox.ReplayAll()
        url = reverse('horizon:project:database_clusters:cluster_grow_details',
                      args=[cluster.id])
        res = self.client.get(url)
        self.assertTemplateUsed(
            res, 'project/database_clusters/cluster_grow_details.html')
        toSuppress = ["stashboard.content.database_clusters.tables"]
        # Suppress expected log messages in the test output
        loggers = []
        for cls in toSuppress:
            logger = logging.getLogger(cls)
            loggers.append((logger, logger.getEffectiveLevel()))
            logger.setLevel(logging.CRITICAL)
        try:
            action = "".join([tables.ClusterGrowInstancesTable.Meta.name, '__',
                              tables.ClusterGrowAction.name, '__',
                              cluster.id])
            res = self.client.post(url, {'action': action})
            self.assertMessageCount(error=1)
            self.assertRedirectsNoFollow(res, INDEX_URL)
        finally:
            # Restore the previous log levels
            for (log, level) in loggers:
                log.setLevel(level)
    @test.create_stubs({trove_api.trove: ('cluster_get',
                                          'cluster_shrink')})
    def test_shrink_cluster(self):
        """Shrink page lists members; the row action removes one of them."""
        cluster = self.trove_clusters.first()
        trove_api.trove.cluster_get(IsA(http.HttpRequest), cluster.id)\
            .MultipleTimes().AndReturn(cluster)
        instance_id = cluster.instances[0]['id']
        # cluster_shrink expects a list of {'id': ...} dicts.
        cluster_instances = [{'id': instance_id}]
        trove_api.trove.cluster_shrink(IsA(http.HttpRequest),
                                       cluster.id,
                                       cluster_instances)
        self.mox.ReplayAll()
        url = reverse(
            'horizon:project:database_clusters:cluster_shrink_details',
            args=[cluster.id])
        res = self.client.get(url)
        self.assertTemplateUsed(
            res, 'project/database_clusters/cluster_shrink_details.html')
        table = res.context_data[
            "".join([tables.ClusterShrinkInstancesTable.Meta.name, '_table'])]
        self.assertEqual(len(cluster.instances), len(table.data))
        action = "".join([tables.ClusterShrinkInstancesTable.Meta.name, '__',
                          tables.ClusterShrinkAction.name, '__',
                          instance_id])
        res = self.client.post(url, {'action': action})
        self.assertNoFormErrors(res)
        self.assertMessageCount(info=1)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({trove_api.trove: ('cluster_get',
                                          'cluster_shrink')})
    def test_shrink_cluster_exception(self):
        """A trove failure during shrink reports an error and redirects."""
        cluster = self.trove_clusters.first()
        trove_api.trove.cluster_get(IsA(http.HttpRequest), cluster.id)\
            .MultipleTimes().AndReturn(cluster)
        cluster_id = cluster.instances[0]['id']
        cluster_instances = [cluster_id]
        trove_api.trove.cluster_shrink(IsA(http.HttpRequest),
                                       cluster.id,
                                       cluster_instances)\
            .AndRaise(self.exceptions.trove)
        self.mox.ReplayAll()
        url = reverse(
            'horizon:project:database_clusters:cluster_shrink_details',
            args=[cluster.id])
        action = "".join([tables.ClusterShrinkInstancesTable.Meta.name, '__',
                          tables.ClusterShrinkAction.name, '__',
                          cluster_id])
        toSuppress = ["stashboard.content.database_clusters.tables"]
        # Suppress expected log messages in the test output
        loggers = []
        for cls in toSuppress:
            logger = logging.getLogger(cls)
            loggers.append((logger, logger.getEffectiveLevel()))
            logger.setLevel(logging.CRITICAL)
        try:
            res = self.client.post(url, {'action': action})
            self.assertMessageCount(error=1)
            self.assertRedirectsNoFollow(res, INDEX_URL)
        finally:
            # Restore the previous log levels
            for (log, level) in loggers:
                log.setLevel(level)
def _get_filtered_datastores(self, datastore):
filtered_datastore = []
for ds in self.datastores.list():
if datastore in ds.name:
filtered_datastore.append(ds)
return filtered_datastore
def _get_filtered_datastore_versions(self, datastores):
filtered_datastore_versions = []
for ds in datastores:
for dsv in self.datastore_versions.list():
if ds.id == dsv.datastore:
filtered_datastore_versions.append(dsv)
return filtered_datastore_versions
def _contains_datastore_in_attribute(self, field, datastore):
for key, value in field.widget.attrs.iteritems():
if datastore in key:
return True
return False
| |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests of the auto-batching VM."""
import functools
# Dependency imports
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_probability.python.experimental.auto_batching import numpy_backend
from tensorflow_probability.python.experimental.auto_batching import test_programs
from tensorflow_probability.python.experimental.auto_batching import tf_backend
from tensorflow_probability.python.experimental.auto_batching import virtual_machine as vm
from tensorflow_probability.python.internal import test_util
# Command-line flag naming the device ('cpu', 'gpu', ...) that the
# TF-backend tests are placed on (see VMTest._tfTestHelper).
flags.DEFINE_string('test_device', None,
                    'TensorFlow device on which to place operators under test')
FLAGS = flags.FLAGS
# Shared backend singletons used by every test in this module.
NP_BACKEND = numpy_backend.NumpyBackend()
TF_BACKEND = tf_backend.TensorFlowBackend()
# Variant without safety checks, used for the XLA-jitted path in
# VMTest._tfTestHelper.
TF_BACKEND_NO_ASSERTS = tf_backend.TensorFlowBackend(safety_checks=False)
def _constant_execute(inputs, backend):
  """Run the constant test program, which always returns 2, on `backend`."""
  with tf.compat.v2.name_scope('constant_program'):
    # Stack depth 4: the initial value plus two pushes to "answer".
    program = test_programs.constant_program()
    return vm.execute(program, [inputs], max_stack_depth=4, backend=backend)
def _single_if_execute(inputs, backend):
  """Run the single-if test program: returns n > 1 ? 2 : 0 per batch member."""
  with tf.compat.v2.name_scope('single_if_program'):
    program = test_programs.single_if_program()
    return vm.execute(program, [inputs], max_stack_depth=3, backend=backend)
def _product_type_execute(inputs, backend):
  """Run the pattern-variable program, exercising product (tuple) types."""
  with tf.compat.v2.name_scope('product_types_program'):
    program = test_programs.synthetic_pattern_variable_program()
    return vm.execute(program, [inputs], max_stack_depth=4, backend=backend)
def _fibonacci_execute(inputs, backend):
  """Run the Fibonacci test program; fib(0) = fib(1) = 1 by convention."""
  with tf.compat.v2.name_scope('fibonacci_program'):
    program = test_programs.fibonacci_program()
    return vm.execute(program, [inputs], max_stack_depth=15, backend=backend)
class VMTest(test_util.TestCase):
  """End-to-end tests of vm.execute over the numpy and TF backends."""
  def _tfTestHelper(self, run_asserts_fn, execute_program_fn):
    """Run `run_asserts_fn` on the TF backend, plain and XLA-jitted.

    Args:
      run_asserts_fn: callable taking an execute function; makes test
        assertions about its results.
      execute_program_fn: callable accepting `backend=`; runs the program
        under test.
    """
    # Note: test_device is 'cpu', 'gpu', etc.
    # Various int32 and int64 kernels are missing for GPU, so we skip direct
    # tests on the GPU device, but test XLA on GPU below.
    if 'cpu' in FLAGS.test_device.lower():
      # Make sure everything works with no XLA compilation.
      with tf.device('CPU:0'):
        run_asserts_fn(
            functools.partial(execute_program_fn, backend=TF_BACKEND))
    # Force XLA compilation using tf.function.
    backend = TF_BACKEND_NO_ASSERTS
    f = functools.partial(execute_program_fn, backend=backend)
    f = tf.function(f, autograph=False, jit_compile=True)
    with tf.device(FLAGS.test_device):
      run_asserts_fn(f)
  def testConstantNumpy(self):
    """Constant program returns 2 for every batch member (numpy)."""
    self.assertAllEqual([2], _constant_execute([5], NP_BACKEND))
    self.assertAllEqual([2, 2, 2], _constant_execute([5, 10, 15], NP_BACKEND))
  def testConstantTF(self):
    """Constant program returns 2 for every batch member (TF)."""
    def _asserts_fn(f):
      # Unknown-shape placeholder exercises dynamic batch sizes.
      ph = tf.compat.v1.placeholder_with_default(np.int64([8, 3]), shape=None)
      results = self.evaluate(
          [f(tf.cast([5], tf.int64)),
           f(tf.cast([5, 10, 15], tf.int64)),
           f(ph)])
      self.assertAllEqual([2], results[0])
      self.assertAllEqual([2, 2, 2], results[1])
      self.assertAllEqual([2, 2], results[2])
    self._tfTestHelper(_asserts_fn, _constant_execute)
  def testSingleIfNumpy(self):
    """single_if program computes n > 1 ? 2 : 0 (numpy)."""
    self.assertAllEqual([0], _single_if_execute([1], NP_BACKEND))
    self.assertAllEqual([2], _single_if_execute([3], NP_BACKEND))
    self.assertAllEqual([0, 2, 0], _single_if_execute([0, 5, -15], NP_BACKEND))
  def testSingleIfTF(self):
    """single_if program computes n > 1 ? 2 : 0 (TF)."""
    def _asserts_fn(f):
      ph = tf.compat.v1.placeholder_with_default(np.int64([-3, 7]), shape=None)
      results = self.evaluate(
          [f(tf.cast([1], tf.int64)),
           f(tf.cast([3], tf.int64)),
           f(tf.cast([0, 5, -15], tf.int64)),
           f(ph)])
      self.assertAllEqual([0], results[0])
      self.assertAllEqual([2], results[1])
      self.assertAllEqual([0, 2, 0], results[2])
      self.assertAllEqual([0, 2], results[3])
    self._tfTestHelper(_asserts_fn, _single_if_execute)
  def testProductTypesNumpy(self):
    """Pattern-variable program handles tuple-typed state (numpy)."""
    self.assertAllEqual(
        [[5, 6, 7], [6, 7, 8]], _product_type_execute([3, 4, 5], NP_BACKEND))
  def testProductTypesTF(self):
    """Pattern-variable program handles tuple-typed state (TF)."""
    def _asserts_fn(f):
      results = self.evaluate([f([3, 4, 5])])
      self.assertAllEqual([[5, 6, 7], [6, 7, 8]], results[0])
    self._tfTestHelper(_asserts_fn, _product_type_execute)
  def testFibonacciNumpy(self):
    """Recursive Fibonacci program (numpy): fib(5) == 8, etc."""
    self.assertAllEqual([8], _fibonacci_execute([5], NP_BACKEND))
    self.assertAllEqual(
        [8, 13, 34, 55], _fibonacci_execute([5, 6, 8, 9], NP_BACKEND))
  def testFibonacciTF(self):
    """Recursive Fibonacci program (TF), including dynamic batch shape."""
    def _asserts_fn(f):
      ph = tf.compat.v1.placeholder_with_default(
          np.int64([0, 1, 3]), shape=None)
      results = self.evaluate([f([5]), f([5, 6, 8, 9]), f(ph)])
      self.assertAllEqual([8], results[0])
      self.assertAllEqual([8, 13, 34, 55], results[1])
      self.assertAllEqual([1, 1, 3], results[2])
    self._tfTestHelper(_asserts_fn, _fibonacci_execute)
  def testPeaNutsNumpy(self):
    """Smoke test of the pea-nuts program (numpy): checks shape only."""
    def execute(batch_size, latent_size, data_size):
      data = np.random.normal(size=(data_size, latent_size)).astype(np.float32)
      def step_state(state):
        return state + np.sum(np.tensordot(data, state, ([1], [1])))
      state = np.random.normal(
          size=(batch_size, latent_size)).astype(np.float32)
      def choose_depth(count):
        del count
        return 3
      program = test_programs.pea_nuts_program(
          (latent_size,), choose_depth, step_state)
      input_counts = np.array([3] * batch_size)
      return vm.execute(
          program, [input_counts, state], 10, backend=NP_BACKEND)
    # Check that running the program doesn't crash
    result = execute(4, 3, 10)
    self.assertEqual((4, 3), result.shape)
  def testPeaNutsTF(self):
    """Smoke test of the pea-nuts program (TF): checks shape only."""
    batch_size = 4
    latent_size = 3
    data_size = 10
    def execute(_, backend):
      data = tf.random.normal(shape=(data_size, latent_size), dtype=np.float32)
      def step_state(state):
        return state + tf.reduce_sum(
            input_tensor=tf.tensordot(data, state, ([1], [1])))
      state = tf.random.normal(
          shape=(batch_size, latent_size), dtype=np.float32)
      def choose_depth(count):
        del count
        return 2
      program = test_programs.pea_nuts_program(
          (latent_size,), choose_depth, step_state)
      input_counts = np.array([3] * batch_size)
      return vm.execute(
          program, [input_counts, state], 10, backend=backend)
    # Check that running the program doesn't crash
    def _asserts_fn(f):
      result = self.evaluate(f(()))
      self.assertEqual((batch_size, latent_size), result.shape)
    self._tfTestHelper(_asserts_fn, execute)
# Standard TFP test entry point.
if __name__ == '__main__':
  test_util.main()
| |
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
Miscellaneous utility classes and functions for the dx-app-wizard command-line tool
'''
from __future__ import print_function, unicode_literals, division, absolute_import
import os, shutil, subprocess, re, json
import stat
from ..utils.printing import (BOLD, DNANEXUS_LOGO, ENDC, fill)
from ..cli import prompt_for_yn
from ..compat import input, open
from . import python
from . import bash
# Maps the user-facing language name to the template-generating module.
language_options = {
    "Python": python,
    "bash": bash
}
# Tracks whether a readline implementation was successfully imported;
# use_completer() consults this before touching readline.
completer_state = {
    "available": False
}
try:
    # Prefer gnureadline (e.g. on macOS, where libedit masquerades as
    # readline); fall back to the stdlib module.
    try:
        import gnureadline as readline
    except ImportError:
        import readline
    import rlcompleter
    readline.parse_and_bind("tab: complete")
    # Empty delimiters: complete over the entire input line.
    readline.set_completer_delims("")
    completer_state['available'] = True
except ImportError:
    print('NOTE: readline module not available. Install for tab-completion.')
class Completer():
    """Readline completer over a fixed list of string choices."""
    def __init__(self, choices):
        # Matches for the current completion session; recomputed whenever
        # readline restarts the session with state == 0.
        self.matches = None
        self.choices = choices
    def complete(self, text, state):
        """Return the state-th choice starting with *text*, or None.

        Implements the readline completer protocol: readline calls this
        repeatedly with state = 0, 1, 2, ... until None is returned.
        """
        if state == 0:
            # Bug fix: materialize a list.  On Python 3, filter() returns
            # a lazy iterator that supports neither len() nor indexing,
            # so the original code raised TypeError below.
            self.matches = [choice for choice in self.choices
                            if choice.startswith(text)]
        if self.matches is not None and state < len(self.matches):
            return self.matches[state]
        else:
            return None
def clean(s):
    """Strip trailing whitespace from every line of *s*."""
    stripped_lines = [line.rstrip() for line in s.split("\n")]
    return "\n".join(stripped_lines)
def use_completer(completer=None):
    """Install *completer* as the readline completer, when readline works.

    Calling with no argument is a no-op; the previously installed
    completer is deliberately left in place.
    """
    if completer is None:
        return
    if completer_state['available']:
        readline.set_completer(completer.complete)
# Expect default to be a default string value
# Expect choices to be a list of strings
def prompt_for_var(prompt_str, default=None, allow_empty=False, choices=None):
    """Prompt until the user supplies an acceptable value.

    Returns the typed value (validated against *choices* when given),
    the *default* on empty input, or '' when *allow_empty* is set.
    Exits the process on Ctrl-C or EOF.
    """
    if default is not None:
        prompt = prompt_str + ' [' + default + ']: '
    else:
        prompt = prompt_str + ': '
    while True:
        try:
            value = input(prompt)
        except (KeyboardInterrupt, EOFError):
            print('')
            exit(1)
        if value == '':
            if default is not None:
                return default
            if allow_empty:
                return value
            continue  # re-prompt: empty input is not acceptable here
        if choices is None or value in choices:
            return value
        print('Error: unrecognized response, expected one of ' + json.dumps(choices))
def print_intro(api_version):
    """Print the wizard banner and the 'Basic Metadata' section intro.

    :param api_version: API version string shown in the banner
    """
    print(DNANEXUS_LOGO() + ' App Wizard, API v' + api_version)
    print('')
    print(BOLD() + 'Basic Metadata' + ENDC())
    print('')
    print(fill('''Please enter basic metadata fields that will be used to
    describe your app. Optional fields are denoted by options with square
    brackets. At the end of this wizard, the files necessary for building your
    app will be generated from the answers you provide.'''))
    print('')
def get_name(default=None):
    """Prompt for a valid app name and make sure a directory can be made.

    Loops until the name matches /^[a-zA-Z0-9._-]+$/ and the path is
    usable: an existing directory may be wiped (with confirmation);
    an existing regular file forces choosing a different name.

    :param default: suggested name shown in the prompt
    :returns: the accepted app name
    """
    print(fill('The ' + BOLD() + 'name' + ENDC() + ' of your app must be unique on the DNAnexus platform. After creating your app for the first time, you will be able to publish new versions using the same app name. App names are restricted to alphanumeric characters (a-z, A-Z, 0-9), and the characters ".", "_", and "-".'))
    name_pattern = re.compile('^[a-zA-Z0-9._-]+$')
    while True:
        name = prompt_for_var('App Name', default)
        if name_pattern.match(name) is None:
            print(fill('The name of your app must match /^[a-zA-Z0-9._-]+$/'))
        else:
            if os.path.exists(name):
                if os.path.isdir(name):
                    remove_dir = prompt_for_yn('The directory %s already exists. Would you like to remove all of its contents and create a new directory in its place?' % name)
                    if remove_dir:
                        shutil.rmtree(name)
                        print(fill('Replacing all contents of directory %s...' % name))
                    else:
                        # User declined; ask for a different name.
                        print('')
                        continue
                else:
                    print(fill('A file named %s already exists. Please choose another name or rename your file' % name))
                    continue
            break
    return name
def get_metadata(api_version):
    """Prompt for the app title and summary.

    :param api_version: currently unused here; presumably kept for
        symmetry with the other wizard prompts — TODO confirm before
        removing.
    :returns: (title, summary) tuple of possibly-empty strings
    """
    print('')
    print(fill('The ' + BOLD() + 'title' + ENDC() + ', if provided, is what is shown as the name of your app on the website. It can be any valid UTF-8 string.'))
    title = prompt_for_var('Title', '')
    print('')
    print(fill('The ' + BOLD() + 'summary' + ENDC() + ' of your app is a short phrase or one-line description of what your app does. It can be any UTF-8 human-readable string.'))
    summary = prompt_for_var('Summary', '')
    return title, summary
def get_version(default=None):
    """Prompt for the app version string; defaults to '0.0.1'."""
    effective_default = default if default is not None else '0.0.1'
    print('')
    print(fill('You can publish multiple versions of your app, and the ' + BOLD() + 'version' + ENDC() + ' of your app is a string with which to tag a particular version. We encourage the use of Semantic Versioning for labeling your apps (see http://semver.org/ for more details).'))
    return prompt_for_var('Version', effective_default)
def get_timeout(default=None):
    """Prompt for the app's timeout policy.

    :param default: optional app_json-style timeout dict, e.g. {'hours': 48}
    :returns: (amount, unit-name) tuple such as (48, 'hours')
    """
    # Max timeout is 30 days
    max_timeout = {'m': 30 * 24 * 60, 'h': 30 * 24, 'd': 30}
    units = {'m': 'minutes', 'h': 'hours', 'd': 'days'}
    # Raw string: '\d' is an invalid escape sequence in an ordinary
    # string literal on newer Python versions.
    time_pattern = re.compile(r'^[1-9]\d*[mhd]$')
    def timeout_dict_to_str(d):
        # Used to convert app_json inputs:
        # {'hours': 48} -> '48h'
        # Bug fix: next(iter(...)) works on Python 2 and 3 alike;
        # d.values()[0] / d.keys()[0] raise TypeError on Python 3,
        # where dict views are not indexable.
        unit_name, amount = next(iter(d.items()))
        return str(amount) + unit_name[0]
    if default is None:
        default = '48h'
    else:
        default = timeout_dict_to_str(default)
    print('')
    print(fill('Set a ' + BOLD() + 'timeout policy' + ENDC() + ' for your app. Any single entry point of the app that runs longer than the specified timeout will fail with a TimeoutExceeded error. Enter an int greater than 0 with a single-letter suffix (m=minutes, h=hours, d=days) (e.g. "48h").'))
    while True:
        timeout = prompt_for_var('Timeout policy', default)
        if not time_pattern.match(timeout):
            print(fill('Error: enter an int with a single-letter suffix (m=minutes, h=hours, d=days)'))
        elif int(timeout[:-1]) > max_timeout[timeout[-1]]:
            print(fill('Error: max allowed timeout is 30 days'))
        else:
            break
    return int(timeout[:-1]), units[timeout[-1]]
def get_ordinal_str(num):
    """Return *num* with its English ordinal suffix, e.g. 1 -> '1st'."""
    # 11th-13th are irregular: they take 'th' despite ending in 1, 2, 3.
    if 11 <= num % 100 <= 13:
        suffix = 'th'
    else:
        suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(num % 10, 'th')
    return str(num) + suffix
def get_language():
#language_choices = language_options.keys()
language_choices = ["Python", "bash"]
use_completer(Completer(language_choices))
print('')
print(fill('You can write your app in any ' + BOLD() + 'programming language' + ENDC() + ', but we provide templates for the following supported languages' + ENDC() + ": " + ', '.join(language_choices)))
language = prompt_for_var('Programming language', choices=language_choices)
use_completer()
return language
def get_pattern(template_dir):
    """Prompt for the execution pattern supported by *template_dir*.

    Only patterns with a corresponding subdirectory under the language's
    template directory are offered; 'basic' is always available and is
    auto-selected when it is the only choice.

    :param template_dir: path to the chosen language's template directory
    :returns: the selected pattern name
    """
    pattern_choices = []
    print('')
    print(fill('The following common ' + BOLD() + 'execution patterns' + ENDC() + ' are currently available for your programming language:'))
    pattern_choices.append('basic')
    print(' ' + BOLD() + 'basic' + ENDC())
    print(fill('Your app will run on a single machine from beginning to end.', initial_indent='   ', subsequent_indent='   '))
    if os.path.isdir(os.path.join(template_dir, 'parallelized')):
        pattern_choices.append('parallelized')
        print(' ' + BOLD() + 'parallelized' + ENDC())
        print(fill('Your app will subdivide a large chunk of work into multiple pieces that can be processed in parallel and independently of each other, followed by a final stage that will merge and process the results as necessary.', initial_indent='   ', subsequent_indent='   '))
    if os.path.isdir(os.path.join(template_dir, 'scatter-process-gather')):
        pattern_choices.append('scatter-process-gather')
        print(' ' + BOLD() + 'scatter-process-gather' + ENDC())
        print(fill('Similar to ' + BOLD() + 'parallelized' + ENDC() + ' but with the addition of a "scatter" entry point. This allows you to break out the execution for splitting up the input, or you can call a separate app/applet to perform the splitting.',
                   initial_indent='   ',
                   subsequent_indent='   '))
    if len(pattern_choices) == 1:
        print('Automatically using the execution pattern "basic"')
        return 'basic'
    use_completer(Completer(pattern_choices))
    pattern = prompt_for_var('Execution pattern', 'basic', choices=pattern_choices)
    use_completer()
    return pattern
def fill_in_name_and_ver(template_string, name, version):
    """Substitute the app-name and version placeholders in a template.

    Replaces every occurrence of DX_APP_WIZARD_NAME with *name* and of
    DX_APP_WIZARD_VERSION with *version*.
    """
    with_name = template_string.replace('DX_APP_WIZARD_NAME', name)
    return with_name.replace('DX_APP_WIZARD_VERSION', version)
def format_io_spec_to_markdown(io_spec):
    """Render one dxapp.json input/output spec entry as a Markdown bullet."""
    spec = dict(io_spec)  # shallow copy; never mutate the caller's dict
    spec.setdefault('label', spec['name'])
    if 'help' in spec:
        spec['help'] = ' ' + spec['help']
    else:
        spec['help'] = ''
    return '* **{label}** ``{name}``: ``{class}``{help}'.format(**spec)
def create_files_from_templates(template_dir, app_json, language,
                                required_file_input_names, optional_file_input_names,
                                required_file_array_input_names, optional_file_array_input_names,
                                file_output_names,
                                pattern, pattern_suffix='',
                                parallelized_input='', parallelized_output='', description='',
                                entry_points=()):
    """Materialize a new app directory from the files in *template_dir*.

    Copies every template file (except dxapp.json) into a directory named
    after the app, substituting the name/version placeholders, fills in the
    language-specific code skeleton under src/, and generates Readme.md and
    Readme.developer.md.

    :param template_dir: directory containing the template files
    :param app_json: parsed dxapp.json dict; 'name' and 'version' are required
    :param language: key into the module-level ``language_options`` table
    :param pattern_suffix: selects which ``code.<suffix>*`` source template
        to use; a trailing '.' is appended below
    :param entry_points: entry point names listed in Readme.developer.md
    :returns: list of paths of all files written (the manifest)

    NOTE(review): the ``pattern`` parameter is not referenced in this body —
    only ``pattern_suffix`` is; confirm whether it is kept for API
    compatibility with callers.
    """
    manifest = []
    name = app_json['name']
    title = app_json.get('title', name)
    summary = app_json.get('summary', '')
    version = app_json['version']
    # All code templates are named like "code<pattern_suffix>.<ext>".
    pattern_suffix += '.'
    # List all files in template_dir (other than dxapp.json) and add
    # those (after passing it through fill_in_name_and_ver). The
    # code.* files under src/ get additional placeholder substitution below.
    def chmod_755(file):
        # Best-effort: mark scripts executable (rwxr-xr-x); a failure is
        # reported but does not abort app creation.
        try:
            os.chmod(file,
                     stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
                     stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH |
                     stat.S_IXOTH)
        except OSError as e:
            print("Unable to change file {} mode: {}".format(file, e))
    def use_template_file(path):
        '''
        Copy one template file into the new app directory, substituting the
        name/version placeholders, and record it in the manifest.

        :param path: relative path from template_dir
        '''
        with open(os.path.join(template_dir, path), 'r') as template_file:
            file_text = fill_in_name_and_ver(template_file.read(), name, version)
        filled_template_filename = os.path.join(name, path)
        with open(filled_template_filename, 'w') as filled_template_file:
            filled_template_file.write(file_text)
        # Scripts must be executable to run on the platform.
        if filled_template_filename.endswith('.py') or filled_template_filename.endswith('.sh'):
            chmod_755(filled_template_filename)
        manifest.append(filled_template_filename)
    # Top-level template files (src/, test/, and dxapp.json are handled
    # separately; editor backup files ending in '~' are skipped).
    for template_filename in os.listdir(template_dir):
        if template_filename in ['src', 'test', 'dxapp.json'] or template_filename.endswith('~'):
            continue
        use_template_file(template_filename)
    # Optional test/ directory: copy everything except backups and bytecode.
    if os.path.exists(os.path.join(template_dir, 'test')):
        for template_filename in os.listdir(os.path.join(template_dir, 'test')):
            if any(template_filename.endswith(ext) for ext in ('~', '.pyc', '.pyo', '__pycache__')):
                continue
            use_template_file(os.path.join('test', template_filename))
    # src/ directory: the code.<pattern_suffix>* file matching the chosen
    # execution pattern gets full placeholder expansion; everything else is
    # copied verbatim (modulo name/version substitution).
    for template_filename in os.listdir(os.path.join(template_dir, 'src')):
        if template_filename.endswith('~'):
            continue
        elif template_filename.startswith('code'):
            # Only the code file for the selected pattern is used; code files
            # for other patterns are silently skipped.
            if template_filename.startswith('code' + pattern_suffix):
                with open(os.path.join(template_dir, 'src', template_filename), 'r') as code_template_file:
                    code_file_text = fill_in_name_and_ver(code_template_file.read(), name, version)
                if "outputSpec" in app_json:
                    # Placeholder output dict with one None entry per output.
                    dummy_output_hash = {output["name"]: None for output in app_json["outputSpec"]}
                else:
                    dummy_output_hash = {}
                # Language-specific code snippets for the remaining
                # DX_APP_WIZARD_* placeholders.
                input_sig_str, init_inputs_str, dl_files_str, ul_files_str, outputs_str = \
                    language_options[language].get_strings(app_json,
                                                           required_file_input_names,
                                                           optional_file_input_names,
                                                           required_file_array_input_names,
                                                           optional_file_array_input_names,
                                                           file_output_names,
                                                           dummy_output_hash)
                code_file_text = code_file_text.replace('DX_APP_WIZARD_INPUT_SIGNATURE', input_sig_str)
                code_file_text = code_file_text.replace('DX_APP_WIZARD_INITIALIZE_INPUT', init_inputs_str)
                code_file_text = code_file_text.replace('DX_APP_WIZARD_DOWNLOAD_ANY_FILES', dl_files_str)
                code_file_text = code_file_text.replace('DX_APP_WIZARD_UPLOAD_ANY_FILES', ul_files_str)
                code_file_text = code_file_text.replace('DX_APP_WIZARD_OUTPUT', outputs_str)
                code_file_text = code_file_text.replace('DX_APP_WIZARD_PARALLELIZED_INPUT', parallelized_input)
                code_file_text = code_file_text.replace('DX_APP_WIZARD_PARALLELIZED_OUTPUT', parallelized_output)
                # e.g. src/code.py -> src/<appname>.py
                filled_code_filename = os.path.join(name, 'src', template_filename.replace('code' + pattern_suffix, name + '.'))
                with open(filled_code_filename, 'w') as filled_code_file:
                    filled_code_file.write(code_file_text)
                if filled_code_filename.endswith('.sh') or filled_code_filename.endswith('.py'):
                    chmod_755(filled_code_filename)
                manifest.append(filled_code_filename)
        else:
            use_template_file(os.path.join('src', template_filename))
    # Readme file
    readme_template = '''<!-- dx-header -->
# {app_title} (DNAnexus Platform App)
{summary}
This is the source code for an app that runs on the DNAnexus Platform.
For more information about how to run or modify it, see
https://documentation.dnanexus.com/.
<!-- /dx-header -->
{description}
<!--
TODO: This app directory was automatically generated by dx-app-wizard;
please edit this Readme.md file to include essential documentation about
your app that would be helpful to users. (Also see the
Readme.developer.md.) Once you're done, you can remove these TODO
comments.
For more info, see https://documentation.dnanexus.com/developer.
-->
'''
    with open(os.path.join(name, 'Readme.md'), 'w') as readme_file:
        readme_file.write(readme_template.format(app_title=title, summary=summary, description=description))
    manifest.append(os.path.join(name, 'Readme.md'))
    # Developer readme
    developer_readme_template = '''# {app_name} Developer Readme
<!--
TODO: Please edit this Readme.developer.md file to include information
for developers or advanced users, for example:
* Information about app internals and implementation details
* How to report bugs or contribute to development
-->
## Running this app with additional computational resources
This app has the following entry points:
{entry_points_list}
{instance_type_override_message}
{{
  systemRequirements: {{
    {entry_points_hash}
  }},
  [...]
}}
See <a
href="https://documentation.dnanexus.com/developer/api/running-analyses/io-and-run-specifications#run-specification">Run
Specification</a> in the API documentation for more information about the
available instance types.
'''
    entry_points_list = '\n'.join(['* {0}'.format(entry_point) for entry_point in entry_points])
    # Wording differs depending on whether there are multiple entry points.
    if len(entry_points) > 1:
        instance_type_override_message = '''When running this app, you can override the instance type to be used for each
entry point by providing the ``systemRequirements`` field to
```/applet-XXXX/run``` or ```/app-XXXX/run```, as follows:'''
    else:
        instance_type_override_message = '''When running this app, you can override the instance type to be used by
providing the ``systemRequirements`` field to ```/applet-XXXX/run``` or
```/app-XXXX/run```, as follows:'''
    entry_points_hash = ",\n        ".join(['"{entry_point}": {{"instanceType": "mem2_hdd2_x2"}}'.format(entry_point=entry_point) for entry_point in entry_points])
    with open(os.path.join(name, 'Readme.developer.md'), 'w') as developer_readme_file:
        developer_readme_file.write(developer_readme_template.format(
            app_name=name,
            entry_points_list=entry_points_list,
            entry_points_hash=entry_points_hash,
            instance_type_override_message=instance_type_override_message
        ))
    manifest.append(os.path.join(name, 'Readme.developer.md'))
    return manifest
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.relay.loops import while_loop
from tvm.relay.testing import run_infer_type as infer_type
from util.assert_diagnostic import DiagnosticTesting
import tvm.topi.testing
def int32(val):
    """Return *val* wrapped as a Relay constant of dtype int32."""
    return relay.const(val, dtype="int32")
def any_dims(ndim):
    """Build an *ndim*-rank shape tuple with every dimension dynamic (relay.Any)."""
    return tuple(relay.Any() for _ in range(ndim))
def check_result(
    args, mod, expected, flatten=False, assert_shape=False, only_vm=False, targets=None
):
    """Evaluate mod["main"] on *args* with each executor/target and compare
    against *expected*.

    :param args: positional inputs passed to the evaluated function
    :param expected: reference numpy array, or a shape tuple when
        ``assert_shape`` is True
    :param flatten: compare flattened arrays instead of full shapes
    :param assert_shape: only check the result's shape, not its values
    :param only_vm: skip the "debug" (interpreter) executor entirely
    :param targets: optional list of (target, ctx) pairs; defaults to
        tvm.testing.enabled_targets()
    """
    for kind in ["debug", "vm"]:
        # NOTE(review): this rebinds the `targets` parameter on the first
        # iteration, so the default list is computed at most once per call.
        targets = targets or tvm.testing.enabled_targets()
        for tgt, ctx in targets:
            # The debug interpreter only runs on CPU.
            if kind == "debug" and (only_vm or ctx.device_type != tvm.cpu().device_type):
                continue
            ex = relay.create_executor(kind, mod=mod, ctx=ctx, target=tgt)
            result = ex.evaluate()(*args)
            result = result.asnumpy()
            if assert_shape:
                assert result.shape == expected, "Shape mismatch: expect %s but got %s." % (
                    str(expected),
                    str(result.shape),
                )
                # NOTE(review): this early return means shape-only checks run
                # on just the first (kind, target) combination — presumably
                # intentional, but worth confirming.
                return
            if flatten:
                result = result.flatten()
                expected = expected.flatten()
            tvm.testing.assert_allclose(result, expected, atol=2e-6)
def verify_any_broadcast(x_shape, y_shape, x_np_shape, y_np_shape, op, np_op):
    """Run a broadcasting binary op with dynamic shapes and compare to NumPy.

    *x_shape*/*y_shape* are the (possibly Any) compile-time shapes,
    *x_np_shape*/*y_np_shape* the concrete runtime shapes.
    """
    dtype = "float32"
    lhs = relay.var("x", shape=x_shape, dtype=dtype)
    rhs = relay.var("y", shape=y_shape, dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([lhs, rhs], op(lhs, rhs))
    lhs_np = np.random.uniform(size=x_np_shape).astype(dtype)
    rhs_np = np.random.uniform(size=y_np_shape).astype(dtype)
    check_result([lhs_np, rhs_np], mod, np_op(lhs_np, rhs_np))
@tvm.testing.uses_gpu
def test_any_broadcast():
    """Broadcasting add with relay.Any() dims matches NumPy broadcasting."""
    # Test broadcast with 1s
    verify_any_broadcast((relay.Any(),), (3, 2), (1,), (3, 2), relay.add, np.add)
    verify_any_broadcast((relay.Any(), 2), (1, 2), (1, 2), (1, 2), relay.add, np.add)
    verify_any_broadcast((relay.Any(), 2), (1, 2), (3, 2), (1, 2), relay.add, np.add)
    verify_any_broadcast((relay.Any(), 2), (3, 2), (1, 2), (3, 2), relay.add, np.add)
    verify_any_broadcast((relay.Any(), 2), (3, relay.Any()), (1, 2), (3, 1), relay.add, np.add)
    # Test broadcast with values other than 1
    verify_any_broadcast((relay.Any(),), (3, 2), (2,), (3, 2), relay.add, np.add)
    verify_any_broadcast((relay.Any(), 2), (3, 2), (3, 2), (3, 2), relay.add, np.add)
def verify_any_elemwise(x_shape, x_np_shape, op, np_op):
    """Run a unary elementwise op on a dynamically-shaped input and compare
    against its NumPy equivalent."""
    dtype = "float32"
    arg = relay.var("x", shape=x_shape, dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([arg], op(arg))
    sample = np.random.uniform(size=x_np_shape).astype(dtype)
    check_result([sample], mod, np_op(sample))
@tvm.testing.uses_gpu
def test_any_elemwise():
    """Unary elementwise ops (sqrt/negative/exp) with dynamic input shapes."""
    verify_any_elemwise((relay.Any(),), (3,), relay.sqrt, np.sqrt)
    verify_any_elemwise((relay.Any(), 2), (5, 2), relay.negative, np.negative)
    verify_any_elemwise((relay.Any(), relay.Any()), (5, 4), relay.exp, np.exp)
@tvm.testing.uses_gpu
def test_any_broadcast_fail():
    """Broadcasts whose concrete runtime shapes are incompatible must fail."""

    def check_fail(x_shape, y_shape, x_np_shape, y_np_shape, op, np_op):
        # The mismatch is only detectable at runtime, so we expect a TVMError
        # from the executor rather than a type-checking failure.
        raised = False
        try:
            verify_any_broadcast(x_shape, y_shape, x_np_shape, y_np_shape, op, np_op)
        except tvm._ffi.base.TVMError:
            raised = True
        assert raised

    check_fail((relay.Any(),), (3, 2), (1,), (4, 2), relay.add, np.add)
    check_fail((relay.Any(), 2), (3, 2), (4, 2), (4, 2), relay.add, np.add)
    check_fail((relay.Any(), 2), (3, relay.Any()), (1, 2), (4, 1), relay.add, np.add)
    check_fail((relay.Any(), 2), (3, 3), (1, 3), (3, 3), relay.add, np.add)
    check_fail((relay.Any(),), (3, 2), (2), (4, 2), relay.add, np.add)
def verify_any_full_like(x_shape, x_np_shape, relay_op, np_op, dtype="float32"):
    """Compare a *_like op (zeros_like / ones_like) against its NumPy twin
    when the input shape is dynamic."""
    inp = relay.var("x", shape=x_shape, dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], relay_op(inp))
    sample = np.random.uniform(size=x_np_shape).astype(dtype)
    check_result([sample], mod, np_op(sample))
@tvm.testing.uses_gpu
def test_any_full_like():
    """zeros_like / ones_like across dtypes, ranks, and runtime sizes."""
    # zeros_like, ones_like
    verify_any_full_like(any_dims(3), (2, 3, 5), relay.zeros_like, np.zeros_like, "float32")
    verify_any_full_like(any_dims(3), (225, 115, 15), relay.zeros_like, np.zeros_like, "float32")
    verify_any_full_like(
        any_dims(5), (10, 11, 12, 13, 14), relay.zeros_like, np.zeros_like, "int32"
    )
    verify_any_full_like(any_dims(3), (2, 3, 5), relay.ones_like, np.ones_like, "float32")
    verify_any_full_like(any_dims(3), (225, 115, 15), relay.ones_like, np.ones_like, "float32")
    verify_any_full_like(any_dims(5), (10, 11, 12, 13, 14), relay.ones_like, np.ones_like, "int32")
def verify_any_full(x_np_shape, relay_op, np_op, dtype="float32", value=None):
    """Check zeros/ones/full when the target shape arrives as a runtime
    int32 tensor rather than a static attribute.

    *value* is only used for relay.full / np.full; the other ops take no
    fill value.
    """
    shape_input = relay.var("x", shape=(len(x_np_shape),), dtype="int32")
    mod = tvm.IRModule()
    if value is None:
        out = relay_op(shape_input, dtype)
        ref = np_op(x_np_shape)
    else:
        out = relay_op(relay.expr.const(value), shape_input, dtype)
        ref = np_op(x_np_shape, value)
    mod["main"] = relay.Function([shape_input], out)
    check_result([np.array(x_np_shape).astype("int32")], mod, ref)
@tvm.testing.uses_gpu
def test_any_full():
    """zeros/ones/full with the output shape supplied as a runtime tensor."""
    # zeros, ones, full
    verify_any_full((2, 3, 5), relay.zeros, np.zeros, "float32")
    verify_any_full((225, 115, 15), relay.zeros, np.zeros, "float32")
    verify_any_full((10, 11, 12, 13, 14), relay.zeros, np.zeros, "int32")
    verify_any_full((2, 3, 5), relay.ones, np.ones, "float32")
    verify_any_full((225, 115, 15), relay.ones, np.ones, "float32")
    verify_any_full((10, 11, 12, 13, 14), relay.ones, np.ones, "int32")
    verify_any_full((10, 11, 12, 13, 14), relay.full, np.full, "float32", 2.0)
    verify_any_full((1, 2, 3, 4), relay.full, np.full, "int32", -2)
@tvm.testing.uses_gpu
def test_any_concat():
    """Concatenate a dynamic-batch tensor with a static one along axis 0,
    with elementwise arithmetic feeding each input."""
    x = relay.var("x", shape=(relay.Any(), 2), dtype="float32")
    y = relay.var("y", shape=(1, 2), dtype="float32")
    xx = x - relay.expr.const(3.0)
    yy = y * relay.expr.const(5.0)
    z = relay.op.concatenate([xx, yy], axis=0)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x, y], z)
    x_np = np.random.uniform(size=(3, 2)).astype("float32")
    y_np = np.random.uniform(size=(1, 2)).astype("float32")
    # Reference mirrors the graph: (x - 3) concat (y * 5).
    ref = np.concatenate([x_np - 3.0, y_np * 5.0], axis=0)
    check_result([x_np, y_np], mod, ref)
def verify_any_reshape(x_shape, newshape, x_np_shape, out_shape, variable_newshape=False):
    """Check relay.reshape with dynamic input shapes.

    :param variable_newshape: when True, feed *newshape* as a runtime int64
        tensor input instead of a compile-time attribute
    NOTE(review): *out_shape* is not referenced in this body — the check is
    value-based (flattened compare), so the expected shape is implicit.
    """
    x = relay.var("x", shape=x_shape, dtype="float32")
    relu_x = relay.nn.relu(x)
    data = np.random.uniform(size=x_np_shape).astype("float32")
    params = [x]
    args = [data]
    if variable_newshape:
        newshape_var = relay.var("newshape", shape=(len(newshape),), dtype="int64")
        params.append(newshape_var)
        args.append(np.array(newshape, dtype="int64"))
        # Rebind the Python name so relay.reshape below sees the var.
        newshape = newshape_var
    y = relay.reshape(relu_x, newshape=newshape)
    mod = tvm.IRModule()
    mod["main"] = relay.Function(params, y)
    # data is uniform in [0, 1), so relu is the identity; the flattened
    # output must equal the flattened input.
    check_result(args, mod, data, flatten=True)
@tvm.testing.uses_gpu
def test_any_reshape():
    """reshape with special codes (-1, 0, -2, -3, -4) on dynamic inputs,
    both as static attributes and (where supported) runtime tensors."""
    for variable_newshape in [False, True]:
        # Variable newshape only supports that output rank is the same as newshape
        verify_any_reshape(any_dims(3), (1, -1), (2, 3, 4), (1, 24), variable_newshape)
        verify_any_reshape(any_dims(3), (0, -1), (2, 3, 4), (2, 12), variable_newshape)
    verify_any_reshape(any_dims(3), (0, -2), (2, 3, 4), (2, 3, 4))
    verify_any_reshape(any_dims(3), (-4, -1, 2, -3), (6, 3, 4), (3, 2, 12))
    verify_any_reshape(any_dims(3), (-4, 2, -1, -2), (6, 3, 4), (2, 3, 3, 4))
def verify_any_argwhere(x_shape, x_np_shape, dtype="bool"):
    """Check relay.argwhere against np.argwhere for a dynamic input shape.

    Runs the executors directly (CPU/llvm only) instead of check_result —
    see the TODO below.
    """
    x = relay.var("x", shape=x_shape, dtype=dtype)
    y = relay.argwhere(x)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], y)
    # Values 0-3 so some entries are zero (false) and some nonzero (true).
    data = np.random.choice([0, 1, 2, 3], size=x_np_shape).astype(dtype)
    expected = np.argwhere(data)
    for kind in ["debug", "vm"]:
        ex = relay.create_executor(kind, mod=mod, ctx=tvm.cpu(), target="llvm")
        result = ex.evaluate()(data).asnumpy()
        assert result.shape == expected.shape
        tvm.testing.assert_allclose(result.flatten(), expected.flatten())
    # TODO(@zhiics) argwhere gpu schedule is currently not avaiable
    # check_result([data], mod, expected, flatten=True)
@tvm.testing.uses_gpu
def test_any_argwhere():
    """argwhere over every rank from 1 to 5 and each supported input dtype."""
    for dtype in ("bool", "int32", "int8"):
        for rank in range(1, 6):
            verify_any_argwhere(any_dims(rank), (5,) * rank, dtype)
def verify_any_take(data_shape, indices_shape, axis, data_np_shape, indices_np_shape):
    """Check relay.take with dynamic data/indices shapes against np.take."""
    mod = tvm.IRModule()
    data = relay.var("data", shape=data_shape, dtype="float32")
    indices = relay.var("indices", shape=indices_shape, dtype="int32")
    mod["main"] = relay.Function([data, indices], relay.take(data, indices, axis=axis))
    data_np = np.random.uniform(size=data_np_shape).astype("float32")
    # Keep indices in bounds: flat size when axis is None, else that axis's extent.
    bound = data_np.size if axis is None else data_np.shape[axis]
    indices_np = np.random.randint(bound, size=indices_np_shape).astype("int32")
    check_result([data_np, indices_np], mod, np.take(data_np, indices_np, axis=axis))
@tvm.testing.uses_gpu
def test_any_take():
    """take with scalar/tensor indices, explicit/None/negative axes."""
    verify_any_take(any_dims(2), (1,), 0, (4, 5), (1,))
    verify_any_take(any_dims(2), (), 0, (4, 5), ())
    verify_any_take(any_dims(2), (), None, (4, 5), ())
    verify_any_take(any_dims(3), any_dims(2), 1, (3, 4, 5), (2, 3))
    verify_any_take(any_dims(2), any_dims(3), None, (4, 5), (2, 3, 4))
    verify_any_take(any_dims(2), any_dims(4), -1, (4, 5), (2, 3, 4, 5))
def verify_any_tile(dshape, reps, np_dshape, np_reps):
    """Check relay.tile on a dynamically-shaped input against np.tile."""
    mod = tvm.IRModule()
    inp = relay.var("x", shape=dshape, dtype="float32")
    mod["main"] = relay.Function([inp], relay.tile(inp, reps=reps))
    sample = np.random.uniform(size=np_dshape).astype("float32")
    check_result([sample], mod, np.tile(sample, reps=np_reps))
@tvm.testing.uses_gpu
def test_any_tile():
    """tile with reps of equal, greater, and smaller rank than the input."""
    verify_any_tile(any_dims(3), (3, 2, 1), (2, 3, 4), (3, 2, 1))
    verify_any_tile(any_dims(3), (1, 2), (2, 3, 4), (1, 2))
    verify_any_tile(any_dims(2), (3, 2, 1), (2, 3), (3, 2, 1))
    verify_any_tile(any_dims(3), (1,), (2, 3, 4), (1,))
@tvm.testing.uses_gpu
def test_any_shape_of():
    """shape_of on dynamic inputs: full shape tensor and a single dimension."""
    x = relay.var("x", shape=any_dims(2), dtype="float32")
    y = relay.shape_of(x)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], y)
    data = np.random.uniform(size=(3, 4)).astype("float32")
    check_result([data], mod, np.array([3, 4]).astype("int64"))
    # Extract one dimension of the shape via take.
    x = relay.var("x", shape=any_dims(3), dtype="float32")
    y0 = relay.shape_of(x)
    y1 = relay.take(y0, relay.const(1, "int32"))
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], y1)
    data = np.random.uniform(size=(2, 3, 4)).astype("float32")
    check_result([data], mod, np.array(3).astype("int64"))
def verify_any_reduce(
    reduce_op, data_shape, axis, exclude, keepdims, static_data_shape, ref_out_shape
):
    """Check a reduction op's output shape when the input shape is dynamic."""
    mod = tvm.IRModule()
    # relay.all is the only reduction here that needs boolean input.
    dtype = "bool" if reduce_op == relay.all else "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    mod["main"] = relay.Function([data], reduce_op(data, axis, keepdims, exclude))
    sample = np.random.uniform(size=static_data_shape).astype(dtype)
    check_result([sample], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_reduce():
    """Output shapes of reductions (argmax/argmin/all/max/min/prod/mean/variance)
    with various axis, keepdims, and exclude combinations."""
    verify_any_reduce(relay.argmax, any_dims(3), None, False, False, (3, 4, 5), ())
    verify_any_reduce(relay.argmin, any_dims(4), 1, False, True, (3, 4, 5, 6), (3, 1, 5, 6))
    verify_any_reduce(relay.all, any_dims(3), (1, 2), True, False, (3, 4, 5), (4, 5))
    verify_any_reduce(relay.max, any_dims(4), -1, True, True, (3, 4, 5, 6), (1, 1, 1, 6))
    verify_any_reduce(relay.min, any_dims(3), (0, 1), False, False, (4, 5, 6), (6,))
    verify_any_reduce(relay.prod, any_dims(4), 2, True, True, (3, 4, 5, 6), (1, 1, 5, 1))
    verify_any_reduce(relay.mean, any_dims(2), 0, False, False, (1, 2), (2,))
    verify_any_reduce(relay.variance, any_dims(5), (2, 4), False, False, (3, 4, 5, 6, 7), (3, 4, 6))
def verify_any_layout_transform(
    data_shape, src_layout, dst_layout, static_data_shape, ref_out_shape
):
    """Check layout_transform's output shape for a dynamically-shaped input."""
    mod = tvm.IRModule()
    data = relay.var("data", shape=data_shape, dtype="float32")
    transformed = relay.layout_transform(data, src_layout, dst_layout)
    mod["main"] = relay.Function([data], transformed)
    sample = np.random.uniform(size=static_data_shape).astype("float32")
    check_result([sample], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_layout_transform():
    """layout_transform shapes: NCHW<->NHWC, packed-channel, packed-batch."""
    verify_any_layout_transform(any_dims(4), "NCHW", "NHWC", (3, 4, 5, 6), (3, 5, 6, 4))
    verify_any_layout_transform(
        any_dims(5), "NCHW16c", "NCHW2c", (1, 2, 8, 8, 16), (1, 16, 8, 8, 2)
    )
    verify_any_layout_transform(any_dims(5), "NCHW6n", "NHWC", (3, 4, 5, 6, 6), (18, 5, 6, 4))
    verify_any_layout_transform(any_dims(4), "NCHW", "NCHW4c", (3, 4, 5, 6), (3, 1, 5, 6, 4))
    verify_any_layout_transform((16, 1), "CH", "C4cH", (16, 1), (4, 4, 1))
def verify_any_expand_dims(data_shape, axis, num_newaxis, static_data_shape, ref_out_shape):
    """Check expand_dims's output shape for a dynamically-shaped input."""
    mod = tvm.IRModule()
    data = relay.var("data", shape=data_shape, dtype="float32")
    expanded = relay.expand_dims(data, axis=axis, num_newaxis=num_newaxis)
    mod["main"] = relay.Function([data], expanded)
    sample = np.random.uniform(size=static_data_shape).astype("float32")
    check_result([sample], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_expand_dims():
    """expand_dims with positive and negative axis inserting two new dims."""
    verify_any_expand_dims(any_dims(3), 1, 2, (1, 2, 3), (1, 1, 1, 2, 3))
    verify_any_expand_dims(any_dims(3), -1, 2, (1, 2, 3), (1, 2, 3, 1, 1))
def verify_any_transpose(data_shape, axes, static_data_shape):
    """Compare relay.transpose on a dynamic-shape input with np.transpose."""
    mod = tvm.IRModule()
    data = relay.var("data", shape=data_shape, dtype="float32")
    mod["main"] = relay.Function([data], relay.transpose(data, axes=axes))
    sample = np.random.uniform(size=static_data_shape).astype("float32")
    check_result([sample], mod, np.transpose(sample, axes))
@tvm.testing.uses_gpu
def test_any_transpose():
    """transpose with explicit, default (reversed), and negative axes."""
    verify_any_transpose(any_dims(3), (1, 0, 2), (10, 3, 2))
    verify_any_transpose(any_dims(3), None, (2, 3, 4))
    verify_any_transpose(any_dims(6), (0, 1, 3, 2, 5, 4), (11, 12, 2, 1, 9, 17))
    verify_any_transpose(any_dims(2), (-1, 0), (3, 2))
def verify_any_squeeze(data_shape, axis, static_data_shape):
    """Compare relay.squeeze on a dynamic-shape input with np.squeeze."""
    mod = tvm.IRModule()
    data = relay.var("data", shape=data_shape, dtype="float32")
    mod["main"] = relay.Function([data], relay.squeeze(data, axis=axis))
    sample = np.random.uniform(size=static_data_shape).astype("float32")
    check_result([sample], mod, np.squeeze(sample, axis))
@tvm.testing.uses_gpu
def test_any_squeeze():
    """squeeze of unit dims whose neighbors are dynamic (relay.Any)."""
    verify_any_squeeze((1, relay.Any(), relay.Any()), (0,), (1, 9, 8))
    verify_any_squeeze(
        (1, relay.Any(), relay.Any(), 1, relay.Any(), relay.Any()), (0, 3), (1, 12, 2, 1, 9, 17)
    )
@tvm.testing.uses_gpu
def test_any_reshape_like():
    """reshape_like adopts the (dynamic) shape of a second tensor."""
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=(relay.Any(), 3, 10), dtype=dtype)
    # NOTE(review): this second var is also named "data" in the IR; relay
    # treats the two Python objects as distinct vars, but distinct names
    # would be clearer.
    shape_like = relay.var("data", shape=(relay.Any(), 5, 6), dtype=dtype)
    y = relay.reshape_like(data, shape_like)
    mod["main"] = relay.Function([data, shape_like], y)
    data_np = np.random.uniform(size=(3, 3, 10)).astype(dtype)
    shape_like_np = np.random.uniform(size=(3, 5, 6)).astype(dtype)
    check_result([data_np, shape_like_np], mod, shape_like_np.shape, assert_shape=True)
def verify_any_conv2d(
    data_shape,
    kernel_shape,
    strides,
    padding,
    dilation,
    static_data_shape,
    ref_out_shape,
):
    """Check conv2d's output shape when the batch dimension is dynamic."""
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    kernel = relay.var("kernel", shape=kernel_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        data, kernel, strides, padding, dilation, kernel_size=kernel_shape[2:4]
    )
    mod["main"] = relay.Function([data, kernel], conv)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    kernel_np = np.random.uniform(size=kernel_shape).astype(dtype)
    check_result([data_np, kernel_np], mod, ref_out_shape, assert_shape=True)
# TODO(@kevinthesun): Support dynamic input height and width.
@tvm.testing.uses_gpu
def test_any_conv2d():
    """conv2d shape with dynamic batch: same-pad (shape preserved) and
    dilation 2 (spatial dims shrink by 2)."""
    verify_any_conv2d(
        (relay.Any(), 64, 224, 224),
        (64, 64, 3, 3),
        (1, 1),
        (1, 1),
        (1, 1),
        (1, 64, 224, 224),
        (1, 64, 224, 224),
    )
    verify_any_conv2d(
        (relay.Any(), 64, 224, 224),
        (64, 64, 3, 3),
        (1, 1),
        (1, 1),
        (2, 2),
        (2, 64, 224, 224),
        (2, 64, 222, 222),
    )
def verify_any_conv2d_NCHWc(
    data_shape,
    kernel_shape,
    strides,
    padding,
    dilation,
    data_layout,
    kernel_layout,
    out_layout,
    static_data_shape,
    ref_out_shape,
):
    """Check contrib_conv2d_nchwc's output shape when the batch dim is dynamic."""
    mod = tvm.IRModule()
    dtype = "float32"
    inp = relay.var("data", shape=data_shape, dtype=dtype)
    wt = relay.var("kernel", shape=kernel_shape, dtype=dtype)
    conv = relay.nn.contrib_conv2d_nchwc(
        inp,
        wt,
        strides,
        padding,
        dilation,
        kernel_size=kernel_shape[2:4],
        # Packed layout: output channels = outer blocks * inner block width.
        channels=kernel_shape[0] * kernel_shape[-1],
        data_layout=data_layout,
        kernel_layout=kernel_layout,
        out_layout=out_layout,
    )
    mod["main"] = relay.Function([inp, wt], conv)
    inp_np = np.random.uniform(size=static_data_shape).astype(dtype)
    wt_np = np.random.uniform(size=kernel_shape).astype(dtype)
    check_result([inp_np, wt_np], mod, ref_out_shape, assert_shape=True)
# TODO(@kevinthesun): Support dynamic input height and width.
@tvm.testing.uses_gpu
def test_any_conv2d_NCHWc():
    """Packed-layout conv2d shape with dynamic batch: same-pad and dilation 2."""
    verify_any_conv2d_NCHWc(
        (relay.Any(), 8, 224, 224, 8),
        (8, 8, 3, 3, 8, 8),
        (1, 1),
        (1, 1),
        (1, 1),
        "NCHW8c",
        "OIHW8i8o",
        "NCHW8c",
        (1, 8, 224, 224, 8),
        (1, 8, 224, 224, 8),
    )
    verify_any_conv2d_NCHWc(
        (relay.Any(), 8, 224, 224, 8),
        (8, 8, 3, 3, 8, 8),
        (1, 1),
        (1, 1),
        (2, 2),
        "NCHW8c",
        "OIHW8i8o",
        "NCHW8c",
        (2, 8, 224, 224, 8),
        (2, 8, 222, 222, 8),
    )
def verify_any_conv2d_transpose_nchw(
    data_shape,
    kernel_shape,
    strides,
    padding,
    dilation,
    groups,
    static_data_shape,
    ref_out_shape,
    output_padding,
):
    """Check conv2d_transpose's output shape with a dynamic batch dimension.

    Only the llvm/CPU target is exercised here (see the explicit ``targets``).
    """
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    kernel = relay.var("kernel", shape=kernel_shape, dtype=dtype)
    deconv = relay.nn.conv2d_transpose(
        data,
        kernel,
        strides,
        padding,
        dilation,
        groups,
        kernel_size=kernel_shape[2:4],
        output_padding=output_padding,
    )
    mod["main"] = relay.Function([data, kernel], deconv)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    kernel_np = np.random.uniform(size=kernel_shape).astype(dtype)
    check_result(
        [data_np, kernel_np], mod, ref_out_shape, assert_shape=True, targets=[("llvm", tvm.cpu())]
    )
# TODO(@kevinthesun): Support dynamic input height and width.
@tvm.testing.uses_gpu
def test_any_conv2d_transpose_nchw():
    """conv2d_transpose shape with dynamic batch: stride 1 (shape preserved)
    and stride 2 with output_padding (spatial dims doubled)."""
    verify_any_conv2d_transpose_nchw(
        (relay.Any(), 64, 224, 224),
        (64, 192, 3, 3),
        (1, 1),
        (1, 1),
        (1, 1),
        1,
        (2, 64, 224, 224),
        (2, 192, 224, 224),
        (0, 0),
    )
    verify_any_conv2d_transpose_nchw(
        (relay.Any(), 32, 224, 224),
        (32, 64, 3, 3),
        (2, 2),
        (1, 1),
        (1, 1),
        1,
        (1, 32, 224, 224),
        (1, 64, 448, 448),
        (1, 1),
    )
def verify_any_pool2d(
    pool_type, data_shape, pool_size, strides, padding, layout, static_data_shape, ref_out_shape
):
    """Check max/avg pool2d's output shape for a dynamically-shaped input."""
    mod = tvm.IRModule()
    # Dispatch on pool type; anything other than "max" means average pooling.
    pool_func = {"max": relay.nn.max_pool2d}.get(pool_type, relay.nn.avg_pool2d)
    data = relay.var("data", shape=data_shape, dtype="float32")
    mod["main"] = relay.Function([data], pool_func(data, pool_size, strides, padding, layout))
    sample = np.random.uniform(size=static_data_shape).astype("float32")
    check_result([sample], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_pool2d():
    """pool2d shapes over NCHW, NHWC, and packed NCHW4c layouts with fully
    dynamic spatial dims."""
    verify_any_pool2d(
        "max",
        (relay.Any(), 3, relay.Any(), relay.Any()),
        (3, 3),
        (1, 1),
        (1, 1),
        "NCHW",
        (2, 3, 220, 220),
        (2, 3, 220, 220),
    )
    verify_any_pool2d(
        "avg",
        (relay.Any(), relay.Any(), relay.Any(), 4),
        (1, 1),
        (2, 2),
        (0, 0),
        "NHWC",
        (3, 220, 220, 4),
        (3, 110, 110, 4),
    )
    verify_any_pool2d(
        "max",
        (relay.Any(), 3, relay.Any(), relay.Any(), 4),
        (3, 3),
        (2, 2),
        (1, 1),
        "NCHW4c",
        (2, 3, 220, 220, 4),
        (2, 3, 110, 110, 4),
    )
def verify_any_global_pool2d(pool_type, data_shape, layout, static_data_shape, ref_out_shape):
    """Check global max/avg pool2d's output shape for a dynamic input."""
    mod = tvm.IRModule()
    # Dispatch on pool type; anything other than "max" means average pooling.
    pool_func = {"max": relay.nn.global_max_pool2d}.get(pool_type, relay.nn.global_avg_pool2d)
    data = relay.var("data", shape=data_shape, dtype="float32")
    mod["main"] = relay.Function([data], pool_func(data, layout))
    sample = np.random.uniform(size=static_data_shape).astype("float32")
    check_result([sample], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_global_pool2d():
    """Global pooling collapses spatial dims to 1 across three layouts."""
    verify_any_global_pool2d(
        "max", (relay.Any(), 3, relay.Any(), relay.Any()), "NCHW", (2, 3, 220, 220), (2, 3, 1, 1)
    )
    verify_any_global_pool2d(
        "avg", (relay.Any(), relay.Any(), relay.Any(), 4), "NHWC", (3, 220, 220, 4), (3, 1, 1, 4)
    )
    verify_any_global_pool2d(
        "max",
        (relay.Any(), 3, relay.Any(), relay.Any(), 4),
        "NCHW4c",
        (2, 3, 220, 220, 4),
        (2, 3, 1, 1, 4),
    )
def verify_any_split(data_shape, indices_or_sections, axis, static_data_shape, ref_out_shape):
    """Check relay.split output shapes for a dynamic input.

    :param ref_out_shape: list of expected shapes, one per split piece.
    Runs the VM executor directly because split returns a tuple, which
    check_result does not handle.
    """
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    y = relay.split(data, indices_or_sections, axis)
    # astuple() packs the TupleWrapper pieces into one tuple return value.
    mod["main"] = relay.Function([data], y.astuple())
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    for kind in ["vm"]:
        ex = relay.create_executor(kind, mod=mod, ctx=tvm.cpu(), target="llvm")
        result = ex.evaluate()(data_np)
        # Compare each returned piece's shape with its expected shape.
        for ret, ref_ret in zip(result, ref_out_shape):
            assert ret.asnumpy().shape == ref_ret, "Shape mismatch: expect %s but got %s." % (
                str(ref_ret),
                str(ret.asnumpy().shape),
            )
@tvm.testing.uses_gpu
def test_any_split():
    """split by section count and by explicit indices on dynamic inputs."""
    verify_any_split((relay.Any(), 4), 2, 1, (9, 4), [(9, 2), (9, 2)])
    verify_any_split((relay.Any(), relay.Any()), 2, 1, (9, 4), [(9, 2), (9, 2)])
    verify_any_split((relay.Any(), 12), (1, 4, 8), 1, (7, 12), [(7, 1), (7, 3), (7, 4)])
    verify_any_split((relay.Any(), relay.Any()), (1, 4, 8), 1, (7, 12), [(7, 1), (7, 3), (7, 4)])
@tvm.testing.uses_gpu
def test_any_batch_flatten():
    """batch_flatten collapses all trailing dims: (3, 3, 10) -> (3, 30)."""
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=any_dims(3), dtype=dtype)
    y = relay.nn.batch_flatten(data)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=(3, 3, 10)).astype(dtype)
    ref_out_shape = (3, 30)
    check_result([data_np], mod, ref_out_shape, assert_shape=True)
def verify_any_dense(
    data_shape, weight_shape, units, static_data_shape, static_weight_shape, ref_out_shape
):
    """Check nn.dense's output shape with dynamic data/weight shapes."""
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=dtype)
    mod["main"] = relay.Function([data, weight], relay.nn.dense(data, weight, units))
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    weight_np = np.random.uniform(size=static_weight_shape).astype(dtype)
    check_result([data_np, weight_np], mod, ref_out_shape, assert_shape=True)
# TODO(tvm-team) Fix dense schedule
# @tvm.testing.uses_gpu
def test_any_dense():
    """dense output shape with and without an explicit units count.

    GPU marker is disabled pending the dense schedule fix (TODO above).
    """
    verify_any_dense(any_dims(2), any_dims(2), None, (4, 16), (8, 16), (4, 8))
    verify_any_dense(any_dims(2), (50, relay.Any()), 50, (4, 40), (50, 40), (4, 50))
def verify_any_pad(data_shape, pad_width, static_data_shape):
    """Compare relay.nn.pad on a dynamically-shaped input with np.pad.

    NOTE: the stray ``@tvm.testing.uses_gpu`` decorator was removed here —
    it is a pytest marker intended for test_* functions, and this is a plain
    helper; its caller (test_any_pad) already carries the marker.
    """
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    y = relay.nn.pad(data, pad_width)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    # Default mode/value match relay.nn.pad's zero padding.
    ref_out = np.pad(data_np, pad_width)
    check_result([data_np], mod, ref_out)
@tvm.testing.uses_gpu
def test_any_pad():
    """pad with symmetric and asymmetric widths on dynamic inputs."""
    verify_any_pad(any_dims(3), ((0, 0), (1, 1), (2, 2)), (1, 2, 3))
    verify_any_pad(any_dims(4), ((1, 0), (1, 3), (0, 2), (9, 0)), (13, 11, 3, 1))
def verify_any_dilate(data_shape, strides, static_data_shape, dilation_value=None):
    """Check relay.nn.dilate on a dynamic input against a NumPy reference.

    :param strides: dilation factor per dimension (must match the rank)
    :param dilation_value: fill value for inserted elements; defaults to 0.0
    """
    assert len(data_shape) == len(strides)
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    if dilation_value is None:
        y = relay.nn.dilate(data, strides)
    else:
        y = relay.nn.dilate(data, strides, dilation_value)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    # Dilated extent per dim: (n - 1) * stride + 1.
    ref_shape = tuple(
        (static_data_shape[i] - 1) * strides[i] + 1 for i in range(len(static_data_shape))
    )
    if dilation_value is None:
        dilation_value = 0.0
    # Fill with the dilation value, then scatter the original data into
    # every strides[i]-th position along each dimension.
    ref_out = np.ones(shape=ref_shape, dtype=dtype)
    ref_out = dilation_value * ref_out
    ref_out[tuple(slice(None, None, strides[i]) for i in range(len(data_shape)))] = data_np
    check_result([data_np], mod, ref_out)
@tvm.testing.uses_gpu
def test_any_dilate():
    """dilate across ranks 1-4, various strides, and a custom fill value."""
    verify_any_dilate(any_dims(1), (1,), (1,))
    verify_any_dilate(any_dims(1), (1,), (5,))
    verify_any_dilate(any_dims(1), (5,), (5,))
    verify_any_dilate(any_dims(3), (1, 1, 1), (1, 2, 3))
    verify_any_dilate(any_dims(3), (1, 1, 2), (1, 2, 3))
    verify_any_dilate(any_dims(3), (1, 1, 5), (1, 2, 3))
    verify_any_dilate(any_dims(3), (3, 7, 5), (1, 2, 3))
    verify_any_dilate(any_dims(4), (3, 7, 1, 5), (1, 2, 3, 4))
    verify_any_dilate(any_dims(4), (3, 7, 1, 5), (1, 2, 3, 4), 1.0)
def verify_any_softmax(data_shape, axis, static_data_shape, ref_out_shape):
    """Check softmax's output shape for a dynamically-shaped input."""
    mod = tvm.IRModule()
    data = relay.var("data", shape=data_shape, dtype="float32")
    mod["main"] = relay.Function([data], relay.nn.softmax(data, axis))
    sample = np.random.uniform(size=static_data_shape).astype("float32")
    check_result([sample], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_softmax():
    """softmax preserves shape regardless of the chosen axis."""
    verify_any_softmax(any_dims(3), -1, (1, 2, 3), (1, 2, 3))
    verify_any_softmax(any_dims(4), 2, (13, 11, 3, 1), (13, 11, 3, 1))
def verify_any_topk(data_shape, kval, np_dshape, dtype, const_k=False):
    """Check relay.topk (indices only) on dynamic input shapes.

    :param const_k: when True, k is baked in as a Relay constant; otherwise
        it is passed in as a scalar runtime input.
    Runs the executors directly (CPU/llvm only) — see the TODO below.
    """
    mod = tvm.IRModule()
    data = relay.var("data", shape=data_shape, dtype=dtype)
    np_data = np.random.uniform(size=np_dshape).astype(dtype)
    if const_k:
        k = relay.const(kval)
        args = [data]
        in_vals = [np_data]
    else:
        k = relay.var("k", shape=(), dtype="int32")
        args = [data, k]
        in_vals = [np_data, kval]
    out = relay.topk(data, k, ret_type="indices")
    mod["main"] = relay.Function(args, out)
    # argsort of the negated data yields descending-order indices.
    # (Renamed from `sorted`, which shadowed the builtin.)
    sorted_indices = np.argsort(-np_data)
    if len(np_dshape) == 2:
        ref_out = sorted_indices[:, 0:kval]
    else:
        ref_out = sorted_indices[0:kval]
    for kind in ["debug", "vm"]:
        ex = relay.create_executor(kind, mod=mod, ctx=tvm.cpu(), target="llvm")
        result = ex.evaluate()(*in_vals)
        tvm.testing.assert_allclose(result.asnumpy(), ref_out)
    # TODO(@zhiics) Fix topk cuda schedule for dynamic inputs
    # check_result(in_vals, mod, ref_out)
def test_any_topk():
    # NOTE(review): unlike sibling tests this one carries no
    # @tvm.testing.uses_gpu decorator — presumably deliberate given the
    # CUDA topk TODO inside verify_any_topk; confirm before adding it.
    verify_any_topk(any_dims(1), 5, (10,), "float32")
    verify_any_topk(any_dims(2), 2, (6, 3), "int32")
    verify_any_topk(any_dims(2), 3, (6, 3), "float32", True)
@tvm.testing.uses_gpu
def test_fused_ops():
    """Elementwise add/mul chain over a fully dynamic 2-D input."""
    inp = relay.var("x", shape=(relay.Any(), relay.Any()), dtype="float32")
    plus_one = inp + relay.const(1.0, "float32")
    doubled = plus_one * relay.const(2.0, "float32")
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], doubled)
    feed = np.random.uniform(size=(5, 4)).astype("float32")
    check_result([feed], mod, (feed + 1) * 2)
@tvm.testing.uses_gpu
def test_arange_with_dynamic_shape():
    """arange whose length is the runtime size of the input's first dim."""
    # m, n, k = relay.ShapeVar('m'), relay.ShapeVar('n'), relay.ShapeVar('k')
    m, n, k = relay.Any(), relay.Any(), relay.Any()
    x = relay.var("x", shape=(m, n, k), dtype="float32")
    y0 = relay.shape_of(x)
    # First element of the shape tensor, i.e. the dynamic leading dimension.
    y1 = relay.take(y0, relay.const(0, "int32"))
    y2 = relay.op.arange(y1, dtype="int32")
    y3 = y2 + relay.const(1, dtype="int32")
    data = np.random.rand(10, 5, 3).astype("float32")
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], y3)
    # Expected: [1, 2, ..., 10] since the leading dim is 10.
    check_result([data], mod, np.array(range(10)).astype("int32") + 1)
def verify_any_strided_slice(
    data_shape,
    begin_shape,
    end_shape,
    strides_shape,
    data_np_shape,
    slice_mode="end",
    const_attrs=False,
):
    """strided_slice over a dynamic input, checked against the topi reference.

    When `const_attrs` is True, begin/end/strides are baked in as constants
    (and the data var is re-declared with the static shape); otherwise they
    are runtime tensor inputs.
    """
    # Generate random numpy input data
    np_data = np.random.uniform(size=data_np_shape).astype("float32")
    np_begin = np.random.randint(2, size=begin_shape, dtype="int32")
    np_end = np.random.randint(5, 10, size=end_shape, dtype="int32")
    # In "size" mode strides are pinned to 1; otherwise 1 or 2.
    np_strides = np.random.randint(
        1, 2 if slice_mode == "size" else 3, size=strides_shape, dtype="int32"
    )
    # target numpy result
    ref_res = tvm.topi.testing.strided_slice_python(
        np_data, np_begin, np_end, np_strides, slice_mode
    )

    # Relay Module
    mod = tvm.IRModule()
    data = relay.var("data", shape=data_shape, dtype="float32")
    if const_attrs:
        # Re-declare with the concrete shape so constant folding can apply.
        data = relay.var("data", shape=data_np_shape, dtype="float32")
        begin = relay.const(np_begin)
        end = relay.const(np_end)
        strides = relay.const(np_strides)
        args = [data]
        np_inputs = [np_data]
    else:
        begin = relay.var("begin", shape=begin_shape, dtype="int32")
        end = relay.var("end", shape=end_shape, dtype="int32")
        strides = relay.var("strides", shape=strides_shape, dtype="int32")
        args = [data, begin, end, strides]
        np_inputs = [np_data, np_begin, np_end, np_strides]

    y = relay.strided_slice(data, begin=begin, end=end, strides=strides, slice_mode=slice_mode)
    mod["main"] = relay.Function(args, y)
    check_result(np_inputs, mod, ref_res)
@tvm.testing.uses_gpu
def test_any_strided_slice():
    """strided_slice across ranks, slice modes, and constant attributes."""
    verify_any_strided_slice(any_dims(2), (2,), (2,), (2,), (15, 21))
    verify_any_strided_slice(any_dims(3), (3,), (3,), (3,), (15, 17, 21))
    verify_any_strided_slice(any_dims(3), (3,), (3,), (3,), (23, 29, 41))
    verify_any_strided_slice(any_dims(4), (4,), (4,), (4,), (40, 50, 60, 70))
    verify_any_strided_slice(any_dims(3), (3,), (3,), (3,), (15, 17, 21), slice_mode="size")
    verify_any_strided_slice(any_dims(2), (2,), (2,), (2,), (15, 21), const_attrs=True)
@tvm.testing.uses_gpu
def test_recursive_concat():
    """
    fn @concat_loop(%i: int32, %st: (any, 1)) -> (any, 1) {
        if (%i < 10) {
            let %i = reshape(cast(i, "float32"), newshape=(1, ))
            let %new_st = concatenate((st, i), axis=0)
            concat_loop(%i + 1, %new_st)
        } else {
            st
        }
    }
    """
    # Initial Values.
    i = relay.var("i", shape=(), dtype="int32")
    st = relay.var("st", shape=(relay.Any(), 1), dtype="int32")

    def _cond(i, st):
        # Continue while i < 10; min() collapses the comparison to a scalar.
        return relay.op.min(relay.op.less(i, int32(10)))

    def _body(i, st):
        # Append i (reshaped to (1, 1)) to the accumulator along axis 0.
        i_vec = relay.op.reshape(i, (1, 1))
        ret = relay.op.concatenate([st, i_vec], axis=0)
        return i + int32(1), ret

    loop = while_loop(_cond, [i, st], _body)
    start = relay.var("start", shape=(), dtype="int32")
    body = loop(start, relay.op.reshape(relay.const(0), newshape=(1, 1)))
    func = relay.Function([start], relay.TupleGetItem(body, 1))
    mod = tvm.IRModule()
    mod["main"] = func
    data = np.array(0.0, dtype="int32")
    # Result: the initial [0] followed by i = 0..9 appended per iteration.
    ref = np.array([0] + list(range(10))).reshape((11, 1)).astype("int32")
    check_result([data], mod, ref)
@tvm.testing.uses_gpu
def test_recursive_concat_with_wrong_annotation():
    """
    v0.0.1
    fn (%start: int32) {
      %7 = {
        let %while_loop = fn (%i: int32, %st: Tensor[(1, 1), int32]) {
          %0 = less(%i, 10)
          %1 = min(%0)
          if (%1) {
            %2 = add(%i, 1)
            %3 = reshape(%i, newshape=[1, 1])
            %4 = (%st, %3)
            /* The result of concat should be 1,1 but it is 2, 1. */
            %5 = concatenate(%4)
            %while_loop(%2, %5)
          } else {
            (%i, %st)
          }
        }
        %6 = reshape(0, newshape=[1, 1])
        %while_loop(%start, %6)
      }
      %7.1
    }
    """
    # Initial Values.
    i = relay.var("i", shape=(), dtype="int32")
    # Deliberately wrong annotation: %st is fixed at (1, 1) although the loop
    # body grows it along axis 0 every iteration.
    st = relay.var("st", shape=(1, 1), dtype="int32")

    def _cond(i, st):
        return relay.op.min(relay.op.less(i, int32(10)))

    def _body(i, st):
        i_vec = relay.op.reshape(i, (1, 1))
        ret = relay.op.concatenate([st, i_vec], axis=0)
        return i + int32(1), ret

    loop = while_loop(_cond, [i, st], _body)
    start = relay.var("start", shape=(), dtype="int32")
    body = loop(start, relay.op.reshape(relay.const(0), newshape=(1, 1)))
    func = relay.Function([start], relay.TupleGetItem(body, 1))
    with DiagnosticTesting() as diagnostics:
        # Type inference must report the dimension-0 shape conflict.
        diagnostics.assert_message("in particular dimension 0 conflicts 2 does not match 1")
        func = infer_type(func)
@tvm.testing.uses_gpu
def test_tuple_get_item():
    """split() on a dynamically-shaped input followed by TupleGetItem(0)."""
    dtype = "float32"
    static_shape = (9, 4)
    dyn_shape = (relay.Any(), 4)
    inp = relay.var("data", shape=dyn_shape, dtype=dtype)
    halves = relay.split(inp, 2, 1)
    first_half = relay.expr.TupleGetItem(halves.astuple(), 0)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], first_half)
    feed = np.random.uniform(size=static_shape).astype(dtype)
    # Splitting (9, 4) into two pieces along axis 1 yields (9, 2) each.
    check_result([feed], mod, (9, 2), assert_shape=True)
@tvm.testing.uses_gpu
def test_mixed_input_type():
    """A function taking a nested tuple of tensors plus a plain tensor."""
    mod = tvm.IRModule()
    dtype = "float32"
    static_data_shape = (9, 4)
    data_shape = (relay.Any(), 4)
    tensor_type = relay.TensorType(data_shape, dtype)
    tuple_type = relay.TupleType([tensor_type, tensor_type])
    # d0 has type ((T, T), T): a tuple of (tuple of tensors, tensor).
    data0 = relay.var("d0", type_annotation=relay.TupleType([tuple_type, tensor_type]))
    data1 = relay.var("d1", shape=(relay.Any(), 4), dtype=dtype)
    data_tuple = relay.expr.TupleWrapper(data0, 2)
    nested_data_tuple = relay.expr.TupleWrapper(data_tuple[0], 2)
    y = nested_data_tuple[1] * data_tuple[1] + data1
    mod["main"] = relay.Function([data0, data1], y)
    data_np0 = np.random.uniform(size=static_data_shape).astype(dtype)
    data_np1 = np.random.uniform(size=static_data_shape).astype(dtype)
    ref_out_shape = (9, 4)
    # Nested tuple inputs are only supported by the VM executor.
    check_result(
        [[[data_np0, data_np0], data_np0], data_np1],
        mod,
        ref_out_shape,
        assert_shape=True,
        only_vm=True,
    )
def verify_any_crop_and_resize(
    data_shape,
    boxes_shape,
    box_indices_shape,
    crop_size,
    layout,
    static_boxes,
    static_box_indices_shape,
    ref_out_shape,
):
    """crop_and_resize with a dynamic number of boxes; checks shape only."""
    mod = tvm.IRModule()
    dtype = "float32"
    indices_dtype = "int32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    boxes = relay.var("boxes", shape=boxes_shape, dtype=dtype)
    box_indices = relay.var("box_indices", shape=box_indices_shape, dtype=indices_dtype)
    y = relay.image.crop_and_resize(data, boxes, box_indices, crop_size, layout)
    mod["main"] = relay.Function([data, boxes, box_indices], y)
    data_np = np.random.uniform(size=data_shape).astype(dtype)
    boxes_np = np.random.uniform(size=static_boxes).astype(dtype)
    # NOTE(review): uniform([0, 1)) cast to int32 yields all-zero box indices
    # — presumably acceptable because the callers use batch size 1; confirm
    # before reusing this helper with a larger batch.
    box_indices_np = np.random.uniform(size=static_box_indices_shape).astype(indices_dtype)
    check_result([data_np, boxes_np, box_indices_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_crop_and_resize():
    """crop_and_resize with a dynamic box count, in NHWC and NCHW layouts."""
    verify_any_crop_and_resize(
        data_shape=(1, 234, 234, 256),
        boxes_shape=(relay.Any(), 4),
        box_indices_shape=(relay.Any(),),
        crop_size=(14, 14),
        layout="NHWC",
        static_boxes=(128, 4),
        static_box_indices_shape=(128,),
        ref_out_shape=(128, 14, 14, 256),
    )
    verify_any_crop_and_resize(
        data_shape=(1, 256, 234, 234),
        boxes_shape=(relay.Any(), 4),
        box_indices_shape=(relay.Any(),),
        crop_size=(14, 14),
        layout="NCHW",
        static_boxes=(128, 4),
        static_box_indices_shape=(128,),
        ref_out_shape=(128, 256, 14, 14),
    )
def verify_any_mirror_pad(data_shape, pad_width, static_data_shape, ref_out_shape):
    """mirror_pad over a (possibly dynamic) input; only output shape checked."""
    dtype = "float32"
    inp = relay.var("data", shape=data_shape, dtype=dtype)
    padded = relay.nn.mirror_pad(inp, pad_width)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], padded)
    feed = np.random.uniform(size=static_data_shape).astype(dtype)
    check_result([feed], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_mirror_pad():
    """Pad one pixel on each side of both spatial dims (NCHW layout)."""
    verify_any_mirror_pad(
        data_shape=(1, 256, 232, 232),
        pad_width=((0, 0), (0, 0), (1, 1), (1, 1)),
        static_data_shape=(1, 256, 232, 232),
        ref_out_shape=(1, 256, 234, 234),
    )
def verify_any_ndarray_size(data_np_shape):
    """ndarray_size on an input of unknown shape must equal np.size."""
    rank = len(data_np_shape)
    inp = relay.var("v", shape=any_dims(rank), dtype="float32")
    size_expr = relay.ndarray_size(inp, dtype="int32")
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], size_expr)
    feed = np.zeros(data_np_shape, dtype="float32")
    check_result([feed], mod, np.size(feed))
@tvm.testing.uses_gpu
def test_any_ndarray_size():
    """ndarray_size across ranks 1 through 4."""
    for shape in [(2,), (2, 2), (1, 2, 3, 4)]:
        verify_any_ndarray_size(shape)
def test_any_consecutive_broadcast():
    """Chained elementwise broadcasts over four dynamic 2-D inputs."""
    dtype = "float32"
    data0 = relay.var("data0", shape=any_dims(2), dtype=dtype)
    data1 = relay.var("data1", shape=any_dims(2), dtype=dtype)
    data2 = relay.var("data2", shape=any_dims(2), dtype=dtype)
    data3 = relay.var("data3", shape=any_dims(2), dtype=dtype)

    out0 = data0 + data1
    out1 = data0 * data1
    out2 = out0 - out1

    out3 = data2 + data3
    out4 = data2 * data3
    out5 = out3 - out4

    out6 = out2 * out5

    mod = tvm.IRModule()
    mod["main"] = relay.Function([data0, data1, data2, data3], out6)

    # (1, 4) is broadcast against (2, 4) in every pairwise op.
    np_data0 = np.random.uniform(size=(1, 4)).astype(dtype)
    np_data1 = np.random.uniform(size=(2, 4)).astype(dtype)
    np_data2 = np.random.uniform(size=(1, 4)).astype(dtype)
    np_data3 = np.random.uniform(size=(2, 4)).astype(dtype)
    ref_res = ((np_data0 + np_data1) - (np_data0 * np_data1)) * (
        (np_data2 + np_data3) - (np_data2 * np_data3)
    )
    check_result([np_data0, np_data1, np_data2, np_data3], mod, ref_res)
def test_reshape_concat():
    """concatenate of reshape / reshape_like outputs with dynamic shapes."""
    # Variant 1: flatten both inputs with reshape([-1]) and concatenate.
    dtype = "float32"
    d0 = relay.var("d0", shape=any_dims(2), dtype=dtype)
    d1 = relay.var("d1", shape=any_dims(3), dtype=dtype)
    out = relay.op.concatenate([relay.op.reshape(d0, [-1]), relay.op.reshape(d1, [-1])], axis=0)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([d0, d1], out)
    np_data0 = np.random.uniform(size=(4, 5)).astype(dtype)
    np_data1 = np.random.uniform(size=(2, 5, 2)).astype(dtype)
    ref_res = np.concatenate([np.reshape(np_data0, [-1]), np.reshape(np_data1, [-1])], axis=0)
    check_result([np_data0, np_data1], mod, ref_res)

    # Variant 2: reshape_like with separate shape-provider inputs s0/s1.
    d0 = relay.var("d0", shape=any_dims(2), dtype=dtype)
    d1 = relay.var("d1", shape=any_dims(2), dtype=dtype)
    s0 = relay.var("s0", shape=any_dims(3), dtype=dtype)
    s1 = relay.var("s1", shape=any_dims(3), dtype=dtype)
    out = relay.op.concatenate(
        [relay.op.reshape_like(d0, s0), relay.op.reshape_like(d1, s1)], axis=0
    )
    mod = tvm.IRModule()
    mod["main"] = relay.Function([d0, d1, s0, s1], out)
    np_data0 = np.random.uniform(size=(4, 5)).astype(dtype)
    np_data1 = np.random.uniform(size=(8, 5)).astype(dtype)
    np_shape_like0 = np.random.uniform(size=(2, 2, 5)).astype(dtype)
    np_shape_like1 = np.random.uniform(size=(4, 2, 5)).astype(dtype)
    ref_res = np.concatenate(
        [np.reshape(np_data0, np_shape_like0.shape), np.reshape(np_data1, np_shape_like1.shape)],
        axis=0,
    )
    check_result([np_data0, np_data1, np_shape_like0, np_shape_like1], mod, ref_res)
def test_any_adv_index():
    """Advanced (numpy-style) indexing with dynamically-shaped index tensors."""
    data = relay.var("data", shape=(5, relay.Any(), relay.Any()), dtype="float32")
    index0 = relay.var("index0", shape=(1, relay.Any()), dtype="int64")
    index1 = relay.var("index1", shape=(1, relay.Any()), dtype="int64")
    out = relay.adv_index([data, index0, index1])
    mod = tvm.IRModule()
    mod["main"] = relay.Function([data, index0, index1], out)
    np_data_shape = (5, 5, 10)
    np_index_shape = (1, 4)
    np_data = np.random.uniform(size=np_data_shape).astype("float32")
    # Indices drawn in [0, 5): valid for both of the indexed axes.
    np_index = np.random.uniform(0, np_data_shape[0], size=np_index_shape).astype("int64")
    # The same index tensor is applied to both axes, mirroring the graph.
    ref_res = np_data[tuple([np_index, np_index])]
    check_result([np_data, np_index, np_index], mod, ref_res)
def verify_any_repeat(data_shape, np_dshape, repeats, axis):
    """relay.repeat on a dynamic input, checked against np.repeat."""
    dtype = "float32"
    inp = relay.var("data", shape=data_shape, dtype=dtype)
    repeated = relay.repeat(inp, repeats, axis)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], repeated)
    feed = np.random.uniform(size=np_dshape).astype(dtype)
    check_result([feed], mod, np.repeat(feed, repeats, axis))
@tvm.testing.uses_gpu
def test_any_repeat():
    """repeat over dynamic inputs with positive, negative, and inner axes."""
    verify_any_repeat(any_dims(2), (1, 2), 2, 0)
    verify_any_repeat(any_dims(1), (3,), 3, -1)
    verify_any_repeat(any_dims(4), (2, 1, 1, 4), 4, 2)
def verify_any_stack(data_shape, np_dshape, num_data, axis):
    """relay.stack over `num_data` dynamic inputs, checked against np.stack.

    :param data_shape: relay shape (may contain relay.Any()) shared by inputs
    :param np_dshape: concrete numpy shape fed at run time
    :param num_data: number of stacked inputs
    :param axis: stacking axis
    """
    mod = tvm.IRModule()
    dtype = "float32"
    # Comprehensions replace the original append loops (same order, same names).
    inputs = [
        relay.var("data{}".format(i), shape=data_shape, dtype=dtype)
        for i in range(num_data)
    ]
    y = relay.stack(inputs, axis)
    mod["main"] = relay.Function(inputs, y)
    np_inputs = [np.random.uniform(size=np_dshape).astype(dtype) for _ in range(num_data)]
    ref_res = np.stack(np_inputs, axis)
    check_result(np_inputs, mod, ref_res)
@tvm.testing.uses_gpu
def test_any_stack():
    """stack over varying input counts and axes with dynamic shapes."""
    verify_any_stack(any_dims(2), (1, 2), 3, 0)
    verify_any_stack(any_dims(1), (3,), 4, -1)
    verify_any_stack(any_dims(4), (2, 1, 1, 4), 2, 2)
if __name__ == "__main__":
    # Allow running this test file directly as a script via pytest.
    pytest.main([__file__])
| |
#
# based on
# https://github.com/spacetelescope/understanding-json-schema/blob/master/source/sphinxext/jsonschemaext.py
#
# Copyright (c) 2013, Space Telescope Science Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the Space Telescope Science Institute nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import json
import os
from docutils import nodes
from docutils import statemachine
from docutils.parsers.rst import Directive
from sphinx.util.nodes import set_source_info
import difflib
import jsonschema
import jinja2
# Jinja template rendering a JSON schema as a nested HTML definition list;
# recurses into each item's "properties" via Jinja's `recursive` loop.
# NOTE(review): the name is missing an 'a' ("readable") — kept as-is because
# this module refers to it by this spelling.
readble_template = jinja2.Template("""
<dl class='jsonschematable'>
{%- for item in jss recursive %}
<dt class='jss_title h4'>{{ item.title }}</dt>
<dd>
<dl class='jsonschematable'>
<dt class='jss_desc text-primary'>Description:</dt>
<dd>{{ item.description }}</dd>
<dt class='jss_type text-primary'>Type:</dt>
<dd>{{ item.type }}</dd>
{%- if item.properties -%}
<dt class='jss_prop text-primary'>Properties:</dt><dd>
<dl>
{{ loop(item['properties'].values())|indent }}
</dl>
</dd>
{%- endif -%}
</dl>
</dd>
{%- endfor %}
</dl>
""")
class jsonschema_node(nodes.Element):
    """Docutils node holding one JSON schema part (raw JSON in 'raw_json')."""
    pass
class diff_node(nodes.Element):
    """Docutils node wrapping a unified diff between two JSON parts."""
    pass
class AttrDict(dict):
    """Dict whose keys are also readable as attributes (d.key == d['key'])."""

    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        # Aliasing __dict__ to the dict itself makes attribute access work.
        self.__dict__ = self
def pprint_json(jsdoc):
    """Return *jsdoc* serialized as human-readable JSON.

    Keys are sorted and nesting is indented by four spaces, so equal
    documents always produce identical strings (handy for diffing).
    """
    return json.dumps(
        jsdoc,
        sort_keys=True,
        indent=4,
        separators=(',', ': '),
    )
class _baseSchemaDirective(Directive):
    """Shared machinery for schema directives.

    Parses directive content into parts separated by ``--`` lines; each part
    carries its raw text, parsed JSON, and any preceding ``//`` comments.
    """

    has_content = True
    validate = True

    def split_content(self, input_string):
        """Split directive content into AttrDict parts.

        Each part has ``content`` (raw text), ``json`` (parsed document) and
        ``comment`` (list of ``//`` comment lines preceding it).  A part that
        is a single line is treated as a filename relative to the rst source
        and, when that file exists, replaced by the file's contents.

        :raises ValueError: if a part is not valid JSON while ``should_pass``
            is set (always the case in this class).
        """
        parts = []
        should_pass = True
        part = []
        comment = []

        def add_part():
            content = '\n'.join(part)
            if len(part) == 1:
                # Single-line part: try to read it as a file next to the rst.
                try:
                    rst_file = self.state_machine.document.attributes['source']
                    test_path = os.path.join(os.path.dirname(rst_file),
                                             content)
                    with open(test_path, 'r') as fin:
                        # Read verbatim.  The previous '\n'.join(fin) doubled
                        # every newline, since iterating a file keeps each
                        # line's trailing '\n'.
                        content = fin.read()
                except FileNotFoundError:
                    pass
            try:
                json_content = json.loads(content)
            except ValueError:
                if should_pass:
                    raise ValueError("Invalid json: {0}".format(content))
                else:
                    # A complex number will never validate
                    json_content = 1+1j
            parts.append(AttrDict({
                'content': content,
                'json': json_content,
                'comment': comment}))

        for line in input_string:
            line = line.strip()
            if line.startswith('//'):
                # Accumulate '//' comment text for the upcoming part.
                line = line[2:].lstrip()
                if line:
                    comment.append(line)
            elif line == '--':
                # Part separator: flush the accumulated part and comments.
                add_part()
                part = []
                comment = []
            else:
                if line:
                    part.append(line)
        add_part()
        return parts
class SchemaDiffDirective(_baseSchemaDirective):
    """Directive that renders each JSON part plus pairwise unified diffs."""

    def run(self):
        result = []
        parts = self.split_content(self.content)
        # First pass: emit each part's comment (parsed as rst) followed by
        # its pretty-printed JSON in a literal block.
        for part in parts:
            if len(part.comment):
                paragraph = nodes.paragraph('', '')
                comment = statemachine.StringList(part.comment)
                comment.parent = self.content.parent
                self.state.nested_parse(comment, 0, paragraph)
                paragraph['classes'] = ['jsonschema-comment']
                set_source_info(self, paragraph)
                result.append(paragraph)
            container = jsonschema_node()
            # Stash the parsed JSON for the HTML writer (depart_..._html).
            container['raw_json'] = part.json
            set_source_info(self, container)
            pprint_content = pprint_json(part.json)
            literal = nodes.literal_block(
                pprint_content, pprint_content)
            literal['language'] = 'json'
            set_source_info(self, literal)
            container.children.append(literal)
            result.append(container)
        # Second pass: a unified diff for every unordered pair of parts,
        # labelled with each part's first comment line when available.
        for indx, part in enumerate(parts):
            for other_part in parts[(indx + 1):]:
                p1 = pprint_json(part.json).split('\n')
                p2 = pprint_json(other_part.json).split('\n')
                diff_str = '\n'.join(difflib.unified_diff(p2, p1,
                                                          lineterm='',
                                                          fromfile=(other_part.comment[0]
                                                                    if other_part.comment else ''),
                                                          tofile=(part.comment[0]
                                                                  if part.comment else ''),))
                container = diff_node()
                set_source_info(self, container)
                literal = nodes.literal_block(
                    diff_str, diff_str)
                literal['language'] = 'diff'
                set_source_info(self, literal)
                container.children.append(literal)
                result.append(container)
        return result
def visit_jsonschema_node_html(self, node):
    # All HTML output is produced on departure; nothing to open here.
    pass
def _ensure_title(input_dict):
return_dict = dict(input_dict)
if 'properties' in input_dict:
props = return_dict['properties']
for k, v in props.items():
if 'title' not in v:
v['title'] = k
if 'properties' in v:
v['properties'] = _ensure_title[v]
return return_dict
def depart_jsonschema_node_html(self, node):
    # Render the stored raw JSON through the readable-schema template,
    # ensuring every property carries a title first.
    form_dict = _ensure_title(node['raw_json'])
    self.body.append(readble_template.render(
        jss=[form_dict]))
def visit_jsonschema_node_latex(self, node):
    """Open a LaTeX jsonframe for the node, indented for pass/fail nodes.

    The frame marker and color depend on the node's class: a green check
    for passing examples, a red cross for failing ones, a plain label for
    bare schemas, and an empty gray frame otherwise.
    """
    # (marker, color, indent?) keyed by node class; checked in this order.
    frame_styles = (
        ('jsonschema-pass', (r"\Checkmark", "ForestGreen", True)),
        ('jsonschema-fail', (r"\XSolidBrush", "BrickRed", True)),
        ('jsonschema', (r"\{ json schema \}", "gray", False)),
    )
    char, color, adjust = "", "gray", False
    for cls, style in frame_styles:
        if cls in node['classes']:
            char, color, adjust = style
            break
    if adjust:
        self.body.append(r"\begin{adjustwidth}{2.5em}{0pt}")
    self.body.append(r"\begin{jsonframe}{%s}{%s}" % (char, color))
def depart_jsonschema_node_latex(self, node):
    """Close the jsonframe opened in visit; also close the adjustwidth
    environment for pass/fail nodes (which opened one on visit)."""
    classes = node['classes']
    self.body.append(r"\end{jsonframe}")
    if 'jsonschema-pass' in classes or 'jsonschema-fail' in classes:
        self.body.append(r"\end{adjustwidth}")
def visit_diff_node_html(self, node):
    # diff_node renders entirely via its literal_block child; the wrappers
    # below exist only so Sphinx has visitors registered for the node.
    pass


def depart_diff_node_html(self, node):
    pass


def visit_diff_node_latex(self, node):
    pass


def depart_diff_node_latex(self, node):
    pass
def setup(app):
    """Sphinx extension entry point: register the directive and node visitors."""
    app.add_directive('schema_diff', SchemaDiffDirective)
    app.add_node(jsonschema_node,
                 html=(visit_jsonschema_node_html,
                       depart_jsonschema_node_html),
                 latex=(visit_jsonschema_node_latex,
                        depart_jsonschema_node_latex))
    app.add_node(diff_node,
                 html=(visit_diff_node_html, depart_diff_node_html),
                 latex=(visit_diff_node_latex, depart_diff_node_latex))
# LaTeX packages required by the adjustwidth/colored-frame markup emitted by
# the latex node visitors above.
# NOTE(review): nothing in setup() registers this preamble with the Sphinx
# app — presumably the project's conf.py imports and applies it; confirm.
latex_preamble = r"""
\usepackage{changepage}
\usepackage[dvipsnames]{xcolor}
"""
| |
"""
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import numpy as np
import pytest
from pandas.core.dtypes.common import is_extension_array_dtype
import pandas as pd
from pandas.core.arrays import integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension import base
def make_data():
    """Build the canonical 100-element fixture payload.

    Layout: 1..8, NaN, 10..97, NaN, 99, 100 — two missing values embedded
    in otherwise increasing integers.
    """
    values = []
    values.extend(range(1, 9))
    values.append(np.nan)
    values.extend(range(10, 98))
    values.append(np.nan)
    values.extend([99, 100])
    return values
@pytest.fixture(
    params=[
        Int8Dtype,
        Int16Dtype,
        Int32Dtype,
        Int64Dtype,
        UInt8Dtype,
        UInt16Dtype,
        UInt32Dtype,
        UInt64Dtype,
    ]
)
def dtype(request):
    # Parametrized over every nullable-integer dtype, so each dependent test
    # runs once per dtype.
    return request.param()
@pytest.fixture
def data(dtype):
    # 100 values with two missing entries (see make_data).
    return integer_array(make_data(), dtype=dtype)


@pytest.fixture
def data_for_twos(dtype):
    # All-twos array, used by the divmod/identity tests in the base suite.
    return integer_array(np.ones(100) * 2, dtype=dtype)


@pytest.fixture
def data_missing(dtype):
    # Base-suite contract: [missing, valid].
    return integer_array([np.nan, 1], dtype=dtype)


@pytest.fixture
def data_for_sorting(dtype):
    # Base-suite contract: [B, C, A] with A < B < C.
    return integer_array([1, 2, 0], dtype=dtype)


@pytest.fixture
def data_missing_for_sorting(dtype):
    # Base-suite contract: [B, missing, A] with A < B.
    return integer_array([1, np.nan, 0], dtype=dtype)


@pytest.fixture
def na_cmp():
    # we are np.nan
    return lambda x, y: np.isnan(x) and np.isnan(y)


@pytest.fixture
def na_value():
    # The scalar used to represent a missing entry.
    return np.nan


@pytest.fixture
def data_for_grouping(dtype):
    # Base-suite contract: [B, B, NA, NA, A, A, B, C] with A < B < C.
    b = 1
    a = 0
    c = 2
    na = np.nan
    return integer_array([b, b, na, na, a, a, b, c], dtype=dtype)
class TestDtype(base.BaseDtypeTests):
    """Dtype tests; one base test is skipped because this suite is
    parametrized over several dtypes rather than a single one."""

    @pytest.mark.skip(reason="using multiple dtypes")
    def test_is_dtype_unboxes_dtype(self):
        # we have multiple dtypes, so skip
        pass
class TestArithmeticOps(base.BaseArithmeticOpsTests):
    """Arithmetic-op tests adjusted for nullable-integer dtype quirks."""

    def check_opname(self, s, op_name, other, exc=None):
        # overwriting to indicate ops don't raise an error
        super().check_opname(s, op_name, other, exc=None)

    def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
        # Compare op(s, other) against Series.combine, patching up the
        # expected/actual dtypes where nullable-int behavior diverges.
        if exc is None:
            if s.dtype.is_unsigned_integer and (op_name == "__rsub__"):
                # TODO see https://github.com/pandas-dev/pandas/issues/22023
                pytest.skip("unsigned subtraction gives negative values")

            if (
                hasattr(other, "dtype")
                and not is_extension_array_dtype(other.dtype)
                and pd.api.types.is_integer_dtype(other.dtype)
            ):
                # other is np.int64 and would therefore always result in
                # upcasting, so keeping other as same numpy_dtype
                other = other.astype(s.dtype.numpy_dtype)

            result = op(s, other)
            expected = s.combine(other, op)

            if op_name in ("__rtruediv__", "__truediv__", "__div__"):
                # True division always yields floats.
                expected = expected.astype(float)
                if op_name == "__rtruediv__":
                    # TODO reverse operators result in object dtype
                    result = result.astype(float)
            elif op_name.startswith("__r"):
                # TODO reverse operators result in object dtype
                # see https://github.com/pandas-dev/pandas/issues/22024
                expected = expected.astype(s.dtype)
                result = result.astype(s.dtype)
            else:
                # combine method result in 'biggest' (int64) dtype
                expected = expected.astype(s.dtype)
                pass  # NOTE(review): no-op leftover; safe to delete.

            if (op_name == "__rpow__") and isinstance(other, pd.Series):
                # TODO pow on Int arrays gives different result with NA
                # see https://github.com/pandas-dev/pandas/issues/22022
                result = result.fillna(1)

            self.assert_series_equal(result, expected)
        else:
            with pytest.raises(exc):
                op(s, other)

    def _check_divmod_op(self, s, op, other, exc=None):
        # divmod never raises for nullable ints; force exc=None.
        super()._check_divmod_op(s, op, other, None)

    @pytest.mark.skip(reason="intNA does not error on ops")
    def test_error(self, data, all_arithmetic_operators):
        # other specific errors tested in the integer array specific tests
        pass
class TestComparisonOps(base.BaseComparisonOpsTests):
    def check_opname(self, s, op_name, other, exc=None):
        # Comparisons on nullable ints never raise; force exc=None.
        super().check_opname(s, op_name, other, exc=None)

    def _compare_other(self, s, data, op_name, other):
        self.check_opname(s, op_name, other)
class TestInterface(base.BaseInterfaceTests):
    """Inherited base-suite tests run unchanged for nullable integers."""
    pass


class TestConstructors(base.BaseConstructorsTests):
    pass


class TestReshaping(base.BaseReshapingTests):
    pass


# for test_concat_mixed_dtypes test
# concat of an Integer and Int coerces to object dtype
# TODO(jreback) once integrated this would


class TestGetitem(base.BaseGetitemTests):
    pass


class TestSetitem(base.BaseSetitemTests):
    pass


class TestMissing(base.BaseMissingTests):
    pass
class TestMethods(base.BaseMethodsTests):
    @pytest.mark.parametrize("dropna", [True, False])
    def test_value_counts(self, all_data, dropna):
        # Overridden from the base suite: the expected index must be cast
        # back to the nullable-integer dtype, since the numpy round-trip
        # below loses the extension dtype.
        all_data = all_data[:10]
        if dropna:
            other = np.array(all_data[~all_data.isna()])
        else:
            other = all_data

        result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()
        expected = pd.Series(other).value_counts(dropna=dropna).sort_index()
        expected.index = expected.index.astype(all_data.dtype)

        self.assert_series_equal(result, expected)
class TestCasting(base.BaseCastingTests):
    """Remaining base-suite groups, inherited without modification."""
    pass


class TestGroupby(base.BaseGroupbyTests):
    pass


class TestNumericReduce(base.BaseNumericReduceTests):
    pass


class TestBooleanReduce(base.BaseBooleanReduceTests):
    pass


class TestPrinting(base.BasePrintingTests):
    pass


class TestParsing(base.BaseParsingTests):
    pass
| |
# -*- coding: utf-8 -*-
# Copyright 2015 Cyan, Inc.
# Copyright 2016, 2017, 2018 Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from numbers import Integral
from twisted.internet.defer import (
CancelledError, Deferred, fail, inlineCallbacks, maybeDeferred, succeed,
)
from twisted.internet.task import LoopingCall
from twisted.python.failure import Failure
from afkak._util import _coerce_consumer_group, _coerce_topic
from afkak.common import (
OFFSET_COMMITTED, OFFSET_EARLIEST, OFFSET_LATEST, OFFSET_NOT_COMMITTED,
TIMESTAMP_INVALID, ConsumerFetchSizeTooSmall, FetchRequest,
IllegalGeneration, InvalidConsumerGroupError, InvalidGroupId, KafkaError,
OffsetCommitRequest, OffsetFetchRequest, OffsetOutOfRangeError,
OffsetRequest, OperationInProgress, RestartError, RestopError,
SourcedMessage, UnknownMemberId,
)
log = logging.getLogger(__name__)
# Library code: attach a NullHandler so "no handler found" warnings are
# suppressed unless the application configures logging itself.
log.addHandler(logging.NullHandler())

REQUEST_RETRY_MIN_DELAY = 0.1  # Initial wait to retry a request after failure
REQUEST_RETRY_MAX_DELAY = 30.0  # When retrying a request, max delay (seconds)
REQUEST_RETRY_FACTOR = 1.20205  # Factor by which we increase our delay

FETCH_MIN_BYTES = 64 * 1024  # server waits for min. 64K bytes of messages
FETCH_MAX_WAIT_TIME = 100  # server waits 100 millisecs for messages
FETCH_BUFFER_SIZE_BYTES = 128 * 1024  # Our initial fetch buffer size

# How often we auto-commit (msgs, millisecs)
AUTO_COMMIT_MSG_COUNT = 100
AUTO_COMMIT_INTERVAL = 5000
class Consumer(object):
"""A simple Kafka consumer implementation
This consumer consumes a single partition from a single topic, optionally
automatically committing offsets. Use it as follows:
- Create an instance of :class:`afkak.KafkaClient` with cluster
connectivity details.
- Create the :class:`Consumer`, supplying the client, topic, partition,
processor function, and optionally fetch specifics, a consumer group,
and a commit policy.
- Call :meth:`.start` with the offset within the partition at which to
start consuming messages. See :meth:`.start` for details.
- Process the messages in your :attr:`.processor` callback, returning a
:class:`~twisted.internet.defer.Deferred` to provide backpressure as
needed.
- Once processing resolves, :attr:`.processor` will be called again with
the next batch of messages.
- When desired, call :meth:`.shutdown` on the :class:`Consumer` to halt
calls to the :attr:`processor` function and commit progress (if
a *consumer_group* is specified).
A :class:`Consumer` may be restarted once stopped.
:ivar client:
Connected :class:`KafkaClient` for submitting requests to the Kafka
cluster.
:ivar str topic:
The topic from which to consume messages.
:ivar int partition:
The partition from which to consume.
:ivar callable processor:
The callback function to which the consumer and lists of messages
(:class:`afkak.common.SourcedMessage`) will be submitted
for processing. The function may return
a :class:`~twisted.internet.defer.Deferred` and will not be called
again until this Deferred resolves.
:ivar str consumer_group:
Optional consumer group ID for committing offsets of processed
messages back to Kafka.
:ivar bytes commit_metadata:
Optional metadata to store with offsets commit.
:ivar int auto_commit_every_n:
Number of messages after which the consumer will automatically
commit the offset of the last processed message to Kafka. Zero
disables, defaulted to :data:`AUTO_COMMIT_MSG_COUNT`.
:ivar int auto_commit_every_ms:
Time interval in milliseconds after which the consumer will
automatically commit the offset of the last processed message to
Kafka. Zero disables, defaulted to :data:`AUTO_COMMIT_INTERVAL`.
:ivar int fetch_size_bytes:
Number of bytes to request in a :class:`FetchRequest`. Kafka will
defer fulfilling the request until at least this many bytes can be
returned.
:ivar int fetch_max_wait_time:
Max number of milliseconds the server should wait for that many
bytes.
:ivar int buffer_size:
default 128K. Initial number of bytes to tell Kafka we have
available. This will be raised x16 up to 1MB then double up to...
:ivar int max_buffer_size:
Max number of bytes to tell Kafka we have available. `None` means
no limit (the default). Must be larger than the largest message we
will find in our topic/partitions.
:ivar float request_retry_init_delay:
Number of seconds to wait before retrying a failed request to
Kafka.
:ivar float request_retry_max_delay:
Maximum number of seconds to wait before retrying a failed request
to Kafka (the delay is increased on each failure and reset to the
initial delay upon success).
:ivar int request_retry_max_attempts:
Maximum number of attempts to make for any request. Default of zero
means retry forever; other values must be positive and indicate
the number of attempts to make before returning failure.
:ivar int auto_offset_reset:
What action should be taken when the broker responds to a fetch request
with `OffsetOutOfRangeError`?
- `OFFSET_EARLIEST`: request the oldest available messages. The
consumer will read every message in the topic.
- `OFFSET_LATEST`: request the most recent messages (this is the Java
consumer's default). The consumer will read messages once new
messages are produced to the topic.
- `None`: fail on `OffsetOutOfRangeError` (Afkak's default). The
`Deferred` returned by :meth:`Consumer.start()` will errback. The
caller may call :meth:`~.start()` again with the desired offset.
The broker returns `OffsetOutOfRangeError` when the client requests an
offset that isn't valid. This may mean that the requested offset no
longer exists, e.g. if it was removed due to age.
"""
def __init__(self, client, topic, partition, processor,
consumer_group=None,
commit_metadata=None,
auto_commit_every_n=None,
auto_commit_every_ms=None,
fetch_size_bytes=FETCH_MIN_BYTES,
fetch_max_wait_time=FETCH_MAX_WAIT_TIME,
buffer_size=FETCH_BUFFER_SIZE_BYTES,
max_buffer_size=None,
request_retry_init_delay=REQUEST_RETRY_MIN_DELAY,
request_retry_max_delay=REQUEST_RETRY_MAX_DELAY,
request_retry_max_attempts=0,
auto_offset_reset=None,
commit_consumer_id='',
commit_generation_id=-1):
# Store away parameters
self.client = client # KafkaClient
self.topic = topic = _coerce_topic(topic)
self.partition = partition # The partition within the topic we consume
self.processor = processor # The callback we call with the msg list
# Commit related parameters (Ensure the attr. exist, even if None)
if consumer_group is not None:
consumer_group = _coerce_consumer_group(consumer_group)
self.consumer_group = consumer_group
self.commit_metadata = commit_metadata
if commit_metadata is not None and not isinstance(commit_metadata, bytes):
raise TypeError('commit_metadata={!r} should be bytes'.format(
commit_metadata))
# commit related parameters when using a coordinated consumer group
self.commit_consumer_id = commit_consumer_id
self.commit_generation_id = commit_generation_id
self.auto_commit_every_n = None
self.auto_commit_every_s = None
if consumer_group:
# Auto committing is possible...
if auto_commit_every_n is None:
auto_commit_every_n = AUTO_COMMIT_MSG_COUNT
if auto_commit_every_ms is None:
auto_commit_every_ms = AUTO_COMMIT_INTERVAL
if not isinstance(auto_commit_every_n, Integral):
raise ValueError('auto_commit_every_n parameter must be '
'subtype of Integral')
if not isinstance(auto_commit_every_ms, Integral):
raise ValueError('auto_commit_every_ms parameter must be '
'subtype of Integral')
if auto_commit_every_ms < 0 or auto_commit_every_n < 0:
raise ValueError('auto_commit_every_ms and auto_commit_every_n'
' must be non-negative')
self.auto_commit_every_n = auto_commit_every_n
self.auto_commit_every_s = float(auto_commit_every_ms) / 1000
else:
if auto_commit_every_ms or auto_commit_every_n:
raise ValueError('An auto_commit_every_x argument set without '
'specifying consumer_group')
# Fetch related instance variables
self.fetch_min_bytes = fetch_size_bytes
self.fetch_max_wait_time = int(fetch_max_wait_time)
self.buffer_size = buffer_size
self.max_buffer_size = max_buffer_size
# request retry timing
self.retry_delay = float(request_retry_init_delay) # fetch only
self.retry_init_delay = float(request_retry_init_delay)
self.retry_max_delay = float(request_retry_max_delay)
self.request_retry_max_attempts = int(request_retry_max_attempts)
if (not isinstance(request_retry_max_attempts, Integral) or
request_retry_max_attempts < 0):
raise ValueError(
'request_retry_max_attempts must be non-negative integer')
self._fetch_attempt_count = 1
if auto_offset_reset not in [None, OFFSET_EARLIEST, OFFSET_LATEST]:
raise ValueError(
"auto_offset_reset must be in 'None', 'OFFSET_EARLIEST', 'OFFSET_LATEST'")
self.auto_offset_reset = auto_offset_reset
# # Internal state tracking attributes
self._fetch_offset = None # We don't know at what offset to fetch yet
self._last_processed_offset = None # Last msg processed offset
self._last_committed_offset = None # The last offset stored in Kafka
self._stopping = False # We're not stopping yet...
self._shuttingdown = False # We're not shutting down either
self._shutdown_d = None # deferred for tracking shutdown request
self._commit_looper = None # Looping call for auto-commit
self._commit_looper_d = None # Deferred for running looping call
self._commit_ds = [] # Deferreds to notify when commit completes
self._commit_req = None # Track outstanding commit request
# For tracking various async operations
self._start_d = None # deferred for alerting user of errors
self._request_d = None # outstanding KafkaClient request deferred
self._retry_call = None # IDelayedCall object for delayed retries
self._commit_call = None # IDelayedCall for delayed commit retries
self._msg_block_d = None # deferred for each block of messages
self._processor_d = None # deferred for a result from processor
self._state = 'initialized' # Keep track of state for debugging
# Check parameters for sanity
if max_buffer_size is not None and buffer_size > max_buffer_size:
raise ValueError("buffer_size (%d) is greater than "
"max_buffer_size (%d)" %
(buffer_size, max_buffer_size))
if not isinstance(self.partition, Integral):
raise ValueError('partition parameter must be subtype of Integral')
def __repr__(self):
return '<{} {}/{} {}>'.format(
self.__class__.__name__, self.topic, self.partition, self._state,
)
# TODO Add commit_consumer_id if applicable
@property
def last_processed_offset(self):
"""
Offset of the last message that was successfully processed, or `None`
if no message has been processed yet (read-only). This is updated only
once the processor function returns and any deferred it returns
succeeds.
:rtype: Optional[int]
"""
return self._last_processed_offset
@property
def last_committed_offset(self):
"""
The last offset that was successfully commited to Kafka, or `None` if
no offset has been committed yet (read-only).
:rtype: Optional[int]
"""
return self._last_committed_offset
def start(self, start_offset):
"""
Starts fetching messages from Kafka and delivering them to the
:attr:`.processor` function.
:param int start_offset:
The offset within the partition from which to start fetching.
Special values include: :const:`OFFSET_EARLIEST`,
:const:`OFFSET_LATEST`, and :const:`OFFSET_COMMITTED`. If the
supplied offset is :const:`OFFSET_EARLIEST` or
:const:`OFFSET_LATEST` the :class:`Consumer` will use the
OffsetRequest Kafka API to retrieve the actual offset used for
fetching. In the case :const:`OFFSET_COMMITTED` is used,
`commit_policy` MUST be set on the Consumer, and the Consumer
will use the OffsetFetchRequest Kafka API to retrieve the actual
offset used for fetching.
:returns:
:class:`~twisted.internet.defer.Deferred` that will fire when the
consumer is stopped:
* It will succeed with the value of
:attr:`last_processed_offset`, or
* Fail when the :class:`Consumer` encounters an error from which
it is unable to recover, such as an exception thrown by the
processor or an unretriable broker error.
:raises: :exc:`RestartError` if already running.
"""
# Have we been started already, and not stopped?
if self._start_d is not None:
raise RestartError("Start called on already-started consumer")
# Keep track of state for debugging
self._state = 'started'
# Create and return a deferred for alerting on errors/stoppage
start_d = self._start_d = Deferred()
# Start a new fetch request, possibly just for the starting offset
self._fetch_offset = start_offset
self._do_fetch()
# Set up the auto-commit timer, if needed
if self.consumer_group and self.auto_commit_every_s:
self._commit_looper = LoopingCall(self._auto_commit)
self._commit_looper.clock = self.client.reactor
self._commit_looper_d = self._commit_looper.start(
self.auto_commit_every_s, now=False)
self._commit_looper_d.addCallbacks(self._commit_timer_stopped,
self._commit_timer_failed)
return start_d
    def shutdown(self):
        """Gracefully shutdown the consumer
        Consumer will complete any outstanding processing, commit its current
        offsets (if so configured) and stop.
        :returns: :class:`Deferred` that fires with the value of
            :attr:`last_processed_offset`. It may fail if a commit fails or
            with :exc:`RestopError` if the consumer is not running.
        """
        def _handle_shutdown_commit_success(result):
            """Handle the result of the commit attempted by shutdown"""
            # Detach the tracking deferred from the instance *before* firing
            # it, so re-entrant calls see the shutdown as already complete.
            self._shutdown_d, d = None, self._shutdown_d
            self.stop()
            self._shuttingdown = False  # Shutdown complete
            d.callback(self._last_processed_offset)
        def _handle_shutdown_commit_failure(failure):
            """Handle failure of commit() attempted by shutdown"""
            if failure.check(OperationInProgress):
                # Another commit is already in flight; retry once it's done.
                failure.value.deferred.addCallback(_commit_and_stop)
                return
            self._shutdown_d, d = None, self._shutdown_d
            self.stop()
            self._shuttingdown = False  # Shutdown complete
            d.errback(failure)
        def _commit_and_stop(result):
            """Commit the current offsets (if needed) and stop the consumer"""
            if not self.consumer_group:  # No consumer group, no committing
                return _handle_shutdown_commit_success(None)
            # Need to commit prior to stopping
            self.commit().addCallbacks(_handle_shutdown_commit_success,
                                       _handle_shutdown_commit_failure)
        # If we're not running, return a failure
        if self._start_d is None:
            return fail(Failure(
                RestopError("Shutdown called on non-running consumer")))
        # If we're called multiple times, return a failure
        if self._shutdown_d:
            return fail(Failure(
                RestopError("Shutdown called more than once.")))
        # Set our _shuttingdown flag, so our _process_message routine will stop
        # feeding new messages to the processor, and fetches won't be retried
        self._shuttingdown = True
        # Keep track of state for debugging
        self._state = 'shutting down'
        # TODO: This was added as part of coordinated consumer support,
        # but it belongs in the constructor if it is even necessary.
        # don't let commit requests retry forever and prevent shutdown
        if not self.request_retry_max_attempts:
            self.request_retry_max_attempts = 2
        # Create a deferred to track the shutdown
        self._shutdown_d = d = Deferred()
        # Are we waiting for the processor to complete? If so, when it's done,
        # commit our offsets and stop.
        if self._processor_d:
            self._processor_d.addCallback(_commit_and_stop)
        else:
            # No need to wait for the processor, we can commit and stop now
            _commit_and_stop(None)
        # return the deferred
        return d
    def stop(self):
        """
        Stop the consumer and return offset of last processed message. This
        cancels all outstanding operations. Also, if the deferred returned
        by `start` hasn't been called, it is called with the value of
        :attr:`last_processed_offset`.
        :returns: offset of the last message processed, or `None`
        :raises: :exc:`RestopError` if the :class:`Consumer` is not running.
        """
        if self._start_d is None:
            raise RestopError("Stop called on non-running consumer")
        self._stopping = True
        # Keep track of state for debugging
        self._state = 'stopping'
        # Are we waiting for a request to come back?
        if self._request_d:
            self._request_d.cancel()
        # Are we working our way through a block of messages?
        if self._msg_block_d:
            # Trap the CancelledError our cancel() below will produce so it
            # isn't reported as an unhandled error in the deferred.
            _msg_block_d, self._msg_block_d = self._msg_block_d, None
            _msg_block_d.addErrback(lambda fail: fail.trap(CancelledError))
            _msg_block_d.cancel()
        # Are we waiting for the processor to complete?
        if self._processor_d:
            self._processor_d.cancel()
        # Are we waiting to retry a request?
        if self._retry_call:
            self._retry_call.cancel()
        # Are we waiting on a commit request? Cancel the waiters first,
        # then the outstanding request itself.
        if self._commit_ds:
            while self._commit_ds:
                d = self._commit_ds.pop()
                d.cancel()
        if self._commit_req:
            self._commit_req.cancel()
        # Are we waiting to retry a commit?
        if self._commit_call:
            self._commit_call.cancel()
        # Do we have an auto-commit looping call?
        if self._commit_looper is not None:
            self._commit_looper.stop()
        # Done stopping
        self._stopping = False
        # Keep track of state for debugging
        self._state = 'stopped'
        # Clear and possibly callback our start() Deferred
        self._start_d, d = None, self._start_d
        if not d.called:
            d.callback(self._last_processed_offset)
        # Return the offset of the message we last processed.
        return self._last_processed_offset
    def commit(self):
        """
        Commit the last processed offset
        Immediately commit the value of :attr:`last_processed_offset` if it
        differs from :attr:`last_committed_offset`.
        .. note::
            It is possible to commit a smaller offset than Kafka has stored.
            This is by design, so we can reprocess a Kafka message stream if
            desired.
        On error, will retry according to :attr:`request_retry_max_attempts`
        (by default, forever).
        If called while a commit operation is in progress, and new messages
        have been processed since the last request was sent then the commit
        will fail with :exc:`OperationInProgress`. The
        :exc:`OperationInProgress` exception wraps
        a :class:`~twisted.internet.defer.Deferred` which fires when the
        outstanding commit operation completes.
        :returns:
            A :class:`~twisted.internet.defer.Deferred` which resolves with the
            committed offset when the operation has completed. It will resolve
            immediately if the current offset and the last committed offset do
            not differ.
        """
        # Can't commit without a consumer_group
        if not self.consumer_group:
            return fail(Failure(InvalidConsumerGroupError(
                "Bad Group_id:{0!r}".format(self.consumer_group))))
        # short circuit if we are 'up to date', or haven't processed anything
        if ((self._last_processed_offset is None) or
                (self._last_processed_offset == self._last_committed_offset)):
            return succeed(self._last_committed_offset)
        # If we're currently processing a commit we return a failure
        # with a deferred we'll fire when the in-progress one completes
        if self._commit_ds:
            d = Deferred()
            self._commit_ds.append(d)
            return fail(OperationInProgress(d))
        # Ok, we have processed messages since our last commit attempt, and
        # we're not currently waiting on a commit request to complete:
        # Start a new one
        d = Deferred()
        self._commit_ds.append(d)
        # Send the request
        self._send_commit_request()
        # Reset the commit_looper here, rather than on success to give
        # more stability to the commit interval.
        if self._commit_looper is not None:
            self._commit_looper.reset()
        # return the deferred
        return d
# # Private Methods # #
def _retry_auto_commit(self, result, by_count=False):
self._auto_commit(by_count)
return result
    def _auto_commit(self, by_count=False):
        """Check if we should start a new commit operation and commit
        :param bool by_count:
            `True` when triggered after processing a block of messages,
            `False` when triggered by the auto-commit timer.
        """
        # Check if we are even supposed to do any auto-committing
        if (self._stopping or self._shuttingdown or (not self._start_d) or
                (self._last_processed_offset is None) or
                (not self.consumer_group) or
                (by_count and not self.auto_commit_every_n)):
            return
        # If we're auto_committing because the timer expired, or by count and
        # we don't have a record of our last_committed_offset, or we've
        # processed enough messages since our last commit, then try to commit
        if (not by_count or self._last_committed_offset is None or
                (self._last_processed_offset - self._last_committed_offset
                 ) >= self.auto_commit_every_n):
            if not self._commit_ds:
                commit_d = self.commit()
                commit_d.addErrback(self._handle_auto_commit_error)
            else:
                # We're waiting on the last commit to complete, so add a
                # callback to be called when the current request completes
                d = Deferred()
                d.addCallback(self._retry_auto_commit, by_count)
                self._commit_ds.append(d)
    def _retry_fetch(self, after=None):
        """
        Schedule a delayed :meth:`_do_fetch` call after a failure
        :param float after:
            The delay in seconds after which to do the retried fetch. If
            `None`, our internal :attr:`retry_delay` is used, and adjusted by
            :const:`REQUEST_RETRY_FACTOR`.
        """
        # Have we been told to stop or shutdown? Then don't actually retry.
        if self._stopping or self._shuttingdown or self._start_d is None:
            # Stopping, or stopped already? No more fetching.
            return
        # Only schedule a retry when one isn't already pending.
        if self._retry_call is None:
            if after is None:
                after = self.retry_delay
                # Back off geometrically, capped at retry_max_delay.
                self.retry_delay = min(self.retry_delay * REQUEST_RETRY_FACTOR,
                                       self.retry_max_delay)
            self._fetch_attempt_count += 1
            self._retry_call = self.client.reactor.callLater(
                after, self._do_fetch)
def _handle_offset_response(self, responses):
"""
Handle responses to both OffsetRequest and OffsetFetchRequest, since
they are similar enough.
:param responses:
A tuple of a single OffsetFetchResponse or OffsetResponse
"""
# Got a response, clear our outstanding request deferred
self._request_d = None
# Successful request, reset our retry delay, count, etc
self.retry_delay = self.retry_init_delay
self._fetch_attempt_count = 1
[response] = responses
if hasattr(response, 'offsets'):
# It's a response to an OffsetRequest
self._fetch_offset = response.offsets[0]
else:
# It's a response to an OffsetFetchRequest
# Make sure we got a valid offset back. Kafka uses -1 to indicate
# no committed offset was retrieved
if response.offset == OFFSET_NOT_COMMITTED:
if self.auto_offset_reset == OFFSET_LATEST:
self._fetch_offset = OFFSET_LATEST
else:
self._fetch_offset = OFFSET_EARLIEST
else:
self._fetch_offset = response.offset + 1
self._last_committed_offset = response.offset
self._do_fetch()
    def _handle_offset_error(self, failure):
        """
        Retry the offset fetch request if appropriate.
        Once the :attr:`.retry_delay` reaches our :attr:`.retry_max_delay`, we
        log a warning. This should perhaps be extended to abort sooner on
        certain errors.
        """
        # outstanding request got errback'd, clear it
        self._request_d = None
        if self._stopping and failure.check(CancelledError):
            # Not really an error: our own stop() cancelled the request.
            return
        # Do we need to abort?
        if (self.request_retry_max_attempts != 0 and
                self._fetch_attempt_count >= self.request_retry_max_attempts):
            log.debug(
                "%r: Exhausted attempts: %d fetching offset from kafka",
                self, self.request_retry_max_attempts,
                exc_info=(failure.type, failure.value, failure.getTracebackObject()),
            )
            self._start_d.errback(failure)
            return
        # Decide how to log this failure... If we have retried so many times
        # we're at the retry_max_delay, then we log at warning every other time
        # debug otherwise
        if (self.retry_delay < self.retry_max_delay or
                0 == (self._fetch_attempt_count % 2)):
            log.debug("%r: Failure fetching offset from kafka: %r", self,
                      failure)
        else:
            # We've retried until we hit the max delay, log at warn
            log.warning("%r: Still failing fetching offset from kafka: %r",
                        self, failure)
        self._retry_fetch()
def _clear_processor_deferred(self, result):
self._processor_d = None # It has fired, we can clear it
return result
    def _update_processed_offset(self, result, offset):
        """Record that processing completed through *offset*, then consider
        a count-triggered auto-commit."""
        log.debug('%s: processor returned %r at offset %d', self, result, offset)
        self._last_processed_offset = offset
        # May start a commit if enough messages accumulated since last one.
        self._auto_commit(by_count=True)
def _clear_commit_req(self, result):
self._commit_req = None # It has fired, we can clear it
return result
    def _update_committed_offset(self, result, offset):
        # A commit request completed successfully: record the offset first
        # (waiters notified below may immediately re-read it), then notify.
        self._last_committed_offset = offset
        self._deliver_commit_result(offset)
        return offset
def _deliver_commit_result(self, result):
# Let anyone waiting know the commit completed. Handle the case where
# they try to commit from the callback by preserving self._commit_ds
# as a local, but clearing the attribute itself.
commit_ds, self._commit_ds = self._commit_ds, []
while commit_ds:
d = commit_ds.pop()
d.callback(result)
    def _send_commit_request(self, retry_delay=None, attempt=None):
        """Send a commit request with our last_processed_offset
        :param float retry_delay:
            Delay used for the next retry should this attempt fail; defaults
            to :attr:`retry_init_delay` on the first attempt.
        :param int attempt:
            1-based attempt counter, checked against
            :attr:`request_retry_max_attempts` in the error handler.
        :raises OperationInProgress: if a commit request is already in flight
        """
        # If there's a _commit_call, and it's not active, clear it, it probably
        # just called us...
        if self._commit_call and not self._commit_call.active():
            self._commit_call = None
        # Make sure we only have one outstanding commit request at a time
        if self._commit_req is not None:
            raise OperationInProgress(self._commit_req)
        # Handle defaults
        if retry_delay is None:
            retry_delay = self.retry_init_delay
        if attempt is None:
            attempt = 1
        # Create new OffsetCommitRequest with the latest processed offset
        commit_offset = self._last_processed_offset
        commit_request = OffsetCommitRequest(
            self.topic, self.partition, commit_offset,
            TIMESTAMP_INVALID, self.commit_metadata)
        log.debug("Committing off=%s grp=%s tpc=%s part=%s req=%r",
                  self._last_processed_offset, self.consumer_group,
                  self.topic, self.partition, commit_request)
        # Send the request, add our callbacks
        self._commit_req = d = self.client.send_offset_commit_request(
            self.consumer_group, [commit_request],
            group_generation_id=self.commit_generation_id,
            consumer_id=self.commit_consumer_id)
        # _clear_commit_req runs on both success and failure so a follow-up
        # commit is never blocked by a stale in-flight marker.
        d.addBoth(self._clear_commit_req)
        d.addCallbacks(
            callback=self._update_committed_offset,
            callbackArgs=(commit_offset,),
            errback=self._handle_commit_error,
            errbackArgs=(commit_offset, retry_delay, attempt),
        )
    def _handle_commit_error(self, failure, commit_offset, retry_delay, attempt):
        """ Retry the commit request, depending on failure type
        Depending on the type of the failure, we retry the commit request
        with the latest processed offset, or callback/errback self._commit_ds
        """
        # Check if we are stopping and the request was cancelled
        if self._stopping and failure.check(CancelledError):
            # Not really an error: deliver the last known committed offset.
            return self._deliver_commit_result(self._last_committed_offset)
        # Check that the failure type is a Kafka error...this could maybe be
        # a tighter check to determine whether a retry will succeed...
        if not failure.check(KafkaError):
            log.error("Unhandleable failure during commit attempt: %r\n\t%r",
                      failure, failure.getBriefTraceback())
            return self._deliver_commit_result(failure)
        # the server may reject our commit because we have lost sync with the group
        if failure.check(IllegalGeneration, InvalidGroupId, UnknownMemberId):
            log.error("Unretriable failure during commit attempt: %r\n\t%r",
                      failure, failure.getBriefTraceback())
            # we need to notify the coordinator here
            self._deliver_commit_result(failure)
            return
        # Do we need to abort?
        if (self.request_retry_max_attempts != 0 and
                attempt >= self.request_retry_max_attempts):
            log.debug(
                "%r: Failed to commit offset %s %d times: out of retries",
                self, commit_offset, self.request_retry_max_attempts,
                exc_info=(failure.type, failure.value, failure.getTracebackObject()),
            )
            return self._deliver_commit_result(failure)
        # Compute next back-off delay, capped at retry_max_delay.
        next_retry_delay = min(retry_delay * REQUEST_RETRY_FACTOR, self.retry_max_delay)
        # Check the retry_delay to see if we should log at the higher level
        # Using attempts % 2 gets us 1-warn/minute with defaults timings
        if retry_delay < self.retry_max_delay or 0 == (attempt % 2):
            log.debug(
                "%r: Failed to commit offset %s (will retry in %.2f seconds)",
                self, commit_offset, next_retry_delay,
                exc_info=(failure.type, failure.value, failure.getTracebackObject()),
            )
        else:
            # We've retried until we hit the max delay, log alternately at warn
            log.warning(
                "%r: Failed to commit offset %s (will retry in %.2f seconds)",
                self, commit_offset, next_retry_delay,
                exc_info=(failure.type, failure.value, failure.getTracebackObject()),
            )
        # Schedule a delayed call to retry the commit
        self._commit_call = self.client.reactor.callLater(
            next_retry_delay, self._send_commit_request, next_retry_delay, attempt + 1)
def _handle_auto_commit_error(self, failure):
if self._start_d is not None and not self._start_d.called:
self._start_d.errback(failure)
def _handle_processor_error(self, failure):
"""Handle a failure in the processing of a block of messages
This method is called when the processor func fails while processing
a block of messages. Since we can't know how best to handle a
processor failure, we just :func:`errback` our :func:`start` method's
deferred to let our user know about the failure.
"""
# Check if we're stopping/stopped and the errback of the processor
# deferred is just the cancelling we initiated. If so, we skip
# notifying via the _start_d deferred, as it will be 'callback'd at the
# end of stop()
if not (self._stopping and failure.check(CancelledError)):
if self._start_d: # Make sure we're not already stopped
self._start_d.errback(failure)
    def _handle_fetch_error(self, failure):
        """A fetch request resulted in an error. Retry after our current delay
        When a fetch error occurs, we check to see if the Consumer is being
        stopped, and if so just return, trapping the CancelledError. If not, we
        check if the Consumer has a non-zero setting for
        :attr:`request_retry_max_attempts` and if so and we have reached that limit we
        errback() the Consumer's start() deferred with the failure. If not, we
        determine whether to log at debug or warning (we log at warning every
        other retry after backing off to the max retry delay, resulting in a
        warning message approximately once per minute with the default timings)
        We then wait our current :attr:`retry_delay`, and retry the fetch. We
        also increase our retry_delay by Apery's constant (1.20205) and note
        the failed fetch by incrementing :attr:`_fetch_attempt_count`.
        NOTE: this may retry forever.
        TODO: Possibly make this differentiate based on the failure
        """
        # The _request_d deferred has fired, clear it.
        self._request_d = None
        # Offset-out-of-range is fatal when no auto_offset_reset is set;
        # otherwise it resets the fetch position before the retry path below.
        if failure.check(OffsetOutOfRangeError):
            if self.auto_offset_reset is None:
                self._start_d.errback(failure)
                return
            self._fetch_offset = self.auto_offset_reset
        if self._stopping and failure.check(CancelledError):
            # Not really an error: our own stop() cancelled the request.
            return
        # Do we need to abort?
        if (self.request_retry_max_attempts != 0 and
                self._fetch_attempt_count >= self.request_retry_max_attempts):
            log.debug(
                "%r: Exhausted attempts: %d fetching messages from kafka: %r",
                self, self.request_retry_max_attempts, failure)
            self._start_d.errback(failure)
            return
        # Decide how to log this failure... If we have retried so many times
        # we're at the retry_max_delay, then we log at warning every other time
        # debug otherwise
        if (self.retry_delay < self.retry_max_delay or
                0 == (self._fetch_attempt_count % 2)):
            log.debug("%r: Failure fetching messages from kafka: %r", self,
                      failure)
        else:
            # We've retried until we hit the max delay, log at warn
            log.warning("%r: Still failing fetching messages from kafka: %r",
                        self, failure)
        self._retry_fetch()
    def _handle_fetch_response(self, responses):
        """The callback handling the successful response from the fetch request
        Delivers the message list to the processor, handles per-message errors
        (ConsumerFetchSizeTooSmall), triggers another fetch request
        If the processor is still processing the last batch of messages, we
        defer this processing until it's done. Otherwise, we start another
        fetch request and submit the messages to the processor
        """
        # Successful fetch, reset our retry delay
        self.retry_delay = self.retry_init_delay
        self._fetch_attempt_count = 1
        # Check to see if we are still processing the last block we fetched...
        if self._msg_block_d:
            # We are still working through the last block of messages...
            # We have to wait until it's done, then process this response
            self._msg_block_d.addCallback(
                lambda _: self._handle_fetch_response(responses))
            return
        # No ongoing processing, great, let's get some started.
        # Request no longer outstanding, clear the deferred tracker so we
        # can refetch
        self._request_d = None
        messages = []
        try:
            for resp in responses:  # We should really only ever get one...
                if resp.partition != self.partition:
                    log.warning(
                        "%r: Got response with partition: %r not our own: %r",
                        self, resp.partition, self.partition)
                    continue
                # resp.messages is a KafkaCodec._decode_message_set_iter
                # Note that 'message' here is really an OffsetAndMessage
                for message in resp.messages:
                    # Check for messages included which are from prior to our
                    # desired offset: can happen due to compressed message sets
                    if message.offset < self._fetch_offset:
                        log.debug(
                            'Skipping message at offset: %d, because its '
                            'offset is less that our fetch offset: %d.',
                            message.offset, self._fetch_offset)
                        continue
                    # Create a 'SourcedMessage' and add it to the messages list
                    messages.append(
                        SourcedMessage(
                            message=message.message,
                            offset=message.offset, topic=self.topic,
                            partition=self.partition))
                    # Update our notion of from where to fetch.
                    self._fetch_offset = message.offset + 1
        except ConsumerFetchSizeTooSmall:
            # A message was too large for us to receive, given our current
            # buffer size. Grow it until it works, or we hit our max
            # Grow by 16x up to 1MB (could result in 16MB buf), then by 2x
            factor = 2
            if self.buffer_size <= 2**20:
                factor = 16
            if self.max_buffer_size is None:
                # No limit, increase until we succeed or fail to alloc RAM
                self.buffer_size *= factor
            elif (self.max_buffer_size is not None and
                    self.buffer_size < self.max_buffer_size):
                # Limited, but currently below it.
                self.buffer_size = min(
                    self.buffer_size * factor, self.max_buffer_size)
            else:
                # We failed, and are already at our max. Nothing we can do but
                # create a Failure and errback() our start() deferred
                log.error("Max fetch size %d too small", self.max_buffer_size)
                failure = Failure(
                    ConsumerFetchSizeTooSmall(
                        "Max buffer size:%d too small for message",
                        self.max_buffer_size))
                self._start_d.errback(failure)
                # This return skips the refetch below (the finally block
                # still runs, but messages is empty here).
                return
            log.debug(
                "Next message larger than fetch size, increasing "
                "to %d (~2x) and retrying", self.buffer_size)
        finally:
            # If we were able to extract any messages, deliver them to the
            # processor now.
            if messages:
                self._msg_block_d = Deferred()
                self._process_messages(messages)
        # start another fetch, if needed, but use callLater to avoid recursion
        self._retry_fetch(0)
    @inlineCallbacks
    def _process_messages(self, messages):
        """Send messages to the `processor` callback to be processed
        In the case we have a commit policy, we send messages to the processor
        in blocks no bigger than auto_commit_every_n (if set). Otherwise, we
        send the entire message block to be processed.
        """
        # Default to processing the entire block...
        proc_block_size = sys.maxsize
        # Unless our auto commit_policy restricts us to process less
        if self.auto_commit_every_n:
            proc_block_size = self.auto_commit_every_n
        # Walk the message list one block at a time.
        proc_block_begin = 0
        proc_block_end = proc_block_size
        while proc_block_begin < len(messages) and not self._shuttingdown:
            msgs_to_proc = messages[proc_block_begin:proc_block_end]
            # Call our processor callable and handle the possibility it returned
            # a deferred...
            last_offset = msgs_to_proc[-1].offset
            self._processor_d = d = maybeDeferred(self.processor, self, msgs_to_proc)
            # Once the processor completes, clear our _processor_d
            d.addBoth(self._clear_processor_deferred)
            # Record the offset of the last processed message and check autocommit
            d.addCallback(self._update_processed_offset, last_offset)
            # Add an error handler
            d.addErrback(self._handle_processor_error)
            # If we were stopped, cancel the processor deferred. Note, we have to
            # do this here, in addition to in stop() because the processor func
            # itself could have called stop(), and then when it returned, we re-set
            # self._processor_d to the return of maybeDeferred().
            if self._stopping or self._start_d is None:
                d.cancel()
                break
            else:
                # Wait for the processor (and any deferred it returned) to
                # finish before feeding it the next block.
                yield d
                proc_block_begin = proc_block_end
                proc_block_end += proc_block_size
        # We're done with this block. If we had another fetch result
        # waiting, this callback will trigger the processing thereof.
        if self._msg_block_d:
            _msg_block_d, self._msg_block_d = self._msg_block_d, None
            _msg_block_d.callback(True)
def _do_fetch(self):
"""Send a fetch request if there isn't a request outstanding
Sends a fetch request to the Kafka cluster to get messages at the
current offset. When the response comes back, if there are messages,
it delivers them to the :attr:`processor` callback and initiates
another fetch request. If there is a recoverable error, the fetch is
retried after :attr:`retry_delay`.
In the case of an unrecoverable error, :func:`errback` is called on the
:class:`Deferred` returned by :meth:`start()`.
"""
# Check for outstanding request.
if self._request_d:
log.debug("_do_fetch: Outstanding request: %r", self._request_d)
return
# Cleanup our _retry_call, if we have one
if self._retry_call is not None:
if self._retry_call.active():
self._retry_call.cancel()
self._retry_call = None
# Do we know our offset yet, or do we need to figure it out?
if (self._fetch_offset == OFFSET_EARLIEST or
self._fetch_offset == OFFSET_LATEST):
# We need to fetch the offset for our topic/partition
offset_request = OffsetRequest(
self.topic, self.partition, self._fetch_offset, 1)
self._request_d = self.client.send_offset_request([offset_request])
self._request_d.addCallbacks(
self._handle_offset_response, self._handle_offset_error)
elif self._fetch_offset == OFFSET_COMMITTED:
# We need to fetch the committed offset for our topic/partition
# Note we use the same callbacks, as the responses are "close
# enough" for our needs here
if not self.consumer_group:
# consumer_group must be set for OFFSET_COMMITTED
failure = Failure(
InvalidConsumerGroupError("Bad Group_id:{0!r}".format(
self.consumer_group)))
self._start_d.errback(failure)
request = OffsetFetchRequest(self.topic, self.partition)
self._request_d = self.client.send_offset_fetch_request(
self.consumer_group, [request])
self._request_d.addCallbacks(
self._handle_offset_response, self._handle_offset_error)
else:
# Create fetch request payload for our partition
request = FetchRequest(
self.topic, self.partition, self._fetch_offset,
self.buffer_size)
# Send request and add handlers for the response
self._request_d = self.client.send_fetch_request(
[request], max_wait_time=self.fetch_max_wait_time,
min_bytes=self.fetch_min_bytes)
# We need a temp for this because if the response is already
# available, _handle_fetch_response() will clear self._request_d
d = self._request_d
d.addCallback(self._handle_fetch_response)
d.addErrback(self._handle_fetch_error)
    def _commit_timer_failed(self, fail):
        """Handle an error in the commit() function

        Our commit() function called by the LoopingCall failed. Some error
        probably came back from Kafka and _check_error() raised the exception.
        For now, just log the failure and restart the loop.

        :param fail: failure delivered by the LoopingCall's deferred --
            presumably a twisted Failure, given getBriefTraceback(); confirm.
        """
        # Policy: an auto-commit failure is non-fatal for the consumer.  Log it
        # with a brief traceback, then reschedule the commit LoopingCall at the
        # configured interval (now=False avoids an immediate re-run).
        log.warning(
            '_commit_timer_failed: uncaught error %r: %s in _auto_commit',
            fail, fail.getBriefTraceback())
        self._commit_looper_d = self._commit_looper.start(
            self.auto_commit_every_s, now=False)
def _commit_timer_stopped(self, lCall):
"""We're shutting down, clean up our looping call..."""
if self._commit_looper is not lCall:
log.warning('_commit_timer_stopped with wrong timer:%s not:%s',
lCall, self._commit_looper)
else:
log.debug('_commit_timer_stopped: %s %s', lCall,
self._commit_looper)
self._commit_looper = None
self._commit_looper_d = None
| |
from __future__ import annotations
import sys
import libtbx.pkg_utils
import dials.precommitbx.nagger
# DIALS requires Python 3; refuse to run under a Python 2 interpreter.
if sys.version_info.major == 2:
    sys.exit("Python 2 is no longer supported")

# Register DIALS plugins with libtbx's entry-point mechanism so that dxtbx
# and dials can look up profile models, scaling models, indexing strategies
# and spot-finding thresholds by name at runtime.
libtbx.pkg_utils.define_entry_points(
    {
        "dxtbx.profile_model": [
            "gaussian_rs = dials.extensions.gaussian_rs_profile_model_ext:GaussianRSProfileModelExt",
            "ellipsoid = dials.extensions.ellipsoid_profile_model_ext:EllipsoidProfileModelExt",
        ],
        "dxtbx.scaling_model_ext": [
            "physical = dials.algorithms.scaling.model.model:PhysicalScalingModel",
            "KB = dials.algorithms.scaling.model.model:KBScalingModel",
            "array = dials.algorithms.scaling.model.model:ArrayScalingModel",
            "dose_decay = dials.algorithms.scaling.model.model:DoseDecay",
        ],
        "dials.index.basis_vector_search": [
            "fft1d = dials.algorithms.indexing.basis_vector_search:FFT1D",
            "fft3d = dials.algorithms.indexing.basis_vector_search:FFT3D",
            "real_space_grid_search = dials.algorithms.indexing.basis_vector_search:RealSpaceGridSearch",
        ],
        "dials.index.lattice_search": [
            "low_res_spot_match = dials.algorithms.indexing.lattice_search:LowResSpotMatch"
        ],
        "dials.integration.background": [
            "Auto = dials.extensions.auto_background_ext:AutoBackgroundExt",
            "glm = dials.extensions.glm_background_ext:GLMBackgroundExt",
            "gmodel = dials.extensions.gmodel_background_ext:GModelBackgroundExt",
            "simple = dials.extensions.simple_background_ext:SimpleBackgroundExt",
            "null = dials.extensions.null_background_ext:NullBackgroundExt",
            "median = dials.extensions.median_background_ext:MedianBackgroundExt",
        ],
        "dials.integration.centroid": [
            "simple = dials.extensions.simple_centroid_ext:SimpleCentroidExt"
        ],
        "dials.spotfinder.threshold": [
            "dispersion = dials.extensions.dispersion_spotfinder_threshold_ext:DispersionSpotFinderThresholdExt",
            "dispersion_extended = dials.extensions.dispersion_extended_spotfinder_threshold_ext:DispersionExtendedSpotFinderThresholdExt",
            "radial_profile = dials.extensions.radial_profile_spotfinder_threshold_ext:RadialProfileSpotFinderThresholdExt",
        ],
    }
)

# Print the DIALS version banner during the refresh step.  Best effort only:
# the version module may not be importable in a partially built environment.
try:
    from dials.util.version import dials_version

    print(dials_version())
except Exception:
    pass

# Remind developers about the precommit hooks.
dials.precommitbx.nagger.nag()
def _create_dials_env_script():
    """
    write dials environment setup script and clobber cctbx setup scripts
    does nothing unless a file named 'dials'/'dials.bat' exists above
    the build/ directory
    """
    import os
    import libtbx.load_env

    # The marker file lives one level above build/; its name is platform
    # dependent.
    if os.name == "nt":
        filename = abs(libtbx.env.build_path.dirname() / "dials.bat")
    else:
        filename = abs(libtbx.env.build_path.dirname() / "dials")
    if not os.path.exists(filename):
        # No marker file: this installation does not want the env script.
        return

    # Environment-activation script template.  On Windows this is a batch
    # file; elsewhere a bash script that locates the build directory, enables
    # the bundled conda environment and wires up command-line completion.
    if os.name == "nt":
        script = """
rem enable conda environment
call %~dp0conda_base\\condabin\\activate.bat
rem prepend cctbx /build/bin directory to PATH
set PATH=%~dp0build\\bin;%PATH%
"""
    else:
        script = (
            """
#!/bin/bash
if [ ! -z "${LIBTBX_BUILD_RELOCATION_HINT:-}" ]; then
    # possibly used for some logic in the installer
    LIBTBX_BUILD="${LIBTBX_BUILD_RELOCATION_HINT}"
    LIBTBX_BUILD_RELOCATION_HINT=
    export LIBTBX_BUILD_RELOCATION_HINT
elif [ -n "$BASH_SOURCE" ]; then
    LIBTBX_BUILD="$(dirname -- "${BASH_SOURCE[0]}")/build"
else
    LIBTBX_BUILD="%s"
fi
# make path absolute and resolve symlinks
LIBTBX_BUILD=$(cd -P -- "${LIBTBX_BUILD}" && pwd -P)
# enable conda environment
source ${LIBTBX_BUILD}/../conda_base/etc/profile.d/conda.sh
conda activate $(dirname -- "${LIBTBX_BUILD}")/conda_base
# prepend cctbx /build/bin directory to PATH
PATH="${LIBTBX_BUILD}/bin:${PATH}"
export PATH
# enable DIALS command line completion
[ -n "$BASH_VERSION" ] && {
    source $(libtbx.find_in_repositories dials/util/autocomplete.sh) && \
    source ${LIBTBX_BUILD}/dials/autocomplete/bash.sh || \
    echo dials command line completion not available
}
unset LIBTBX_BUILD
"""
            % abs(libtbx.env.build_path)
        )
    with open(filename, "w") as fh:
        fh.write(script.lstrip())

    if os.name != "nt":
        # Make the script executable for everyone allowed to read it.
        mode = os.stat(filename).st_mode
        mode |= (mode & 0o444) >> 2  # copy R bits to X
        os.chmod(filename, mode)

    # Overwrite the legacy cctbx setpaths* scripts with a notice that points
    # users at the new activation script instead.
    if os.name == "nt":
        clobber = """
echo {stars}
echo The script to set up the DIALS environment has changed
echo Please source or run {newscript} instead
echo {stars}
"""
        clobber_extensions = (".sh", ".csh", ".bat")
    else:
        clobber = """
echo '{stars}'
echo The script to set up the DIALS environment has changed
echo Please source or run '{newscript}' instead
echo '{stars}'
"""
        clobber_extensions = (".sh", ".csh", ".bat")
    for clobberfile_name in (
        "setpaths",
        "setpaths_all",
        "setpaths_debug",
    ):
        for clobber_ext in clobber_extensions:
            with open(
                abs(libtbx.env.build_path / (clobberfile_name + clobber_ext)), "w"
            ) as fh:
                fh.write(clobber.format(newscript=filename, stars="*" * 74))
def _install_dials_autocompletion():
    """generate bash.sh and SConscript file in /build/dials/autocomplete"""
    import os  # required due to cctbx weirdness
    import libtbx.load_env

    # Find the dials source directory
    dist_path = libtbx.env.dist_path("dials")

    # Set the location of the output directory
    output_directory = libtbx.env.under_build(os.path.join("dials", "autocomplete"))
    try:
        os.makedirs(output_directory)
    except OSError:
        # Directory already exists; nothing to do.
        pass

    # Build a list of autocompleteable commands: every public command_line
    # module that opts in via the DIALS_ENABLE_COMMAND_LINE_COMPLETION marker.
    commands_dir = os.path.join(dist_path, "command_line")
    command_list = []
    for filename in sorted(os.listdir(commands_dir)):
        if not filename.startswith("_") and filename.endswith(".py"):
            # Check if this file marks itself as completable
            with open(os.path.join(commands_dir, filename), "rb") as f:
                if b"DIALS_ENABLE_COMMAND_LINE_COMPLETION" in f.read():
                    command_name = f"dials.{filename[:-3]}"
                    command_list.append(command_name)
    print("Identified autocompletable commands: " + " ".join(command_list))

    # Generate the autocompletion SConscript.  The template braces are doubled
    # ({{ }}) because the string itself goes through str.format below.
    with open(os.path.join(output_directory, "SConscript"), "w") as builder:
        builder.write(
            """import os.path
import libtbx.load_env\n
Import("env")\n\n
def dispatcher_outer(name):
    return os.path.join(libtbx.env.under_build("bin"), name)\n\n
def dispatcher_inner(name):
    return os.path.join(
        libtbx.env.dist_path("dials"), "command_line", "%s.py" % name.partition(".")[2]
    )\n\n
env.Append(
    BUILDERS={{
        "AutoComplete": Builder(action="-$SOURCE --export-autocomplete-hints > $TARGET")
    }}
)
env["ENV"]["DIALS_NOBANNER"] = "1"
for cmd in [
{}
]:
    ac = env.AutoComplete(cmd, [dispatcher_outer(cmd), dispatcher_inner(cmd)])
    Requires(ac, Dir(libtbx.env.under_build("lib")))
    Depends(ac, os.path.join(libtbx.env.dist_path("dials"), "util", "options.py"))
    Depends(ac, os.path.join(libtbx.env.dist_path("dials"), "util", "autocomplete.sh"))
""".format(
                "\n".join([f'    "{cmd}",' for cmd in command_list])
            )
        )

    # Generate a bash script activating command line completion for each
    # relevant command; the first branch handles shells with compopt, the
    # second falls back to 'complete -o nospace' where compopt is missing.
    with open(os.path.join(output_directory, "bash.sh"), "w") as script:
        script.write("type compopt &>/dev/null && {\n")
        for cmd in command_list:
            script.write(f"  complete -F _dials_autocomplete {cmd}\n")
        script.write("}\n")
        script.write("type compopt &>/dev/null || {\n")
        for cmd in command_list:
            script.write(f"  complete -o nospace -F _dials_autocomplete {cmd}\n")
        script.write("}\n")


# Run both refresh steps whenever libtbx refreshes this package.
_create_dials_env_script()
_install_dials_autocompletion()
| |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.geometry._core import distance_point_point_xy
from compas.geometry._core import distance_point_line_xy
from compas.geometry._core import closest_point_on_segment_xy
__all__ = [
'is_ccw_xy',
'is_colinear_xy',
'is_polygon_convex_xy',
'is_point_on_line_xy',
'is_point_on_segment_xy',
'is_point_on_polyline_xy',
'is_point_in_triangle_xy',
'is_point_in_polygon_xy',
'is_point_in_convex_polygon_xy',
'is_point_in_circle_xy',
'is_polygon_in_polygon_xy',
'is_intersection_line_line_xy',
'is_intersection_segment_segment_xy',
]
def is_ccw_xy(a, b, c, colinear=False):
    """Determine if c is on the left of ab when looking from a to b,
    and assuming that all points lie in the XY plane.

    Parameters
    ----------
    a : [float, float, float] | :class:`~compas.geometry.Point`
        Base point defined by XY(Z) coordinates.
    b : [float, float, float] | :class:`~compas.geometry.Point`
        First end point defined by XY(Z) coordinates.
    c : [float, float, float] | :class:`~compas.geometry.Point`
        Second end point defined by XY(Z) coordinates.
    colinear : bool, optional
        If True, colinear points will return a positive result.

    Returns
    -------
    bool
        True if ccw.
        False otherwise.

    Examples
    --------
    >>> print(is_ccw_xy([0,0,0], [0,1,0], [-1, 0, 0]))
    True
    >>> print(is_ccw_xy([0,0,0], [0,1,0], [+1, 0, 0]))
    False

    """
    # z-component of the cross product of the vectors ab and ac:
    # positive when c lies to the left of the directed line a->b.
    cross_z = (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0])
    if colinear:
        return cross_z >= 0
    return cross_z > 0
def is_colinear_xy(a, b, c):
    """Determine if three points are colinear on the XY-plane.

    Parameters
    ----------
    a : [float, float, float] | :class:`~compas.geometry.Point`
        Point 1 defined by XY(Z) coordinates.
    b : [float, float, float] | :class:`~compas.geometry.Point`
        Point 2 defined by XY(Z) coordinates.
    c : [float, float, float] | :class:`~compas.geometry.Point`
        Point 3 defined by XY(Z) coordinates.

    Returns
    -------
    bool
        True if the points are colinear.
        False otherwise.

    """
    # The points are colinear exactly when the cross product of ab and ac
    # has a zero z-component.
    return (b[0] - a[0]) * (c[1] - a[1]) == (b[1] - a[1]) * (c[0] - a[0])
def is_polygon_convex_xy(polygon, colinear=False):
    """Determine if the polygon is convex on the XY-plane.

    Parameters
    ----------
    polygon : sequence[point] | :class:`~compas.geometry.Polygon`
        The XY(Z) coordinates of the corners of a polygon, in order, without
        repeating the first vertex at the end.
    colinear : bool, optional
        Are points allowed to be colinear?

    Returns
    -------
    bool
        True if the polygon is convex.
        False otherwise.

    """
    # A polygon is convex when every consecutive vertex triple turns the
    # same way.  Use the triple around the last vertex as reference.
    reference = is_ccw_xy(polygon[-2], polygon[-1], polygon[0], colinear)
    for k in range(len(polygon) - 1):
        triple_turn = is_ccw_xy(polygon[k - 1], polygon[k], polygon[k + 1], colinear)
        if triple_turn != reference:
            return False
    return True
def is_point_on_line_xy(point, line, tol=1e-6):
    """Determine if a point lies on a line on the XY-plane.

    Parameters
    ----------
    point : [float, float, float] | :class:`~compas.geometry.Point`
        XY(Z) coordinates of a point.
    line : [point, point] | :class:`~compas.geometry.Line`
        XY(Z) coordinates of two points defining a line.
    tol : float, optional
        A tolerance for membership verification.

    Returns
    -------
    bool
        True if the point is in on the line.
        False otherwise.

    """
    # Membership within tolerance of the (projected) perpendicular distance.
    gap = distance_point_line_xy(point, line)
    return gap <= tol
def is_point_on_segment_xy(point, segment, tol=1e-6):
    """Determine if a point lies on a given line segment on the XY-plane.

    Parameters
    ----------
    point : [float, float, float] | :class:`~compas.geometry.Point`
        XY(Z) coordinates of a point.
    segment : [point, point] | :class:`~compas.geometry.Line`
        XY(Z) coordinates of two points defining a segment.
    tol : float, optional
        A tolerance for membership verification.

    Returns
    -------
    bool
        True if the point is on the line segment.
        False otherwise.

    """
    start, end = segment
    # The point must lie on the supporting line first.
    if not is_point_on_line_xy(point, segment, tol=tol):
        return False
    length = distance_point_point_xy(start, end)
    if length == 0:
        # Degenerate segment: never reports membership.
        return False
    # On-line point is between the end points when the triangle inequality
    # degenerates (within tolerance).
    detour = distance_point_point_xy(start, point) + distance_point_point_xy(end, point)
    return detour <= length + tol
def is_point_on_polyline_xy(point, polyline, tol=1e-6):
    """Determine if a point is on a polyline on the XY-plane.

    Parameters
    ----------
    point : [float, float, float] | :class:`~compas.geometry.Point`
        XY(Z) coordinates.
    polyline : sequence[point] | :class:`~compas.geometry.Polyline`
        XY(Z) coordinates of the points of the polyline.
    tol : float, optional
        The tolerance for membership verification.

    Returns
    -------
    bool
        True if the point is on the polyline.
        False otherwise.

    """
    # Walk consecutive vertex pairs and test each segment in turn.
    for start, end in zip(polyline, polyline[1:]):
        nearest = closest_point_on_segment_xy(point, (start, end))
        if distance_point_point_xy(point, nearest) <= tol:
            return True
    return False
def is_point_in_triangle_xy(point, triangle, colinear=False):
    """Determine if a point is in the interior of a triangle lying on the XY-plane.

    Parameters
    ----------
    point : [float, float, float] | :class:`~compas.geometry.Point`
        XY(Z) coordinates of a point.
    triangle : [point, point, point]
        XY(Z) coordinates of the corners of the triangle.
    colinear : bool, optional
        Allow points to be colinear.

    Returns
    -------
    bool
        True if the point is in the triangle.
        False otherwise.

    """
    a, b, c = triangle
    # The point is inside when it is on the same side of all three edges.
    side = is_ccw_xy(c, a, point, colinear)
    return side == is_ccw_xy(a, b, point, colinear) and side == is_ccw_xy(b, c, point, colinear)
def is_point_in_convex_polygon_xy(point, polygon):
    """Determine if a point is in the interior of a convex polygon lying on the XY-plane.

    Parameters
    ----------
    point : [float, float, float] | :class:`~compas.geometry.Point`
        XY(Z) coordinates of a point (Z will be ignored).
    polygon : sequence[point] | :class:`~compas.geometry.Polygon`
        A sequence of XY(Z) coordinates of the polygon corners (Z will be ignored),
        in order, without repeating the first vertex at the end.

    Returns
    -------
    bool
        True if the point is in the convex polygon.
        False otherwise.

    Warnings
    --------
    Does not work for concave polygons.

    """
    # The point is inside a convex polygon when it lies on the same side of
    # every edge.  Take the closing edge as the reference side.
    side = is_ccw_xy(polygon[-1], polygon[0], point, True)
    for a, b in zip(polygon[:-1], polygon[1:]):
        if is_ccw_xy(a, b, point, True) != side:
            return False
    return True
def is_point_in_polygon_xy(point, polygon):
    """Determine if a point is in the interior of a polygon lying on the XY-plane.

    Parameters
    ----------
    point : [float, float, float] | :class:`~compas.geometry.Point`
        XY(Z) coordinates of a point (Z will be ignored).
    polygon : sequence[point] | :class:`~compas.geometry.Polygon`
        A sequence of XY(Z) coordinates of the polygon corners (Z will be ignored),
        in order, without repeating the first vertex at the end.

    Returns
    -------
    bool
        True if the point is in the polygon.
        False otherwise.

    Warnings
    --------
    A boundary check is not yet implemented. This should include a tolerance value.

    """
    # Ray casting: count crossings of a horizontal ray starting at the point.
    x, y = point[0], point[1]
    vertices = [(p[0], p[1]) for p in polygon]  # drop Z
    inside = False
    x1, y1 = vertices[-1]
    for x2, y2 in vertices:
        # Candidate edge: the ray's y level falls in the edge's half-open
        # y-range and the point is not strictly to the right of the edge.
        if min(y1, y2) < y <= max(y1, y2) and x <= max(x1, x2):
            if y1 != y2:  # always true here, since min(y1, y2) < y <= max(y1, y2)
                x_cross = (y - y1) * (x2 - x1) / (y2 - y1) + x1
                if x1 == x2 or x <= x_cross:
                    inside = not inside
        x1, y1 = x2, y2
    return inside
def is_point_in_circle_xy(point, circle):
    """Determine if a point lies in a circle lying on the XY-plane.

    Parameters
    ----------
    point : [float, float, float] | :class:`~compas.geometry.Point`
        XY(Z) coordinates of a point (Z will be ignored).
    circle : [point, float]
        Center and radius of the circle on the XY plane.

    Returns
    -------
    bool
        True if the point lies in the circle.
        False otherwise.

    """
    center, radius = circle
    # Membership includes the boundary.
    return distance_point_point_xy(point, center) <= radius
def is_polygon_in_polygon_xy(polygon1, polygon2):
    """Determine if a polygon is in the interior of another polygon on the XY-plane.

    Parameters
    ----------
    polygon1 : sequence[point] | :class:`~compas.geometry.Polygon`
        List of XY(Z) coordinates of the corners of the exterior polygon (Z will
        be ignored). The vertices are assumed to be in order and the polygon is
        assumed to be closed: the first and last vertex should not be the same.
    polygon2 : sequence[point] | :class:`~compas.geometry.Polygon`
        List of XY(Z) coordinates of the corners of the interior polygon (Z will
        be ignored). Same ordering and closure conventions as ``polygon1``.

    Returns
    -------
    bool
        True if polygon2 is inside polygon1.
        False otherwise.

    """
    if is_polygon_convex_xy(polygon1) and is_polygon_convex_xy(polygon2):
        # Convex case: containment holds iff every vertex of polygon2 lies
        # inside polygon1.
        for pt in polygon2:
            if not is_point_in_convex_polygon_xy(pt, polygon1):
                return False
        return True
    else:
        # General case: if any edge of polygon2 crosses an edge of polygon1,
        # polygon2 cannot be inside.
        for i in range(len(polygon1)):
            line = [polygon1[-i], polygon1[-i - 1]]
            for j in range(len(polygon2)):
                # Bug fix: pair consecutive vertices (-j, -j - 1) so the inner
                # loop walks the actual edges of polygon2.  The previous index
                # ``polygon2[j - 1]`` produced chords between non-adjacent
                # vertices for j >= 2, missing real edge crossings.
                line_ = [polygon2[-j], polygon2[-j - 1]]
                if is_intersection_segment_segment_xy(line, line_):
                    return False
        # No edge crossings: polygon2 is inside iff any of its vertices is.
        for pt in polygon2:
            if is_point_in_polygon_xy(pt, polygon1):
                return True
        return False
def is_intersection_line_line_xy(l1, l2, tol=1e-6):
    """Verifies if two lines intersect on the XY-plane.

    Parameters
    ----------
    l1 : [point, point] | :class:`~compas.geometry.Line`
        XY(Z) coordinates of two points defining a line.
    l2 : [point, point] | :class:`~compas.geometry.Line`
        XY(Z) coordinates of two points defining a line.
    tol : float, optional
        A tolerance for intersection verification.

    Returns
    --------
    bool
        True if the lines intersect in one point.
        False if the lines are parallel or lie on top of each other.

    """
    # Implemented (previously a NotImplementedError stub).
    a, b = l1
    c, d = l2
    # Direction vectors of both lines, projected onto the XY-plane.
    u_x, u_y = b[0] - a[0], b[1] - a[1]
    v_x, v_y = d[0] - c[0], d[1] - c[1]
    # Two lines in the plane meet in exactly one point iff their directions
    # are not parallel; the z-component of the cross product measures this.
    # Coincident lines have parallel directions too, hence return False.
    return abs(u_x * v_y - u_y * v_x) > tol
def is_intersection_segment_segment_xy(ab, cd):
    """Determines if two segments, ab and cd, intersect.

    Parameters
    ----------
    ab : [point, point] | :class:`~compas.geometry.Line`
        Two points representing the start and end points of a segment.
        Z coordinates will be ignored.
    cd : [point, point] | :class:`~compas.geometry.Line`
        Two points representing the start and end points of a segment.
        Z coordinates will be ignored.

    Returns
    -------
    bool
        True if the segments intersect.
        False otherwise.

    Notes
    -----
    The segments intersect when the end points of each segment lie on
    opposite sides of the other segment's supporting line.

    """
    a, b = ab
    c, d = cd
    # a and b must straddle segment cd ...
    if is_ccw_xy(a, c, d) == is_ccw_xy(b, c, d):
        return False
    # ... and c and d must straddle segment ab.
    return is_ccw_xy(a, b, c) != is_ccw_xy(a, b, d)
| |
#!/usr/bin/env python3
import os
import sys
import random
import time
from random import seed, randint
import argparse
import platform
from datetime import datetime
from myPersonalFunctions import *
import imp
# Useful codes
# os.system("awk '{print $NF}' all_wham.dat > e_total")
# tr " " "\n"
# sed 1d
# sort -u -k 3
# sed -e 's/+T//'
# Put the user's private python/ and opt/ directories in front of PATH so the
# helper scripts shelled out to below are found first.
mypath = os.environ["PATH"]
os.environ["PATH"] = "/home/wl45/python/bin:/home/wl45/opt:" + mypath
# Snapshot of the modified environment for subprocess use.
my_env = os.environ.copy()

parser = argparse.ArgumentParser(
    description="I put quick sbatch commands here")
# parser.add_argument("protein", help="the name of protein")
# parser.add_argument("template", help="the name of template file")
parser.add_argument("--qnqc", help="calculate q of n terminal and q of c terminal ", action="store_true", default=False)
# mode == -1: analyse the single dump.lammpstrj; mode >= 0: number of
# per-run trajectories dump.lammpstrj.<i> to analyse.
parser.add_argument("-m", "--mode", type=int, default=-1)
args = parser.parse_args()
# protein_name = args.template.split('_', 1)[-1].strip('/')
# protein_name = args.protein.strip('/')
# name = "ga_2m"
def calQnQc(i=-1):
    """Run the CalcQnQc helper for three residue windows of 2xov: qn, qc, qc2.

    When ``i == -1`` the single ``dump.lammpstrj`` trajectory is analysed and
    the outputs are named ``qn``/``qc``/``qc2``; otherwise trajectory
    ``dump.lammpstrj.<i>`` is analysed and the output names carry a ``_<i>``
    suffix.  The produced files are read back with ``file_len`` afterwards.
    """
    # (output name, first residue, last residue) for each window.
    windows = (("qn", 0, 80), ("qc", 80, 181), ("qc2", 130, 181))
    produced = []
    for label, start, end in windows:
        if i == -1:
            target = label
            os.system("python2 ~/opt/CalcQnQc.py 2xov.pdb dump.lammpstrj {} 0.15 {} {}".format(target, start, end))
        else:
            target = f"{label}_{i}"
            os.system("python2 ~/opt/CalcQnQc.py 2xov.pdb dump.lammpstrj.{} {} 0.15 {} {}".format(i, target, start, end))
        produced.append(target)
    # Read the line counts of the produced files (kept for parity with the
    # original behaviour; the values are not used further here).
    sizes = [file_len(target) for target in produced]
# os.system("paste qn qc > qnqc")
# if(size1 < 400 or size2 < 400 or size3 < 400):
# raise ValueError('file length too small')
# os.system("head -n 4000 qn > qn_all")
# os.system("head -n 4000 qc > qc_all")
# os.system("head -n 4000 qc2 > qc2_all")
# os.system("tail -n 2000 qn_all > qn_half")
# os.system("tail -n 2000 qc_all > qc_half")
# os.system("tail -n 2000 qc2_all > qc2_half")
# os.system("paste qn_half qc_half qc2_half ")
# Entry point: --qnqc triggers the Q calculations.  mode == -1 processes the
# single dump.lammpstrj; otherwise run once per dump.lammpstrj.<i>.
if(args.qnqc):
    if args.mode == -1:
        calQnQc(args.mode)
    else:
        for i in range(args.mode):
            calQnQc(i)
# number_of_run_list = [2, 4, 8, 16]
# for n in number_of_run_list:
# name = "ga_"+str(n)+"m"
# # os.system("mkdir "+name)
# os.system("cp -r 2lhd.pdb "+name)
#
# # os.system("cp -r 2lhc variables.dat "+name)
# os.chdir(name)
# for i in range(20):
# os.chdir("analysis/"+str(i))
# os.system("cp ../../2lhd.pdb .")
# os.system("python2 ~/opt/script/CalcQValue.py 2lhd.pdb dump.lammpstrj q_gb.dat")
# os.system("python2 ~/opt/script/CalcQValue.py 2lhc.pdb dump.lammpstrj q_ga.dat")
# os.system("cp ~/opt/small_script/qw_gagb.plt .")
# os.system("gnuplot qw_gagb.plt")
# os.system("mv qw_gagb.pdf ../../results/qw_gagb_{0}.pdf".format(str(i)))
# os.chdir("../..")
# os.chdir("..")
#
# for n in number_of_run_list:
# name = "ga_"+str(n)+"m"
# # os.system("mkdir "+name)
# os.system("cp -r 2lhd.pdb "+name)
#
# # os.system("cp -r 2lhc variables.dat "+name)
# os.chdir(name)
# for i in range(20):
# os.chdir("analysis/"+str(i))
# os.system("paste q_ga.dat q_gb.dat > q_gagb.dat")
# os.system("cp ~/opt/small_script/qw_ga-gb.plt .")
# os.system("gnuplot qw_ga-gb.plt")
# os.system("mv qw_ga-gb.pdf ../../results/qw_ga-gb_{0}.pdf".format(str(i)))
# os.chdir("../..")
# os.system("cp ~/opt/small_script/qw_ga_all.plt .")
# os.system("gnuplot qw_ga_all.plt")
# os.system("cp ~/opt/small_script/qw_gb_all.plt .")
# os.system("gnuplot qw_gb_all.plt")
# os.system("cp ~/opt/small_script/qw_diff_all.plt .")
# os.system("gnuplot qw_diff_all.plt")
# os.chdir("..")
# simulation_steps = 4 * 10**6
# warm_up_steps = 10 * 10**5
#
# seed(datetime.now())
# n= 20
# vmd = "/Applications/VMD\ 1.9.2.app/Contents/MacOS/startup.command"
#
# os.system("BuildAllAtomsFromLammps.py dump.lammpstrj movie")
# os.system("cp ~/opt/plot_scripts/2xov_movie.tcl .")
# os.system(vmd+" -e 2xov_movie.tcl ")
# os.system("mkdir -p MyResults")
# for i in range(n):
# print(i)
# os.chdir("analysis/"+str(i))
# os.system("cp ~/opt/plot_scripts/2xov_movie_screenshot.tcl .")
# os.system(vmd+" -e 2xov_movie_screenshot.tcl")
# os.system("cp frame1000.tga ../../MyResults/frame"+str(i)+"_1000.tga")
# #os.system("cp frame450.tga ../Results/frame"+folder_name+"_450.tga")
# # os.system("movie.py "+protein_name)
# os.chdir("../..")
# # analysis
# folder_name = ""
# result_folder = "WeiLu_Aug_07"
# protein_list = ['T089', 'T120', 'T251', 'TOP7', '1UBQ']
# sublist = ['']
# # sublist = ['_ha', '_he']
# # sublist = ['_lp', '_he_lp']
# # folder_list = []
# for protein in protein_list:
# for sub in sublist:
# folder_name = protein+sub
# os.chdir(folder_name)
# os.chdir("best_2nd")
# os.system("pymol ~/opt/plot_scripts/align.pml > matrix.dat")
# os.system("head -n 70 matrix.dat | tail -n 20 > cealign_matrix.dat")
# # for i in range(19, -1, -1):
# # os.system("mv {}.pdb {}.pdb".format(i, i+1))
# os.chdir("../..")
# os.chdir(protein)
# os.chdir("best_1st")
# os.system("python3 ~/opt/small_script/cross_q.py")
# os.chdir("..")
# os.chdir("best_2nd")
# os.system("python3 ~/opt/small_script/cross_q.py")
# os.chdir("..")
# os.chdir("..")
# n = 3
# for i in range(n):
# # simulation set up
# folder_name = str(i)
# os.system("mkdir -p "+folder_name)
# os.system("cp -r "+args.protein+"* "+folder_name)
# os.chdir(folder_name)
# os.system("cp ../../helix_less/simulation/"+str(i)+"/restart.4000000 .")
# os.system( # replace SIMULATION_STEPS with specific steps
# "sed -i.bak 's/WARM_UP_STEPS/'" +
# str(warm_up_steps) +
# "'/g' "+protein_name+".in")
# os.system( # replace RANDOM with a radnom number
# "sed -i.bak 's/RANDOM/'" +
# str(randint(1, 10**6)) +
# "'/g' "+protein_name+".in")
# os.system( # replace SIMULATION_STEPS with specific steps
# "sed -i.bak 's/SIMULATION_STEPS/'" +
# str(simulation_steps) +
# "'/g' "+protein_name+".in")
# # if(platform.system() == 'Darwin'):
# # os.system("/Users/weilu/Documents/lammps-9Oct12_modified/src/lmp_serial \
# # < "+protein_name+".in")
# if(platform.system() == 'Darwin'):
# os.system("/Users/weilu/Documents/lammps-9Oct12_modified/src/lmp_serial \
# < "+protein_name+".in")
# elif(platform.system() == 'Linux'):
# os.system("cp ~/opt/run.slurm .")
# os.system( # replace PROTEIN with pdb name
# "sed -i.bak 's/PROTEIN/'" +
# protein_name +
# "'/g' run.slurm")
# os.system("sbatch run.slurm")
# else:
# print("system unkown")
# os.chdir("..")
# exit(1)
# w_helix_list = [0.1, 0.5, 1, 1.5]
# m_helix_list = [0.1, 0.5, 1, 1.5]
#
# for i in range(len(w_helix_list)):
# w = w_helix_list[i]
# for j in range(len(m_helix_list)):
#
# # m = m_helix_list[j]
# folder_name = str(i)+"_"+str(j)
# # os.system("cd "folder_name)
# os.chdir(folder_name)
# # os.system("analysis.py 2xov/")
# # os.system("echo "+folder_name+" >> ../all")
# os.system("sort -k 3 analysis/list_of_max_q > ../data/"+folder_name)
# os.chdir("..")
# # os.system("mkdir "+folder_name)
# # os.chdir(folder_name)
# # os.system("cp -r ../2xov .")
# # os.chdir("2xov")
# # os.system(
# # "sed -i.bak 's/W_HELIX/'" +
# # str(w) +
# # "'/g' fix_backbone_coeff.data")
# # os.system(
# # "sed -i.bak 's/M_HELIX/'" +
# # str(m) +
# # "'/g' fix_backbone_coeff.data")
# # os.chdir("..")
# # os.system("run.py 2xov/ -n 5")
# os.system("cp ~/opt/gg.py this_gg.py")
# for i in range(5):
# os.system("mkdir "+str(i))
# os.chdir(str(i))
# os.system("cp -r ../2xov/ .")
# os.system("cp ../../2xov_strong_single_memory_600to500/simulation/"+str(i)+"/restart.2000000 2xov/")
# os.system("run.py -s 4 -n 2 2xov/")
# os.chdir("..")
# # rama_list = [6, 8, 16]
# # rama_list = [4]
# melt_t_list = [400, 500, 600]
# for variable in melt_t_list:
# folder_name = str(variable)
# os.system("mkdir "+folder_name)
# os.chdir(folder_name)
# os.system("cp -r ../1qjp .")
# os.chdir("1qjp")
# os.system(
# "sed -i.bak 's/MELTT/'" +
# str(variable) +
# "'/g' 1qjp.in")
# os.chdir("..")
# # os.system("pwd")
# os.system("run.py 1qjp/ -n 5 -s 5")
# os.chdir("..")
# os.system("cp ~/opt/gg.py this_gg.py")
#
# exec (open("config.py").read())
# n = number_of_run
# steps = simulation_steps
#
# protein_name = args.protein.strip('/')
#
# temp = 400
# folder_name = "{}_t{}_q100_test11".format(protein_name, str(temp))
# print("all going to "+folder_name)
# os.system("mkdir -p "+folder_name)
# os.system("rm -f "+folder_name + "/*")
# command = 'cat simulation/{}/%d/wham11 \
# >> {}/all_wham.dat'.format(temp, folder_name)
# # cal rmsd
# os.chdir("simulation/"+str(temp))
# for i in range(n):
# os.chdir(str(i))
# os.system("awk '{print>\"file1\"(NR>(n/2)?2:1)}' n=\"$(wc -l <file1)\" file1")
# os.system("cat file11 >> ../../../"+folder_name+"/rmsd_total")
# # os.system("sed 1d wham.dat > wham1d.dat")
# os.system("awk '{print>\"wham1\"(NR>(n/2)?2:1)}' n=\"$(wc -l <wham1)\" wham1")
# os.chdir("..")
# os.chdir("../..")
# for i in range(n):
# cmd = command % i
# os.system(cmd)
# os.chdir(folder_name)
# os.system("awk '{print $2}' all_wham.dat > Qw_total")
# os.system("awk '{print $3}' all_wham.dat > rg_total")
# os.system("awk '{print $4}' all_wham.dat > p_total")
# os.system("awk '{print $5}' all_wham.dat > tc_total")
# os.system("awk '{print $NF}' all_wham.dat > e_total")
# os.system("cp ~/opt/wham_analysis/*.m .")
# os.chdir("..")
# os.system("~/opt/script/wham/fused_calc_cv.sc {} top7 50 400 350 450 5 50 100 0 0.98".format(folder_name))
#
#
# folder_name = "{}_t{}_q100_test12".format(protein_name, str(temp))
# print("all going to "+folder_name)
# os.system("mkdir -p "+folder_name)
# os.system("rm -f "+folder_name + "/*")
# command = 'cat simulation/{}/%d/wham12 \
# >> {}/all_wham.dat'.format(temp, folder_name)
# # cal rmsd
# os.chdir("simulation/"+str(temp))
# for i in range(n):
# os.chdir(str(i))
# os.system("cat file12 >> ../../../"+folder_name+"/rmsd_total")
# os.chdir("..")
# os.chdir("../..")
# for i in range(n):
# cmd = command % i
# os.system(cmd)
# os.chdir(folder_name)
# os.system("awk '{print $2}' all_wham.dat > Qw_total")
# os.system("awk '{print $3}' all_wham.dat > rg_total")
# os.system("awk '{print $4}' all_wham.dat > p_total")
# os.system("awk '{print $5}' all_wham.dat > tc_total")
# os.system("awk '{print $NF}' all_wham.dat > e_total")
# os.system("cp ~/opt/wham_analysis/*.m .")
# os.chdir("..")
#
#
#
# os.system("~/opt/script/wham/fused_calc_cv.sc {} top7 50 400 350 450 5 50 100 0 0.98".format(folder_name))
#
# result_folder = "WeiLu_Aug_07"
# os.system("mkdir -p "+result_folder)
# protein_list = ['T089', 'T120', 'T251', 'top7', '1UBQ']
# # sublist = ['_ha', '_he']
# sublist = ['_lp', '_he_lp']
# folder_list = []
# for protein in protein_list:
# for sub in sublist:
# folder_list += [protein+sub]
# print(folder_list)
# # exit(1)
# # awk '{print>'file'(NR>(n/2)?2:1)}' n='$(wc -l <test)' test
# for folder in folder_list:
# print(folder)
# os.chdir(folder)
# exec (open("config.py").read())
# n = number_of_run
# steps = simulation_steps
# os.system("mkdir -p ../{}/".format(result_folder)+folder+"/best_q")
# os.system("sort analysis/list_of_max_q > ../{}/q_".format(result_folder)+folder+".dat")
# for i in range(n):
# # move
# os.chdir("analysis/"+str(i))
# os.system("cp chosen.pdb ../../../{}/".format(result_folder) + folder+"/best_q/"+str(i)+".pdb")
# os.chdir("../..")
# os.chdir("..")
# result_folder = "WeiLu_Aug_07"
# os.system("mkdir -p "+result_folder)
# protein_list = ['T089', 'T120', 'T251', 'top7', '1UBQ']
# # sublist = ['_ha', '_he']
# sublist = ['_lp', '_he_lp']
# folder_list = []
# for protein in protein_list:
# for sub in sublist:
# folder_list += [protein+sub]
# print(folder_list)
# # exit(1)
#
# for folder in folder_list:
# print(folder)
# os.chdir(folder)
# exec (open("config.py").read())
# n = number_of_run
# steps = simulation_steps
# os.system("mkdir -p ../{}/".format(result_folder)+folder+"/best_q")
# os.system("sort analysis/list_of_max_q > ../{}/q_".format(result_folder)+folder+".dat")
# for i in range(n):
# # move
# os.chdir("analysis/"+str(i))
# os.system("cp chosen.pdb ../../../{}/".format(result_folder) + folder+"/best_q/"+str(i)+".pdb")
# os.chdir("../..")
# os.chdir("..")
| |
'''
This module contains all the stuff to make your way from python code to
a dynamic library, see __init__.py for exported interfaces.
'''
from pythran.backend import Cxx
from pythran.config import cfg, make_extension
from pythran.cxxgen import BoostPythonModule, Define, Include, Line, Statement
from pythran.cxxgen import FunctionBody, FunctionDeclaration, Value, Block
from pythran.intrinsic import ConstExceptionIntr
from pythran.middlend import refine
from pythran.passmanager import PassManager
from pythran.tables import pythran_ward, functions
from pythran.types.types import extract_constructed_types
from pythran.types.type_dependencies import pytype_to_deps
from pythran.types.conversion import pytype_to_ctype
from pythran.spec import expand_specs
from pythran.syntax import check_specs
import pythran.frontend as frontend
from distutils.errors import CompileError
from numpy.distutils.core import setup
from numpy.distutils.extension import Extension
import numpy.distutils.ccompiler
from subprocess import check_output, STDOUT, CalledProcessError
from tempfile import mkstemp, mkdtemp
import ast
import logging
import networkx as nx
import os.path
import shutil
import sys
import sysconfig
import glob
logger = logging.getLogger(__name__)
# hook taken from numpy.distutils.compiler
# with useless steps and warning removed
def CCompiler_customize(self, dist, need_cxx=0):
    """Trimmed replacement for numpy.distutils' CCompiler.customize.

    Performs the standard compiler customization but strips the
    C-only -Wstrict-prototypes flag, which triggers a spurious warning
    for every C++ compilation.  Installed as a method below.
    """
    logger.info('customize %s' % (self.__class__.__name__))
    numpy.distutils.ccompiler.customize_compiler(self)
    if need_cxx:
        # In general, distutils uses -Wstrict-prototypes, but this option is
        # not valid for C++ code, only for C. Remove it if it's there to
        # avoid a spurious warning on every compilation.
        try:
            self.compiler_so.remove('-Wstrict-prototypes')
        except (AttributeError, ValueError):
            pass
# Monkey-patch the hook in: every CCompiler now uses the trimmed customize.
numpy.distutils.ccompiler.replace_method(numpy.distutils.ccompiler.CCompiler,
                                         'customize', CCompiler_customize)
def _extract_all_constructed_types(v):
    """Gather every type constructed from the types in *v*.

    Returns a deduplicated list sorted shortest-first, so that simpler
    types come before the types built on top of them.

    Replaces the original ``reduce(lambda x, y: x + y, ...)``: the builtin
    `reduce` no longer exists in Python 3, and repeated list concatenation
    is quadratic; ``itertools.chain.from_iterable`` is both portable and
    linear.
    """
    from itertools import chain
    return sorted(set(chain.from_iterable(extract_constructed_types(t)
                                          for t in v)),
                  key=len)
def _extract_specs_dependencies(specs):
    """Collect the type dependencies of every exported signature in *specs*.

    Walks each argument type of each signature of each exported function
    and accumulates the dependencies reported by ``pytype_to_deps``.
    "include" entries are placed first in the returned list.
    """
    deps = set()
    for signatures in specs.values():
        deps.update(dep
                    for signature in signatures
                    for argument_type in signature
                    for dep in pytype_to_deps(argument_type))
    # sort() is stable and False < True, so "include" entries come first
    return sorted(deps, key=lambda dep: "include" not in dep)
def _parse_optimization(optimization):
'''Turns an optimization of the form
my_optim
my_package.my_optim
into the associated symbol'''
splitted = optimization.split('.')
if len(splitted) == 1:
splitted = ['pythran', 'optimizations'] + splitted
return reduce(getattr, splitted[1:], __import__(splitted[0]))
def _get_temp(content, suffix=".cpp"):
'''Get a temporary file for given content, default extension is .cpp
It is user's responsability to delete when done.'''
fd, fdpath = mkstemp(suffix)
with os.fdopen(fd, "w") as cpp:
cpp.write(content)
return fd, fdpath
class HasArgument(ast.NodeVisitor):
    """AST visitor telling whether a given top-level function has arguments."""

    def __init__(self, fname):
        self.fname = fname

    def visit_Module(self, node):
        # only top-level function definitions are considered
        for statement in node.body:
            if type(statement) is ast.FunctionDef and statement.name == self.fname:
                return bool(statement.args.args)
        return False
# PUBLIC INTERFACE STARTS HERE
def generate_cxx(module_name, code, specs=None, optimizations=None):
    '''python + pythran spec -> c++ code
    returns a BoostPythonModule object

    :param module_name: name of the extension module to generate
    :param code: python source code to translate
    :param specs: mapping {exported function name: signature or tuple of
        signatures}; when None, no boost::python wrapping is generated and
        a bare printable object holding the raw C++ is returned instead
    :param optimizations: optional list of optimization names/dotted paths;
        defaults to the `pythran.optimizations` configuration entry
    '''
    pm = PassManager(module_name)
    # front end
    ir, renamings = frontend.parse(pm, code)
    # middle-end
    optimizations = (optimizations or
                     cfg.get('pythran', 'optimizations').split())
    optimizations = map(_parse_optimization, optimizations)
    refine(pm, ir, optimizations)
    # back-end
    content = pm.dump(Cxx, ir)
    # instanciate the meta program
    if specs is None:
        # No export spec: wrap the raw C++ in a minimal object exposing the
        # same str()/generate() interface as BoostPythonModule.
        class Generable:
            def __init__(self, content):
                self.content = content
            def __str__(self):
                return str(self.content)
            generate = __str__
        mod = Generable(content)
    else:
        # uniform typing: promote a lone signature to a 1-element tuple
        for fname, signatures in specs.items():
            if not isinstance(signatures, tuple):
                specs[fname] = (signatures,)
        # verify the pythran export are compatible with the code
        specs = expand_specs(specs)
        check_specs(ir, specs, renamings)
        mod = BoostPythonModule(module_name)
        mod.use_private_namespace = False
        # very low value for max_arity leads to various bugs
        min_val = 2
        specs_max = [max(map(len, s)) for s in specs.itervalues()]
        max_arity = max([min_val] + specs_max)
        mod.add_to_preamble([Define("BOOST_PYTHON_MAX_ARITY", max_arity)])
        mod.add_to_preamble([Define("BOOST_SIMD_NO_STRICT_ALIASING", "1")])
        mod.add_to_preamble([Include("pythonic/core.hpp")])
        mod.add_to_preamble([Include("pythonic/python/core.hpp")])
        mod.add_to_preamble([Line("#ifdef _OPENMP\n#include <omp.h>\n#endif")])
        mod.add_to_preamble(map(Include, _extract_specs_dependencies(specs)))
        mod.add_to_preamble(content.body)
        mod.add_to_init([
            Line('#ifdef PYTHONIC_TYPES_NDARRAY_HPP\nimport_array()\n#endif')])
        # topologically sorted exceptions based on the inheritance hierarchy.
        # needed because otherwise boost python register_exception handlers
        # do not catch exception type in the right way
        # (first valid exception is selected)
        # Inheritance has to be taken into account in the registration order.
        exceptions = nx.DiGraph()
        for function_name, v in functions.iteritems():
            for mname, symbol in v:
                if isinstance(symbol, ConstExceptionIntr):
                    exceptions.add_node(
                        getattr(sys.modules[".".join(mname)], function_name))
        # add edges based on class relationships
        for n in exceptions:
            if n.__base__ in exceptions:
                exceptions.add_edge(n.__base__, n)
        sorted_exceptions = nx.topological_sort(exceptions)
        mod.add_to_init([
            # register exception only if they can be raise from C++ world to
            # Python world. Preprocessors variables are set only if deps
            # analysis detect that this exception can be raised
            Line('#ifdef PYTHONIC_BUILTIN_%s_HPP\n'
                 'boost::python::register_exception_translator<'
                 'pythonic::types::%s>(&pythonic::translate_%s);\n'
                 '#endif' % (n.__name__.upper(), n.__name__, n.__name__)
                 ) for n in sorted_exceptions])
        mod.add_to_init([
            # make sure we get no nested parallelism that wreaks havoc in perf
            Line('#ifdef _OPENMP\n'
                 'omp_set_max_active_levels(1);\n'
                 '#endif')])
        # wrap and register every exported signature of every function
        for function_name, signatures in specs.iteritems():
            internal_func_name = renamings.get(function_name,
                                               function_name)
            for sigid, signature in enumerate(signatures):
                numbered_function_name = "{0}{1}".format(internal_func_name,
                                                         sigid)
                arguments_types = [pytype_to_ctype(t) for t in signature]
                has_arguments = HasArgument(internal_func_name).visit(ir)
                arguments = ["a{0}".format(i)
                             for i in xrange(len(arguments_types))]
                name_fmt = pythran_ward + "{0}::{1}::type{2}"
                args_list = ", ".join(arguments_types)
                specialized_fname = name_fmt.format(module_name,
                                                    internal_func_name,
                                                    "<{0}>".format(args_list)
                                                    if has_arguments else "")
                # strip cv-qualifiers/references from the functor result type
                result_type = ("typename std::remove_cv<"
                               "typename std::remove_reference"
                               "<typename {0}::result_type>::type"
                               ">::type").format(specialized_fname)
                mod.add_to_init(
                    [Statement("pythonic::python_to_pythran<{0}>()".format(t))
                     for t in _extract_all_constructed_types(signature)])
                mod.add_to_init([Statement(
                    "pythonic::pythran_to_python<{0}>()".format(result_type))])
                mod.add_function(
                    FunctionBody(
                        FunctionDeclaration(
                            Value(
                                result_type,
                                numbered_function_name),
                            [Value(t, a)
                             for t, a in zip(arguments_types, arguments)]),
                        Block([Statement("return {0}()({1})".format(
                            pythran_ward + '{0}::{1}'.format(
                                module_name, internal_func_name),
                            ', '.join(arguments)))])
                    ),
                    function_name
                )
        # call __init__() to execute top-level statements
        init_call = '::'.join([pythran_ward + module_name, '__init__()()'])
        mod.add_to_init([Statement(init_call)])
    return mod
def compile_cxxfile(cxxfile, module_so=None, **kwargs):
    '''c++ file -> native module
    Return the filename of the produced shared library
    Raises CompileError on failure

    :param cxxfile: path to the C++ source to compile
    :param module_so: optional destination for the shared library; when
        absent, the module is moved to the current working directory
    :param kwargs: extra options forwarded to make_extension()
    '''
    if module_so:
        module_name, _ = os.path.splitext(os.path.basename(module_so))
    else:
        module_name, _ = os.path.splitext(os.path.basename(cxxfile))
    builddir = mkdtemp()
    buildtmp = mkdtemp()
    extension_args = make_extension(**kwargs)
    extension = Extension(module_name,
                          [cxxfile],
                          language="c++",
                          **extension_args)
    try:
        setup(name=module_name,
              ext_modules=[extension],
              # fake CLI call
              script_name='setup.py',
              script_args=['--verbose'
                           if logger.isEnabledFor(logging.INFO)
                           else '--quiet',
                           'build_ext',
                           '--build-lib', builddir,
                           '--build-temp', buildtmp,
                           ]
              )
        # grab the freshly built library and move it to its destination
        [target] = glob.glob(os.path.join(builddir, module_name + "*"))
        if module_so:
            shutil.move(target, module_so)
        else:
            shutil.move(target, os.getcwd())
            module_so = os.path.join(os.getcwd(), os.path.basename(target))
    except SystemExit as e:
        raise CompileError(e.args)
    finally:
        # always remove the temporary build directories; the original code
        # leaked them whenever the compilation failed
        shutil.rmtree(builddir)
        shutil.rmtree(buildtmp)
    logger.info("Generated module: " + module_name)
    logger.info("Output: " + module_so)
    return module_so
def compile_cxxcode(cxxcode, module_so=None, keep_temp=False,
                    **kwargs):
    '''c++ code (string) -> temporary file -> native module.
    Returns the generated .so.

    :param cxxcode: the C++ source, as a string
    :param module_so: optional destination path, forwarded to compile_cxxfile
    :param keep_temp: when True, the temporary .cpp file is kept on disk
    :param kwargs: extra options forwarded to compile_cxxfile
    '''
    # Get a temporary C++ file to compile
    fd, fdpath = _get_temp(cxxcode)
    module_so = compile_cxxfile(fdpath, module_so, **kwargs)
    if not keep_temp:
        # remove tempfile
        os.remove(fdpath)
    else:
        # logger.warn is a deprecated alias of logger.warning
        logger.warning("Keeping temporary generated file:" + fdpath)
    return module_so
def compile_pythrancode(module_name, pythrancode, specs=None,
                        opts=None, cpponly=False, module_so=None,
                        **kwargs):
    '''Pythran code (string) -> c++ code -> native module
    Returns the generated .so (or .cpp if `cpponly` is set to true).
    '''
    from spec import spec_parser
    # Autodetect the Pythran spec when the caller did not supply one
    if specs is None:
        specs = spec_parser(pythrancode)
    # Lower to C++: a BoostPythonModule object
    module = generate_cxx(module_name, pythrancode, specs, opts)
    if not cpponly:
        # Compile all the way down to a native module
        return compile_cxxcode(str(module.generate()),
                               module_so=module_so,
                               **kwargs)
    # C++-only requested: dump the source, honoring the target name if any
    _, output_file = _get_temp(str(module))
    if module_so:
        shutil.move(output_file, module_so)
        output_file = module_so
    logger.info("Generated C++ source file: " + output_file)
    return output_file
def compile_pythranfile(file_path, module_so=None, module_name=None,
                        cpponly=False, **kwargs):
    """
    Pythran file -> c++ file -> native module.
    Returns the generated .so (or .cpp if `cpponly` is set to true).

    :param file_path: path to the Pythran source file
    :param module_so: optional destination path; defaults to a .so next to
        the input file
    :param module_name: optional module name; derived from the file name or
        from `module_so` when absent
    """
    if not module_so:
        # derive module name from input file name
        basedir, basename = os.path.split(file_path)
        module_name = module_name or os.path.splitext(basename)[0]
        # derive destination from file name
        module_so = os.path.join(basedir, module_name + ".so")
    else:
        # derive module name from destination module_so name
        _, basename = os.path.split(module_so)
        module_name = module_name or os.path.splitext(basename)[0]
    # Add compiled module path to search for imported modules
    sys.path.append(os.path.dirname(file_path))
    # Use `open` rather than the Python2-only `file` builtin, and close the
    # handle deterministically (the original leaked it).
    with open(file_path) as source_file:
        pythrancode = source_file.read()
    compile_pythrancode(module_name, pythrancode,
                        module_so=module_so, cpponly=cpponly, **kwargs)
    return module_so
def test_compile():
    '''Simple passthrough compile test.
    May raises CompileError Exception.
    '''
    passthrough_source = "\n".join([
        "#define BOOST_PYTHON_MAX_ARITY 4",
        "#include <pythonic/core.hpp>"
    ])
    module_so = compile_cxxcode(passthrough_source)
    # clean up the produced library, if any
    if module_so:
        os.remove(module_so)
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Include content from CSV files as tables in Pandoc.
It is based on:
https://github.com/mb21/pandoc-placetable
https://github.com/baig/pandoc-csv2table
"""
from __future__ import print_function
import argparse
import csv
import json
import sys
from io import StringIO
import pypandoc
import requests
from pandocfilters import Table, elt, toJSONFilter, Plain
__VERSION__ = "0.1"
__AUTHOR__ = "Julien Hadley Jack <git@jlhj.de>"
# Pandoc AST alignment constructors, keyed by the one-letter codes accepted in
# the "align" setting.  pandocfilters does not export these constructors, so
# they are built here with elt().
ALIGNMENT = {
    "l": elt("AlignLeft", 1)([]),
    "c": elt("AlignCenter", 1)([]),
    "r": elt("AlignRight", 1)([]),
    "d": elt("AlignDefault", 1)([]),
}
def csv_table(key, value, fmt, meta):
    """
    Pandoc filter turning a "table" code block into a pandoc table.

    :param key: The type of pandoc object
    :type key: str
    :param value: The contents of the object
    :type value: str | list
    :param fmt: The target output format
    :type fmt: str
    :param meta: The metadata of the document.
    :type meta: dict[str, str]
    :return: The created table or None if this filter doesn't apply
    :rtype: dict | None
    """
    if not check_preconditions(key, value):
        return None
    (_, _classes, raw_attributes), content = value
    attributes = map_attributes(raw_attributes)
    settings = generate_settings(attributes, meta)
    return get_table(content, settings)
def check_preconditions(key, value):
    """
    Tell whether this filter applies to the current syntax-tree element.

    Applies only to CodeBlock elements carrying the "table" class.

    :param key: The type of pandoc object.
    :type key: str
    :param value: The contents of the object.
    :type value: list | str
    :return: ``True`` if the filter applies to the element, ``False`` otherwise.
    :rtype: bool
    """
    if key != "CodeBlock":
        return False
    classes = value[0][1]
    return "table" in classes
def map_attributes(attributes):
    """
    Build a dictionary from pandoc's paired attributes.

    Pandoc hands the paired attributes over as a list of (key, value)
    pairs; ``dict`` consumes that shape directly.

    :param attributes: A list of (key, value) pairs
    :type attributes: list
    :return: The dictionary of the paired attributes
    :rtype: dict[str, str]
    """
    return dict(attributes)
def generate_settings(paired_attributes, meta):
    """
    Build the settings object from the code-block attributes and the
    document metadata (attributes take precedence, see get_setting).

    :param paired_attributes: The attributes of the code.
    :type paired_attributes: dict[str, str]
    :param meta: The metadata of the document.
    :type meta: dict[str, str]
    :return: The settings
    :rtype: dict[str, str | int]
    """
    settings = {}
    settings["file_name"] = get_setting(["url", "file"], paired_attributes)
    settings["caption"] = get_setting("caption", paired_attributes)
    settings["content_pos"] = get_setting("content_pos", paired_attributes, meta, "top")
    settings["delimiter"] = get_setting("delimiter", paired_attributes, meta, ",")
    settings["quote_char"] = get_setting(["quotechar", "quote_char"], paired_attributes, meta, '"')
    settings["header"] = get_setting(["header", "headers"], paired_attributes, meta, False)
    settings["alignment"] = get_setting(["align", "aligns", "alignment", "alignments"], paired_attributes, meta)
    settings["widths"] = get_setting(["width", "widths"], paired_attributes, meta)
    settings["colorize"] = get_setting(["colorize", "colourise"], paired_attributes, meta)
    return settings
def get_setting(key, paired_attributes, meta=None, default_value="", remove=False):
    """
    Look a setting up by key, first in the code-block attributes, then in the
    document metadata; the first key that matches wins.

    :param key: The key or keys to search for.
    :type key: str | list[str]
    :param paired_attributes: The attributes of the code block.
    :type paired_attributes: dict[str, str]
    :param meta: The metadata of the document.
    :type meta: dict[str, str]
    :param default_value: Returned when no key can be found.
    :type default_value: str | object
    :param remove: When True, a value found in the attributes is popped.
    :type remove: bool
    :return: The value associated with the key, or the default value.
    :rtype: str | object
    """
    candidates = key if isinstance(key, list) else [key]
    for candidate in candidates:
        if candidate in paired_attributes:
            if remove:
                return paired_attributes.pop(candidate)
            return paired_attributes[candidate]
        if meta is not None and candidate in meta:
            return meta[candidate]
    return default_value
def get_table(content, settings):
    """
    Creates a table as represented in pandoc.

    :param content: The content of the code block
    :type content: str
    :param settings: The settings of this script.
    :return: The table as represented in pandoc
    :rtype: dict
    """
    csv_input = get_csv(content, settings)
    reader = get_reader(csv_input, settings)
    header = get_header(reader, settings)
    csv_content = [get_row(row, settings) for row in reader]
    if hasattr(csv_input, "close"):
        csv_input.close()
    # Guard against an empty CSV: the original crashed with an IndexError on
    # csv_content[0] when there were no data rows.
    settings["column_number"] = len(csv_content[0]) if csv_content else len(header)
    caption = get_caption(settings)
    alignment = get_alignment(settings)
    widths = get_widths(settings)
    return Table(caption, alignment, widths, header, csv_content)
def get_csv(content, settings):
    """
    Return the CSV content. This method will look at urls, files and code block content.

    :param content: The code block content
    :type content: str
    :param settings: A dictionary with settings for this script. This method uses
        the "file_name" and "content_pos" settings.
    :type settings: dict[str, str]
    :return: The CSV content.
    :rtype: io.StringIO
    """
    file_name = settings["file_name"]
    if not file_name:
        return StringIO(content)
    if file_name.startswith("http"):
        csv_result = get_content_from_url(file_name)
    else:
        # close the file handle deterministically; the original left it open
        with open(file_name) as csv_file:
            csv_result = StringIO(csv_file.read())
    # "content_pos" decides whether the inline block content goes above or
    # below the external CSV content.
    if settings["content_pos"] == "bottom":
        order1, order2 = csv_result.getvalue(), content
    else:
        order1, order2 = content, csv_result.getvalue()
    if order1:
        order1 += "\n"
    return StringIO(order1 + order2)
def get_content_from_url(url):
    """
    Get content from an url. This method can be used to download a CSV file.

    On a failed download an error is printed to stderr and None is returned.

    :param url: The url where the content should be loaded from.
    :type url: str
    :return: The content at the url, or None on failure.
    :rtype: io.StringIO | None
    """
    response = requests.get(url)
    if response.ok:
        return StringIO(response.text)
    print("CsvTable - Couldn't download: " + url, file=sys.stderr)
    return None
def get_reader(file, settings):
    """
    Build the CSV reader for a file, honoring the configured delimiter and
    quote character.

    :param file: The file for the CSV content.
    :type file: io.StringIO
    :param settings: Settings dict providing "delimiter" and "quote_char".
    :return: The CSV reader
    :rtype: csv.reader
    """
    delimiter = settings["delimiter"]
    quote_char = settings["quote_char"]
    return csv.reader(file, delimiter=delimiter, quotechar=quote_char)
def get_header(reader, settings):
    """
    Return the formatted header row when headers are enabled, else [].

    The "header" setting is treated as disabled when it is falsy or the
    literal string "no".

    :param reader: The csv reader
    :type reader: csv.reader
    :param settings: Settings dict providing the "header" setting.
    :type settings: dict[str, str]
    :return: Formatted header row for pandoc, or an empty list
    :rtype: list
    """
    header_setting = settings["header"]
    if not header_setting or header_setting == "no":
        return []
    return get_row(next(reader), settings)
def get_row(row, settings):
    """
    Format every cell of a CSV row for pandoc.

    :param row: The list of the row with the cell content as string elements
    :type row: list[str]
    :return: A list of the row with the cell content as elements used by pandoc
    :rtype: list[list]
    """
    formatted_cells = []
    for cell in row:
        formatted_cells.append(format_cell(cell, settings))
    return formatted_cells
def format_cell(content, settings):
    """
    Interpret the cell content as markdown and convert it to pandoc JSON.

    Uses pypandoc for the conversion.  When the "colorize" setting is active,
    cells holding (parenthesized) check/cross marks additionally get a LaTeX
    cell background colour appended.

    :param content: The cell content which can contain markdown formatting
    :type content: str
    :return: [] for empty content, otherwise a one-element list holding a
        "Plain" pandoc element
    :rtype: list
    """
    if settings.get("colorize") in ["yes", "1"]:
        mark_colors = {
            "\\cmark": "\\cellcolor{green!25}",
            "\\xmark": "\\cellcolor{red!25}",
            "(\\cmark)": "\\cellcolor{orange!25}",
            "(\\xmark)": "\\cellcolor{orange!25}"
        }
        content = content + mark_colors.get(content.strip(), "")
    converted = json.loads(pypandoc.convert(content, format='md', to="json"))
    if not converted[1]:
        return []
    return [Plain(converted[1][0]["c"])]
def get_caption(settings):
    """
    Return the table caption as pandoc elements, or [] when none is set.

    The "caption" setting is interpreted as markdown via pypandoc.

    :param settings: Settings dict providing the "caption" setting.
    :rtype: list
    """
    caption = settings["caption"]
    if not caption:
        return []
    parsed = json.loads(pypandoc.convert(caption, format='md', to="json"))
    return parsed[1][0]["c"]
def get_alignment(settings):
    """
    Return the pandoc alignment constructor for each table column.

    Codes are l (left), c (center), r (right) and d (default); the provided
    codes are padded/truncated to the column count, defaulting to "d".

    :param settings: A dictionary with settings for this script.
        This method uses the "column_number" and "alignment" settings.
    :type settings: dict[str, str | int]
    :return: The list with alignments
    :rtype: list
    """
    codes = pad_element(list(settings["alignment"]), settings["column_number"], "d")
    default_alignment = ALIGNMENT["d"]
    return [ALIGNMENT.get(code.lower(), default_alignment) for code in codes]
def pad_element(element, wanted_length, pad_value):
    """
    Pad the element with pad_value up to wanted_length, or cut it down.

    A list shorter than wanted_length is extended in place (and returned);
    a string gets a new padded string.  Longer elements are truncated via
    slicing (which yields a copy).

    :param element: The element that should be padded.
    :type element: str | list
    :param wanted_length: The length that the element should be.
    :type wanted_length: int
    :param pad_value: The value that is used for padding the element.
    :return: The element
    :raises ValueError: If the element is a string but the pad_value is not.
    """
    if isinstance(element, str) and not isinstance(pad_value, str):
        raise ValueError("Value needs to be string to concatenate to string element (not {}).".format(type(pad_value)))
    shortfall = wanted_length - len(element)
    if shortfall <= 0:
        return element[:wanted_length]
    if isinstance(element, list):
        filler = [pad_value] * shortfall
    else:
        filler = pad_value * shortfall
    element += filler
    return element
def get_widths(settings):
    """
    Return the width value for each table column.

    Missing or unparsable width entries default to 0.0; the list is padded
    or truncated to the column count.

    :param settings: A dictionary with settings for this script.
        This method uses the "column_number" and "widths" settings.
    :type settings: dict[str, str | int]
    :return: The list with the widths
    :rtype: list
    """
    raw_parts = settings["widths"].split(" ")
    parsed_widths = [convert_to_float(part) for part in raw_parts]
    return pad_element(parsed_widths, settings["column_number"], 0.0)
def convert_to_float(text, default=0.0):
    """
    Converts a string to a float. If the value can't be converted to a float,
    return the default.

    :param text: The string to be converted to a float (e.g. "0.4")
    :type text: str
    :param default: The default value
    :type default: float
    :return: The resulting float
    :rtype: float
    """
    try:
        return float(text)
    except (TypeError, ValueError):
        # TypeError covers non-string input such as None, which the
        # original let propagate as a crash.
        return default
def parse_arguments():
    """
    Provide a minimal command line interface exposing help text and the
    script's version information.

    :return: The arguments from the command line.
    :rtype: argparse.Namespace
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    version_string = '%(prog)s ' + __VERSION__
    parser.add_argument("-v", '--version', action='version', version=version_string)
    return parser.parse_args()
def main():
    """
    This is the main method that gets data from stdin,
    applies the filter and returns the result to stdout.
    """
    toJSONFilter(csv_table)
if __name__ == '__main__':
    # NOTE(review): CLI parsing is left disabled; confirm it does not clash
    # with the target-format argument pandoc passes to filters before
    # re-enabling it.
    # parse_arguments()
    main()
| |
import json
import datetime
import logging
import re
from dateutil.parser import parse
from redash.utils import JSONEncoder, parse_human_time
from redash.query_runner import *
logger = logging.getLogger(__name__)
try:
import pymongo
from bson.objectid import ObjectId
from bson.timestamp import Timestamp
from bson.son import SON
from bson.json_util import object_hook as bson_object_hook
enabled = True
except ImportError:
enabled = False
# Map Python (2.x) scalar types to redash column types; values of any other
# type fall back to TYPE_STRING at query time (see MongoDB.run_query).
TYPES_MAP = {
    str: TYPE_STRING,
    unicode: TYPE_STRING,
    int: TYPE_INTEGER,
    long: TYPE_INTEGER,
    float: TYPE_FLOAT,
    bool: TYPE_BOOLEAN,
    datetime.datetime: TYPE_DATETIME,
}
class MongoDBJSONEncoder(JSONEncoder):
    """JSON encoder that understands bson's ObjectId and Timestamp types."""
    def default(self, o):
        if isinstance(o, ObjectId):
            # ObjectIds serialize as their string (hex) representation
            return str(o)
        if isinstance(o, Timestamp):
            # Timestamps are encoded through their datetime equivalent
            return super(MongoDBJSONEncoder, self).default(o.as_datetime())
        return super(MongoDBJSONEncoder, self).default(o)
# Matches mongo-shell style ISODate("...") literals embedded in query values.
date_regex = re.compile("ISODate\(\"(.*)\"\)", re.IGNORECASE)
def datetime_parser(dct):
    # json.loads object_hook: replace ISODate("...") string values with parsed
    # datetimes, expand {"$humanTime": ...} objects, then defer everything
    # else to bson's object_hook.
    for k, v in dct.iteritems():
        if isinstance(v, basestring):
            m = date_regex.findall(v)
            if len(m) > 0:
                dct[k] = parse(m[0], yearfirst=True)
    if '$humanTime' in dct:
        return parse_human_time(dct['$humanTime'])
    return bson_object_hook(dct)
def parse_query_json(query):
    """Deserialize a JSON query string, expanding date-like values on the way.

    Raises ValueError when *query* is not valid JSON.
    """
    return json.loads(query, object_hook=datetime_parser)
class MongoDB(BaseQueryRunner):
    """Query runner executing JSON-described queries against MongoDB."""
    @classmethod
    def configuration_schema(cls):
        # JSON schema driving the datasource configuration form.
        return {
            'type': 'object',
            'properties': {
                'connectionString': {
                    'type': 'string',
                    'title': 'Connection String'
                },
                'dbName': {
                    'type': 'string',
                    'title': "Database Name"
                },
                'replicaSetName': {
                    'type': 'string',
                    'title': 'Replica Set Name'
                },
            },
            'required': ['connectionString', 'dbName']
        }
    @classmethod
    def enabled(cls):
        # True only when pymongo/bson imported successfully (see module top).
        return enabled
    @classmethod
    def annotate_query(cls):
        # Queries are strict JSON; a comment annotation would break parsing.
        return False
    def __init__(self, configuration):
        super(MongoDB, self).__init__(configuration)
        self.syntax = 'json'
        self.db_name = self.configuration["dbName"]
        # Treat the datasource as a replica set only when a non-empty
        # replicaSetName is configured.
        self.is_replica_set = True if "replicaSetName" in self.configuration and self.configuration["replicaSetName"] else False
    def _get_column_by_name(self, columns, column_name):
        """Return the column dict whose "name" equals *column_name*, or None."""
        for c in columns:
            if "name" in c and c["name"] == column_name:
                return c
        return None
    def _get_db(self):
        """Open a connection (replica-set aware) and return the database."""
        if self.is_replica_set:
            db_connection = pymongo.MongoReplicaSetClient(self.configuration["connectionString"], replicaSet=self.configuration["replicaSetName"])
        else:
            db_connection = pymongo.MongoClient(self.configuration["connectionString"])
        return db_connection[self.db_name]
    def _merge_property_names(self, columns, document):
        """Append *document*'s top-level keys to *columns* without duplicates."""
        for property in document:
            if property not in columns:
                columns.append(property)
    def _get_collection_fields(self, db, collection_name):
        # Since MongoDB is a document based database and each document doesn't have
        # to have the same fields as another document in the collection, it's a bit
        # hard to show these attributes as fields in the schema.
        #
        # For now, the logic is to take the first and last documents (last is determined
        # by the Natural Order (http://www.mongodb.org/display/DOCS/Sorting+and+Natural+Order)
        # as we don't know the correct order. In most single server installations it would be
        # fine. In a replica set, when reading from a non-master it might not return the
        # really last document written.
        first_document = None
        last_document = None
        for d in db[collection_name].find().sort([("$natural", 1)]).limit(1):
            first_document = d
        for d in db[collection_name].find().sort([("$natural", -1)]).limit(1):
            last_document = d
        columns = []
        if first_document: self._merge_property_names(columns, first_document)
        if last_document: self._merge_property_names(columns, last_document)
        return columns
    def get_schema(self, get_stats=False):
        """Return [{"name": collection, "columns": [...]}] for each collection."""
        schema = {}
        db = self._get_db()
        for collection_name in db.collection_names():
            columns = self._get_collection_fields(db, collection_name)
            schema[collection_name] = { "name" : collection_name, "columns" : sorted(columns) }
        return schema.values()
    def run_query(self, query):
        """Execute *query* (a JSON document) and return (json_data, error).

        Recognized keys: collection (required), query, fields, sort, skip,
        limit, count, aggregate.
        """
        db = self._get_db()
        logger.debug("mongodb connection string: %s", self.configuration['connectionString'])
        logger.debug("mongodb got query: %s", query)
        try:
            query_data = parse_query_json(query)
        except ValueError:
            return None, "Invalid query format. The query is not a valid JSON."
        if "collection" not in query_data:
            return None, "'collection' must have a value to run a query"
        else:
            collection = query_data["collection"]
        q = query_data.get("query", None)
        f = None
        aggregate = query_data.get("aggregate", None)
        if aggregate:
            # pymongo needs ordered mappings for $sort stages: rebuild them
            # as SON from the [{"name": ..., "direction": ...}] list form.
            for step in aggregate:
                if "$sort" in step:
                    sort_list = []
                    for sort_item in step["$sort"]:
                        sort_list.append((sort_item["name"], sort_item["direction"]))
                    step["$sort"] = SON(sort_list)
        if not aggregate:
            s = None
            if "sort" in query_data and query_data["sort"]:
                s = []
                for field in query_data["sort"]:
                    s.append((field["name"], field["direction"]))
        if "fields" in query_data:
            f = query_data["fields"]
        # NOTE(review): this recomputes `s` unconditionally, which makes the
        # `if not aggregate` block above dead code — confirm and deduplicate.
        s = None
        if "sort" in query_data and query_data["sort"]:
            s = []
            for field_data in query_data["sort"]:
                s.append((field_data["name"], field_data["direction"]))
        columns = []
        rows = []
        cursor = None
        if q or (not q and not aggregate):
            # plain find() path (also used when neither query nor aggregate
            # is given)
            if s:
                cursor = db[collection].find(q, f).sort(s)
            else:
                cursor = db[collection].find(q, f)
            if "skip" in query_data:
                cursor = cursor.skip(query_data["skip"])
            if "limit" in query_data:
                cursor = cursor.limit(query_data["limit"])
            if "count" in query_data:
                cursor = cursor.count()
        elif aggregate:
            r = db[collection].aggregate(aggregate)
            # Backwards compatibility with older pymongo versions.
            #
            # Older pymongo version would return a dictionary from an aggregate command.
            # The dict would contain a "result" key which would hold the cursor.
            # Newer ones return pymongo.command_cursor.CommandCursor.
            if isinstance(r, dict):
                cursor = r["result"]
            else:
                cursor = r
        if "count" in query_data:
            # count queries produce a single-cell result; `cursor` holds the
            # integer returned by cursor.count() above
            columns.append({
                "name" : "count",
                "friendly_name" : "count",
                "type" : TYPE_INTEGER
            })
            rows.append({ "count" : cursor })
        else:
            # infer the column set (and types) from the returned documents
            for r in cursor:
                for k in r:
                    if self._get_column_by_name(columns, k) is None:
                        columns.append({
                            "name": k,
                            "friendly_name": k,
                            "type": TYPES_MAP.get(type(r[k]), TYPE_STRING)
                        })
                rows.append(r)
        if f:
            # preserve the projection order requested in "fields"
            ordered_columns = []
            for k in sorted(f, key=f.get):
                ordered_columns.append(self._get_column_by_name(columns, k))
            columns = ordered_columns
        data = {
            "columns": columns,
            "rows": rows
        }
        error = None
        json_data = json.dumps(data, cls=MongoDBJSONEncoder)
        return json_data, error
# Make this runner available as a redash datasource type.
register(MongoDB)
| |
# coding: utf-8
from __future__ import unicode_literals
import unittest
from axon import loads, dumps
from datetime import date, time, datetime, tzinfo
class DateTime10TestCase(unittest.TestCase):
    # Date round-trips: parse an AXON date literal, check the resulting type,
    # then dump it back and compare with the original text.
    def setUp(self):
        pass
    def test_date1(self):
        v = loads('^2010-12-01')[0]
        self.assertEqual(type(v), date)
        s = dumps([v])
        self.assertEqual(s, '^2010-12-01')
    #
    def test_date2(self):
        v = loads('^1900-01-01')[0]
        self.assertEqual(type(v), date)
        s = dumps([v])
        self.assertEqual(s, '^1900-01-01')
    #
    def test_date3(self):
        v = loads('^12-01-01')[0]
        self.assertEqual(type(v), date)
        s = dumps([v])
        self.assertEqual(s, '^12-01-01')
    #
    def test_date4(self):
        v = loads('^0-00-00')[0]
        self.assertEqual(type(v), date)
        s = dumps([v])
        self.assertEqual(s, '^0-00-00')
    #
    # Time round-trips, including timezone offsets.  Note: for negative UTC
    # offsets, timedelta normalization stores days=-1 plus a positive seconds
    # field, hence assertions like 23*60 minutes for a -01:00 offset.
    def test_time1(self):
        v = loads('^00:00')[0]
        self.assertEqual(type(v), time)
        s = dumps([v])
        self.assertEqual(s, '^00:00')
    #
    def test_time2(self):
        v = loads('^23:59:59')[0]
        self.assertEqual(type(v), time)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59')
    #
    def test_time3(self):
        v = loads('^23:59:59.000123')[0]
        self.assertEqual(type(v), time)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59.000123')
    #
    def test_time4(self):
        v = loads('^23:59:59+00:00')[0]
        self.assertEqual(type(v), time)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59+00')
        self.assertEqual(v.utcoffset().seconds, 0)
    #
    def test_time5(self):
        v = loads('^23:59:59+01:00')[0]
        self.assertEqual(type(v), time)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59+01')
        self.assertEqual(v.utcoffset().seconds/60, 60)
    #
    def test_time6(self):
        v = loads('^23:59:59-01:00')[0]
        self.assertEqual(type(v), time)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59-01')
        self.assertEqual(v.utcoffset().seconds/60, 23*60)
    #
    def test_time7(self):
        v = loads('^23:59:59+12:00')[0]
        self.assertEqual(type(v), time)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59+12')
        self.assertEqual(v.utcoffset().seconds/60, 12*60)
    #
    def test_time8(self):
        v = loads('^23:59:59+23:00')[0]
        self.assertEqual(type(v), time)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59+23')
        self.assertEqual(v.utcoffset().seconds/60, 23*60)
    #
    def test_time9(self):
        v = loads('^23:59:59-23:00')[0]
        self.assertEqual(type(v), time)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59-23')
        self.assertEqual(v.utcoffset().seconds/60, 60)
    #
    def test_time10(self):
        # non-zero-padded offset with minutes: dumped in long form
        v = loads('^23:59:59+3:15')[0]
        self.assertEqual(type(v), time)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59+03:15')
        self.assertEqual(v.utcoffset().seconds/60, 3*60+15)
    #
    def test_time11(self):
        v = loads('^23:59:59-3:15')[0]
        self.assertEqual(type(v), time)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^23:59:59-03:15')
        self.assertEqual(v.utcoffset().seconds/60, 1440-3*60-15)
    #
    # Datetime round-trips: same offset cases as the time tests above, with a
    # full date attached; naive datetimes must come back with tzinfo None.
    def test_datetime1(self):
        v = loads('^2010-01-01T00:00')[0]
        self.assertEqual(type(v), datetime)
        self.assertEqual(v.tzinfo, None)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T00:00')
    #
    def test_datetime2(self):
        v = loads('^1-01-01T23:59:59')[0]
        self.assertEqual(type(v), datetime)
        self.assertEqual(v.tzinfo, None)
        s = dumps([v])
        self.assertEqual(s, '^1-01-01T23:59:59')
    #
    def test_datetime3(self):
        v = loads('^2010-01-01T23:59:59.000123')[0]
        self.assertEqual(type(v), datetime)
        self.assertEqual(v.tzinfo, None)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T23:59:59.000123')
    #
    def test_datetime4(self):
        v = loads('^2010-01-01T23:59:59+00:00')[0]
        self.assertEqual(type(v), datetime)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T23:59:59+00')
        self.assertEqual(v.utcoffset().seconds, 0)
    #
    def test_datetime5(self):
        v = loads('^2010-01-01T23:59:59+01:00')[0]
        self.assertEqual(type(v), datetime)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T23:59:59+01')
        self.assertEqual(v.utcoffset().seconds/60, 60)
    #
    def test_datetime6(self):
        # negative offsets normalize to days=-1 + positive seconds, hence 23h
        v = loads('^2010-01-01T23:59:59-01:00')[0]
        self.assertEqual(type(v), datetime)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T23:59:59-01')
        self.assertEqual(v.utcoffset().seconds/60, 23*60)
    #
    def test_datetime7(self):
        v = loads('^2010-01-01T23:59:59+12:00')[0]
        self.assertEqual(type(v), datetime)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T23:59:59+12')
        self.assertEqual(v.utcoffset().seconds/60, 12*60)
    #
    def test_datetime8(self):
        v = loads('^2010-01-01T23:59:59+23:00')[0]
        self.assertEqual(type(v), datetime)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T23:59:59+23')
        self.assertEqual(v.utcoffset().seconds/60, 23*60)
    #
    def test_datetime9(self):
        v = loads('^2010-01-01T23:59:59-23:00')[0]
        self.assertEqual(type(v), datetime)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T23:59:59-23')
        self.assertEqual(v.utcoffset().seconds/60, 60)
    #
    def test_datetime10(self):
        v = loads('^2010-01-01T23:59:59+3:15')[0]
        self.assertEqual(type(v), datetime)
        self.assertIsInstance(v.tzinfo, tzinfo)
        s = dumps([v])
        self.assertEqual(s, '^2010-01-01T23:59:59+03:15')
        self.assertEqual(v.utcoffset().seconds/60, 3*60+15)
#
def test_datetime11(self):
v = loads('^2010-01-01T23:59:59-3:15')[0]
self.assertEqual(type(v), datetime)
self.assertIsInstance(v.tzinfo, tzinfo)
s = dumps([v])
self.assertEqual(s, '^2010-01-01T23:59:59-03:15')
self.assertEqual(v.utcoffset().seconds/60, 1440-3*60-15)
#
def suite():
    """Build the test suite for this module (avoids shadowing this function)."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(DateTime10TestCase))
    return tests
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
import urllib
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
class BaseSnapshotsClientJSON(rest_client.RestClient):
    """Base Client class to send CRUD Volume snapshot API requests."""

    def __init__(self, auth_provider):
        super(BaseSnapshotsClientJSON, self).__init__(auth_provider)
        self.service = CONF.volume.catalog_type
        self.build_interval = CONF.volume.build_interval
        self.build_timeout = CONF.volume.build_timeout
        # Expected status code on snapshot creation; subclasses for other
        # API versions may override this.
        self.create_resp = 200

    def list_snapshots(self, params=None):
        """List all the snapshots.

        :param params: optional dict of query-string parameters
        """
        url = 'snapshots'
        if params:
            url += '?%s' % urllib.urlencode(params)
        resp, body = self.get(url)
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return resp, body['snapshots']

    def list_snapshots_with_detail(self, params=None):
        """List the details of all snapshots.

        :param params: optional dict of query-string parameters
        """
        url = 'snapshots/detail'
        if params:
            url += '?%s' % urllib.urlencode(params)
        resp, body = self.get(url)
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return resp, body['snapshots']

    def get_snapshot(self, snapshot_id):
        """Returns the details of a single snapshot."""
        url = "snapshots/%s" % str(snapshot_id)
        resp, body = self.get(url)
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return resp, body['snapshot']

    def create_snapshot(self, volume_id, **kwargs):
        """Creates a new snapshot.

        volume_id(Required): id of the volume.
        force: Create a snapshot even if the volume attached (Default=False)
        display_name: Optional snapshot Name.
        display_description: User friendly snapshot description.
        """
        post_body = {'volume_id': volume_id}
        post_body.update(kwargs)
        post_body = json.dumps({'snapshot': post_body})
        resp, body = self.post('snapshots', post_body)
        body = json.loads(body)
        self.expected_success(self.create_resp, resp.status)
        return resp, body['snapshot']

    def update_snapshot(self, snapshot_id, **kwargs):
        """Updates a snapshot."""
        put_body = json.dumps({'snapshot': kwargs})
        resp, body = self.put('snapshots/%s' % snapshot_id, put_body)
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return resp, body['snapshot']

    # NOTE(afazekas): just for the wait function
    def _get_snapshot_status(self, snapshot_id):
        resp, body = self.get_snapshot(snapshot_id)
        status = body['status']
        # NOTE(afazekas): snapshot can reach an "error"
        # state in a "normal" lifecycle
        if (status == 'error'):
            raise exceptions.SnapshotBuildErrorException(
                snapshot_id=snapshot_id)
        return status

    # NOTE(afazekas): Wait reinvented again. It is not in the correct layer
    def wait_for_snapshot_status(self, snapshot_id, status):
        """Waits for a Snapshot to reach a given status.

        :raises exceptions.TimeoutException: when build_timeout elapses
        :raises exceptions.SnapshotBuildErrorException: on 'error' status
        """
        start_time = time.time()
        old_value = value = self._get_snapshot_status(snapshot_id)
        while True:
            dtime = time.time() - start_time
            if value != old_value:
                # BUG FIX: the two adjacent literals previously concatenated
                # without a separating space ('..."%s"in %d...').
                LOG.info('Value transition from "%s" to "%s" '
                         'in %d second(s).', old_value,
                         value, dtime)
            if value == status:
                return value
            if dtime > self.build_timeout:
                # BUG FIX: add the missing space between '(%ds)' and 'while'.
                message = ('Time Limit Exceeded! (%ds) '
                           'while waiting for %s, '
                           'but we got %s.' %
                           (self.build_timeout, status, value))
                raise exceptions.TimeoutException(message)
            # BUG FIX: sleep exactly once per iteration; the original called
            # time.sleep() twice per loop, doubling the effective poll period.
            time.sleep(self.build_interval)
            old_value = value
            value = self._get_snapshot_status(snapshot_id)

    def delete_snapshot(self, snapshot_id):
        """Delete Snapshot."""
        resp, body = self.delete("snapshots/%s" % str(snapshot_id))
        self.expected_success(202, resp.status)

    def is_resource_deleted(self, id):
        """Return True once the snapshot can no longer be fetched."""
        try:
            self.get_snapshot(id)
        except exceptions.NotFound:
            return True
        return False

    def reset_snapshot_status(self, snapshot_id, status):
        """Reset the specified snapshot's status."""
        post_body = json.dumps({'os-reset_status': {"status": status}})
        resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
        self.expected_success(202, resp.status)
        return resp, body

    def update_snapshot_status(self, snapshot_id, status, progress):
        """Update the specified snapshot's status."""
        post_body = {
            'status': status,
            'progress': progress
        }
        post_body = json.dumps({'os-update_snapshot_status': post_body})
        url = 'snapshots/%s/action' % str(snapshot_id)
        resp, body = self.post(url, post_body)
        self.expected_success(202, resp.status)
        return resp, body

    def create_snapshot_metadata(self, snapshot_id, metadata):
        """Create metadata for the snapshot."""
        put_body = json.dumps({'metadata': metadata})
        url = "snapshots/%s/metadata" % str(snapshot_id)
        resp, body = self.post(url, put_body)
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return resp, body['metadata']

    def get_snapshot_metadata(self, snapshot_id):
        """Get metadata of the snapshot."""
        url = "snapshots/%s/metadata" % str(snapshot_id)
        resp, body = self.get(url)
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return resp, body['metadata']

    def update_snapshot_metadata(self, snapshot_id, metadata):
        """Update metadata for the snapshot."""
        put_body = json.dumps({'metadata': metadata})
        url = "snapshots/%s/metadata" % str(snapshot_id)
        resp, body = self.put(url, put_body)
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return resp, body['metadata']

    def update_snapshot_metadata_item(self, snapshot_id, id, meta_item):
        """Update metadata item for the snapshot."""
        put_body = json.dumps({'meta': meta_item})
        url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id))
        resp, body = self.put(url, put_body)
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return resp, body['meta']

    def delete_snapshot_metadata_item(self, snapshot_id, id):
        """Delete metadata item for the snapshot."""
        url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id))
        resp, body = self.delete(url)
        self.expected_success(200, resp.status)

    def force_delete_snapshot(self, snapshot_id):
        """Force Delete Snapshot."""
        post_body = json.dumps({'os-force_delete': {}})
        resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
        self.expected_success(202, resp.status)
        return resp, body
class SnapshotsClientJSON(BaseSnapshotsClientJSON):
    # Thin alias: the v1 client inherits all behavior from the base class
    # unchanged (create_resp stays 200).
    """Client class to send CRUD Volume V1 API requests."""
| |
# The idea is to get an algorithm to find a suitable triples refugee-refugee-local-local
# Advantages: 2 on 2 should a balanced and neutral group.
# Disadvantages: it might be hard to choose 2 refugees
import networkx as nx
from get_score import *
from lat_long import *
class getdata():
    """Seed a demo user database and pick balanced refugee/local groups.

    The idea is to find a suitable quadruplet refugee-refugee-local-local:
    2 on 2 should be a balanced and neutral group.
    """

    # Demo seed data, one tuple per user:
    # (Status, firstname, surname, Languages, Gender, Gender_Pref, DOB, Location)
    # About/Email are identical for all demo users, so they are applied in the
    # loop below.  Replaces 16 copy-pasted creation blocks.
    _SEED_USERS = [
        ("refugee", "Hossein", "Schwarzeneger", ["English", "Arabic"],
         "male", "anyone", "1986-30-6", "Laax"),
        ("refugee", "Muhammed", "Housein", ["Arabic", "English"],
         "male", "anyone", "2000-30-6", "Lausanne"),
        ("refugee", "Jordan", "Ceasar", ["Italian", "English"],
         "male", "own", "1994-30-6", "Zurich"),
        ("refugee", "Andan", "Polan", ["English", "Arabic"],
         "female", "own", "1976-30-6", "Berlin"),
        ("refugee", "Milini", "Kouta", ["Arabic"],
         "female", "own", "1987-30-6", "Paris"),
        ("refugee", "Maya", "Fischer", ["English", "Arabic", "German"],
         "female", "anyone", "1956-30-6", "Munich"),
        ("refugee", "Manna", "Houseein", ["English", "Arabic"],
         "female", "anyone", "1990-30-6", "Zurich"),
        ("refugee", "Bennill", "Mustafa", ["English", "Arabic"],
         "male", "anyone", "1978-30-6", "Zurich"),
        ("local", "Arnold", "Jr", ["English", "German"],
         "male", "anyone", "1988-30-6", "Laax"),
        ("local", "PJ", "Jayathissa", ["English", "Spanish"],
         "male", "anyone", "1988-30-6", "Zurich"),
        ("local", "Karoline", "Davierser", ["English", "German"],
         "female", "own", "1995-30-6", "Zurich"),
        ("local", "Jonathon", "paulanner", ["English", "Spanish", "Chinese"],
         "male", "anyone", "1993-30-6", "Biel"),
        ("local", "Sarah", "hausser", ["English", "German"],
         "female", "own", "2000-30-6", "Zurich"),
        ("local", "Benjamin", "Kuhler", ["English", "German"],
         "male", "anyone", "1968-30-6", "Zurich"),
        ("local", "Martina", "Bechler", ["English", "German"],
         "female", "own", "1970-30-6", "Laax"),
        ("local", "Mika", "Kirtal", ["English", "German"],
         "female", "anyone", "1980-30-6", "Laax"),
    ]

    def createdatabase(self, UserInfo):
        """Store all demo users via the given UserInfo model class."""
        print('---------------------')
        for (status, firstname, surname, languages, gender, pref, dob,
             location) in self._SEED_USERS:
            latitude, longitude = lat_long(location)
            newuser = UserInfo(Status=status, firstname=firstname,
                               surname=surname, Languages=languages,
                               Gender=gender, Gender_Pref=pref,
                               DOB=dob, About="lorem ipsum",
                               Email="lorem@lorem.uk", Location=location,
                               Latitude=latitude, Longitude=longitude)
            newuser.put()

    def readdatabase(self, querry, node):
        """Fetch refugees and locals, then return a square built around node.

        NOTE(review): the original body referenced undefined names (q,
        database, self.q, self.node, self.refugees) and would raise
        NameError; rewritten to use the querry/node parameters -- confirm
        the filter syntax against the actual datastore API.
        """
        self.refugees = querry.filter("Status=", "refugee").fetch(limit=5)
        self.local = querry.filter("Status=", "local").fetch(limit=5)
        print("node: " + str(node) + "\n local" + str(self.local) +
              "\n refugees" + str(self.refugees))
        return self.get_square(node, self.local, self.refugees)

    def get_square(self, node, local, refugees):
        """Return a quadruplet of nodes refugee-refugee-local-local.

        Incredibly rusty, but will work.  Starting from `node`, candidate
        friends come from the opposite group and friends-of-friends from
        node's own group.
        """
        frst = local
        scnd = refugees
        # flip the lists if you start with a refugee
        if node.Status == "refugee":
            print("Is a refugee")
            frst = refugees
            scnd = local
        # Map each friend-of-friend candidate to its scored (friend, cost) links.
        biparts = {}
        for friend in scnd:
            for friend_of_friend in frst:
                if friend_of_friend is node:
                    continue  # skip itself
                cost = get_score(node, friend) + get_score(friend,
                                                           friend_of_friend)
                biparts.setdefault(friend_of_friend, []).append((friend, cost))
        highest = -float("inf")
        square = []
        # find the square
        for fof in biparts:
            # NOTE(review): sorting ascending picks the two LOWEST-cost links
            # but the outer loop then MAXIMIZES their sum -- confirm intended.
            two_relatives = sorted(biparts[fof], key=lambda tup: tup[1])[:2]
            if len(two_relatives) < 2:
                # Robustness fix: not enough distinct friends to form a 2x2
                # group (original raised IndexError here).
                continue
            new_cost = two_relatives[0][1] + two_relatives[1][1]
            if new_cost > highest:
                square = [node, two_relatives[0][0], fof, two_relatives[1][0]]
                highest = new_cost
        return square
| |
#pylint: disable = F0401
from re import sub
from java.util.UUID import fromString as juuid
from json import dumps as json_dumps, loads as json_loads
import org.bukkit as bukkit
import org.bukkit.Location as Location
import org.bukkit.entity.Player as Player
import org.bukkit.event.player.PlayerTeleportEvent.TeleportCause as TeleportCause
import org.bukkit.block as bblock
shared = {} # this dict can be used to share stuff across modules
server = bukkit.Bukkit.getServer()
def info(text):
    """Write an info-level line, tagged with the plugin name, to the console."""
    logger = server.getLogger()
    logger.info("[RedstonerUtils] %s" % text)
def warn(text):
    """Write a warning-level line, tagged with the plugin name, to the console."""
    logger = server.getLogger()
    logger.warning("[RedstonerUtils] %s" % text)
def error(text):
    """Write a severe-level line, tagged with the plugin name, to the console."""
    logger = server.getLogger()
    logger.severe("[RedstonerUtils] %s" % text)
def msg(player, text, usecolor = True, basecolor = None):
    """
    Send a message to player.
    The player may be None or offline; such calls are silently ignored.
    Unless usecolor is False, &-codes in text are translated to real color codes.
    basecolor (a single color-code character) is prepended when given; the
    basecolor prefix is always colorified, regardless of usecolor.
    """
    if not player:
        return
    # getPlayer() returns None when the player is offline
    if player != server.getConsoleSender() and not player.getPlayer():
        return
    body = colorify(text) if usecolor else text
    if basecolor:
        player.sendMessage(colorify("&%s" % basecolor) + body)
    else:
        player.sendMessage(body)
def broadcast(perm, text):
    """
    Better than bukkit's broadcast.
    Bukkit only delivers to permissibles subscribed to perm; this sends to
    every online player (and the console) holding the permission.
    A falsy perm broadcasts to everyone.
    """
    colored = colorify(text)
    recipients = list(server.getOnlinePlayers()) + [server.getConsoleSender()]
    for recipient in recipients:
        if not perm or recipient.hasPermission(perm):
            msg(recipient, colored)
def colorify(text):
    """
    Replace &-codes with real (section-sign) color codes.
    Any object is accepted; it is coerced to a string first.
    """
    raw = "%s" % text
    # lookahead keeps the code character, only the '&' itself is replaced
    return sub("&(?=[?\\da-fk-or])", u"\u00A7", raw)
def stripcolors(text):
    """
    Strip all (real) color codes from text.
    Any object is accepted; it is coerced to a string first.
    """
    raw = "%s" % text
    return sub(u"\u00A7[\\da-fk-or]", "", raw)
def safetp(player, world, x, y, z, yaw = 0, pitch = 0):
    """
    Teleport the player to the given coordinates.
    If the destination has blocks at foot or head level, y is raised one
    block at a time until the spot is clear (or the 255 build limit passes).
    """
    # iterative form of the original tail recursion
    while True:
        target = Location(world, x, y, z).getBlock()
        head_free = target.getRelative(bblock.BlockFace.UP).isEmpty()
        if (target.isEmpty() and head_free) or y > 255:
            player.teleport(Location(world, x+0.5, y, z+0.5, yaw, pitch), TeleportCause.COMMAND)
            return
        y += 1
def plugin_header(recipient = None, name="Redstoner Utils"):
    """
    Send the recipient a "Plugin Header" in the format: --=[ PluginName ]=--
    Returns the header string (with &-codes, not yet colorified).
    """
    head = "\n&2--=[ %s ]=--" % name
    msg(recipient, head)
    return head
def noperm(player):
    """Send the default permission-failure message to the player."""
    msg(player, "&cno permission")
def runas(player, cmd):
    """
    Run a command as the given player.
    cmd must not be prefixed with a /.
    """
    server.dispatchCommand(player, cmd)
def is_player(obj):
    """Return True when obj is a bukkit Player."""
    return isinstance(obj, Player)
def checkargs(sender, args, amin, amax):
    """
    Check whether a command received a valid number of args, otherwise
    notify the sender.
    amin is the minimum amount of args; amax the maximum.
    If amax is < 0, infinitely many args are accepted.
    Returns True when the arg count is valid, False otherwise.
    """
    count = len(args)
    if count >= amin and (amax < 0 or count <= amax):
        return True
    # invalid count: pick the most precise complaint
    if amin == amax:
        msg(sender, "&cNeeds " + str(amin) + " arguments!")
    elif amax < 0:
        msg(sender, "&cNeeds at least " + str(amin) + " arguments!")
    else:
        msg(sender, "&cNeeds " + str(amin) + " to " + str(amax) + " arguments!")
    return False
def is_creative(player):
    """Return True if the player is in Creative mode."""
    mode = str(player.getGameMode())
    return mode == "CREATIVE"
def uid(player):
    """Return the player's UUID as a string."""
    return str(player.getUniqueId())
def retrieve_player(uuid_str):
    """
    Get an offline player by UUID string.
    The uuid MUST contain dashes.
    """
    return server.getOfflinePlayer(juuid(uuid_str))
def known_player(player):
    """
    To be used on OfflinePlayer (which can be online!).
    Returns True if the player has been on the server.
    Note: hasPlayedBefore() returns False during a player's first join.
    """
    return player.hasPlayedBefore()
def open_json_file(filename, default):
    """
    Open the given json file and return the parsed object,
    or `default` when the file is missing or unreadable.
    filename is the path + name of the file (without the .json suffix).
    """
    data = None
    try:
        with open("plugins/redstoner-utils.py.dir/files/%s.json" % filename) as obj:
            data = json_loads(obj.read())
    # `except ... as` (PEP 3110) works on Python 2.6+/Jython 2.7 and Python 3,
    # unlike the old `except Exception, e` comma form.
    except Exception as e:
        error("Failed to read from %s: %s" % (filename, e))
    return (default if data is None else data)
def save_json_file(filename, obj):
    """
    Save the given object as json into filename.
    filename is the path + name of the file (without the .json suffix).
    Failures are logged, not raised.
    """
    try:
        with open("plugins/redstoner-utils.py.dir/files/%s.json" % filename, "w") as f:
            f.write(json_dumps(obj))
    # `except ... as` (PEP 3110) works on Python 2.6+/Jython 2.7 and Python 3,
    # unlike the old `except Exception, e` comma form.
    except Exception as e:
        error("Failed to write to %s: %s" % (filename, e))
def toggle(player, ls, add = None, name = "Toggle", on = "&a%s now on!", off = "&c%s now off!", already = "&c%s was already %s"):
    """
    Toggle presence of a player's UUID in a list (ls).
    'add' controls if a player should be added(True) or removed(False).
    If 'add' is None, ls will simply be toggled for that player.
    %s in on, off, and already is replaced with the name.
    When 'add' is given but won't change anything, %s in 'already' is
    replaced with "ON" or "OFF".
    """
    pid = uid(player)
    enabled = pid in ls
    # BUG FIX: the original sent the 'on' message when removing and the
    # 'off' message when adding, and add=None never toggled at all.
    if enabled and add is not True:
        # present, and caller wants it off (False) or a plain toggle (None)
        ls.remove(pid)
        msg(player, off % name)
    elif not enabled and add is not False:
        # absent, and caller wants it on (True) or a plain toggle (None)
        ls.append(pid)
        msg(player, on % name)
    else:
        # add=True while already on, or add=False while already off
        msg(player, already % (name, "ON" if enabled else "OFF"))
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains functions for matching coordinate catalogs.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from ..extern import six
from .representation import UnitSphericalRepresentation
from .. import units as u
__all__ = ['match_coordinates_3d', 'match_coordinates_sky', 'search_around_3d',
'search_around_sky']
def match_coordinates_3d(matchcoord, catalogcoord, nthneighbor=1, storekdtree='kdtree_3d'):
    """
    Finds the nearest 3-dimensional matches of a coordinate or coordinates
    in a set of catalog coordinates.

    This is the true 3D closest neighbor, which differs from the on-sky
    match only if ``distance`` is set on ``matchcoord`` or ``catalogcoord``.

    Parameters
    ----------
    matchcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
        The coordinate(s) to match to the catalog.
    catalogcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
        The base catalog to search; must be an array coordinate
        (``catalogcoord.isscalar == False``).
    nthneighbor : int, optional
        Which closest neighbor to return. ``1`` for matching one set of
        coordinates to another; ``2`` for matching a catalog against
        itself (``1`` would find each point as its own match).
    storekdtree : bool or str, optional
        If a string, cache the KD-Tree on ``catalogcoord.cache`` under
        this name, dramatically speeding up repeated calls with the same
        catalog.  If False, the tree is discarded after use.

    Returns
    -------
    idx : integer array
        Indices into ``catalogcoord`` of the match for each ``matchcoord``.
        Shape matches ``matchcoord``.
    sep2d : `~astropy.coordinates.Angle`
        On-sky separation to each closest match.  Shape matches
        ``matchcoord``.
    dist3d : `~astropy.units.Quantity`
        3D distance to each closest match.  Shape matches ``matchcoord``.

    Notes
    -----
    Requires `SciPy <http://www.scipy.org>`_ to be installed or it will fail.
    """
    if catalogcoord.isscalar or len(catalogcoord) < 1:
        raise ValueError('The catalog for coordinate matching cannot be a '
                         'scalar or length-0.')
    tree = _get_cartesian_kdtree(catalogcoord, storekdtree)
    # bring the match coordinates into the catalog's frame and length unit
    matchcoord = matchcoord.transform_to(catalogcoord)
    cat_unit = catalogcoord.cartesian.x.unit
    match_xyz = matchcoord.cartesian.xyz.to(cat_unit)
    flat_xyz = match_xyz.reshape((3, np.prod(match_xyz.shape) // 3))
    dist, idx = tree.query(flat_xyz.T, nthneighbor)
    if nthneighbor > 1:  # query gives 1D arrays if k=1, 2D arrays otherwise
        dist = dist[:, -1]
        idx = idx[:, -1]
    sep2d = catalogcoord[idx].separation(matchcoord)
    out_shape = match_xyz.shape[1:]
    return idx.reshape(out_shape), sep2d, dist.reshape(out_shape) * cat_unit
def match_coordinates_sky(matchcoord, catalogcoord, nthneighbor=1, storekdtree='kdtree_sky'):
    """
    Finds the nearest on-sky matches of a coordinate or coordinates in
    a set of catalog coordinates.

    This is the on-sky closest neighbor, which differs from the 3D match
    only if ``distance`` is set on ``matchcoord`` or ``catalogcoord``.

    Parameters
    ----------
    matchcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
        The coordinate(s) to match to the catalog.
    catalogcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
        The base catalog to search; must be an array coordinate
        (``catalogcoord.isscalar == False``).
    nthneighbor : int, optional
        Which closest neighbor to return. ``1`` for matching one set of
        coordinates to another; ``2`` for matching a catalog against
        itself (``1`` would find each point as its own match).
    storekdtree : bool or str, optional
        If a string, cache the KD-Tree on ``catalogcoord.cache`` under
        this name, dramatically speeding up repeated calls with the same
        catalog.  If False, the tree is discarded after use.

    Returns
    -------
    idx : integer array
        Indices into ``catalogcoord`` of the match for each ``matchcoord``.
        Shape matches ``matchcoord``.
    sep2d : `~astropy.coordinates.Angle`
        On-sky separation to each closest match.  Shape matches
        ``matchcoord``.
    dist3d : `~astropy.units.Quantity`
        3D distance to each closest match.  Shape matches ``matchcoord``.
        If either input lacks a distance, this is the 3D distance on the
        unit sphere rather than a true distance.

    Notes
    -----
    Requires `SciPy <http://www.scipy.org>`_ to be installed or it will fail.
    """
    if catalogcoord.isscalar or len(catalogcoord) < 1:
        raise ValueError('The catalog for coordinate matching cannot be a '
                         'scalar or length-0.')
    # send to catalog frame
    newmatch = matchcoord.transform_to(catalogcoord)
    # project both onto the unit sphere so the KD-tree match is on-sky only
    newmatch_u = newmatch.realize_frame(
        newmatch.data.represent_as(UnitSphericalRepresentation))
    newcat_u = catalogcoord.realize_frame(
        catalogcoord.data.represent_as(UnitSphericalRepresentation))
    # Check for a stored KD-tree on the passed-in coordinate. Normally it will
    # have a distinct name from the "3D" one, so it's safe to use even though
    # it's based on UnitSphericalRepresentation.
    storekdtree = catalogcoord.cache.get(storekdtree, storekdtree)
    idx, sep2d, sep3d = match_coordinates_3d(newmatch_u, newcat_u,
                                             nthneighbor, storekdtree)
    # sep3d above is computed on the unit sphere and therefore *wrong*
    # whenever both catalogs carry real distances -- recompute in that case
    if not (isinstance(catalogcoord.data, UnitSphericalRepresentation) or
            isinstance(newmatch.data, UnitSphericalRepresentation)):
        sep3d = catalogcoord[idx].separation_3d(newmatch)
    # update the kdtree on the actual passed-in coordinate
    if isinstance(storekdtree, six.string_types):
        catalogcoord.cache[storekdtree] = newcat_u.cache[storekdtree]
    elif storekdtree is True:
        # the old backwards-compatible name
        catalogcoord.cache['kdtree'] = newcat_u.cache['kdtree']
    return idx, sep2d, sep3d
def search_around_3d(coords1, coords2, distlimit, storekdtree='kdtree_3d'):
    """
    Searches for pairs of points that are at least as close as a specified
    distance in 3D space.

    This is intended for use on coordinate objects with arrays of coordinates,
    not scalars. For scalar coordinates, it is better to use the
    ``separation_3d`` methods.

    Parameters
    ----------
    coords1 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
        The first set of coordinates, which will be searched for matches from
        ``coords2`` within ``seplimit``. Cannot be a scalar coordinate.
    coords2 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
        The second set of coordinates, which will be searched for matches from
        ``coords1`` within ``seplimit``. Cannot be a scalar coordinate.
    distlimit : `~astropy.units.Quantity` with distance units
        The physical radius to search within.
    storekdtree : bool or str, optional
        If a string, will store the KD-Tree used in the search with the name
        ``storekdtree`` in ``coords2.cache``. This speeds up subsequent calls
        to this function. If False, the KD-Trees are not saved.

    Returns
    -------
    idx1 : integer array
        Indices into ``coords1`` that matches to the corresponding element of
        ``idx2``. Shape matches ``idx2``.
    idx2 : integer array
        Indices into ``coords2`` that matches to the corresponding element of
        ``idx1``. Shape matches ``idx1``.
    sep2d : `~astropy.coordinates.Angle`
        The on-sky separation between the coordinates. Shape matches ``idx1``
        and ``idx2``.
    dist3d : `~astropy.units.Quantity`
        The 3D distance between the coordinates. Shape matches ``idx1`` and
        ``idx2``. The unit is that of ``coords1``.

    Notes
    -----
    This function requires `SciPy <http://www.scipy.org>`_ (>=0.12.0)
    to be installed or it will fail.

    By convention ``coords2`` is the catalog being searched and ``coords1``
    holds the points to search around; the operations are symmetric but the
    caching optimizations work best when this convention is obeyed.

    The return values are currently always sorted in the same order as
    ``coords1`` (``idx1`` ascending); this is an implementation detail and
    could change in a future release.
    """
    if not distlimit.isscalar:
        raise ValueError('distlimit must be a scalar in search_around_3d')
    if coords1.isscalar or coords2.isscalar:
        raise ValueError('One of the inputs to search_around_3d is a scalar. '
                         'search_around_3d is intended for use with array '
                         'coordinates, not scalars. Instead, use '
                         '``coord1.separation_3d(coord2) < distlimit`` to find '
                         'the coordinates near a scalar coordinate.')
    if len(coords1) == 0 or len(coords2) == 0:
        # Empty array input: return empty match.
        # NOTE: dtype uses the builtin int -- the np.int alias was
        # deprecated in NumPy 1.20 and removed in 1.24.
        return (np.array([], dtype=int), np.array([], dtype=int),
                u.Quantity([], u.deg),
                u.Quantity([], coords1.distance.unit))
    kdt2 = _get_cartesian_kdtree(coords2, storekdtree)
    cunit = coords2.cartesian.x.unit
    # we convert coord1 to match coord2's frame. We do it this way
    # so that if the conversion does happen, the KD tree of coord2 at least
    # gets saved. (by convention, coord2 is the "catalog" if that makes sense)
    coords1 = coords1.transform_to(coords2)
    kdt1 = _get_cartesian_kdtree(coords1, storekdtree, forceunit=cunit)
    # this is the *cartesian* 3D distance that corresponds to the given angle
    d = distlimit.to_value(cunit)
    idxs1 = []
    idxs2 = []
    for i, matches in enumerate(kdt1.query_ball_tree(kdt2, d)):
        # one (i, match) pair per neighbor of point i
        idxs1.extend([i] * len(matches))
        idxs2.extend(matches)
    idxs1 = np.array(idxs1, dtype=int)
    idxs2 = np.array(idxs2, dtype=int)
    if idxs1.size == 0:
        d2ds = u.Quantity([], u.deg)
        d3ds = u.Quantity([], coords1.distance.unit)
    else:
        d2ds = coords1[idxs1].separation(coords2[idxs2])
        d3ds = coords1[idxs1].separation_3d(coords2[idxs2])
    return idxs1, idxs2, d2ds, d3ds
def search_around_sky(coords1, coords2, seplimit, storekdtree='kdtree_sky'):
    """
    Searches for pairs of points that have an angular separation at least as
    close as a specified angle.

    This is intended for use on coordinate objects with arrays of coordinates,
    not scalars. For scalar coordinates, it is better to use the ``separation``
    methods.

    Parameters
    ----------
    coords1 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
        The first set of coordinates, which will be searched for matches from
        ``coords2`` within ``seplimit``. Cannot be a scalar coordinate.
    coords2 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
        The second set of coordinates, which will be searched for matches from
        ``coords1`` within ``seplimit``. Cannot be a scalar coordinate.
    seplimit : `~astropy.units.Quantity` with angle units
        The on-sky separation to search within.
    storekdtree : bool or str, optional
        If a string, will store the KD-Tree used in the search with the name
        ``storekdtree`` in ``coords2.cache``. This speeds up subsequent calls
        to this function. If False, the KD-Trees are not saved.

    Returns
    -------
    idx1 : integer array
        Indices into ``coords1`` that matches to the corresponding element of
        ``idx2``. Shape matches ``idx2``.
    idx2 : integer array
        Indices into ``coords2`` that matches to the corresponding element of
        ``idx1``. Shape matches ``idx1``.
    sep2d : `~astropy.coordinates.Angle`
        The on-sky separation between the coordinates. Shape matches ``idx1``
        and ``idx2``.
    dist3d : `~astropy.units.Quantity`
        The 3D distance between the coordinates. Shape matches ``idx1``
        and ``idx2``; the unit is that of ``coords1``.
        If either ``coords1`` or ``coords2`` don't have a distance,
        this is the 3D distance on the unit sphere, rather than a
        physical distance.

    Notes
    -----
    This function requires `SciPy <http://www.scipy.org>`_ (>=0.12.0)
    to be installed or it will fail.

    In the current implementation, the return values are always sorted in the
    same order as the ``coords1`` (so ``idx1`` is in ascending order). This is
    considered an implementation detail, though, so it could change in a future
    release.
    """
    from . import Angle

    if not seplimit.isscalar:
        raise ValueError('seplimit must be a scalar in search_around_sky')

    if coords1.isscalar or coords2.isscalar:
        raise ValueError('One of the inputs to search_around_sky is a scalar. '
                         'search_around_sky is intended for use with array '
                         'coordinates, not scalars. Instead, use '
                         '``coord1.separation(coord2) < seplimit`` to find the '
                         'coordinates near a scalar coordinate.')

    if len(coords1) == 0 or len(coords2) == 0:
        # Empty array input: return empty match.  The distance unit mirrors
        # what the non-empty branch would produce (``.distance`` is
        # dimensionless when the data carry no physical distance -- see the
        # identical check in the no-match branch below).
        if coords2.distance.unit == u.dimensionless_unscaled:
            distunit = u.dimensionless_unscaled
        else:
            distunit = coords1.distance.unit
        # FIX: use the builtin ``int`` instead of ``np.int``; the latter was
        # a deprecated alias of ``int`` (NumPy 1.20) and was removed in
        # NumPy 1.24, so this line raised AttributeError on modern NumPy.
        return (np.array([], dtype=int), np.array([], dtype=int),
                u.Quantity([], u.deg),
                u.Quantity([], distunit))

    # we convert coord1 to match coord2's frame. We do it this way
    # so that if the conversion does happen, the KD tree of coord2 at least gets
    # saved. (by convention, coord2 is the "catalog" if that makes sense)
    coords1 = coords1.transform_to(coords2)

    # strip out distance info so the KD-Tree is built on the unit sphere
    urepr1 = coords1.data.represent_as(UnitSphericalRepresentation)
    ucoords1 = coords1.realize_frame(urepr1)
    kdt1 = _get_cartesian_kdtree(ucoords1, storekdtree)

    if storekdtree and coords2.cache.get(storekdtree):
        # just use the stored KD-Tree
        kdt2 = coords2.cache[storekdtree]
    else:
        # strip out distance info
        urepr2 = coords2.data.represent_as(UnitSphericalRepresentation)
        ucoords2 = coords2.realize_frame(urepr2)
        kdt2 = _get_cartesian_kdtree(ucoords2, storekdtree)
        if storekdtree:
            # save the KD-Tree in coords2, *not* ucoords2
            coords2.cache['kdtree' if storekdtree is True else storekdtree] = kdt2

    # this is the *cartesian* 3D distance that corresponds to the given angle
    r = (2 * np.sin(Angle(seplimit) / 2.0)).value

    idxs1 = []
    idxs2 = []
    for i, matches in enumerate(kdt1.query_ball_tree(kdt2, r)):
        for match in matches:
            idxs1.append(i)
            idxs2.append(match)
    # FIX: builtin ``int`` instead of the removed ``np.int`` alias (see above)
    idxs1 = np.array(idxs1, dtype=int)
    idxs2 = np.array(idxs2, dtype=int)

    if idxs1.size == 0:
        if coords2.distance.unit == u.dimensionless_unscaled:
            distunit = u.dimensionless_unscaled
        else:
            distunit = coords1.distance.unit
        d2ds = u.Quantity([], u.deg)
        d3ds = u.Quantity([], distunit)
    else:
        d2ds = coords1[idxs1].separation(coords2[idxs2])
        try:
            d3ds = coords1[idxs1].separation_3d(coords2[idxs2])
        except ValueError:
            # they don't have distances, so we just fall back on the cartesian
            # distance, computed from d2ds
            d3ds = 2 * np.sin(d2ds / 2.0)

    return idxs1, idxs2, d2ds, d3ds
def _get_cartesian_kdtree(coord, attrname_or_kdt='kdtree', forceunit=None):
    """
    This is a utility function to retrieve (and build/cache, if necessary)
    a 3D cartesian KD-Tree from various sorts of astropy coordinate objects.

    Parameters
    ----------
    coord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
        The coordinates to build the KD-Tree for.
    attrname_or_kdt : bool or str or KDTree
        If a string, will store the KD-Tree used for the computation in the
        ``coord``, in ``coord.cache`` with the provided name. If given as a
        KD-Tree, it will just be used directly.
    forceunit : unit or None
        If a unit, the cartesian coordinates will convert to that unit before
        being put in the KD-Tree.  If None, whatever unit it's already in
        will be used

    Returns
    -------
    kdt : `~scipy.spatial.cKDTree` or `~scipy.spatial.KDTree`
        The KD-Tree representing the 3D cartesian representation of the input
        coordinates.
    """
    from warnings import warn

    # without scipy this will immediately fail
    from scipy import spatial
    try:
        KDTree = spatial.cKDTree
    except Exception:
        warn('C-based KD tree not found, falling back on (much slower) '
             'python implementation')
        KDTree = spatial.KDTree

    # pre-v0.4 API accepted a bare True, meaning "cache under 'kdtree'"
    if attrname_or_kdt is True:
        attrname_or_kdt = 'kdtree'

    # Resolve any already-available tree: either cached on `coord` under the
    # given name, or handed to us directly as a KDTree instance.
    if isinstance(attrname_or_kdt, six.string_types):
        tree = coord.cache.get(attrname_or_kdt, None)
        if tree is not None and not isinstance(tree, KDTree):
            raise TypeError('The `attrname_or_kdt` "{0}" is not a scipy KD tree!'.format(attrname_or_kdt))
    elif isinstance(attrname_or_kdt, KDTree):
        tree = attrname_or_kdt
        attrname_or_kdt = None
    elif not attrname_or_kdt:
        tree = None
    else:
        raise TypeError('Invalid `attrname_or_kdt` argument for KD-Tree:' +
                        str(attrname_or_kdt))

    if tree is None:
        # need to build the cartesian KD-tree for the catalog: flatten the
        # (3, ...) xyz array into an (npts, 3) table of points
        if forceunit is None:
            xyz = coord.cartesian.xyz
        else:
            xyz = coord.cartesian.xyz.to(forceunit)
        flat = xyz.reshape((3, np.prod(xyz.shape) // 3))
        tree = KDTree(flat.value.T)

    if attrname_or_kdt:
        # cache the freshly built tree in `coord` for later reuse
        coord.cache[attrname_or_kdt] = tree

    return tree
| |
import asyncio
import io
import mimetypes
import os
from abc import ABC, abstractmethod
from multidict import CIMultiDict
from . import hdrs
from .helpers import (content_disposition_header, guess_filename,
parse_mimetype, sentinel)
from .streams import DEFAULT_LIMIT, DataQueue, EofStream, StreamReader
# Public API of this module: the global registry, its helpers, and the
# concrete Payload implementations for common in-memory/file-like sources.
__all__ = ('PAYLOAD_REGISTRY', 'get_payload', 'payload_type', 'Payload',
           'BytesPayload', 'StringPayload', 'StreamReaderPayload',
           'IOBasePayload', 'BytesIOPayload', 'BufferedReaderPayload',
           'TextIOPayload', 'StringIOPayload')
class LookupError(Exception):
    # NOTE: deliberately shadows the builtin ``LookupError`` within this
    # module; raised by PayloadRegistry.get() when no registered factory
    # matches the data type.  Renaming it would break callers catching
    # ``payload.LookupError``.
    pass
def get_payload(data, *args, **kwargs):
    """Return a payload object wrapping *data*, via the global registry."""
    registry = PAYLOAD_REGISTRY
    return registry.get(data, *args, **kwargs)
def register_payload(factory, type):
    """Register *factory* as the payload class for instances of *type*."""
    registry = PAYLOAD_REGISTRY
    registry.register(factory, type)
class payload_type:
    """Class decorator: register the decorated payload factory for *type*
    in the global registry and return the factory unchanged."""

    def __init__(self, type):
        self.type = type

    def __call__(self, factory):
        register_payload(factory, self.type)
        return factory
class PayloadRegistry:
    """Payload registry.

    note: we need zope.interface for more efficient adapter search
    """

    def __init__(self):
        self._registry = []

    def get(self, data, *args, **kwargs):
        # Already-wrapped payloads pass straight through.
        if isinstance(data, Payload):
            return data
        # First registered factory whose type matches wins (insertion order).
        for factory, accepted_type in self._registry:
            if isinstance(data, accepted_type):
                return factory(data, *args, **kwargs)
        raise LookupError()

    def register(self, factory, type):
        self._registry.append((factory, type))
class Payload(ABC):
    """Abstract base for message-body payloads.

    Wraps a value to be written out as a body; concrete subclasses
    implement ``write`` for their particular source type.
    """

    # class-level defaults, overridden per instance/subclass
    _size = None
    _headers = None
    _content_type = 'application/octet-stream'

    def __init__(self, value, *, headers=None,
                 content_type=sentinel, filename=None, encoding=None):
        self._value = value
        self._encoding = encoding
        self._filename = filename
        if headers is not None:
            self._headers = CIMultiDict(headers)
            # an explicit Content-Type header supplies the type when the
            # caller did not pass one
            if content_type is sentinel and hdrs.CONTENT_TYPE in self._headers:
                content_type = self._headers[hdrs.CONTENT_TYPE]
        # ``sentinel`` distinguishes "not given" from an explicit None
        if content_type is sentinel:
            content_type = None
        self._content_type = content_type

    @property
    def size(self):
        """Size of the payload, or None when unknown."""
        return self._size

    @property
    def filename(self):
        """Filename of the payload."""
        return self._filename

    @property
    def headers(self):
        """Custom item headers"""
        return self._headers

    @property
    def encoding(self):
        """Payload encoding"""
        return self._encoding

    @property
    def content_type(self):
        """Content type"""
        if self._content_type is not None:
            return self._content_type
        elif self._filename is not None:
            # guess from the filename's extension
            mime = mimetypes.guess_type(self._filename)[0]
            return 'application/octet-stream' if mime is None else mime
        else:
            # fall back to the class-level default
            return Payload._content_type

    def set_content_disposition(self, disptype, quote_fields=True, **params):
        """Sets ``Content-Disposition`` header.

        :param str disptype: Disposition type: inline, attachment, form-data.
                            Should be valid extension token (see RFC 2183)
        :param dict params: Disposition params
        """
        if self._headers is None:
            self._headers = CIMultiDict()
        self._headers[hdrs.CONTENT_DISPOSITION] = content_disposition_header(
            disptype, quote_fields=quote_fields, **params)

    @asyncio.coroutine  # pragma: no branch
    @abstractmethod
    def write(self, writer):
        """Write payload

        :param AbstractPayloadWriter writer:
        """
class BytesPayload(Payload):
    """Payload wrapping an in-memory bytes-like value."""

    def __init__(self, value, *args, **kwargs):
        assert isinstance(value, (bytes, bytearray, memoryview)), \
            "value argument must be byte-ish (%r)" % type(value)
        kwargs.setdefault('content_type', 'application/octet-stream')
        super().__init__(value, *args, **kwargs)
        # size is known up front for in-memory data
        self._size = len(value)

    @asyncio.coroutine
    def write(self, writer):
        yield from writer.write(self._value)
class StringPayload(BytesPayload):
    """Payload for text, encoded to bytes at construction time."""

    def __init__(self, value, *args,
                 encoding=None, content_type=None, **kwargs):
        # Derive whichever of encoding/content_type was not given; when both
        # are supplied they are used as-is.
        if encoding is None and content_type is None:
            encoding = 'utf-8'
            content_type = 'text/plain; charset=utf-8'
        elif encoding is None:
            *_, params = parse_mimetype(content_type)
            encoding = params.get('charset', 'utf-8')
        elif content_type is None:
            content_type = 'text/plain; charset=%s' % encoding

        super().__init__(
            value.encode(encoding),
            encoding=encoding, content_type=content_type, *args, **kwargs)
class IOBasePayload(Payload):
    """Payload streaming from a binary file-like object."""

    def __init__(self, value, *args, **kwargs):
        if 'filename' not in kwargs:
            kwargs['filename'] = guess_filename(value)
        super().__init__(value, *args, **kwargs)
        if self._filename is not None:
            self.set_content_disposition('attachment', filename=self._filename)

    @asyncio.coroutine
    def write(self, writer):
        # Stream in DEFAULT_LIMIT-sized chunks; always close the source.
        try:
            while True:
                chunk = self._value.read(DEFAULT_LIMIT)
                if not chunk:
                    break
                yield from writer.write(chunk)
        finally:
            self._value.close()
class TextIOPayload(IOBasePayload):
    """Payload streaming from a text-mode file object, encoding on the fly."""

    def __init__(self, value, *args,
                 encoding=None, content_type=None, **kwargs):
        # Same encoding/content-type negotiation as StringPayload.
        if encoding is None and content_type is None:
            encoding = 'utf-8'
            content_type = 'text/plain; charset=utf-8'
        elif encoding is None:
            *_, params = parse_mimetype(content_type)
            encoding = params.get('charset', 'utf-8')
        elif content_type is None:
            content_type = 'text/plain; charset=%s' % encoding

        super().__init__(
            value,
            content_type=content_type, encoding=encoding, *args, **kwargs)

    @property
    def size(self):
        # Remaining bytes when the underlying object is a real file;
        # None when fileno() is unsupported.
        try:
            return os.fstat(self._value.fileno()).st_size - self._value.tell()
        except OSError:
            return None

    @asyncio.coroutine
    def write(self, writer):
        try:
            while True:
                chunk = self._value.read(DEFAULT_LIMIT)
                if not chunk:
                    break
                yield from writer.write(chunk.encode(self._encoding))
        finally:
            self._value.close()
class StringIOPayload(TextIOPayload):
    """TextIOPayload over an in-memory ``io.StringIO`` buffer."""

    @property
    def size(self):
        # unread portion of the buffer
        buf = self._value
        return len(buf.getvalue()) - buf.tell()
class BytesIOPayload(IOBasePayload):
    """IOBasePayload over an in-memory ``io.BytesIO`` buffer."""

    @property
    def size(self):
        # unread portion of the buffer
        buf = self._value
        return len(buf.getbuffer()) - buf.tell()
class BufferedReaderPayload(IOBasePayload):
    """IOBasePayload for buffered binary readers backed by a real file."""

    @property
    def size(self):
        source = self._value
        try:
            return os.fstat(source.fileno()).st_size - source.tell()
        except OSError:
            # fileno() is not supported, e.g.
            # io.BufferedReader(io.BytesIO(b'data'))
            return None
class StreamReaderPayload(Payload):
    """Payload pulling chunks from an asyncio/aiohttp stream reader."""

    @asyncio.coroutine
    def write(self, writer):
        while True:
            chunk = yield from self._value.read(DEFAULT_LIMIT)
            if not chunk:
                break
            yield from writer.write(chunk)
class DataQueuePayload(Payload):
    """Payload draining a ``DataQueue`` until it is exhausted."""

    @asyncio.coroutine
    def write(self, writer):
        while True:
            try:
                chunk = yield from self._value.read()
                # an empty chunk also terminates the stream
                if not chunk:
                    break
                yield from writer.write(chunk)
            except EofStream:
                # the queue signalled end-of-stream
                break
# Global registry used by get_payload()/register_payload().
PAYLOAD_REGISTRY = PayloadRegistry()
# Registration order matters: PayloadRegistry.get() scans entries in
# insertion order, so more specific types (io.StringIO, io.BytesIO,
# BufferedReader/BufferedRandom) are registered before their broader
# bases (io.TextIOBase, io.IOBase).
PAYLOAD_REGISTRY.register(BytesPayload, (bytes, bytearray, memoryview))
PAYLOAD_REGISTRY.register(StringPayload, str)
PAYLOAD_REGISTRY.register(StringIOPayload, io.StringIO)
PAYLOAD_REGISTRY.register(TextIOPayload, io.TextIOBase)
PAYLOAD_REGISTRY.register(BytesIOPayload, io.BytesIO)
PAYLOAD_REGISTRY.register(
    BufferedReaderPayload, (io.BufferedReader, io.BufferedRandom))
PAYLOAD_REGISTRY.register(IOBasePayload, io.IOBase)
PAYLOAD_REGISTRY.register(
    StreamReaderPayload, (asyncio.StreamReader, StreamReader))
PAYLOAD_REGISTRY.register(DataQueuePayload, DataQueue)
| |
# -*- test-case-name: txdav.carddav.datastore,txdav.carddav.datastore.test.test_sql.AddressBookSQLStorageTests -*-
##
# Copyright (c) 2010-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Tests for common addressbook store API functions.
"""
from twext.python.filepath import CachingFilePath as FilePath
from txweb2.http import HTTPError
from txweb2.responsecode import FORBIDDEN
from twisted.internet.defer import inlineCallbacks, returnValue, maybeDeferred
from twisted.python import hashlib
from twistedcaldav.vcard import Component as VComponent
from txdav.base.propertystore.base import PropertyName
from txdav.carddav.iaddressbookstore import IAddressBookObject, IAddressBookHome, \
IAddressBook, IAddressBookTransaction
from txdav.common.datastore.test.util import CommonCommonTests
from txdav.common.icommondatastore import InvalidUIDError
from txdav.common.icommondatastore import ICommonTransaction
from txdav.common.icommondatastore import InvalidObjectResourceError
from txdav.common.icommondatastore import NoSuchHomeChildError
from txdav.common.icommondatastore import ObjectResourceNameAlreadyExistsError
from txdav.idav import IPropertyStore, IDataStore
from txdav.xml.element import WebDAVUnknownElement
from calendarserver.push.util import PushPriority
# On-disk fixture layout: three addressbook homes under
# .../addressbook_store/ho/me/, each with a single "addressbook" child.
storePath = FilePath(__file__).parent().child("addressbook_store")
home1Root = storePath.child("ho").child("me").child("home1")
home2Root = storePath.child("ho").child("me").child("home2")
home3Root = storePath.child("ho").child("me").child("home3")
adbk1Root = home1Root.child("addressbook")
adbk2Root = home2Root.child("addressbook")
adbk3Root = home3Root.child("addressbook")
# Object resources (vCard files) expected in each fixture addressbook, and
# the addressbook names expected in each fixture home.
addressbook1_objectNames = [
    "1.vcf",
    "2.vcf",
    "3.vcf",
]
home1_addressbookNames = [
    "addressbook",
]
addressbook2_objectNames = [
    "1.vcf",
    "2.vcf",
    "3.vcf",
    "4.vcf",
    "5.vcf",
]
home2_addressbookNames = [
    "addressbook",
]
addressbook3_objectNames = [
    "1.vcf",
    "2.vcf",
    "3.vcf",
    "4.vcf",
    "5.vcf",
    "6.vcf",
]
home3_addressbookNames = [
    "addressbook",
]
# A complete, valid vCard 3.0 (UID "uid4"), used when creating new objects.
# vCard lines must be CRLF-terminated, hence the replace() at the end.
vcard4_text = (
"""BEGIN:VCARD
VERSION:3.0
N:Thompson;Default;;;
FN:Default Thompson
EMAIL;type=INTERNET;type=WORK;type=pref:lthompson@example.com
TEL;type=WORK;type=pref:1-555-555-5555
TEL;type=CELL:1-444-444-4444
item1.ADR;type=WORK;type=pref:;;1245 Test;Sesame Street;California;11111;USA
item1.X-ABADR:us
UID:uid4
END:VCARD
""".replace("\n", "\r\n")
)
# A vCard that is syntactically valid but not acceptable for CardDAV.
vcard4notCardDAV_text = (# Missing UID, N and FN
"""BEGIN:VCARD
VERSION:3.0
EMAIL;type=INTERNET;type=WORK;type=pref:lthompson@example.com
TEL;type=WORK;type=pref:1-555-555-5555
TEL;type=CELL:1-444-444-4444
item1.ADR;type=WORK;type=pref:;;1245 Test;Sesame Street;California;11111;USA
item1.X-ABADR:us
END:VCARD
""".replace("\n", "\r\n")
)
# Same card as vcard4_text but carrying UID "uid1" (collides with 1.vcf).
vcard1modified_text = vcard4_text.replace(
    "\r\nUID:uid4\r\n",
    "\r\nUID:uid1\r\n"
)
class CommonTests(CommonCommonTests):
"""
Tests for common functionality of interfaces defined in
L{txdav.carddav.iaddressbookstore}.
"""
md5Values = (
hashlib.md5("1234").hexdigest(),
hashlib.md5("5678").hexdigest(),
hashlib.md5("9ABC").hexdigest(),
hashlib.md5("DEFG").hexdigest(),
hashlib.md5("HIJK").hexdigest(),
hashlib.md5("LMNO").hexdigest(),
)
requirements = {
"home1": {
"addressbook": {
"1.vcf": adbk1Root.child("1.vcf").getContent(),
"2.vcf": adbk1Root.child("2.vcf").getContent(),
"3.vcf": adbk1Root.child("3.vcf").getContent(),
},
"not_a_addressbook": None
},
"home2": {
"addressbook": {
"1.vcf": adbk2Root.child("1.vcf").getContent(),
"2.vcf": adbk2Root.child("2.vcf").getContent(),
"3.vcf": adbk2Root.child("3.vcf").getContent(),
"4.vcf": adbk2Root.child("4.vcf").getContent(),
"5.vcf": adbk2Root.child("5.vcf").getContent(),
},
},
"home3": {
"addressbook": {
"1.vcf": adbk3Root.child("1.vcf").getContent(),
"2.vcf": adbk3Root.child("2.vcf").getContent(),
"3.vcf": adbk3Root.child("3.vcf").getContent(),
"4.vcf": adbk3Root.child("4.vcf").getContent(),
"5.vcf": adbk3Root.child("5.vcf").getContent(),
"6.vcf": adbk3Root.child("6.vcf").getContent(),
},
},
}
md5s = {
"home1": {
"addressbook": {
"1.vcf": md5Values[0],
"2.vcf": md5Values[1],
"3.vcf": md5Values[2],
},
"not_a_addressbook": None
},
"home2": {
"addressbook": {
"1.vcf": md5Values[0],
"2.vcf": md5Values[1],
"3.vcf": md5Values[2],
"4.vcf": md5Values[3],
"5.vcf": md5Values[4],
},
},
"home3": {
"addressbook": {
"1.vcf": md5Values[0],
"2.vcf": md5Values[1],
"3.vcf": md5Values[2],
"4.vcf": md5Values[3],
"5.vcf": md5Values[4],
"6.vcf": md5Values[5],
},
},
}
def homeUnderTest(self, txn=None, name=None):
"""
Get the addressbook home detailed by C{requirements['home1']}.
"""
return (
txn.addressbookHomeWithUID(name if name else "home1")
if txn
else self.transactionUnderTest().addressbookHomeWithUID(name if name else "home1")
)
@inlineCallbacks
def addressbookUnderTest(self, txn=None, name=None, home="home1"):
"""
Get the addressbook detailed by C{requirements['home1']['addressbook']}.
"""
returnValue((yield (yield self.homeUnderTest(txn=txn, name=home))
.addressbookWithName(name if name else "addressbook")))
@inlineCallbacks
def addressbookObjectUnderTest(self, txn=None, name=None, addressbook_name="addressbook", home="home1"):
"""
Get the addressbook detailed by
C{requirements['home1']['addressbook']['1.vcf']}.
"""
returnValue((yield (yield self.addressbookUnderTest(txn=txn, name=addressbook_name, home=home))
.addressbookObjectWithName(name if name else "1.vcf")))
def test_addressbookStoreProvides(self):
"""
The addressbook store provides L{IAddressBookStore} and its required
attributes.
"""
addressbookStore = self.storeUnderTest()
self.assertProvides(IDataStore, addressbookStore)
def test_transactionProvides(self):
"""
The transactions generated by the addressbook store provide
L{IAddressBookStoreTransaction} and its required attributes.
"""
txn = self.transactionUnderTest()
self.assertProvides(ICommonTransaction, txn)
self.assertProvides(IAddressBookTransaction, txn)
@inlineCallbacks
def test_homeProvides(self):
"""
The addressbook homes generated by the addressbook store provide
L{IAddressBookHome} and its required attributes.
"""
self.assertProvides(IAddressBookHome, (yield self.homeUnderTest()))
@inlineCallbacks
def test_addressbookProvides(self):
"""
The addressbooks generated by the addressbook store provide L{IAddressBook} and
its required attributes.
"""
self.assertProvides(IAddressBook, (yield self.addressbookUnderTest()))
@inlineCallbacks
def test_addressbookObjectProvides(self):
"""
The addressbook objects generated by the addressbook store provide
L{IAddressBookObject} and its required attributes.
"""
self.assertProvides(IAddressBookObject,
(yield self.addressbookObjectUnderTest()))
@inlineCallbacks
def test_notifierID(self):
home = yield self.homeUnderTest()
self.assertEquals(home.notifierID(), ("CardDAV", "home1",))
addressbook = yield home.addressbookWithName("addressbook")
self.assertEquals(addressbook.notifierID(), ("CardDAV", "home1/addressbook",))
@inlineCallbacks
def test_addressbookHomeWithUID_exists(self):
"""
Finding an existing addressbook home by UID results in an object that
provides L{IAddressBookHome} and has a C{uid()} method that returns the
same value that was passed in.
"""
addressbookHome = (yield self.transactionUnderTest()
.addressbookHomeWithUID("home1"))
self.assertEquals(addressbookHome.uid(), "home1")
self.assertProvides(IAddressBookHome, addressbookHome)
@inlineCallbacks
def test_addressbookHomeWithUID_absent(self):
"""
L{IAddressBookStoreTransaction.addressbookHomeWithUID} should return C{None}
when asked for a non-existent addressbook home.
"""
txn = self.transactionUnderTest()
self.assertEquals((yield txn.addressbookHomeWithUID("xyzzy")), None)
@inlineCallbacks
def test_addressbookWithName_exists(self):
"""
L{IAddressBookHome.addressbookWithName} returns an L{IAddressBook} provider,
whose name matches the one passed in.
"""
home = yield self.homeUnderTest()
for name in home1_addressbookNames:
addressbook = yield home.addressbookWithName(name)
if addressbook is None:
self.fail("addressbook %r didn't exist" % (name,))
self.assertProvides(IAddressBook, addressbook)
self.assertEquals(addressbook.name(), name)
@inlineCallbacks
def test_addressbookRename(self):
"""
L{IAddressBook.rename} changes the name of the L{IAddressBook}.
"""
home = yield self.homeUnderTest()
addressbook = yield home.addressbookWithName("addressbook")
try:
yield addressbook.rename("some-other-name")
except HTTPError, e:
self.assertEquals(e.response.code, FORBIDDEN)
@inlineCallbacks
def test_addressbookWithName_absent(self):
"""
L{IAddressBookHome.addressbookWithName} returns C{None} for addressbooks which
do not exist.
"""
self.assertEquals(
(yield (yield self.homeUnderTest()).addressbookWithName("xyzzy")),
None)
    @inlineCallbacks
    def test_createAddressBookWithName_absent(self):
        """
        L{IAddressBookHome.createAddressBookWithName} creates a new L{IAddressBook} that
        can be retrieved with L{IAddressBookHome.addressbookWithName}.
        """
        home = yield self.homeUnderTest()
        name = "addressbook"
        # NOTE(review): despite the name/docstring, the body removes the
        # existing addressbook (and the original "absent" precondition is
        # commented out), then checks the home still yields a child with that
        # name -- confirm this reflects the store's remove-means-clear
        # semantics rather than a stale test.
        #self.assertIdentical((yield home.addressbookWithName(name)), None)
        yield home.removeAddressBookWithName(name)
        self.assertNotIdentical((yield home.addressbookWithName(name)), None)
        # notify is called prior to commit
        self.assertTrue(("/CardDAV/example.com/home1/", PushPriority.high) in self.notifierFactory.history)
        yield self.commit()
        # Make sure it's available in a new transaction; i.e. test the commit.
        home = yield self.homeUnderTest()
        self.assertNotIdentical((yield home.addressbookWithName(name)), None)
    @inlineCallbacks
    def test_removeAddressBookWithName_exists(self):
        """
        L{IAddressBookHome.removeAddressBookWithName} removes a addressbook that already
        exists.
        """
        home = yield self.homeUnderTest()
        # FIXME: test transactions
        for name in home1_addressbookNames:
            self.assertNotIdentical((yield home.addressbookWithName(name)), None)
            yield home.removeAddressBookWithName(name)
            # address book is not deleted, but cleared
            ab = yield home.addressbookWithName(name)
            self.assertEquals((yield ab.listAddressBookObjects()), [])
        # notify is called prior to commit; both the home and the cleared
        # addressbook are expected to have pushed a change.
        self.assertEquals(
            set(self.notifierFactory.history),
            set([
                ("/CardDAV/example.com/home1/", PushPriority.high),
                ("/CardDAV/example.com/home1/addressbook/", PushPriority.high),
            ])
        )
        yield self.commit()
    @inlineCallbacks
    def test_removeAddressBookWithName_absent(self):
        """
        Attempting to remove a non-existent addressbook should raise
        L{NoSuchHomeChildError}.
        """
        home = yield self.homeUnderTest()
        # maybeDeferred wraps the call so failUnlessFailure can inspect it
        yield self.failUnlessFailure(
            maybeDeferred(home.removeAddressBookWithName, "xyzzy"),
            NoSuchHomeChildError
        )
@inlineCallbacks
def test_addressbookObjects(self):
"""
L{IAddressBook.addressbookObjects} will enumerate the addressbook objects present
in the filesystem, in name order, but skip those with hidden names.
"""
addressbook1 = yield self.addressbookUnderTest()
addressbookObjects = list((yield addressbook1.addressbookObjects()))
for addressbookObject in addressbookObjects:
self.assertProvides(IAddressBookObject, addressbookObject)
self.assertEquals(
(yield addressbook1.addressbookObjectWithName(addressbookObject.name())),
addressbookObject
)
self.assertEquals(
set(o.name() for o in addressbookObjects),
set(addressbook1_objectNames)
)
@inlineCallbacks
def test_addressbookObjectsWithRemovedObject(self):
"""
L{IAddressBook.addressbookObjects} skips those objects which have been
removed by L{AddressBookObject.remove} in the same
transaction, even if it has not yet been committed.
"""
addressbook1 = yield self.addressbookUnderTest()
obj1 = yield addressbook1.addressbookObjectWithName("2.vcf")
yield obj1.remove()
addressbookObjects = list((yield addressbook1.addressbookObjects()))
self.assertEquals(set(o.name() for o in addressbookObjects),
set(addressbook1_objectNames) - set(["2.vcf"]))
@inlineCallbacks
def test_ownerAddressBookHome(self):
"""
L{IAddressBook.ownerAddressBookHome} should match the home UID.
"""
self.assertEquals(
(yield self.addressbookUnderTest()).ownerAddressBookHome().uid(),
(yield self.homeUnderTest()).uid()
)
@inlineCallbacks
def test_addressbookObjectWithName_exists(self):
"""
L{IAddressBook.addressbookObjectWithName} returns an L{IAddressBookObject}
provider for addressbooks which already exist.
"""
addressbook1 = yield self.addressbookUnderTest()
for name in addressbook1_objectNames:
addressbookObject = yield addressbook1.addressbookObjectWithName(name)
self.assertProvides(IAddressBookObject, addressbookObject)
self.assertEquals(addressbookObject.name(), name)
# FIXME: add more tests based on CommonTests.requirements
@inlineCallbacks
def test_addressbookObjectWithName_absent(self):
"""
L{IAddressBook.addressbookObjectWithName} returns C{None} for addressbooks which
don't exist.
"""
addressbook1 = yield self.addressbookUnderTest()
self.assertEquals((yield addressbook1.addressbookObjectWithName("xyzzy")), None)
@inlineCallbacks
def test_AddressBookObject_remove_exists(self):
"""
Remove an existing addressbook object.
"""
addressbook = yield self.addressbookUnderTest()
for name in addressbook1_objectNames:
uid = (u'uid' + name.rstrip(".vcf"))
obj1 = (yield addressbook.addressbookObjectWithUID(uid))
self.assertNotIdentical(
obj1,
None
)
yield obj1.remove()
self.assertEquals(
(yield addressbook.addressbookObjectWithUID(uid)),
None
)
self.assertEquals(
(yield addressbook.addressbookObjectWithName(name)),
None
)
    @inlineCallbacks
    def test_AddressBookObject_remove(self):
        """
        Remove an existing addressbook object.
        """
        addressbook = yield self.addressbookUnderTest()
        for name in addressbook1_objectNames:
            obj1 = (yield addressbook.addressbookObjectWithName(name))
            self.assertNotIdentical(obj1, None)
            yield obj1.remove()
            self.assertIdentical(
                (yield addressbook.addressbookObjectWithName(name)), None
            )
            # each removal should have pushed notifications for both the
            # home and the addressbook (history is compared as a set, so
            # repeats across iterations collapse)
            self.assertEquals(
                set(self.notifierFactory.history),
                set([
                    ("/CardDAV/example.com/home1/", PushPriority.high),
                    ("/CardDAV/example.com/home1/addressbook/", PushPriority.high),
                ])
            )
@inlineCallbacks
def test_addressbookName(self):
"""
L{AddressBook.name} reflects the name of the addressbook.
"""
self.assertEquals((yield self.addressbookUnderTest()).name(), "addressbook")
@inlineCallbacks
def test_addressbookObjectName(self):
"""
L{IAddressBookObject.name} reflects the name of the addressbook object.
"""
self.assertEquals(
(yield self.addressbookObjectUnderTest()).name(),
"1.vcf")
    @inlineCallbacks
    def test_addressbookObjectMetaData(self):
        """
        The objects retrieved from the addressbook have various
        methods which return metadata values.
        """
        adbk = yield self.addressbookObjectUnderTest()
        # ``basestring`` keeps this assertion Python-2-only; it accepts both
        # str and unicode values
        self.assertIsInstance(adbk.name(), basestring)
        self.assertIsInstance(adbk.uid(), basestring)
        self.assertIsInstance(adbk.md5(), basestring)
        # size in bytes; created/modified are integers (presumably epoch
        # timestamps -- confirm against the store implementation)
        self.assertIsInstance(adbk.size(), int)
        self.assertIsInstance(adbk.created(), int)
        self.assertIsInstance(adbk.modified(), int)
@inlineCallbacks
def test_component(self):
"""
L{IAddressBookObject.component} returns a L{VComponent} describing the
addressbook data underlying that addressbook object.
"""
component = yield (yield self.addressbookObjectUnderTest()).component()
self.failUnless(
isinstance(component, VComponent),
component
)
self.assertEquals(component.name(), "VCARD")
self.assertEquals(component.resourceUID(), "uid1")
@inlineCallbacks
def test_iAddressBookText(self):
"""
L{IAddressBookObject.iAddressBookText} returns a C{str} describing the same
data provided by L{IAddressBookObject.component}.
"""
text = yield (yield self.addressbookObjectUnderTest())._text()
self.assertIsInstance(text, str)
self.failUnless(text.startswith("BEGIN:VCARD\r\n"))
self.assertIn("\r\nUID:uid1\r\n", text)
self.failUnless(text.endswith("\r\nEND:VCARD\r\n"))
@inlineCallbacks
def test_addressbookObjectUID(self):
"""
L{IAddressBookObject.uid} returns a C{str} describing the C{UID} property
of the addressbook object's component.
"""
self.assertEquals((yield self.addressbookObjectUnderTest()).uid(), "uid1")
@inlineCallbacks
def test_addressbookObjectWithUID_absent(self):
"""
L{IAddressBook.addressbookObjectWithUID} returns C{None} for addressbooks which
don't exist.
"""
addressbook1 = yield self.addressbookUnderTest()
self.assertEquals(
(yield addressbook1.addressbookObjectWithUID("xyzzy")),
None
)
@inlineCallbacks
def test_addressbooks(self):
"""
L{IAddressBookHome.addressbooks} returns an iterable of L{IAddressBook}
providers, which are consistent with the results from
L{IAddressBook.addressbookWithName}.
"""
# Add a dot directory to make sure we don't find it
# self.home1._path.child(".foo").createDirectory()
home = yield self.homeUnderTest()
addressbooks = list((yield home.addressbooks()))
for addressbook in addressbooks:
self.assertProvides(IAddressBook, addressbook)
self.assertEquals(
addressbook,
(yield home.addressbookWithName(addressbook.name()))
)
self.assertEquals(
set(c.name() for c in addressbooks),
set(home1_addressbookNames)
)
    @inlineCallbacks
    def test_loadAllAddressBooks(self):
        """
        L{IAddressBookHome.loadAddressBooks} returns an iterable of L{IAddressBook}
        providers, which are consistent with the results from
        L{IAddressBook.addressbookWithName}.
        """
        # Add a dot directory to make sure we don't find it
        # self.home1._path.child(".foo").createDirectory()
        home = yield self.homeUnderTest()
        # NOTE(review): docstring says ``loadAddressBooks`` but the call is
        # ``loadAddressbooks`` (lowercase 'b') -- confirm the API spelling.
        addressbooks = (yield home.loadAddressbooks())
        for addressbook in addressbooks:
            self.assertProvides(IAddressBook, addressbook)
            self.assertEquals(addressbook,
                (yield home.addressbookWithName(addressbook.name())))
        self.assertEquals(
            set(c.name() for c in addressbooks),
            set(home1_addressbookNames)
        )
        # loaded addressbooks should come with property stores attached
        for c in addressbooks:
            self.assertTrue(c.properties() is not None)
    @inlineCallbacks
    def test_createAddressBookObjectWithName_absent(self):
        """
        L{IAddressBook.createAddressBookObjectWithName} creates a new
        L{IAddressBookObject}.
        """
        addressbook1 = yield self.addressbookUnderTest()
        name = "4.vcf"
        # precondition: no object with this name yet
        self.assertIdentical((yield addressbook1.addressbookObjectWithName(name)), None)
        component = VComponent.fromString(vcard4_text)
        yield addressbook1.createAddressBookObjectWithName(name, component)
        # round-trip: the stored object must yield the same component back
        addressbookObject = yield addressbook1.addressbookObjectWithName(name)
        self.assertEquals((yield addressbookObject.component()), component)
        # notify is called prior to commit
        self.assertEquals(
            set(self.notifierFactory.history),
            set([
                ("/CardDAV/example.com/home1/", PushPriority.high),
                ("/CardDAV/example.com/home1/addressbook/", PushPriority.high),
            ])
        )
        yield self.commit()
@inlineCallbacks
def test_createAddressBookObjectWithName_exists(self):
"""
L{IAddressBook.createAddressBookObjectWithName} raises
L{AddressBookObjectNameAlreadyExistsError} if a addressbook object with the
given name already exists in that addressbook.
"""
yield self.failUnlessFailure(
maybeDeferred(
(yield self.addressbookUnderTest()).createAddressBookObjectWithName,
"1.vcf", VComponent.fromString(vcard4_text)),
ObjectResourceNameAlreadyExistsError
)
@inlineCallbacks
def test_createAddressBookObjectWithName_invalid(self):
"""
L{IAddressBook.createAddressBookObjectWithName} raises
L{InvalidAddressBookComponentError} if presented with invalid iAddressBook
text.
"""
yield self.failUnlessFailure(
maybeDeferred((yield self.addressbookUnderTest())
.createAddressBookObjectWithName,
"new", VComponent.fromString(vcard4notCardDAV_text)),
InvalidObjectResourceError
)
@inlineCallbacks
def test_setComponent_invalid(self):
"""
L{IAddressBookObject.setComponent} raises L{InvalidIAddressBookDataError} if
presented with invalid iAddressBook text.
"""
addressbookObject = (yield self.addressbookObjectUnderTest())
yield self.failUnlessFailure(
maybeDeferred(addressbookObject.setComponent,
VComponent.fromString(vcard4notCardDAV_text)),
InvalidObjectResourceError
)
@inlineCallbacks
def test_setComponent_uidchanged(self):
"""
L{IAddressBookObject.setComponent} raises
L{InvalidAddressBookComponentError} when given a L{VComponent} whose
UID does not match its existing UID.
"""
addressbook1 = yield self.addressbookUnderTest()
component = VComponent.fromString(vcard4_text)
addressbookObject = yield addressbook1.addressbookObjectWithName("1.vcf")
yield self.failUnlessFailure(
maybeDeferred(addressbookObject.setComponent, component),
InvalidObjectResourceError, InvalidUIDError,
)
    @inlineCallbacks
    def test_addressbookHomeWithUID_create(self):
        """
        L{IAddressBookStoreTransaction.addressbookHomeWithUID} with
        C{create=True} will create a addressbook home that doesn't exist yet.
        """
        txn = self.transactionUnderTest()
        noHomeUID = "xyzzy"
        addressbookHome = yield txn.addressbookHomeWithUID(
            noHomeUID,
            create=True
        )
        @inlineCallbacks
        def readOtherTxn():
            # Use a brand-new transaction on the saved store, so it can only
            # observe committed state.
            otherTxn = self.savedStore.newTransaction()
            self.addCleanup(otherTxn.commit)
            returnValue((yield otherTxn.addressbookHomeWithUID(noHomeUID)))
        self.assertProvides(IAddressBookHome, addressbookHome)
        # A concurrent transaction shouldn't be able to read it yet:
        self.assertIdentical((yield readOtherTxn()), None)
        yield self.commit()
        # But once it's committed, other transactions should see it.
        self.assertProvides(IAddressBookHome, (yield readOtherTxn()))
    @inlineCallbacks
    def test_setComponent(self):
        """
        L{AddressBookObject.setComponent} changes the result of
        L{AddressBookObject.component} within the same transaction.
        """
        component = VComponent.fromString(vcard1modified_text)
        addressbook1 = yield self.addressbookUnderTest()
        addressbookObject = yield addressbook1.addressbookObjectWithName("1.vcf")
        oldComponent = yield addressbookObject.component()
        # Sanity check: the replacement fixture really differs from the
        # stored data, so the assertions below are meaningful.
        self.assertNotEqual(component, oldComponent)
        yield addressbookObject.setComponent(component)
        self.assertEquals((yield addressbookObject.component()), component)
        # Also check a new instance
        addressbookObject = yield addressbook1.addressbookObjectWithName("1.vcf")
        self.assertEquals((yield addressbookObject.component()), component)
        # notify is called prior to commit
        self.assertEquals(
            set(self.notifierFactory.history),
            set([
                ("/CardDAV/example.com/home1/", PushPriority.high),
                ("/CardDAV/example.com/home1/addressbook/", PushPriority.high),
            ])
        )
        yield self.commit()
def checkPropertiesMethod(self, thunk):
"""
Verify that the given object has a properties method that returns an
L{IPropertyStore}.
"""
properties = thunk.properties()
self.assertProvides(IPropertyStore, properties)
@inlineCallbacks
def test_homeProperties(self):
"""
L{IAddressBookHome.properties} returns a property store.
"""
self.checkPropertiesMethod((yield self.homeUnderTest()))
@inlineCallbacks
def test_addressbookProperties(self):
"""
L{IAddressBook.properties} returns a property store.
"""
self.checkPropertiesMethod((yield self.addressbookUnderTest()))
@inlineCallbacks
def test_addressbookObjectProperties(self):
"""
L{IAddressBookObject.properties} returns a property store.
"""
self.checkPropertiesMethod((yield self.addressbookObjectUnderTest()))
    @inlineCallbacks
    def test_newAddressBookObjectProperties(self):
        """
        L{IAddressBookObject.properties} returns an empty property store for a
        addressbook object which has been created but not committed.
        """
        addressbook = yield self.addressbookUnderTest()
        yield addressbook.createAddressBookObjectWithName(
            "4.vcf", VComponent.fromString(vcard4_text)
        )
        # The freshly created (uncommitted) object starts with no properties.
        newEvent = yield addressbook.addressbookObjectWithName("4.vcf")
        self.assertEquals(newEvent.properties().items(), [])
    @inlineCallbacks
    def test_setComponentPreservesProperties(self):
        """
        L{IAddressBookObject.setComponent} preserves properties.
        (Some implementations must go to extra trouble to provide this
        behavior; for example, file storage must copy extended attributes from
        the existing file to the temporary file replacing it.)
        """
        propertyName = PropertyName("http://example.com/ns", "example")
        propertyContent = WebDAVUnknownElement("sample content")
        propertyContent.name = propertyName.name
        propertyContent.namespace = propertyName.namespace
        abobject = (yield self.addressbookObjectUnderTest())
        # Only set the property when the store supports per-object
        # properties; the assertions below assume that support is present.
        if abobject._parentCollection.objectResourcesHaveProperties():
            (yield self.addressbookObjectUnderTest()).properties()[
                propertyName] = propertyContent
            yield self.commit()
        # Sanity check; are properties even readable in a separate transaction?
        # Should probably be a separate test.
        self.assertEquals(
            (yield self.addressbookObjectUnderTest()).properties()[
                propertyName
            ],
            propertyContent)
        obj = yield self.addressbookObjectUnderTest()
        # NOTE(review): this local shadows any module-level vcard1_text
        # fixture -- confirm that is intentional.
        vcard1_text = yield obj._text()
        vcard1_text_withDifferentNote = vcard1_text.replace(
            "NOTE:CardDAV protocol updates",
            "NOTE:Changed"
        )
        # Sanity check; make sure the test has the right idea of the subject.
        self.assertNotEquals(vcard1_text, vcard1_text_withDifferentNote)
        newComponent = VComponent.fromString(vcard1_text_withDifferentNote)
        yield obj.setComponent(newComponent)
        # Putting everything into a separate transaction to account for any
        # caching that may take place.
        yield self.commit()
        self.assertEquals(
            (yield self.addressbookObjectUnderTest()).properties()[propertyName],
            propertyContent
        )
@inlineCallbacks
def test_dontLeakAddressbooks(self):
"""
Addressbooks in one user's addressbook home should not show up in another
user's addressbook home.
"""
homeNew = yield self.transactionUnderTest().addressbookHomeWithUID(
"homeNew", create=True
)
ab = yield homeNew.addressbookWithName("addressbook")
self.assertEquals((yield ab.addressbookObjects()), [])
@inlineCallbacks
def test_dontLeakObjects(self):
"""
Addressbook objects in one user's addressbook should not show up in another
user's via uid or name queries.
"""
home1 = yield self.homeUnderTest()
homeNew = yield self.transactionUnderTest().addressbookHomeWithUID(
"homeNew", create=True)
addressbook1 = yield home1.addressbookWithName("addressbook")
addressbook2 = yield homeNew.addressbookWithName("addressbook")
objects = list((yield (yield homeNew.addressbookWithName("addressbook")).addressbookObjects()))
self.assertEquals(objects, [])
for resourceName in self.requirements['home1']['addressbook'].keys():
obj = yield addressbook1.addressbookObjectWithName(resourceName)
self.assertIdentical(
(yield addressbook2.addressbookObjectWithName(resourceName)), None)
self.assertIdentical(
(yield addressbook2.addressbookObjectWithUID(obj.uid())), None)
| |
#
# Module providing various facilities to other parts of the package
#
# billiard/util.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
import sys
import errno
import functools
import atexit
try:
import cffi
except ImportError:
import ctypes
try:
    from subprocess import _args_from_interpreter_flags  # noqa
except ImportError:  # pragma: no cover
    # Older Pythons lack subprocess._args_from_interpreter_flags;
    # provide an equivalent fallback.
    def _args_from_interpreter_flags():  # noqa
        """Return a list of command-line arguments reproducing the current
        settings in sys.flags and sys.warnoptions."""
        # Maps sys.flags attribute names to their -X command-line letters.
        # 'py3k_warning' only exists on Python 2 interpreters.
        flag_opt_map = {
            'debug': 'd',
            'optimize': 'O',
            'dont_write_bytecode': 'B',
            'no_user_site': 's',
            'no_site': 'S',
            'ignore_environment': 'E',
            'verbose': 'v',
            'bytes_warning': 'b',
            'hash_randomization': 'R',
            'py3k_warning': '3',
        }
        args = []
        for flag, opt in flag_opt_map.items():
            v = getattr(sys.flags, flag)
            if v > 0:
                # Repeat the letter to match the flag's count (e.g. -vv).
                args.append('-' + opt * v)
        for opt in sys.warnoptions:
            args.append('-W' + opt)
        return args
from multiprocessing.util import ( # noqa
_afterfork_registry,
_afterfork_counter,
_exit_function,
_finalizer_registry,
_finalizer_counter,
Finalize,
ForkAwareLocal,
ForkAwareThreadLock,
get_temp_dir,
is_exiting,
register_after_fork,
_run_after_forkers,
_run_finalizers,
)
from .compat import get_errno
__all__ = [
    'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
    'log_to_stderr', 'get_temp_dir', 'register_after_fork',
    'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
    'SUBDEBUG', 'SUBWARNING',
]
# Constants from prctl.h
PR_GET_PDEATHSIG = 2
PR_SET_PDEATHSIG = 1
#
# Logging
#
# Custom levels interleaved with the stdlib logging levels (DEBUG=10,
# WARNING=30, ...): SUBDEBUG is finer than DEBUG, SUBWARNING sits between
# INFO and WARNING.
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
WARNING = 30
ERROR = 40
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
# Module-level logging state; configured lazily by get_logger().
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args, **kwargs):
    """Log *msg* at SUBDEBUG level, if module logging is initialised."""
    logger = _logger
    if logger is not None:
        logger.log(SUBDEBUG, msg, *args, **kwargs)


def debug(msg, *args, **kwargs):
    """Log *msg* at DEBUG level, if module logging is initialised."""
    logger = _logger
    if logger is not None:
        logger.log(DEBUG, msg, *args, **kwargs)


def info(msg, *args, **kwargs):
    """Log *msg* at INFO level, if module logging is initialised."""
    logger = _logger
    if logger is not None:
        logger.log(INFO, msg, *args, **kwargs)


def sub_warning(msg, *args, **kwargs):
    """Log *msg* at SUBWARNING level, if module logging is initialised."""
    logger = _logger
    if logger is not None:
        logger.log(SUBWARNING, msg, *args, **kwargs)


def warning(msg, *args, **kwargs):
    """Log *msg* at WARNING level, if module logging is initialised."""
    logger = _logger
    if logger is not None:
        logger.log(WARNING, msg, *args, **kwargs)


def error(msg, *args, **kwargs):
    """Log *msg* at ERROR level, if module logging is initialised."""
    logger = _logger
    if logger is not None:
        logger.log(ERROR, msg, *args, **kwargs)
def get_logger():
    '''
    Returns logger used by multiprocessing
    '''
    global _logger
    import logging
    # Serialize against other threads configuring logging concurrently.
    logging._acquireLock()
    try:
        if not _logger:
            _logger = logging.getLogger(LOGGER_NAME)
            _logger.propagate = 0
            logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
            logging.addLevelName(SUBWARNING, 'SUBWARNING')
            # XXX multiprocessing should cleanup before logging
            # Re-register _exit_function so it runs before logging's own
            # atexit shutdown (atexit handlers run in LIFO order).
            if hasattr(atexit, 'unregister'):
                atexit.unregister(_exit_function)
                atexit.register(_exit_function)
            else:
                # Python 2 has no atexit.unregister; manipulate the private
                # handler list directly instead.
                atexit._exithandlers.remove((_exit_function, (), {}))
                atexit._exithandlers.append((_exit_function, (), {}))
    finally:
        logging._releaseLock()
    return _logger
def log_to_stderr(level=None):
    '''
    Turn on logging and add a handler which prints to stderr
    '''
    global _log_to_stderr
    import logging
    logger = get_logger()
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(DEFAULT_LOGGING_FORMAT))
    logger.addHandler(handler)
    if level:
        logger.setLevel(level)
    _log_to_stderr = True
    return _logger
def get_pdeathsig():
    """
    Return the current value of the parent process death signal
    """
    if not sys.platform.startswith('linux'):
        # currently we support only linux platform.
        raise OSError()
    try:
        if 'cffi' in sys.modules:
            # cffi path: declare prctl() and read the signal back through
            # an int out-parameter.
            ffi = cffi.FFI()
            ffi.cdef("int prctl (int __option, ...);")
            arg = ffi.new("int *")
            C = ffi.dlopen(None)
            C.prctl(PR_GET_PDEATHSIG, arg)
            return arg[0]
        else:
            # ctypes fallback when cffi is unavailable.
            sig = ctypes.c_int()
            libc = ctypes.cdll.LoadLibrary("libc.so.6")
            libc.prctl(PR_GET_PDEATHSIG, ctypes.byref(sig))
            return sig.value
    except Exception:
        # NOTE(review): the original error detail is discarded here; callers
        # only see a bare OSError.
        raise OSError()
def set_pdeathsig(sig):
    """
    Set the parent process death signal of the calling process to sig
    (either a signal value in the range 1..maxsig, or 0 to clear).
    This is the signal that the calling process will get when its parent dies.
    This value is cleared for the child of a fork(2) and
    (since Linux 2.4.36 / 2.6.23) when executing a set-user-ID or set-group-ID binary.
    """
    if not sys.platform.startswith('linux'):
        # currently we support only linux platform.
        raise OSError()
    try:
        if 'cffi' in sys.modules:
            # cffi path: declare prctl() and pass the signal as an int.
            ffi = cffi.FFI()
            ffi.cdef("int prctl (int __option, ...);")
            C = ffi.dlopen(None)
            C.prctl(PR_SET_PDEATHSIG, ffi.cast("int", sig))
        else:
            # ctypes fallback when cffi is unavailable.
            libc = ctypes.cdll.LoadLibrary("libc.so.6")
            libc.prctl(PR_SET_PDEATHSIG, sig)
    except Exception:
        # NOTE(review): the original error detail is discarded here; callers
        # only see a bare OSError.
        raise OSError()
def _eintr_retry(func):
'''
Automatic retry after EINTR.
'''
@functools.wraps(func)
def wrapped(*args, **kwargs):
while 1:
try:
return func(*args, **kwargs)
except OSError as exc:
if get_errno(exc) != errno.EINTR:
raise
return wrapped
| |
from collections import namedtuple
import sys
import traceback
from src.shared.logconfig import newLogger
log = newLogger(__name__)
# Normal delimiter; separates tokens in messages.
TOKEN_DELIM = " "
# Used to indicate the start of an unsafe string; must be the first character
# after TOKEN_DELIM.
START_STRING = "|"
# Mapping from command word to Message (sub)classes.
# Populated by defineMessageType().
# Pylint thinks this is a constant, but pylint is wrong.
messagesByCommand = {} # pylint: disable=invalid-name
class Message(object):
    """
    Base class for all message types created by defineMessageType.
    Concrete subclasses are namedtuples whose fields are the message
    arguments.
    """
    # Filled in on each concrete subclass by defineMessageType.
    command = None
    argSpecs = None
    # Note: this doesn't seem to be necessary, but that might just be because
    # (I think) namedtuple overrides __new__ instead of __init__.
    # def __init__(self, *args):
    #     super(Message, self).__init__(*args)
    # For a message that was just deserialized, maybe we should cache the
    # original string and return it, rather than regenerating it?
    def serialize(self):
        """Return the wire-format string for this message."""
        return serializeMessage(self)
    def __str__(self):
        return self.serialize()
def defineMessageType(commandWord, argNamesAndSpecs):
    """
    Define a new message type.
    argNamesAndSpecs should be a list of tuples (name, spec), where name is the
    name of that argument and spec is an ArgumentSpecification object
    describing how it is encoded and decoded when the message is serialized and
    deserialized.

    Returns the new Message subclass and registers it in messagesByCommand.
    """
    if commandWord in messagesByCommand:
        raise ValueError("Message command {0!r} is already taken."
                         .format(commandWord))
    # Only the final argument may be unsafe, since an unsafe string consumes
    # the remainder of the message when tokenized.
    assert not any(nameSpec[1].unsafe for nameSpec in argNamesAndSpecs[:-1])
    # TODO: snake_case to BigCamelCase?
    # Ideally we'd choose this name so that it matches the actual message class
    # name. Or just override __str__ in Message.
    # Pylint thinks this is a variable, for valid reasons. But it's logically a
    # typename, so override the warning in this case.
    NamedTupleType = namedtuple( # pylint: disable=invalid-name
        commandWord + "_message_tuple",
        [nameSpec[0] for nameSpec in argNamesAndSpecs]
    )
    # Subclass from Message before NamedTupleType, so that we can override some
    # methods of NamedTupleType in Message. (We may want to do this with
    # __str__?)
    class NewMessageType(Message, NamedTupleType):
        command = commandWord
        argSpecs = [nameSpec[1] for nameSpec in argNamesAndSpecs]
        def __init__(self, *args):
            super(NewMessageType, self).__init__(*args)
    messagesByCommand[commandWord] = NewMessageType
    return NewMessageType
def serializeMessage(message):
    """
    Encode every argument of *message* and hand the resulting words to
    buildMessage, flagging the final argument as unsafe when its spec says
    so.
    """
    words = []
    assert len(message.argSpecs) == len(message)
    for spec, value in zip(message.argSpecs, message):
        encoded = spec.encode(value)
        # Single-word arguments come back as a bare string; normalize to a
        # sequence so extend() treats both cases uniformly.
        if spec.count == 1:
            encoded = (encoded,)
        words.extend(encoded)
    if message.argSpecs:
        lastIsUnsafe = message.argSpecs[-1].unsafe
    else:
        lastIsUnsafe = False
    return buildMessage(message.command, words, lastIsUnsafe=lastIsUnsafe)
# Note: errorOnFail might never be passed as False; currently all callers that
# don't want to crash still pass errorOnFail=True and just handle
# InvalidMessageError themselves.
def deserializeMessage(data, errorOnFail=True):
    """
    Parse the wire string data back into a Message instance.  On failure,
    raise InvalidMessageError when errorOnFail is true, else return None.
    """
    try:
        cmd, argStrings = tokenize(data)
        if cmd not in messagesByCommand:
            raise InvalidMessageError(data, "Unrecognized message command.")
        messageType = messagesByCommand[cmd]
        args = []
        for argSpec in messageType.argSpecs:
            if argSpec.count > len(argStrings):
                raise InvalidMessageError(data,
                                          "Not enough arguments for command.")
            if argSpec.count == 1:
                args.append(argSpec.decode(argStrings[0]))
            else:
                currWords = tuple(argStrings[:argSpec.count])
                args.append(argSpec.decode(currWords))
            # Consume the words used by this argument.
            argStrings = argStrings[argSpec.count:]
        if len(argStrings) > 0:
            raise InvalidMessageError(data, "Too many arguments for command.")
        return messageType(*args)
    # Python 2-only syntax; StandardError and sys.exc_type/exc_value/
    # exc_traceback do not exist on Python 3.
    except StandardError, exc:
        # Log the full traceback, noting where we are, much like what Twisted
        # does for an uncaught exception.
        stackBelow = traceback.format_exception(sys.exc_type, sys.exc_value,
                                                sys.exc_traceback)
        stackAbove = traceback.format_stack()
        # Remove the last stack entry from stackAbove, because that's the call
        # to format_stack() which isn't part of the exception's traceback.
        stackAbove = stackAbove[:-1]
        # Remove the first entry from stackBelow, because that's the "Traceback
        # (most recent call last)" line which we want at the top of the
        # traceback, rather than the middle.
        headerLine = stackBelow[0]
        stackBelow = stackBelow[1:]
        # Add an extra entry in the middle noting that this is where we caught
        # the exception.
        midLine = "--- <exception caught here> ---\n"
        fullStack = [headerLine] + stackAbove + [midLine] + stackBelow
        log.debug("Caught exception in deserializeMessage.\n" +
                  "".join(fullStack))
        if errorOnFail:
            # Reraise the exception, but converted (if necessary) to an
            # InvalidMessageError. This ensures that it'll be handled correctly
            # by the caller and not cause the program to crash unnecessarily.
            if isinstance(exc, InvalidMessageError):
                raise exc
            else:
                raise InvalidMessageError(data, str(exc))
        else:
            return None
class ArgumentSpecification(object):
    """
    Describes how a single logical message argument is converted between a
    Python object and the word(s) representing it in a serialized message
    string.  One logical argument may occupy several space-separated words
    (a position, for instance, uses one word per coordinate).
    """

    def __init__(self, numWords, decodeFunc, encodeFunc=None, unsafe=False):
        """
        Initialize an ArgumentSpecification.

        - numWords is how many words this argument occupies in a message
          string.
        - decodeFunc parses those words into a more useful object; it
          receives a tuple of strings when numWords > 1, otherwise a single
          string.
        - encodeFunc is the inverse; it returns a tuple of strings when
          numWords > 1, otherwise a single string.  It may be omitted for
          single-word arguments, in which case str() is used.
        - unsafe marks the last word of this argument as an unsafe string.
        """
        self.count = numWords
        self.decodeFunc = decodeFunc
        if encodeFunc is not None:
            self.encodeFunc = encodeFunc
        else:
            # Only single-word arguments may fall back to plain str().
            # TODO: Should we use repr instead?
            assert numWords == 1
            self.encodeFunc = str
        self.unsafe = unsafe

    def encode(self, arg):
        """
        Encode an object corresponding to this argument as its word(s):
        a single string when count == 1, otherwise a tuple (or list) of
        strings of length count.
        """
        encoded = self.encodeFunc(arg)
        if self.count == 1:
            assert isinstance(encoded, str)
        else:
            # A list from encodeFunc is close enough to a tuple here.
            assert isinstance(encoded, (tuple, list))
            assert len(encoded) == self.count
        return encoded

    def decode(self, words):
        """
        Parse word(s) back into this argument's object form.  words is a
        single string when count == 1, otherwise a tuple of strings.
        """
        if self.count == 1:
            assert isinstance(words, str)
        else:
            # deserializeMessage always hands us a tuple; a list here would
            # indicate a bug upstream, so insist on the exact type.
            assert isinstance(words, tuple)
            assert len(words) == self.count
        return self.decodeFunc(words)
# For an arbitrary string command and an arbitrary list of strings args,
# tokenize(buildMessage(command, args, lastIsUnsafe=<whatever>))
# should either return exactly (command, args) or raise an InvalidMessageError.
def tokenize(message):
    """
    Split a raw message string into (command, argumentWords).  Everything
    after the first TOKEN_DELIM + START_STRING pair is treated as one
    final, unsplit (unsafe) token.
    """
    if not message:
        raise InvalidMessageError(message, "Empty message.")
    if message.startswith(START_STRING):
        raise InvalidMessageError(message,
                                  "Message starts with unsafe string.")
    marker = TOKEN_DELIM + START_STRING
    index = message.find(marker)
    if index < 0:
        tokens = message.split(TOKEN_DELIM)
    else:
        unsafePart = message[index + len(marker):]
        tokens = message[:index].split(TOKEN_DELIM) + [unsafePart]
    if not all(tokens):
        log.warning("Empty token in %s.", tokens)
    return (tokens[0], tokens[1:])
# TODO: We can and should unit-test buildMessage.
def buildMessage(command, args, lastIsUnsafe=False):
    """
    Build a message from the given command and arguments. The arguments don't
    have to be strings; if they aren't, then they will be str()ed.
    If lastIsUnsafe, then the last argument is a potentially unsafe string (for
    example, something typed by the user) and will be specially delimited.
    Only the last argument is allowed to be unsafe.

    Raises InvalidMessageError if the command or any safe argument would
    not tokenize back out of the resulting message.
    """
    command = str(command)
    # List comprehension instead of map() so the slicing below behaves the
    # same on Python 2 and Python 3.
    args = [str(arg) for arg in args]
    lastArg = None
    if lastIsUnsafe:
        if not args:
            raise InvalidMessageError(command, "No arguments.")
        lastArg = args[-1]
        args = args[:-1]
    # Build the message. This has to come before the checking so that we have a
    # message to pass to any InvalidMessageErrors that we raise. Note that if
    # we raise an InvalidMessageError, the message we report with it isn't
    # actually a real message, because in that situation we've failed to build
    # a message. But it doesn't seem worth it to create yet another Exception
    # subclass, and InvalidMessageError seems more or less logically correct
    # for that type of error.
    message = TOKEN_DELIM.join([command] + args)
    if lastIsUnsafe:
        message += TOKEN_DELIM + START_STRING + lastArg
    # Check that (with the exception of the possible unsafe string at the end),
    # the message tokenizes correctly.
    def checkToken(token, tokenDesc):
        # A token containing the delimiter would split into several tokens.
        if TOKEN_DELIM in token:
            raise InvalidMessageError(message,
                                      "{0} may not contain {1!r}"
                                      .format(tokenDesc, TOKEN_DELIM))
        # Fixed [#45]: this previously tested `command` instead of `token`,
        # so an *argument* beginning with START_STRING slipped through and
        # would be mis-parsed as an unsafe string by tokenize().
        if token.startswith(START_STRING):
            raise InvalidMessageError(message,
                                      "{0} may not start with {1!r}"
                                      .format(tokenDesc, START_STRING))
    checkToken(command, "Command")
    for arg in args:
        checkToken(arg, "Argument")
    # Checks passed; this is a valid message.
    return message
# Functions used to report bad messages.
def illFormedEMessage(error, otherLog, clientId=None):
    """
    Log a warning for when a message string originating from an external source
    could not be parsed into a message, for example because it used a
    nonexistent command or passed the wrong number of arguments.
    """
    # TODO: This logic could probably be factored out I guess.
    if clientId is not None:
        sender = "client {}".format(clientId)
    else:
        sender = "external source"
    otherLog.warning("Received invalid message from %s: %s", sender, error)
def badEMessageCommand(message, otherLog, clientId=None):
    """
    Log a warning for a message originating from an external source, where the
    message is well-formed (valid command with the right number of arguments)
    but it was received by a part of the code that doesn't know how to handle
    that type of message.
    """
    if clientId is not None:
        sender = "client {}".format(clientId)
    else:
        sender = "external source"
    otherLog.warning("Could not handle message type from %s: %s",
                     sender, message.command)
def badEMessageArgument(message, otherLog, clientId=None, reason=""):
    """
    Log a warning for a message originating from an external source, where the
    message is well-formed and was received by a part of the code that knows
    how to handle that type of message, but one of the arguments to the message
    is invalid (for example, out of range).
    """
    if clientId is not None:
        sender = "client {}".format(clientId)
    else:
        sender = "external source"
    if reason:
        reason = "\n    " + reason
    otherLog.warning("Invalid argument to message from %s: %s%s",
                     sender, message, reason)
# In this case you should just let error propagate up, rather than catching it
# at all.
#
# def illFormedIMessage(error, otherLog, clientId=None):
# """
# Give an error for when a message string originating from an internal
# source could not be parsed into a message, for example because it used a
# nonexistent command or passed the wrong number of arguments.
# """
#
# otherLog.error("Received invalid message: %s", error)
# raise error
def badIMessageCommand(message, otherLog):
    """
    Give an error for when a message originating from an internal source is
    received by a part of the code that doesn't know how to handle that type of
    message.
    """
    error = ("Could not handle message type from internal source: {command}."
             .format(command=message.command))
    otherLog.error(error)
    raise InvalidMessageError(message.serialize(), error)
def badIMessageArgument(message, otherLog, reason=""):
    """
    Give an error for when a message originating from an internal source is
    received by a part of the code that knows how to handle that type of
    message, but one of the arguments to the message is invalid (for example,
    out of range).
    """
    suffix = "\n    " + reason if reason else ""
    otherLog.warning("Invalid argument in internal message: %s%s",
                     message, suffix)
class InvalidMessageError(StandardError):
    """
    Raised when a message string cannot be built or parsed; carries the
    offending message text alongside a human-readable description.
    """
    def __init__(self, badMessage, errorDesc):
        super(InvalidMessageError, self).__init__()
        self.badMessage = badMessage
        self.errorDesc = errorDesc
    def __str__(self):
        return "{desc} (Message is: {msg!r})".format(
            desc=self.errorDesc, msg=self.badMessage)
| |
#!/usr/bin/env python
"""
Extension for individual surveys.
"""
import os
import numpy as np
import pylab as plt
import pandas as pd
from collections import OrderedDict as odict
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.axisartist.grid_helper_curvelinear import GridHelperCurveLinear
from mpl_toolkits.axisartist import Subplot
import mpl_toolkits.axisartist as axisartist
import mpl_toolkits.axisartist.angle_helper as angle_helper
from skymap.utils import setdefaults,get_datadir,hpx_gal2cel
from skymap.core import Skymap,McBrydeSkymap,OrthoSkymap
from skymap.constants import DECAM
# Derived from telra,teldec of 10000 exposures
# Nominal centers (ra, dec in degrees) of the DES supernova fields.
DES_SN = odict([
    ('E1',dict(ra=7.874, dec=-43.010)),
    ('E2',dict(ra=9.500, dec=-43.999)),
    ('X1',dict(ra=34.476, dec=-4.931 )),
    ('X2',dict(ra=35.664, dec=-6.413 )),
    ('X3',dict(ra=36.449, dec=-4.601 )),
    ('S1',dict(ra=42.818, dec=0.000 )),
    ('S2',dict(ra=41.193, dec=-0.991 )),
    ('C1',dict(ra=54.274, dec=-27.113)),
    ('C2',dict(ra=54.274, dec=-29.090)),
    ('C3',dict(ra=52.647, dec=-28.101)),
])
# Label positions (degrees) for annotating the SN field groups on a map.
DES_SN_LABELS = odict([
    ('SN-E', dict(ra=15, dec=-38, ha='center')),
    ('SN-X', dict(ra=35, dec=-13, ha='center')),
    ('SN-S', dict(ra=55, dec=0, ha='center')),
    ('SN-C', dict(ra=57, dec=-36, ha='center')),
])
class SurveySkymap(Skymap):
    """Skymap extended with convenience methods for drawing the footprints
    and data products of individual surveys (DES, MagLiteS, BLISS, SMASH,
    DECaLS, ...).
    """
    def draw_maglites(self,**kwargs):
        """Draw the MagLiteS footprint"""
        # Caller kwargs take precedence; defaults only fill in the gaps.
        defaults=dict(color='blue', lw=2)
        setdefaults(kwargs,defaults)
        filename = os.path.join(get_datadir(),'maglites-poly.txt')
        self.draw_polygon(filename,**kwargs)
    def draw_bliss(self,**kwargs):
        """Draw the BLISS footprint"""
        defaults=dict(color='magenta', lw=2)
        setdefaults(kwargs,defaults)
        filename = os.path.join(get_datadir(),'bliss-poly.txt')
        # Multi-polygon file, hence draw_polygons (plural).
        self.draw_polygons(filename,**kwargs)
        #data = np.genfromtxt(filename,names=['ra','dec','poly'])
        #for p in np.unique(data['poly']):
        #    poly = data[data['poly'] == p]
        #    self.draw_polygon_radec(poly['ra'],poly['dec'],**kwargs)
    def draw_des(self,**kwargs):
        """ Draw the DES footprint. """
        # Delegates to the most recent footprint revision (round 19).
        return self.draw_des19(**kwargs)
    def draw_des13(self,**kwargs):
        """ Draw the DES footprint (round-13 polygon). """
        defaults=dict(color='red', lw=2)
        setdefaults(kwargs,defaults)
        filename = os.path.join(get_datadir(),'des-round13-poly.txt')
        return self.draw_polygon(filename,**kwargs)
    def draw_des17(self,**kwargs):
        """ Draw the DES footprint (round-17 polygon). """
        defaults=dict(color='blue', lw=2)
        setdefaults(kwargs,defaults)
        filename = os.path.join(get_datadir(),'des-round17-poly.txt')
        return self.draw_polygon(filename,**kwargs)
    def draw_des19(self,**kwargs):
        """ Draw the DES footprint (round-19 polygon). """
        defaults=dict(color='blue', lw=2)
        setdefaults(kwargs,defaults)
        filename = os.path.join(get_datadir(),'des-round19-poly.txt')
        return self.draw_polygon(filename,**kwargs)
    def draw_des_sn(self,**kwargs):
        """ Draw the DES supernova fields as DECam-sized circles. """
        defaults = dict(facecolor='none',edgecolor='k',lw=1,zorder=10)
        setdefaults(kwargs,defaults)
        for v in DES_SN.values():
            # This does the projection correctly, but fails at boundary
            self.tissot(v['ra'],v['dec'],DECAM,100,**kwargs)
    def draw_smash(self,**kwargs):
        """ Draw the SMASH fields. """
        defaults=dict(facecolor='none',color='k')
        setdefaults(kwargs,defaults)
        filename = os.path.join(get_datadir(),'smash_fields_final.txt')
        # Field centers are read from columns 4 and 5 of the data file.
        smash=np.genfromtxt(filename,dtype=[('ra',float),('dec',float)],usecols=[4,5])
        xy = self.proj(smash['ra'],smash['dec'])
        self.scatter(*xy,**kwargs)
    def draw_decals(self,**kwargs):
        """ Draw the DECaLS footprint. """
        defaults=dict(color='red', lw=2)
        setdefaults(kwargs,defaults)
        filename = os.path.join(get_datadir(),'decals-poly.txt')
        return self.draw_polygon(filename,**kwargs)
    def draw_jethwa(self,filename=None,log=True,**kwargs):
        """ Draw the Jethwa satellite HEALPix map (log10 by default).
        NOTE(review): falls back to hard-coded, user-specific data paths;
        the second datadir assignment deliberately overrides the first.
        """
        import healpy as hp
        if not filename:
            datadir = '/home/s1/kadrlica/projects/bliss/v0/data/'
            datadir = '/Users/kadrlica/bliss/observing/data'
            filename = os.path.join(datadir,'jethwa_satellites_n256.fits.gz')
        hpxmap = hp.read_map(filename)
        if log:
            return self.draw_hpxmap(np.log10(hpxmap),**kwargs)
        else:
            return self.draw_hpxmap(hpxmap,**kwargs)
    def draw_planet9(self,**kwargs):
        """ Draw smoothed Planet 9 search-region boundaries and the
        Cassini-constrained orbit points.
        NOTE(review): data paths are hard-coded and user-specific.
        """
        from scipy.interpolate import interp1d
        from scipy.interpolate import UnivariateSpline
        defaults=dict(color='b',lw=3)
        setdefaults(kwargs,defaults)
        datadir = '/home/s1/kadrlica/projects/bliss/v0/data/'
        datadir = '/Users/kadrlica/bliss/observing/data/'
        ra_lo,dec_lo=np.genfromtxt(datadir+'p9_lo.txt',usecols=(0,1)).T
        ra_lo,dec_lo = self.roll(ra_lo,dec_lo)
        # Wrap RA into [-180, 180] and reverse so the spline input increases.
        ra_lo += -360*(ra_lo > 180)
        ra_lo,dec_lo = ra_lo[::-1],dec_lo[::-1]
        ra_hi,dec_hi=np.genfromtxt(datadir+'p9_hi.txt',usecols=(0,1)).T
        ra_hi,dec_hi = self.roll(ra_hi,dec_hi)
        ra_hi += -360*(ra_hi > 180)
        ra_hi,dec_hi = ra_hi[::-1],dec_hi[::-1]
        # Smooth both boundary curves before plotting.
        spl_lo = UnivariateSpline(ra_lo,dec_lo)
        ra_lo_smooth = np.linspace(ra_lo[0],ra_lo[-1],360)
        dec_lo_smooth = spl_lo(ra_lo_smooth)
        spl_hi = UnivariateSpline(ra_hi,dec_hi)
        ra_hi_smooth = np.linspace(ra_hi[0],ra_hi[-1],360)
        dec_hi_smooth = spl_hi(ra_hi_smooth)
        #self.plot(ra_lo,dec_lo,latlon=True,**kwargs)
        #self.plot(ra_hi,dec_hi,latlon=True,**kwargs)
        self.plot(ra_lo_smooth,dec_lo_smooth,latlon=True,**kwargs)
        self.plot(ra_hi_smooth,dec_hi_smooth,latlon=True,**kwargs)
        # Overplot every 7th point of the orbit file.
        orb = pd.read_csv(datadir+'P9_orbit_Cassini.csv').to_records(index=False)[::7]
        kwargs = dict(marker='o',s=40,edgecolor='none',cmap='jet_r')
        self.scatter(*self.proj(orb['ra'],orb['dec']),c=orb['cassini'],**kwargs)
    def draw_ligo(self,filename=None, log=True,**kwargs):
        """ Draw the LIGO observing-bias heat map.
        NOTE(review): data paths are hard-coded and user-specific.
        """
        import healpy as hp
        from astropy.io import fits as pyfits
        if not filename:
            datadir = '/home/s1/kadrlica/projects/bliss/v0/data/'
            datadir = '/Users/kadrlica/bliss/observing/data'
            # Fixed: datadir has no trailing slash, so plain string
            # concatenation produced '.../dataobsbias_heatmap_semesterA.fits'.
            # Join the path properly (as draw_jethwa does).
            filename = os.path.join(datadir, 'obsbias_heatmap_semesterA.fits')
        hpxmap = pyfits.open(filename)[0].data
        if log: self.draw_hpxmap(np.log10(hpxmap))
        else: self.draw_hpxmap(hpxmap)
    def draw_sfd(self,filename=None,**kwargs):
        """ Draw the SFD dust map (log10), rotated from Galactic to
        celestial coordinates. """
        import healpy as hp
        defaults = dict(rasterized=True,cmap=plt.cm.binary)
        setdefaults(kwargs,defaults)
        if not filename:
            datadir = '/Users/kadrlica/bliss/observing/data/'
            filename = datadir+'lambda_sfd_ebv.fits'
        galhpx = hp.read_map(filename)
        # Rotate Galactic -> celestial before drawing.
        celhpx = hpx_gal2cel(galhpx)
        return self.draw_hpxmap(np.log10(celhpx),**kwargs)
# Concrete survey skymaps on specific projections.
class SurveyMcBryde(SurveySkymap,McBrydeSkymap): pass
class SurveyOrtho(SurveySkymap,OrthoSkymap): pass
# Original DES Formatter
# ADW: Why doesn't ZoomFormatter180 work?
class ZoomFormatterDES(angle_helper.FormatterDMS):
    """Tick formatter that wraps angles into [-180, 180] while keeping the
    sign of the incoming value for the printed integer degrees."""

    def __call__(self, direction, factor, values):
        values = np.asarray(values)
        signs = np.where(values >= 0, 1, -1)
        wrapped = np.mod(np.abs(values), 360)
        wrapped -= 360 * (wrapped > 180)
        return [self.fmt_d % (s * int(v),)
                for (s, v) in zip(signs, wrapped)]
class ZoomFormatter(angle_helper.FormatterDMS):
    """Base tick formatter printing integer degrees after angle wrapping."""

    def _wrap_angle(self, angle):
        # Identity by default; subclasses wrap into their preferred range.
        return angle

    def __call__(self, direction, factor, values):
        wrapped = self._wrap_angle(np.asarray(values))
        return [self.fmt_d % int(v) for v in wrapped]
class ZoomFormatter360(ZoomFormatter):
    """Formatter whose ticks are wrapped into the range [0, 360)."""

    def _wrap_angle(self, angle):
        return np.mod(angle, 360)
class ZoomFormatter180(ZoomFormatter):
    def _wrap_angle(self, angle):
        """Wrap tick-angle magnitudes into [-180, 180]."""
        wrapped = np.mod(np.abs(angle), 360)
        return wrapped - 360*(wrapped > 180)
class SurveyZoom(SurveyMcBryde):
    """McBryde survey map zoomed onto a rectangular RA/DEC frame.

    Subclasses select the region of interest by overriding FRAME and
    FIGSIZE; the curvilinear grid/labels are drawn by an AxisArtist
    grid helper created in create_axes.
    """
    # Corner coordinates of the zoom window: [[RA corners], [DEC corners]]
    FRAME = [[-50,-50,90,90],[10,-75,10,-75]]
    FIGSIZE=(8,5)

    def __init__(self, rect=None, *args, **kwargs):
        super(SurveyZoom,self).__init__(*args, **kwargs)
        # Overlay the AxisArtist axes used for curvilinear grid labels.
        self.create_axes(rect)

    @classmethod
    def figure(cls,**kwargs):
        """ Create a figure of proper size """
        defaults=dict(figsize=cls.FIGSIZE)
        setdefaults(kwargs,defaults)
        return plt.figure(**kwargs)

    # Grid lines/labels come from the AxisArtist grid helper instead, so the
    # base-class parallel/meridian drawing is disabled. ('self' arrives
    # through *args; the methods intentionally do nothing.)
    def draw_parallels(*args, **kwargs): return
    def draw_meridians(*args, **kwargs): return

    def set_axes_limits(self, ax=None):
        """Clip the axes to the projected FRAME corners; returns (xlim, ylim)."""
        if ax is None: ax = plt.gca()
        # Project the RA/DEC frame corners into map coordinates.
        x,y = self(*self.FRAME)

        ax.set_xlim(min(x),max(x))
        ax.set_ylim(min(y),max(y))
        ax.grid(True,linestyle=':',color='k',lw=0.5)

        # Fix the aspect ratio for full-sky projections
        if self.fix_aspect:
            ax.set_aspect('equal',anchor=self.anchor)
        else:
            ax.set_aspect('auto',anchor=self.anchor)

        return ax.get_xlim(),ax.get_ylim()

    def create_tick_formatter(self):
        # Hook for subclasses to supply a different tick-label formatter.
        return ZoomFormatter()

    def create_axes(self,rect=111):
        """
        Create a special AxisArtist to overlay grid coordinates.

        Much of this taken from the examples here:
        http://matplotlib.org/mpl_toolkits/axes_grid/users/axisartist.html
        """
        # from curved coordinate to rectlinear coordinate.
        def tr(x, y):
            x, y = np.asarray(x), np.asarray(y)
            return self(x,y)

        # from rectlinear coordinate to curved coordinate.
        def inv_tr(x,y):
            x, y = np.asarray(x), np.asarray(y)
            return self(x,y,inverse=True)

        # Cycle the coordinates
        extreme_finder = angle_helper.ExtremeFinderCycle(20, 20)

        # Find a grid values appropriate for the coordinate.
        # The argument is a approximate number of grid lines.
        grid_locator1 = angle_helper.LocatorD(9,include_last=False)
        #grid_locator1 = angle_helper.LocatorD(8,include_last=False)
        grid_locator2 = angle_helper.LocatorD(6,include_last=False)

        # Format the values of the grid
        tick_formatter1 = self.create_tick_formatter()
        tick_formatter2 = angle_helper.FormatterDMS()

        grid_helper = GridHelperCurveLinear((tr, inv_tr),
                                            extreme_finder=extreme_finder,
                                            grid_locator1=grid_locator1,
                                            grid_locator2=grid_locator2,
                                            tick_formatter1=tick_formatter1,
                                            tick_formatter2=tick_formatter2,
                                            )

        fig = plt.gcf()
        if rect is None:
            # This doesn't quite work. Need to remove the existing axis...
            rect = plt.gca().get_position()
            plt.gca().axis('off')
            ax = axisartist.Axes(fig,rect,grid_helper=grid_helper)
            fig.add_axes(ax)
        else:
            ax = axisartist.Subplot(fig,rect,grid_helper=grid_helper)
            fig.add_subplot(ax)

        ## Coordinate formatter
        def format_coord(x, y):
            return 'lon=%1.4f, lat=%1.4f'%inv_tr(x,y)
        ax.format_coord = format_coord
        # Label left/bottom/top edges; right edge stays unlabeled.
        ax.axis['left'].major_ticklabels.set_visible(True)
        ax.axis['right'].major_ticklabels.set_visible(False)
        ax.axis['bottom'].major_ticklabels.set_visible(True)
        ax.axis['top'].major_ticklabels.set_visible(True)

        ax.set_xlabel("Right Ascension")
        ax.set_ylabel("Declination")

        #self.set_axes_limits()

        self.axisartist = ax
        return fig,ax
class DESSkymapMcBryde(SurveyZoom):
    """Class for plotting a zoom on DES. This is relatively inflexible."""
    # RA, DEC frame limits
    FRAME = [[-50,-50,90,90],[10,-75,10,-75]]
    FIGSIZE=(8,5)

    def __init__(self, *args, **kwargs):
        defaults = dict(lon_0=0,celestial=True)
        setdefaults(kwargs,defaults)
        # BUGFIX: use this class's own name in super() instead of the
        # module-level 'DESSkymap' alias that is only bound after this
        # class body executes; the alias version breaks if the alias is
        # ever rebound and is fragile under subclassing.
        super(DESSkymapMcBryde,self).__init__(*args, **kwargs)

    def create_tick_formatter(self):
        return ZoomFormatterDES()
        #return ZoomFormatter180()
# Alias: the McBryde zoom is the default DES skymap.
DESSkymap = DESSkymapMcBryde
### These should be moved into streamlib
class DESSkymapQ1(DESSkymapMcBryde):
    """DES zoom on the Q1 subregion of the footprint."""
    # RA, DEC frame limits
    FRAME = [[10,-46],[-68,-38]]

    def draw_inset_colorbar(self, *args, **kwargs):
        # Inset colorbar placed in the lower-right corner (loc=4).
        opts = dict(loc=4, height="6%", width="20%",
                    bbox_to_anchor=(0,0.05,1,1))
        setdefaults(kwargs, opts)
        super(DESSkymapMcBryde, self).draw_inset_colorbar(*args, **kwargs)
class DESSkymapQ2(DESSkymapMcBryde):
    """DES zoom on the Q2 subregion of the footprint."""
    # RA, DEC frame limits
    FRAME = [[60,0],[8,-45]]

    def draw_inset_colorbar(self, *args, **kwargs):
        # Inset colorbar placed in the upper-left corner (loc=2).
        opts = dict(loc=2, width="30%", height="4%",
                    bbox_to_anchor=(0,-0.1,1,1))
        setdefaults(kwargs, opts)
        super(DESSkymapMcBryde, self).draw_inset_colorbar(*args, **kwargs)
class DESSkymapQ3(DESSkymapMcBryde):
    """DES zoom on the Q3 subregion of the footprint."""
    # RA, DEC frame limits
    FRAME = [[5,60],[-68,-38]]

    def draw_inset_colorbar(self, *args, **kwargs):
        # Inset colorbar placed in the lower-left corner (loc=3).
        opts = dict(loc=3, height="7%", bbox_to_anchor=(0,0.05,1,1))
        setdefaults(kwargs, opts)
        super(DESSkymapMcBryde, self).draw_inset_colorbar(*args, **kwargs)
class DESSkymapQ4(DESSkymapMcBryde):
    """DES zoom on the Q4 subregion of the footprint."""
    # RA, DEC frame limits
    FRAME = [[90,70],[-15,-55]]

    def draw_inset_colorbar(self, *args, **kwargs):
        # Inset colorbar placed in the lower-left corner (loc=3).
        opts = dict(loc=3, width="30%", height="4%",
                    bbox_to_anchor=(0,0.05,1,1))
        setdefaults(kwargs, opts)
        super(DESSkymapMcBryde, self).draw_inset_colorbar(*args, **kwargs)
class DESSkymapSPT(DESSkymapMcBryde):
    """DES zoom on the SPT region of the footprint."""
    # RA, DEC frame limits
    FRAME = [[-55,-55,95,95],[-35,-75,-35,-75]]
    FIGSIZE = (8,3)

    def draw_inset_colorbar(self, *args, **kwargs):
        # Inset colorbar placed in the lower-left corner (loc=3).
        opts = dict(loc=3, width="30%", height="4%",
                    bbox_to_anchor=(0,0.05,1,1))
        setdefaults(kwargs, opts)
        super(DESSkymapMcBryde, self).draw_inset_colorbar(*args, **kwargs)
class DESSkymapCart(SurveyZoom):
    """DES zoom drawn in the cylindrical ('cyl') projection."""
    # RA, DEC frame limits
    FRAME = [[-60,-60,100,100],[10,-75,10,-75]]
    FIGSIZE = (8,5)

    def __init__(self, *args, **kwargs):
        opts = dict(projection='cyl', celestial=True)
        setdefaults(kwargs, opts)
        super(DESSkymapCart, self).__init__(*args, **kwargs)

    def create_tick_formatter(self):
        # DES-style formatter (signed ticks wrapped into [-180, 180]).
        return ZoomFormatterDES()
class DESLambert(SurveySkymap):
    """Class for plotting a zoom on DES. This is relatively inflexible."""
    # RA, DEC frame limits
    FIGSIZE=(8,5)

    def __init__(self, *args, **kwargs):
        # Lambert azimuthal equal-area ('laea') projection with corners and
        # center hard-coded for the DES footprint.
        defaults = dict(projection='laea',lon_0=120,lat_0=-90,
                        llcrnrlon=-110,llcrnrlat=8,
                        urcrnrlon=60,urcrnrlat=-15,
                        round=False,celestial=False)
        setdefaults(kwargs,defaults)
        # NOTE(review): super(SurveySkymap, ...) skips SurveySkymap.__init__
        # in the MRO and dispatches to the next base class -- looks
        # deliberate, but confirm.
        super(SurveySkymap,self).__init__(*args, **kwargs)

    def draw_meridians(self,*args,**kwargs):
        """Draw meridians with labels on all four sides (every 60 deg by
        default), using a custom longitude label formatter."""
        def lon2str(deg):
            # This is a function just to remove some weird string formatting
            deg -= 360. * (deg >= 180)
            if (np.abs(deg) == 0):
                return r"$%d{}^{\circ}$"%(deg)
            elif (np.abs(deg) == 180):
                return r"$%+d{}^{\circ}$"%(np.abs(deg))
            else:
                return r"$%+d{}^{\circ}$"%(deg)

        #defaults = dict(labels=[1,1,1,1],labelstyle='+/-',
        #                fontsize=14,fmt=lon2str)
        defaults = dict(fmt=lon2str,labels=[1,1,1,1],fontsize=14)
        if not args:
            # No explicit meridian values given: draw every 60 degrees.
            defaults.update(meridians=np.arange(0,360,60))
        setdefaults(kwargs,defaults)

        #return self.drawmeridians(*args,**kwargs)
        return super(DESLambert,self).draw_meridians(*args,**kwargs)

    def draw_parallels(self,*args,**kwargs):
        """Draw parallels without built-in labels, then annotate each one
        manually at lon=0."""
        defaults = dict(labels=[0,0,0,0])
        setdefaults(kwargs,defaults)
        ret = super(DESLambert,self).draw_parallels(*args,**kwargs)

        ax = plt.gca()
        # Hand-placed labels (built-in labeling disabled above).
        for l in ret.keys():
            ax.annotate(r"$%+d{}^{\circ}$"%(l), self(0,l),xycoords='data',
                        xytext=(+5,+5),textcoords='offset points',
                        va='top',ha='left',fontsize=12)
        return ret

    def draw_inset_colorbar(self,*args,**kwargs):
        # Override only the default anchor of the inset colorbar.
        defaults = dict(bbox_to_anchor=(-0.01,0.07,1,1))
        setdefaults(kwargs,defaults)
        return super(DESLambert,self).draw_inset_colorbar(*args,**kwargs)
class DESPolarLambert(DESLambert):
    """Class for plotting a zoom on DES. This is relatively inflexible."""
    # RA, DEC frame limits
    FIGSIZE=(8,8)

    def __init__(self, *args, **kwargs):
        # South-polar Lambert azimuthal equal-area ('splaea') projection.
        defaults = dict(projection='splaea',lon_0=60,boundinglat=-20,
                        round=True,celestial=True,parallels=True)
        setdefaults(kwargs,defaults)
        # NOTE(review): super(SurveySkymap, ...) skips both DESLambert and
        # SurveySkymap __init__ in the MRO -- confirm this is intentional.
        super(SurveySkymap,self).__init__(*args, **kwargs)
class BlissSkymap(SurveyZoom):
    """Zoom on the BLISS footprint. This is relatively inflexible."""
    # RA, DEC frame limits
    FRAME = [[130,130,0,0],[-5,-55,-5,-55]]
    FIGSIZE = (12,3)
    defaults = dict(lon_0=-100)
    wrap_angle = 60

    def __init__(self, *args, **kwargs):
        setdefaults(kwargs, self.defaults)
        super(BlissSkymap, self).__init__(*args, **kwargs)

    def create_tick_formatter(self):
        # BLISS tick labels run 0..360.
        return ZoomFormatter360()
class DelveR1Skymap(SurveyZoom):
    """Class for plotting a zoom on DELVE R1. This is relatively inflexible."""
    # RA, DEC frame limits
    FRAME = [[110,110,-85,-85],[10,-75,10,-75]]
    FIGSIZE=(8,5)

    def __init__(self, *args, **kwargs):
        defaults = dict(lon_0=-155,celestial=True)
        setdefaults(kwargs,defaults)
        # BUGFIX: use this class's own name in super() instead of the
        # 'DelveSkymap' alias, which is only bound after this class body
        # executes and may be rebound later.
        super(DelveR1Skymap,self).__init__(*args, **kwargs)

    def create_tick_formatter(self):
        return ZoomFormatter360()
# Alias: the R1 zoom is the default DELVE skymap.
DelveSkymap = DelveR1Skymap
class MaglitesSkymap(SurveyOrtho):
    """Orthographic skymap centered at lat_0=-90 (MagLiteS)."""
    defaults = dict(SurveyOrtho.defaults,lat_0=-90,celestial=True)

    def draw_meridians(self,*args,**kwargs):
        """Draw meridians labeled on all four sides, flipping label signs
        for the celestial convention.

        NOTE(review): super(OrthoSkymap, ...) skips everything up to and
        including OrthoSkymap in the MRO -- confirm this is intended.
        """
        defaults = dict(labels=[1,1,1,1],fontsize=14,labelstyle='+/-')
        setdefaults(kwargs,defaults)
        # Popped so the underlying draw never sees 'cardinal'; the value
        # itself is unused here.
        cardinal = kwargs.pop('cardinal',False)
        meridict = super(OrthoSkymap,self).draw_meridians(*args,**kwargs)
        # We've switched to celestial, need to update meridian text
        for k,v in meridict.items():
            text = v[1][0].get_text()
            # Swap the leading sign of each longitude label.
            if text.startswith('-'): text = text.replace('-','+')
            elif text.startswith('+'): text = text.replace('+','-')
            v[1][0].set_text(text)
        return meridict
| |
from __future__ import print_function
import os
import sys
import time
import shutil
import logging
import datetime
import traceback
import subprocess
import numpy as np
from ..core.environ import environ
from ..core.logio import get_logger, add_filehandler, splash
from .tabular import TabularWriter
# Module-level optimization state (globals so run_job can update them).
IOPT = 0           # running count of objective-function evaluations
LASTEVALD = None   # directory of the most recent evaluation
BIGNUM = 1.E+20    # stand-in for +/- infinity in unbounded variable bounds
MAXITER = 50       # default maximum number of optimizer iterations
TOL = 1.E-06       # default convergence tolerance
MAX_ERR = None     # worst error seen so far; reused when an evaluation fails
class Optimizer(object):
    """Drive a scipy.optimize minimizer over a user-supplied objective.

    Creates an evaluation tree ``<job>.eval``, normalizes the optimization
    variables by their order of magnitude, runs the chosen minimizer, and
    records every evaluation through a TabularWriter.

    Parameters
    ----------
    job : str
        Job name, used for the evaluation directory and output files.
    func : callable
        Objective; called as ``func(x, names, evald, job, *funcargs)``.
    xinit : iterable of OptimizeVariable
        Variables to optimize.
    method : str
        One of 'simplex', 'powell', 'cobyla', 'brute'.
    maxiter, tolerance : number
        Iteration cap and convergence tolerance (reset to module defaults
        when <= 0).
    descriptors : list
        Response descriptors; the first is used when tabulating errors.
    funcargs : list, optional
        Extra positional arguments forwarded to ``func``.
    Ns : int
        Evaluations per dimension for 'brute' (clamped to >= 2).
    dryrun : bool/int
        If set, evaluate the objective once and return without optimizing.
    halt_on_err : bool
        Re-raise exceptions from the objective instead of penalizing them.
    """

    def __init__(self, job, func, xinit, method='simplex',
                 maxiter=MAXITER, tolerance=TOL, descriptors=None,
                 funcargs=None, Ns=10, dryrun=0, keep_intermediate=True,
                 halt_on_err=False):
        environ.raise_e = True
        environ.no_cutback = True

        global IOPT
        IOPT = 0

        self.job = job
        self.func = func
        self.ran = False
        self.dryrun = dryrun
        self.halt_on_err = halt_on_err

        d = os.path.realpath(os.getcwd())
        self.directory = d
        self.rootd = os.path.join(d, job + ".eval")
        self.output = os.path.join(self.rootd, job + '.edb')

        if not isinstance(descriptors, (list, tuple)):
            descriptors = [descriptors]
        self.descriptors = descriptors
        self.nresp = len(descriptors)

        # BUGFIX: 'funcargs=None' replaces the old mutable default list;
        # the copy below keeps the caller's list unaliased.
        if funcargs is None:
            funcargs = []
        if not isinstance(funcargs, (list, tuple)):
            funcargs = [funcargs]
        self.funcargs = [x for x in funcargs]

        # Number of evaluations per dimension for brute force optimizations.
        self.Ns = int(round(max(Ns, 2.0)))

        # check method
        valid = ('simplex', 'powell', 'cobyla', 'brute')
        if method.lower() not in valid:
            msg = 'Unknown optimization method {0!r}. '.format(method)
            msg += 'Choose from {0}'.format(','.join(valid))
            raise ValueError(msg)
        self.method = method.lower()

        # set up a fresh evaluation directory (any previous one is removed)
        if os.path.isdir(self.rootd):
            shutil.rmtree(self.rootd)
        os.makedirs(self.rootd)

        # basic logger
        logfile = os.path.join(self.rootd, self.job + '.log')
        logger = get_logger('optimize')
        add_filehandler(logger, logfile)
        splash(logger)

        # individual sims only log to file and not the console
        environ.parent_process = 1

        # check xinit
        self.names = []
        self.idata = []
        self.bounds = []
        for x in xinit:
            if not isinstance(x, OptimizeVariable):
                raise TypeError("all xinit must be of type OptimizeVariable")
            self.names.append(x.name)
            self.idata.append(x.initial_value)
            if x.bounds is not None:
                if self.method in ('simplex', 'powell'):
                    # logger.warning: 'warn' is the deprecated spelling
                    logger.warning('optimization method does not support bounds')
                    x.bounds = None
            self.bounds.append(x.bounds)

        if self.method in ('simplex', 'powell'):
            self.bounds = None

        if maxiter <= 0:
            logger.warning("maxiter < 0, setting to default value")
            maxiter = MAXITER
        self.maxiter = maxiter

        if tolerance <= 0:
            logger.warning("tolerance < 0, setting to default value")
            tolerance = TOL
        self.tolerance = tolerance

        self.tabular = TabularWriter(self.output, job)
        self.timing = {}

        # write summary to the log file
        str_pars = "\n".join("  {0}={1:.2g}".format(name, self.idata[i])
                             for (i, name) in enumerate(self.names))
        resp = "\n".join("  {0}".format(it) for it in self.descriptors)
        summary = """
summary of optimization job input
------- -- ------------ --- -----
      Job: {0}
   Method: {1}
Variables: {2:d}
{3}
Response descriptors:
{4}
""".format(self.job, self.method, len(self.names), str_pars, resp)
        logger.info(summary)

    def run(self):
        """Run the optimization job

        Set up directory to run the optimization job and call the minimizer
        """
        import scipy.optimize
        logger = logging.getLogger('optimize')
        self.timing["start"] = time.time()
        logger.info("{0}: Starting optimization jobs...".format(self.job))

        # optimization methods work best with numbers around 1, so we
        # normalize each variable by its order of magnitude and save the
        # multiplier; run_job applies it back before calling the objective.
        xfac = []
        for ival in self.idata:
            # BUGFIX: compute the power of ten directly from the exponent of
            # the scientific-notation representation instead of eval()-ing
            # a constructed string.
            exponent = int("{0:12.6E}".format(ival).split("E")[1])
            xfac.append(10.0 ** exponent)
        xfac = np.array(xfac)
        x0 = self.idata / xfac

        if self.bounds is not None:
            # user has specified bounds on the parameters to be optimized. Here,
            # we convert the bounds to inequality constraints (for cobyla) and
            # normalized bounds (for brute).
            # NOTE(review): assumes every entry of self.bounds is a (lo, hi)
            # pair; a None entry would raise here (pre-existing behavior).
            lcons, ucons = [], []
            normalized_bounds = []
            for ibnd, bound in enumerate(self.bounds):
                lbnd, ubnd = bound
                # Bind loop values as lambda defaults so each constraint
                # keeps its own index/bound (late-binding closure pitfall).
                lcons.append(lambda z, idx=ibnd, bnd=lbnd: z[idx]-bnd/xfac[idx])
                ucons.append(lambda z, idx=ibnd, bnd=ubnd: bnd/xfac[idx]-z[idx])
                normalized_bounds.append((lbnd/xfac[ibnd], ubnd/xfac[ibnd]))
            cons = lcons + ucons

        args = (self.func, self.funcargs, self.rootd, self.halt_on_err,
                self.job, self.names, self.descriptors, self.tabular, xfac)

        if self.dryrun:
            # do a dry run of the function
            err = run_job(x0, *args)
            # BUGFIX: 'err == np.nan' is always False (NaN never compares
            # equal to itself), so dry-run failures were never reported.
            if np.isnan(err):
                s = 'Optimization dry run failed'
                logger.error(s)
            else:
                s = 'Optimization dry run successful'
                logger.info(s)
            if environ.notebook:
                print(s)
            self.dryrun_error = err
            return

        if self.method == 'simplex':
            xopt = scipy.optimize.fmin(
                run_job, x0, xtol=self.tolerance, ftol=self.tolerance,
                maxiter=self.maxiter, args=args, disp=0)
        elif self.method == 'powell':
            xopt = scipy.optimize.fmin_powell(
                run_job, x0, xtol=self.tolerance, ftol=self.tolerance,
                maxiter=self.maxiter, args=args, disp=0)
        elif self.method == 'cobyla':
            xopt = scipy.optimize.fmin_cobyla(
                run_job, x0, cons, consargs=(), args=args, disp=0)
        elif self.method == 'brute':
            xopt = scipy.optimize.brute(
                run_job, normalized_bounds, args=args, Ns=self.Ns, disp=0,
                finish=None)

        # undo the normalization
        self.xopt = xopt * xfac

        self.timing["end"] = time.time()
        logger.info("\nOptimization jobs complete")
        self.finish()
        return

    def finish(self):
        """Finish the optimization job: close the tabular database, log a
        summary, write params.opt, and link 'final' to the last evaluation."""
        logger = logging.getLogger('optimize')
        self.tabular.close()
        self.ran = True

        opt_pars = "\n".join("  {0}={1:12.6E}".format(name, self.xopt[i])
                             for (i, name) in enumerate(self.names))
        opt_time = self.timing["end"] - self.timing["start"]
        summary = """
Summary of optimization results
------- -- ------------ -------
{0}: calculations completed ({1:.4f}s.)
Iterations: {2}
Optimized parameters
{3}
""".format(self.job, opt_time, IOPT, opt_pars)
        logger.info(summary)

        # write out optimized params
        with open(os.path.join(self.rootd, "params.opt"), "w") as fobj:
            for (i, name) in enumerate(self.names):
                fobj.write("{0} = {1: .18f}\n".format(name, self.xopt[i]))

        environ.parent_process = 0

        # Link directory 'final' to the last evaluation directory
        os.symlink(os.path.relpath(LASTEVALD, start=self.rootd),
                   os.path.join(self.rootd, "final"))

        if environ.notebook:
            print('\nDone')

    def todict(self):
        """Return {name: optimized value}, or None if the job has not run."""
        if not self.ran:
            return None
        return dict(zip(self.names, self.xopt))

    @property
    def duration(self):
        """Wall-clock duration of the optimization (None before it has run)."""
        if not self.ran:
            return None
        return self.timing["end"] - self.timing["start"]
def catd(d, i):
    """Return the path of the i-th evaluation directory under *d*
    (zero-padded to three digits, e.g. 'eval_007')."""
    return os.path.join(d, "eval_{0:03d}".format(i))
def run_job(xcall, *args):
    """Objective function

    Creates a directory to run the current job, runs the job, returns the
    value of the objective function determined.

    Parameters
    ----------
    xcall : array_like
        Normalized variable values proposed by the optimizer.
    *args : tuple
        (func, funcargs, rootd, halt_on_err, job, xnames, desc, tabular,
        xfac) as packed by Optimizer.run.

    Returns
    -------
    error : float
        Error in job (NaN when the evaluation fails before any success).
    """
    global IOPT, LASTEVALD, MAX_ERR
    logger = logging.getLogger('optimize')
    func, funcargs, rootd, halt_on_err, job, xnames, desc, tabular, xfac = args

    # Create and switch into a fresh evaluation directory.
    IOPT += 1
    evald = catd(rootd, IOPT)
    os.mkdir(evald)
    LASTEVALD = evald
    cwd = os.getcwd()
    os.chdir(evald)
    environ.simulation_dir = evald

    # write the params.in for this run
    x = xcall * xfac  # undo the normalization applied by Optimizer.run
    # BUGFIX: materialize the pairs as a list -- on Python 3, zip() returns
    # a one-shot iterator, and 'parameters' is consumed three times below
    # (file write, log message, tabular record).
    parameters = list(zip(xnames, x))
    with open("params.in", "w") as fobj:
        for name, param in parameters:
            fobj.write("{0} = {1: .18f}\n".format(name, param))

    logger.info("starting job {0} with {1}... ".format(
        IOPT, ",".join("{0}={1:.2g}".format(n, p) for n, p in parameters)),
        extra={'continued':1})
    if environ.notebook:
        print('\rRunning job {0}'.format(IOPT), end='')

    try:
        err = func(x, xnames, evald, job, *funcargs)
        logger.info("done (error={0:.4e})".format(err))
        stat = 0
        # Track the worst error seen so far; it is reused as the penalty
        # value when a later evaluation fails.
        if MAX_ERR is None:
            MAX_ERR = err
        MAX_ERR = max(MAX_ERR, err)
    except BaseException:
        string = traceback.format_exc()
        if not environ.notebook:
            logger.error("\nRun {0} failed with the following "
                         "exception:\n{1}".format(IOPT, string))
        else:
            logger.info("failed")
        if halt_on_err:
            logger.error("\n\nHalting optimization on error at user request.\n")
            raise  # re-raise previous error
        stat = 1
        # Penalize the failed run with the worst error seen so far
        # (NaN if nothing has succeeded yet).
        if MAX_ERR is None:
            err = np.nan
        else:
            err = MAX_ERR

    tabular.write_eval_info(IOPT, stat, evald, parameters, ((desc[0], err),))
    os.chdir(cwd)
    return err
class OptimizeVariable(object):
    """A named variable to be optimized: an initial value plus optional
    (lower, upper) bounds. A bound of None means unbounded on that side.

    Raises
    ------
    ValueError
        If bounds are malformed, inverted, or do not bracket the initial
        value.
    """

    def __init__(self, name, initial_value, bounds=None):
        self.name = name
        self.ival = initial_value  # initial value
        self.cval = initial_value  # current value
        self.bounds = bounds

        errors = 0
        # check bounds
        if bounds is not None:
            if not isinstance(bounds, (list, tuple, np.ndarray)):
                raise ValueError("expected bounds to be a tuple of length 2")
            if len(bounds) != 2:
                raise ValueError("expected bounds to be a tuple of length 2")
            # BUGFIX: copy to a mutable list; the original assigned into
            # 'bounds' in place, which raises TypeError for tuple input.
            bounds = list(bounds)
            if bounds[0] is None: bounds[0] = -BIGNUM
            if bounds[1] is None: bounds[1] = BIGNUM
            if bounds[0] > bounds[1]:
                errors += 1
                logging.error("{0}: upper bound < lower bound".format(name))
            # BUGFIX: the original test 'bounds[1] < iv < bounds[0]' can
            # never be True for valid bounds, so out-of-bracket initial
            # values were silently accepted. Check bracketing properly.
            if not (bounds[0] <= initial_value <= bounds[1]):
                errors += 1
                logging.error("{0}: initial value not bracketed "
                              "by bounds".format(name))
            if errors:
                raise ValueError("stopping due to previous errors")
            self.bounds = np.array(bounds)

    def __repr__(self):
        return "opt{0}({1})".format(self.name, self.initial_value)

    @property
    def current_value(self):
        return self.cval

    @property
    def initial_value(self):
        return self.ival
| |
__author__ = 'hfriedrich'
import luigi
import argparse
import luigi_evaluation_workflow
# This is the executed experiments script for the link prediction evaluation
# (e.g. on the 'testdataset_20141112' or a newer version).
# It executes the luigi_evaluation_workflow with the parameters specified below.
#
# how to run (for installation see README.MD):
# - Extract the test data set to a test data set folder
# - execute maven build (package) of this project to build the 'wonpreprocessing-1.0-SNAPSHOT-jar-with-dependencies.jar'
# - run this python script with its parameters
# CLI defaults for the primary RESCAL runs (rank / prediction threshold).
RESCAL_DEFAULT_PARAMS = ['--rank', '500', '--threshold', '0.02']
# CLI defaults for the '2'-suffixed RESCAL variant.
RESCAL2_DEFAULT_PARAMS = ['--rank2', '500', '--threshold2', '0.06']
# CLI defaults for the cosine / weighted-cosine runs (transitive off).
COSINE_DEFAULT_PARAMS = ['--costhreshold', '0.5', '--costransthreshold', '0.0', '--wcosthreshold', '0.6',
                         '--wcostransthreshold', '0.0']
def output_folder_config():
    """Path of the evaluation output folder inside the test data set."""
    return '{0}/evaluation'.format(args.testdataset)
def base_config():
    """Common CLI parameters shared by every evaluation run."""
    dataset = args.testdataset
    params = ['--lock-pid-dir', args.luigitmp, '--local-scheduler',
              '--gatehome', args.gatehome,
              '--inputfolder', dataset + '/data',
              '--connections', dataset + '/connections.txt']
    if args.python:
        params += ['--python', args.python]
    if args.java:
        params += ['--java', args.java]
    return params
# evaluate all algorithms in their default configuration
def default_all_eval():
    """Evaluate all algorithms with their default configurations."""
    out = output_folder_config()
    params = (['AllEvaluation'] + base_config()
              + RESCAL_DEFAULT_PARAMS + RESCAL2_DEFAULT_PARAMS + COSINE_DEFAULT_PARAMS
              + ['--outputfolder', out + '/results/default']
              + ['--tensorfolder', out + '/tensor', '--statistics'])
    luigi.run(params)
# evaluate the effect of masking all hub needs (needs that have more than a number of X connections)
def nohubneeds_eval():
    """Evaluate masking hub needs (needs with more than maxhubsize connections)."""
    out = output_folder_config()
    params = (['AllEvaluation'] + base_config()
              + ['--outputfolder', out + '/results/nohubneeds']
              + ['--tensorfolder', out + '/tensor'])
    defaults = RESCAL_DEFAULT_PARAMS + RESCAL2_DEFAULT_PARAMS + COSINE_DEFAULT_PARAMS
    tuned = (['--rank', '500', '--threshold', '0.03']
             + ['--rank2', '500', '--threshold2', '0.05']
             + ['--costhreshold', '0.4', '--costransthreshold', '0.0',
                '--wcosthreshold', '0.4', '--wcostransthreshold', '0.0'])
    # Run defaults and tuned thresholds for two hub-size cutoffs.
    for hubsize in ('10', '50'):
        luigi.run(params + ['--maxhubsize', hubsize] + defaults)
        luigi.run(params + ['--maxhubsize', hubsize] + tuned)
# evaluate the influence of the rank on the quality and performance of link prediction
def rank_eval():
    """Evaluate the influence of the RESCAL rank on quality and performance.

    For each rank, a few thresholds around the expected optimum are tried.
    """
    rank_thresholds = [(50, [0.001, 0.002, 0.003]),
                       (100, [0.005, 0.006, 0.007]),
                       (250, [0.01, 0.012, 0.015]),
                       (500, [0.012, 0.015, 0.02]),
                       (750, [0.015, 0.02, 0.025]),
                       (1000, [0.02, 0.025, 0.03]),
                       (2000, [0.02, 0.025, 0.03])]
    # Unpack the pairs directly (the original shadowed the builtin 'tuple').
    for rank, thresholds in rank_thresholds:
        for threshold in thresholds:
            params = (['RESCALEvaluation'] + base_config()
                      + ['--outputfolder', output_folder_config() + '/results/rank']
                      + ['--rank', str(rank), '--threshold', str(threshold)]
                      + ['--tensorfolder', output_folder_config() + '/tensor'])
            luigi.run(params)
# evaluate the influence of stopwords on the algorithms. This test executes the preprocessing without filtering out
# any stopwords (here in this case the effect might not be that big since only the subject line of emails is used as
# token input)
def no_stopwords():
    """Evaluate preprocessing without stopword filtering (separate GATE app)."""
    out = output_folder_config()
    params = (['AllEvaluation'] + base_config()
              + RESCAL_DEFAULT_PARAMS + RESCAL2_DEFAULT_PARAMS + COSINE_DEFAULT_PARAMS
              + ['--outputfolder', out + '/results/no_stopwords']
              + ['--gateapp', '../../src/main/resources/gate_no_stopwords/application.xgapp']
              + ['--tensorfolder', out + '/tensor_no_stopwords'])
    luigi.run(params)
# evaluate the transitive option of the cosine distance algorithms.
# That means taking connection information into account
def cosinetrans_eval():
    """Evaluate the transitive cosine option (uses connection information)."""
    trans_params = ['--costhreshold', '0.2', '--costransthreshold', '0.25',
                    '--wcosthreshold', '0.2', '--wcostransthreshold', '0.25']
    out = output_folder_config()
    params = (['CosineEvaluation'] + base_config() + trans_params
              + ['--outputfolder', out + '/results/cosinetrans']
              + ['--tensorfolder', out + '/tensor'])
    luigi.run(params)
# evaluate the influence of stemming on the algorithms
def stemming_eval():
    """Evaluate the influence of stemming on all algorithms."""
    out = output_folder_config()
    params = (['AllEvaluation'] + base_config()
              + RESCAL_DEFAULT_PARAMS + RESCAL2_DEFAULT_PARAMS + COSINE_DEFAULT_PARAMS
              + ['--stemming']
              + ['--outputfolder', out + '/results/stemming']
              + ['--tensorfolder', out + '/tensor_stem'])
    luigi.run(params)
# evaluate the effect of adding the content slice (computed by GATE, only take Noun-phrases, see gate app for details)
# to the RESCAL evaluation
def content_slice_eval():
    """Evaluate adding the content slice to the RESCAL evaluation."""
    out = output_folder_config()
    params = (['RESCALEvaluation'] + base_config()
              + ['--content', '--additionalslices', 'subject.mtx content.mtx']
              + ['--outputfolder', out + '/results/content']
              + ['--tensorfolder', out + '/tensor_content'])
    luigi.run(params + RESCAL_DEFAULT_PARAMS)
    luigi.run(params + ['--rank', '500', '--threshold', '0.03'])
# evaluate the effect of adding the category slice to the RESCAL evaluation
def category_slice_eval():
    """Evaluate adding the category slice (RESCAL and cosine variants)."""
    out = output_folder_config()
    common = (base_config() + ['--allneeds', args.testdataset + '/allneeds.txt']
              + ['--outputfolder', out + '/results/category']
              + ['--tensorfolder', out + '/tensor_category'])
    # RESCAL runs over a small threshold sweep.
    params = ['CategoryEvaluation'] + common
    for threshold in ('0.02', '0.03', '0.04'):
        luigi.run(params + ['--rank', '500', '--threshold', threshold])
    # Cosine runs: defaults, then two symmetric threshold settings.
    params = ['CategoryCosineEvaluation'] + common
    luigi.run(params + COSINE_DEFAULT_PARAMS)
    for cos in ('0.45', '0.4'):
        luigi.run(params + ['--costhreshold', cos, '--costransthreshold', '0.0',
                            '--wcosthreshold', cos, '--wcostransthreshold', '0.0'])
# evaluate the effect of adding the keyword slice to the RESCAL evaluation
def keyword_slice_eval():
    """Evaluate adding the keyword slice to the RESCAL evaluation."""
    out = output_folder_config()
    params = (['KeywordEvaluation'] + base_config()
              + ['--outputfolder', out + '/results/keyword']
              + ['--tensorfolder', out + '/tensor_keyword'])
    for threshold in ('0.02', '0.03', '0.04'):
        luigi.run(params + ['--rank', '500', '--threshold', threshold])
# evaluate the effect of adding the needtype slice to the RESCAL evaluation
def needtype_slice_eval():
    """Evaluate adding the needtype slice to the RESCAL evaluation."""
    out = output_folder_config()
    params = (['RESCALEvaluation'] + base_config() + RESCAL_DEFAULT_PARAMS
              + ['--needtypeslice']
              + ['--outputfolder', out + '/results/needtype']
              + ['--tensorfolder', out + '/tensor'])
    luigi.run(params)
# evaluate the effect of masking random connections instead of all connections of test needs
def maskrandom_eval():
    """Evaluate masking random connections instead of all test-need connections."""
    out = output_folder_config()
    params = (['RESCALEvaluation'] + base_config()
              + ['--outputfolder', out + '/results/maskrandom']
              + ['--maskrandom']
              + ['--tensorfolder', out + '/tensor'])
    for threshold in ('0.1', '0.2', '0.3'):
        luigi.run(params + ['--rank', '500', '--threshold', threshold])
# evaluate the effect of adding transitive connections to needs only one edge away (connects needs of the same type)
def transitive_eval():
    """Evaluate adding transitive connections to needs one edge away."""
    out = output_folder_config()
    params = (['RESCALEvaluation'] + base_config()
              + ['--outputfolder', out + '/results/transitive']
              + ['--tensorfolder', out + '/tensor']
              + ['--transitive'] + ['--maxhubsize', '10'])
    luigi.run(params + RESCAL_DEFAULT_PARAMS)
    luigi.run(params + ['--rank', '500', '--threshold', '0.03'])
# evaluate the influence of the number of input connections (chosen randomly) to learn from on the RESCALSIM algorithm
def connection_rescalsim_eval():
    """Evaluate the number of (randomly chosen) input connections to learn
    from, using the '2'-suffixed RESCAL variant with the connection slice."""
    out = output_folder_config()
    for count in (0, 1, 2, 5, 10):
        params = (['RESCALEvaluation'] + base_config() + RESCAL2_DEFAULT_PARAMS
                  + ['--maxconnections', str(count)]
                  + ['--outputfolder', out + '/results/connections']
                  + ['--tensorfolder', out + '/tensor']
                  + ['--connectionslice2'])
        luigi.run(params)
# evaluate the influence of the number of input connections (chosen randomly) to learn from on the RESCAL algorithm
def connections_rescal_eval():
    """Evaluate the number of (randomly chosen) input connections to learn
    from on the plain RESCAL runs, with and without lambda regularization.
    """
    def _sweep(con_thresholds, extra):
        # One run per (connection count, threshold) combination. The pairs
        # are unpacked directly (the original shadowed the builtin 'tuple')
        # and the duplicated loop body is shared here.
        for con, thresholds in con_thresholds:
            for threshold in thresholds:
                params = (['RESCALEvaluation'] + base_config()
                          + ['--rank', '500', '--threshold', str(threshold)]
                          + ['--maxconnections', str(con)]
                          + ['--outputfolder', output_folder_config() + '/results/connections']
                          + ['--tensorfolder', output_folder_config() + '/tensor']
                          + extra)
                luigi.run(params)

    _sweep([(1, [0.007, 0.01, 0.02]),
            (2, [0.01, 0.02]),
            (5, [0.02, 0.03]),
            (10, [0.02, 0.03])], [])
    _sweep([(10, [0.015, 0.02]),
            (20, [0.015, 0.02]),
            (50, [0.015, 0.02])],
           ['--lambdaA', '5.0', '--lambdaR', '5.0', '--lambdaV', '5.0'])
def num_needs_eval():
    """Evaluate the influence of the number of needs in the data set."""
    out = output_folder_config()
    params = (['AllEvaluation'] + base_config()
              + ['--outputfolder', out + '/results/num_needs']
              + ['--tensorfolder', out + '/tensor'])
    # (numneeds, threshold, threshold2, cosine threshold)
    configs = [('500', '0.06', '0.03', '0.6'),
               ('1000', '0.05', '0.04', '0.6'),
               ('2000', '0.04', '0.04', '0.5'),
               ('3000', '0.03', '0.05', '0.5')]
    for numneeds, thr, thr2, cos in configs:
        luigi.run(params
                  + ['--rank', '500', '--threshold', thr, '--numneeds', numneeds]
                  + ['--rank2', '500', '--threshold2', thr2]
                  + ['--costhreshold', cos, '--costransthreshold', '0.0']
                  + ['--wcosthreshold', cos, '--wcostransthreshold', '0.0'])
# evaluation for the combined version of cosine and rescal algorithm
def combine_eval():
    """Evaluate the combined cosine + RESCAL prediction."""
    out = output_folder_config()
    params = (['CombineCosineRescalEvaluation'] + base_config()
              + ['--outputfolder', out + '/results/combine']
              + ['--tensorfolder', out + '/tensor'])
    for cosine in ('0.2', '0.3'):
        luigi.run(params + ['--rank', '500', '--rescalthreshold', '0.02',
                            '--cosinethreshold', cosine])
# evaluation for the prediction intersection between cosine and rescal algorithm
def intersection_eval():
    """Evaluate the prediction intersection between cosine and RESCAL."""
    out = output_folder_config()
    params = (['IntersectionEvaluation'] + base_config()
              + ['--outputfolder', out + '/results/intersection']
              + ['--tensorfolder', out + '/tensor'])
    for cosine in ('0.5', '0.6'):
        for rescal in ('0.01', '0.005'):
            luigi.run(params + ['--rank', '500', '--rescalthreshold', rescal,
                                '--cosinethreshold', cosine])
# evaluate different configurations of rescal (init, conv, lambda parameters)
def rescal_configuration_eval():
    """Evaluate RESCAL configurations (init, conv, lambda parameters)."""
    out = output_folder_config()
    params = (['RESCALEvaluation'] + base_config()
              + ['--outputfolder', out + '/results/config']
              + ['--tensorfolder', out + '/tensor'])

    def run(extra):
        # Shared fixed parameters + one extra configuration per run.
        luigi.run(params + extra)

    run(RESCAL_DEFAULT_PARAMS)
    run(['--init', 'random'] + RESCAL_DEFAULT_PARAMS)
    run(['--lambdaA', '10.0'] + RESCAL_DEFAULT_PARAMS)
    run(['--lambdaR', '10.0'] + RESCAL_DEFAULT_PARAMS)
    run(['--lambdaV', '10.0'] + RESCAL_DEFAULT_PARAMS)
    run(['--lambdaA', '1.0', '--lambdaR', '1.0', '--lambdaV', '1.0'] + RESCAL_DEFAULT_PARAMS)
    run(['--lambdaA', '5.0', '--lambdaR', '5.0', '--lambdaV', '5.0']
        + ['--rank', '500', '--threshold', '0.015'])
    run(['--lambdaA', '10.0', '--lambdaR', '10.0', '--lambdaV', '10.0']
        + ['--rank', '500', '--threshold', '0.015'])
    run(['--lambdaA', '20.0', '--lambdaR', '20.0', '--lambdaV', '20.0']
        + ['--rank', '500', '--threshold', '0.001'])
    run(['--conv', '1e-6'] + RESCAL_DEFAULT_PARAMS)
# evaluate if the matching quality increases when overfitting is prevented with the lambda parameters and thus the
# rank can be increased
def lambda_rank_rescal_eval():
params = ['RESCALEvaluation'] + base_config() + \
['--outputfolder', output_folder_config() + '/results/lambda_rank'] + \
['--tensorfolder', output_folder_config() + '/tensor']
luigi.run(params + ['--lambdaA', '10.0', '--lambdaR', '10.0', '--lambdaV', '10.0'] +
['--rank', '500', '--threshold', '0.01'])
luigi.run(params + ['--lambdaA', '10.0', '--lambdaR', '10.0', '--lambdaV', '10.0'] +
['--rank', '1000', '--threshold', '0.01'])
params = ['CategoryEvaluation'] + base_config() + ['--allneeds', args.testdataset + '/allneeds.txt'] + \
['--outputfolder', output_folder_config() + '/results/lambda_rank'] + \
['--tensorfolder', output_folder_config() + '/tensor_category']
luigi.run(params + ['--rank', '500', '--threshold', '0.02'] +
['--lambdaA', '10.0', '--lambdaR', '10.0', '--lambdaV', '10.0'])
luigi.run(params + ['--rank', '1000', '--threshold', '0.02'] +
['--lambdaA', '10.0', '--lambdaR', '10.0', '--lambdaV', '10.0'])
def optimal_rescal_eval():
    """Evaluate the (so far) optimal configuration of RESCAL:
    category slice plus lambda regularization parameters."""
    params = ['CategoryEvaluation'] + base_config()
    params += ['--allneeds', args.testdataset + '/allneeds.txt']
    params += ['--outputfolder', output_folder_config() + '/results/optimal']
    params += ['--tensorfolder', output_folder_config() + '/tensor_category']
    # (threshold, uniform lambda) combinations at rank 500.
    for threshold, lam in (('0.02', '5.0'), ('0.025', '5.0'),
                           ('0.015', '10.0'), ('0.02', '10.0')):
        luigi.run(params + ['--rank', '500', '--threshold', threshold] +
                  ['--lambdaA', lam, '--lambdaR', lam, '--lambdaV', lam])
if __name__ == '__main__':
    # CLI processing
    parser = argparse.ArgumentParser(description='link prediction evaluation on a test data set')
    parser.add_argument('-testdataset',
                        action="store", dest="testdataset", required=True,
                        help="test data set folder of the evaluation (structure of folder must be like "
                             "e.g. testdataset_20141112.zip")
    parser.add_argument('-gatehome',
                        action="store", dest="gatehome", required=True,
                        help="folder to gate home")
    # Optional arguments.
    parser.add_argument('-luigitmp',
                        action="store", dest="luigitmp", required=False,
                        help="folder to luigi tmp folder")
    parser.add_argument('-java',
                        action='store', dest='java', required=False,
                        help='path to java')
    parser.add_argument('-python',
                        action='store', dest='python', required=False,
                        help='path to python')
    args = parser.parse_args()
    # run the experiments
    # NOTE(review): several *_eval() helpers read `args` as a module-level
    # global, so parse_args() must run before any of the calls below.  Each
    # call triggers multiple luigi runs; the full suite is long-running.
    default_all_eval()
    category_slice_eval()
    maskrandom_eval()
    content_slice_eval()
    optimal_rescal_eval()
    transitive_eval()
    nohubneeds_eval()
    keyword_slice_eval()
    no_stopwords()
    stemming_eval()
    needtype_slice_eval()
    rescal_configuration_eval()
    cosinetrans_eval()
    intersection_eval()
    combine_eval()
    connections_rescal_eval()
    connection_rescalsim_eval()
    num_needs_eval()
    lambda_rank_rescal_eval()
    rank_eval()
| |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import errno
import logging
import os
import sys
from contextlib import contextmanager
import mock
import psutil
from pants.pantsd.process_manager import (ProcessGroup, ProcessManager, ProcessMetadataManager,
swallow_psutil_exceptions)
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_file_dump
from pants.util.process_handler import subprocess
from pants_test.test_base import TestBase
# Common kwargs for mock.patch/mock.patch.object: autospec the target so the
# mock mirrors the real signature, and spec_set so unknown attributes error.
PATCH_OPTS = dict(autospec=True, spec_set=True)
def fake_process(**kwargs):
  """Return an autospecced ``psutil.Process`` mock with canned return values.

  Each keyword maps a ``psutil.Process`` method name (e.g. ``name``, ``pid``,
  ``status``) to the value that the corresponding mock method should return.
  """
  proc = mock.create_autospec(psutil.Process, spec_set=True)
  # A plain loop: the original used a list comprehension purely for its
  # side effects, which builds a throwaway list and obscures intent.
  for method_name, return_value in kwargs.items():
    getattr(proc, method_name).return_value = return_value
  return proc
class TestProcessGroup(TestBase):
  """Tests for ProcessGroup process iteration and psutil exception handling."""

  def setUp(self):
    super(TestProcessGroup, self).setUp()
    self.pg = ProcessGroup('test', metadata_base_dir=self.subprocess_dir)

  def test_swallow_psutil_exceptions(self):
    # The context manager must suppress psutil errors raised inside it.
    with swallow_psutil_exceptions():
      raise psutil.NoSuchProcess('test')

  def test_iter_processes(self):
    # With no filter, iter_processes yields everything process_iter yields.
    with mock.patch('psutil.process_iter', **PATCH_OPTS) as mock_process_iter:
      mock_process_iter.return_value = [5, 4, 3, 2, 1]
      items = [item for item in self.pg.iter_processes()]
      self.assertEqual(items, [5, 4, 3, 2, 1])

  def test_iter_processes_filter_raises_psutil_exception(self):
    """If the filter triggers a psutil exception, skip the proc and continue."""
    with mock.patch('psutil.process_iter', **PATCH_OPTS) as mock_process_iter:
      def noop():
        return True
      def raises():
        raise psutil.NoSuchProcess('a_test')
      mock_process_iter.return_value = [
        noop,
        raises,
        noop
      ]
      items = [item for item in self.pg.iter_processes(proc_filter=lambda p: p())]
      self.assertEqual([noop, noop], items)

  def test_iter_processes_process_iter_raises_psutil_exception(self):
    """If psutil.process_iter raises the exception, silently stop iteration."""
    def id_or_raise(o):
      if isinstance(o, Exception):
        raise o
      else:
        return o
    with mock.patch('psutil.process_iter', **PATCH_OPTS) as mock_process_iter:
      # The generator raises mid-iteration; items after the raise are lost.
      mock_process_iter.return_value= (id_or_raise(i)
                                       for i in ['first',
                                                 psutil.NoSuchProcess('The Exception'),
                                                 'never seen'])
      items = [item for item in self.pg.iter_processes()]
      self.assertEqual(['first'], items)

  def test_iter_processes_filtered(self):
    with mock.patch('psutil.process_iter', **PATCH_OPTS) as mock_process_iter:
      mock_process_iter.return_value = [5, 4, 3, 2, 1]
      items = [item for item in self.pg.iter_processes(lambda x: x != 3)]
      self.assertEqual(items, [5, 4, 2, 1])

  def test_iter_instances(self):
    # iter_instances wraps each matching process in a ProcessManager.
    with mock.patch('psutil.process_iter', **PATCH_OPTS) as mock_process_iter:
      mock_process_iter.return_value = [
        fake_process(name='a_test', pid=3, status=psutil.STATUS_IDLE),
        fake_process(name='b_test', pid=4, status=psutil.STATUS_IDLE)
      ]
      items = [item for item in self.pg.iter_instances()]
      self.assertEqual(len(items), 2)
      for item in items:
        self.assertIsInstance(item, ProcessManager)
        self.assertTrue('_test' in item.name)
class TestProcessMetadataManager(TestBase):
  """Tests for ProcessMetadataManager metadata read/write/await/purge."""

  NAME = '_test_'
  TEST_KEY = 'TEST'
  TEST_VALUE = '300'
  TEST_VALUE_INT = 300
  BUILDROOT = '/mock_buildroot/'

  def setUp(self):
    super(TestProcessMetadataManager, self).setUp()
    self.pmm = ProcessMetadataManager(metadata_base_dir=self.subprocess_dir)

  def test_maybe_cast(self):
    # None and uncastable values pass through unchanged; castable values convert.
    self.assertIsNone(self.pmm._maybe_cast(None, int))
    self.assertEqual(self.pmm._maybe_cast('3333', int), 3333)
    self.assertEqual(self.pmm._maybe_cast('ssss', int), 'ssss')

  def test_get_metadata_dir_by_name(self):
    self.pmm = ProcessMetadataManager(metadata_base_dir=self.BUILDROOT)
    self.assertEqual(self.pmm._get_metadata_dir_by_name(self.NAME, self.BUILDROOT),
                     os.path.join(self.BUILDROOT, self.NAME))

  def test_maybe_init_metadata_dir_by_name(self):
    with mock.patch('pants.pantsd.process_manager.safe_mkdir', **PATCH_OPTS) as mock_mkdir:
      self.pmm._maybe_init_metadata_dir_by_name(self.NAME)
      mock_mkdir.assert_called_once_with(
        self.pmm._get_metadata_dir_by_name(self.NAME, self.subprocess_dir))

  def test_readwrite_metadata_by_name(self):
    with temporary_dir() as tmpdir, \
         mock.patch('pants.pantsd.process_manager.get_buildroot', return_value=tmpdir):
      self.pmm.write_metadata_by_name(self.NAME, self.TEST_KEY, self.TEST_VALUE)
      self.assertEqual(
        self.pmm.read_metadata_by_name(self.NAME, self.TEST_KEY),
        self.TEST_VALUE
      )
      self.assertEqual(
        self.pmm.read_metadata_by_name(self.NAME, self.TEST_KEY, int),
        self.TEST_VALUE_INT
      )

  def test_deadline_until(self):
    with self.assertRaises(self.pmm.Timeout):
      with self.captured_logging(logging.INFO) as captured:
        self.pmm._deadline_until(lambda: False, 'the impossible', timeout=.5, info_interval=.1)
    self.assertTrue(4 <= len(captured.infos()) <= 6,
                    'Expected between 4 and 6 infos, got: {}'.format(captured.infos()))

  def test_wait_for_file(self):
    with temporary_dir() as td:
      test_filename = os.path.join(td, 'test.out')
      safe_file_dump(test_filename, 'test')
      self.pmm._wait_for_file(test_filename, timeout=.1)

  def test_wait_for_file_timeout(self):
    with temporary_dir() as td:
      with self.assertRaises(self.pmm.Timeout):
        self.pmm._wait_for_file(os.path.join(td, 'non_existent_file'), timeout=.1)

  def test_await_metadata_by_name(self):
    with temporary_dir() as tmpdir, \
         mock.patch('pants.pantsd.process_manager.get_buildroot', return_value=tmpdir):
      self.pmm.write_metadata_by_name(self.NAME, self.TEST_KEY, self.TEST_VALUE)
      # assertEqual, not the deprecated assertEquals alias (removed in py3.12).
      self.assertEqual(
        self.pmm.await_metadata_by_name(self.NAME, self.TEST_KEY, .1),
        self.TEST_VALUE
      )

  def test_purge_metadata(self):
    with mock.patch('pants.pantsd.process_manager.rm_rf') as mock_rm:
      self.pmm.purge_metadata_by_name(self.NAME)
      self.assertGreater(mock_rm.call_count, 0)

  def test_purge_metadata_error(self):
    with mock.patch('pants.pantsd.process_manager.rm_rf') as mock_rm:
      mock_rm.side_effect = OSError(errno.EACCES, os.strerror(errno.EACCES))
      with self.assertRaises(ProcessManager.MetadataError):
        self.pmm.purge_metadata_by_name(self.NAME)
      self.assertGreater(mock_rm.call_count, 0)
class TestProcessManager(TestBase):
  """Tests for ProcessManager: process introspection, subprocess output
  capture, pid/socket metadata, liveness checks, kill/terminate logic and
  the fork paths of daemonize/daemon_spawn."""

  def setUp(self):
    super(TestProcessManager, self).setUp()
    # N.B. We pass in `metadata_base_dir` here because ProcessManager (itself a non-task/non-
    # subsystem) depends on an initialized `GlobalOptions` subsystem for the value of
    # `--pants-subprocessdir` in the default case. This is normally provided by subsystem
    # dependencies in a typical pants run (and integration tests), but not in unit tests.
    # Thus, passing this parameter here short-circuits the subsystem-reliant path for the
    # purposes of unit testing without requiring adhoc subsystem initialization.
    self.pm = ProcessManager('test', metadata_base_dir=self.subprocess_dir)

  def test_process_properties(self):
    with mock.patch.object(ProcessManager, '_as_process', **PATCH_OPTS) as mock_as_process:
      mock_as_process.return_value = fake_process(name='name',
                                                  cmdline=['cmd', 'line'],
                                                  status='status')
      self.assertEqual(self.pm.cmdline, ['cmd', 'line'])
      self.assertEqual(self.pm.cmd, 'cmd')

  def test_process_properties_cmd_indexing(self):
    # An empty cmdline must not raise on indexing; cmd degrades to None.
    with mock.patch.object(ProcessManager, '_as_process', **PATCH_OPTS) as mock_as_process:
      mock_as_process.return_value = fake_process(cmdline='')
      self.assertEqual(self.pm.cmd, None)

  def test_process_properties_none(self):
    with mock.patch.object(ProcessManager, '_as_process', **PATCH_OPTS) as mock_asproc:
      mock_asproc.return_value = None
      self.assertEqual(self.pm.cmdline, None)
      self.assertEqual(self.pm.cmd, None)

  def test_get_subprocess_output(self):
    test_str = '333'
    self.assertEqual(self.pm.get_subprocess_output(['echo', '-n', test_str]), test_str)

  def test_get_subprocess_output_interleaved(self):
    # stderr ('9') is dropped by default, kept when ignore_stderr=False or
    # when redirected onto stdout.
    cmd_payload = 'import sys; ' + 'sys.stderr.write("9"); sys.stdout.write("3"); ' * 3
    cmd = [sys.executable, '-c', cmd_payload]
    self.assertEqual(self.pm.get_subprocess_output(cmd), '333')
    self.assertEqual(self.pm.get_subprocess_output(cmd, ignore_stderr=False), '939393')
    self.assertEqual(self.pm.get_subprocess_output(cmd, stderr=subprocess.STDOUT), '939393')

  def test_get_subprocess_output_oserror_exception(self):
    with self.assertRaises(self.pm.ExecutionError):
      self.pm.get_subprocess_output(['i_do_not_exist'])

  def test_get_subprocess_output_failure_exception(self):
    with self.assertRaises(self.pm.ExecutionError):
      self.pm.get_subprocess_output(['false'])

  def test_await_pid(self):
    with mock.patch.object(ProcessManager, 'await_metadata_by_name') as mock_await:
      self.pm.await_pid(5)
    mock_await.assert_called_once_with(self.pm.name, 'pid', 5, mock.ANY)

  def test_await_socket(self):
    with mock.patch.object(ProcessManager, 'await_metadata_by_name') as mock_await:
      self.pm.await_socket(5)
    mock_await.assert_called_once_with(self.pm.name, 'socket', 5, mock.ANY)

  def test_write_pid(self):
    with mock.patch.object(ProcessManager, 'write_metadata_by_name') as mock_write:
      self.pm.write_pid(31337)
    mock_write.assert_called_once_with(self.pm.name, 'pid', '31337')

  def test_write_socket(self):
    with mock.patch.object(ProcessManager, 'write_metadata_by_name') as mock_write:
      self.pm.write_socket('/path/to/unix/socket')
    mock_write.assert_called_once_with(self.pm.name, 'socket', '/path/to/unix/socket')

  def test_write_named_socket(self):
    with mock.patch.object(ProcessManager, 'write_metadata_by_name') as mock_write:
      self.pm.write_named_socket('pailgun', '31337')
    mock_write.assert_called_once_with(self.pm.name, 'socket_pailgun', '31337')

  def test_as_process(self):
    sentinel = 3333
    with mock.patch('psutil.Process', **PATCH_OPTS) as mock_proc:
      mock_proc.return_value = sentinel
      self.pm._pid = sentinel
      self.assertEqual(self.pm._as_process(), sentinel)

  def test_as_process_no_pid(self):
    fake_pid = 3
    with mock.patch('psutil.Process', **PATCH_OPTS) as mock_proc:
      mock_proc.side_effect = psutil.NoSuchProcess(fake_pid)
      self.pm._pid = fake_pid
      with self.assertRaises(psutil.NoSuchProcess):
        self.pm._as_process()

  def test_as_process_none(self):
    self.assertEqual(self.pm._as_process(), None)

  def test_is_alive_neg(self):
    with mock.patch.object(ProcessManager, '_as_process', **PATCH_OPTS) as mock_as_process:
      mock_as_process.return_value = None
      self.assertFalse(self.pm.is_alive())
      mock_as_process.assert_called_once_with(self.pm)

  def test_is_alive(self):
    with mock.patch.object(ProcessManager, '_as_process', **PATCH_OPTS) as mock_as_process:
      mock_as_process.return_value = fake_process(name='test', pid=3, status=psutil.STATUS_IDLE)
      self.assertTrue(self.pm.is_alive())
      mock_as_process.assert_called_with(self.pm)

  def test_is_alive_zombie(self):
    # A zombie process is not considered alive.
    with mock.patch.object(ProcessManager, '_as_process', **PATCH_OPTS) as mock_as_process:
      mock_as_process.return_value = fake_process(name='test', pid=3, status=psutil.STATUS_ZOMBIE)
      self.assertFalse(self.pm.is_alive())
      mock_as_process.assert_called_with(self.pm)

  def test_is_alive_zombie_exception(self):
    with mock.patch.object(ProcessManager, '_as_process', **PATCH_OPTS) as mock_as_process:
      mock_as_process.side_effect = psutil.NoSuchProcess(0)
      self.assertFalse(self.pm.is_alive())
      mock_as_process.assert_called_with(self.pm)

  def test_is_alive_stale_pid(self):
    # A pid reused by a differently-named process is treated as dead.
    with mock.patch.object(ProcessManager, '_as_process', **PATCH_OPTS) as mock_as_process:
      mock_as_process.return_value = fake_process(name='not_test', pid=3, status=psutil.STATUS_IDLE)
      self.pm._process_name = 'test'
      self.assertFalse(self.pm.is_alive())
      mock_as_process.assert_called_with(self.pm)

  def test_is_alive_extra_check(self):
    # A caller-supplied extra_check can veto liveness.
    def extra_check(process):
      return False
    with mock.patch.object(ProcessManager, '_as_process', **PATCH_OPTS) as mock_as_process:
      mock_as_process.return_value = fake_process(name='test', pid=3, status=psutil.STATUS_IDLE)
      self.assertFalse(self.pm.is_alive(extra_check))
      mock_as_process.assert_called_with(self.pm)

  def test_purge_metadata_aborts(self):
    # Purging while the process is alive must fail unless forced.
    with mock.patch.object(ProcessManager, 'is_alive', return_value=True):
      with self.assertRaises(self.pm.MetadataError):
        self.pm.purge_metadata()

  def test_purge_metadata_alive_but_forced(self):
    with mock.patch.object(ProcessManager, 'is_alive', return_value=True), \
         mock.patch('pants.pantsd.process_manager.rm_rf') as mock_rm_rf:
      self.pm.purge_metadata(force=True)
      self.assertGreater(mock_rm_rf.call_count, 0)

  def test_kill(self):
    with mock.patch('os.kill', **PATCH_OPTS) as mock_kill:
      self.pm._pid = 42
      self.pm._kill(0)
      mock_kill.assert_called_once_with(42, 0)

  def test_kill_no_pid(self):
    with mock.patch('os.kill', **PATCH_OPTS) as mock_kill:
      self.pm._kill(0)
      self.assertFalse(mock_kill.called, 'If we have no pid, kills should noop gracefully.')

  @contextmanager
  def setup_terminate(self):
    """Patch _kill/is_alive/purge_metadata and yield the three mocks."""
    with mock.patch.object(ProcessManager, '_kill', **PATCH_OPTS) as mock_kill, \
         mock.patch.object(ProcessManager, 'is_alive', **PATCH_OPTS) as mock_alive, \
         mock.patch.object(ProcessManager, 'purge_metadata', **PATCH_OPTS) as mock_purge:
      yield mock_kill, mock_alive, mock_purge
      self.assertGreater(mock_alive.call_count, 0)

  def test_terminate_quick_death(self):
    with self.setup_terminate() as (mock_kill, mock_alive, mock_purge):
      mock_kill.side_effect = OSError('oops')
      mock_alive.side_effect = [True, False]
      self.pm.terminate(kill_wait=.1)
      self.assertEqual(mock_kill.call_count, 1)
      self.assertEqual(mock_purge.call_count, 1)

  def test_terminate_quick_death_no_purge(self):
    with self.setup_terminate() as (mock_kill, mock_alive, mock_purge):
      mock_kill.side_effect = OSError('oops')
      mock_alive.side_effect = [True, False]
      self.pm.terminate(purge=False, kill_wait=.1)
      self.assertEqual(mock_kill.call_count, 1)
      self.assertEqual(mock_purge.call_count, 0)

  def test_terminate_already_dead(self):
    with self.setup_terminate() as (mock_kill, mock_alive, mock_purge):
      mock_alive.return_value = False
      self.pm.terminate(purge=True)
      self.assertEqual(mock_kill.call_count, 0)
      self.assertEqual(mock_purge.call_count, 1)

  def test_terminate_no_kill(self):
    # When the process never dies, every signal in KILL_CHAIN is attempted
    # and NonResponsiveProcess is raised without purging.
    with self.setup_terminate() as (mock_kill, mock_alive, mock_purge):
      mock_alive.return_value = True
      with self.assertRaises(self.pm.NonResponsiveProcess):
        self.pm.terminate(kill_wait=.1, purge=True)
      self.assertEqual(mock_kill.call_count, len(ProcessManager.KILL_CHAIN))
      self.assertEqual(mock_purge.call_count, 0)

  @contextmanager
  def mock_daemonize_context(self, chk_pre=True, chk_post_child=False, chk_post_parent=False):
    """Patch the fork/exit/session syscalls and the fork hooks, yield the
    os.fork mock, and afterwards verify the requested hook calls."""
    with mock.patch.object(ProcessManager, 'post_fork_parent', **PATCH_OPTS) as mock_post_parent, \
         mock.patch.object(ProcessManager, 'post_fork_child', **PATCH_OPTS) as mock_post_child, \
         mock.patch.object(ProcessManager, 'pre_fork', **PATCH_OPTS) as mock_pre, \
         mock.patch.object(ProcessManager, 'purge_metadata', **PATCH_OPTS) as mock_purge, \
         mock.patch('os._exit', **PATCH_OPTS), \
         mock.patch('os.chdir', **PATCH_OPTS), \
         mock.patch('os.setsid', **PATCH_OPTS), \
         mock.patch('os.waitpid', **PATCH_OPTS), \
         mock.patch('os.fork', **PATCH_OPTS) as mock_fork:
      yield mock_fork
      mock_purge.assert_called_once_with(self.pm)
      if chk_pre: mock_pre.assert_called_once_with(self.pm)
      if chk_post_child: mock_post_child.assert_called_once_with(self.pm)
      if chk_post_parent: mock_post_parent.assert_called_once_with(self.pm)

  def test_daemonize_parent(self):
    with self.mock_daemonize_context() as mock_fork:
      mock_fork.side_effect = [1, 1]    # Simulate the parent.
      self.pm.daemonize(write_pid=False)

  def test_daemonize_child(self):
    with self.mock_daemonize_context(chk_post_child=True) as mock_fork:
      mock_fork.side_effect = [0, 0]    # Simulate the child.
      self.pm.daemonize(write_pid=False)

  def test_daemonize_child_parent(self):
    with self.mock_daemonize_context(chk_post_parent=True) as mock_fork:
      mock_fork.side_effect = [0, 1]    # Simulate the childs parent.
      self.pm.daemonize(write_pid=False)

  def test_daemon_spawn_parent(self):
    with self.mock_daemonize_context(chk_post_parent=True) as mock_fork:
      mock_fork.return_value = 1        # Simulate the parent.
      self.pm.daemon_spawn()

  def test_daemon_spawn_child(self):
    with self.mock_daemonize_context(chk_post_child=True) as mock_fork:
      mock_fork.return_value = 0        # Simulate the child.
      self.pm.daemon_spawn()

  def test_callbacks(self):
    # For coverage.
    self.pm.pre_fork()
    self.pm.post_fork_child()
    self.pm.post_fork_parent()
| |
"""
- Rule for properties
1. Shortcuts: q, qdot, tau, t, ...
2. Numbers: ndofs, nframes, ...
"""
import os.path
import pydart_api as papi
import numpy as np
def init():
    """Initialize the pydart_api backend (delegates to papi.init)."""
    papi.init()
def create_world(step, skel_path=None):
    """Create a World with the given time step.

    If skel_path is given, the world (and its skeletons) are loaded from
    that .skel file instead of being created empty.
    """
    return World(step, skel_path)
class World(object):
    """Python wrapper around a pydart_api simulation world.

    Keeps the list of loaded skeletons and forwards time, stepping,
    contact and rendering queries to the C API through self.id.
    """

    def __init__(self, step, skel_path=None):
        self.skels = []           # all Skeleton wrappers in this world
        self.control_skel = None  # default skeleton exposed via .skel
        if skel_path is not None:
            # A .skel file may contain several skeletons; wrap each of them
            # and make the last one the control skeleton.
            self.id = papi.createWorldFromSkel(skel_path)
            nskels = self.num_skeletons()
            for i in range(nskels):
                self.add_skeleton_from_id(i, (i == nskels - 1))
        else:
            self.id = papi.createWorld(step)

    def add_skeleton(self, filename, friction=1.0, control=True):
        """Load a skeleton from file; optionally make it the control skeleton."""
        self.skels += [Skeleton(self, filename, friction)]
        if control:
            self.control_skel = self.skels[-1]

    def add_skeleton_from_id(self, _skel_id, control=True):
        """Wrap an already-loaded skeleton identified by its C API id."""
        self.skels += [Skeleton(self, None, None, _skel_id)]
        if control:
            self.control_skel = self.skels[-1]

    def num_skeletons(self):
        return papi.numSkeletons(self.id)

    @property
    def skel(self):
        """ returns the default control skeleton """
        return self.control_skel

    def time(self):
        """Current simulation time (shortcut: t)."""
        return papi.getWorldTime(self.id)

    @property
    def t(self):
        return self.time()

    def time_step(self):
        """Simulation time step (shortcut: dt)."""
        return papi.getWorldTimeStep(self.id)

    @property
    def dt(self):
        return self.time_step()

    def set_time_step(self, _time_step):
        papi.setWorldTimeStep(self.id, _time_step)

    @dt.setter
    def dt(self, _dt):
        self.set_time_step(_dt)

    def num_frames(self):
        """Number of simulated frames so far (shortcut: nframes)."""
        return papi.getWorldSimFrames(self.id)

    @property
    def nframes(self):
        return self.num_frames()

    def contacts(self):
        """Return the current contacts as a list of 7-element records.

        Each record is a flat slice of 7 floats from the C API buffer;
        the exact field layout is defined by papi.getWorldContacts.
        """
        n = papi.getWorldNumContacts(self.id)
        contacts = papi.getWorldContacts(self.id, 7 * n)
        return [contacts[7 * i: 7 * (i + 1)] for i in range(n)]

    def reset(self):
        papi.resetWorld(self.id)

    def step(self):
        """Advance the simulation by one time step."""
        papi.stepWorld(self.id)

    def set_frame(self, i):
        """Rewind/seek the simulation to a recorded frame index."""
        papi.setWorldSimFrame(self.id, i)

    def render(self):
        papi.render(self.id)

    def __repr__(self):
        return "<World.%d at %.4f>" % (self.id, self.t)
class Skeleton(object):
    """Python wrapper for a single pydart_api skeleton.

    Exposes dofs, bodies and dynamics quantities (mass matrix, positions,
    velocities, forces, COM, ...) with short property aliases (q, qdot,
    tau, M, C, ...) following the module's naming conventions.
    """

    def __init__(self, _world, _filename=None, _friction=None, _id=None):
        self.world = _world
        self.filename = _filename
        self.friction = _friction
        if self.filename is not None:
            # Load a new skeleton into the world ...
            self.id = papi.addSkeleton(self.world.id, _filename, _friction)
        else:
            # ... or wrap one that the world already loaded (see
            # World.add_skeleton_from_id).
            self.id = _id
        # Initialize dofs
        _ndofs = papi.getSkeletonNumDofs(self.world.id, self.id)
        self.dofs = [Dof(self, i) for i in range(_ndofs)]
        self.name_to_dof = {dof.name: dof for dof in self.dofs}
        # Initialize bodies
        _nbodies = papi.getSkeletonNumBodies(self.world.id, self.id)
        self.bodies = [Body(self, i) for i in range(_nbodies)]
        self.name_to_body = {body.name: body for body in self.bodies}
        # NOTE(review): self._tau is only assigned in set_forces(); calling
        # forces()/tau before the first set_forces() raises AttributeError.

    def set_joint_damping(self, _damping):
        papi.setSkeletonJointDamping(self.world.id, self.id, _damping)

    def num_dofs(self):
        return len(self.dofs)

    @property
    def ndofs(self):
        return self.num_dofs()

    def num_bodies(self):
        return len(self.bodies)

    @property
    def nbodies(self):
        return self.num_bodies()

    def mass(self):
        """Total skeleton mass (shortcut: m)."""
        return papi.getSkeletonMass(self.world.id, self.id)

    @property
    def m(self):
        return self.mass()

    def mass_matrix(self):
        """Mass matrix as an (ndofs, ndofs) numpy array (shortcut: M)."""
        M = np.zeros((self.ndofs, self.ndofs))
        papi.getSkeletonMassMatrix(self.world.id, self.id, M)
        return M

    @property
    def M(self):
        return self.mass_matrix()

    def positions(self):
        """Generalized positions (shortcut: q)."""
        return papi.getSkeletonPositions(self.world.id, self.id, self.ndofs)

    @property
    def q(self):
        return self.positions()

    def set_positions(self, _q):
        papi.setSkeletonPositions(self.world.id, self.id, _q)

    @q.setter
    def q(self, _q):
        """ Setter also updates the internal skeleton kinematics """
        self.set_positions(_q)

    def position_lower_limit(self):
        return papi.getSkeletonPositionLowerLimit(self.world.id,
                                                  self.id, self.ndofs)

    def position_upper_limit(self):
        return papi.getSkeletonPositionUpperLimit(self.world.id,
                                                  self.id, self.ndofs)

    @property
    def q_lo(self):
        return self.position_lower_limit()

    @property
    def q_hi(self):
        return self.position_upper_limit()

    def velocities(self):
        """Generalized velocities (shortcut: qdot)."""
        return papi.getSkeletonVelocities(self.world.id, self.id, self.ndofs)

    @property
    def qdot(self):
        return self.velocities()

    def set_velocities(self, _qdot):
        papi.setSkeletonVelocities(self.world.id, self.id, _qdot)

    @qdot.setter
    def qdot(self, _qdot):
        """ Setter also updates the internal skeleton kinematics """
        self.set_velocities(_qdot)

    def states(self):
        """Concatenated state [q, qdot] (shortcut: x)."""
        return np.concatenate((self.positions(), self.velocities()))

    @property
    def x(self):
        return np.concatenate((self.positions(), self.velocities()))

    def set_states(self, _x):
        # First ndofs entries are positions, the rest are velocities.
        self.set_positions(_x[:self.ndofs])
        self.set_velocities(_x[self.ndofs:])

    @x.setter
    def x(self, _x):
        self.set_states(_x)

    def coriolis_and_gravity_forces(self):
        """Combined Coriolis and gravity force vector (shortcut: c)."""
        return papi.getSkeletonCoriolisAndGravityForces(self.world.id,
                                                        self.id, self.ndofs)

    @property
    def c(self):
        return self.coriolis_and_gravity_forces()

    def constraint_forces(self):
        return papi.getSkeletonConstraintForces(self.world.id,
                                                self.id, self.ndofs)

    def body(self, query):
        """Look up a body by name (str) or index (int); None otherwise.

        NOTE(review): under Python 2 a unicode name will not match the
        isinstance(query, str) check and falls through to the error branch.
        """
        if isinstance(query, str):
            return self.name_to_body[query]
        elif isinstance(query, int):
            return self.bodies[query]
        else:
            print 'No find...', query
            return None

    def body_index(self, _name):
        return self.name_to_body[_name].id

    def dof_index(self, _name):
        return self.name_to_dof[_name].id

    def world_com(self):
        """Center of mass in world coordinates (shortcuts: C, COM)."""
        return papi.getSkeletonWorldCOM(self.world.id, self.id)

    @property
    def C(self):
        return self.world_com()

    @property
    def COM(self):
        return self.world_com()

    def world_com_velocity(self):
        return papi.getSkeletonWorldCOMVelocity(self.world.id, self.id)

    @property
    def Cdot(self):
        return self.world_com_velocity()

    def linear_momentum(self):
        """Linear momentum P = m * Cdot."""
        return self.Cdot * self.m

    @property
    def P(self):
        return self.linear_momentum()

    def forces(self):
        # NOTE(review): returns the last value passed to set_forces();
        # raises AttributeError if set_forces was never called.
        return self._tau

    @property
    def tau(self):
        return self.forces()

    def set_forces(self, _tau):
        # Cache the commanded forces so forces()/tau can return them later.
        self._tau = _tau
        papi.setSkeletonForces(self.world.id, self.id, _tau)

    @tau.setter
    def tau(self, _tau):
        self.set_forces(_tau)

    def force_lower_limit(self):
        return papi.getSkeletonForceLowerLimit(self.world.id,
                                               self.id, self.ndofs)

    def force_upper_limit(self):
        return papi.getSkeletonForceUpperLimit(self.world.id,
                                               self.id, self.ndofs)

    @property
    def tau_lo(self):
        return self.force_lower_limit()

    @property
    def tau_hi(self):
        return self.force_upper_limit()

    def approx_inertia(self, axis):
        """Calculates the point-masses approximated inertia
        with respect to the given axis """
        axis = np.array(axis) / np.linalg.norm(axis)
        I = 0
        C = self.C
        for body in self.bodies:
            d = body.C - C
            # Subtract the distance along the axis
            r_sq = np.linalg.norm(d) ** 2 - np.linalg.norm(d.dot(axis)) ** 2
            I += body.m * r_sq
        return I

    def approx_inertia_x(self):
        return self.approx_inertia([1, 0, 0])

    def approx_inertia_y(self):
        return self.approx_inertia([0, 1, 0])

    def approx_inertia_z(self):
        return self.approx_inertia([0, 0, 1])

    def external_contacts_and_body_id(self):
        """Return (contact, body_id) pairs for external contacts only.

        A contact id (field 6 of the contact record) seen on two bodies is
        an internal self-contact and is filtered out.
        """
        cid_cnt = dict()
        contacts = []
        for body in self.bodies:
            for c in body.contacts():
                contacts += [(c, body.id)]
                cid = int(c[6])
                if cid not in cid_cnt:
                    cid_cnt[cid] = 1
                else:
                    cid_cnt[cid] += 1
        return [(c, bid) for (c, bid) in contacts if cid_cnt[int(c[6])] < 2]

    def contacted_bodies(self):
        return [body for body in self.bodies if body.num_contacts() > 0]

    def world_cop(self):
        """Approximate center of pressure: mean COM of contacted bodies,
        or None when there is no contact."""
        bodies = self.contacted_bodies()
        if len(bodies) == 0:
            return None
        pos_list = [b.C for b in bodies]
        avg = sum(pos_list) / len(pos_list)
        return avg

    @property
    def COP(self):
        return self.world_cop()

    def contacted_body_names(self):
        return [body.name for body in self.contacted_bodies()]

    def render(self):
        papi.renderSkeleton(self.world.id, self.id)

    def render_with_color(self, r, g, b, a=1.0):
        papi.renderSkeletonWithColor(self.world.id, self.id, r, g, b, a)

    def __repr__(self):
        return '<Skel.%d.%s>' % (self.id, os.path.basename(self.filename))
class Body(object):
    """Wrapper for a single body node of a Skeleton.

    wid/sid/id identify (world, skeleton, body) in the C API; shortcut
    properties follow the module conventions (m, I, C, Cdot, T, J).
    """

    def __init__(self, _skel, _id):
        self.skel = _skel
        self._id = _id
        self.name = papi.getSkeletonBodyName(self.wid, self.sid, self.id)

    @property
    def id(self):
        # Index of this body within its skeleton.
        return self._id

    @property
    def wid(self):
        # Id of the owning world in the C API.
        return self.skel.world.id

    @property
    def sid(self):
        # Id of the owning skeleton in the C API.
        return self.skel.id

    def num_contacts(self):
        return papi.getBodyNodeNumContacts(self.wid, self.sid, self.id)

    def contacts(self):
        """Contacts on this body as a list of 7-element records (same
        layout as World.contacts)."""
        n = self.num_contacts()
        contacts = papi.getBodyNodeContacts(self.wid, self.sid, self.id, 7 * n)
        return [contacts[7 * i: 7 * (i + 1)] for i in range(n)]

    def mass(self):
        return papi.getBodyNodeMass(self.wid, self.sid, self.id)

    @property
    def m(self):
        return self.mass()

    def inertia(self):
        return papi.getBodyNodeInertia(self.wid, self.sid, self.id)

    @property
    def I(self):
        return self.inertia()

    def local_com(self):
        return papi.getBodyNodeLocalCOM(self.wid, self.sid, self.id)

    def world_com(self):
        return papi.getBodyNodeWorldCOM(self.wid, self.sid, self.id)

    @property
    def C(self):
        return self.world_com()

    def world_com_velocity(self):
        return papi.getBodyNodeWorldCOMVelocity(self.wid, self.sid, self.id)

    @property
    def Cdot(self):
        return self.world_com_velocity()

    def transformation(self):
        return papi.getBodyNodeTransformation(self.wid, self.sid, self.id)

    @property
    def T(self):
        return self.transformation()

    def world_linear_jacobian(self, offset=None):
        """Linear Jacobian (3 x ndofs) at an optional local offset
        (defaults to the origin)."""
        if offset is None:
            offset = np.zeros(3)
        J = np.zeros((3, self.skel.ndofs))
        papi.getBodyNodeWorldLinearJacobian(self.wid, self.sid,
                                            self.id, offset, J)
        return J

    @property
    def J(self):
        return self.world_linear_jacobian()

    def add_ext_force(self, f):
        papi.addBodyNodeExtForce(self.wid, self.sid, self.id, f)

    def add_ext_force_at(self, f, offset):
        papi.addBodyNodeExtForceAt(self.wid, self.sid, self.id, f, offset)

    def __repr__(self):
        return '<Body.%d.%s>' % (self.id, self.name)
class Dof(object):
    """Wrapper for a single degree of freedom of a Skeleton."""

    def __init__(self, _skel, _id):
        self.skel = _skel
        self.id = _id   # index of this dof within its skeleton
        self.name = papi.getSkeletonDofName(self.wid, self.sid, self.id)

    @property
    def wid(self):
        # Id of the owning world in the C API.
        return self.skel.world.id

    @property
    def sid(self):
        # Id of the owning skeleton in the C API.
        return self.skel.id

    def __repr__(self):
        return '<Dof.%s at %d>' % (self.name, self.id)
| |
import chainer
import chainer.functions as F
from chainer import testing
import numpy as np
import onnx
import pytest
from onnx_chainer import export
from onnx_chainer.testing import input_generator
from onnx_chainer_tests.helper import ONNXModelChecker
from onnx_chainer_tests.helper import ONNXModelTest
@testing.parameterize(
# cast
# {'ops': 'cast', 'input_shape': (1, 5),
# 'input_argname': 'x',
# 'args': {'typ': np.float16}},
{'ops': 'cast', 'input_shape': (1, 5),
'input_argname': 'x',
'args': {'typ': np.float64}},
# depth2space
{'ops': 'depth2space', 'input_shape': (1, 12, 6, 6),
'input_argname': 'X',
'args': {'r': 2}},
# pad
{'ops': 'pad', 'input_shape': (1, 2, 3, 4),
'input_argname': 'x',
'args': {'pad_width': ((0, 0), (0, 0), (2, 2), (2, 2)),
'mode': 'constant'},
'name': 'pad_constant'},
{'ops': 'pad', 'input_shape': (1, 2, 3, 4),
'input_argname': 'x',
'args': {'pad_width': ((0, 0), (0, 0), (2, 2), (2, 2)),
'mode': 'reflect'},
'name': 'pad_reflect'},
{'ops': 'pad', 'input_shape': (1, 2, 3, 4),
'input_argname': 'x',
'args': {'pad_width': ((0, 0), (0, 0), (2, 2), (2, 2)),
'mode': 'edge'},
'name': 'pad_edge'},
{'ops': 'pad', 'input_shape': (1, 2, 3, 4),
'input_argname': 'x',
'args': {'pad_width': ((1, 3), (2, 0), (7, 1), (4, 4)),
'mode': 'constant'},
'name': 'pad_imbalance_pad_width'},
{'ops': 'pad', 'input_shape': (1, 2, 3, 4),
'input_argname': 'x',
'args': {'pad_width': ((0, 0), (0, 0), (2, 2), (2, 2)),
'mode': 'constant',
'constant_values': -1},
'name': 'pad_with_constant_values'},
{'ops': 'pad', 'input_shape': (1, 2, 3, 4),
'input_argname': 'x',
'args': {'pad_width': 2,
'mode': 'constant'},
'name': 'pad_scalar_pad_width'},
# reshape
{'ops': 'reshape', 'input_shape': (1, 6),
'input_argname': 'x',
'args': {'shape': (1, 2, 1, 3)}},
# space2depth
{'ops': 'space2depth', 'input_shape': (1, 12, 6, 6),
'input_argname': 'X',
'args': {'r': 2}},
# split_axis
{'ops': 'split_axis', 'input_shape': (1, 6),
'input_argname': 'x',
'args': {'indices_or_sections': 2,
'axis': 1, 'force_tuple': True},
'name': 'split_axis_force_tuple_true'},
{'ops': 'split_axis', 'input_shape': (1, 6),
'input_argname': 'x',
'args': {'indices_or_sections': 2,
'axis': 1, 'force_tuple': False},
'name': 'split_axis_force_tuple_false'},
{'ops': 'split_axis', 'input_shape': (1, 6),
'input_argname': 'x',
'args': {'indices_or_sections': [1, 2], 'axis': 1},
'name': 'split_axis_list'},
# squeeze
{'ops': 'squeeze', 'input_shape': (1, 3, 1, 2),
'input_argname': 'x',
'args': {'axis': None},
'name': 'squeeze_axis_none'},
{'ops': 'squeeze', 'input_shape': (1, 3, 1, 2, 1),
'input_argname': 'x',
'args': {'axis': (2, 4)}},
# swapaxes
{'ops': 'swapaxes', 'input_shape': (2, 3, 4, 5),
'input_argname': 'x',
'args': {'axis1': 1, 'axis2': 2}},
{'ops': 'swapaxes', 'input_shape': (2, 3, 4, 5),
'input_argname': 'x',
'args': {'axis1': -3, 'axis2': -1}},
# tile
{'ops': 'tile', 'input_shape': (1, 5),
'input_argname': 'x',
'args': {'reps': (1, 2)}},
# transpose
{'ops': 'transpose', 'input_shape': (1, 5),
'input_argname': 'x',
'args': {'axes': None}},
# copy
{'ops': 'copy', 'input_shape': (1, 5),
'input_argname': 'x',
'args': {'dst': -1}},
# get_item
{'ops': 'get_item', 'input_shape': (2, 2, 3),
'input_argname': 'x',
'args': {'slices': slice(0, 2)},
'name': 'get_item_0to2'},
{'ops': 'get_item', 'input_shape': (2, 2, 3),
'input_argname': 'x',
'args': {'slices': (slice(1))},
'name': 'get_item_to1'},
{'ops': 'get_item', 'input_shape': (2, 2, 3),
'input_argname': 'x',
'args': {'slices': (slice(1, None))},
'name': 'get_item_1tonone'},
{'ops': 'get_item', 'input_shape': (2, 2, 3),
'input_argname': 'x',
'args': {'slices': 0},
'name': 'get_item_0'},
{'ops': 'get_item', 'input_shape': (2, 2, 3),
'input_argname': 'x',
'args': {'slices': -1},
'name': 'get_item_minus_1'},
{'ops': 'get_item', 'input_shape': (2, 2, 3),
'input_argname': 'x',
'args': {'slices': np.array(0)},
'name': 'get_item_npscalar0'},
{'ops': 'get_item', 'input_shape': (2, 2, 3),
'input_argname': 'x',
'args': {'slices': (None, slice(0, 2))},
'name': 'get_item_none_0to2'},
{'ops': 'get_item', 'input_shape': (2, 2, 3),
'input_argname': 'x',
'args': {'slices': (Ellipsis, slice(0, 2))},
'name': 'get_item_ellipsis_0to2'},
# get_item, combine newaxis, slice, single index, ellipsis
{'ops': 'get_item', 'input_shape': (2, 2, 3, 3, 3, 4),
'input_argname': 'x',
'args': {'slices': (0, None, Ellipsis, 0, None, slice(0, 2), None, 0)},
'name': 'get_item_complicated'},
{'ops': 'get_item', 'input_shape': (2, 2, 3),
'input_argname': 'x',
'args': {'slices': (slice(None), slice(0, 1), slice(None, 2))},
'name': 'get_item_start_from_none'},
# expand_dims
{'ops': 'expand_dims', 'input_shape': (3,),
'input_argname': 'x', 'args': {'axis': 0},
'name': 'expand_dims_0'},
{'ops': 'expand_dims', 'input_shape': (3,),
'input_argname': 'x', 'args': {'axis': 1},
'name': 'expand_dims_1'},
{'ops': 'expand_dims', 'input_shape': (3,),
'input_argname': 'x', 'args': {'axis': -2},
'name': 'expand_dims_minus2'},
# repeat
{'ops': 'repeat', 'input_shape': (3,),
'input_argname': 'x', 'args': {'repeats': 2},
'name': 'repeat_ndim1'},
{'ops': 'repeat', 'input_shape': (2, 3),
'input_argname': 'x', 'args': {'repeats': 2, 'axis': 1},
'name': 'repeat_with_axis'},
{'ops': 'repeat', 'input_shape': (2, 3),
'input_argname': 'x', 'args': {'repeats': 2},
'name': 'repeat_default_axis'},
# separate
{'ops': 'separate', 'input_shape': (2, 3),
'input_argname': 'x', 'args': {}, 'name': 'separate_axis0'},
{'ops': 'separate', 'input_shape': (2, 3),
'input_argname': 'x', 'args': {'axis': 1}, 'name': 'separate_axis1'},
{'ops': 'separate', 'input_shape': (1, 2, 3),
'input_argname': 'x', 'args': {}, 'name': 'separate_single_output'},
# moveaxis
{'ops': 'moveaxis', 'input_shape': (2, 3, 4, 5),
'input_argname': 'x', 'args': {'source': 0, 'destination': -1}},
{'ops': 'moveaxis', 'input_shape': (2, 3, 4, 5),
'input_argname': 'x', 'args': {'source': (0, 3), 'destination': (2, 0)}},
# rollaxis
{'ops': 'rollaxis', 'input_shape': (2, 3, 4, 5),
'input_argname': 'x', 'args': {'axis': 2, 'start': 0}},
)
class TestArrayOperators(ONNXModelTest):
    """Drive each parameterized F.<ops> call through ONNX export."""
    def setUp(self):
        class Model(chainer.Chain):
            """Chain that applies a single functions-module op to its input."""
            def __init__(self, ops, args, input_argname):
                super(Model, self).__init__()
                self.ops = getattr(F, ops)
                self.args = args
                self.input_argname = input_argname
            def __call__(self, x):
                # Bind the runtime input under its configured keyword name,
                # then invoke the op with the full keyword set.
                self.args[self.input_argname] = x
                return self.ops(**self.args)
        self.model = Model(self.ops, self.args, self.input_argname)
        self.x = input_generator.increasing(*self.input_shape)
    def test_output(self):
        # Parameter sets without an explicit 'name' fall back to the op name.
        name = getattr(self, 'name', self.ops)
        self.expect(
            self.model, self.x, name=name, expected_num_initializers=0)
class TestGetItem(ONNXModelChecker):
    """F.get_item export patterns that lower to Gather/GatherND/Slice."""
    # When chainer.testing.parameterize is used with list or ndarray parameter,
    # it causes regex warning. To resolve, use pytest's parameterize.
    @pytest.mark.parametrize(
        'name,slices', [
            ('gather_axis0', ([[0, 1], [0, 1]],)),
            ('gather_axis1', (slice(None), [[0, 1], [1, 2]], slice(None))),
            ('gather_axis2', (slice(None), slice(None), [[0, 1], [1, 2]])),
            ('gather_ndarray', (
                Ellipsis, np.array([[0, 1], [1, 2]], dtype=np.int64))),
            ('gather_before_squeezed', (slice(None), 0, [[0, 1], [2, 3]])),
            ('gather_after_squeezed', (slice(None), [[0, 1], [1, 2]], 0)),
            ('gather_unsqueezed', (
                slice(None), None, [[0, 1], [1, 2]], slice(None))),
            ('gathernd', [[0, 1], [1, 2]]),
            ('gathernd_slice_none', [[0, 1], [0, 1], slice(None)]),
            ('gathernd_full_idx', [[0, 1], [0, 1], [2, 3]]),
            ('gathernd_before_slice', [0, [0, 1], [2, 3]]),
            ('gathernd_after_slice', [[0, 1], [0, 2], 0]),
            ('gathernd_unsqueezed', [[0, 1], [0, 2], None])
        ])
    def test_get_item_gather(self, name, slices):
        # 'gathernd*' cases skip opsets 7-10; the GatherND-based lowering is
        # only exercised from opset 11 (see the skipif in TestGetItemError).
        skip_opsets = None
        if name.startswith('gathernd'):
            skip_opsets = tuple(range(7, 11))
        name = 'get_item_' + name
        model = chainer.Sequential(
            lambda x: F.get_item(x, slices=slices))
        x = input_generator.increasing(2, 3, 4)
        self.expect(
            model, x, name=name, expected_num_initializers=0,
            skip_opset_version=skip_opsets)
    @pytest.mark.parametrize(
        'name,slices', [
            ('step1', [slice(1, None, 1)]),
            ('step2', [slice(None, None, None), slice(None, 4, 2)]),
            ('step_neg1', [slice(None, None, -1)]),
            ('step_neg2', [slice(None, None, None), slice(4, None, -2)]),
        ])
    def test_get_item_slice_step(self, name, slices):
        # Strided slices are skipped on opsets 7-10 — presumably these need
        # the newer Slice operator with step support; TODO confirm.
        skip_opsets = tuple(range(7, 11))
        name = 'get_item_' + name
        model = chainer.Sequential(
            lambda x: F.get_item(x, slices=slices))
        x = input_generator.increasing(2, 3, 4)
        self.expect(
            model, x, name=name, expected_num_initializers=0,
            skip_opset_version=skip_opsets)
class TestGetItemError(object):
    """Slicing patterns the exporter must reject with ValueError."""
    @pytest.mark.parametrize('slices', [
        [[0, 1], [1, 2]], [slice(None, None, 2)]
    ])
    def test_get_item_unsupported(self, slices):
        # Advanced (list) indexing and strided slices cannot be exported at
        # opset 7.
        model = chainer.Sequential(
            lambda x: F.get_item(x, slices=slices))
        x = input_generator.increasing(2, 3, 4)
        with pytest.raises(ValueError):
            export(model, x, opset_version=7)
    @pytest.mark.skipif(
        onnx.defs.onnx_opset_version() < 11, reason='not support GatherND')
    @pytest.mark.parametrize(
        'slices', [
            [[0, 1], 0, [0, 1]],
            [slice(None), [0, 1], [0, 1]],
            [None, [0, 1], [0, 1]]
        ]
    )
    def test_get_item_unsupported_advanced_index(self, slices):
        # Mixing list indices with scalar/slice/None indices stays
        # unsupported even when GatherND is available (default opset).
        model = chainer.Sequential(
            lambda x: F.get_item(x, slices=slices))
        x = input_generator.increasing(2, 3, 4)
        with pytest.raises(ValueError):
            export(model, x)
class TestConcat(ONNXModelTest):
    """Export check for F.concat joining two 2-D inputs."""
    def setUp(self):
        class Model(chainer.Chain):
            def __init__(self):
                super(Model, self).__init__()
            def __call__(self, x1, x2):
                # Default-axis concatenation; inputs differ only on axis 1.
                return F.concat((x1, x2))
        self.model = Model()
        self.x1 = input_generator.increasing(2, 5)
        self.x2 = input_generator.increasing(2, 4)
    def test_output(self):
        inputs = (self.x1, self.x2)
        self.expect(self.model, inputs)
class TestWhere(ONNXModelTest):
    """Export check for F.where; opsets 7 and 8 are skipped."""
    def test_output(self):
        model = chainer.Sequential(
            F.where
        )
        # Use the builtin ``bool``: the ``np.bool`` alias was deprecated in
        # NumPy 1.20 and removed in 1.24, so the old spelling raises
        # AttributeError on modern NumPy. Behavior is identical.
        cond = np.array([[1, 0, 0], [0, 1, 0]], dtype=bool)
        x = input_generator.increasing(2, 3)
        y = np.zeros((2, 3), np.float32)
        self.expect(model, (cond, x, y), skip_opset_version=[7, 8])
class TestResizeImages(ONNXModelTest):
    """F.resize_images export: 2x bilinear upsampling of a 2x2 input."""
    def setUp(self):
        class Model(chainer.Chain):
            # Generic wrapper: injects the runtime input into the stored
            # kwargs, then calls the wrapped op.
            def __init__(self, ops, args, input_argname):
                super(Model, self).__init__()
                self.ops = ops
                self.args = args
                self.input_argname = input_argname
            def __call__(self, x):
                self.args[self.input_argname] = x
                return self.ops(**self.args)
        # (batch, channel, height, width) = (1, 1, 2, 2)
        self.x = np.array([[[[64, 32], [64, 32]]]], np.float32)
        # 2x upsampling
        args = {'output_shape': (4, 4)}
        self.model = Model(F.resize_images, args, 'x')
    def test_output(self):
        # FIXME(syoyo): Currently the test will fail due to the different
        # behavior of bilinear interpolation between Chainer and onnxruntime.
        # So disable output value check for a while.
        #
        # Currently Chainer will give [64, 53.333336, 42.666668, 32]
        # (same result with tensorflow r1.13.1 with `align_corners=True`),
        # while onnxruntime gives [64, 48, 32, 32]
        # (same result with tensorflow r1.13.1 with `align_corners=False`)
        #
        # However, the correct behavior will be [64, 54, 40, 32].
        # (cv2.resize and tensorflow master(r1.14 or r2.0) after this fix:
        # https://github.com/tensorflow/tensorflow/issues/6720)
        self.check_out_values = None  # Skip output value check
        with testing.assert_warns(UserWarning):
            self.expect(self.model, self.x, expected_num_initializers=0)
@testing.parameterize(
    {'ops': 'stack', 'in_shapes': [(3, 4), (3, 4)], 'kwargs': {},
     'name': 'stack_default'},
    {'ops': 'stack', 'in_shapes': [(3, 4), (3, 4)], 'kwargs': {'axis': 1},
     'name': 'stack_axis1'},
    {'ops': 'stack', 'in_shapes': [(3, 4), (3, 4)], 'kwargs': {'axis': 2},
     'name': 'stack_axis2'},
    {'ops': 'stack', 'in_shapes': [(3, 4), (3, 4)], 'kwargs': {'axis': -1},
     'name': 'stack_axis_neg'},
    {'ops': 'vstack', 'inputs': [2, 3], 'kwargs': {},
     'name': 'vstack_ndim0'},
    {'ops': 'vstack', 'in_shapes': [(3,), (3,)], 'kwargs': {},
     'name': 'vstack_ndim1'},
    {'ops': 'vstack', 'in_shapes': [(3, 4), (2, 4)], 'kwargs': {},
     'name': 'vstack_ndim2'},
    {'ops': 'hstack', 'inputs': [2, 3], 'kwargs': {},
     'name': 'hstack_ndim0'},
    {'ops': 'hstack', 'in_shapes': [(3,), (3,)], 'kwargs': {},
     'name': 'hstack_ndim1'},
    {'ops': 'hstack', 'in_shapes': [(3, 4), (3, 2)], 'kwargs': {},
     'name': 'hstack_ndim2'},
    {'ops': 'dstack', 'inputs': [2, 3], 'kwargs': {},
     'name': 'dstack_ndim0'},
    {'ops': 'dstack', 'in_shapes': [(3,), (3,)], 'kwargs': {},
     'name': 'dstack_ndim1'},
    {'ops': 'dstack', 'in_shapes': [(3, 2), (3, 2)], 'kwargs': {},
     'name': 'dstack_ndim2'},
    {'ops': 'dstack', 'in_shapes': [(3, 2, 2), (3, 2, 1)], 'kwargs': {},
     'name': 'dstack_ndim3'},
)
class TestStack(ONNXModelTest):
    """stack/vstack/hstack/dstack export over the parameter grid above."""
    def test_output(self):
        class Model(chainer.Chain):
            def __init__(self, ops, kwargs):
                super(Model, self).__init__()
                self.ops = getattr(F, ops)
                self.kwargs = kwargs
            def __call__(self, *xs):
                return self.ops(xs, **self.kwargs)
        model = Model(ops=self.ops, kwargs=self.kwargs)
        # 'inputs' carries scalar values directly (ndim-0 cases); otherwise
        # 'in_shapes' describes shapes for generated increasing arrays.
        if hasattr(self, 'inputs'):
            xs = [np.array(value, dtype=np.float32) for value in self.inputs]
        else:
            xs = [input_generator.increasing(*shape) for
                  shape in self.in_shapes]
        self.expect(model, xs, name=self.name)
class TestShape(ONNXModelTest):
    """Export a custom Shape node via the replace_func helper."""
    def test_output(self):
        from onnx_chainer.replace_func import as_funcnode
        class Model(chainer.Chain):
            def __init__(self):
                super().__init__()
            @as_funcnode('Shape')
            def shape(self, x):
                # The ONNX Shape operator is required to emit int64.
                return np.array(x.shape, dtype=np.int64)
            def forward(self, x):
                # Go through the wrapped method (not x.shape directly) so the
                # node stays connected to the computational graph.
                return self.shape(x)
        x = input_generator.increasing(3, 4, 5)
        model = Model()
        self.expect(model, (x,))
class TestDynamicReshape(ONNXModelTest):
    """Export a Reshape whose target shape arrives as a runtime input."""
    def test_output(self):
        from onnx_chainer.replace_func import as_funcnode
        class Model(chainer.Chain):
            def __init__(self):
                super().__init__()
            @as_funcnode('Reshape')
            def dynamic_reshape(self, x, shape):
                # ``shape`` arrives wrapped as a variable; unwrap its array.
                return F.reshape(x, tuple(shape.array))
            def forward(self, x, shape):
                return self.dynamic_reshape(x, shape)
        def check_no_param(onnx_model, path):
            # The shape must remain a graph input, never a baked-in parameter.
            assert not any('param' in v.name for v in onnx_model.graph.input)
        model = Model()
        x = input_generator.increasing(3, 4, 5)
        shape = np.array([12, 5], dtype=np.int64)
        self.expect(model, (x, shape), custom_model_test_func=check_no_param)
@testing.parameterize(
    {'kwargs': {}, 'name': 'permutate'},
    {'kwargs': {'inv': True}, 'name': 'permutate_inv'},
    {'kwargs': {'axis': 1}, 'name': 'permutate_axis1'},
    {'kwargs': {'axis': 1, 'inv': True}, 'name': 'permutate_axis1_inv'},
)
class TestPermutate(ONNXModelTest):
    """F.permutate export over axis/inv combinations; opsets 7-8 skipped."""
    def test_output(self):
        class Model(chainer.Chain):
            def __init__(self, kwargs):
                super(Model, self).__init__()
                self.kwargs = kwargs
            def forward(self, x, indices):
                return F.permutate(x, indices, **self.kwargs)
        model = Model(kwargs=self.kwargs)
        x = np.arange(6).reshape((3, 2)).astype(np.float32)
        # Index length must match the permuted axis extent: 2 columns on
        # axis 1, otherwise 3 rows on axis 0.
        if self.kwargs.get('axis') == 1:
            indices = np.array([1, 0], np.int32)
        else:
            indices = np.array([2, 0, 1], np.int32)
        self.expect(model, (x, indices), name=self.name,
                    skip_opset_version=[7, 8])
@testing.parameterize(
    {'in_shapes': [(3, 4)], 'name': 'transpose_sequence_single_input'},
    {'in_shapes': [(1, 3), (1, 3)],
     'name': 'transpose_sequence_single_output'},
    {'in_shapes': [(2, 3), (2, 3), (2, 3), (2, 3)],
     'name': 'transpose_sequence_same_shape'},
)
class TestTransposeSequence(ONNXModelTest):
    """F.transpose_sequence export with varying sequence shapes."""
    def test_output(self):
        class Model(chainer.Chain):
            def __init__(self):
                super(Model, self).__init__()
            def __call__(self, *xs):
                return F.transpose_sequence(xs)
        model = Model()
        xs = [input_generator.increasing(*shape) for
              shape in self.in_shapes]
        self.expect(model, xs, name=self.name)
class TestSelectItem(ONNXModelTest):
    """F.select_item export; opsets 1-8 are skipped."""
    def test_output(self):
        class Model(chainer.Chain):
            def forward(self, x, t):
                # Pick t[i]-th element from each row of x.
                return F.select_item(x, t)
        x = input_generator.increasing(3, 5)
        t = np.array([4, 1, 0], dtype=np.int32)
        self.expect(
            Model(), (x, t), expected_num_initializers=0,
            skip_opset_version=list(range(1, 9)))
| |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from email import utils
from django.db import connection
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.addons.models import (
Addon, CompatOverride, CompatOverrideRange, IncompatibleVersions)
from olympia.applications.models import AppVersion
from olympia.files.models import File
from olympia.versions.models import ApplicationsVersions, Version
from services import update
class VersionCheckMixin(object):
    """Shared helper: build an ``update.Update`` wired to a DB cursor."""
    def get(self, data):
        checker = update.Update(data)
        checker.cursor = connection.cursor()
        return checker
class TestDataValidate(VersionCheckMixin, TestCase):
    """Validation of the raw query data fed to the update service."""
    fixtures = ['base/addon_3615', 'base/appversion']
    def setUp(self):
        super(TestDataValidate, self).setUp()
        # Minimal well-formed client request for the fixture add-on.
        self.good_data = {
            'id': '{2fa4ed95-0317-4c6a-a74c-5f3e3912c1f9}',
            'version': '2.0.58',
            'reqVersion': 1,
            'appID': '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}',
            'appVersion': '3.7a1pre',
        }
    def test_app_os(self):
        # A known platform token anywhere inside appOS maps to its id.
        data = self.good_data.copy()
        data['appOS'] = 'something %s penguin' % amo.PLATFORM_LINUX.api_name
        form = self.get(data)
        assert form.is_valid()
        assert form.data['appOS'] == amo.PLATFORM_LINUX.id
    def test_app_version_fails(self):
        # appID is mandatory.
        data = self.good_data.copy()
        del data['appID']
        form = self.get(data)
        assert not form.is_valid()
    def test_app_version_wrong(self):
        data = self.good_data.copy()
        data['appVersion'] = '67.7'
        form = self.get(data)
        # If you pass through the wrong version that's fine
        # you will just end up with no updates because your
        # version_int will be out.
        assert form.is_valid()
    def test_app_version(self):
        # appVersion is normalized into a sortable version_int.
        data = self.good_data.copy()
        form = self.get(data)
        assert form.is_valid()
        assert form.data['version_int'] == 3070000001000
    def test_sql_injection(self):
        # A lone quote in the guid must fail validation.
        data = self.good_data.copy()
        data['id'] = "'"
        up = self.get(data)
        assert not up.is_valid()
    def test_inactive(self):
        # User-disabled add-ons get no updates.
        addon = Addon.objects.get(pk=3615)
        addon.update(disabled_by_user=True)
        up = self.get(self.good_data)
        assert not up.is_valid()
    def test_soft_deleted(self):
        # Soft-deleted add-ons get no updates either.
        addon = Addon.objects.get(pk=3615)
        addon.update(status=amo.STATUS_DELETED)
        up = self.get(self.good_data)
        assert not up.is_valid()
    def test_no_version(self):
        # The client's current add-on version is optional.
        data = self.good_data.copy()
        del data['version']
        up = self.get(data)
        assert up.is_valid()
    def test_unlisted_addon(self):
        """Don't provide updates for unlisted addons."""
        addon = Addon.objects.get(pk=3615)
        addon.update(is_listed=False)
        up = self.get(self.good_data)
        assert not up.is_valid()
class TestLookup(VersionCheckMixin, TestCase):
    """Which version/file the update service serves for a given client."""
    fixtures = ['addons/update', 'base/appversion']
    def setUp(self):
        super(TestLookup, self).setUp()
        self.addon = Addon.objects.get(id=1865)
        self.platform = None
        self.version_int = 3069900200100
        self.app = amo.APP_IDS[1]
        # Primary keys of the fixture versions, oldest to newest.
        self.version_1_0_2 = 66463
        self.version_1_1_3 = 90149
        self.version_1_2_0 = 105387
        self.version_1_2_1 = 112396
        self.version_1_2_2 = 115509
    def get(self, *args):
        # args are (version, version_int, app, platform); returns the
        # (version_id, file_id) pair of the served update row.
        data = {
            'id': self.addon.guid,
            'appID': args[2].guid,
            'appVersion': 1,  # this is going to be overridden
            'appOS': args[3].api_name if args[3] else '',
            'reqVersion': '',
        }
        # Allow version to be optional.
        if args[0]:
            data['version'] = args[0]
        up = super(TestLookup, self).get(data)
        assert up.is_valid()
        up.data['version_int'] = args[1]
        up.get_update()
        return (up.data['row'].get('version_id'),
                up.data['row'].get('file_id'))
    def change_status(self, version, status):
        # Flip the status of the version's first file.
        version = Version.objects.get(pk=version)
        file = version.files.all()[0]
        file.status = status
        file.save()
        return version
    def change_version(self, version, name):
        # Rename a version in place.
        Version.objects.get(pk=version).update(version=name)
    def test_low_client(self):
        """
        Version 3.0a1 of Firefox is 3000000001100 and version 1.0.2 of the
        add-on is returned.
        """
        version, file = self.get('', '3000000001100',
                                 self.app, self.platform)
        assert version == self.version_1_0_2
    def test_new_client(self):
        """
        Version 3.0.12 of Firefox is 3069900200100 and version 1.2.2 of the
        add-on is returned.
        """
        version, file = self.get('', self.version_int,
                                 self.app, self.platform)
        assert version == self.version_1_2_2
    def test_min_client(self):
        """
        Version 3.7a5pre of Firefox is 3070000005000 and version 1.1.3 of
        the add-on is returned, because all later ones are set to minimum
        version of 3.7a5.
        """
        for version in Version.objects.filter(pk__gte=self.version_1_2_0):
            appversion = version.apps.all()[0]
            appversion.min = AppVersion.objects.get(pk=325)  # 3.7a5
            appversion.save()
        version, file = self.get('', '3070000005000',  # 3.7a5pre
                                 self.app, self.platform)
        assert version == self.version_1_1_3
    def test_new_client_ordering(self):
        """
        Given the following:
        * Version 15 (1 day old), max application_version 3.6*
        * Version 12 (1 month old), max application_version 3.7a
        We want version 15, even though version 12 is for a higher version.
        This was found in https://bugzilla.mozilla.org/show_bug.cgi?id=615641.
        """
        application_version = ApplicationsVersions.objects.get(pk=77550)
        application_version.max_id = 350
        application_version.save()
        # Version 1.2.2 is now a lower max version.
        application_version = ApplicationsVersions.objects.get(pk=88490)
        application_version.max_id = 329
        application_version.save()
        version, file = self.get('', self.version_int,
                                 self.app, self.platform)
        assert version == self.version_1_2_2
    def test_public_not_beta(self):
        """
        If the addon status is public and you are not asking
        for a beta version, then you get a public version.
        """
        self.change_status(self.version_1_2_2, amo.STATUS_PENDING)
        assert self.addon.status == amo.STATUS_PUBLIC
        version, file = self.get('1.2', self.version_int,
                                 self.app, self.platform)
        assert version == self.version_1_2_1
    def test_public_beta(self):
        """
        If the addon status is public, you are in beta and the file is
        beta, the you get a beta.
        """
        self.change_version(self.version_1_2_0, '1.2beta')
        self.change_status(self.version_1_2_0, amo.STATUS_BETA)
        self.change_status(self.version_1_2_1, amo.STATUS_BETA)
        version, file = self.get('1.2beta', self.version_int,
                                 self.app, self.platform)
        assert version == self.version_1_2_1
    def test_can_downgrade(self):
        """
        Check that we can downgrade, if 1.2.0 gets admin disabled
        and the oldest public version is now 1.1.3.
        """
        self.change_status(self.version_1_2_0, amo.STATUS_PENDING)
        for v in Version.objects.filter(pk__gte=self.version_1_2_1):
            v.delete()
        version, file = self.get('1.2', self.version_int,
                                 self.app, self.platform)
        assert version == self.version_1_1_3
    def test_public_pending_exists(self):
        """
        If the addon status is public and you are asking
        for a beta version we look up a version based on the
        file version at that point. In this case, because the
        file is pending, we are looking for something public.
        """
        self.change_status(self.version_1_2_2, amo.STATUS_PENDING)
        self.change_status(self.version_1_2_0, amo.STATUS_PENDING)
        self.change_version(self.version_1_2_0, '1.2beta')
        version, file = self.get('1.2', self.version_int,
                                 self.app, self.platform)
        assert version == self.version_1_2_1
    def test_public_pending_no_file_beta(self):
        """
        If the addon status is public and you are asking
        for a beta version we look up a version based on the
        file version at that point. If there are no files,
        find a public version.
        """
        self.change_version(self.version_1_2_0, '1.2beta')
        Version.objects.get(pk=self.version_1_2_0).files.all().delete()
        version, file = self.get('1.2beta', self.version_int,
                                 self.app, self.platform)
        dest = Version.objects.get(pk=self.version_1_2_2)
        assert dest.addon.status == amo.STATUS_PUBLIC
        assert dest.files.all()[0].status == amo.STATUS_PUBLIC
        assert version == dest.pk
    def test_public_pending_not_exists(self):
        """
        If the addon status is public and you are asking
        for a beta version we look up a version based on the
        file version at that point. In this case, because the
        file is pending, we are looking for a public version.
        """
        self.change_status(self.version_1_2_0, amo.STATUS_PENDING)
        self.change_version(self.version_1_2_0, '1.2beta')
        self.change_status(self.version_1_2_2, amo.STATUS_BETA)
        version, file = self.get('1.2beta', self.version_int,
                                 self.app, self.platform)
        assert version == self.version_1_2_1
    def test_not_public(self):
        """
        If the addon status is not public, then the update only
        looks for files within that one version.
        """
        self.change_status(self.version_1_2_2, amo.STATUS_NULL)
        self.addon.update(status=amo.STATUS_NULL)
        version, file = self.get('1.2.1', self.version_int,
                                 self.app, self.platform)
        assert version == self.version_1_2_1
    def test_platform_does_not_exist(self):
        """If client passes a platform, find that specific platform."""
        version = Version.objects.get(pk=115509)
        for file in version.files.all():
            file.platform = amo.PLATFORM_LINUX.id
            file.save()
        version, file = self.get('1.2', self.version_int,
                                 self.app, self.platform)
        assert version == self.version_1_2_1
    def test_platform_exists(self):
        """If client passes a platform, find that specific platform."""
        version = Version.objects.get(pk=115509)
        for file in version.files.all():
            file.platform = amo.PLATFORM_LINUX.id
            file.save()
        version, file = self.get('1.2', self.version_int,
                                 self.app, amo.PLATFORM_LINUX)
        assert version == self.version_1_2_2
    def test_file_for_platform(self):
        """If client passes a platform, make sure we get the right file."""
        version = Version.objects.get(pk=self.version_1_2_2)
        file_one = version.files.all()[0]
        file_one.platform = amo.PLATFORM_LINUX.id
        file_one.save()
        file_two = File(version=version, filename='foo', hash='bar',
                        platform=amo.PLATFORM_WIN.id,
                        status=amo.STATUS_PUBLIC)
        file_two.save()
        version, file = self.get('1.2', self.version_int,
                                 self.app, amo.PLATFORM_LINUX)
        assert version == self.version_1_2_2
        assert file == file_one.pk
        version, file = self.get('1.2', self.version_int,
                                 self.app, amo.PLATFORM_WIN)
        assert version == self.version_1_2_2
        assert file == file_two.pk
    def test_file_preliminary(self):
        """
        If there's a newer file in prelim. review it won't show up. This is
        a test for https://bugzilla.mozilla.org/show_bug.cgi?id=620749
        """
        version = Version.objects.get(pk=self.version_1_2_2)
        file = version.files.all()[0]
        file.status = amo.STATUS_LITE
        file.save()
        version, file = self.get('1.2', self.version_int,
                                 self.app, amo.PLATFORM_LINUX)
        assert version == self.version_1_2_1
    def test_file_preliminary_addon(self):
        """
        If the addon is in prelim. review, show the highest file with
        prelim, which in this case is 1.2.1
        """
        for status in amo.LITE_STATUSES:
            self.addon.update(status=status)
            # Since we're asking for an update from version 1.2, and
            # we want to serve a prelim update, 1.2 needs to be
            # prelim as well.
            self.change_status(self.version_1_2_0, amo.STATUS_LITE)
            self.change_status(self.version_1_2_1, amo.STATUS_LITE)
            version, file = self.get('1.2', self.version_int,
                                     self.app, amo.PLATFORM_LINUX)
            assert version == self.version_1_2_1
    def test_file_preliminary_odd_statuses(self):
        """
        Test that we serve prelim updates even when current version is
        disabled or deleted.
        """
        self.addon.update(status=amo.STATUS_LITE)
        self.change_status(self.version_1_2_1, amo.STATUS_LITE)
        # Current version disabled.
        self.change_status(self.version_1_2_0, amo.STATUS_DISABLED)
        version, file = self.get('1.2', self.version_int,
                                 self.app, amo.PLATFORM_LINUX)
        assert version == self.version_1_2_1
        # Current version deleted.
        Version.objects.get(pk=self.version_1_2_0).delete()
        version, file = self.get('1.2', self.version_int,
                                 self.app, amo.PLATFORM_LINUX)
        assert version == self.version_1_2_1
    def test_file_preliminary_ex_full_addon(self):
        """
        If the addon is in prelim. review, user has a full reviewed version.
        Show the most recent full reviewed version.
        """
        self.addon.update(status=amo.STATUS_LITE)
        self.change_status(self.version_1_2_2, amo.STATUS_LITE)
        version, file = self.get('1.2', self.version_int,
                                 self.app, amo.PLATFORM_LINUX)
        assert version == self.version_1_2_1
class TestDefaultToCompat(VersionCheckMixin, TestCase):
    """
    Test default to compatible with all the various combinations of input.
    """
    fixtures = ['addons/default-to-compat']
    def setUp(self):
        super(TestDefaultToCompat, self).setUp()
        self.addon = Addon.objects.get(id=337203)
        self.platform = None
        self.app = amo.APP_IDS[1]
        # version_int encodings of Firefox 3.0 through 8.0.
        self.app_version_int_3_0 = 3000000200100
        self.app_version_int_4_0 = 4000000200100
        self.app_version_int_5_0 = 5000000200100
        self.app_version_int_6_0 = 6000000200100
        self.app_version_int_7_0 = 7000000200100
        self.app_version_int_8_0 = 8000000200100
        # Fixture version primary keys.
        self.ver_1_0 = 1268881
        self.ver_1_1 = 1268882
        self.ver_1_2 = 1268883
        self.ver_1_3 = 1268884
        # Baseline expectation: served version id keyed by
        # '<appVersion>-<compat_mode>'; tests mutate this map as needed.
        self.expected = {
            '3.0-strict': None, '3.0-normal': None, '3.0-ignore': None,
            '4.0-strict': self.ver_1_0,
            '4.0-normal': self.ver_1_0,
            '4.0-ignore': self.ver_1_0,
            '5.0-strict': self.ver_1_2,
            '5.0-normal': self.ver_1_2,
            '5.0-ignore': self.ver_1_2,
            '6.0-strict': self.ver_1_3,
            '6.0-normal': self.ver_1_3,
            '6.0-ignore': self.ver_1_3,
            '7.0-strict': self.ver_1_3,
            '7.0-normal': self.ver_1_3,
            '7.0-ignore': self.ver_1_3,
            '8.0-strict': None,
            '8.0-normal': self.ver_1_3,
            '8.0-ignore': self.ver_1_3,
        }
    def create_override(self, **kw):
        # Build a CompatOverrideRange spanning everything by default;
        # kw narrows the addon/app version window.
        co = CompatOverride.objects.create(
            name='test', guid=self.addon.guid, addon=self.addon
        )
        default = dict(compat=co, app=self.app.id, min_version='0',
                       max_version='*', min_app_version='0',
                       max_app_version='*')
        default.update(kw)
        CompatOverrideRange.objects.create(**default)
    def update_files(self, **kw):
        # Apply the given field updates to every file of every version.
        for version in self.addon.versions.all():
            for file in version.files.all():
                file.update(**kw)
    def get(self, **kw):
        # Run one update check; returns the served version_id (or None).
        up = super(TestDefaultToCompat, self).get({
            'reqVersion': 1,
            'id': self.addon.guid,
            'version': kw.get('item_version', '1.0'),
            'appID': self.app.guid,
            'appVersion': kw.get('app_version', '3.0'),
        })
        assert up.is_valid()
        up.compat_mode = kw.get('compat_mode', 'strict')
        up.get_update()
        return up.data['row'].get('version_id')
    def check(self, expected):
        """
        Checks Firefox versions 3.0 to 8.0 in each compat mode and compares it
        to the expected version.
        """
        versions = ['3.0', '4.0', '5.0', '6.0', '7.0', '8.0']
        modes = ['strict', 'normal', 'ignore']
        for version in versions:
            for mode in modes:
                assert self.get(app_version=version, compat_mode=mode) == (
                    expected['-'.join([version, mode])])
    def test_baseline(self):
        # Tests simple add-on (non-binary-components, non-strict).
        self.check(self.expected)
    def test_binary_components(self):
        # Tests add-on with binary_components flag.
        self.update_files(binary_components=True)
        self.expected.update({
            '8.0-normal': None,
        })
        self.check(self.expected)
    def test_extension_compat_override(self):
        # Tests simple add-on (non-binary-components, non-strict) with a compat
        # override.
        self.create_override(min_version='1.3', max_version='1.3')
        self.expected.update({
            '6.0-normal': self.ver_1_2,
            '7.0-normal': self.ver_1_2,
            '8.0-normal': self.ver_1_2,
        })
        self.check(self.expected)
    def test_binary_component_compat_override(self):
        # Tests simple add-on (non-binary-components, non-strict) with a compat
        # override.
        self.update_files(binary_components=True)
        self.create_override(min_version='1.3', max_version='1.3')
        self.expected.update({
            '6.0-normal': self.ver_1_2,
            '7.0-normal': self.ver_1_2,
            '8.0-normal': None,
        })
        self.check(self.expected)
    def test_strict_opt_in(self):
        # Tests add-on with opt-in strict compatibility
        self.update_files(strict_compatibility=True)
        self.expected.update({
            '8.0-normal': None,
        })
        self.check(self.expected)
    def test_compat_override_max_addon_wildcard(self):
        # Tests simple add-on (non-binary-components, non-strict) with a compat
        # override that contains a max wildcard.
        self.create_override(min_version='1.2', max_version='1.3',
                             min_app_version='5.0', max_app_version='6.*')
        self.expected.update({
            '5.0-normal': self.ver_1_1,
            '6.0-normal': self.ver_1_1,
        })
        self.check(self.expected)
    def test_compat_override_max_app_wildcard(self):
        # Tests simple add-on (non-binary-components, non-strict) with a compat
        # override that contains a min/max wildcard for the app.
        self.create_override(min_version='1.2', max_version='1.3')
        self.expected.update({
            '5.0-normal': self.ver_1_1,
            '6.0-normal': self.ver_1_1,
            '7.0-normal': self.ver_1_1,
            '8.0-normal': self.ver_1_1,
        })
        self.check(self.expected)
    def test_compat_override_both_wildcards(self):
        # Tests simple add-on (non-binary-components, non-strict) with a compat
        # override that contains a wildcard for both addon version and app
        # version.
        self.create_override(min_app_version='7.0', max_app_version='*')
        self.expected.update({
            '7.0-normal': None,
            '8.0-normal': None,
        })
        self.check(self.expected)
    def test_compat_override_invalid_version(self):
        # Tests compat override range where version doesn't match our
        # versioning scheme. This results in no versions being written to the
        # incompatible_versions table.
        self.create_override(min_version='ver1', max_version='ver2')
        assert IncompatibleVersions.objects.all().count() == 0
    def test_min_max_version(self):
        # Tests the minimum requirement of the app maxVersion.
        av = self.addon.current_version.apps.all()[0]
        av.min_id = 233  # Firefox 3.0.
        av.max_id = 268  # Firefox 3.5.
        av.save()
        self.expected.update({
            '3.0-strict': self.ver_1_3,
            '3.0-ignore': self.ver_1_3,
            '4.0-ignore': self.ver_1_3,
            '5.0-ignore': self.ver_1_3,
            '6.0-strict': self.ver_1_2,
            '6.0-normal': self.ver_1_2,
            '7.0-strict': self.ver_1_2,
            '7.0-normal': self.ver_1_2,
            '8.0-normal': self.ver_1_2,
        })
        self.check(self.expected)
class TestResponse(VersionCheckMixin, TestCase):
    """RDF response content and HTTP headers of the update service."""
    fixtures = ['base/addon_3615', 'base/seamonkey']
    def setUp(self):
        super(TestResponse, self).setUp()
        self.addon_one = Addon.objects.get(pk=3615)
        # Minimal well-formed client request for the fixture add-on.
        self.good_data = {
            'id': '{2fa4ed95-0317-4c6a-a74c-5f3e3912c1f9}',
            'version': '2.0.58',
            'reqVersion': 1,
            'appID': '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}',
            'appVersion': '3.7a1pre',
        }
        self.mac = amo.PLATFORM_MAC
        self.win = amo.PLATFORM_WIN
    def test_bad_guid(self):
        # An unknown guid produces the "bad" RDF payload.
        data = self.good_data.copy()
        data["id"] = "garbage"
        up = self.get(data)
        assert up.get_rdf() == up.get_bad_rdf()
    def test_no_platform(self):
        # Only the matching platform's file is served; a mismatch yields
        # the no-updates RDF.
        file = File.objects.get(pk=67442)
        file.platform = self.win.id
        file.save()
        data = self.good_data.copy()
        data["appOS"] = self.win.api_name
        up = self.get(data)
        assert up.get_rdf()
        assert up.data['row']['file_id'] == file.pk
        data["appOS"] = self.mac.api_name
        up = self.get(data)
        assert up.get_rdf() == up.get_no_updates_rdf()
    def test_different_platform(self):
        file = File.objects.get(pk=67442)
        file.platform = self.win.id
        file.save()
        file_pk = file.pk
        # Clone the file row as a Mac build.
        file.id = None
        file.platform = self.mac.id
        file.save()
        mac_file_pk = file.pk
        data = self.good_data.copy()
        data['appOS'] = self.win.api_name
        up = self.get(data)
        up.is_valid()
        up.get_update()
        assert up.data['row']['file_id'] == file_pk
        data['appOS'] = self.mac.api_name
        up = self.get(data)
        up.is_valid()
        up.get_update()
        assert up.data['row']['file_id'] == mac_file_pk
    def test_good_version(self):
        up = self.get(self.good_data)
        up.is_valid()
        up.get_update()
        assert up.data['row']['hash'].startswith('sha256:3808b13e')
        assert up.data['row']['min'] == '2.0'
        assert up.data['row']['max'] == '4.0'
    def test_beta_version(self):
        file = File.objects.get(pk=67442)
        file.status = amo.STATUS_BETA
        file.save()
        beta_version = '2.0.58 beta'
        version = file.version
        version.version = beta_version
        version.save()
        # Changing the status of the only reviewed file resets the
        # add-on status to UNREVIEWED. Change it back to public.
        version.addon.update(status=amo.STATUS_PUBLIC)
        data = self.good_data.copy()
        up = self.get(data)
        up.is_valid()
        assert not up.get_update()
        data["version"] = beta_version
        up = self.get(data)
        up.is_valid()
        up.get_update()
        assert up.data['row']['file_id'] == file.pk
    def test_no_app_version(self):
        # Client app too old for any version: no update.
        data = self.good_data.copy()
        data['appVersion'] = '1.4'
        up = self.get(data)
        up.is_valid()
        assert not up.get_update()
    def test_low_app_version(self):
        data = self.good_data.copy()
        data['appVersion'] = '2.0'
        up = self.get(data)
        up.is_valid()
        up.get_update()
        assert up.data['row']['hash'].startswith('sha256:3808b13e')
        assert up.data['row']['min'] == '2.0'
        assert up.data['row']['max'] == '4.0'
    def test_content_type(self):
        up = self.get(self.good_data)
        # BUG FIX: this membership check was previously a bare expression,
        # so the test could never fail. Assert it.
        assert ('Content-Type', 'text/xml') in up.get_headers(1)
    def test_cache_control(self):
        up = self.get(self.good_data)
        # BUG FIX: previously a bare expression; now actually asserted.
        assert ('Cache-Control', 'public, max-age=3600') in up.get_headers(1)
    def test_length(self):
        up = self.get(self.good_data)
        # BUG FIX: previously a bare expression; now actually asserted.
        assert ('Cache-Length', '1') in up.get_headers(1)
    def test_expires(self):
        """Check there are these headers and that expires is 3600 later."""
        # We aren't bother going to test the actual time in expires, that
        # way lies pain with broken tests later.
        up = self.get(self.good_data)
        hdrs = dict(up.get_headers(1))
        lm = datetime(*utils.parsedate_tz(hdrs['Last-Modified'])[:7])
        exp = datetime(*utils.parsedate_tz(hdrs['Expires'])[:7])
        assert (exp - lm).seconds == 3600
    def test_appguid(self):
        up = self.get(self.good_data)
        rdf = up.get_rdf()
        assert rdf.find(self.good_data['appID']) > -1
    def get_file_url(self):
        """Return the file url with the hash as parameter."""
        return ('/user-media/addons/3615/delicious_bookmarks-2.1.072-fx.xpi?'
                'filehash=sha256%3A3808b13ef8341378b9c8305ca648200954ee7dcd8dc'
                'e09fef55f2673458bc31f')
    def test_url(self):
        up = self.get(self.good_data)
        up.get_rdf()
        assert up.data['row']['url'] == self.get_file_url()
    def test_url_local_recent(self):
        # A recently status-changed file still serves the same local URL.
        a_bit_ago = datetime.now() - timedelta(seconds=60)
        File.objects.get(pk=67442).update(datestatuschanged=a_bit_ago)
        up = self.get(self.good_data)
        up.get_rdf()
        assert up.data['row']['url'] == self.get_file_url()
    def test_url_remote_beta(self):
        file = File.objects.get(pk=67442)
        file.status = amo.STATUS_BETA
        file.save()
        beta_version = '2.0.58 beta'
        file.version.update(version=beta_version)
        data = self.good_data.copy()
        data["version"] = beta_version
        up = self.get(data)
        self.addon_one.status = amo.STATUS_PUBLIC
        self.addon_one.save()
        up.get_rdf()
        assert up.data['row']['file_id'] == file.pk
        assert up.data['row']['url'] == self.get_file_url()
    def test_hash(self):
        # updateHash appears only when the file has a hash.
        rdf = self.get(self.good_data).get_rdf()
        assert rdf.find('updateHash') > -1
        file = File.objects.get(pk=67442)
        file.hash = ''
        file.save()
        rdf = self.get(self.good_data).get_rdf()
        assert rdf.find('updateHash') == -1
    def test_releasenotes(self):
        # updateInfoURL appears only when the version has release notes.
        rdf = self.get(self.good_data).get_rdf()
        assert rdf.find('updateInfoURL') > -1
        version = Version.objects.get(pk=81551)
        version.update(releasenotes=None)
        rdf = self.get(self.good_data).get_rdf()
        assert rdf.find('updateInfoURL') == -1
    def test_sea_monkey(self):
        data = {
            'id': 'bettergmail2@ginatrapani.org',
            'version': '1',
            'appID': '{92650c4d-4b8e-4d2a-b7eb-24ecf4f6b63a}',
            'reqVersion': 1,
            'appVersion': '1.0',
        }
        up = self.get(data)
        rdf = up.get_rdf()
        assert up.data['row']['hash'].startswith('sha256:9d9a389')
        assert up.data['row']['min'] == '1.0'
        assert up.data['row']['version'] == '0.5.2'
        assert rdf.find(data['appID']) > -1
    def test_no_updates_at_all(self):
        self.addon_one.versions.all().delete()
        upd = self.get(self.good_data)
        assert upd.get_rdf() == upd.get_no_updates_rdf()
    def test_no_updates_my_fx(self):
        data = self.good_data.copy()
        data['appVersion'] = '5.0.1'
        upd = self.get(data)
        assert upd.get_rdf() == upd.get_no_updates_rdf()
class TestFirefoxHotfix(VersionCheckMixin, TestCase):

    def setUp(self):
        """Create a "firefox hotfix" addon with a few versions.

        Check bug 1031516 for more info.
        """
        super(TestFirefoxHotfix, self).setUp()
        self.addon = amo.tests.addon_factory(guid='firefox-hotfix@mozilla.org')
        # Hotfix versions and the max Firefox version each supports. The
        # first two changed the signature; the newest works on any Firefox.
        hotfix_versions = [
            ('20121019.01', '16.*'),  # first signature changing hotfix
            ('20130826.01', '24.*'),  # second signature changing hotfix
            ('20202020.01', '30.*'),  # newest version
        ]
        for version, max_app_version in hotfix_versions:
            amo.tests.version_factory(addon=self.addon, version=version,
                                      min_app_version='10.0',
                                      max_app_version=max_app_version)
        self.data = {
            'id': 'firefox-hotfix@mozilla.org',
            'appID': '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}',
            'reqVersion': '2',
        }

    def _served_rdf(self, installed_version, app_version):
        """Run a version check for the given state and return the RDF."""
        self.data['version'] = installed_version
        self.data['appVersion'] = app_version
        return self.get(self.data).get_rdf()

    def test_10_16_first_hotfix(self):
        """The first hotfix changing the signature should be served."""
        assert self._served_rdf('', '16.0.1').find('20121019.01') > -1

    def test_10_16_second_hotfix(self):
        """The second hotfix changing the signature should be served."""
        assert self._served_rdf('20121019.01', '16.0.1').find('20130826.01') > -1

    def test_10_16_newest_hotfix(self):
        """The newest hotfix should be served."""
        assert self._served_rdf('20130826.01', '16.0.1').find('20202020.01') > -1

    def test_16_24_second_hotfix(self):
        """The second hotfix changing the signature should be served."""
        assert self._served_rdf('', '16.0.2').find('20130826.01') > -1

    def test_16_24_newest_hotfix(self):
        """The newest hotfix should be served."""
        assert self._served_rdf('20130826.01', '16.0.2').find('20202020.01') > -1

    def test_above_24_latest_version(self):
        """The newest hotfix should be served."""
        assert self._served_rdf('', '28.0').find('20202020.01') > -1
| |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 NORDUnet A/S
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import json
from datetime import datetime
from flask import Blueprint, abort, redirect, request, url_for
from marshmallow import ValidationError
from six.moves.urllib_parse import parse_qs, urlencode, urlparse, urlunparse
from eduid_common.api.decorators import MarshalWith, UnmarshalWith, require_user
from eduid_common.api.exceptions import AmTaskFailed, MsgTaskFailed
from eduid_common.api.helpers import add_nin_to_user
from eduid_common.api.messages import CommonMsg, error_response, success_response
from eduid_common.api.utils import save_and_sync_user, urlappend
from eduid_common.authn.vccs import add_credentials, revoke_all_credentials
from eduid_common.session import session
from eduid_userdb.exceptions import UserOutOfSync
from eduid_userdb.proofing import NinProofingElement
from eduid_userdb.proofing.state import NinProofingState
from eduid_userdb.security import SecurityUser
from eduid_webapp.security.app import current_security_app as current_app
from eduid_webapp.security.helpers import (
SecurityMsg,
compile_credential_list,
generate_suggested_password,
get_zxcvbn_terms,
remove_nin_from_user,
send_termination_mail,
)
from eduid_webapp.security.schemas import (
AccountTerminatedSchema,
ChangePasswordSchema,
ChpassResponseSchema,
CsrfSchema,
NINRequestSchema,
NINResponseSchema,
RedirectResponseSchema,
SecurityResponseSchema,
SuggestedPasswordResponseSchema,
)
# Flask blueprint exposing all security views; mounted at the app root.
security_views = Blueprint('security', __name__, url_prefix='', template_folder='templates')
@security_views.route('/credentials', methods=['GET'])
@MarshalWith(SecurityResponseSchema)
@require_user
def get_credentials(user):
    """Return the list of credentials registered for the logged-in user."""
    current_app.logger.debug(f'Trying to get the credentials for user {user}')
    return {'credentials': compile_credential_list(user)}
@security_views.route('/suggested-password', methods=['GET'])
@MarshalWith(SuggestedPasswordResponseSchema)
@require_user
def get_suggested(user):
    """
    View to get a suggested password for the logged user.
    """
    # Fixed copy-pasted log message: this view generates a suggested
    # password, it does not fetch credentials.
    current_app.logger.debug(f'Trying to get a suggested password for user {user}')
    suggested = {'suggested_password': generate_suggested_password()}
    return suggested
@security_views.route('/change-password', methods=['POST'])
@MarshalWith(ChpassResponseSchema)
@require_user
def change_password(user):
    """
    View to change the password.

    Validates the posted form (including password strength via zxcvbn and a
    CSRF token), requires a recent re-authentication recorded in the session,
    verifies the old password against VCCS while adding the new one, and
    finally clears any 'terminated' flag on the user.
    """
    security_user = SecurityUser.from_user(user, current_app.private_userdb)
    min_entropy = current_app.conf.password_entropy
    # The schema enforces password strength; zxcvbn terms make guesses based
    # on the user's own data (eppn) count as weak.
    schema = ChangePasswordSchema(zxcvbn_terms=get_zxcvbn_terms(security_user.eppn), min_entropy=int(min_entropy))
    if not request.data:
        return error_response(message='chpass.no-data')
    try:
        form = schema.load(json.loads(request.data))
        current_app.logger.debug(form)
    except ValidationError as e:
        # Schema validation failure is reported as a weak password.
        current_app.logger.error(e)
        return error_response(message='chpass.weak-password')
    else:
        old_password = form.get('old_password')
        new_password = form.get('new_password')
    # CSRF check must use the token stored in the session, not the form copy.
    if session.get_csrf_token() != form['csrf_token']:
        return error_response(message='csrf.try_again')
    # A recent re-authentication (timestamp set by the authn flow) is
    # required before a password change is allowed.
    authn_ts = session.get('reauthn-for-chpass', None)
    if authn_ts is None:
        return error_response(message='chpass.no_reauthn')
    now = datetime.utcnow()
    delta = now - datetime.fromtimestamp(authn_ts)
    timeout = current_app.conf.chpass_timeout
    if int(delta.total_seconds()) > timeout:
        return error_response(message='chpass.stale_reauthn')
    vccs_url = current_app.conf.vccs_url
    # add_credentials verifies the old password and registers the new one in
    # VCCS in a single step; False means the old password did not verify.
    added = add_credentials(old_password, new_password, security_user, source='security', vccs_url=vccs_url)
    if not added:
        current_app.logger.debug('Problem verifying the old credentials for {}'.format(user))
        return error_response(message='chpass.unable-to-verify-old-password')
    # A successful password change un-terminates a terminated account.
    security_user.terminated = None
    try:
        save_and_sync_user(security_user)
    except UserOutOfSync:
        return error_response(message='user-out-of-sync')
    # Consume the re-authentication so it cannot be reused.
    del session['reauthn-for-chpass']
    current_app.stats.count(name='security_password_changed', value=1)
    current_app.logger.info('Changed password for user {}'.format(security_user.eppn))
    next_url = current_app.conf.dashboard_url
    credentials = {
        'next_url': next_url,
        'credentials': compile_credential_list(security_user),
        'message': 'chpass.password-changed',
    }
    return credentials
@security_views.route('/terminate-account', methods=['POST'])
@MarshalWith(RedirectResponseSchema)
@UnmarshalWith(CsrfSchema)
@require_user
def delete_account(user):
    """
    Terminate account view.

    It receives a POST request, checks the csrf token,
    schedules the account termination action,
    and redirects to the IdP.
    """
    current_app.logger.debug('Initiating account termination for user {}'.format(user))
    # Send the user to the token service's terminate endpoint, with a
    # 'next' query parameter pointing back at the account_terminated view.
    terminate_url = urlappend(current_app.conf.token_service_url, 'terminate')
    next_url = url_for('security.account_terminated')
    parts = list(urlparse(terminate_url))
    query = parse_qs(parts[4])
    query['next'] = next_url
    parts[4] = urlencode(query)
    return {'location': urlunparse(parts)}
@security_views.route('/account-terminated', methods=['GET'])
@MarshalWith(AccountTerminatedSchema)
@require_user
def account_terminated(user):
    """
    The account termination action,
    removes all credentials for the terminated account
    from the VCCS service,
    flags the account as terminated,
    sends an email to the address in the terminated account,
    and logs out the session.

    :type user: eduid_userdb.user.User
    """
    security_user = SecurityUser.from_user(user, current_app.private_userdb)
    # Termination requires a recent re-authentication, recorded by the
    # terminate-account flow.
    authn_ts = session.get('reauthn-for-termination', None)
    if authn_ts is None:
        abort(400)
    now = datetime.utcnow()
    delta = now - datetime.fromtimestamp(authn_ts)
    # The re-authentication is only valid for 600 seconds.
    if int(delta.total_seconds()) > 600:
        return error_response(message=SecurityMsg.stale_reauthn)
    # Consume the re-authentication so it cannot be replayed.
    del session['reauthn-for-termination']
    # revoke all user passwords
    revoke_all_credentials(security_user, vccs_url=current_app.conf.vccs_url)
    # Skip removing old passwords from the user at this point as a password reset will do that anyway.
    # This fixes the problem with loading users for a password reset as users without passwords triggers
    # the UserHasNotCompletedSignup check in eduid-userdb.
    # TODO: Needs a decision on how to handle unusable user passwords
    # for p in security_user.credentials.filter(Password).to_list():
    #    security_user.passwords.remove(p.key)
    # flag account as terminated
    security_user.terminated = datetime.utcnow()
    try:
        save_and_sync_user(security_user)
    except UserOutOfSync:
        return error_response(message=CommonMsg.out_of_sync)
    current_app.stats.count(name='security_account_terminated', value=1)
    current_app.logger.info('Terminated user account')
    # email the user
    try:
        send_termination_mail(security_user)
    except MsgTaskFailed as e:
        # Best-effort: a failed mail does not abort the termination.
        current_app.logger.error(f'Failed to send account termination mail: {e}')
        current_app.logger.error('Account will be terminated successfully anyway.')
    current_app.logger.debug(f'Logging out (terminated) user {user}')
    # Log the user out at the IdP and land on the termination page.
    return redirect(f'{current_app.conf.logout_endpoint}?next={current_app.conf.termination_redirect_url}')
@security_views.route('/remove-nin', methods=['POST'])
@UnmarshalWith(NINRequestSchema)
@MarshalWith(NINResponseSchema)
@require_user
def remove_nin(user, nin):
    """Remove an (unverified) NIN from the logged-in user."""
    security_user = SecurityUser.from_user(user, current_app.private_userdb)
    current_app.logger.info('Removing NIN from user')
    current_app.logger.debug('NIN: {}'.format(nin))
    existing = security_user.nins.find(nin)
    if existing and existing.is_verified:
        # A verified NIN must never be removed through this endpoint.
        current_app.logger.info('NIN verified. Will not remove it.')
        return error_response(message=SecurityMsg.rm_verified)
    try:
        remove_nin_from_user(security_user, nin)
    except AmTaskFailed as e:
        current_app.logger.error('Removing nin from user failed')
        current_app.logger.debug(f'NIN: {nin}')
        current_app.logger.error('{}'.format(e))
        return error_response(message=CommonMsg.temp_problem)
    return success_response(
        payload=dict(nins=security_user.nins.to_list_of_dicts()), message=SecurityMsg.rm_success
    )
@security_views.route('/add-nin', methods=['POST'])
@UnmarshalWith(NINRequestSchema)
@MarshalWith(NINResponseSchema)
@require_user
def add_nin(user, nin):
    """Add an unverified NIN to the logged-in user."""
    security_user = SecurityUser.from_user(user, current_app.private_userdb)
    # Fixed copy-pasted log message (previously said "Removing NIN").
    current_app.logger.info('Adding NIN to user')
    current_app.logger.debug('NIN: {}'.format(nin))
    nin_obj = security_user.nins.find(nin)
    if nin_obj:
        current_app.logger.info('NIN already added.')
        return error_response(message=SecurityMsg.already_exists)
    try:
        nin_element = NinProofingElement(number=nin, created_by='security', is_verified=False)
        proofing_state = NinProofingState(id=None, eppn=security_user.eppn, nin=nin_element, modified_ts=None)
        # NOTE(review): passes `user` (not `security_user`) here — looks
        # intentional since user_class=SecurityUser converts; confirm.
        add_nin_to_user(user, proofing_state, user_class=SecurityUser)
        return success_response(
            payload=dict(nins=security_user.nins.to_list_of_dicts()), message=SecurityMsg.add_success
        )
    except AmTaskFailed as e:
        current_app.logger.error('Adding nin to user failed')
        current_app.logger.debug(f'NIN: {nin}')
        current_app.logger.error('{}'.format(e))
        return error_response(message=CommonMsg.temp_problem)
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
This module presents an interface to use the glm implemented in
nipy.algorithms.statistics.models.regression.
It contains the GLM and contrast classes that are meant to be the main objects
of fMRI data analyses.
It is important to note that the GLM is meant as a one-session General Linear
Model. But inference can be performed on multiple sessions by computing fixed
effects on contrasts
Examples
--------
>>> import numpy as np
>>> from nipy.modalities.fmri.glm import GeneralLinearModel
>>> n, p, q = 100, 80, 10
>>> X, Y = np.random.randn(p, q), np.random.randn(p, n)
>>> cval = np.hstack((1, np.zeros(9)))
>>> model = GeneralLinearModel(X)
>>> model.fit(Y)
>>> z_vals = model.contrast(cval).z_score() # z-transformed statistics
Example of fixed effects statistics across two contrasts
>>> cval_ = cval.copy()
>>> np.random.shuffle(cval_)
>>> z_ffx = (model.contrast(cval) + model.contrast(cval_)).z_score()
"""
import numpy as np
from warnings import warn
import scipy.stats as sps
from nibabel import load, Nifti1Image
from nipy.labs.mask import compute_mask_sessions
from nipy.algorithms.statistics.models.regression import OLSModel, ARModel
from nipy.algorithms.statistics.utils import multiple_mahalanobis, z_score
from nipy.core.api import is_image
from nipy.testing.decorators import skip_doctest_if
from nipy.utils import HAVE_EXAMPLE_DATA
# Numerical guards: DEF_TINY avoids division by zero in variance terms,
# DEF_DOFMAX caps the degrees of freedom passed to scipy distributions.
DEF_TINY = 1e-50
DEF_DOFMAX = 1e10
def data_scaling(Y):
    """Scale the data columnwise to percent signal change of baseline.

    Parameters
    ----------
    Y: array of shape(n_time_points, n_voxels)
       the input data

    Returns
    -------
    Y: array of shape (n_time_points, n_voxels),
       the data after mean-scaling, de-meaning and multiplication by 100
    mean : array of shape (n_voxels,)
       the data mean
    """
    mean = Y.mean(0)
    scaled = 100 * (Y / mean - 1)
    return scaled, mean
class GeneralLinearModel(object):
    """ This class handles the so-called General Linear Model

    Most of what it does is in the fit() and contrast() methods:
    fit() performs the standard two-step ('ols' then 'ar1') GLM fitting;
    contrast() returns a Contrast instance, yielding statistics and p-values.
    The link between fit() and contrast() is done via two class members:

    results_ : dictionary of nipy.algorithms.statistics.models.
              regression.RegressionResults instances,
              describing results of a GLM fit

    labels_ : array of shape(n_voxels),
             labels that associate each voxel with a results key
    """

    def __init__(self, X):
        """
        Parameters
        ----------
        X : array of shape (n_time_points, n_regressors)
           the design matrix
        """
        self.X = X
        self.labels_ = None
        self.results_ = None

    def fit(self, Y, model='ar1', steps=100):
        """GLM fitting of a dataset using 'ols' regression or the two-pass

        Parameters
        ----------
        Y : array of shape(n_time_points, n_samples)
            the fMRI data
        model : {'ar1', 'ols'}, optional
            the temporal variance model. Defaults to 'ar1'
        steps : int, optional
            Maximum number of discrete steps for the AR(1) coef histogram
        """
        if model not in ['ar1', 'ols']:
            raise ValueError('Unknown model')
        if Y.ndim == 1:
            Y = Y[:, np.newaxis]
        if Y.shape[0] != self.X.shape[0]:
            raise ValueError('Response and predictors are inconsistent')
        # fit the OLS model
        ols_result = OLSModel(self.X).fit(Y)
        # compute and discretize the AR1 coefs (lag-1 autocorrelation of
        # the residuals, binned into `steps` levels so voxels sharing a
        # level share one AR model)
        ar1 = ((ols_result.resid[1:] * ols_result.resid[:-1]).sum(0) /
               (ols_result.resid ** 2).sum(0))
        # `int` replaces the deprecated alias np.int (same behavior)
        ar1 = (ar1 * steps).astype(int) * 1. / steps
        # Fit the AR model according to current AR(1) estimates
        if model == 'ar1':
            self.results_ = {}
            self.labels_ = ar1
            # fit one AR model per discretized AR(1) value
            for val in np.unique(self.labels_):
                m = ARModel(self.X, val)
                self.results_[val] = m.fit(Y[:, self.labels_ == val])
        else:
            self.labels_ = np.zeros(Y.shape[1])
            self.results_ = {0.0: ols_result}

    def get_beta(self, column_index=None):
        """Accessor for the best linear unbiased estimate of model parameters

        Parameters
        ----------
        column_index: int or array-like of int or None, optional
            The indices of the columns to be returned. if None (default
            behaviour), the whole vector is returned

        Returns
        -------
        beta: array of shape (n_columns, n_voxels)
            the beta
        """
        # `is None` (not `== None`): column_index may be an ndarray, for
        # which `==` is an elementwise comparison
        if column_index is None:
            column_index = np.arange(self.X.shape[1])
        if not hasattr(column_index, '__iter__'):
            column_index = [int(column_index)]
        n_beta = len(column_index)
        # build the beta array (`float` replaces deprecated np.float)
        beta = np.zeros((n_beta, self.labels_.size), dtype=float)
        for l in self.results_.keys():
            beta[:, self.labels_ == l] = self.results_[l].theta[column_index]
        return beta

    def get_mse(self):
        """Accessor for the mean squared error of the model

        Returns
        -------
        mse: array of shape (n_voxels)
            the sum of square error per voxel
        """
        mse = np.zeros(self.labels_.size, dtype=float)
        for l in self.results_.keys():
            mse[self.labels_ == l] = self.results_[l].MSE
        return mse

    def get_logL(self):
        """Accessor for the log-likelihood of the model

        Returns
        -------
        logL: array of shape (n_voxels,)
            the log-likelihood per voxel
        """
        logL = np.zeros(self.labels_.size, dtype=float)
        for l in self.results_.keys():
            logL[self.labels_ == l] = self.results_[l].logL
        return logL

    def contrast(self, con_val, contrast_type=None):
        """ Specify and estimate a linear contrast

        Parameters
        ----------
        con_val : numpy.ndarray of shape (p) or (q, p)
            where q = number of contrast vectors and p = number of regressors
        contrast_type : {None, 't', 'F' or 'tmin-conjunction'}, optional
            type of the contrast. If None, then defaults to 't' for 1D
            `con_val` and 'F' for 2D `con_val`

        Returns
        -------
        con: Contrast instance
        """
        # `is None`: labels_ is an ndarray once fitted, so `== None`
        # would be an elementwise comparison
        if self.labels_ is None or self.results_ is None:
            raise ValueError('The model has not been estimated yet')
        con_val = np.asarray(con_val)
        if con_val.ndim == 1:
            dim = 1
        else:
            dim = con_val.shape[0]
        if contrast_type is None:
            if dim == 1:
                contrast_type = 't'
            else:
                contrast_type = 'F'
        if contrast_type not in ['t', 'F', 'tmin-conjunction']:
            raise ValueError('Unknown contrast type: %s' % contrast_type)
        effect_ = np.zeros((dim, self.labels_.size), dtype=float)
        var_ = np.zeros((dim, dim, self.labels_.size), dtype=float)
        if contrast_type == 't':
            for l in self.results_.keys():
                resl = self.results_[l].Tcontrast(con_val)
                effect_[:, self.labels_ == l] = resl.effect.T
                var_[:, :, self.labels_ == l] = (resl.sd ** 2).T
        else:
            for l in self.results_.keys():
                resl = self.results_[l].Fcontrast(con_val)
                effect_[:, self.labels_ == l] = resl.effect
                var_[:, :, self.labels_ == l] = resl.covariance
        # BUG FIX: dof_ was only assigned inside the F branch, so t
        # contrasts raised NameError. The residual dof is the same for
        # every sub-model, so take it from the last one visited.
        dof_ = self.results_[l].df_resid
        return Contrast(effect=effect_, variance=var_, dof=dof_,
                        contrast_type=contrast_type)
class Contrast(object):
""" The contrast class handles the estimation of statistical contrasts
on a given model: student (t), Fisher (F), conjunction (tmin-conjunction).
The important feature is that it supports addition,
thus opening the possibility of fixed-effects models.
The current implementation is meant to be simple,
and could be enhanced in the future on the computational side
(high-dimensional F constrasts may lead to memory breakage).
Notes
-----
The 'tmin-conjunction' test is the valid conjunction test discussed in:
Nichols T, Brett M, Andersson J, Wager T, Poline JB. Valid conjunction
inference with the minimum statistic. Neuroimage. 2005 Apr 15;25(3):653-60.
This test gives the p-value of the z-values under the conjunction null,
i.e. the union of the null hypotheses for all terms.
"""
def __init__(self, effect, variance, dof=DEF_DOFMAX, contrast_type='t',
tiny=DEF_TINY, dofmax=DEF_DOFMAX):
"""
Parameters
==========
effect: array of shape (contrast_dim, n_voxels)
the effects related to the contrast
variance: array of shape (contrast_dim, contrast_dim, n_voxels)
the associated variance estimate
dof: scalar, the degrees of freedom
contrast_type: string to be chosen among 't' and 'F'
"""
if variance.ndim != 3:
raise ValueError('Variance array should have 3 dimensions')
if effect.ndim != 2:
raise ValueError('Variance array should have 2 dimensions')
if variance.shape[0] != variance.shape[1]:
raise ValueError('Inconsistent shape for the variance estimate')
if ((variance.shape[1] != effect.shape[0]) or
(variance.shape[2] != effect.shape[1])):
raise ValueError('Effect and variance have inconsistent shape')
self.effect = effect
self.variance = variance
self.dof = float(dof)
self.dim = effect.shape[0]
if self.dim > 1 and contrast_type is 't':
print 'Automatically converted multi-dimensional t to F contrast'
contrast_type = 'F'
self.contrast_type = contrast_type
self.stat_ = None
self.p_value_ = None
self.baseline = 0
self.tiny = tiny
self.dofmax = dofmax
def stat(self, baseline=0.0):
""" Return the decision statistic associated with the test of the
null hypothesis: (H0) 'contrast equals baseline'
Parameters
==========
baseline: float, optional,
Baseline value for the test statistic
"""
self.baseline = baseline
# Case: one-dimensional contrast ==> t or t**2
if self.dim == 1:
# avoids division by zero
stat = (self.effect - baseline) / np.sqrt(
np.maximum(self.variance, self.tiny))
if self.contrast_type == 'F':
stat = stat ** 2
# Case: F contrast
elif self.contrast_type == 'F':
# F = |t|^2/q , |t|^2 = e^t inv(v) e
if self.effect.ndim == 1:
self.effect = self.effect[np.newaxis]
if self.variance.ndim == 1:
self.variance = self.variance[np.newaxis, np.newaxis]
stat = (multiple_mahalanobis(self.effect - baseline,
self.variance) / self.dim)
# Case: tmin (conjunctions)
elif self.contrast_type == 'tmin-conjunction':
vdiag = self.variance.reshape([self.dim ** 2] + list(
self.variance.shape[2:]))[:: self.dim + 1]
stat = (self.effect - baseline) / np.sqrt(
np.maximum(vdiag, self.tiny))
stat = stat.min(0)
# Unknwon stat
else:
raise ValueError('Unknown statistic type')
self.stat_ = stat
return stat.ravel()
def p_value(self, baseline=0.0):
"""Return a parametric estimate of the p-value associated
with the null hypothesis: (H0) 'contrast equals baseline'
Parameters
==========
baseline: float, optional,
Baseline value for the test statistic
"""
if self.stat_ == None or not self.baseline == baseline:
self.stat_ = self.stat(baseline)
# Valid conjunction as in Nichols et al, Neuroimage 25, 2005.
if self.contrast_type in ['t', 'tmin-conjunction']:
p = sps.t.sf(self.stat_, np.minimum(self.dof, self.dofmax))
elif self.contrast_type == 'F':
p = sps.f.sf(self.stat_, self.dim, np.minimum(
self.dof, self.dofmax))
else:
raise ValueError('Unknown statistic type')
self.p_value_ = p
return p
def z_score(self, baseline=0.0):
"""Return a parametric estimation of the z-score associated
with the null hypothesis: (H0) 'contrast equals baseline'
Parameters
==========
baseline: float, optional,
Baseline value for the test statistic
"""
if self.p_value_ == None or not self.baseline == baseline:
self.p_value_ = self.p_value(baseline)
# Avoid inf values kindly supplied by scipy.
self.z_score_ = z_score(self.p_value_)
return self.z_score_
def __add__(self, other):
"""Addition of selfwith others, Yields an new Contrast instance
This should be used only on indepndent contrasts"""
if self.contrast_type != other.contrast_type:
raise ValueError(
'The two contrasts do not have consistant type dimensions')
if self.dim != other.dim:
raise ValueError(
'The two contrasts do not have compatible dimensions')
effect_ = self.effect + other.effect
variance_ = self.variance + other.variance
dof_ = self.dof + other.dof
return Contrast(effect=effect_, variance=variance_, dof=dof_,
contrast_type=self.contrast_type)
def __rmul__(self, scalar):
"""Multiplication of the contrast by a scalar"""
scalar = float(scalar)
effect_ = self.effect * scalar
variance_ = self.variance * scalar ** 2
dof_ = self.dof
return Contrast(effect=effect_, variance=variance_, dof=dof_,
contrast_type=self.contrast_type)
__mul__ = __rmul__
def __div__(self, scalar):
return self.__rmul__(1 / float(scalar))
class FMRILinearModel(object):
    """ This class is meant to handle GLMs from a higher-level perspective
    i.e. by taking images as input and output
    """

    @skip_doctest_if(not HAVE_EXAMPLE_DATA)
    def __init__(self, fmri_data, design_matrices, mask='compute',
                 m=0.2, M=0.9, threshold=.5):
        """Load the data

        Parameters
        ----------
        fmri_data : Image or str or sequence of Images / str
            fmri images / paths of the (4D) fmri images
        design_matrices : arrays or str or sequence of arrays / str
            design matrix arrays / paths of .npz files
        mask : str or Image or None, optional
            string can be 'compute' or a path to an image
            image is an input (assumed binary) mask image(s),
            if 'compute', the mask is computed
            if None, no masking will be applied
        m, M, threshold: float, optional
            parameters of the masking procedure. Should be within [0, 1]

        Notes
        -----
        The only computation done here is mask computation (if required)

        Examples
        --------
        We need the example data package for this example

        >>> from nipy.utils import example_data
        >>> from nipy.modalities.fmri.glm import FMRILinearModel
        >>> fmri_files = [example_data.get_filename('fiac', 'fiac0', run)
        ...     for run in ['run1.nii.gz', 'run2.nii.gz']]
        >>> design_files = [example_data.get_filename('fiac', 'fiac0', run)
        ...     for run in ['run1_design.npz', 'run2_design.npz']]
        >>> mask = example_data.get_filename('fiac', 'fiac0', 'mask.nii.gz')
        >>> multi_session_model = FMRILinearModel(fmri_files, design_files, mask)
        >>> multi_session_model.fit()
        >>> z_image, = multi_session_model.contrast([np.eye(13)[1]] * 2)

        The number of voxels with p < 0.001

        >>> np.sum(z_image.get_data() > 3.09)
        671
        """
        # normalize single run / single design inputs into lists
        if isinstance(fmri_data, basestring) or hasattr(fmri_data, 'get_data'):
            fmri_data = [fmri_data]
        if isinstance(design_matrices, (basestring, np.ndarray)):
            design_matrices = [design_matrices]
        if len(fmri_data) != len(design_matrices):
            raise ValueError('Incompatible number of fmri runs and '
                             'design matrices were provided')
        self.fmri_data, self.design_matrices = [], []
        self.glms, self.means = [], []

        # load the fmri data (paths are loaded, images kept as-is)
        for fmri_run in fmri_data:
            if isinstance(fmri_run, basestring):
                self.fmri_data.append(load(fmri_run))
            else:
                self.fmri_data.append(fmri_run)
        # set self.affine as the affine of the first image
        self.affine = self.fmri_data[0].get_affine()

        # load the designs (.npz paths yield their first stored array)
        for design_matrix in design_matrices:
            if isinstance(design_matrix, basestring):
                loaded = np.load(design_matrix)
                self.design_matrices.append(loaded[loaded.files[0]])
            else:
                self.design_matrices.append(design_matrix)

        # load the mask
        if mask == 'compute':
            mask = compute_mask_sessions(
                fmri_data, m=m, M=M, cc=1, threshold=threshold, opening=0)
            self.mask = Nifti1Image(mask.astype(np.int8), self.affine)
        elif mask is None:
            # BUG FIX: was `mask == None`, which is an elementwise
            # comparison when mask is an array; identity test intended.
            mask = np.ones(self.fmri_data[0].shape[:3]).astype(np.int8)
            self.mask = Nifti1Image(mask, self.affine)
        else:
            if isinstance(mask, basestring):
                self.mask = load(mask)
            else:
                self.mask = mask

    def fit(self, do_scaling=True, model='ar1', steps=100):
        """ Load the data, mask the data, scale the data, fit the GLM

        Parameters
        ----------
        do_scaling : bool, optional
            if True, the data should be scaled as percent of voxel mean
        model : string, optional,
            the kind of glm ('ols' or 'ar1') you want to fit to the data
        steps : int, optional
            in case of an ar1, discretization of the ar1 parameter
        """
        from nibabel import Nifti1Image
        # get the mask as an array (`bool` replaces deprecated np.bool)
        mask = self.mask.get_data().astype(bool)
        self.glms, self.means = [], []
        for fmri, design_matrix in zip(self.fmri_data, self.design_matrices):
            if do_scaling:
                # scale the data to percent of baseline
                data, mean = data_scaling(fmri.get_data()[mask].T)
            else:
                data, mean = (fmri.get_data()[mask].T,
                              fmri.get_data()[mask].T.mean(0))
            # keep a mean image per session for later inspection
            mean_data = mask.astype(np.int16)
            mean_data[mask] = mean
            self.means.append(Nifti1Image(mean_data, self.affine))
            # fit the GLM
            glm = GeneralLinearModel(design_matrix)
            glm.fit(data, model, steps)
            self.glms.append(glm)

    def contrast(self, contrasts, con_id='', contrast_type=None, output_z=True,
                 output_stat=False, output_effects=False,
                 output_variance=False):
        """ Estimation of a contrast as fixed effects on all sessions

        Parameters
        ----------
        contrasts : array or list of arrays of shape (n_col) or (n_dim, n_col)
            where ``n_col`` is the number of columns of the design matrix,
            numerical definition of the contrast (one array per run)
        con_id : str, optional
            name of the contrast
        contrast_type : {'t', 'F', 'tmin-conjunction'}, optional
            type of the contrast
        output_z : bool, optional
            Return or not the corresponding z-stat image
        output_stat : bool, optional
            Return or not the base (t/F) stat image
        output_effects : bool, optional
            Return or not the corresponding effect image
        output_variance : bool, optional
            Return or not the corresponding variance image

        Returns
        -------
        output_images : list of nibabel images
            The desired output images
        """
        if self.glms == []:
            raise ValueError('first run fit() to estimate the model')
        if isinstance(contrasts, np.ndarray):
            contrasts = [contrasts]
        if len(contrasts) != len(self.glms):
            raise ValueError(
                'contrasts must be a sequence of %d session contrasts' %
                len(self.glms))
        # accumulate per-session contrasts as a fixed-effects sum
        contrast_ = None
        for i, (glm, con) in enumerate(zip(self.glms, contrasts)):
            if np.all(con == 0):
                warn('Contrast for session %d is null' % i)
            elif contrast_ is None:
                contrast_ = glm.contrast(con, contrast_type)
            else:
                contrast_ = contrast_ + glm.contrast(con, contrast_type)
        # ROBUSTNESS FIX: if every session contrast was null, we used to
        # fall through to an opaque AttributeError on None below.
        if contrast_ is None:
            raise ValueError('All session contrasts provided were null')
        if output_z or output_stat:
            # compute the contrast and stat
            contrast_.z_score()

        # Prepare the returned images
        mask = self.mask.get_data().astype(bool)
        do_outputs = [output_z, output_stat, output_effects, output_variance]
        estimates = ['z_score_', 'stat_', 'effect', 'variance']
        descrips = ['z statistic', 'Statistical value', 'Estimated effect',
                    'Estimated variance']
        dims = [1, 1, contrast_.dim, contrast_.dim ** 2]
        # BUG FIX: n_vox was read from contrast_.z_score_, which only
        # exists when output_z/output_stat was requested; the effect
        # array always exists and has the same number of voxels.
        n_vox = contrast_.effect.shape[-1]
        output_images = []
        for (do_output, estimate, descrip, dim) in zip(
                do_outputs, estimates, descrips, dims):
            if do_output:
                if dim > 1:
                    result_map = np.tile(
                        mask.astype(float)[:, :, :, np.newaxis], dim)
                    result_map[mask] = np.reshape(
                        getattr(contrast_, estimate).T, (n_vox, dim))
                else:
                    result_map = mask.astype(float)
                    result_map[mask] = np.squeeze(
                        getattr(contrast_, estimate))
                output = Nifti1Image(result_map, self.affine)
                output.get_header()['descrip'] = (
                    '%s associated with contrast %s' % (descrip, con_id))
                output_images.append(output)
        return output_images
| |
#!/usr/bin/env python
"""
File contains functions to compute counts for constructing Markov
chains for the dynamics of edges on a two-layer multiplex network.
"""
import numpy as np
import networkx as nx
import logging
# Set up module-level logging: the logger itself records everything
# (DEBUG and up), but the attached console handler only emits ERROR and
# above, so callers can attach their own handlers for finer output.
logger = logging.getLogger("multiplex_markov_chain")
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def node_counts(g1, g2):
    """Return a dictionary of node-based transition counts.

    The state of a node is a binary vector indicating above/below mean
    degree in each layer.  Keys of the result are ``(state_t, state_t1)``
    pairs and values are the number of nodes making that transition.

    Parameters
    ----------
    g1 : list of graphs describing the multiplex at one time step
    g2 : list of graphs describing the multiplex at the next time step

    Returns
    -------
    counts : dict
        Maps ``(state_t, state_t1)`` tuples to integer counts.
    """
    counts = dict()
    num_layers = len(g1)
    # Only nodes present at both time steps can make a transition.
    node_set = get_node_set(g1, g2, method="intersection")
    # dict(...) keeps this working on both networkx 1.x (plain dict) and
    # 2.x (DegreeView); list(...) is needed on Python 3, where np.mean
    # cannot average a dict values view.  Computing the degree mappings
    # once also avoids re-querying the graphs for every node below.
    degrees1 = [dict(nx.degree(g1[k])) for k in range(num_layers)]
    degrees2 = [dict(nx.degree(g2[k])) for k in range(num_layers)]
    mean_degrees1 = [np.mean(list(d.values())) for d in degrees1]
    mean_degrees2 = [np.mean(list(d.values())) for d in degrees2]
    for node in node_set:
        s1 = tuple(int(degrees1[k][node] > mean_degrees1[k]) for k in range(num_layers))
        s2 = tuple(int(degrees2[k][node] > mean_degrees2[k]) for k in range(num_layers))
        counts[(s1, s2)] = counts.get((s1, s2), 0) + 1
    return counts
def get_node_set(g1, g2, method="union"):
    """Return the nodes to consider when counting Markov-chain transitions.

    g1 and g2 are n-tuples of graphs, where n is the number of layers;
    g1[k] describes the k-th layer of the multiplex at time t and g2[k]
    the same layer at time t+1.  The keyword argument `method` selects
    how the two node populations are combined: "intersection" keeps only
    nodes present at both times, anything else takes the union.
    """
    num_layers = len(g1)
    nodes_at_t = set()
    nodes_at_t1 = set()
    # Pool the node labels across every layer of each snapshot.
    for k in range(num_layers):
        nodes_at_t.update(g1[k].nodes())
        nodes_at_t1.update(g2[k].nodes())
    if method == "intersection":
        combined = nodes_at_t & nodes_at_t1
    else:
        combined = nodes_at_t | nodes_at_t1
    return list(combined)
def get_counts(g1, g2, method):
    """Compute the counts for each dyad transition from time t to t+1.

    Parameters
    -----------
    g1 : list of nx graph objects representing the multiplex at time t
    g2 : list of nx graph objects representing the multiplex at time (t+1)
    method : When the set of nodes in g1 is not the same as g2, the
        `method` to be used to find a common set of nodes. Accepts the
        values "union" or "intersection".

    Returns
    -------
    counts : dict
        Maps (prev_state, current_state) pairs to the number of dyads
        making that transition; non-existence of an edge is coded as 0.
        Every possible transition appears, zero-filled when unobserved.
    """
    import itertools
    # get the set of nodes to iterate over
    node_set = get_node_set(g1, g2, method)
    num_layers = len(g1)
    directed = g1[0].is_directed()
    counts = dict()
    # Pre-fill every possible transition with 0 so unobserved transitions
    # are reported explicitly.  The per-layer dyad state is a pair of 0/1
    # flags (one per direction) for directed graphs and a single 0/1 flag
    # otherwise.  NOTE: the previous version hard-coded the key shape of a
    # 2-layer undirected multiplex, which produced meaningless zero-keys
    # (and no zero-fill) for directed input or other layer counts.
    if directed:
        layer_states = itertools.product(
            itertools.product(range(2), repeat=2), repeat=num_layers)
    else:
        layer_states = itertools.product(range(2), repeat=num_layers)
    states = list(layer_states)
    for transition in itertools.product(states, repeat=2):
        counts[transition] = 0
    if directed:
        for node1 in node_set:
            for node2 in node_set:  # loop over all ordered pairs
                # state[k] codes both directions of the dyad in layer k:
                # (0,0), (0,1), (1,0) or (1,1)
                prev_state = tuple((int(g1[k].has_edge(node1, node2)), int(g1[k].has_edge(node2, node1))) for k in range(num_layers))
                current_state = tuple((int(g2[k].has_edge(node1, node2)), int(g2[k].has_edge(node2, node1))) for k in range(num_layers))
                counts[(prev_state, current_state)] = counts.get((prev_state, current_state), 0) + 1
    else:
        for index, node1 in enumerate(node_set):
            for node2 in node_set[index + 1:]:  # loop over all unordered pairs
                prev_state = tuple(int(g1[k].has_edge(node1, node2)) for k in range(num_layers))
                current_state = tuple(int(g2[k].has_edge(node1, node2)) for k in range(num_layers))
                counts[(prev_state, current_state)] = counts.get((prev_state, current_state), 0) + 1
    return counts
def compute_counts_from_file(fname_edges, fname_nodes=None, method=None):
    """Read edge data from file and return transition counts per time step.

    Parameters
    -----------
    fname_edges : path to a csv file of edge information in the
        following format.
        Time,Node1,Node2,Edge-Type1,Edge-Type2
        Time: Integer indicating the time the pair of nodes
        Edge-Type1 : Binary value. 0 (1) indicates absence (presence) of
        an edge of type 1
        Edge-Type2 : Binary value. 0 (1) indicates absence (presence) of
        an edge of type 2
        The file could be an edge-list, i.e., a list of only the edges
        present in the network or a list of all possible node-pairs with
        information of edges that are absent.
    fname_nodes : optional, path to a csv file with node information
        with the following format. Time,Node
    method : method to use when the set of nodes between two time
        steps are not the same. Accepts the strings `union` or
        `intersection`.  When not specified, "intersection" is used if a
        node file is given, "union" otherwise.

    Assumptions:
    - The values for Time above are non-decreasing.
    - When there is a change, time increases by 1.

    Returns
    --------
    counts : dictionary with time steps as key and the dictionary of
        counts for the transitions as the value.
    """
    # When method is not specified, if node file is given use the
    # intersection between two time steps, else use the union.
    if method is None:
        method = "union" if fname_nodes is None else "intersection"
    counts = {}
    prevTimeStep = None
    timeStepToProcess = None
    g_old = None       # multiplex at the previous completed time step
    g_new = None       # multiplex currently being built
    nodeLine = None
    fEdges = open(fname_edges, "r")
    fNodes = None
    try:
        fEdges.readline()  # skip header
        if fname_nodes is not None:
            fNodes = open(fname_nodes, "r")
            fNodes.readline()  # skip header
            nodeLine = fNodes.readline().rstrip()
            if nodeLine:
                timeStep_nodes, node = nodeLine.split(",")
            else:
                nodeLine = None
        for line in fEdges:
            line = line.rstrip()
            edge = line.split(",")
            if len(edge) != 5:
                logger.warning("Line not in proper format. Ignoring %s", line)
                continue
            timeStep, n1, n2, eA, eB = edge
            if timeStep != prevTimeStep:
                if prevTimeStep is not None:
                    if timeStepToProcess is None:
                        timeStepToProcess = prevTimeStep
                    else:
                        # Two graphs are fully built. Get counts for them.
                        logger.info("Getting counts for %s-->%s", g_old.graph['time'], g_new.graph['time'])
                        c = get_counts(g_old, g_new, method=method)
                        counts[timeStepToProcess] = c
                        timeStepToProcess = prevTimeStep
                    # New time step has started. Assign old graph to the new one.
                    g_old = g_new
                # Start building a new graph for the current time.
                g_new = nx.Graph(time=timeStep)
                # add nodes for this timeStep from the node file, if any
                if fname_nodes is not None:
                    while nodeLine and timeStep_nodes == timeStep:
                        g_new.add_node(node)
                        nodeLine = fNodes.readline().rstrip()
                        if nodeLine:
                            timeStep_nodes, node = nodeLine.split(",")
                        else:
                            nodeLine = None
            # another edge in the graph, process and store the state
            # assuming inputs are "nice"
            g_new.add_nodes_from([n1, n2])
            try:
                edgeState = 2 * int(eB) + int(eA)
            except ValueError:
                logger.error("Edge '%s' cannot produce an integer valued state. Please check the input.", line)
                # Skip the malformed line: the previous code fell through
                # here and used an unbound or stale edgeState.
                continue
            if g_new.has_edge(n1, n2) and g_new.edge[n1][n2]["state"] != edgeState:
                logger.warning("Graph already has edge %s--%s in state %s", n1, n2, g_new.edge[n1][n2]["state"])
            g_new.add_edge(n1, n2, state=edgeState)
            prevTimeStep = timeStep
        logger.info("Reached end of edge list")
        # Process the final pair of graphs, if at least two time steps were
        # seen (previously this raised NameError on shorter inputs).
        if g_old is not None:
            logger.info("Getting counts for %s-->%s", g_old.graph['time'], g_new.graph['time'])
            counts[timeStepToProcess] = get_counts(g_old, g_new, method)
    finally:
        fEdges.close()
        if fNodes is not None:
            fNodes.close()
    return counts
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class DeploymentsOperations(object):
    """DeploymentsOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: The API version to use for this operation. Constant value: "2017-05-10".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        # msrest service client used to build and send all HTTP requests.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # REST API version sent as the ``api-version`` query parameter.
        self.api_version = "2017-05-10"

        self.config = config
    def _delete_initial(
            self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
        """Send the initial DELETE request for a deployment.

        Helper for :meth:`delete`: issues a single HTTP DELETE and checks
        the status code; long-running polling is handled by the caller.
        Returns a ClientRawResponse only when ``raw`` is True, otherwise
        returns None.
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id so the request can be traced server-side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        # 202 = deletion accepted (asynchronous); 204 = already deleted.
        if response.status_code not in [202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete(
            self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
        """Deletes a deployment from the deployment history.

        A template deployment that is currently running cannot be deleted.
        Deleting a template deployment removes the associated deployment
        operations. Deleting a template deployment does not affect the state of
        the resource group. This is an asynchronous operation that returns a
        status of 202 until the template deployment is successfully deleted.
        The Location response header contains the URI that is used to obtain
        the status of the process. While the process is running, a call to the
        URI in the Location header returns a status of 202. When the process
        finishes, the URI in the Location header returns a status of 204 on
        success. If the asynchronous request failed, the URI in the Location
        header returns an error-level status code.

        :param resource_group_name: The name of the resource group with the
         deployment to delete. The name is case insensitive.
        :type resource_group_name: str
        :param deployment_name: The name of the deployment to delete.
        :type deployment_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An instance of AzureOperationPoller that returns None or
         ClientRawResponse if raw=true
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Send the initial DELETE; polling is set up below.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            deployment_name=deployment_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            return raw_result

        # Construct and send request
        def long_running_send():
            # The initial response seeds the poller.
            return raw_result.response

        def get_long_running_status(status_link, headers=None):
            # Poll the status URL returned by the service, propagating the
            # original client request id for server-side correlation.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)

        def get_long_running_output(response):
            # 202 = still deleting; 204 = deletion finished.
            if response.status_code not in [202, 204]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def check_existence(
            self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
        """Checks whether the deployment exists.

        :param resource_group_name: The name of the resource group with the
         deployment to check. The name is case insensitive.
        :type resource_group_name: str
        :param deployment_name: The name of the deployment to check.
        :type deployment_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: bool or ClientRawResponse if raw=true
        :rtype: bool or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request: existence is probed with HEAD.
        request = self._client.head(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        # 204 = deployment exists, 404 = it does not; anything else is an error.
        if response.status_code not in [204, 404]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = (response.status_code == 204)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    def _create_or_update_initial(
            self, resource_group_name, deployment_name, properties, custom_headers=None, raw=False, **operation_config):
        """Send the initial PUT request for a deployment.

        Helper for :meth:`create_or_update`: wraps ``properties`` in a
        Deployment model, issues a single HTTP PUT and deserializes the
        immediate response; long-running polling is handled by the caller.
        """
        parameters = models.Deployment(properties=properties)

        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'Deployment')

        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        # 200 = updated existing deployment, 201 = created a new one.
        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('DeploymentExtended', response)
        if response.status_code == 201:
            deserialized = self._deserialize('DeploymentExtended', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    def create_or_update(
            self, resource_group_name, deployment_name, properties, custom_headers=None, raw=False, **operation_config):
        """Deploys resources to a resource group.

        You can provide the template and parameters directly in the request or
        link to JSON files.

        :param resource_group_name: The name of the resource group to deploy
         the resources to. The name is case insensitive. The resource group
         must already exist.
        :type resource_group_name: str
        :param deployment_name: The name of the deployment.
        :type deployment_name: str
        :param properties: The deployment properties.
        :type properties:
         ~azure.mgmt.resource.resources.v2017_05_10.models.DeploymentProperties
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An instance of AzureOperationPoller that returns
         DeploymentExtended or ClientRawResponse if raw=true
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.resource.resources.v2017_05_10.models.DeploymentExtended]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Send the initial PUT; polling is set up below.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            deployment_name=deployment_name,
            properties=properties,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            return raw_result

        # Construct and send request
        def long_running_send():
            # The initial response seeds the poller.
            return raw_result.response

        def get_long_running_status(status_link, headers=None):
            # Poll the status URL returned by the service, propagating the
            # original client request id for server-side correlation.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)

        def get_long_running_output(response):
            # 200/201 are the terminal success codes for the PUT.
            if response.status_code not in [200, 201]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = self._deserialize('DeploymentExtended', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def get(
            self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
        """Gets a deployment.

        :param resource_group_name: The name of the resource group. The name
         is case insensitive.
        :type resource_group_name: str
        :param deployment_name: The name of the deployment to get.
        :type deployment_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: DeploymentExtended or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.resource.resources.v2017_05_10.models.DeploymentExtended
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('DeploymentExtended', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    def cancel(
            self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
        """Cancels a currently running template deployment.

        You can cancel a deployment only if the provisioningState is Accepted
        or Running. After the deployment is canceled, the provisioningState is
        set to Canceled. Canceling a template deployment stops the currently
        running template deployment and leaves the resource group partially
        deployed.

        :param resource_group_name: The name of the resource group. The name
         is case insensitive.
        :type resource_group_name: str
        :param deployment_name: The name of the deployment to cancel.
        :type deployment_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request: cancel is a POST action endpoint.
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        # 204 is the only success code; the action returns no body.
        if response.status_code not in [204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def validate(
            self, resource_group_name, deployment_name, properties, custom_headers=None, raw=False, **operation_config):
        """Validates whether the specified template is syntactically correct and
        will be accepted by Azure Resource Manager..

        :param resource_group_name: The name of the resource group the
         template will be deployed to. The name is case insensitive.
        :type resource_group_name: str
        :param deployment_name: The name of the deployment.
        :type deployment_name: str
        :param properties: The deployment properties.
        :type properties:
         ~azure.mgmt.resource.resources.v2017_05_10.models.DeploymentProperties
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: DeploymentValidateResult or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.resource.resources.v2017_05_10.models.DeploymentValidateResult
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        parameters = models.Deployment(properties=properties)

        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/validate'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'Deployment')

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        # Both 200 (valid) and 400 (invalid template) carry a
        # DeploymentValidateResult body; other codes are real errors.
        if response.status_code not in [200, 400]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('DeploymentValidateResult', response)
        if response.status_code == 400:
            deserialized = self._deserialize('DeploymentValidateResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    def export_template(
            self, resource_group_name, deployment_name, custom_headers=None, raw=False, **operation_config):
        """Exports the template used for specified deployment.

        :param resource_group_name: The name of the resource group. The name
         is case insensitive.
        :type resource_group_name: str
        :param deployment_name: The name of the deployment from which to get
         the template.
        :type deployment_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: DeploymentExportResult or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.resource.resources.v2017_05_10.models.DeploymentExportResult
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request: export is a POST action endpoint.
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('DeploymentExportResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    def list_by_resource_group(
            self, resource_group_name, filter=None, top=None, custom_headers=None, raw=False, **operation_config):
        """Get all the deployments for a resource group.

        :param resource_group_name: The name of the resource group with the
         deployments to get. The name is case insensitive.
        :type resource_group_name: str
        :param filter: The filter to apply on the operation. For example, you
         can use $filter=provisioningState eq '{state}'.
        :type filter: str
        :param top: The number of results to get. If null is passed, returns
         all deployments.
        :type top: int
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of DeploymentExtended
        :rtype:
         ~azure.mgmt.resource.resources.v2017_05_10.models.DeploymentExtendedPaged[~azure.mgmt.resource.resources.v2017_05_10.models.DeploymentExtended]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # NOTE(review): `filter` shadows the builtin; kept because generated
        # code mirrors the Azure REST API's $filter query parameter name.
        def internal_paging(next_link=None, raw=False):
            # Page fetcher driven lazily by the Paged iterator below: the
            # first call builds the full URL and query string, follow-up
            # calls simply GET the service-provided nextLink.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters ($filter and $top are optional)
                query_parameters = {}
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int')
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                # nextLink is an absolute URL; no additional query arguments.
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        # The Paged wrapper calls internal_paging as the caller iterates.
        deserialized = models.DeploymentExtendedPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            # raw=True returns a Paged instance that also captures headers.
            header_dict = {}
            client_raw_response = models.DeploymentExtendedPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
| |
import datetime
import re
import binascii
import random
import string
import logging
from django import forms
from django.db import models
from django.forms import fields
from django.utils.encoding import force_text, smart_bytes
import sys
from core.encryption.Factories import FactoryEncryptionServices as efac
from core.encryption.encryptionFieldsBase import encryptionBaseMethods as ebm
log = logging.getLogger(__name__)
class BaseField(models.Field):
    """Base class for encrypted Django model fields.

    Wires the configured encryption service (``aes``) and key-management
    service (``akms``) into a regular ``models.Field``.  Values are encrypted,
    hex-encoded and stored in a widened char column; on the way out they are
    un-hexed, decrypted and stripped of padding.
    """

    def __init__(self, *args, **kwargs):
        # Get the active encryption and key management services, if any
        self.use_encryption = efac.use_encryption()
        self.aes = efac.active_encryption_service() if self.use_encryption else None
        self.akms = efac.active_key_management_service() if self.use_encryption else None
        self.block_size = self.aes.block_size() if self.use_encryption else None
        # Need to adjust the max length supplied in the user's field args to account for
        # cipher block size and padding
        if self.use_encryption:
            user_specified_length = kwargs.get('max_length', 20)
            unique = kwargs.get('unique', False)
            # _max_db_length returns the widened DB column length plus the
            # (possibly adjusted) plaintext limit the user asked for.
            max_length, usl = ebm._max_db_length (unique, user_specified_length, self.block_size, self.aes)
            self.user_specified_max_length = usl
            kwargs['max_length'] = max_length
        models.Field.__init__(self, *args, **kwargs)

    def _is_encrypted(self, value, key, iv):
        '''
        Heuristic check for whether *value* is already an encrypted payload.

        If value contains any non hex symbols or its length is odd, then it was
        not encrypted because the encrypted values are all converted to ascii hex
        before storing in db using the binascii.a2b_hex method which only operates
        on even length values
        '''
        hexValues = True
        # test to see if value is a hexadecimal
        # get rid of extra spaces
        value = value.strip()
        try:
            int(value, 16)
        except ValueError:
            hexValues = False
        if hexValues == False or (len(value) % 2) != 0 :
            return False
        else:
            # Have the encryption service verify if this is encrypted
            return self.aes.is_encrypted(binascii.a2b_hex(value), key, iv)

    def get_decrypted_value (self, value):
        """Converts the input value into the expected Python data type by
        dehexifying and decrypting the value. It raises
        django.core.exceptions.ValidationError if the data can't be converted.
        Returns the converted value.

        Values that are empty/whitespace-only, or that do not look encrypted,
        are returned unchanged.
        """
        if len(value.strip()) == 0:
            return value
        if self.use_encryption:
            key = self.akms.get_key()
            iv = self.akms.get_iv()
            if self._is_encrypted(value, key, iv):
                # dehexify and decrypt
                decrypted_value = self.aes.decrypt(binascii.a2b_hex(value), key, iv)
                # get rid of extra bytes: the split byte marks where the
                # semi-random padding (added at encryption time) begins
                decrypted_value = decrypted_value.split(ebm._split_byte())
                # forcing to string text
                decrypted_value = force_text(decrypted_value[0])
                return decrypted_value
            else:
                return value
        else:
            return value

    def get_encrypted_value (self, value, connection=None, prepared=False):
        '''
        Perform preliminary non-db specific value checks and conversions:
        convert value from unicode to full byte, encrypted string, otherwise encryption
        service may fail according to django docs this is different than str(value)
        and necessary to django internals
        https://docs.djangoproject.com/en/dev/ref/unicode/

        NOTE(review): `connection` and `prepared` are accepted for signature
        compatibility with get_db_prep_value but are not used here.
        '''
        if value is None:
            return value
        if len(value.strip()) == 0:
            return value
        # convert string value to bytes
        value = smart_bytes(value, encoding='utf-8', strings_only=False, errors='strict')
        if self.use_encryption:
            key = self.akms.get_key()
            iv = self.akms.get_iv()
            if value and not self._is_encrypted(value, key, iv):
                # Enforce the caller-visible plaintext limit, not the widened
                # DB column limit.
                if len(value) > self.user_specified_max_length:
                    raise ValueError(
                        "Field value longer than max allowed: {0} > {1}".format(
                            str(len(value)),
                            self.user_specified_max_length
                        )
                    )
                # Pad up to the cipher block size; a split byte separates the
                # real plaintext from the semi-random filler.
                pad_length = ebm._padding_length(value, self.block_size)
                if pad_length > 0:
                    value += ebm._split_byte() + ebm._semi_random_padding_string(pad_length-1)
                value = self.aes.encrypt(value, key, iv)
                if len(value) % 2 != 0:
                    # Some encryption services add a checksum byte which throws off the pad_length
                    value += ebm._split_byte()
                value = binascii.b2a_hex(value)
                # need to decode to string to store in database
                value = value.decode("utf8")
        return value
class EncryptCharField(BaseField):
    """Encrypted drop-in replacement for a Django ``CharField``."""

    def from_db_value(self, value, expression, connection, context):
        # Called in all circumstances when data is loaded from the database.
        return value if value is None else self.get_decrypted_value(value)

    def get_internal_type(self):
        # Ciphertext is stored hex-encoded, so the column is a plain char type.
        return 'CharField'

    def deconstruct(self):
        name, path, args, kwargs = super(EncryptCharField, self).deconstruct()
        # Migrations always record the widened column length.
        kwargs["max_length"] = 255
        return name, path, args, kwargs

    def formfield(self, **kwargs):
        """Return a django.forms.Field instance for this database Field."""
        field_kwargs = {'max_length': self.max_length}
        field_kwargs.update(kwargs)
        return super(EncryptCharField, self).formfield(**field_kwargs)

    def get_db_prep_value(self, value, connection=None, prepared=False):
        # Convert data to encrypted format before it is stored in the database.
        if self.use_encryption:
            key = self.akms.get_key()
            iv = self.akms.get_iv()
            needs_length_check = value and not self._is_encrypted(value, key, iv)
            if needs_length_check and len(value) > self.user_specified_max_length:
                raise ValueError(
                    "Field value longer than max allowed: {0} > {1}".format(
                        str(len(value)),
                        self.user_specified_max_length
                    )
                )
        return self.get_encrypted_value(value, connection=connection, prepared=prepared)
class EncryptDateField(BaseField):
    """Encrypted date field: dates are serialized as 'YYYY:MM:DD' strings,
    encrypted by BaseField, and stored in a char column."""

    def __init__(self, *args, **kwargs):
        kwargs['max_length'] = 10  # YYYY:MM:DD format
        super(EncryptDateField, self).__init__(*args, **kwargs)

    # from_db_value is called in all circumstances
    # when the data is loaded from the database
    def from_db_value(self, value, expression, connection, context):
        """Decrypt a stored value into a datetime.date.

        Returns None (and logs) when the decrypted text is not in the
        expected 'YYYY:MM:DD' form, e.g. legacy values encrypted with an
        older scheme.
        """
        dv = None
        if value in fields.EMPTY_VALUES:
            dv = value
        elif isinstance(value, datetime.date):
            dv = value
        else:
            input_text = self.get_decrypted_value(value)
            try:
                dv = datetime.date(*[int(x) for x in input_text.split(':')])
            except ValueError:
                log.error("Decryption failed - old ehb values need to be updated")
        return dv

    def deconstruct(self):
        name, path, args, kwargs = super(EncryptDateField, self).deconstruct()
        kwargs["max_length"] = 10
        return name, path, args, kwargs

    def get_internal_type(self):
        # Stored as an encrypted hex string, so the column is a CharField.
        return 'CharField'

    def formfield(self, **kwargs):
        defaults = {'widget': forms.DateInput, 'form_class': forms.DateField}
        defaults.update(kwargs)
        return super(EncryptDateField, self).formfield(**defaults)

    # for django custom fields, to_python() is called by deserialization
    # and during the clean() method used from forms
    def to_python(self, value):
        """Convert a raw or encrypted value to a datetime.date.

        Accepts both the form-input separator ('YYYY-MM-DD') and the
        storage separator ('YYYY:MM:DD').
        """
        dv = None
        if value in fields.EMPTY_VALUES:
            dv = value
        elif isinstance(value, datetime.date):
            dv = value
        else:
            input_text = self.get_decrypted_value(value)
            try:
                dv = datetime.date(*[int(x) for x in input_text.split('-')])
            except (ValueError, TypeError):
                # BUG FIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit and hid real bugs; only the
                # parse failures fall through to the ':'-separated format.
                dv = datetime.date(*[int(x) for x in input_text.split(':')])
        return dv

    # method to convert data to encrypted format before it is stored in database
    def get_db_prep_value(self, value, connection=None, prepared=False):
        """Serialize a date to 'YYYY:MM:DD' then encrypt it for storage."""
        if isinstance(value, datetime.date):
            value = value.strftime('%Y:%m:%d')
        return self.get_encrypted_value(value, connection=connection, prepared=prepared)
| |
"""Fixer for __metaclass__ = X -> (metaclass=X) methods.
The various forms of classef (inherits nothing, inherits once, inherints
many) don't parse the same in the CST so we look at ALL classes for
a __metaclass__ and if we find one normalize the inherits to all be
an arglist.
For one-liner classes ('class X: pass') there is no indent/dedent so
we normalize those into having a suite.
Moving the __metaclass__ into the classdef can also cause the class
body to be empty so there is some special casing for that as well.
This fixer also tries very hard to keep original indenting and spacing
in all those corner cases.
"""
# Author: Jack Diederich
# Local imports
from .. import fixer_base
from ..pygram import token
from ..fixer_util import syms, Node, Leaf
def has_metaclass(parent):
    """ we have to check the cls_node without changing it.
        There are two possibilities:
           1)  clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
           2)  clsdef => simple_stmt => expr_stmt => Leaf('__meta')
    """
    for child in parent.children:
        if child.type == syms.suite:
            # Recurse into the class body and stop looking at siblings.
            return has_metaclass(child)
        if child.type != syms.simple_stmt or not child.children:
            continue
        expr = child.children[0]
        if expr.type != syms.expr_stmt or not expr.children:
            continue
        target = expr.children[0]
        if isinstance(target, Leaf) and target.value == '__metaclass__':
            return True
    return False
def fixup_parse_tree(cls_node):
    """ one-line classes don't get a suite in the parse tree so we add
        one to normalize the tree

    Mutates *cls_node* in place; idempotent when a suite already exists.
    """
    for node in cls_node.children:
        if node.type == syms.suite:
            # already in the preferred format, do nothing
            return

    # !%@#! oneliners have no suite node, we have to fake one up
    for i, node in enumerate(cls_node.children):
        if node.type == token.COLON:
            break
    else:
        raise ValueError("No class suite and no ':'!")

    # move everything after the colon into a new suite node
    suite = Node(syms.suite, [])
    while cls_node.children[i+1:]:
        move_node = cls_node.children[i+1]
        suite.append_child(move_node.clone())
        move_node.remove()
    cls_node.append_child(suite)
    # (dead trailing assignment `node = suite` removed — the local was
    # never read after this point)
def fixup_simple_stmt(parent, i, stmt_node):
    """ if there is a semi-colon all the parts count as part of the same
        simple_stmt.  We just want the __metaclass__ part so we move
        everything after the semi-colon into its own simple_stmt node
    """
    # locate the first semicolon, if any; bail out when there is none
    for semi_ind, node in enumerate(stmt_node.children):
        if node.type == token.SEMI: # *sigh*
            break
    else:
        return

    node.remove() # kill the semicolon
    new_expr = Node(syms.expr_stmt, [])
    new_stmt = Node(syms.simple_stmt, [new_expr])
    # move everything that followed the semicolon into the new statement
    while stmt_node.children[semi_ind:]:
        move_node = stmt_node.children[semi_ind]
        new_expr.append_child(move_node.clone())
        move_node.remove()
    # insert the new statement as a sibling right before the original
    parent.insert_child(i, new_stmt)
    # carry the original leading whitespace over to the new first leaf
    new_leaf1 = new_stmt.children[0].children[0]
    old_leaf1 = stmt_node.children[0].children[0]
    new_leaf1.prefix = old_leaf1.prefix
def remove_trailing_newline(node):
    """Strip a trailing NEWLINE token from *node*, if one is present."""
    children = node.children
    if children and children[-1].type == token.NEWLINE:
        children[-1].remove()
def find_metas(cls_node):
    """Yield (suite, index, stmt) for every ``__metaclass__ = X`` statement
    directly inside the class suite, detaching each from any trailing
    semicolon-joined parts and newline as it is found."""
    # find the suite node (Mmm, sweet nodes)
    for node in cls_node.children:
        if node.type == syms.suite:
            break
    else:
        raise ValueError("No class suite!")

    # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
    # list(...) snapshots the pairs because fixup_simple_stmt inserts new
    # children into `node` while we iterate
    for i, simple_node in list(enumerate(node.children)):
        if simple_node.type == syms.simple_stmt and simple_node.children:
            expr_node = simple_node.children[0]
            if expr_node.type == syms.expr_stmt and expr_node.children:
                # Check if the expr_node is a simple assignment.
                left_node = expr_node.children[0]
                if isinstance(left_node, Leaf) and \
                        left_node.value == '__metaclass__':
                    # We found an assignment to __metaclass__.
                    fixup_simple_stmt(node, i, simple_node)
                    remove_trailing_newline(simple_node)
                    yield (node, i, simple_node)
def fixup_indent(suite):
    """ If an INDENT is followed by a thing with a prefix then nuke the prefix
        Otherwise we get in trouble when removing __metaclass__ at suite start
    """
    # depth-first walk using an explicit stack (reversed so pop() visits
    # children left-to-right)
    kids = suite.children[::-1]
    # find the first indent
    while kids:
        node = kids.pop()
        if node.type == token.INDENT:
            break

    # find the first Leaf after the INDENT and clear its prefix
    while kids:
        node = kids.pop()
        if isinstance(node, Leaf) and node.type != token.DEDENT:
            if node.prefix:
                node.prefix = ''
            return
        else:
            # interior Node: descend into its children
            kids.extend(node.children[::-1])
class FixMetaclass(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = """
    classdef<any*>
    """

    def transform(self, node, results):
        """Rewrite ``__metaclass__ = X`` in a class body into the Python 3
        ``class C(..., metaclass=X):`` keyword-argument form."""
        if not has_metaclass(node):
            return

        fixup_parse_tree(node)

        # find metaclasses, keep the last one
        last_metaclass = None
        for suite, i, stmt in find_metas(node):
            last_metaclass = stmt
            stmt.remove()

        text_type = node.children[0].type # always Leaf(nnn, 'class')

        # figure out what kind of classdef we have
        if len(node.children) == 7:
            # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
            #                 0        1       2    3        4    5    6
            if node.children[3].type == syms.arglist:
                arglist = node.children[3]
            # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
            else:
                # single base class: wrap it in an arglist so we can append
                parent = node.children[3].clone()
                arglist = Node(syms.arglist, [parent])
                node.set_child(3, arglist)
        elif len(node.children) == 6:
            # Node(classdef, ['class', 'name', '(',  ')', ':', suite])
            #                 0        1       2     3    4    5
            arglist = Node(syms.arglist, [])
            node.insert_child(3, arglist)
        elif len(node.children) == 4:
            # Node(classdef, ['class', 'name', ':', suite])
            #                 0        1       2    3
            # no parens at all: synthesize '(' arglist ')'
            arglist = Node(syms.arglist, [])
            node.insert_child(2, Leaf(token.RPAR, ')'))
            node.insert_child(2, arglist)
            node.insert_child(2, Leaf(token.LPAR, '('))
        else:
            raise ValueError("Unexpected class definition")

        # now stick the metaclass in the arglist
        meta_txt = last_metaclass.children[0].children[0]
        meta_txt.value = 'metaclass'
        orig_meta_prefix = meta_txt.prefix

        if arglist.children:
            arglist.append_child(Leaf(token.COMMA, ','))
            meta_txt.prefix = ' '
        else:
            meta_txt.prefix = ''

        # compact the expression "metaclass = Meta" -> "metaclass=Meta"
        expr_stmt = last_metaclass.children[0]
        assert expr_stmt.type == syms.expr_stmt
        expr_stmt.children[1].prefix = ''
        expr_stmt.children[2].prefix = ''

        arglist.append_child(last_metaclass)

        fixup_indent(suite)

        # check for empty suite
        if not suite.children:
            # one-liner that was just __metaclass_
            suite.remove()
            pass_leaf = Leaf(text_type, 'pass')
            pass_leaf.prefix = orig_meta_prefix
            node.append_child(pass_leaf)
            node.append_child(Leaf(token.NEWLINE, '\n'))

        elif len(suite.children) > 1 and \
                 (suite.children[-2].type == token.INDENT and
                  suite.children[-1].type == token.DEDENT):
            # there was only one line in the class body and it was __metaclass__
            pass_leaf = Leaf(text_type, 'pass')
            suite.insert_child(-1, pass_leaf)
            suite.insert_child(-1, Leaf(token.NEWLINE, '\n'))
| |
import pygame
import random
import csv
import agent
import numpy as np
import sklearn
# Load agent display names (first CSV column) from disk.
with open("agent-names.csv", "rt") as f:
    names = [row[0] for row in csv.reader(f)]

# Fixed seed so runs are reproducible.
seed = 224225
running = True
random.seed(seed)
class Window:
def __init__(self):
pygame.init()
# init display
self.display_width = 32*25 # default = 32*40
self.display_height = 32*25 # default = 32*30
self.side_display_width = 32*20
self.side_display_height = 32*0
self.display = pygame.display.set_mode((self.display_width+self.side_display_width, self.display_height+self.side_display_height))
# init misc
self.clock = pygame.time.Clock()
self.grid_size = 32
self.total_steps = 1
self.paused = True
self.world_speed = 3
self.world_experience_log_length = 22
self.world_experience_log = []
self.world_experience_stats_length = 65
self.world_experience_stats = [[0]*self.world_experience_log_length]*self.world_experience_stats_length
# init focus
self.focus = None # default = None
self.focus_msg = ("Initiated", 0)
# init focus graphs
self.focus_graphs = False
self.focus_visualize_frequency = 5 # how many steps between
self.focus_visualize_time = 1 # in seconds
# init sidebar values
self.brain_map_frequency = 5 # every n experiences
# init colors
self.WHITE = (255, 255, 255)
self.GRAY = (225, 225, 225)
self.DK_GRAY = (122, 122, 122)
self.BLACK = (0, 0, 0)
self.RED = (255, 0, 0)
self.GREEN = (0, 255, 0)
self.BLUE = (0, 0, 255)
self.YELLOW = (255, 255, 0)
self.LT_RED = (255, 122, 122)
self.LT_GREEN = (122, 255, 122)
# init font
self.FNT_TINY = pygame.font.SysFont("arial", 9)
self.FNT_SMALL = pygame.font.SysFont("arial", 11)
self.FNT_MEDIUM = pygame.font.SysFont("arial", 14)
self.FNT_LARGE = pygame.font.SysFont("arial", 16)
# init dicts
self.colors = {0: self.RED, 1: self.GREEN, 2: self.BLUE}
self.color_names = {0: "red", 1: "green", 2: "blue"}
self.shapes = {0: "square", 1: "circle", 2: "triangle"}
self.sentiments = {-2: "very negative", -1: "negative", 0: "neutral", 1: "positive", 2: "very positive"}
self.sentiment_colors = {-2: self.RED, -1: self.LT_RED, 0: self.GRAY, 1: self.LT_GREEN, 2: self.GREEN}
self.sentiment_colors_alt = {-2: self.RED, -1: self.LT_RED, 0: self.BLACK, 1: self.LT_GREEN, 2: self.GREEN}
self.biomes = {-1: "limbo", 0: "bme0", 1: "bme1", 2: "bme2", 3: "bme3"}
self.directions = {0: "N", 1: "E", 2: "S", 3: "W"}
# init agents
starting_agents = 60 # max = 120
self.agents = []
for i in range(starting_agents):
self.create_agent(i)
    def main(self):
        """Run one frame: simulate agent steps (when unpaused), then redraw.

        Agents are stored as lists; indices used here:
        0=id, 1=name, 2=color, 3=shape, 4=x, 5=y, 6=eye, 7=brain, 8=muscle,
        9=brain_map, 10=experience_history, 11=eye_data, 12=brain_data,
        13=muscle_data.
        """
        self.display.fill(self.WHITE)
        self.mouse_x, self.mouse_y = pygame.mouse.get_pos()
        self.draw_grid(self.grid_size)
        if not self.paused:
            # snapshot of every agent's position at the start of the frame
            agent_pos_list = []
            for agent in self.agents:
                agent_pos_list.append((agent[4], agent[5]))
            for agent in self.agents:
                # stagger agents: each acts only on steps that match its
                # id/history-derived phase, paced by world_speed
                # if (self.total_steps) % max(1, (60//self.world_speed)) == 0:
                if (self.total_steps+(agent[0]*4)+len(agent[10])) % max(1, (180//self.world_speed)) == 0:
                    eye = agent[6]
                    brain = agent[7]
                    muscle = agent[8]
                    # process any experience had
                    agent_contacts = []
                    for a in enumerate(agent_pos_list):
                        # check for contact with another agent
                        if a[1] == (agent[4], agent[5]) and a[0] != agent[0]:
                            other_agent = a[0]
                            other_color = self.agents[other_agent][2]
                            other_shape = self.agents[other_agent][3]
                            other_x = self.agents[other_agent][4]
                            other_y = self.agents[other_agent][5]
                            other_biome = -1
                            # get features
                            X_1 = other_color
                            X_2 = other_shape
                            X_3 = other_biome
                            # decide sentiment (label)
                            sent = self.decide_sentiment(agent, X_1, X_2, X_3)
                            self.log_experience(self.total_steps, agent[0], a[0], (agent[4], agent[5]), sent)
                            # experience
                            self.experience(agent, [X_1, X_2, X_3], sent)
                            if brain.total_experiences() % self.brain_map_frequency == 0:
                                try:
                                    new_sent = agent[7].predict([[X_1, X_2, X_3]])
                                    agent[9][X_1][X_2] = new_sent # push to brain map
                                except:
                                    # NOTE(review): silently skips when the
                                    # model can't predict yet (e.g. unfitted)
                                    pass
                            # print experience
                            if agent[0] == self.focus:
                                print("[@{}] X = [{} {} {}], y = {}".format(self.total_steps, X_1, X_2, X_3 , sent))
                                self.focus_msg = ("#{}, {}, {}, {}: {}".format(other_agent, self.color_names[other_color], self.shapes[other_shape], self.biomes[other_biome], self.sentiments[sent]), sent)
                    # learn from experiences
                    if brain.total_experiences() > 3:
                        brain.learn()
                    if agent[0] == self.focus and self.focus_graphs and brain.total_experiences() % self.focus_visualize_frequency == 0:
                        # self.check_agent(agent[0])
                        brain.visualize("AGENT#{}: {}".format(agent[0], agent[1]), time_limit=self.focus_visualize_time)
                    # EYE: perceive nearby agents
                    agent_list, agent_pos_list = eye.percieve_area(agent[0], self.agents, agent[4], agent[5])
                    agent[11] = agent_pos_list
                    # BRAIN: score the perceived agents
                    agent_sent_list = brain.evaluate_agents(self.agents, agent_list)
                    agent[12] = agent_sent_list
                    # MUSCLE: pick the best-scoring direction (ties broken randomly)
                    sect_scores = muscle.evaluate_directions(agent_pos_list, agent_sent_list)
                    sect_best_list = [i for i, j in enumerate(sect_scores) if j == max(sect_scores)]
                    chosen_sect = random.choice(sect_best_list)
                    agent[13] = sect_scores + [chosen_sect]
                    agent[4], agent[5] = muscle.move_direction(agent[4], agent[5], chosen_sect)
                    # random movement
                    # potential_cells = eye.look(agent[4], agent[5])
                    # move_cell = random.choice(potential_cells)
                    # agent[4], agent[5] = muscle.move(move_cell[0], move_cell[1])
                    # clamp pos to stay one cell inside the visible grid
                    agent[4] = max(1, min(agent[4], (self.display_width//self.grid_size)-2))
                    agent[5] = max(1, min(agent[5], (self.display_height//self.grid_size)-2))
        for agent in self.agents:
            # draw agent (agents are drawn even while paused)
            self.draw_agent(agent[4], agent[5], agent[2], agent[3], agent[0])
        self.display_sidebar(self.display_width+16, 8)
        self.display_controls(self.display_width+16+468-128, 12)
        pygame.display.update()
        self.clock.tick(60)
        if not self.paused:
            self.total_steps += 1
        pygame.display.set_caption("Seed: {}, FPS: {}".format(seed, round(self.clock.get_fps(),2)))
def log_experience(self, step, agent_a, agent_b, location, sentiment):
self.world_experience_log = [[step, agent_a, agent_b, location, sentiment]] + self.world_experience_log
if len(self.world_experience_log) > self.world_experience_log_length:
self.world_experience_log.pop(self.world_experience_log_length)
# add to world experience stats
world_experience_stats_entry = [0]*self.world_experience_log_length
for i, exp in enumerate(self.world_experience_log):
world_experience_stats_entry[i] = exp[4]
self.world_experience_stats = [world_experience_stats_entry] + self.world_experience_stats
if len(self.world_experience_stats) > self.world_experience_stats_length:
self.world_experience_stats.pop(self.world_experience_stats_length)
def experience(self, agent, X, sentiment):
agent[7].process_experience(X, sentiment)
agent[10].append(sentiment)
def decide_sentiment(self, agent, color, shape, biome):
sent = 0
self_color = agent[2]
self_shape = agent[3]
# if self_color == 0 and color == 1:
# sent = -2
# elif self_color == 1 and color == 2:
# sent = -2
# elif self_color == 2 and color == 0:
# sent = -2
# else:
# if self_shape == shape:
# sent = 2
if self_color != color:
sent = -2
else:
sent = 2
# sent = random.randint(-2, 2)
return sent
def draw_grid(self, grid_size):
for col in range((self.display_width//grid_size)+1):
pygame.draw.line(self.display, self.GRAY, (col*grid_size, 0), (col*grid_size, self.display_height))
for row in range((self.display_height//grid_size)+1):
pygame.draw.line(self.display, self.GRAY, (0, row*grid_size), (self.display_width, row*grid_size))
def draw_agent(self, x, y, color, shape, number):
x = x*self.grid_size+(self.grid_size//2)
y = y*self.grid_size+(self.grid_size//2)
self.draw_agent_body(x, y, color, shape)
num = self.FNT_SMALL.render("#{}".format(number), True, self.BLACK)
num_rect = num.get_rect(center=(x,y-20))
self.display.blit(num, num_rect)
name = self.FNT_SMALL.render("{}".format(self.agents[number][1]), True, self.BLACK)
name_rect = name.get_rect(center=(x,y+22))
self.display.blit(name, name_rect)
if number == self.focus:
pygame.draw.rect(self.display, self.YELLOW, (x-32, y-32, 64, 64), 3)
def draw_agent_body(self, x, y, color, shape):
if self.shapes[shape] == "square":
pygame.draw.rect(self.display, self.colors[color], (x-10, y-10, 20, 20))
pygame.draw.rect(self.display, self.BLACK, (x-11, y-11, 22, 22), 3)
elif self.shapes[shape] == "circle":
pygame.draw.circle(self.display, self.colors[color], (x, y), 12)
pygame.draw.circle(self.display, self.BLACK, (x, y), 13, 3)
elif self.shapes[shape] == "triangle":
pygame.draw.polygon(self.display, self.colors[color], ((x, y-10), (x-10, y+10), (x+10, y+10)))
pygame.draw.polygon(self.display, self.BLACK, ((x, y-12), (x-12, y+12), (x+12, y+12)), 3)
def create_agent(self, number):
name = names[number]
color = random.choice([0, 1, 2]) # red, green, blue
shape = random.choice([0, 1, 2]) # square, circle, triangle
start_x = random.randint(0, (self.display_width//self.grid_size)-1)
start_y = random.randint(0, (self.display_height//self.grid_size)-1)
eye = agent.Eye()
brain = agent.Brain()
muscle = agent.Muscle()
brain_map = [[0,0,0],[0,0,0],[0,0,0]]
experience_history = []
eye_data = []
brain_data = []
muscle_data = []
self.agents.append([number, name, color, shape, start_x, start_y, eye, brain, muscle, brain_map, experience_history, eye_data, brain_data, muscle_data])
print("[AGENT#{}] Created! number={}, name={}, color={}({}), shape={}({}), pos=({}, {})".format(number, number, name, color, self.color_names[color], shape, self.shapes[shape], start_x, start_y))
    def display_controls(self, x, y):
        """Draw the PAUSE/PLAY buttons and the world-speed slider, handling
        mouse clicks on each while drawing (immediate-mode UI)."""
        # PAUSE button (sets paused when clicked while hovered)
        pygame.draw.rect(self.display, self.GRAY, (x-76, y, 64, 16))
        if x-76 < self.mouse_x < x-16 and y < self.mouse_y < y+16:
            pygame.draw.rect(self.display, self.BLACK, (x-76, y, 64, 16), 2)
            if pygame.mouse.get_pressed()[0]:
                self.paused = 1
        text = self.FNT_SMALL.render("PAUSE", True, self.BLACK)
        text_rect = text.get_rect(center=(x-76+32,y+8))
        self.display.blit(text, text_rect)
        # PLAY button, highlighted green while paused
        if self.paused: col = self.LT_GREEN
        else: col = self.GRAY
        pygame.draw.rect(self.display, col, (x-76-68, y, 64, 16))
        if x-76-68 < self.mouse_x < x-16-68 and y < self.mouse_y < y+16:
            pygame.draw.rect(self.display, self.BLACK, (x-76-68, y, 64, 16), 2)
            if pygame.mouse.get_pressed()[0]:
                self.paused = 0
        text = self.FNT_SMALL.render("PLAY", True, self.BLACK)
        text_rect = text.get_rect(center=(x-76+32-68,y+8))
        self.display.blit(text, text_rect)
        # speed bar: click position maps to world_speed via `sensitivity`;
        # fill color warns when the frame rate falls below 60 FPS
        sensitivity = 0.25
        w = round(self.world_speed,1)
        success = min(1.0, self.clock.get_fps() / 60)
        if success < 0.6: col = self.RED
        elif success < 0.8: col = self.LT_RED
        else: col = self.DK_GRAY
        pygame.draw.rect(self.display, self.GRAY, (x, y, 256, 16))
        pygame.draw.rect(self.display, col, (x, y, self.world_speed/sensitivity, 16))
        if x < self.mouse_x < x+256 and y < self.mouse_y < y+16:
            pygame.draw.rect(self.display, self.BLACK, (x, y, 256, 16), 2)
            if pygame.mouse.get_pressed()[0]:
                xx = self.mouse_x-x
                self.world_speed = max(1, xx*sensitivity)
        text = self.FNT_SMALL.render("{} [{}%]".format(w, round(success*100, 1)), True, self.BLACK)
        text_rect = text.get_rect(center=(x+128,y+8))
        self.display.blit(text, text_rect)
    def display_sidebar(self, x, y):
        """Draw the right-hand sidebar at (x, y).

        Layout (top to bottom):
          * step counter;
          * a grid of per-agent brain-map thumbnails (clickable to focus);
          * either world-level stats (when no agent is focused) or the
            focused agent's details plus its eye/brain/muscle panels.

        Agent records are lists; indices used here (see agent creation):
        0=number, 1=name, 2=color, 3=shape, 4=x, 5=y, 7=brain,
        9=brain_map, 10=experience_history, 11=eye_data, 12=brain_data,
        13=muscle_data.
        """
        basic_info = self.FNT_MEDIUM.render("Steps: {}".format(self.total_steps), True, self.BLACK)
        self.display.blit(basic_info, (x,y+3))
        # brain maps
        section_1_y = 48
        columns = 15
        rows = ((len(self.agents)-1) // columns)+1
        size = 36
        num = 0
        for yy in range(rows):
            for xx in range(columns):
                if num <= len(self.agents)-1:
                    x1 = x+xx*(size+4)
                    y1 = y+yy*(size+16)+section_1_y
                    # draw number
                    name = self.FNT_SMALL.render("#{}".format(num), True, self.BLACK)
                    name_rect = name.get_rect(center=(x1+(size/2),y1-8))
                    self.display.blit(name, name_rect)
                    # draw base
                    pygame.draw.rect(self.display, self.GRAY, (x1, y1, size, size))
                    # draw contour
                    # 3x3 sentiment map; NotFittedError means the agent's
                    # classifier has not been trained yet, so skip drawing
                    try:
                        for sentx in range(3):
                            for senty in range(3):
                                col = self.sentiment_colors[self.agents[num][9][sentx][senty]]
                                pygame.draw.rect(self.display, col, (x1+(sentx*(size/3)), y1+(senty*(size/3)), size/3, size/3))
                    except sklearn.exceptions.NotFittedError:
                        pass
                    # draw highlight box
                    if x1 < self.mouse_x < x1+size and y1 < self.mouse_y < y1+size and self.focus != num:
                        pygame.draw.rect(self.display, self.BLACK, (x1, y1, size, size), 1)
                        # self.draw_agent_body(x1+(size//2), y1+(size//2), self.agents[num][2], self.agents[num][3])
                        if pygame.mouse.get_pressed()[0]:
                            self.focus_msg = ("new focus", 0)
                            self.focus = num
                    if self.focus == num:
                        pygame.draw.rect(self.display, self.BLACK, (x1, y1, size, size), 3)
                    # increment num
                    num += 1
        section_2_y = section_1_y + (rows*(size+16)) + 16
        # world stats
        if self.focus == None:
            column_names = ["Step #", "Agent A", "Agent B", "Location", "Sentiment"]
            text = self.FNT_MEDIUM.render("World Experience Log", True, self.BLACK)
            self.display.blit(text, (x, section_2_y))
            pygame.draw.rect(self.display, self.BLACK, (x, section_2_y+20, 310, 5+(self.world_experience_log_length)*18), 2)
            for row in range(0, self.world_experience_log_length):
                if row != 0:
                    # background colour keyed on sentiment (log column 4)
                    try:
                        if self.world_experience_log[row][4] == 0: bgcol = self.GRAY
                        elif self.world_experience_log[row][4] > 0: bgcol = self.LT_GREEN
                        elif self.world_experience_log[row][4] < 0: bgcol = self.LT_RED
                    except:
                        bgcol = self.GRAY
                    pygame.draw.rect(self.display, bgcol, (x+5, section_2_y+(row*18)+22, 300, 16))
                for column in range(0, 5):
                    x1 = x+(column*60)+5
                    y1 = section_2_y+(row*18)+22
                    if row == 0:
                        t = column_names[column]
                    else:
                        # log may contain fewer rows than the display length
                        try:
                            t = self.world_experience_log[row][column]
                        except:
                            t = ""
                    text = self.FNT_MEDIUM.render("{}".format(t), True, self.BLACK)
                    self.display.blit(text, (x1, y1))
            # recent experience map
            text = self.FNT_MEDIUM.render("Recent Experience Map", True, self.BLACK)
            self.display.blit(text, (x+320, section_2_y))
            pygame.draw.rect(self.display, self.BLACK, (x+320, section_2_y+20, 275, 275), 2)
            for exp in self.world_experience_log:
                # scale world coordinates into the 275x275 map box
                world_diff_x = 275/self.display_width
                world_diff_y = 275/self.display_height
                exp_x = x+328+(exp[3][0]*(32*world_diff_x))
                exp_y = section_2_y+28+(exp[3][1]*(32*world_diff_y))
                pygame.draw.circle(self.display, self.sentiment_colors_alt[exp[4]], (int(exp_x), int(exp_y)), 4)
            # world experience stats
            text = self.FNT_MEDIUM.render("World Experience Stats", True, self.BLACK)
            self.display.blit(text, (x+320, section_2_y+300))
            pygame.draw.rect(self.display, self.BLACK, (x+320, section_2_y+300+20, 275, 101), 2)
            for i, exp_set in enumerate(self.world_experience_stats):
                exp_list = []
                for j, exp in enumerate(sorted(exp_set, reverse=True)):
                    color = self.sentiment_colors[exp]
                    pygame.draw.rect(self.display, color, (x+320+8+(i*4), section_2_y+300+20+7+(j*4), 3, 4))
        # agent focus
        if self.focus != None:
            a = self.agents[self.focus]
            agent_title = self.FNT_LARGE.render("AGENT#{}: {}".format(a[0], a[1]), True, self.BLACK, self.GRAY)
            self.display.blit(agent_title, (x,y+section_2_y))
            # agent info text
            agent_color = self.FNT_MEDIUM.render("Color: {} ({})".format(self.color_names[a[2]].title(), a[2]), True, self.BLACK)
            agent_shape = self.FNT_MEDIUM.render("Shape: {} ({})".format(self.shapes[a[3]], a[3]).title(), True, self.BLACK)
            agent_pos = self.FNT_MEDIUM.render("Pos: ({}, {})".format(a[4], a[5]).title(), True, self.BLACK)
            self.display.blit(agent_color, (x+32,y+section_2_y+20))
            self.display.blit(agent_shape, (x+32,y+section_2_y+20+16))
            self.display.blit(agent_pos, (x,y+section_2_y+20+32))
            self.draw_agent_body(x+16, y+section_2_y+20+16, a[2], a[3])
            # agent experience history
            # strip chart of the most recent experiences, 4px per entry
            exp_size = 4
            exp_shown_length = 368//exp_size
            for i, exp in enumerate(a[10][-exp_shown_length:]):
                pygame.draw.rect(self.display, self.sentiment_colors[exp], ((x+220)+i*exp_size, y+section_2_y+20, exp_size, 30))
            text = self.FNT_MEDIUM.render("Last Experience:", True, self.DK_GRAY)
            self.display.blit(text, (x+220,y+section_2_y))
            agent_experiences = self.FNT_MEDIUM.render("Total Experiences: {}".format(a[7].total_experiences()), True, self.BLACK)
            self.display.blit(agent_experiences, (x+220,y+section_2_y+20+32))
            agent_focus_msg = self.FNT_MEDIUM.render("{}".format(self.focus_msg[0]), True, self.sentiment_colors_alt[self.focus_msg[1]])
            agent_focus_msg_rect = agent_focus_msg.get_rect()
            agent_focus_msg_rect.right = x+220+368
            agent_focus_msg_rect.top = y+section_2_y
            self.display.blit(agent_focus_msg, agent_focus_msg_rect)
            agent_experience_rate = self.FNT_MEDIUM.render("{} per ksteps".format(round(a[7].total_experiences()/self.total_steps*1000)), True, self.BLACK)
            agent_experience_rate_rect = agent_experience_rate.get_rect()
            agent_experience_rate_rect.right = x+220+368
            agent_experience_rate_rect.top = y+section_2_y+20+32
            self.display.blit(agent_experience_rate, agent_experience_rate_rect)
        # agent sections
        section_3_y = section_2_y + 128
        if self.focus != None:
            a = self.agents[self.focus]
            # eye
            x1, y1 = x, section_3_y
            text = self.FNT_LARGE.render("Eye", True, self.BLACK)
            self.display.blit(text, (x1, y1-24))
            pygame.draw.rect(self.display, self.BLACK, (x1, y1, 160, 160), 2)
            # eye grid
            # 7x7 field of view; (3,3) is the agent itself, a[11] lists
            # currently-seen cells
            for yy in range(7):
                for xx in range(7):
                    if (xx, yy) == (3, 3):
                        col = self.BLACK
                    elif (xx, yy) in a[11]:
                        col = self.DK_GRAY
                    else:
                        col = self.GRAY
                    pygame.draw.rect(self.display, col, (x1+(xx*20)+11, y1+(yy*20)+11, 19, 19))
            text = self.FNT_LARGE.render("{}".format(len(a[11])), True, self.BLACK)
            self.display.blit(text, (x1+4, y1+1))
            # text below
            text = self.FNT_SMALL.render("{}".format(a[11][:5]), True, self.BLACK)
            self.display.blit(text, (x1, y1+162))
            # eye to brain link
            pygame.draw.line(self.display, self.BLACK, (x1+160, y1+80), (x1+220, y1+80), 2)
            # brain
            x1, y1 = x+220, section_3_y
            text = self.FNT_LARGE.render("Brain", True, self.BLACK)
            self.display.blit(text, (x1, y1-24))
            pygame.draw.rect(self.display, self.BLACK, (x1, y1, 160, 160), 2)
            # brain grid
            # colour each seen cell by the sentiment (a[12]) the brain
            # assigned to it; DK_GRAY when no sentiment is available
            for yy in range(7):
                for xx in range(7):
                    if (xx, yy) in a[11]:
                        try:
                            index = a[11].index((xx, yy))
                            sent = a[12][index]
                            col = self.sentiment_colors_alt[sent]
                        except:
                            col = self.DK_GRAY
                    else:
                        col = self.GRAY
                    pygame.draw.rect(self.display, col, (x1+(xx*20)+11, y1+(yy*20)+11, 19, 19))
            pygame.draw.rect(self.display, self.BLACK, (x1+71, y1+71, 19, 19), 1)
            # text below
            text = self.FNT_SMALL.render("{}".format(a[12][:9]), True, self.BLACK)
            self.display.blit(text, (x1, y1+162))
            # brain to muscle link
            pygame.draw.line(self.display, self.BLACK, (x1+160, y1+80), (x1+220, y1+80), 2)
            # muscle
            x1, y1 = x+440, section_3_y
            text = self.FNT_LARGE.render("Muscle", True, self.BLACK)
            self.display.blit(text, (x1, y1-24))
            pygame.draw.rect(self.display, self.BLACK, (x1, y1, 160, 160), 2)
            # visualization of muscle preference
            pygame.draw.rect(self.display, self.BLACK, (x1+80-8, y1+80-8, 16, 16))
            # a[13] holds [N, E, S, W scores, chosen-direction index]
            sect_scores = a[13]
            # draw lines for each direction (NESW)
            if len(sect_scores) > 0:
                if sect_scores[0] != 0:
                    if sect_scores[0] >= 0: col = self.GREEN
                    else: col = self.RED
                    pygame.draw.line(self.display, col, (x1+80, y1+80-8), (x1+80, y1+80-8-min(64, abs((sect_scores[0]+1)*8))), 9)
                if sect_scores[1] != 0:
                    if sect_scores[1] >= 0: col = self.GREEN
                    else: col = self.RED
                    pygame.draw.line(self.display, col, (x1+80+8, y1+80), (x1+80+8+min(64, abs((sect_scores[1]+1)*8)), y1+80), 9)
                if sect_scores[2] != 0:
                    if sect_scores[2] >= 0: col = self.GREEN
                    else: col = self.RED
                    pygame.draw.line(self.display, col, (x1+80, y1+80+8), (x1+80, y1+80+8+min(64, abs((sect_scores[2]+1)*8))), 9)
                if sect_scores[3] != 0:
                    if sect_scores[3] >= 0: col = self.GREEN
                    else: col = self.RED
                    pygame.draw.line(self.display, col, (x1+80-8, y1+80), (x1+80-8-min(64, abs((sect_scores[3]+1)*8)), y1+80), 9)
                # text for chosen direction
                text = self.FNT_LARGE.render("{}".format(self.directions[sect_scores[4]]), True, self.BLACK)
                self.display.blit(text, (x1+4, y1+4))
                if sect_scores[4] == 0: pygame.draw.line(self.display, self.BLACK, (x1+80, y1+80), (x1+80, y1+80-60), 3)
                elif sect_scores[4] == 1: pygame.draw.line(self.display, self.BLACK, (x1+80, y1+80), (x1+80+60, y1+80), 3)
                elif sect_scores[4] == 2: pygame.draw.line(self.display, self.BLACK, (x1+80, y1+80), (x1+80, y1+80+60), 3)
                elif sect_scores[4] == 3: pygame.draw.line(self.display, self.BLACK, (x1+80, y1+80), (x1+80-60, y1+80), 3)
                # text below, direction scores
                text = self.FNT_SMALL.render("{}".format(sect_scores[:4]), True, self.BLACK)
                self.display.blit(text, (x1, y1+162))
# Script entry: instantiate the simulation window. NOTE(review): this
# rebinds the class name `Window` to the instance, so the class cannot be
# instantiated again afterwards.
Window = Window()
# `running` is assumed to be initialised earlier in the file — TODO confirm.
while running:
    Window.main()
    mouse_pos = pygame.mouse.get_pos()
    # clicking inside the world area (left of the sidebar) clears the focus
    if pygame.mouse.get_pressed()[0] and mouse_pos[0] < Window.display_width:
        Window.focus = None
    # events
    for event in pygame.event.get():
        if event.type == pygame.QUIT: # quitting
            running = False
            pygame.quit()
            quit()
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test descendant package tracking code
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import COIN
MAX_ANCESTORS = 25
MAX_DESCENDANTS = 25
class MempoolPackagesTest(BitcoinTestFramework):
    """Test mempool ancestor/descendant package tracking and limits.

    Node 0 runs with default ancestor/descendant limits; node 1 runs with
    -limitancestorcount=5 so reorg behaviour with a too-long ancestor
    chain can be exercised.
    """

    def __init__(self):
        super().__init__()
        self.num_nodes = 2
        self.setup_clean_chain = False

    def setup_network(self):
        """Start two connected nodes; node 1 has a low ancestor limit."""
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-maxorphantx=1000", "-limitancestorcount=5", "-debug"]))
        connect_nodes(self.nodes[0], 1)
        self.is_network_split = False
        self.sync_all()

    # Build a transaction that spends parent_txid:vout
    # Return amount sent
    def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs):
        """Spend parent_txid:vout into num_outputs equal outputs.

        Returns (txid, value_per_output). Raises JSONRPCException if the
        node rejects the transaction (e.g. package limits exceeded).
        """
        send_value = satoshi_round((value - fee)/num_outputs)
        inputs = [ {'txid' : parent_txid, 'vout' : vout} ]
        outputs = {}
        for i in range(num_outputs):
            outputs[node.getnewaddress()] = send_value
        rawtx = node.createrawtransaction(inputs, outputs)
        signedtx = node.signrawtransaction(rawtx)
        txid = node.sendrawtransaction(signedtx['hex'])
        fulltx = node.getrawtransaction(txid, 1)
        assert(len(fulltx['vout']) == num_outputs) # make sure we didn't generate a change output
        return (txid, send_value)

    def run_test(self):
        ''' Mine some blocks and have them mature. '''
        self.nodes[0].generate(101)
        utxo = self.nodes[0].listunspent(10)
        txid = utxo[0]['txid']
        vout = utxo[0]['vout']
        value = utxo[0]['amount']
        fee = Decimal("0.0001")
        # MAX_ANCESTORS transactions off a confirmed tx should be fine
        chain = []
        for i in range(MAX_ANCESTORS):
            (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1)
            value = sent_value
            chain.append(txid)
        # Check mempool has MAX_ANCESTORS transactions in it, and descendant
        # count and fees should look correct
        mempool = self.nodes[0].getrawmempool(True)
        assert_equal(len(mempool), MAX_ANCESTORS)
        descendant_count = 1
        descendant_fees = 0
        descendant_size = 0
        descendants = []
        ancestors = list(chain)
        for x in reversed(chain):
            # Check that getmempoolentry is consistent with getrawmempool
            entry = self.nodes[0].getmempoolentry(x)
            assert_equal(entry, mempool[x])
            # Check that the descendant calculations are correct
            assert_equal(mempool[x]['descendantcount'], descendant_count)
            descendant_fees += mempool[x]['fee']
            assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee'])
            assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN)
            descendant_size += mempool[x]['size']
            assert_equal(mempool[x]['descendantsize'], descendant_size)
            descendant_count += 1
            # Check that getmempooldescendants is correct
            assert_equal(sorted(descendants), sorted(self.nodes[0].getmempooldescendants(x)))
            descendants.append(x)
            # Check that getmempoolancestors is correct
            ancestors.remove(x)
            assert_equal(sorted(ancestors), sorted(self.nodes[0].getmempoolancestors(x)))
        # Check that getmempoolancestors/getmempooldescendants correctly handle verbose=true
        v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True)
        assert_equal(len(v_ancestors), len(chain)-1)
        for x in v_ancestors.keys():
            assert_equal(mempool[x], v_ancestors[x])
        assert(chain[-1] not in v_ancestors.keys())
        v_descendants = self.nodes[0].getmempooldescendants(chain[0], True)
        assert_equal(len(v_descendants), len(chain)-1)
        for x in v_descendants.keys():
            assert_equal(mempool[x], v_descendants[x])
        assert(chain[0] not in v_descendants.keys())
        # Check that descendant modified fees includes fee deltas from
        # prioritisetransaction
        self.nodes[0].prioritisetransaction(chain[-1], 0, 1000)
        mempool = self.nodes[0].getrawmempool(True)
        descendant_fees = 0
        for x in reversed(chain):
            descendant_fees += mempool[x]['fee']
            assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 1000)
        # Adding one more transaction on to the chain should fail.
        try:
            self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
        except JSONRPCException:
            print("too-long-ancestor-chain successfully rejected")
        else:
            # Previously this path passed silently; the rejection is the
            # whole point of the check, so fail loudly if it was accepted.
            raise AssertionError("transaction exceeding the ancestor limit was accepted")
        # Check that prioritising a tx before it's added to the mempool works
        # First clear the mempool by mining a block.
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        # Prioritise a transaction that has been mined, then add it back to the
        # mempool by using invalidateblock.
        self.nodes[0].prioritisetransaction(chain[-1], 0, 2000)
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # Keep node1's tip synced with node0
        self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
        # Now check that the transaction is in the mempool, with the right modified fee
        mempool = self.nodes[0].getrawmempool(True)
        descendant_fees = 0
        for x in reversed(chain):
            descendant_fees += mempool[x]['fee']
            if (x == chain[-1]):
                assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee']+satoshi_round(0.00002))
            assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 2000)
        # TODO: check that node1's mempool is as expected
        # TODO: test ancestor size limits
        # Now test descendant chain limits
        txid = utxo[1]['txid']
        value = utxo[1]['amount']
        vout = utxo[1]['vout']
        transaction_package = []
        # First create one parent tx with 10 children
        (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 10)
        parent_transaction = txid
        for i in range(10):
            transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value})
        # Extend the package breadth-first; the final attempt must be
        # rejected for exceeding the descendant limit.
        descendant_limit_hit = False
        for i in range(MAX_DESCENDANTS):
            utxo = transaction_package.pop(0)
            try:
                (txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
                for j in range(10):
                    transaction_package.append({'txid': txid, 'vout': j, 'amount': sent_value})
                if i == MAX_DESCENDANTS - 2:
                    mempool = self.nodes[0].getrawmempool(True)
                    assert_equal(mempool[parent_transaction]['descendantcount'], MAX_DESCENDANTS)
            except JSONRPCException as e:
                print(e.error['message'])
                assert_equal(i, MAX_DESCENDANTS - 1)
                descendant_limit_hit = True
                print("tx that would create too large descendant package successfully rejected")
        # Previously a missing rejection went unnoticed; assert it happened.
        assert(descendant_limit_hit)
        # TODO: check that node1's mempool is as expected
        # TODO: test descendant size limits
        # Test reorg handling
        # First, the basics:
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash())
        self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash())
        # Now test the case where node1 has a transaction T in its mempool that
        # depends on transactions A and B which are in a mined block, and the
        # block containing A and B is disconnected, AND B is not accepted back
        # into node1's mempool because its ancestor count is too high.
        # Create 8 transactions, like so:
        # Tx0 -> Tx1 (vout0)
        #   \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7
        #
        # Mine them in the next block, then generate a new tx8 that spends
        # Tx1 and Tx7, and add to node1's mempool, then disconnect the
        # last block.
        # Create tx0 with 2 outputs
        utxo = self.nodes[0].listunspent()
        txid = utxo[0]['txid']
        value = utxo[0]['amount']
        vout = utxo[0]['vout']
        send_value = satoshi_round((value - fee)/2)
        inputs = [ {'txid' : txid, 'vout' : vout} ]
        outputs = {}
        for i in range(2):
            outputs[self.nodes[0].getnewaddress()] = send_value
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        signedtx = self.nodes[0].signrawtransaction(rawtx)
        txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
        tx0_id = txid
        value = send_value
        # Create tx1
        (tx1_id, tx1_value) = self.chain_transaction(self.nodes[0], tx0_id, 0, value, fee, 1)
        # Create tx2-7
        vout = 1
        txid = tx0_id
        for i in range(6):
            (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
            vout = 0
            value = sent_value
        # Mine these in a block
        self.nodes[0].generate(1)
        self.sync_all()
        # Now generate tx8, with a big fee
        inputs = [ {'txid' : tx1_id, 'vout': 0}, {'txid' : txid, 'vout': 0} ]
        outputs = { self.nodes[0].getnewaddress() : send_value + value - 4*fee }
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        signedtx = self.nodes[0].signrawtransaction(rawtx)
        txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
        sync_mempools(self.nodes)
        # Now try to disconnect the tip on each node...
        self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        sync_blocks(self.nodes)
# Script entry point: run the functional test directly.
if __name__ == '__main__':
    MempoolPackagesTest().main()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A python command line tool to automate building and deployment of Sphinx documentation on GitHub static pages"""
from subprocess import call
import os
from distutils.dir_util import copy_tree, remove_tree
import shutil
from tempfile import mkdtemp
from datetime import datetime
import argparse
from git import Repo
import sys
__year__ = datetime.now().year
__author__ = "Francois Rongere"
__copyright__ = "Copyright 2016-%u, Ecole Centrale de Nantes" % __year__
__credits__ = "Francois Rongere"
__licence__ = "BSD"
__version__ = "0.1"
__maintainer__ = "Francois Rongere"
__email__ = "franrongere@gmail.com"
__status__ = "Development"
__all__ = ['main']
def get_current_branch(repo):
    """Return the name of the currently checked-out branch of *repo*.

    Parses ``git branch`` output and returns the entry marked with ``*``.

    :raises RuntimeError: if no line is marked as current (previously this
        raised an obscure NameError because ``cur_branch`` was unbound).
    """
    branches = repo.git.branch()
    for branch in branches.split('\n'):
        if branch.startswith('*'):
            # strip the leading "* " marker
            return branch[2:]
    raise RuntimeError('Could not determine the current branch')
def checkout_branch(repo, branch):
# TODO: faire le check que la branche demandee existe bien dans le repo
print "\nChecking out to branch %s" % branch
try:
print repo.git.checkout(branch)
except:
raise RuntimeError('Unable to checkout to branch %s' % branch)
def is_github_repo(s):
    """Return True when *s* looks like a GitHub repository path or URL."""
    return s.find('github.com') != -1
def is_doc_folder(s):
    """Return True if the folder *s* contains a Sphinx ``conf.py``.

    Bug fix: the original ignored its argument entirely and only tested
    for ``conf.py`` in the current working directory. We now check *s*
    when it names an existing directory, and fall back to the cwd
    otherwise (preserving behaviour for callers that chdir first and
    pass a now-stale relative path).
    """
    folder = s if os.path.isdir(s) else os.curdir
    return os.path.isfile(os.path.join(folder, 'conf.py'))
# Building parser
# ------------------------------------------------
# argcomplete provides optional shell tab-completion; record availability.
try:
    import argcomplete
    has_argcomplete = True
except ImportError:
    # narrowed from a bare except: only a missing module should be tolerated
    has_argcomplete = False
# Command line interface. Typos in the user-visible help text fixed
# ("automation the deployment" -> "automate the deployment", "GiHub"
# -> "GitHub").
parser = argparse.ArgumentParser(
    description=""" -- SPHINX2GH --
    A command line tool to automate the deployment of Sphinx documentation
    to GitHub static pages.
    """,
    epilog='-- Copyright 2016-%u -- BSD --\n Francois Rongere' % __year__,
    formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('repo', type=str, help="""path or url to the repo""")
parser.add_argument('--remote-gh', type=str, help="""The remote GitHub repository""")
parser.add_argument('--build-branch', type=str, help="The branch where we want to build the documentation")
parser.add_argument('--doc-src', type=str, help="The folder where the documentation is located")
parser.add_argument('--commit-msg', type=str, help="The commit message in the gh-pages branch")
def main():
    """Clone the repo, build the Sphinx HTML documentation on the
    requested branch, and publish the result to the ``gh-pages`` branch
    of the GitHub remote.

    Workflow: clone into a temp dir -> resolve build branch, GitHub
    remote and doc folder -> ``make html`` -> copy HTML aside -> switch
    to gh-pages -> wipe the working tree (keeping .git*) -> copy HTML
    back -> commit and push -> clean up temp dirs.
    """
    if has_argcomplete:
        argcomplete.autocomplete(parser)
    args, unknown = parser.parse_known_args()
    print("\n=====================================================")
    print("Building and Deploying Sphinx documentation on GitHub")
    print("=====================================================\n")
    # Build temp folder to clone the repo
    working_dir = mkdtemp(suffix='_doc')
    print "\t--> Cloning the repository in %s" % working_dir
    repo = Repo.clone_from(args.repo, working_dir)
    os.chdir(working_dir)
    # TODO: check here if the gh-pages branch does exist
    cur_branch = get_current_branch(repo)
    # Setting the build branch
    if args.build_branch is None:
        # We consider it is the current branch
        build_branch = cur_branch
    else:
        # TODO: check if args.build_branch does exist
        build_branch = args.build_branch
    print "\n\t* Documentation wil be built on branch %s" % build_branch
    # Setting the remote GitHub repository
    if args.remote_gh is not None:
        try:
            assert is_github_repo(args.remote_gh)
        except AssertionError:
            raise AssertionError('%s is not a gitHub repository' % args.remote_gh)
        remote_gh = args.remote_gh
    else:
        if is_github_repo(args.repo):
            remote_gh = args.repo
        else:
            raise Exception('As the source repo is not a GitHub repo, you have to provide a remote GitHub with '
                            'the --remote-gh option')
    repo.git.remote('add', 'github', remote_gh)
    print "\n\t* Documentation will be pushed on the GitHub repo with url %s" % remote_gh
    # Setting the documentation folder
    doc_folder_guess = ['docs', 'doc', 'documentation']
    if args.doc_src is None:
        # try the common folder names; for-else fires when none worked
        for doc_folder in doc_folder_guess:
            try:
                os.chdir(doc_folder)
                doc_src = os.getcwd()
                break
            except OSError:
                pass
        else:
            raise Exception('Tried to guess the doc folder but failed. Please provide it by the --doc-src option')
    else:
        try:
            assert os.path.isdir(args.doc_src)
            doc_src = args.doc_src
        except AssertionError:
            raise OSError('%s is not a valid doc folder' % args.doc_src)
        os.chdir(args.doc_src)
    try:
        assert is_doc_folder(doc_src)
    except AssertionError:
        raise AssertionError('%s is not a valid sphinx documentation folder' % doc_src)
    print "\n\t* The documentation folder is %s" % doc_src
    # Checking out the build branch
    # remember the build commit SHA for the commit message below
    build_sha = repo.git.log(n='1', pretty="format:%H")
    print "\n\t--> Checking out to build branch %s" % build_branch
    checkout_branch(repo, build_branch)
    # repo.git.pull(args.repo, build_branch)
    # Building the documentation
    print "\n\t--> Building the sphinx HTML documentation"
    try:
        call(['make', 'clean'])
        call(['make', 'html'])
    except:
        raise Exception('Unable to build the documentation')
    # Copying the HTML files to a temp dir
    html_dir = mkdtemp(suffix='_html')
    # TODO: recuperer le repertoire de build
    print "\n\t--> Copying HTML files to %s" % html_dir
    copy_tree(os.path.join(doc_src, '.build/html'), html_dir)
    print "\n\t--> Cleaning the working copy"
    call(['make', 'clean'])
    os.chdir('..')
    print "\n\t--> Checking out to branch gh-pages"
    # repo.git.pull(args.repo, 'gh-pages')
    checkout_branch(repo, 'gh-pages')
    print "removing everything except .git and .gitignore"
    filelist = [f for f in os.listdir('.') if not f.startswith('.git')]
    for f in filelist:
        if os.path.isfile(f):
            os.remove(f)
        if os.path.isdir(f):
            shutil.rmtree(f)
    print "\n\t--> Copying back HTML files from %s to %s" % (html_dir, working_dir)
    copy_tree(html_dir, working_dir)
    # If we have no .nojekyll file, we create it
    # (.nojekyll tells GitHub Pages not to run the site through Jekyll)
    if not os.path.isfile('.nojekyll'):
        print "Adding a .nojekyll file"
        with open('.nojekyll', 'w'):
            os.utime('.nojekyll', None)
    print "\n\t--> Commiting new documentation"
    if args.commit_msg is None:
        msg = "Documentation update from rev %s on branch %s" % (build_sha, build_branch)
    else:
        msg = args.commit_msg
    repo.git.add('.', all=True)
    print repo.git.commit(m=msg)
    print "\n\t--> Pushing new revision to %s" % remote_gh
    repo.git.push('github', 'gh-pages')
    print "\n\t--> Cleaning temp folders"
    os.chdir('..')
    remove_tree(html_dir)
    remove_tree(working_dir)
# Script entry point.
if __name__ == '__main__':
    main()
| |
"""
Low level *Skype for Linux* interface implemented using *XWindows messaging*.
Uses direct *Xlib* calls through *ctypes* module.
This module handles the options that you can pass to `Skype.__init__`
for Linux machines when the transport is set to *X11*.
No further options are currently supported.
Warning for PyGTK framework users
=================================
The multithreaded architecture of Skype4Py requires a special treatment
if the Xlib transport is combined with PyGTK GUI framework.
The following code has to be called at the top of your script, before
PyGTK is even imported.
.. python::
from Skype4Py.api.posix_x11 import threads_init
threads_init()
This function enables multithreading support in Xlib and GDK. If not done
here, this is enabled for Xlib library when the `Skype` object is instantiated.
If your script imports the PyGTK module, doing this so late may lead to a
segmentation fault when the GUI is shown on the screen.
A remedy is to enable the multithreading support before PyGTK is imported
by calling the ``threads_init`` function.
"""
__docformat__ = 'restructuredtext en'
import sys
import threading
import os
from ctypes import *
from ctypes.util import find_library
import time
import logging
from Skype4Py.api import Command, SkypeAPIBase, \
timeout2float, finalize_opts
from Skype4Py.enums import *
from Skype4Py.errors import SkypeAPIError
__all__ = ['SkypeAPI', 'threads_init']
# The Xlib Programming Manual:
# ============================
# http://tronche.com/gui/x/xlib/
# some Xlib constants
PropertyChangeMask = 0x400000
PropertyNotify = 28
ClientMessage = 33
PropertyNewValue = 0
PropertyDelete = 1
# some Xlib types (ctypes aliases mirroring the Xlib typedefs)
c_ulong_p = POINTER(c_ulong)
DisplayP = c_void_p
Atom = c_ulong
AtomP = c_ulong_p
XID = c_ulong
Window = XID
Bool = c_int
Status = c_int
Time = c_ulong
c_int_p = POINTER(c_int)
# should the structures be aligned to 8 bytes?
# True on LP64 platforms (8-byte long, 4-byte int), where Xlib structs
# contain implicit padding that the ctypes mirrors must reproduce.
align = (sizeof(c_long) == 8 and sizeof(c_int) == 4)
# some Xlib structures
class XClientMessageEvent(Structure):
    # ctypes mirror of Xlib's XClientMessageEvent. The `align` branch adds
    # explicit pad fields to reproduce the LP64 struct layout (see `align`
    # above). Field order must match the C definition exactly.
    if align:
        _fields_ = [('type', c_int),
                    ('pad0', c_int),
                    ('serial', c_ulong),
                    ('send_event', Bool),
                    ('pad1', c_int),
                    ('display', DisplayP),
                    ('window', Window),
                    ('message_type', Atom),
                    ('format', c_int),
                    ('pad2', c_int),
                    ('data', c_char * 20)]
    else:
        _fields_ = [('type', c_int),
                    ('serial', c_ulong),
                    ('send_event', Bool),
                    ('display', DisplayP),
                    ('window', Window),
                    ('message_type', Atom),
                    ('format', c_int),
                    ('data', c_char * 20)]
class XPropertyEvent(Structure):
    # ctypes mirror of Xlib's XPropertyEvent (delivered for PropertyNotify);
    # pad fields reproduce LP64 alignment, as above.
    if align:
        _fields_ = [('type', c_int),
                    ('pad0', c_int),
                    ('serial', c_ulong),
                    ('send_event', Bool),
                    ('pad1', c_int),
                    ('display', DisplayP),
                    ('window', Window),
                    ('atom', Atom),
                    ('time', Time),
                    ('state', c_int),
                    ('pad2', c_int)]
    else:
        _fields_ = [('type', c_int),
                    ('serial', c_ulong),
                    ('send_event', Bool),
                    ('display', DisplayP),
                    ('window', Window),
                    ('atom', Atom),
                    ('time', Time),
                    ('state', c_int)]
class XErrorEvent(Structure):
    # ctypes mirror of Xlib's XErrorEvent; pad field reproduces LP64
    # alignment, as above.
    if align:
        _fields_ = [('type', c_int),
                    ('pad0', c_int),
                    ('display', DisplayP),
                    ('resourceid', XID),
                    ('serial', c_ulong),
                    ('error_code', c_ubyte),
                    ('request_code', c_ubyte),
                    ('minor_code', c_ubyte)]
    else:
        _fields_ = [('type', c_int),
                    ('display', DisplayP),
                    ('resourceid', XID),
                    ('serial', c_ulong),
                    ('error_code', c_ubyte),
                    ('request_code', c_ubyte),
                    ('minor_code', c_ubyte)]
class XEvent(Union):
    # ctypes mirror of Xlib's XEvent union; `pad` forces the full Xlib
    # event size (24 longs). NOTE(review): both branches are currently
    # identical — the aligned/unaligned distinction lives in the member
    # structures themselves.
    if align:
        _fields_ = [('type', c_int),
                    ('xclient', XClientMessageEvent),
                    ('xproperty', XPropertyEvent),
                    ('xerror', XErrorEvent),
                    ('pad', c_long * 24)]
    else:
        _fields_ = [('type', c_int),
                    ('xclient', XClientMessageEvent),
                    ('xproperty', XPropertyEvent),
                    ('xerror', XErrorEvent),
                    ('pad', c_long * 24)]
XEventP = POINTER(XEvent)
if getattr(sys, 'skype4py_setup', False):
    # we get here if we're building docs; to let the module import without
    # exceptions, we emulate the X11 library using a class:
    class X(object):
        def __getattr__(self, name):
            return self
        def __setattr__(self, name, value):
            pass
        def __call__(self, *args, **kwargs):
            pass
    x11 = X()
else:
    # load X11 library (Xlib)
    libpath = find_library('X11')
    if not libpath:
        raise ImportError('Could not find X11 library')
    x11 = cdll.LoadLibrary(libpath)
    del libpath
# setup Xlib function prototypes
# Declaring argtypes/restype lets ctypes convert arguments and catch
# mismatches instead of silently passing wrong-sized integers.
x11.XCloseDisplay.argtypes = (DisplayP,)
x11.XCloseDisplay.restype = None
x11.XCreateSimpleWindow.argtypes = (DisplayP, Window, c_int, c_int, c_uint,
        c_uint, c_uint, c_ulong, c_ulong)
x11.XCreateSimpleWindow.restype = Window
x11.XDefaultRootWindow.argtypes = (DisplayP,)
x11.XDefaultRootWindow.restype = Window
x11.XDeleteProperty.argtypes = (DisplayP, Window, Atom)
x11.XDeleteProperty.restype = None
x11.XDestroyWindow.argtypes = (DisplayP, Window)
x11.XDestroyWindow.restype = None
x11.XFree.argtypes = (c_void_p,)
x11.XFree.restype = None
x11.XGetAtomName.argtypes = (DisplayP, Atom)
x11.XGetAtomName.restype = c_void_p
x11.XGetErrorText.argtypes = (DisplayP, c_int, c_char_p, c_int)
x11.XGetErrorText.restype = None
x11.XGetWindowProperty.argtypes = (DisplayP, Window, Atom, c_long, c_long, Bool,
        Atom, AtomP, c_int_p, c_ulong_p, c_ulong_p, POINTER(POINTER(Window)))
x11.XGetWindowProperty.restype = c_int
x11.XInitThreads.argtypes = ()
x11.XInitThreads.restype = Status
x11.XInternAtom.argtypes = (DisplayP, c_char_p, Bool)
x11.XInternAtom.restype = Atom
x11.XNextEvent.argtypes = (DisplayP, XEventP)
x11.XNextEvent.restype = None
x11.XOpenDisplay.argtypes = (c_char_p,)
x11.XOpenDisplay.restype = DisplayP
x11.XPending.argtypes = (DisplayP,)
x11.XPending.restype = c_int
x11.XSelectInput.argtypes = (DisplayP, Window, c_long)
x11.XSelectInput.restype = None
x11.XSendEvent.argtypes = (DisplayP, Window, Bool, c_long, XEventP)
x11.XSendEvent.restype = Status
x11.XLockDisplay.argtypes = (DisplayP,)
x11.XLockDisplay.restype = None
x11.XUnlockDisplay.argtypes = (DisplayP,)
x11.XUnlockDisplay.restype = None
def threads_init(gtk=True):
    """Enables multithreading support in Xlib and PyGTK.
    See the module docstring for more info.
    :Parameters:
      gtk : bool
        May be set to False to skip the PyGTK module.
    """
    # enable X11 multithreading
    x11.XInitThreads()
    if gtk:
        # imported lazily so PyGTK is only required when requested
        from gtk.gdk import threads_init
        threads_init()
class SkypeAPI(SkypeAPIBase):
    def __init__(self, opts):
        """Set up the X11 transport: open the display, create a helper
        window to receive Skype messages, and resolve the Skype atoms.

        :param opts: transport options dict; no X11-specific options are
            supported (``finalize_opts`` rejects leftovers).
        :raises SkypeAPIError: if the X display cannot be opened.
        """
        self.logger = logging.getLogger('Skype4Py.api.posix_x11.SkypeAPI')
        SkypeAPIBase.__init__(self)
        finalize_opts(opts)
        # initialize threads if not done already by the user
        threads_init(gtk=False)
        # init Xlib display
        self.disp = x11.XOpenDisplay(None)
        if not self.disp:
            raise SkypeAPIError('Could not open XDisplay')
        self.win_root = x11.XDefaultRootWindow(self.disp)
        # small helper window acting as our end of the Skype message channel
        self.win_self = x11.XCreateSimpleWindow(self.disp, self.win_root,
                                                100, 100, 100, 100, 1, 0, 0)
        # watch the root window so PropertyNotify for _SKYPE_INSTANCE is
        # delivered (used in run() to track Skype start/stop)
        x11.XSelectInput(self.disp, self.win_root, PropertyChangeMask)
        self.win_skype = self.get_skype()
        ctrl = 'SKYPECONTROLAPI_MESSAGE'
        self.atom_msg = x11.XInternAtom(self.disp, ctrl, False)
        self.atom_msg_begin = x11.XInternAtom(self.disp, ctrl + '_BEGIN', False)
        # event-loop pacing: loop_timeout backs off while idle and is
        # reset to its minimum on activity (see run())
        self.loop_event = threading.Event()
        self.loop_timeout = 0.0001
        self.loop_break = False
    def __del__(self):
        """Release X resources; guarded so partial construction (or
        interpreter shutdown clearing module globals) does not raise."""
        if x11:
            if hasattr(self, 'disp'):
                if hasattr(self, 'win_self'):
                    x11.XDestroyWindow(self.disp, self.win_self)
                x11.XCloseDisplay(self.disp)
    def run(self):
        """Thread body: pump the X event queue.

        Reassembles multi-part Skype ClientMessage chunks (20 bytes each;
        a short final chunk terminates the message) and dispatches the
        complete command via ``notify``. PropertyNotify on
        ``_SKYPE_INSTANCE`` tracks Skype starting/stopping. The wait
        timeout backs off exponentially while idle (up to 1s) and resets
        on activity.
        """
        self.logger.info('thread started')
        # main loop
        event = XEvent()
        data = ''
        while not self.loop_break and x11:
            while x11.XPending(self.disp):
                self.loop_timeout = 0.0001
                x11.XNextEvent(self.disp, byref(event))
                # events we get here are already prefiltered by the predicate function
                if event.type == ClientMessage:
                    if event.xclient.format == 8:
                        if event.xclient.message_type == self.atom_msg_begin:
                            data = str(event.xclient.data)
                        elif event.xclient.message_type == self.atom_msg:
                            if data != '':
                                data += str(event.xclient.data)
                            else:
                                self.logger.warning('Middle of Skype X11 message received with no beginning!')
                        else:
                            continue
                        # a chunk shorter than 20 bytes marks end-of-message
                        if len(event.xclient.data) != 20 and data:
                            self.notify(data.decode('utf-8'))
                            data = ''
                elif event.type == PropertyNotify:
                    namep = x11.XGetAtomName(self.disp, event.xproperty.atom)
                    is_inst = (c_char_p(namep).value == '_SKYPE_INSTANCE')
                    x11.XFree(namep)
                    if is_inst:
                        if event.xproperty.state == PropertyNewValue:
                            self.win_skype = self.get_skype()
                            # changing attachment status can cause an event handler to be fired, in
                            # turn it could try to call Attach() and doing this immediately seems to
                            # confuse Skype (command '#0 NAME xxx' returns '#0 CONNSTATUS OFFLINE' :D);
                            # to fix this, we give Skype some time to initialize itself
                            time.sleep(1.0)
                            self.set_attachment_status(apiAttachAvailable)
                        elif event.xproperty.state == PropertyDelete:
                            self.win_skype = None
                            self.set_attachment_status(apiAttachNotAvailable)
            self.loop_event.wait(self.loop_timeout)
            if self.loop_event.isSet():
                self.loop_timeout = 0.0001
            elif self.loop_timeout < 1.0:
                self.loop_timeout *= 2
            self.loop_event.clear()
        self.logger.info('thread finished')
    def get_skype(self):
        """Returns Skype window ID or None if Skype not running.

        Reads the '_SKYPE_INSTANCE' property from the root window.  The
        magic 33 passed as ``req_type`` is the XA_WINDOW atom; one 32-bit
        item is requested, which holds the communication window id.
        """
        skype_inst = x11.XInternAtom(self.disp, '_SKYPE_INSTANCE', True)
        if not skype_inst:
            # Atom does not exist -> Skype has never published an instance.
            return
        type_ret = Atom()
        format_ret = c_int()
        nitems_ret = c_ulong()
        bytes_after_ret = c_ulong()
        winp = pointer(Window())
        fail = x11.XGetWindowProperty(self.disp, self.win_root, skype_inst,
                            0, 1, False, 33, byref(type_ret), byref(format_ret),
                            byref(nitems_ret), byref(bytes_after_ret), byref(winp))
        # Implicit None return when the property is missing or malformed.
        if not fail and format_ret.value == 32 and nitems_ret.value == 1:
            return winp.contents.value
    def close(self):
        """Stop the event-loop thread and run base-class cleanup.

        Wakes the loop via ``loop_event`` so it notices ``loop_break``
        promptly, then busy-polls until the thread exits (isAlive is the
        Python 2 spelling of is_alive).
        """
        self.loop_break = True
        self.loop_event.set()
        while self.isAlive():
            time.sleep(0.01)
        SkypeAPIBase.close(self)
    def set_friendly_name(self, friendly_name):
        """Store a new friendly name and, if already attached, reattach.

        Skype only learns the name via the 'NAME' handshake, so an active
        attachment must be dropped and re-established for it to take effect.
        """
        SkypeAPIBase.set_friendly_name(self, friendly_name)
        if self.attachment_status == apiAttachSuccess:
            # reattach with the new name
            self.set_attachment_status(apiAttachUnknown)
            self.attach()
    def attach(self, timeout, wait=True):
        """Attach to a running Skype client.

        :param timeout: attach timeout, converted via ``timeout2float``.
        :param wait: when True a timer flips ``self.wait`` to False after
            the timeout, making the search loop give up.
            NOTE(review): when ``wait`` is False the timer is never started,
            so the loop below can spin indefinitely until Skype appears —
            confirm this is the intended semantics.
        :raises SkypeAPIError: if the API is closed, the attach times out,
            or (indirectly) the command exchange fails.
        """
        if self.attachment_status == apiAttachSuccess:
            return
        self.acquire()
        try:
            if not self.isAlive():
                try:
                    self.start()
                except AssertionError:
                    # threading raises AssertionError when start() is called twice.
                    raise SkypeAPIError('Skype API closed')
            try:
                self.wait = True
                t = threading.Timer(timeout2float(timeout), lambda: setattr(self, 'wait', False))
                if wait:
                    t.start()
                while self.wait:
                    self.win_skype = self.get_skype()
                    if self.win_skype is not None:
                        break
                    else:
                        time.sleep(1.0)
                else:
                    # while/else: reached only when the timer cleared self.wait
                    # without the loop breaking, i.e. Skype never showed up.
                    raise SkypeAPIError('Skype attach timeout')
            finally:
                t.cancel()
            command = Command('NAME %s' % self.friendly_name, '', True, timeout)
            # Release around send_command: it re-enters the attach/lock machinery.
            self.release()
            try:
                self.send_command(command, True)
            finally:
                self.acquire()
            if command.Reply != 'OK':
                self.win_skype = None
                self.set_attachment_status(apiAttachRefused)
                return
            self.set_attachment_status(apiAttachSuccess)
        finally:
            self.release()
        # Negotiate protocol version; Skype replies e.g. 'PROTOCOL 8'.
        command = Command('PROTOCOL %s' % self.protocol, Blocking=True)
        self.send_command(command, True)
        self.protocol = int(command.Reply.rsplit(None, 1)[-1])
def is_running(self):
return (self.get_skype() is not None)
def startup(self, minimized, nosplash):
# options are not supported as of Skype 1.4 Beta for Linux
if not self.is_running():
if os.fork() == 0: # we're the child
os.setsid()
os.execlp('skype', 'skype')
    def shutdown(self):
        """Terminate the Skype client and clean up its X11 leftovers.

        Finds the Skype PID via ``ps`` and sends SIGINT for a graceful exit,
        then removes the stale '_SKYPE_INSTANCE' root property that Skype
        sometimes leaves behind.
        """
        from signal import SIGINT
        fh = os.popen('ps -o %p --no-heading -C skype')
        pid = fh.readline().strip()
        fh.close()
        if pid:
            os.kill(int(pid), SIGINT)
            # Skype sometimes doesn't delete the '_SKYPE_INSTANCE' property
            skype_inst = x11.XInternAtom(self.disp, '_SKYPE_INSTANCE', True)
            if skype_inst:
                x11.XDeleteProperty(self.disp, self.win_root, skype_inst)
            self.win_skype = None
            self.set_attachment_status(apiAttachNotAvailable)
    def send_command(self, command, force=False):
        """Send a Command to Skype over the X11 ClientMessage channel.

        :param command: a ``Command`` instance; its ``Reply`` is filled in
            later by ``notify`` via the command registry.
        :param force: send even when not attached (used during the attach
            handshake itself to avoid infinite recursion).
        :raises SkypeAPIError: when a blocking command times out.
        """
        if self.attachment_status != apiAttachSuccess and not force:
            self.attach(command.Timeout)
        self.push_command(command)
        self.notifier.sending_command(command)
        cmd = '#%d %s' % (command.Id, command.Command)
        self.logger.debug('sending %s', repr(cmd))
        if command.Blocking:
            # Blocking: we wait on this event until notify() sets it.
            command._event = bevent = threading.Event()
        else:
            # Non-blocking: a timer reaps the command if no reply arrives.
            command._timer = timer = threading.Timer(command.timeout2float(), self.pop_command, (command.Id,))
        event = XEvent()
        event.xclient.type = ClientMessage
        event.xclient.display = self.disp
        event.xclient.window = self.win_self
        event.xclient.message_type = self.atom_msg_begin
        event.xclient.format = 8
        # The trailing NUL terminates the command on the Skype side.
        # NOTE(review): if len(cmd) is an exact multiple of 20 every chunk is
        # full-length, and the receiver's "short chunk ends message" rule may
        # never fire — confirm Skype handles the NUL terminator in that case.
        cmd = cmd.encode('utf-8') + '\x00'
        for i in range(0, len(cmd), 20):
            event.xclient.data = cmd[i:i + 20]
            x11.XSendEvent(self.disp, self.win_skype, False, 0, byref(event))
            # Only the first chunk uses atom_msg_begin; the rest use atom_msg.
            event.xclient.message_type = self.atom_msg
        self.loop_event.set()
        if command.Blocking:
            bevent.wait(command.timeout2float())
            if not bevent.isSet():
                raise SkypeAPIError('Skype command timeout')
        else:
            timer.start()
    def notify(self, cmd):
        """Dispatch one complete message received from Skype.

        Replies have the form '#<id> <text>' and are matched against the
        pending-command registry; everything else is a spontaneous
        notification.
        """
        self.logger.debug('received %s', repr(cmd))
        # Called by main loop for all received Skype commands.
        if cmd.startswith('#'):
            p = cmd.find(' ')
            command = self.pop_command(int(cmd[1:p]))
            if command is not None:
                command.Reply = cmd[p + 1:]
                if command.Blocking:
                    # Wake the sender blocked in send_command().
                    command._event.set()
                else:
                    command._timer.cancel()
                self.notifier.reply_received(command)
            else:
                # Reply for a command we no longer track (e.g. timed out).
                self.notifier.notification_received(cmd[p + 1:])
        else:
            self.notifier.notification_received(cmd)
| |
import sys
import os
import time
import numpy as np
import unittest
import ray
from ray import tune
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.trial import Trial
from ray.tune import Callback
from ray.tune.utils.placement_groups import PlacementGroupFactory
from ray.util import placement_group_table
from ray.cluster_utils import Cluster
from ray.rllib import _register_all
class TrialRunnerPlacementGroupTest(unittest.TestCase):
    """Integration tests for Ray Tune's placement-group lifecycle.

    Each test spins up a local Ray cluster, runs trials whose resources are
    expressed as PlacementGroupFactory bundles, and asserts that placement
    groups are created, reused and cleaned up correctly.
    """

    def setUp(self):
        # Effectively disable periodic experiment checkpointing and reset the
        # pending-trial cap so each test starts from the default behavior.
        os.environ["TUNE_GLOBAL_CHECKPOINT_S"] = "10000"
        os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "auto"  # Reset default
        self.head_cpus = 8
        self.head_gpus = 4
        self.head_custom = 16

        self.cluster = Cluster(
            initialize_head=True,
            connect=True,
            head_node_args={
                "include_dashboard": False,
                "num_cpus": self.head_cpus,
                "num_gpus": self.head_gpus,
                "resources": {
                    "custom": self.head_custom
                },
                "_system_config": {
                    "num_heartbeats_timeout": 10
                }
            })
        # Pytest doesn't play nicely with imports
        _register_all()

    def tearDown(self):
        ray.shutdown()
        self.cluster.shutdown()
        _register_all()  # re-register the evicted objects

    def _assertCleanup(self, trial_executor):
        """Assert the executor's PG manager holds no live placement groups."""
        # Assert proper cleanup
        pg_manager = trial_executor._pg_manager
        self.assertFalse(pg_manager._in_use_trials)
        self.assertFalse(pg_manager._in_use_pgs)
        self.assertFalse(pg_manager._staging_futures)
        for pgf in pg_manager._staging:
            self.assertFalse(pg_manager._staging[pgf])
        for pgf in pg_manager._ready:
            self.assertFalse(pg_manager._ready[pgf])
        self.assertTrue(pg_manager._latest_staging_start_time)

        # Cross-check against Ray's own view: nothing should remain scheduled.
        num_non_removed_pgs = len([
            p for pid, p in placement_group_table().items()
            if p["state"] != "REMOVED"
        ])
        self.assertEqual(num_non_removed_pgs, 0)

    def testPlacementGroupRequests(self, reuse_actors=False, scheduled=10):
        """In this test we try to start 10 trials but only have resources
        for 2. Placement groups should still be created and PENDING.

        Eventually they should be scheduled sequentially (i.e. in pairs
        of two)."""
        # Since we check per-step placement groups, set the reconcilation
        # interval to 0
        os.environ["TUNE_PLACEMENT_GROUP_RECON_INTERVAL"] = "0"

        def train(config):
            time.sleep(1)
            now = time.time()
            tune.report(end=now - config["start_time"])

        # Each trial requests 4 CPUs (head bundle) + 2x1 'custom' resource.
        head_bundle = {"CPU": 4, "GPU": 0, "custom": 0}
        child_bundle = {"custom": 1}

        placement_group_factory = PlacementGroupFactory(
            [head_bundle, child_bundle, child_bundle])

        trial_executor = RayTrialExecutor(reuse_actors=reuse_actors)

        this = self

        class _TestCallback(Callback):
            """Checks PG bookkeeping invariants after every trial-runner step."""

            def on_step_end(self, iteration, trials, **info):
                num_finished = len([
                    t for t in trials
                    if t.status == Trial.TERMINATED or t.status == Trial.ERROR
                ])

                # Total PGs tracked by the manager across all lifecycle stages.
                num_staging = sum(
                    len(s)
                    for s in trial_executor._pg_manager._staging.values())
                num_ready = sum(
                    len(s) for s in trial_executor._pg_manager._ready.values())
                num_in_use = len(trial_executor._pg_manager._in_use_pgs)
                num_cached = len(trial_executor._pg_manager._cached_pgs)

                total_num_tracked = num_staging + num_ready + \
                    num_in_use + num_cached

                num_non_removed_pgs = len([
                    p for pid, p in placement_group_table().items()
                    if p["state"] != "REMOVED"
                ])
                num_removal_scheduled_pgs = len(
                    trial_executor._pg_manager._pgs_for_removal)

                # All trials should be scheduled
                this.assertEqual(
                    scheduled,
                    min(scheduled, len(trials)),
                    msg=f"Num trials iter {iteration}")

                # The number of PGs should decrease when trials finish
                this.assertEqual(
                    max(scheduled, len(trials)) - num_finished,
                    total_num_tracked,
                    msg=f"Num tracked iter {iteration}")

                # The number of actual placement groups should match this
                this.assertEqual(
                    max(scheduled, len(trials)) - num_finished,
                    num_non_removed_pgs - num_removal_scheduled_pgs,
                    msg=f"Num actual iter {iteration}")

        start = time.time()
        out = tune.run(
            train,
            config={"start_time": start},
            resources_per_trial=placement_group_factory,
            num_samples=10,
            trial_executor=trial_executor,
            callbacks=[_TestCallback()],
            reuse_actors=reuse_actors,
            verbose=2)

        trial_end_times = sorted(t.last_result["end"] for t in out.trials)
        print("Trial end times:", trial_end_times)
        max_diff = trial_end_times[-1] - trial_end_times[0]

        # Not all trials have been run in parallel
        self.assertGreater(max_diff, 3)

        # Some trials should have run in parallel
        # Todo: Re-enable when using buildkite
        # self.assertLess(max_diff, 10)

        self._assertCleanup(trial_executor)

    def testPlacementGroupRequestsWithActorReuse(self):
        """Assert that reuse actors doesn't leak placement groups"""
        self.testPlacementGroupRequests(reuse_actors=True)

    def testPlacementGroupLimitedRequests(self):
        """Assert that maximum number of placement groups is enforced."""
        os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "6"
        self.testPlacementGroupRequests(scheduled=6)

    def testPlacementGroupLimitedRequestsWithActorReuse(self):
        os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "6"
        self.testPlacementGroupRequests(reuse_actors=True, scheduled=6)

    def testPlacementGroupDistributedTraining(self, reuse_actors=False):
        """Run distributed training using placement groups.

        Each trial requests 4 CPUs and starts 4 remote training workers.
        """
        head_bundle = {"CPU": 1, "GPU": 0, "custom": 0}
        child_bundle = {"CPU": 1}

        placement_group_factory = PlacementGroupFactory(
            [head_bundle, child_bundle, child_bundle, child_bundle])

        @ray.remote
        class TrainingActor:
            def train(self, val):
                time.sleep(1)
                return val

        def train(config):
            base = config["base"]
            actors = [TrainingActor.remote() for _ in range(4)]
            futures = [
                actor.train.remote(base + 2 * i)
                for i, actor in enumerate(actors)
            ]
            results = ray.get(futures)

            end = time.time() - config["start_time"]
            tune.report(avg=np.mean(results), end=end)

        trial_executor = RayTrialExecutor(reuse_actors=reuse_actors)

        start = time.time()
        out = tune.run(
            train,
            config={
                "start_time": start,
                "base": tune.grid_search(list(range(0, 100, 10)))
            },
            resources_per_trial=placement_group_factory,
            num_samples=1,
            trial_executor=trial_executor,
            reuse_actors=reuse_actors,
            verbose=2)

        # avg over [base, base+2, base+4, base+6] == base + 3.
        avgs = sorted(t.last_result["avg"] for t in out.trials)
        self.assertSequenceEqual(avgs, list(range(3, 103, 10)))

        trial_end_times = sorted(t.last_result["end"] for t in out.trials)
        print("Trial end times:", trial_end_times)
        max_diff = trial_end_times[-1] - trial_end_times[0]

        # Not all trials have been run in parallel
        self.assertGreater(max_diff, 3)

        # Some trials should have run in parallel
        # Todo: Re-enable when using buildkite
        # self.assertLess(max_diff, 10)

        self._assertCleanup(trial_executor)

    def testPlacementGroupDistributedTrainingWithActorReuse(self):
        self.testPlacementGroupDistributedTraining(reuse_actors=True)
class PlacementGroupNoAutoSetupTest(unittest.TestCase):
    """Placement-group tests that manage ray.init() themselves."""

    def testPlacementGroupNoCPUDriver(self):
        """Bundles with only GPU:1 but no CPU should work"""
        ray.init(num_gpus=1, num_cpus=1)

        pgf = PlacementGroupFactory([{"GPU": 1, "CPU": 0}, {"CPU": 1}])

        def train(config):
            time.sleep(1)
            return 5

        # Passes as long as the GPU-only head bundle can be scheduled.
        tune.run(train, resources_per_trial=pgf)
# Script entry point: run this file's tests under pytest, propagating the
# exit code to the shell.
if __name__ == "__main__":
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
| |
import Dataset
import os
import sys
import math
import timeit
import argparse
import tensorflow as tf
import numpy as np
import logging as log
import matplotlib.pyplot as plt
from sklearn import metrics
from Dataset import IMG_SIZE, LABELS_DICT
# Dataset locations (relative to the current working directory) and
# checkpoint paths.
TRAIN_IMAGE_DIR = os.getcwd() + '/dataset'
TEST_IMAGE_DIR = os.getcwd() + '/test_dataset'
CKPT_DIR = 'ckpt_dir'
MODEL_CKPT = 'ckpt_dir/model.cktp'
### Training parameters ###
# NOTE: BATCH_SIZE doubles as the channel count of the first conv layer in
# ConvNet.__init__ — changing it changes the network architecture too.
BATCH_SIZE = 64
### Network Parameters ###
n_input = IMG_SIZE**2      # flattened image size (pixels per channel)
n_classes = 4              # number of output classes
n_channels = 3             # RGB input
input_dropout = 0.8        # keep probability after the input layer
hidden_dropout = 0.5       # keep probability in hidden layers
std_dev = 0.1 #math.sqrt(2/n_input) # http://cs231n.github.io/neural-networks-2/#init
class ConvNet(object):
    """AlexNet-style convolutional classifier (Python 2 / TensorFlow 1.x).

    Constructed either for training (pass ``dataset_training``) or for
    prediction (pass ``dataset_test``).  Allowed keyword arguments:
    learning_rate, max_epochs, display_step, dataset_training, dataset_test;
    any omitted key defaults to False.
    """

    ## Constructor to build the model for the training ##
    def __init__(self, **kwargs):
        params = set(['learning_rate','max_epochs','display_step','dataset_training','dataset_test'])
        # initialize all allowed keys to false
        self.__dict__.update((key, False) for key in params)
        # and update the given keys by their given values
        # (dict.iteritems() is Python 2 only)
        self.__dict__.update((key, value) for key, value in kwargs.iteritems() if key in params)

        if(self.dataset_training != False):
            self.train_imgs_lab = Dataset.loadDataset(self.dataset_training)
        else:
            self.test_imgs_lab = Dataset.loadDataset(self.dataset_test)

        # Store layers weight & bias
        # NOTE(review): BATCH_SIZE is reused as a channel count here, and
        # 'wd' is the only weight initialized without stddev=std_dev —
        # likely an oversight; confirm before changing.
        self.weights = {
            'wc1': tf.Variable(tf.random_normal([11, 11, n_channels, BATCH_SIZE], stddev=std_dev)),
            'wc2': tf.Variable(tf.random_normal([5, 5, BATCH_SIZE, BATCH_SIZE*2], stddev=std_dev)),
            'wc3': tf.Variable(tf.random_normal([3, 3, BATCH_SIZE*2, BATCH_SIZE*4], stddev=std_dev)),
            'wc4': tf.Variable(tf.random_normal([3, 3, BATCH_SIZE*4, BATCH_SIZE*4], stddev=std_dev)),
            'wc5': tf.Variable(tf.random_normal([3, 3, BATCH_SIZE*4, 256], stddev=std_dev)),
            'wd': tf.Variable(tf.random_normal([2*2*256, 4096])),
            'wfc': tf.Variable(tf.random_normal([4096, 2*2*256], stddev=std_dev)),
            'out': tf.Variable(tf.random_normal([2*2*256, n_classes], stddev=std_dev))
        }
        self.biases = {
            'bc1': tf.Variable(tf.random_normal([BATCH_SIZE])),
            'bc2': tf.Variable(tf.random_normal([BATCH_SIZE*2])),
            'bc3': tf.Variable(tf.random_normal([BATCH_SIZE*4])),
            'bc4': tf.Variable(tf.random_normal([BATCH_SIZE*4])),
            'bc5': tf.Variable(tf.random_normal([256])),
            'bd': tf.Variable(tf.random_normal([4096])),
            'bfc': tf.Variable(tf.random_normal([2*2*256])),
            'out': tf.Variable(tf.random_normal([n_classes]))
        }

        # Graph input: flattened images (n_input pixels x n_channels),
        # one-hot labels, and dropout keep-probability placeholders.
        self.img_pl = tf.placeholder(tf.float32, [None, n_input, n_channels])
        self.label_pl = tf.placeholder(tf.float32, [None, n_classes])
        self.keep_prob_in = tf.placeholder(tf.float32)
        self.keep_prob_hid = tf.placeholder(tf.float32)

        # Create a saver for writing training checkpoints.
        self.saver = tf.train.Saver()

    # Batch function for Training - give the next batch of images and labels
    def BatchIteratorTraining(self, batch_size):
        imgs = []
        labels = []
        for img, label in self.train_imgs_lab:
            imgs.append(img)
            labels.append(label)
            if len(imgs) == batch_size:
                yield imgs, labels
                imgs = []
                labels = []
        # Final partial batch, if any.
        if len(imgs) > 0:
            yield imgs, labels

    # Batch function for Testing - give the next batch of images and labels
    def BatchIteratorTesting(self, batch_size):
        imgs = []
        labels = []
        for img, label in self.test_imgs_lab:
            imgs.append(img)
            labels.append(label)
            if len(imgs) == batch_size:
                yield imgs, labels
                imgs = []
                labels = []
        # Final partial batch, if any.
        if len(imgs) > 0:
            yield imgs, labels

    """
    Create AlexNet model
    """
    def conv2d(self, name, l_input, w, b, s):
        # Convolution + bias + ReLU with stride s and SAME padding.
        return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, s, s, 1], padding='SAME'), b), name=name)

    def max_pool(self, name, l_input, k, s):
        # k x k max pooling with stride s.
        return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, s, s, 1], padding='SAME', name=name)

    def norm(self, name, l_input, lsize):
        # Local response normalization (AlexNet-style).
        return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=2e-05, beta=0.75, name=name)

    def alex_net_model(self, _X, _weights, _biases, input_dropout, hidden_dropout):
        """Build the network graph and return the class logits tensor."""
        # Reshape input picture
        _X = tf.reshape(_X, shape=[-1, IMG_SIZE, IMG_SIZE, 3])

        # Convolutional Layer 1
        conv1 = self.conv2d('conv1', _X, _weights['wc1'], _biases['bc1'], s=4)
        print "conv1.shape: ", conv1.get_shape()
        # Max Pooling (down-sampling)
        pool1 = self.max_pool('pool1', conv1, k=3, s=2)
        print "pool1.shape:", pool1.get_shape()
        # Apply Normalization
        norm1 = self.norm('norm1', pool1, lsize=4)
        print "norm1.shape:", norm1.get_shape()
        # Apply Dropout
        dropout1 = tf.nn.dropout(norm1, input_dropout)
        tf.summary.histogram("weights", _weights['wc1'])
        tf.summary.histogram("convolution", conv1 )
        tf.summary.histogram("activation", norm1)

        # Convolutional Layer 2
        conv2 = self.conv2d('conv2', dropout1, _weights['wc2'], _biases['bc2'], s=1)
        print "conv2.shape:", conv2.get_shape()
        # Max Pooling (down-sampling)
        pool2 = self.max_pool('pool2', conv2, k=3, s=2)
        print "pool2.shape:", pool2.get_shape()
        # Apply Normalization
        norm2 = self.norm('norm2', pool2, lsize=4)
        print "norm2.shape:", norm2.get_shape()
        # Apply Dropout
        #dropout2 = tf.nn.dropout(norm2, hidden_dropout)
        tf.summary.histogram("weights", _weights['wc2'])
        tf.summary.histogram("convolution", conv2 )
        tf.summary.histogram("activation", norm2)

        # Convolutional Layer 3
        conv3 = self.conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'], s=1)
        print "conv3.shape:", conv3.get_shape()
        pool3 = self.max_pool('pool3', conv3, k=3, s=2)
        norm3 = self.norm('norm3', pool3, lsize=4)
        dropout3 = tf.nn.dropout(norm3, hidden_dropout)
        tf.summary.histogram("weights", _weights['wc3'])
        tf.summary.histogram("convolution", conv3 )
        tf.summary.histogram("activation", norm3)

        # Convolutional Layer 4
        conv4 = self.conv2d('conv4', dropout3, _weights['wc4'], _biases['bc4'], s=1)
        print "conv4.shape:", conv4.get_shape()
        pool4 = self.max_pool('pool4', conv4, k=3, s=2)
        norm4 = self.norm('norm4', pool4, lsize=4)
        dropout4 = tf.nn.dropout(norm4, hidden_dropout)
        tf.summary.histogram("weights", _weights['wc4'])
        tf.summary.histogram("convolution", conv4 )
        tf.summary.histogram("activation", norm4)

        # Convolutional Layer 5
        conv5 = self.conv2d('conv5', dropout4, _weights['wc5'], _biases['bc5'], s=1)
        print "conv5.shape:", conv5.get_shape()
        pool5 = self.max_pool('pool5', conv5, k=3, s=2)
        tf.summary.histogram("convolution", conv5 )

        # Fully connected layer 1
        pool5_shape = pool5.get_shape().as_list()
        print "pool5_shape: ", pool5.get_shape()
        dense = tf.reshape(pool5, [-1, pool5_shape[1] * pool5_shape[2] * pool5_shape[3]])
        print "dense.shape:", dense.get_shape().as_list()
        fc1 = tf.nn.relu(tf.matmul(dense, _weights['wd']) + _biases['bd'], name='fc1') # Relu activation
        print "fc1.shape:", fc1.get_shape()
        #dropout6 = tf.nn.dropout(fc1, hidden_dropout) #
        tf.summary.histogram("fully_connected", fc1)

        # Fully connected layer 2
        fc2 = tf.nn.relu(tf.matmul(fc1, _weights['wfc']) + _biases['bfc'], name='fc2') # Relu activation
        print "fc2.shape:", fc2.get_shape()
        dropout7 = tf.nn.dropout(fc2, hidden_dropout)
        tf.summary.histogram("fully_connected", fc2)

        # Output, class prediction LOGITS
        out = tf.matmul(dropout7, _weights['out']) + _biases['out']
        tf.summary.histogram("output", out)
        # The function returns the Logits to be passed to softmax and the Softmax for the PREDICTION
        return out

    # Method for training the model and testing its accuracy
    def training(self):
        """Train the network, checkpoint it, and report validation metrics."""
        # Launch the graph
        with tf.Session() as sess:

            ## Construct model: prepare logits, loss and optimizer ##
            # logits: unnormalized log probabilities
            logits = self.alex_net_model(self.img_pl, self.weights, self.biases, self.keep_prob_in, self.keep_prob_hid)
            # loss: cross-entropy between the target and the softmax activation function applied to the model's prediction
            loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=self.label_pl))
            tf.summary.scalar("cross-entropy_for_loss", loss)
            # optimizer: find the best gradients of the loss with respect to each of the variables
            train_step = tf.train.AdamOptimizer(learning_rate=self.learning_rate, epsilon=0.1).minimize(loss)
            tf.summary.scalar("learning_rate", self.learning_rate)
            print logits.get_shape(), self.label_pl.get_shape()

            ## Evaluate model: the degree to which the result of the prediction conforms to the correct value ##
            # list of booleans
            correct_pred = tf.equal(tf.argmax(logits,1), tf.argmax(self.label_pl, 1))
            # [True, False, True, True] -> [1,0,1,1] -> 0.75
            accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
            tf.summary.scalar("accuracy", accuracy)

            merged_summary_op = tf.summary.merge_all()

            # Initializing the variables
            init = tf.global_variables_initializer()
            # Run the Op to initialize the variables.
            sess.run(init)

            summary_writer = tf.summary.FileWriter(CKPT_DIR, graph=sess.graph)

            ##################################################################
            # collect imgs for validation (first 6 batches of the training set)
            validation_imgs_batch = [b for i, b in enumerate(self.BatchIteratorTraining(BATCH_SIZE)) if i < 6]

            # Run for epoch
            for epoch in range(self.max_epochs):
                print "epoch = %d" % epoch
                log.info("Epoch %s" % epoch)
                self.train_imgs_lab = Dataset.loadDataset(self.dataset_training) # necessary 'cause of the yeld

                # Loop over all batches
                for step, elems in enumerate(self.BatchIteratorTraining(BATCH_SIZE)):
                    print "step = %d" % step
                    ### from iterator return batch lists ###
                    batch_imgs_train, batch_labels_train = elems
                    _, train_acc, train_loss, summary_op = sess.run([train_step, accuracy, loss, merged_summary_op], feed_dict={self.img_pl: batch_imgs_train, self.label_pl: batch_labels_train, self.keep_prob_in: 1.0, self.keep_prob_hid: 1.0})
                    # FIXME(review): ``i`` is undefined here (it only existed in
                    # the validation list comprehension above) — this raises
                    # NameError on the first step; the global step should
                    # probably be epoch * steps_per_epoch + step.
                    summary_writer.add_summary(summary_op, epoch * step + i)
                    if step % self.display_step == 0:
                        log.info("Training Accuracy = " + "{:.5f}".format(train_acc))
                        log.info("Training Loss = " + "{:.6f}".format(train_loss))

            print "Optimization Finished!"

            # Save the models to disk
            save_model_ckpt = self.saver.save(sess, MODEL_CKPT)
            print("Model saved in file %s" % save_model_ckpt)

            ##################################################################
            ### Metrics ###
            y_p = tf.argmax(logits,1) # the value predicted
            target_names = ['class 0', 'class 1', 'class 2', 'class 3']
            list_pred_total = []
            list_true_total = []
            # Accuracy Precision Recall F1-score by VALIDATION IMAGES
            for step, elems in enumerate(validation_imgs_batch):
                batch_imgs_valid, batch_labels_valid = elems
                valid_acc, y_pred = sess.run([accuracy, y_p], feed_dict={self.img_pl: batch_imgs_valid, self.label_pl: batch_labels_valid, self.keep_prob_in: 1.0, self.keep_prob_hid: 1.0})
                log.info("Validation accuracy = " + "{:.5f}".format(valid_acc))
                list_pred_total.extend(y_pred)
                y_true = np.argmax(batch_labels_valid,1)
                list_true_total.extend(y_true)

            # Classification Report (PRECISION - RECALL - F1 SCORE)
            log.info("\n")
            log.info(metrics.classification_report(list_true_total, list_pred_total, target_names=target_names))
            # Network Input Values
            log.info("Learning Rate " + "{:.4f}".format(self.learning_rate))
            log.info("Number of epochs " + "{:d}".format(self.max_epochs))
            print(metrics.classification_report(list_true_total, list_pred_total, target_names=target_names))

            # ROC curve
            # NOTE(review): roc_curve is binary-only and y_true/y_pred here
            # hold only the LAST validation batch (4 classes, multiclass
            # labels) — this plot is almost certainly not meaningful as-is.
            fpr, tpr, _ = metrics.roc_curve(y_true, y_pred)
            plt.figure()
            plt.plot(fpr, tpr, label='ROC curve')
            plt.xlim([0.0, 1.0])
            plt.ylim([0.0, 1.0])
            plt.xlabel('False Positive Rate')
            plt.ylabel('True Positive Rate')
            plt.title('Recognition ROC curve')
            plt.legend(loc="lower right")
            plt.show()

    def prediction(self):
        """Restore the latest checkpoint and report metrics on the test set."""
        with tf.Session() as sess:
            # Construct model
            pred = self.alex_net_model(self.img_pl, self.weights, self.biases, self.keep_prob_in, self.keep_prob_hid)

            # Restore model.
            ckpt = tf.train.get_checkpoint_state("ckpt_dir")
            if(ckpt):
                self.saver.restore(sess, MODEL_CKPT)
                print "Model restored"
            else:
                print "No model checkpoint found to restore - ERROR"
                return

            ### M ###
            y_p = tf.argmax(pred,1) # the value predicted
            target_names = ['class 0', 'class 1', 'class 2', 'class 3']
            list_pred_total = []
            list_true_total = []
            # Accuracy Precision Recall F1-score by TEST IMAGES
            for step, elems in enumerate(self.BatchIteratorTesting(BATCH_SIZE)):
                batch_imgs_test, batch_labels_test = elems
                y_pred = sess.run(y_p, feed_dict={self.img_pl: batch_imgs_test, self.keep_prob_in: 1.0, self.keep_prob_hid: 1.0})
                print("batch predict = %d" % len(y_pred))
                list_pred_total.extend(y_pred)
                y_true = np.argmax(batch_labels_test,1)
                print("batch real = %d" % len(y_true))
                list_true_total.extend(y_true)

            # Classification Report (PRECISION - RECALL - F1 SCORE)
            log.info('\n')
            log.info(metrics.classification_report(list_true_total, list_pred_total, target_names=target_names))
            # Network Input Values
            # NOTE(review): in prediction mode learning_rate/max_epochs are
            # the default False, so these log lines print 0.0000 / 0.
            log.info("Learning Rate " + "{:.4f}".format(self.learning_rate))
            log.info("Number of epochs " + "{:d}".format(self.max_epochs))
            print(metrics.classification_report(list_true_total, list_pred_total, target_names=target_names))
### MAIN ###
def main():
    """CLI entry point: dispatch to training, prediction, or preprocessing.

    Subcommands: train, predict, preprocessing_training, preprocessing_test.
    """
    np.random.seed(7)
    parser = argparse.ArgumentParser(description='A convolutional neural network for image recognition')
    subparsers = parser.add_subparsers()

    training_args = [
        (['-lr', '--learning-rate'], {'help':'learning rate', 'type':float, 'default':0.001}),
        (['-e', '--max_epochs'], {'help':'max epochs', 'type':int, 'default':100}),
        (['-ds', '--display-step'], {'help':'display step', 'type':int, 'default':10}),
        (['-dtr', '--dataset_training'], {'help':'dataset training file', 'type':str, 'default':'images_shuffled.pkl'})
    ]
    test_args = [
        (['-dts', '--dataset_test'], {'help':'dataset test file', 'type':str, 'default':'images_test_dataset.pkl'})
    ]

    # parser train
    parser_train = subparsers.add_parser('train')
    parser_train.set_defaults(which='train')
    for arg in training_args:
        parser_train.add_argument(*arg[0], **arg[1])

    # parser preprocessing training data
    parser_preprocess = subparsers.add_parser('preprocessing_training')
    parser_preprocess.set_defaults(which='preprocessing_training')
    parser_preprocess.add_argument('-f', '--file', help='output training file', type=str, default='images_dataset.pkl')
    parser_preprocess.add_argument('-s', '--shuffle', help='shuffle training dataset', action='store_true')
    parser_preprocess.set_defaults(shuffle=False)

    # parser preprocessing test data
    parser_preprocess = subparsers.add_parser('preprocessing_test')
    parser_preprocess.set_defaults(which='preprocessing_test')
    parser_preprocess.add_argument('-t', '--test', help='output test file', type=str, default='images_test_dataset.pkl')

    # parser predict
    parser_predict = subparsers.add_parser('predict')
    parser_predict.set_defaults(which='predict')
    for arg in test_args:
        parser_predict.add_argument(*arg[0], **arg[1])

    args = parser.parse_args()

    # FILE LOG
    log.basicConfig(filename='FileLog.log', level=log.INFO, format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filemode="w")

    # TRAINING & PREDICTION
    if args.which in ('train', 'predict'):
        # NOTE(review): timeit.timeit defaults to number=1000000 executions,
        # so this line loads the whole dataset a million times and its
        # result ``t`` is never used — almost certainly unintended.
        t = timeit.timeit("Dataset.loadDataset(TRAIN_IMAGE_DIR)", setup="from __main__ import *")
        # create the object ConvNet
        if args.which == 'train':
            # TRAINING
            conv_net = ConvNet(learning_rate=args.learning_rate, max_epochs=args.max_epochs,
                               display_step=args.display_step, dataset_training=args.dataset_training)
            # count total number of imgs in training
            train_img_count = Dataset.getNumImages(TRAIN_IMAGE_DIR)
            log.info("Training set num images = %d" % train_img_count)
            conv_net.training()
        else:
            # PREDICTION
            conv_net = ConvNet(dataset_test=args.dataset_test)
            # count total number of imgs in test
            test_img_count = Dataset.getNumImages(TEST_IMAGE_DIR)
            log.info("Test set num images = %d" % test_img_count)
            conv_net.prediction()
    # PREPROCESSING TRAINING
    elif args.which == 'preprocessing_training':
        if args.shuffle:
            l = [i for i in Dataset.loadDataset('images_dataset.pkl')]
            np.random.shuffle(l)
            Dataset.saveShuffle(l)
        else:
            Dataset.saveDataset(TRAIN_IMAGE_DIR, args.file)
    # PREPROCESSING TEST
    elif args.which == 'preprocessing_test':
        Dataset.saveDataset(TEST_IMAGE_DIR, args.test)
# Script entry point.
if __name__ == '__main__':
    main()
| |
### Standard library imports
from cmd import Cmd
import getpass
import re
### Local imports
from smartt_client import SmarttClient
from smartt_client import SmarttClientException
# ANSI terminal escape sequences used to colorize console output.
HEADERCOLORCODE = "\033[95m"
BLUECOLORCODE = "\033[94m"
GREENCOLORCODE = "\033[92m"
YELLOWCOLORCODE = "\033[93m"
REDCOLORCODE = "\033[91m"
ENDOFCOLORCODE = "\033[0m"  # reset to default attributes
### SmarttConsole class - a console/shell application which interfaces with
### the Smartt server
class SmarttConsole(Cmd):
    """Interactive shell (Python 2) that forwards commands to a Smartt server.

    Each ``do_<name>`` method implements one console command; responses are
    printed in blue, errors from the client in red.
    """

    ##########################################################################
    ### Configurations ###
    ######################
    prompt = "smartt> "

    def preloop(self):
        # One client connection for the lifetime of the console session.
        self.smartt_client = SmarttClient()
    ##########################################################################

    def splitArgs(self, arg):
        """Split *arg* on whitespace, keeping double-quoted runs together."""
        extracted_values = re.findall('("([^"]*)")|(\S+)', arg)
        # Group 1 matched -> quoted value (group 2, without quotes);
        # otherwise take the bare token from group 3.
        values = [(value[2] if value[0] == "" else value[1])
                  for value in extracted_values]
        return values

    def printValue(self, value):
        """Recursively pretty-print dicts, lists and scalars."""
        if isinstance(value, dict):
            # NOTE(review): the loop variable deliberately shadows the
            # ``value`` parameter; harmless because the dict branch returns
            # to the caller afterwards.
            for (name, value) in value.iteritems():
                print "%s: %s" % (name, value)
        elif isinstance(value, list):
            index = 0
            for element in value:
                print str(index) + ":"
                self.printValue(element)
                index += 1
                print ""
        else:
            print value

    def printResponse(self, response):
        # Wrap the whole response in blue escape codes.
        print BLUECOLORCODE
        self.printValue(response)
        print ENDOFCOLORCODE

    ##########################################################################
    ### Smartt Functions ###
    ########################
    def do_login(self, arg):
        """login <username> — prompt for password and authenticate."""
        splitted_args = self.splitArgs(arg)
        if len(splitted_args) == 0:
            print "Login not specified"
            return
        username = splitted_args[0]
        print "Logging in as '%s'" % username
        # getpass keeps the password off the terminal echo.
        password = getpass.getpass()
        self.printResponse(self.smartt_client.login(username, password))

    def do_logged(self, arg):
        self.printResponse(self.smartt_client.logged())

    def do_logout(self, arg):
        self.printResponse(self.smartt_client.logout())

    def do_get_client(self, arg):
        attributes = self.splitArgs(arg)
        self.printResponse(self.smartt_client.getClient(attributes))

    def do_get_time(self, arg):
        self.printResponse(self.smartt_client.getTime())

    def do_get_stock(self, arg):
        """get_stock <code> <market> [attributes...]"""
        splitted_args = self.splitArgs(arg)
        stock_code = splitted_args[0]
        market_name = splitted_args[1]
        attributes = splitted_args[2:]
        self.printResponse(
            self.smartt_client.getStock(stock_code, market_name, attributes))

    def do_send_order(self, arg):
        splitted_args = self.splitArgs(arg)
        print "Not implemented"

    def do_cancel_order(self, arg):
        splitted_args = self.splitArgs(arg)
        order_id = splitted_args[0]
        self.printResponse(self.smartt_client.cancelOrder(order_id))

    def do_change_order(self, arg):
        splitted_args = self.splitArgs(arg)
        print "Not implemented"

    def do_send_stop_order(self, arg):
        splitted_args = self.splitArgs(arg)
        print "Not implemented"

    def do_cancel_stop_order(self, arg):
        splitted_args = self.splitArgs(arg)
        stop_order_id = splitted_args[0]
        self.printResponse(self.smartt_client.cancelStopOrder(stop_order_id))

    # The following getters ignore their parsed arguments; splitArgs is kept
    # for interface uniformity with the other commands.
    def do_get_orders(self, arg):
        splitted_args = self.splitArgs(arg)
        self.printResponse(self.smartt_client.getOrders())

    def do_get_orders_events(self, arg):
        splitted_args = self.splitArgs(arg)
        self.printResponse(self.smartt_client.getOrdersEvents())

    def do_get_stop_orders(self, arg):
        splitted_args = self.splitArgs(arg)
        self.printResponse(self.smartt_client.getStopOrders())

    def do_get_stop_orders_events(self, arg):
        splitted_args = self.splitArgs(arg)
        self.printResponse(self.smartt_client.getStopOrdersEvents())

    def do_get_trades(self, arg):
        splitted_args = self.splitArgs(arg)
        self.printResponse(self.smartt_client.getTrades())

    def do_get_portfolio(self, arg):
        splitted_args = self.splitArgs(arg)
        self.printResponse(self.smartt_client.getPortfolio())

    def do_get_available_limits(self, arg):
        splitted_args = self.splitArgs(arg)
        self.printResponse(self.smartt_client.getAvailableLimits())
    ##########################################################################

    ##########################################################################
    ### Lower level Smartt messaging ###
    ####################################
    def do_message(self, arg):
        message = self.splitArgs(arg)
        self.smartt_client.sendMessage(message)
        print self.smartt_client.receiveMessage()

    def do_query(self, arg):
        # Alias for 'message'.
        self.do_message(arg)

    def do_rawmessage(self, arg):
        # Sends the argument string verbatim, bypassing splitArgs.
        self.smartt_client.sendRawMessage(arg)
        print self.smartt_client.receiveRawMessage()

    def do_rawquery(self, arg):
        # Alias for 'rawmessage'.
        self.do_rawmessage(arg)
    ##########################################################################

    ### Quitting commands ###
    #########################
    def do_EOF(self, arg):
        # Ctrl-D: print a newline so the shell prompt starts cleanly.
        print ""
        return True

    def do_quit(self, arg):
        return True

    def do_exit(self, arg):
        return True
    ##########################################################################

    ##########################################################################
    ### Wrappers and overloaded functions ###
    #########################################
    def onecmd(self, line):
        ### Wraps commands and catches exceptions
        try:
            return Cmd.onecmd(self, line)
        except SmarttClientException as e:
            # Show the error in red and keep the console alive.
            print REDCOLORCODE + str(e) + ENDOFCOLORCODE
            return False

    def emptyline(self):
        ### Empty lines do nothing - default is to run last command
        pass

    def default(self, line):
        ### Default message - unknown command
        print(REDCOLORCODE
              + "Unknown command: '{0}' - you sure you typed that right?"
              .format(line) + ENDOFCOLORCODE)
    ##########################################################################
##########################################################################
##############################################################################
##############################################################################
### Main function - application starting point ###
##################################################
def main():
    ### Build the console and hand control to its interactive loop
    console = SmarttConsole()
    console.cmdloop("Welcome to the Smartt Client Console!")
##############################################################################
### Standard script entry point - run the console when executed directly.
if __name__ == "__main__":
    main()
| |
import bpy
import array
import numpy
import os
import xml.etree.ElementTree as element_tree
import struct
import bpy_extras
from bpy_extras.io_utils import ExportHelper
import bmesh
import shutil
import xml.dom.minidom as minidom
import mathutils
# Maps a Blender image filepath to its project-relative texture path;
# filled by export_textures() and read back by export_material().
_texture_mapping = dict()
# Helper that turns an ElementTree element into human-readable XML text
def make_pretty_xml(elem):
    """Serialize *elem* and return a tab-indented, pretty-printed XML string."""
    raw = element_tree.tostring(elem, encoding="us-ascii")
    return minidom.parseString(raw).toprettyxml(indent="\t")
# Make sure a given object is triangulated
def triangulate_object(obj):
    """Triangulate *obj*'s mesh in place via a temporary BMesh."""
    mesh = obj.data
    temp = bmesh.new()
    temp.from_mesh(mesh)
    bmesh.ops.triangulate(temp, faces=temp.faces[:], quad_method=0, ngon_method=0)
    # Write the triangulated result back and release the BMesh
    temp.to_mesh(mesh)
    temp.free()
def to_y_up(matrix):
    """Convert a Blender Z-up 4x4 transform into the Y-up convention."""
    conv = bpy_extras.io_utils.axis_conversion(from_forward='Y', from_up='Z', to_forward='-Z', to_up='Y')
    rot = conv * matrix.to_3x3()
    result = mathutils.Matrix()
    # Copy the converted 3x3 rotation part row by row.
    for row in range(3):
        for col in range(3):
            result[row][col] = rot[row][col]
    # Translation: keep x, swap y/z with a sign flip (Z-up -> Y-up).
    result[0][3] = matrix[0][3]
    result[1][3] = matrix[2][3]
    result[2][3] = -matrix[1][3]
    # Homogeneous bottom row.
    result[3][0] = 0
    result[3][1] = 0
    result[3][2] = 0
    result[3][3] = 1
    return result
def mat_to_string(matrix):
    """Return the Y-up-converted 4x4 *matrix* as 16 space-separated values.

    Values are emitted in row-major order, exactly matching the original
    hand-unrolled string concatenation.
    """
    y_up_mat = to_y_up(matrix)
    return " ".join(str(y_up_mat[row][col])
                    for row in range(4) for col in range(4))
def vector_to_string(vector3):
    """Return *vector3* as a homogeneous 'x y z 1' string."""
    components = [str(vector3[axis]) for axis in range(3)]
    components.append("1")
    return " ".join(components)
def export_geometry(mesh_data, output_file_name, matrix, dupli_offset):
    """Write *mesh_data* to *output_file_name* as a Wavefront OBJ file.

    Parameters
    ----------
    mesh_data: the (triangulated) Blender mesh to export.
    output_file_name: absolute path of the .obj file to create.
    matrix: transform applied to every vertex position.
    dupli_offset: translation added after the transform (group offset).
    """
    # Use a context manager so the file is always closed
    # (the original leaked the handle).
    with open(output_file_name, 'w') as fout:
        # Our Header
        fout.write("# Donut wavefront exporter\n")
        fout.write("\n")
        # Write all the positions
        for v in mesh_data.vertices:
            transformed_pos = matrix * v.co + dupli_offset
            fout.write("v %.4f %.4f %.4f\n" % transformed_pos[:])
        fout.write("\n")
        # The internal blender format supports multiple UV channels
        # The first one is used for the Diffuse, NormalMap, ARM and the
        # second one (if available) is used for lightmaps
        if len(mesh_data.uv_layers) > 0:
            for tex_coord in mesh_data.uv_layers[0].data:
                fout.write("vt %.4f %.4f\n" % tex_coord.uv[:])
        else:
            # No UVs: emit a zero UV per corner of every triangle
            for _ in mesh_data.polygons:
                fout.write("vt %.4f %.4f\n" % (0, 0))
                fout.write("vt %.4f %.4f\n" % (0, 0))
                fout.write("vt %.4f %.4f\n" % (0, 0))
        fout.write("\n")
        # Finally write all the faces as position/uv index pairs
        # (OBJ indices are 1-based)
        for face_idx, p in enumerate(mesh_data.polygons):
            fout.write("f ")
            for vert_idx, i in enumerate(p.vertices):
                fout.write("%d/%d " % (i + 1, 3 * face_idx + vert_idx + 1))
            fout.write("\n")
def export_sugar(sugar_name, geometry_relative_path, material_name, project_dir):
    """Write a .sugar XML file binding one geometry to one material."""
    print("Exporting sugar: " + sugar_name)
    # Building the sugar xml file
    sugar_node = element_tree.Element("sugar", name=sugar_name)
    renderables_node = element_tree.SubElement(sugar_node, "renderables")
    renderable_node = element_tree.SubElement(renderables_node, "renderable", id="0")
    element_tree.SubElement(renderable_node, "geometry", location=geometry_relative_path)
    element_tree.SubElement(renderable_node, "material", name=material_name)
    sugar_path = project_dir + "/sugars/" + sugar_name + ".sugar"
    # 'with' guarantees the handle is closed (the original leaked it)
    with open(sugar_path, 'w') as fout:
        fout.write(make_pretty_xml(sugar_node))
def export_material(material_name, material, output_topping_path):
    """Write *material* as a .topping XML file at *output_topping_path*.

    Textured materials use the base shader set and reference the first
    texture found on a TEX_IMAGE node; untextured ones use the
    uniform-color shaders with the material's diffuse color (white when
    *material* is None).
    """
    print("Exporting topping: " + material_name)
    # Lets define if this material uses any texture
    uses_texture = False
    mat_texture_list = []
    if material and material.use_nodes:
        for node in material.node_tree.nodes:
            if node.type == 'TEX_IMAGE':
                uses_texture = True
                mat_texture_list.append(_texture_mapping[node.image.filepath])
    # Building the topping xml file
    topping_node = element_tree.Element("topping", name=material_name)
    # Create the shaders
    shader_node = element_tree.SubElement(topping_node, "shader")
    if uses_texture:
        element_tree.SubElement(shader_node, "vertex", location="common/shaders/base/vertex.glsl")
        element_tree.SubElement(shader_node, "geometry", location="common/shaders/base/geometry.glsl")
        element_tree.SubElement(shader_node, "fragment", location="common/shaders/base/fragment.glsl")
        textures_node = element_tree.SubElement(topping_node, "textures")
        element_tree.SubElement(textures_node, "texture2D", name="textureCmp", location=mat_texture_list[0])
    else:
        element_tree.SubElement(shader_node, "vertex", location="common/shaders/uniform_color/vertex.glsl")
        element_tree.SubElement(shader_node, "fragment", location="common/shaders/uniform_color/fragment.glsl")
        extern_data_node = element_tree.SubElement(topping_node, "extern_data")
        color_str = "1 1 1 1"
        if material is not None:
            color_str = vector_to_string(material.diffuse_color)
        element_tree.SubElement(extern_data_node, "data", type="vec4", name="uniform_color", value=color_str)
    # 'with' guarantees the handle is closed (the original leaked it)
    with open(output_topping_path, 'w') as fout:
        fout.write(make_pretty_xml(topping_node))
def export_element(target_object, root_node, target_path, project_name, project_dir):
    """Export one mesh object: scene node, material, geometry and sugar."""
    if target_object.library:
        print("Object in library")
    # Asset names are lower-cased object/mesh names
    asset_name = target_object.name.lower()
    print("Exporting sugar: " + asset_name)
    # Create the scene node that holds it
    scene_node = element_tree.SubElement(root_node, "node", TM=mat_to_string(target_object.matrix_world))
    element_tree.SubElement(scene_node, "model", sugar=asset_name)
    # Make sure the geometry is triangulated first
    triangulate_object(target_object)
    mesh_name = target_object.data.name.lower()
    # Export the active material as a topping
    material_name = asset_name + "_material"
    export_material(material_name, target_object.active_material,
                    project_dir + "/toppings/" + material_name + ".topping")
    # Output the geometry with an identity transform and no offset
    export_geometry(target_object.data,
                    project_dir + "/geometries/" + mesh_name + ".obj",
                    mathutils.Matrix.Identity(4), mathutils.Vector((0,0,0)))
    # Export the sugar that matches this element
    export_sugar(asset_name, project_name + "/geometries/" + mesh_name + ".obj", material_name, project_dir)
def export_scene(target_scene, target_path, project_name, project_dir):
    """Export *target_scene* as the project's default.flour level file.

    Each mesh object becomes a sugar instance; objects carrying a dupli
    group become instances of the corresponding flattened group sugar.
    """
    # Create the level file
    flour_file = os.path.join(project_dir, "default.flour")
    print ("The exported flour will be at " + flour_file)
    # Create all the required nodes of the flour
    flour_node = element_tree.Element("flour", name="blender_flour")
    root_node = element_tree.SubElement(flour_node, "node")
    pipeline_node = element_tree.SubElement(flour_node, "pipeline", name="complete")
    illumination_node = element_tree.SubElement(flour_node, "illumination")
    for obj in target_scene.objects:
        # If it has a mesh create a sugar for it
        if (obj.type == "MESH"):
            export_element(obj, root_node, target_path, project_name, project_dir)
        # If it has a dupli group, reference the flattened group sugar
        if(obj.dupli_group):
            group_name = obj.dupli_group.name.lower() + "_group"
            new_scene_node = element_tree.SubElement(root_node, "node", TM=mat_to_string(obj.matrix_world))
            sugar_instance = element_tree.SubElement(new_scene_node, "model", sugar=group_name)
    # Build the tree and export it; 'with' guarantees the handle is
    # closed (the original leaked it)
    with open(flour_file, 'w') as fout:
        fout.write(make_pretty_xml(flour_node))
def export_groups(target_path, project_name):
    """Flatten every Blender group into a single multi-renderable sugar.

    For each mesh in a group, exports its geometry (baked with the
    object's world transform and the group's dupli offset) and its
    material, and records both as a renderable in the group's .sugar file.
    """
    # Go through the groups
    for group in bpy.data.groups:
        # Evaluate the group asset-name
        group_name = group.name.lower() + "_group"
        # Building the sugar xml
        sugar_node = element_tree.Element("sugar", name=group_name)
        renderables_node = element_tree.SubElement(sugar_node, "renderables")
        # Index of the renderable
        geo_idx = 0
        # Loop through the elements of this object
        for obj in group.objects:
            # If the object is a mesh, process it as a mesh
            if (obj.type == "MESH"):
                # triangulate the object first
                triangulate_object(obj)
                # Generate the name for the matching assets
                geometry_name = obj.name.lower()
                material_name = obj.name.lower() + "_material"
                geometry_relative_path = project_name + "/geometries/" + geometry_name + "_group.obj"
                geometry_absolute_path = target_path + geometry_relative_path
                material_absolute_path = target_path + project_name + "/toppings/" + material_name + "_group.topping"
                # Create a renderable node for it
                renderable_node = element_tree.SubElement(renderables_node, "renderable", id=str(geo_idx))
                geometry_node = element_tree.SubElement(renderable_node, "geometry", location=geometry_relative_path)
                material_node = element_tree.SubElement(renderable_node, "material", name=material_name)
                # Write the geometry file
                export_geometry(obj.data, geometry_absolute_path, obj.matrix_world, group.dupli_offset)
                geo_idx += 1
                # Write the material file
                export_material(material_name, obj.active_material, material_absolute_path)
        # Generate the output filename of the sugar file and write it;
        # 'with' guarantees the handle is closed (the original leaked it)
        sugar_path = target_path + project_name + "/sugars/" + group_name + ".sugar"
        with open(sugar_path, 'w') as fout:
            fout.write(make_pretty_xml(sugar_node))
def export_textures(target_path, project_name):
    """Copy every existing image used by the blend file into the project
    textures folder and remember its project-relative path."""
    for image_var in bpy.data.images:
        source_path = image_var.filepath_from_user()
        # Skip images whose backing file is missing on disk
        if not os.path.isfile(source_path):
            continue
        base_name = os.path.basename(source_path)
        relative_path = project_name + "/textures/" + base_name
        shutil.copyfile(os.path.realpath(bpy.path.abspath(source_path)),
                        target_path + relative_path)
        # Record the mapping so export_material can resolve it later
        _texture_mapping[image_var.filepath] = relative_path
def create_project_structure(project_dir):
    """(Re)create the project folder tree, wiping any previous export.

    Removes *project_dir* if it exists, then creates the empty
    geometries/sugars/toppings/textures sub-folders.
    """
    print("Project will be exported to " + project_dir)
    # Destroy the hosting folder if it exists.  shutil.rmtree is
    # synchronous, so the original "while exists: pass" busy-wait
    # (a potential CPU spin / infinite loop) was removed.
    if os.path.exists(project_dir):
        shutil.rmtree(project_dir)
    for sub_dir in ("", "/geometries", "/sugars", "/toppings", "/textures"):
        os.makedirs(project_dir + sub_dir)
def export_donut_project(target_path, project_name):
    """Top-level driver: build the folder tree, then export textures,
    groups and finally the active scene."""
    project_dir = target_path + "/" + project_name
    # Folder hierarchy first, since everything below writes into it
    create_project_structure(project_dir)
    # Textures must come before materials so _texture_mapping is populated
    export_textures(target_path, project_name)
    # Flatten each group into a single sugar
    export_groups(target_path, project_name)
    # Export all the scene items
    export_scene(bpy.context.scene, target_path, project_name, project_dir)
# Gather the required data
# NOTE(review): the export destination is hard-coded - adjust as needed.
target_path = "C:/TEMP/"
project_name = "blender_scene"
# Export the scene into a donut project
export_donut_project(target_path, project_name)
| |
"""
This is a bottom-up chart parser for a fragment of English.
It uses the active chart datastructure. The design is based
on Steve Isard's LIB CHART, a teaching tool (written in 1983) that
comes with the wonderful Poplog AI development environment.
This has been adjusted to work with a lattice of input possibilities,
and to have a simple feature system.
References
----------
The original LIB CHART [1]_ and the Poplog website [2]_
.. [1] http://www.poplog.org/gospl/packages/pop11/lib/chart.p
.. [2] http://www.poplog.org
Examples
--------
>>> parse(["the","pigeons",'are','punished','and','they','suffer'])
['the', 'pigeons', 'are', 'punished', 'and', 'they', 'suffer']
Parse 1:
S
S
Np
det the
Nn
n pigeons
cop are
ppart punished
conj and
S
Np
pn they
Vp
v suffer
1 parses
>>> parse(["the","pigeons",'are','punished','and','they','suffer',"and","they","suffer"])
['the', 'pigeons', 'are', 'punished', 'and', 'they', 'suffer', 'and', 'they', 'suffer']
Parse 1:
S
S
S
Np
det the
Nn
n pigeons
cop are
ppart punished
conj and
S
Np
pn they
Vp
v suffer
conj and
S
Np
pn they
Vp
v suffer
Parse 2:
S
S
Np
det the
Nn
n pigeons
cop are
ppart punished
conj and
S
S
Np
pn they
Vp
v suffer
conj and
S
Np
pn they
Vp
v suffer
2 parses
"""
##
# Created 10 March 2014
# author: Chris Brew
# author: Stephen Isard
# license: Apache 2.0
##
from collections import defaultdict, namedtuple
from english import GRAMMAR
import features
import english
from features import ImmutableCategory as icat
import operator
import itertools
import heapq
def hpush(heap, item):
    """
    Simple list based alternative to heapq.heappush()
    (the original docstring wrongly said heappop).

    Reduces need for comparisons in agenda. Much
    faster, and all current tests pass.
    """
    heap.append(item)
def hpop(heap):
    """
    LIFO pop used in place of heapq.heappop().

    Avoids ordering comparisons on the agenda; behaves as a stack pop.
    All current tests pass with this strategy.
    """
    return heap.pop()
# from heapq import heappop as hpop
# from heapq import heappush as hpush
class LinearWords(object):
    """
    Finite-state-machine view of a plain word sequence.

    Any input source for the parser must expose a ``final_state``
    numbered ``n`` (with earlier states 0..n-1) and an ``arcs``
    iterator; this class provides both for a linear list of words.
    """
    def __init__(self, words):
        self.words = words
    @property
    def final_state(self):
        """State number reached after consuming every word."""
        return len(self.words)
    def arcs(self):
        """
        Yield (start, word, end) triples, one per word, in order.
        """
        for position, word in enumerate(self.words):
            yield position, word, position + 1
from edges import Edge
class Chart(object):
    """An active chart parser.
    Parameters
    ----------
    words: list of string.
        the words to be parsed.
    grammar: Grammar
        the grammar to parse against.
    verbose: boolean
        provide more logging if true.
    using_features: boolean
        use categories with features on them if true.
    Attributes
    ----------
    partials: list<set<Edge>>
        a list of sets of partial edges ending in
        position i are stored in partials[i]
    completes: list<set<Edge>>
        a list of sets of complete edges
        starting in position i are stored in completes[i]
    prev: defaultdict of set of Edge
        mapping from edges to the complete edges that
        gave rise to them: empty for edges not created by fundamental rule
    agenda: priority queue of edges
        The list of edges still remaining to be incorporated.
    """
    def __init__(self, words,
                 grammar=GRAMMAR,
                 verbose=False,
                 input_source=LinearWords,
                 run=True,
                 using_features=False):
        """
        Create and run the parser.
        """
        self.using_features = using_features
        self.input_source = input_source
        self.verbose = verbose
        self.grammar = grammar.grammar
        self.prev = defaultdict(set)
        self.countdict = defaultdict(int)
        self.agenda = []
        self.seed_agenda(words)
        # Plain grammars compare categories by simple equality; feature
        # grammars need the unification-style check in self.compatible.
        if self.using_features:
            self.compat = self.compatible
        else:
            self.compat = operator.eq
        # Exhaust the agenda immediately unless the caller wants to
        # drive incorporation manually (run=False).
        if run:
            while self.agenda:
                item = hpop(self.agenda)
                if self.verbose:
                    print item #pragma no cover
                self.incorporate(item)
    def show(self):
        # Debug helper: print every partial then every complete edge.
        for p in self.partials:
            for e in p:
                print e
        for c in self.completes:
            for e in c:
                print e
    def setup_words(self, words):
        """
        Instantiate the source of words.
        """
        if self.using_features:
            words = [icat.from_string(w) for w in words]
        return self.input_source(words)
    def seed_agenda(self, words):
        """
        Go through the words, seeding the agenda.
        Uses an interface where the
        object that introduces the words is a finite-state
        machine whose arcs can be enumerated.
        """
        words = self.setup_words(words)
        final_state = words.final_state
        self.partials = [set() for _ in range(final_state + 1)]
        self.completes = [set() for _ in range(final_state + 1)]
        for i,w,j in words.arcs():
            hpush(self.agenda,self.lexical(i,w,j))
    def lexical(self, i, word, j):
        """
        Create a lexical edge based on `word`.
        Parameters
        ----------
        word: string
            the word to base the edge on,
        i: integer
            where the edge starts
        j: integer
            where the edge ends
        """
        return Edge(label=word, left=i, right=j, needed=(),constraints=None)
    def solutions(self, topCat,n=None):
        """
        Find the solutions rooted in `topCat`
        Parameters
        ----------
        topCat: string
            the symbol that the sentence should be rooted in.
        n: integer or None
            if given, return only the nth solution.
        Returns
        -------
        solutions:list<Edge>
        """
        # A solution spans the whole input and matches the top category.
        r = [e for e in self.completes[0] if
             e.right == len(self.completes) - 1 and self.compat(topCat,e.label)]
        if n is not None:
            return r[n]
        else:
            return r
    def add_prev(self, e, c):
        """
        Record information about a **complete** predecessor of an edge.
        Taken together with the edge itself, this lets the
        **partial** partner be reconstructed.
        Parameters
        ----------
        e: Edge
            an edge that has just been made.
        c: Edge
            a predecessor of `e`, not necessarily the only one.
        Returns
        -------
        e: Edge
            the edge whose information has just been recorded.
        """
        self.prev[e].add(c)
        return e
    def get_prev(self, e):
        """
        Return the predecessors of an edge.
        Parameters
        ----------
        e: Edge
            the edge whose predecessors are desired.
        Returns
        -------
        edges : set [Edge]
            the predecessors of `e`
        """
        return self.prev[e]
    def pairwithpartials(self, partials, e):
        """
        Run the fundamental rule for everything in
        `partials` that goes with `e`.
        Updates the `agenda` by adding to its end.
        Parameters
        ----------
        partials: set<Edge>
            the potential partners of `e`
        e: Edge
            The complete edge that should be augmented.
        """
        for p in partials:
            if self.compat(e.label,p.needed[0]):
                newedge = Edge(label=p.label,
                               left=p.left,
                               right=e.right,
                               needed=p.needed[1:],
                               constraints=p.constraints)
                if self.using_features:
                    newedge = newedge.percolate(e.label)
                hpush(self.agenda,
                      self.add_prev(newedge, e))
    def pairwithcompletes(self, e, completes):
        """
        Run the fundamental rule for everything in
        `completes` that goes with `e`.
        Updates the `agenda` by adding to its end.
        Probabilities, if present, are propagated.
        Updates the `agenda`.
        :type completes: set<Edge>
        :param completes: the potential partners of e
        :type e: Edge
        :param e: The partial edge that should be completed.
        """
        for c in completes:
            if self.compat(e.needed[0],c.label):
                newedge = Edge(label=e.label, left=e.left,
                               right=c.right,
                               needed=e.needed[1:],
                               constraints=e.constraints)
                if self.using_features:
                    newedge = newedge.percolate(c.label)
                hpush(self.agenda,self.add_prev(newedge, c))
    def compatible(self,rule_category, chart_category):
        """
        Compatibility check. Called only when features are being used.
        """
        return (rule_category.cat == chart_category.cat) and rule_category.fcheck(chart_category)
    def spawn(self, lc, i):
        """
        Spawn empty edges at `i` from the rules that match `lc`.
        a spawned edge need only be added the first time that
        it is predicted.
        Updates the `agenda`.
        Parameters
        ----------
        lc: string or Category
            the label of the left corner item to spawn from.
        i: integer
            the index of the cell where the empty edges are to go.
        Examples
        --------
        >>> ch = Chart([])
        >>> ch.spawn('Np', 0)
        >>> sorted(ch.agenda)[0]
        P(Np, 0, 0,('Np', 'Pp'))
        """
        for rule in self.grammar:
            lhs = rule.lhs
            rhs = rule.rhs
            if self.compat(rhs[0], lc):
                e = Edge(label=lhs, left=i, right=i,
                         needed=tuple(rhs),
                         constraints=rule.constraints
                         )
                if e not in self.somepartials(right=e.left):
                    self.prev[e] = set()
                    hpush(self.agenda,e)
    def find(self,e):
        # Return the chart edge equivalent to `e`
        # (implicitly None when no matching edge exists).
        if e.iscomplete():
            edges = self.completes[e.left]
        else:
            edges = self.partials[e.right]
        # there will be zero or one edge in the chart that satisfies
        # the criteria...
        for edge in edges:
            if edge.left == e.left and edge.right == e.right and self.compat(edge.label,e.label) and self.allcompatible(edge.needed, e.needed):
                return edge
    def membership_check(self, e, previous):
        """
        Check whether edge or equivalent
        is present.
        Four cases
        1) edge is present, return True and original set.
        2) edge is entirely absent: return False and original set.
        3) edge is less general than one in the set, return True and original set
        4) edge is more general than one in the set, return True and modified set
        that replaces the more specific with the new edge.
        """
        if e in previous:
            return True,previous
        if not self.using_features:
            return False,previous
        for p in previous:
            if e.less_general(p):
                return True,previous
            elif p.less_general(e):
                return True, (previous - set([p])) | set([e])
        return False,previous
    def incorporate(self, e):
        """
        Add e to the chart and trigger all corresponding actions.
        Parameters
        ----------
        e: Edge
            the edge to be added.
        Examples
        --------
        >>> ch = Chart(['the'])
        >>> ch.incorporate(Edge('s',0,0,('banana',),None))
        >>> ch.incorporate(Edge('s',0,0,('banana',),None))
        >>> sorted(ch.partials[0])[-1:]
        [P(s, 0, 0,('banana',))]
        >>> ch = Chart([])
        >>> ch.incorporate(Edge('np',0,1,(),None))
        >>> ch.incorporate(Edge('np',0,1,(),None))
        >>> ch.completes[0]
        set([C(np, 0, 1)])
        """
        if e.iscomplete():
            flag,self.completes[e.left] = self.membership_check(e, self.completes[e.left])
            if flag:  # no new edge needs to be added
                pass
            else:
                self.completes[e.left].add(e)
                # TODO the empty edge produced by spawn
                # will immedidately combine with e
                # so we could make the result directly.
                self.spawn(e.label, e.left)
                self.pairwithpartials(self.somepartials(right=e.left), e)
        elif e.ispartial():
            flag,self.partials[e.right] = self.membership_check(e, self.partials[e.right])
            if flag:  # no new edge needs to be added
                pass
            else:
                self.partials[e.right].add(e)
                self.pairwithcompletes(e, self.completes[e.right])
        else:
            # NOTE(review): raising a string is a TypeError on Python 3 and
            # deprecated on Python 2 - should be an exception instance.
            raise "Huh? edge has to be either partial or complete!" #pragma no cover
    def allcompatible(self,cs1,cs2):
        # True when both category sequences match element-wise under compat.
        if len(cs1) != len(cs2):
            return False
        for c1,c2 in zip(cs1,cs2):
            if not self.compat(c1,c2):
                return False
        return True
    def count_edges(self,sol=None):
        """
        Recursive (memoized) procedure to find counts
        of edges.
        XXX Answer is not sensible parsing of infinite strings. The procedure runs but is misleading.
        >>> v = parse(('the pigeons are punished' + ( ' and they suffer' * 4)).split(),return_chart=True, print_trees=False)
        >>> v.count_edges()
        14
        >>> v = parse(('the pigeons are punished' + ( ' and they suffer' * 5)).split(),return_chart=True, print_trees=False)
        >>> v.count_edges()
        42
        """
        if sol is None:
            # Top-level call: reset the memo table and sum over solutions.
            self._traced = dict()
            s = 0
            for sol in self.solutions(self.topcat):
                self.count_edges(sol=sol)
                s += self._traced[sol]
            return s
        elif sol in self._traced:
            pass
        else:
            ps = self.prev[sol]
            if ps:
                self._traced[sol] = 0
                for e in ps:
                    # Reconstruct the partial partner that combined with e.
                    probe = Edge(label=sol.label,
                                 left=sol.left,
                                 right=e.left,
                                 needed = (e.label,) + sol.needed,
                                 constraints=sol.constraints)
                    probe = self.find(probe)
                    self.count_edges(sol=probe)
                    self.count_edges(sol=e)
                    self._traced[sol] += self._traced[e] * self._traced[probe]
            else:
                self._traced[sol] = 1
    def count(self,e):
        """
        Count the trees that are rooted in edge.
        Parameters
        ==========
        e: Edge
            the chart entry whose analyses we count.
        Tests
        =====
        Check that counting of ambiguous parses works as advertised. Numbers are, as theory dictates
        from the Catalan series, which grows very fast.
        >>> v = parse(('the pigeons are punished' + ( ' and they suffer' * 0)).split(), use_features=True, print_trees=False, return_chart=True)
        >>> v.count(v.solutions(v.topcat,0))
        1
        >>> v = parse(('the pigeons are punished' + ( ' and they suffer' * 1)).split(), use_features=True, print_trees=False, return_chart=True)
        >>> v.count(v.solutions(v.topcat,0))
        1
        >>> v = parse(('the pigeons are punished' + ( ' and they suffer' * 2)).split(), use_features=True, print_trees=False, return_chart=True)
        >>> v.count(v.solutions(v.topcat,0))
        2
        >>> v = parse(('the pigeons are punished' + ( ' and they suffer' * 3)).split(), use_features=True, print_trees=False, return_chart=True)
        >>> v.count(v.solutions(v.topcat,0))
        5
        >>> v = parse(('the pigeons are punished' + ( ' and they suffer' * 4)).split(), use_features=True, print_trees=False, return_chart=True)
        >>> v.count(v.solutions(v.topcat,0))
        14
        >>> v = parse(('the pigeons are punished' + ( ' and they suffer' * 5)).split(), use_features=True, print_trees=False, return_chart=True)
        >>> v.count(v.solutions(v.topcat,0))
        42
        >>> v = parse(('the pigeons are punished' + ( ' and they suffer' * 6)).split(), use_features=True, print_trees=False, return_chart=True)
        >>> v.count(v.solutions(v.topcat,0))
        132
        >>> v = parse(('the pigeons are punished' + ( ' and they suffer' * 7)).split(), use_features=True, print_trees=False, return_chart=True)
        >>> v.count(v.solutions(v.topcat,0))
        429
        """
        # Lazily populate the memo table on first use.
        if not hasattr(self,'_traced'):
            self.count_edges()
        return self._traced[e]
    def somepartials(self,right=None,left=None,label=None,first=None,rest=None):
        """
        Accessor that gets a set of relevant partials.
        """
        if right is not None:
            r = self.partials[right]
        else:
            r = set().union(*self.partials)
        if left is not None:
            r = {e for e in r if e.left==left}
        if label is not None:
            r = {e for e in r if self.compat(e.label,label)}
        if first is not None:
            r = {e for e in r if self.compat(e.needed[0],first)}
        if rest is not None:
            r = {e for e in r if self.allcompatible(e.needed[1:],rest)}
        return frozenset(r)
    def trees_debug(self,e):
        # Debug helper only: drops into ipdb then prints each tree.
        import ipdb; ipdb.set_trace()
        for t in self.trees(e):
            print t
    def trees(self, e):
        """
        Generate the trees that are rooted in edge.
        Parameters
        ==========
        e: Edge
            the chart entry whose daughters we trace.
        This is an iterator, and can unpack the first few of even very
        ambiguous parse forests. The following is too costly to run as
        a doctest, but does work.
        from math import log10
        v = chart.parse(('the pigeons are punished' + ( ' and they suffer' * 152)).split(),return_chart=True,
                        print_trees=False,show_chart=False)
        print log10(v.count_edges())
        87.98857337128997
        ts = v.trees(v.solutions(v.topcat)[0])
        print len((treestring(ts.next()))
        16522
        Currently, this does not work when the parse forest is infinite. See `demo_arcs2` in lattice. This
        has an infinite recursion for C(S,0,4) when the input FSA is
        0 the 1 pigeons 2 are 3 punished 4 (and 5 they 6 suffer 7)
        3 punished 7 6 suffer 4
        That is, the final state is 7.
        S(0,4) does have an infinite yield, so this is not a big surprise.
        """
        prev = self.get_prev(e)
        if prev:
            for c in prev:
                for p in self.somepartials(right=c.left,left=e.left,label=e.label,first=c.label,rest=e.needed):
                    for left in self.trees(p):
                        for right in self.trees(c):
                            yield Tree(e.label,left.children + tuple([right]))
        else:
            yield Tree(e.label)
    def results(self,**kwds):
        """
        Code for creating results.
        """
        # NOTE(review): kwds is accepted but currently unused here.
        return dict(sols=self.solutions(self.topcat),
                    n_trees=self.count_edges(),
                    topcat=self.topcat)
class Tree(object):
    """
    Container for syntax trees.
    Attributes
    ----------
    parent: string
        label of parent node.
    children: tuple<Tree>
        the subtrees (possibly empty).
    """
    # The original had a bare list literal here (a no-op expression);
    # __slots__ was clearly intended, since instances carry only these
    # two attributes.
    __slots__ = ["parent", "children"]
    def __init__(self, parent, children=()):
        self.parent = parent
        self.children = children
    def __str__(self):
        """
        >>> print Tree("S",[Tree("NP"),Tree("VP")])
        S
         NP
         VP
        <BLANKLINE>
        """
        return treestring(self)
def treestring(t, tab=0, sep=' '):
    """
    Render a syntax tree as an indented multi-line string.

    Preterminals (a node with exactly one childless child) are printed
    on a single line, e.g. ``n dog``.  Each nesting level is prefixed
    with one more copy of *sep*.

    Parameters
    ==========
    t: syntax tree
        The tree to be printed.
    tab: int
        Current indentation depth.
    sep: string
        The string repeated *tab* times as the indent unit.
    """
    indent = sep * tab
    kids = t.children
    if len(kids) == 1 and kids[0].children == ():
        # preterminal: label and terminal word share one line
        return indent + str(t.parent) + ' ' + str(kids[0].parent) + '\n'
    out = indent + str(t.parent) + '\n'
    for kid in kids:
        out += treestring(kid, tab=tab + 1, sep=sep)
    return out
def parse(sentence, verbose=False, topcat='S', grammar=GRAMMAR,sep=' ', input_source=LinearWords,
          use_features=False,show_chart=False,print_trees=True,return_chart=False,
          trace_edges=True,
          return_trees = False):
    """
    Print out the parses of a sentence
    Parameters
    ----------
    sentence: list<string>
        the words to be parsed.
    Examples
    --------
    >>> parse(["the","pigeons",'are','punished','and','they','suffer'])
    ['the', 'pigeons', 'are', 'punished', 'and', 'they', 'suffer']
    Parse 1:
    S
     S
      Np
       det the
       Nn
        n pigeons
      cop are
      ppart punished
     conj and
     S
      Np
       pn they
      Vp
       v suffer
    1 parses
    >>> parse(["the","pigeons",'are','punished','and','they','blink'])
    ['the', 'pigeons', 'are', 'punished', 'and', 'they', 'blink']
    No parse
    """
    # Feature-based parsing swaps in the feature grammar and wraps the
    # top category in an ImmutableCategory.
    if use_features:
        grammar = features.make_feature_grammar()
        topcat = icat.from_string(topcat)
    v = Chart(sentence, verbose=verbose,grammar=grammar,input_source=input_source, using_features=use_features)
    v.topcat = topcat
    sols = v.solutions(topcat)
    # NOTE(review): results() accepts these keyword args via **kwds but
    # does not currently use them.
    res = v.results(show_chart=show_chart,
                    print_trees=print_trees,
                    trace_edges=trace_edges)
    # silent when neither trees nor the chart are to be printed
    silent = not (print_trees or show_chart)
    if not silent:
        print sentence
    if show_chart:
        v.show()
    if print_trees:
        i = 0
        for e in sols:
            for tree in v.trees(e):
                i += 1
                if print_trees:
                    print "Parse %d:" % i
                    print treestring(tree, tab=0, sep=sep),
    if not silent:
        if res['n_trees'] == 0:
            print "No parse"
        else:
            print res['n_trees'], "parses"
    if return_chart:
        return v
    else:
        return None
def edge_summary(v):
    """
    Summarize chart contents as counts of partial and complete edges.

    Also sanity-checks that every edge in the chart is recorded in the
    predecessor table (possibly with an empty predecessor set).
    """
    partial_edges = set().union(*v.partials)
    complete_edges = set().union(*v.completes)
    # every edge must have an entry in prev, even if empty
    assert not {edge for edge in complete_edges if edge not in v.prev}
    assert not {edge for edge in partial_edges if edge not in v.prev}
    return dict(partials=len(partial_edges), completes=len(complete_edges))
def augment_prev(prev): #pragma no cover
    """
    For each edge in *prev*, build the (partial, complete) pair that
    could have produced it.
    If features are being used, it may not be exactly the edges that
    were used, but will be compatible with them.
    """
    return {e: {(Edge(e.label, e.left, c.left, (c.label,) + e.needed,
                      constraints=e.constraints), c)
                for c in cs}
            for e, cs in prev.items()}
| |
"""The tests for the Alexa component."""
# pylint: disable=protected-access
import asyncio
import json
import datetime
import pytest
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
from homeassistant.components import alexa
# Canned Alexa identifiers used to build request payloads in the tests.
SESSION_ID = "amzn1.echo-api.session.0000000-0000-0000-0000-00000000000"
APPLICATION_ID = "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00ebe"
REQUEST_ID = "amzn1.echo-api.request.0000000-0000-0000-0000-00000000000"
# pylint: disable=invalid-name
# Service calls recorded by the mock 'test.alexa' service handler.
calls = []
NPR_NEWS_MP3_URL = "https://pd.npr.org/anon.npr-mp3/npr/news/newscast.mp3"
@pytest.fixture
def alexa_client(loop, hass, test_client):
    """Initialize a Home Assistant server for testing this module."""
    # Record every call to the mock 'test.alexa' service so tests can
    # assert on the data it received.
    @callback
    def mock_service(call):
        calls.append(call)
    hass.services.async_register("test", "alexa", mock_service)
    assert loop.run_until_complete(async_setup_component(hass, alexa.DOMAIN, {
        # Key is here to verify we allow other keys in config too
        "homeassistant": {},
        "alexa": {
            "flash_briefings": {
                "weather": [
                    {"title": "Weekly forecast",
                     "text": "This week it will be sunny."},
                    {"title": "Current conditions",
                     "text": "Currently it is 80 degrees fahrenheit."}
                ],
                "news_audio": {
                    "title": "NPR",
                    "audio": NPR_NEWS_MP3_URL,
                    "display_url": "https://npr.org",
                    "uid": "uuid"
                }
            },
            "intents": {
                "WhereAreWeIntent": {
                    "speech": {
                        "type": "plaintext",
                        "text":
                        """
                        {%- if is_state("device_tracker.paulus", "home")
                               and is_state("device_tracker.anne_therese",
                                            "home") -%}
                            You are both home, you silly
                        {%- else -%}
                            Anne Therese is at {{
                                states("device_tracker.anne_therese")
                            }} and Paulus is at {{
                                states("device_tracker.paulus")
                            }}
                        {% endif %}
                        """,
                    }
                },
                "GetZodiacHoroscopeIntent": {
                    "speech": {
                        "type": "plaintext",
                        "text": "You told us your sign is {{ ZodiacSign }}.",
                    }
                },
                "AMAZON.PlaybackAction<object@MusicCreativeWork>": {
                    "speech": {
                        "type": "plaintext",
                        "text": "Playing {{ object_byArtist_name }}.",
                    }
                },
                "CallServiceIntent": {
                    "speech": {
                        "type": "plaintext",
                        "text": "Service called",
                    },
                    "action": {
                        "service": "test.alexa",
                        "data_template": {
                            "hello": "{{ ZodiacSign }}"
                        },
                        "entity_id": "switch.test",
                    }
                }
            }
        }
    }))
    # Return an aiohttp test client bound to Home Assistant's HTTP app.
    return loop.run_until_complete(test_client(hass.http.app))
def _intent_req(client, data=None):
    """POST an Alexa intent payload to the intents API endpoint.

    ``data`` defaults to an empty payload.  ``None`` is used as the
    default instead of ``{}`` to avoid the mutable-default-argument
    pitfall (a shared dict across calls); behavior is unchanged because
    ``json.dumps({})`` is produced either way.
    """
    if data is None:
        data = {}
    return client.post(alexa.INTENTS_API_ENDPOINT, data=json.dumps(data),
                       headers={'content-type': 'application/json'})
def _flash_briefing_req(client, briefing_id):
return client.get(
"/api/alexa/flash_briefings/{}".format(briefing_id))
@asyncio.coroutine
def test_intent_launch_request(alexa_client):
    """Test the launch of a request.

    A LaunchRequest envelope (no intent) should still yield a 200 response
    containing an outputSpeech section.
    """
    data = {
        "version": "1.0",
        "session": {
            "new": True,
            "sessionId": SESSION_ID,
            "application": {
                "applicationId": APPLICATION_ID
            },
            "attributes": {},
            "user": {
                "userId": "amzn1.account.AM3B00000000000000000000000"
            }
        },
        "request": {
            "type": "LaunchRequest",
            "requestId": REQUEST_ID,
            "timestamp": "2015-05-13T12:34:56Z"
        }
    }
    req = yield from _intent_req(alexa_client, data)
    assert req.status == 200
    resp = yield from req.json()
    assert "outputSpeech" in resp["response"]
@asyncio.coroutine
def test_intent_request_with_slots(alexa_client):
    """Test a request with slots.

    The ZodiacSign slot value should be rendered into the
    GetZodiacHoroscopeIntent speech template.
    """
    data = {
        "version": "1.0",
        "session": {
            "new": False,
            "sessionId": SESSION_ID,
            "application": {
                "applicationId": APPLICATION_ID
            },
            "attributes": {
                "supportedHoroscopePeriods": {
                    "daily": True,
                    "weekly": False,
                    "monthly": False
                }
            },
            "user": {
                "userId": "amzn1.account.AM3B00000000000000000000000"
            }
        },
        "request": {
            "type": "IntentRequest",
            "requestId": REQUEST_ID,
            "timestamp": "2015-05-13T12:34:56Z",
            "intent": {
                "name": "GetZodiacHoroscopeIntent",
                "slots": {
                    "ZodiacSign": {
                        "name": "ZodiacSign",
                        "value": "virgo"
                    }
                }
            }
        }
    }
    req = yield from _intent_req(alexa_client, data)
    assert req.status == 200
    data = yield from req.json()
    text = data.get("response", {}).get("outputSpeech",
                                        {}).get("text")
    assert text == "You told us your sign is virgo."
@asyncio.coroutine
def test_intent_request_with_slots_but_no_value(alexa_client):
    """Test a request with slots but no value.

    A slot entry that carries no "value" key should render as an empty
    string in the speech template.
    """
    data = {
        "version": "1.0",
        "session": {
            "new": False,
            "sessionId": SESSION_ID,
            "application": {
                "applicationId": APPLICATION_ID
            },
            "attributes": {
                "supportedHoroscopePeriods": {
                    "daily": True,
                    "weekly": False,
                    "monthly": False
                }
            },
            "user": {
                "userId": "amzn1.account.AM3B00000000000000000000000"
            }
        },
        "request": {
            "type": "IntentRequest",
            "requestId": REQUEST_ID,
            "timestamp": "2015-05-13T12:34:56Z",
            "intent": {
                "name": "GetZodiacHoroscopeIntent",
                "slots": {
                    "ZodiacSign": {
                        "name": "ZodiacSign",
                    }
                }
            }
        }
    }
    req = yield from _intent_req(alexa_client, data)
    assert req.status == 200
    data = yield from req.json()
    text = data.get("response", {}).get("outputSpeech",
                                        {}).get("text")
    assert text == "You told us your sign is ."
@asyncio.coroutine
def test_intent_request_without_slots(hass, alexa_client):
    """Test a request without slots.

    The WhereAreWeIntent speech template reads two device_tracker states:
    first both are unknown, then both are set to "home" and the template's
    other branch is rendered.
    """
    data = {
        "version": "1.0",
        "session": {
            "new": False,
            "sessionId": SESSION_ID,
            "application": {
                "applicationId": APPLICATION_ID
            },
            "attributes": {
                "supportedHoroscopePeriods": {
                    "daily": True,
                    "weekly": False,
                    "monthly": False
                }
            },
            "user": {
                "userId": "amzn1.account.AM3B00000000000000000000000"
            }
        },
        "request": {
            "type": "IntentRequest",
            "requestId": REQUEST_ID,
            "timestamp": "2015-05-13T12:34:56Z",
            "intent": {
                "name": "WhereAreWeIntent",
            }
        }
    }
    req = yield from _intent_req(alexa_client, data)
    assert req.status == 200
    # Renamed local from `json` to `body`: the original shadowed the
    # module-level `import json`.
    body = yield from req.json()
    text = body.get("response", {}).get("outputSpeech",
                                        {}).get("text")
    assert text == "Anne Therese is at unknown and Paulus is at unknown"

    hass.states.async_set("device_tracker.paulus", "home")
    hass.states.async_set("device_tracker.anne_therese", "home")

    req = yield from _intent_req(alexa_client, data)
    assert req.status == 200
    body = yield from req.json()
    text = body.get("response", {}).get("outputSpeech",
                                        {}).get("text")
    assert text == "You are both home, you silly"
@asyncio.coroutine
def test_intent_request_calling_service(alexa_client):
    """Test a request for calling a service.

    CallServiceIntent is configured with an "action" that invokes the
    mocked test.alexa service; the call and its templated data are
    recorded in the module-level ``calls`` list.
    """
    data = {
        "version": "1.0",
        "session": {
            "new": False,
            "sessionId": SESSION_ID,
            "application": {
                "applicationId": APPLICATION_ID
            },
            "attributes": {},
            "user": {
                "userId": "amzn1.account.AM3B00000000000000000000000"
            }
        },
        "request": {
            "type": "IntentRequest",
            "requestId": REQUEST_ID,
            "timestamp": "2015-05-13T12:34:56Z",
            "intent": {
                "name": "CallServiceIntent",
                "slots": {
                    "ZodiacSign": {
                        "name": "ZodiacSign",
                        "value": "virgo",
                    }
                }
            }
        }
    }
    # Snapshot the call count so the assertion is robust to earlier tests.
    call_count = len(calls)
    req = yield from _intent_req(alexa_client, data)
    assert req.status == 200
    assert call_count + 1 == len(calls)
    call = calls[-1]
    assert call.domain == "test"
    assert call.service == "alexa"
    assert call.data.get("entity_id") == ["switch.test"]
    # The slot value was rendered through the data_template.
    assert call.data.get("hello") == "virgo"
@asyncio.coroutine
def test_intent_session_ended_request(alexa_client):
    """Test the request for ending the session.

    A SessionEndedRequest is acknowledged with a 200 and an empty body.
    """
    data = {
        "version": "1.0",
        "session": {
            "new": False,
            "sessionId": SESSION_ID,
            "application": {
                "applicationId": APPLICATION_ID
            },
            "attributes": {
                "supportedHoroscopePeriods": {
                    "daily": True,
                    "weekly": False,
                    "monthly": False
                }
            },
            "user": {
                "userId": "amzn1.account.AM3B00000000000000000000000"
            }
        },
        "request": {
            "type": "SessionEndedRequest",
            "requestId": REQUEST_ID,
            "timestamp": "2015-05-13T12:34:56Z",
            "reason": "USER_INITIATED"
        }
    }
    req = yield from _intent_req(alexa_client, data)
    assert req.status == 200
    text = yield from req.text()
    assert text == ''
@asyncio.coroutine
def test_intent_from_built_in_intent_library(alexa_client):
    """Test intents from the Built-in Intent Library.

    Built-in intents use dotted slot names (object.byArtist.name); the
    configured template refers to them with underscores
    (object_byArtist_name), so the component must flatten the dots.
    """
    data = {
        'request': {
            'intent': {
                'name': 'AMAZON.PlaybackAction<object@MusicCreativeWork>',
                'slots': {
                    'object.byArtist.name': {
                        'name': 'object.byArtist.name',
                        'value': 'the shins'
                    },
                    'object.composer.name': {
                        'name': 'object.composer.name'
                    },
                    'object.contentSource': {
                        'name': 'object.contentSource'
                    },
                    'object.era': {
                        'name': 'object.era'
                    },
                    'object.genre': {
                        'name': 'object.genre'
                    },
                    'object.name': {
                        'name': 'object.name'
                    },
                    'object.owner.name': {
                        'name': 'object.owner.name'
                    },
                    'object.select': {
                        'name': 'object.select'
                    },
                    'object.sort': {
                        'name': 'object.sort'
                    },
                    'object.type': {
                        'name': 'object.type',
                        'value': 'music'
                    }
                }
            },
            'timestamp': '2016-12-14T23:23:37Z',
            'type': 'IntentRequest',
            'requestId': REQUEST_ID,
        },
        'session': {
            'sessionId': SESSION_ID,
            'application': {
                'applicationId': APPLICATION_ID
            }
        }
    }
    req = yield from _intent_req(alexa_client, data)
    assert req.status == 200
    data = yield from req.json()
    text = data.get("response", {}).get("outputSpeech",
                                        {}).get("text")
    assert text == "Playing the shins."
@asyncio.coroutine
def test_flash_briefing_invalid_id(alexa_client):
    """Test an invalid Flash Briefing ID."""
    response = yield from _flash_briefing_req(alexa_client, 10000)
    assert response.status == 404
    body = yield from response.text()
    assert body == ''
@asyncio.coroutine
def test_flash_briefing_date_from_str(alexa_client):
    """Test the response has a valid date parsed from string."""
    response = yield from _flash_briefing_req(alexa_client, "weather")
    assert response.status == 200
    payload = yield from response.json()
    update_date = payload[0].get(alexa.ATTR_UPDATE_DATE)
    parsed = datetime.datetime.strptime(update_date, alexa.DATE_FORMAT)
    assert isinstance(parsed, datetime.datetime)
@asyncio.coroutine
def test_flash_briefing_valid(alexa_client):
    """Test the response is valid.

    Checks the generated update date parses, then compares the rest of
    the briefing payload against the expected structure.
    """
    data = [{
        "titleText": "NPR",
        "redirectionURL": "https://npr.org",
        "streamUrl": NPR_NEWS_MP3_URL,
        "mainText": "",
        "uid": "uuid",
        "updateDate": '2016-10-10T19:51:42.0Z'
    }]

    req = yield from _flash_briefing_req(alexa_client, "news_audio")
    assert req.status == 200
    # Renamed local from `json` to `body`: the original shadowed the
    # module-level `import json`.
    body = yield from req.json()
    assert isinstance(datetime.datetime.strptime(body[0].get(
        alexa.ATTR_UPDATE_DATE), alexa.DATE_FORMAT), datetime.datetime)
    # The update date is generated at request time; drop it before comparing.
    body[0].pop(alexa.ATTR_UPDATE_DATE)
    data[0].pop(alexa.ATTR_UPDATE_DATE)
    assert body == data
| |
# Copyright (c) 2004 Ian Bicking. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# 3. Neither the name of Ian Bicking nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IAN BICKING OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""CSS Selectors based on XPath.
This module supports selecting XML/HTML tags based on CSS selectors.
See the `CSSSelector` class for details.
"""
import re
from lxml import etree
__all__ = ['SelectorSyntaxError', 'ExpressionError',
'CSSSelector']
# Python 2/3 compatibility: `basestring` only exists on Python 2; on
# Python 3 every string is `str`.
try:
    _basestring = basestring
except NameError:
    _basestring = str
class SelectorSyntaxError(SyntaxError):
    """Raised when a CSS selector cannot be tokenized or parsed."""
    pass
class ExpressionError(RuntimeError):
    """Raised when a parsed selector cannot be translated to XPath."""
    pass
class CSSSelector(etree.XPath):
    """A CSS selector.

    Usage::

        >>> from lxml import etree, cssselect
        >>> select = cssselect.CSSSelector("a tag > child")

        >>> root = etree.XML("<a><b><c/><tag><child>TEXT</child></tag></b></a>")
        >>> [ el.tag for el in select(root) ]
        ['child']

    To use CSS namespaces, you need to pass a prefix-to-namespace
    mapping as ``namespaces`` keyword argument::

        >>> rdfns = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
        >>> select_ns = cssselect.CSSSelector('root > rdf|Description',
        ...                                   namespaces={'rdf': rdfns})

        >>> rdf = etree.XML((
        ...     '<root xmlns:rdf="%s">'
        ...       '<rdf:Description>blah</rdf:Description>'
        ...     '</root>') % rdfns)
        >>> [(el.tag, el.text) for el in select_ns(rdf)]
        [('{http://www.w3.org/1999/02/22-rdf-syntax-ns#}Description', 'blah')]
    """
    def __init__(self, css, namespaces=None):
        # Translate once at construction time and compile as XPath.
        xpath_expression = css_to_xpath(css)
        etree.XPath.__init__(self, xpath_expression, namespaces=namespaces)
        self.css = css

    def __repr__(self):
        return '<%s %s for %r>' % (
            type(self).__name__,
            hex(abs(id(self)))[2:],
            self.css)
##############################
## Token objects:
# Python 2/3 text-type compatibility aliases used throughout this module.
try:
    _unicode = unicode
    _unichr = unichr
except NameError:
    # Python 3
    _unicode = str
    _unichr = chr
class _UniToken(_unicode):
    """Unicode string subclass that remembers its source position."""

    def __new__(cls, contents, pos):
        token = _unicode.__new__(cls, contents)
        token.pos = pos
        return token

    def __repr__(self):
        return '%s(%s, %r)' % (
            type(self).__name__,
            _unicode.__repr__(self),
            self.pos)
class Symbol(_UniToken):
    """An identifier-like token (element names, idents, an+b series)."""
    pass
class String(_UniToken):
    """A quoted-string token, with escapes already resolved."""
    pass
class Token(_UniToken):
    """A punctuation/operator token (combinators, brackets, '::', ...)."""
    pass
############################################################
## Parsing
############################################################
##############################
## Syntax objects:
class Class(object):
    """
    Represents selector.class_name
    """
    def __init__(self, selector, class_name):
        self.selector = selector
        self.class_name = class_name

    def __repr__(self):
        return '%s[%r.%s]' % (
            type(self).__name__, self.selector, self.class_name)

    def xpath(self):
        # Match the class name as a whitespace-delimited word of @class.
        xpath = self.selector.xpath()
        padded = ' ' + self.class_name + ' '
        xpath.add_condition(
            "@class and contains(concat(' ', normalize-space(@class), ' '), %s)"
            % xpath_literal(padded))
        return xpath
class Function(object):
    """
    Represents selector:name(expr)
    """

    # Pseudo-classes that cannot be expressed in XPath.
    unsupported = [
        'target', 'lang', 'enabled', 'disabled',]

    def __init__(self, selector, type, name, expr):
        self.selector = selector
        self.type = type
        self.name = name
        self.expr = expr

    def __repr__(self):
        return '%s[%r%s%s(%r)]' % (
            self.__class__.__name__,
            self.selector,
            self.type, self.name, self.expr)

    def xpath(self):
        # Dispatch to _xpath_<name> with dashes mapped to underscores.
        sel_path = self.selector.xpath()
        if self.name in self.unsupported:
            raise ExpressionError(
                "The pseudo-class %r is not supported" % self.name)
        method = '_xpath_' + self.name.replace('-', '_')
        if not hasattr(self, method):
            raise ExpressionError(
                "The pseudo-class %r is unknown" % self.name)
        method = getattr(self, method)
        return method(sel_path, self.expr)

    def _xpath_nth_child(self, xpath, expr, last=False,
                         add_name_test=True):
        # Translate an an+b series into XPath position() arithmetic.
        a, b = parse_series(expr)
        if not a and not b and not last:
            # a=0 means nothing is returned...
            xpath.add_condition('false() and position() = 0')
            return xpath
        if add_name_test:
            xpath.add_name_test()
        xpath.add_star_prefix()
        if a == 0:
            # 0n+b selects exactly one position.
            if last:
                b = 'last() - %s' % b
            xpath.add_condition('position() = %s' % b)
            return xpath
        if last:
            # FIXME: I'm not sure if this is right
            a = -a
            b = -b
        if b > 0:
            b_neg = str(-b)
        else:
            b_neg = '+%s' % (-b)
        if a != 1:
            expr = ['(position() %s) mod %s = 0' % (b_neg, a)]
        else:
            expr = []
        if b >= 0:
            expr.append('position() >= %s' % b)
        elif b < 0 and last:
            expr.append('position() < (last() %s)' % b)
        expr = ' and '.join(expr)
        if expr:
            xpath.add_condition(expr)
        return xpath
        # FIXME: handle an+b, odd, even
        # an+b means every-a, plus b, e.g., 2n+1 means odd
        # 0n+b means b
        # n+0 means a=1, i.e., all elements
        # an means every a elements, i.e., 2n means even
        # -n means -1n
        # -1n+6 means elements 6 and previous

    def _xpath_nth_last_child(self, xpath, expr):
        return self._xpath_nth_child(xpath, expr, last=True)

    def _xpath_nth_of_type(self, xpath, expr):
        if xpath.element == '*':
            raise NotImplementedError(
                "*:nth-of-type() is not implemented")
        return self._xpath_nth_child(xpath, expr, add_name_test=False)

    def _xpath_nth_last_of_type(self, xpath, expr):
        return self._xpath_nth_child(xpath, expr, last=True, add_name_test=False)

    def _xpath_contains(self, xpath, expr):
        # text content, minus tags, must contain expr
        # this selector was removed from the CSS3 spec
        # case sensitive for speed, matching jQuery's implementation
        if isinstance(expr, Element):
            expr = expr._format_element()
        xpath.add_condition('contains(string(.), %s)'
                            % xpath_literal(expr))
        return xpath

    def _xpath_not(self, xpath, expr):
        # everything for which not expr applies
        expr = expr.xpath()
        cond = expr.condition
        # FIXME: should I do something about element_path?
        xpath.add_condition('not(%s)' % cond)
        return xpath
class Pseudo(object):
    """
    Represents selector:ident
    """

    # Dynamic/interaction/presentation pseudo-classes with no XPath analog.
    unsupported = ['indeterminate', 'first-line', 'first-letter',
                   'selection', 'before', 'after', 'link', 'visited',
                   'active', 'focus', 'hover']

    def __init__(self, element, type, ident):
        self.element = element
        assert type in (':', '::')
        self.type = type
        self.ident = ident

    def __repr__(self):
        return '%s[%r%s%s]' % (
            self.__class__.__name__,
            self.element,
            self.type, self.ident)

    def xpath(self):
        # Dispatch to _xpath_<ident> with dashes mapped to underscores.
        el_xpath = self.element.xpath()
        if self.ident in self.unsupported:
            raise ExpressionError(
                "The pseudo-class %r is unsupported" % self.ident)
        method = '_xpath_' + self.ident.replace('-', '_')
        if not hasattr(self, method):
            raise ExpressionError(
                "The pseudo-class %r is unknown" % self.ident)
        method = getattr(self, method)
        el_xpath = method(el_xpath)
        return el_xpath

    def _xpath_checked(self, xpath):
        # FIXME: is this really all the elements?
        xpath.add_condition("(@selected or @checked) and (name(.) = 'input' or name(.) = 'option')")
        return xpath

    def _xpath_root(self, xpath):
        xpath.add_condition("not(parent::*)")
        return xpath

    def _xpath_first_child(self, xpath):
        xpath.add_star_prefix()
        xpath.add_name_test()
        xpath.add_condition('position() = 1')
        return xpath

    def _xpath_last_child(self, xpath):
        xpath.add_star_prefix()
        xpath.add_name_test()
        xpath.add_condition('position() = last()')
        return xpath

    def _xpath_first_of_type(self, xpath):
        if xpath.element == '*':
            raise NotImplementedError(
                "*:first-of-type is not implemented")
        xpath.add_star_prefix()
        xpath.add_condition('position() = 1')
        return xpath

    def _xpath_last_of_type(self, xpath):
        if xpath.element == '*':
            raise NotImplementedError(
                "*:last-of-type is not implemented")
        xpath.add_star_prefix()
        xpath.add_condition('position() = last()')
        return xpath

    def _xpath_only_child(self, xpath):
        xpath.add_name_test()
        xpath.add_star_prefix()
        xpath.add_condition('last() = 1')
        return xpath

    def _xpath_only_of_type(self, xpath):
        if xpath.element == '*':
            raise NotImplementedError(
                "*:only-of-type is not implemented")
        xpath.add_condition('last() = 1')
        return xpath

    def _xpath_empty(self, xpath):
        # No child elements and no (non-whitespace) text content.
        xpath.add_condition("not(*) and not(normalize-space())")
        return xpath
class Attrib(object):
    """
    Represents selector[namespace|attrib operator value]
    """

    def __init__(self, selector, namespace, attrib, operator, value):
        self.selector = selector
        self.namespace = namespace
        self.attrib = attrib
        self.operator = operator
        self.value = value

    def __repr__(self):
        if self.operator == 'exists':
            return '%s[%r[%s]]' % (
                self.__class__.__name__,
                self.selector,
                self._format_attrib())
        else:
            return '%s[%r[%s %s %r]]' % (
                self.__class__.__name__,
                self.selector,
                self._format_attrib(),
                self.operator,
                self.value)

    def _format_attrib(self):
        # Display form only; '*' namespace means no prefix at all.
        if self.namespace == '*':
            return self.attrib
        else:
            return '%s|%s' % (self.namespace, self.attrib)

    def _xpath_attrib(self):
        # XPath attribute reference, namespace-prefixed when one was given.
        # FIXME: if attrib is *?
        if self.namespace == '*':
            return '@' + self.attrib
        else:
            return '@%s:%s' % (self.namespace, self.attrib)

    def xpath(self):
        # One branch per CSS attribute operator.
        path = self.selector.xpath()
        attrib = self._xpath_attrib()
        value = self.value
        if self.operator == 'exists':
            assert not value
            path.add_condition(attrib)
        elif self.operator == '=':
            path.add_condition('%s = %s' % (attrib,
                                            xpath_literal(value)))
        elif self.operator == '!=':
            # FIXME: this seems like a weird hack...
            if value:
                path.add_condition('not(%s) or %s != %s'
                                   % (attrib, attrib, xpath_literal(value)))
            else:
                path.add_condition('%s != %s'
                                   % (attrib, xpath_literal(value)))
            #path.add_condition('%s != %s' % (attrib, xpath_literal(value)))
        elif self.operator == '~=':
            # Whitespace-delimited word match.
            path.add_condition("%s and contains(concat(' ', normalize-space(%s), ' '), %s)" % (attrib, attrib, xpath_literal(' '+value+' ')))
        elif self.operator == '|=':
            # Weird, but true...
            path.add_condition('%s and (%s = %s or starts-with(%s, %s))' % (
                attrib,
                attrib, xpath_literal(value),
                attrib, xpath_literal(value + '-')))
        elif self.operator == '^=':
            path.add_condition('%s and starts-with(%s, %s)' % (
                attrib, attrib, xpath_literal(value)))
        elif self.operator == '$=':
            # Oddly there is a starts-with in XPath 1.0, but not ends-with
            path.add_condition('%s and substring(%s, string-length(%s)-%s) = %s'
                               % (attrib, attrib, attrib, len(value)-1, xpath_literal(value)))
        elif self.operator == '*=':
            # Attribute selectors are case sensitive
            path.add_condition('%s and contains(%s, %s)' % (
                attrib, attrib, xpath_literal(value)))
        else:
            assert 0, ("Unknown operator: %r" % self.operator)
        return path
class Element(object):
    """
    Represents namespace|element
    """
    def __init__(self, namespace, element):
        self.namespace = namespace
        self.element = element

    def __repr__(self):
        return '%s[%s]' % (type(self).__name__, self._format_element())

    def _format_element(self):
        # '*' namespace means "any namespace": no prefix in the display form.
        if self.namespace == '*':
            return self.element
        return '%s|%s' % (self.namespace, self.element)

    def xpath(self):
        if self.namespace == '*':
            tag = self.element.lower()
        else:
            # FIXME: Should we lowercase here?
            tag = '%s:%s' % (self.namespace, self.element)
        return XPathExpr(element=tag)
class Hash(object):
    """
    Represents selector#id
    """
    def __init__(self, selector, id):
        self.selector = selector
        self.id = id

    def __repr__(self):
        return '%s[%r#%s]' % (type(self).__name__, self.selector, self.id)

    def xpath(self):
        xpath = self.selector.xpath()
        xpath.add_condition('@id = %s' % xpath_literal(self.id))
        return xpath
class Or(object):
    """A comma-separated group of selectors."""

    def __init__(self, items):
        self.items = items

    def __repr__(self):
        return '%s(%r)' % (type(self).__name__, self.items)

    def xpath(self):
        return XPathExprOr([item.xpath() for item in self.items])
class CombinedSelector(object):
    """Two selectors joined by a combinator (descendant/child/sibling)."""

    _method_mapping = {
        ' ': 'descendant',
        '>': 'child',
        '+': 'direct_adjacent',
        '~': 'indirect_adjacent',
    }

    def __init__(self, selector, combinator, subselector):
        assert selector is not None
        self.selector = selector
        self.combinator = combinator
        self.subselector = subselector

    def __repr__(self):
        if self.combinator == ' ':
            comb = '<followed>'
        else:
            comb = self.combinator
        return '%s[%r %s %r]' % (
            self.__class__.__name__,
            self.selector,
            comb,
            self.subselector)

    def xpath(self):
        # Dispatch on the combinator via _method_mapping.
        if self.combinator not in self._method_mapping:
            raise ExpressionError(
                "Unknown combinator: %r" % self.combinator)
        method = '_xpath_' + self._method_mapping[self.combinator]
        method = getattr(self, method)
        path = self.selector.xpath()
        return method(path, self.subselector)

    def _xpath_descendant(self, xpath, sub):
        # when sub is a descendant in any way of xpath
        xpath.join('//', sub.xpath())
        return xpath

    def _xpath_child(self, xpath, sub):
        # when sub is an immediate child of xpath
        xpath.join('/', sub.xpath())
        return xpath

    def _xpath_direct_adjacent(self, xpath, sub):
        # when sub immediately follows xpath
        xpath.join('/following-sibling::', sub.xpath())
        xpath.add_name_test()
        xpath.add_condition('position() = 1')
        return xpath

    def _xpath_indirect_adjacent(self, xpath, sub):
        # when sub comes somewhere after xpath as a sibling
        xpath.join('/following-sibling::', sub.xpath())
        return xpath
##############################
## XPathExpr objects:
# Fast-path patterns for trivial selectors, letting css_to_xpath bypass
# the full parser:
_el_re = re.compile(r'^\w+\s*$', re.UNICODE)              # bare element name
_id_re = re.compile(r'^(\w*)#(\w+)\s*$', re.UNICODE)      # element#id
_class_re = re.compile(r'^(\w*)\.(\w+)\s*$', re.UNICODE)  # element.class
def css_to_xpath(css_expr, prefix='descendant-or-self::'):
    """Translate a CSS selector string (or parsed object) to an XPath string."""
    if isinstance(css_expr, _basestring):
        # Fast paths for the three most common trivial selector shapes.
        m = _el_re.search(css_expr)
        if m is not None:
            return '%s%s' % (prefix, m.group(0).strip())
        m = _id_re.search(css_expr)
        if m is not None:
            return "%s%s[@id = '%s']" % (
                prefix, m.group(1) or '*', m.group(2))
        m = _class_re.search(css_expr)
        if m is not None:
            return "%s%s[@class and contains(concat(' ', normalize-space(@class), ' '), ' %s ')]" % (
                prefix, m.group(1) or '*', m.group(2))
        # Fall back to the full parser.
        css_expr = parse(css_expr)
    expr = css_expr.xpath()
    assert expr is not None, (
        "Got None for xpath expression from %s" % repr(css_expr))
    if prefix:
        expr.add_prefix(prefix)
    return _unicode(expr)
class XPathExpr(object):
    """Mutable builder for an XPath expression.

    Serialized form is: prefix + path + element + '[condition]'.
    """

    def __init__(self, prefix=None, path=None, element='*', condition=None,
                 star_prefix=False):
        self.prefix = prefix
        self.path = path
        self.element = element
        self.condition = condition
        self.star_prefix = star_prefix

    def __str__(self):
        path = ''
        if self.prefix is not None:
            path += _unicode(self.prefix)
        if self.path is not None:
            path += _unicode(self.path)
        path += _unicode(self.element)
        if self.condition:
            path += '[%s]' % self.condition
        return path

    def __repr__(self):
        return '%s[%s]' % (
            self.__class__.__name__, self)

    def add_condition(self, condition):
        # Subsequent conditions are AND-ed onto any existing one.
        if self.condition:
            self.condition = '%s and (%s)' % (self.condition, condition)
        else:
            self.condition = condition

    def add_path(self, part):
        # Push the current element onto the path and start a new step.
        if self.path is None:
            self.path = self.element
        else:
            self.path += self.element
        self.element = part

    def add_prefix(self, prefix):
        if self.prefix:
            self.prefix = prefix + self.prefix
        else:
            self.prefix = prefix

    def add_name_test(self):
        # Replace the element step with '*' plus a name() condition.
        if self.element == '*':
            # We weren't doing a test anyway
            return
        self.add_condition("name() = %s" % xpath_literal(self.element))
        self.element = '*'

    def add_star_prefix(self):
        """
        Adds a /* prefix if there is no prefix. This is when you need
        to keep context's constrained to a single parent.
        """
        if self.path:
            self.path += '*/'
        else:
            self.path = '*/'
        self.star_prefix = True

    def join(self, combiner, other):
        # Serialize self, append the combiner, then continue building
        # from `other`'s components.
        prefix = _unicode(self)
        prefix += combiner
        path = (other.prefix or '') + (other.path or '')
        # We don't need a star prefix if we are joining to this other
        # prefix; so we'll get rid of it
        if other.star_prefix and path == '*/':
            path = ''
        self.prefix = prefix
        self.path = path
        self.element = other.element
        self.condition = other.condition
class XPathExprOr(XPathExpr):
    """
    Represents |'d expressions.  Note that unfortunately it isn't
    the union, it's the sum, so duplicate elements will appear.
    """
    def __init__(self, items, prefix=None):
        for item in items:
            assert item is not None
        self.items = items
        self.prefix = prefix

    def __str__(self):
        prefix = self.prefix or ''
        return ' | '.join('%s%s' % (prefix, item) for item in self.items)
split_at_single_quotes = re.compile("('+)").split

def xpath_literal(s):
    """Quote *s* as an XPath string literal, handling embedded quotes."""
    if isinstance(s, Element):
        # This is probably a symbol that looks like an expression...
        s = s._format_element()
    else:
        s = _unicode(s)
    if "'" not in s:
        return "'%s'" % s
    if '"' not in s:
        return '"%s"' % s
    # Contains both quote kinds: build a concat() of alternating pieces,
    # double-quoting the runs of single quotes.
    pieces = []
    for part in split_at_single_quotes(s):
        if not part:
            continue
        if "'" in part:
            pieces.append('"%s"' % part)
        else:
            pieces.append("'%s'" % part)
    return "concat(%s)" % ','.join(pieces)
##############################
## Parsing functions
def parse(string):
    """Parse a CSS selector string into a selector object tree.

    On a syntax error, rewrites the exception message to include the
    tokens consumed so far and the next token, then re-raises.
    """
    stream = TokenStream(tokenize(string))
    stream.source = string
    try:
        return parse_selector_group(stream)
    except SelectorSyntaxError:
        import sys
        e = sys.exc_info()[1]
        message = "%s at %s -> %r" % (
            e, stream.used, stream.peek())
        e.msg = message
        if sys.version_info < (2,6):
            # Older Pythons also exposed the text via .message.
            e.message = message
        e.args = tuple([message])
        raise
def parse_selector_group(stream):
    """Parse a comma-separated group of selectors from *stream*."""
    selectors = [parse_selector(stream)]
    while stream.peek() == ',':
        stream.next()
        # Ignore optional whitespace after a group separator
        while stream.peek() == ' ':
            stream.next()
        selectors.append(parse_selector(stream))
    if len(selectors) == 1:
        return selectors[0]
    return Or(selectors)
def parse_selector(stream):
    """Parse one selector (simple selectors joined by combinators)."""
    result = parse_simple_selector(stream)
    while 1:
        peek = stream.peek()
        if peek == ',' or peek is None:
            # End of this selector (group separator or end of input).
            return result
        elif peek in ('+', '>', '~'):
            # A combinator
            combinator = stream.next()
            # Ignore optional whitespace after a combinator
            while stream.peek() == ' ':
                stream.next()
        else:
            # Plain whitespace acts as the descendant combinator.
            combinator = ' '
        consumed = len(stream.used)
        next_selector = parse_simple_selector(stream)
        if consumed == len(stream.used):
            # No tokens were consumed: the combinator has no right side.
            raise SelectorSyntaxError(
                "Expected selector, got '%s'" % stream.peek())
        result = CombinedSelector(result, combinator, next_selector)
    return result
def parse_simple_selector(stream):
    """Parse one compound selector: element plus #id/.class/[attr]/:pseudo.

    Raises SelectorSyntaxError on malformed input.
    """
    peek = stream.peek()
    if peek != '*' and not isinstance(peek, Symbol):
        element = namespace = '*'
    else:
        next = stream.next()
        if next != '*' and not isinstance(next, Symbol):
            raise SelectorSyntaxError(
                "Expected symbol, got '%s'" % next)
        if stream.peek() == '|':
            namespace = next
            stream.next()
            element = stream.next()
            # BUG FIX: validate the element token just read.  The original
            # re-tested `next` (the namespace, already validated above), so
            # a malformed element after 'ns|' was never rejected.
            if element != '*' and not isinstance(element, Symbol):
                raise SelectorSyntaxError(
                    "Expected symbol, got '%s'" % element)
        else:
            namespace = '*'
            element = next
    result = Element(namespace, element)
    has_hash = False
    while 1:
        peek = stream.peek()
        if peek == '#':
            if has_hash:
                # You can't have two hashes
                # (FIXME: is there some more general rule I'm missing?)
                break
            stream.next()
            result = Hash(result, stream.next())
            has_hash = True
            continue
        elif peek == '.':
            stream.next()
            result = Class(result, stream.next())
            continue
        elif peek == '[':
            stream.next()
            result = parse_attrib(result, stream)
            next = stream.next()
            if not next == ']':
                raise SelectorSyntaxError(
                    "] expected, got '%s'" % next)
            continue
        elif peek == ':' or peek == '::':
            type = stream.next()
            ident = stream.next()
            if not isinstance(ident, Symbol):
                raise SelectorSyntaxError(
                    "Expected symbol, got '%s'" % ident)
            if stream.peek() == '(':
                # Functional pseudo-class: :name(expr)
                stream.next()
                peek = stream.peek()
                if isinstance(peek, String):
                    selector = stream.next()
                elif isinstance(peek, Symbol) and is_int(peek):
                    selector = int(stream.next())
                else:
                    # FIXME: parse_simple_selector, or selector, or...?
                    selector = parse_simple_selector(stream)
                next = stream.next()
                if not next == ')':
                    raise SelectorSyntaxError(
                        "Expected ')', got '%s' and '%s'"
                        % (next, selector))
                result = Function(result, type, ident, selector)
            else:
                result = Pseudo(result, type, ident)
            continue
        else:
            if peek == ' ':
                stream.next()
            break
        # FIXME: not sure what "negation" is
    return result
def is_int(v):
    """Return True if *v* can be converted to an int, else False."""
    try:
        int(v)
        return True
    except ValueError:
        return False
def parse_attrib(selector, stream):
    """Parse the interior of an ``[attr]`` / ``[ns|attr op value]`` bracket."""
    attrib = stream.next()
    if stream.peek() == '|':
        # namespace|attrib form
        namespace = attrib
        stream.next()
        attrib = stream.next()
    else:
        namespace = '*'
    if stream.peek() == ']':
        # Bare [attr]: existence test, no operator or value.
        return Attrib(selector, namespace, attrib, 'exists', None)
    op = stream.next()
    if op not in ('^=', '$=', '*=', '=', '~=', '|=', '!='):
        raise SelectorSyntaxError(
            "Operator expected, got '%s'" % op)
    value = stream.next()
    if not isinstance(value, (Symbol, String)):
        raise SelectorSyntaxError(
            "Expected string or symbol, got '%s'" % value)
    return Attrib(selector, namespace, attrib, op, value)
def parse_series(s):
    """
    Parses things like '1n+2', or 'an+b' generally, returning (a, b)
    """
    if isinstance(s, Element):
        s = s._format_element()
    if not s or s == '*':
        # Happens when there's nothing, which the CSS parser thinks of as *
        return (0, 0)
    if isinstance(s, int):
        # Happens when you just get a number
        return (0, s)
    if s == 'odd':
        return (2, 1)
    if s == 'even':
        return (2, 0)
    if s == 'n':
        return (1, 0)
    if 'n' not in s:
        # Just a b
        return (0, int(s))

    a, b = s.split('n', 1)

    def _coefficient(text, default):
        # Empty -> default; a bare sign means 1 with that sign.
        if not text:
            return default
        if text in ('-', '+'):
            return int(text + '1')
        return int(text)

    return (_coefficient(a, 1), _coefficient(b, 0))
############################################################
## Tokenizing
############################################################
# Tokenizer helpers:
_match_whitespace = re.compile(r'\s+', re.UNICODE).match          # runs of whitespace
_replace_comments = re.compile(r'/\*.*?\*/', re.DOTALL).sub       # /* ... */ comments
_match_count_number = re.compile(r'[+-]?\d*n(?:[+-]\d+)?').match  # an+b series, e.g. 2n+1
def tokenize(s):
    """Generate Symbol/String/Token objects from the selector string *s*.

    Whitespace is skipped, but its position is remembered so that a
    significant space (descendant combinator) before ':.#[' or '::' can
    be emitted as an explicit ' ' Token.
    """
    pos = 0
    s = _replace_comments('', s)
    while 1:
        match = _match_whitespace(s, pos=pos)
        if match:
            preceding_whitespace_pos = pos
            pos = match.end()
        else:
            preceding_whitespace_pos = 0
        if pos >= len(s):
            return
        # an+b series (e.g. '2n+1') is a single Symbol; a bare 'n' is not.
        match = _match_count_number(s, pos=pos)
        if match and match.group() != 'n':
            sym = s[pos:match.end()]
            yield Symbol(sym, pos)
            pos = match.end()
            continue
        c = s[pos]
        c2 = s[pos:pos+2]
        # Two-character operators first, so '*=' is not read as '*' + '='.
        if c2 in ('~=', '|=', '^=', '$=', '*=', '::', '!='):
            if c2 == '::' and preceding_whitespace_pos > 0:
                yield Token(' ', preceding_whitespace_pos)
            yield Token(c2, pos)
            pos += 2
            continue
        if c in '>+~,.*=[]()|:#':
            if c in ':.#[' and preceding_whitespace_pos > 0:
                yield Token(' ', preceding_whitespace_pos)
            yield Token(c, pos)
            pos += 1
            continue
        if c == '"' or c == "'":
            # Quoted string
            old_pos = pos
            sym, pos = tokenize_escaped_string(s, pos)
            yield String(sym, old_pos)
            continue
        old_pos = pos
        sym, pos = tokenize_symbol(s, pos)
        yield Symbol(sym, old_pos)
        continue
split_at_string_escapes = re.compile(r'(\\(?:%s))'
% '|'.join(['[A-Fa-f0-9]{1,6}(?:\r\n|\s)?',
'[^A-Fa-f0-9]'])).split
def unescape_string_literal(literal):
    """Resolve CSS backslash escapes in *literal* and return the plain text.

    Hex escapes (``\\41``) become the corresponding character; single-char
    escapes (``\\"``) become the character itself.  A malformed escape raises
    SelectorSyntaxError.
    """
    pieces = []
    for piece in split_at_string_escapes(literal):
        if not piece:
            continue
        if '\\' in piece:
            # A well-formed escape is a backslash followed by at least one
            # character; anything else containing a backslash is an error.
            if not (piece.startswith('\\') and len(piece) > 1):
                raise SelectorSyntaxError(
                    "Invalid escape sequence %r in string %r"
                    % (piece.split('\\')[1], literal))
            piece = piece[1:]
            if piece[0] in '0123456789ABCDEFabcdef':
                # int() correctly ignores the potentially trailing whitespace
                piece = _unichr(int(piece, 16))
        pieces.append(piece)
    return ''.join(pieces)
def tokenize_escaped_string(s, pos):
    """Read a quoted string starting at ``s[pos]``.

    Returns ``(value, new_pos)`` where *value* has escapes resolved and
    *new_pos* is the index just past the closing quote.  Raises
    SelectorSyntaxError when the string is unterminated.
    """
    quote = s[pos]
    assert quote in ('"', "'")
    start = pos + 1
    search_from = start
    while True:
        end = s.find(quote, search_from)
        if end < 0:
            raise SelectorSyntaxError(
                "Expected closing %s for string in: %r"
                % (quote, s[start:]))
        candidate = s[start:end]
        if candidate.endswith('\\'):
            # This quote character is escaped; keep scanning past it.
            search_from = end + 1
        else:
            if '\\' in candidate:
                candidate = unescape_string_literal(candidate)
            return candidate, end + 1
# Matches the first character that cannot appear in a symbol/identifier
# (anything other than word characters, backslash, or hyphen).
_illegal_symbol = re.compile(r'[^\w\\-]', re.UNICODE)
def tokenize_symbol(s, pos):
    """Read an identifier/symbol starting at ``s[pos]``.

    Returns ``(symbol, new_pos)``.  Raises SelectorSyntaxError when the
    symbol contains an undecodable escape sequence.

    Fix: removed the unreachable ``if not match:`` branch that followed the
    early return -- at that point ``match`` was always truthy, so the branch
    (and its duplicate end-of-string handling) could never execute.
    """
    start = pos
    match = _illegal_symbol.search(s, pos=pos)
    if not match:
        # Symbol runs to the end of s.  NOTE: this path historically skips
        # the unicode-escape decoding below; behavior preserved.
        return s[start:], len(s)
    if match.start() == pos:
        assert 0, (
            "Unexpected symbol: %r at %s" % (s[pos], pos))
    result = s[start:match.start()]
    pos = match.start()
    try:
        # Resolve backslash escapes (e.g. '\\41' -> 'A') in the symbol text.
        result = result.encode('ASCII', 'backslashreplace').decode('unicode_escape')
    except UnicodeDecodeError:
        # sys.exc_info() keeps the Python 2 compatibility style used
        # elsewhere in this module.
        import sys
        e = sys.exc_info()[1]
        raise SelectorSyntaxError(
            "Bad symbol %r: %s" % (result, e))
    return result, pos
class TokenStream(object):
    """Iterator over tokens with single-token lookahead.

    ``next()`` returns None once the underlying iterator is exhausted;
    every consumed token is recorded in ``used``.  ``peek()`` returns the
    upcoming token without consuming it.
    """
    def __init__(self, tokens, source=None):
        self.used = []
        self.tokens = iter(tokens)
        self.source = source
        self.peeked = None
        self._peeking = False
        try:
            # Python 2 iterators expose .next ...
            self.next_token = self.tokens.next
        except AttributeError:
            # ... Python 3 iterators expose .__next__
            self.next_token = self.tokens.__next__
    def next(self):
        """Consume and return the next token, or None when exhausted."""
        if self._peeking:
            # Hand out the token stashed by peek().
            self._peeking = False
            self.used.append(self.peeked)
            return self.peeked
        try:
            token = self.next_token()
        except StopIteration:
            return None
        self.used.append(token)
        return token
    def __iter__(self):
        # Yield tokens until next() signals exhaustion with None.
        return iter(self.next, None)
    def peek(self):
        """Return the upcoming token without consuming it (None at the end)."""
        if self._peeking:
            return self.peeked
        try:
            self.peeked = self.next_token()
        except StopIteration:
            return None
        self._peeking = True
        return self.peeked
| |
# Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import ddt
import mock
from oslo_config import cfg
from oslo_utils import importutils
from manila import exception
from manila.share import configuration as config
from manila.share import driver
from manila.share.drivers.glusterfs import layout
from manila import test
from manila.tests import fake_utils
CONF = cfg.CONF
# Static fixture values shared by the test cases below.
fake_local_share_path = '/mnt/nfs/testvol/fakename'
fake_path_to_private_key = '/fakepath/to/privatekey'
fake_remote_server_password = 'fakepassword'
class GlusterfsFakeShareDriver(layout.GlusterfsShareDriverBase):
    """Minimal concrete driver used to exercise GlusterfsShareDriverBase."""
    supported_layouts = ('layout_fake.FakeLayout',
                         'layout_something.SomeLayout')
    # Fix: was ('NFS,') -- a plain string, not a tuple, because of the
    # misplaced comma.  The membership tests in _check_proto only passed by
    # accidental substring matching; the proper one-element tuple gives the
    # same outcomes for all protocols exercised by these tests.
    supported_protocols = ('NFS',)
@ddt.ddt
class GlusterfsShareDriverBaseTestCase(test.TestCase):
    """Tests GlusterfsShareDriverBase (driver/layout delegation contract)."""
    def setUp(self):
        super(GlusterfsShareDriverBaseTestCase, self).setUp()
        CONF.set_default('driver_handles_share_servers', False)
        fake_conf, __ = self._setup()
        self._driver = GlusterfsFakeShareDriver(False, configuration=fake_conf)
        self.fake_share = mock.Mock()
        self.fake_context = mock.Mock()
        self.fake_access = mock.Mock()
    def _setup(self):
        # Stub importutils.import_object so driver construction receives a
        # mock layout instead of importing a real layout module.
        fake_conf = config.Configuration(None)
        fake_layout = mock.Mock()
        self.mock_object(importutils, "import_object",
                         mock.Mock(return_value=fake_layout))
        return fake_conf, fake_layout
    def test_init(self):
        # The abstract base has no supported_layouts, so instantiating it
        # directly fails while resolving the default layout.
        self.assertRaises(IndexError, layout.GlusterfsShareDriverBase, False,
                          configuration=config.Configuration(None))
    @ddt.data({'has_snap': None, 'layout_name': None},
              {'has_snap': False, 'layout_name': 'layout_fake.FakeLayout'},
              {'has_snap': True, 'layout_name': 'layout_something.SomeLayout'})
    @ddt.unpack
    def test_init_subclass(self, has_snap, layout_name):
        # has_snap=None / layout_name=None exercise the defaults: snapshots
        # unsupported and the first entry of supported_layouts.
        conf, _layout = self._setup()
        if layout_name is not None:
            conf.glusterfs_share_layout = layout_name
        if has_snap is None:
            del(_layout._snapshots_are_supported)
        else:
            _layout._snapshots_are_supported = has_snap
        _driver = GlusterfsFakeShareDriver(False, configuration=conf)
        snap_result = {None: False}.get(has_snap, has_snap)
        layout_result = {None: 'layout_fake.FakeLayout'}.get(layout_name,
                                                             layout_name)
        importutils.import_object.assert_called_once_with(
            'manila.share.drivers.glusterfs.%s' % layout_result,
            _driver, configuration=conf)
        self.assertEqual(_layout, _driver.layout)
        self.assertEqual(snap_result, _driver.snapshots_are_supported)
    def test_init_nosupp_layout(self):
        # A layout name outside supported_layouts must be rejected.
        conf = config.Configuration(None)
        conf.glusterfs_share_layout = 'nonsense_layout'
        self.assertRaises(exception.GlusterfsException,
                          GlusterfsFakeShareDriver, False, configuration=conf)
    def test_setup_via_manager(self):
        self.assertIsNone(self._driver._setup_via_manager(mock.Mock()))
    @ddt.data('allow', 'deny')
    def test_allow_deny_access(self, op):
        # allow_access/deny_access should resolve the share manager via the
        # layout and forward to the protocol-specific _*_access_via_manager.
        conf, _layout = self._setup()
        gmgr = mock.Mock()
        self.mock_object(_layout, '_share_manager',
                         mock.Mock(return_value=gmgr))
        _driver = GlusterfsFakeShareDriver(False, configuration=conf)
        self.mock_object(_driver, "_%s_access_via_manager" % op, mock.Mock())
        getattr(_driver, "%s_access" % op)(self.fake_context, self.fake_share,
                                           self.fake_access)
        _layout._share_manager.assert_called_once_with(self.fake_share)
        getattr(_driver,
                "_%s_access_via_manager" % op).assert_called_once_with(
            gmgr, self.fake_context, self.fake_share, self.fake_access, None)
    @ddt.data('allow', 'deny')
    def test_allow_deny_access_via_manager(self, op):
        # The base class leaves _*_access_via_manager abstract.
        self.assertRaises(NotImplementedError,
                          getattr(self._driver,
                                  "_%s_access_via_manager" % op),
                          mock.Mock(), self.fake_context, self.fake_share,
                          self.fake_access, None)
    @ddt.data('NFS', 'PROTATO')
    def test_check_proto_baseclass(self, proto):
        # The base class supports no protocols at all.
        self.assertRaises(exception.ShareBackendException,
                          layout.GlusterfsShareDriverBase._check_proto,
                          {'share_proto': proto})
    def test_check_proto(self):
        GlusterfsFakeShareDriver._check_proto({'share_proto': 'NFS'})
    def test_check_proto_notsupported(self):
        self.assertRaises(exception.ShareBackendException,
                          GlusterfsFakeShareDriver._check_proto,
                          {'share_proto': 'PROTATO'})
    @ddt.data('', '_from_snapshot')
    def test_create_share(self, variant):
        # create_share(_from_snapshot) checks the protocol and delegates to
        # the layout method of the same name.
        conf, _layout = self._setup()
        _driver = GlusterfsFakeShareDriver(False, configuration=conf)
        self.mock_object(_driver, '_check_proto', mock.Mock())
        getattr(_driver, 'create_share%s' % variant)(self.fake_context,
                                                     self.fake_share)
        _driver._check_proto.assert_called_once_with(self.fake_share)
        getattr(_layout,
                'create_share%s' % variant).assert_called_once_with(
            self.fake_context, self.fake_share)
    @ddt.data(True, False)
    def test_update_share_stats(self, internal_exception):
        # When the layout does not implement _update_share_stats
        # (NotImplementedError), no layout stats are merged into the data,
        # but the parent ShareDriver stats update still runs.
        data = mock.Mock()
        conf, _layout = self._setup()
        def raise_exception(*args, **kwargs):
            raise NotImplementedError
        layoutstats = mock.Mock()
        mock_kw = ({'side_effect': raise_exception} if internal_exception
                   else {'return_value': layoutstats})
        self.mock_object(_layout, '_update_share_stats', mock.Mock(**mock_kw))
        self.mock_object(driver.ShareDriver, '_update_share_stats',
                         mock.Mock())
        _driver = GlusterfsFakeShareDriver(False, configuration=conf)
        _driver._update_share_stats(data)
        if internal_exception:
            self.assertFalse(data.update.called)
        else:
            data.update.assert_called_once_with(layoutstats)
        driver.ShareDriver._update_share_stats.assert_called_once_with(
            data)
    @ddt.data('do_setup', 'create_snapshot', 'delete_share', 'delete_snapshot',
              'ensure_share', 'manage_existing', 'unmanage', 'extend_share',
              'shrink_share')
    def test_delegated_methods(self, method):
        # These driver entry points are pure pass-throughs to the layout.
        conf, _layout = self._setup()
        _driver = GlusterfsFakeShareDriver(False, configuration=conf)
        fake_args = (mock.Mock(), mock.Mock(), mock.Mock())
        getattr(_driver, method)(*fake_args)
        getattr(_layout, method).assert_called_once_with(*fake_args)
@ddt.ddt
class GlusterfsShareLayoutBaseTestCase(test.TestCase):
    """Tests for GlusterfsShareLayoutBase via a minimal concrete subclass."""
    def setUp(self):
        super(GlusterfsShareLayoutBaseTestCase, self).setUp()
        # Route command execution through the fake_utils harness and make
        # sure its repliers/log are cleared after each test.
        fake_utils.stub_out_utils_execute(self)
        self._execute = fake_utils.fake_execute
        self.addCleanup(fake_utils.fake_execute_set_repliers, [])
        self.addCleanup(fake_utils.fake_execute_clear_log)
        self.fake_driver = mock.Mock()
        self.mock_object(self.fake_driver, '_execute',
                         self._execute)
    class FakeLayout(layout.GlusterfsShareLayoutBase):
        # Concrete no-op implementations of every abstract method, so the
        # base class can be instantiated for testing.
        def _share_manager(self, share):
            """Return GlusterManager object representing share's backend."""
        def do_setup(self, context):
            """Any initialization the share driver does while starting."""
        def create_share(self, context, share, share_server=None):
            """Is called to create share."""
        def create_share_from_snapshot(self, context, share, snapshot,
                                       share_server=None):
            """Is called to create share from snapshot."""
        def create_snapshot(self, context, snapshot, share_server=None):
            """Is called to create snapshot."""
        def delete_share(self, context, share, share_server=None):
            """Is called to remove share."""
        def delete_snapshot(self, context, snapshot, share_server=None):
            """Is called to remove snapshot."""
        def ensure_share(self, context, share, share_server=None):
            """Invoked to ensure that share is exported."""
        def manage_existing(self, share, driver_options):
            """Brings an existing share under Manila management."""
        def unmanage(self, share):
            """Removes the specified share from Manila management."""
        def extend_share(self, share, new_size, share_server=None):
            """Extends size of existing share."""
        def shrink_share(self, share, new_size, share_server=None):
            """Shrinks size of existing share."""
    def test_init_invalid(self):
        # The abstract base cannot be instantiated directly.
        self.assertRaises(TypeError, layout.GlusterfsShareLayoutBase,
                          mock.Mock())
    def test_subclass(self):
        fake_conf = mock.Mock()
        _layout = self.FakeLayout(self.fake_driver, configuration=fake_conf)
        self.assertEqual(fake_conf, _layout.configuration)
        self.assertRaises(NotImplementedError, _layout._update_share_stats)
    def test_check_mount_glusterfs(self):
        # _check_mount_glusterfs probes for the mount.glusterfs helper.
        fake_conf = mock.Mock()
        _driver = mock.Mock()
        _driver._execute = mock.Mock()
        _layout = self.FakeLayout(_driver, configuration=fake_conf)
        _layout._check_mount_glusterfs()
        _driver._execute.assert_called_once_with(
            'mount.glusterfs',
            check_exit_code=False)
    @ddt.data({'_errno': errno.ENOENT,
               '_exception': exception.GlusterfsException},
              {'_errno': errno.EACCES, '_exception': OSError})
    @ddt.unpack
    def test_check_mount_glusterfs_not_installed(self, _errno, _exception):
        # ENOENT (binary missing) is translated to GlusterfsException;
        # other OSErrors propagate unchanged.
        fake_conf = mock.Mock()
        _layout = self.FakeLayout(self.fake_driver, configuration=fake_conf)
        def exec_runner(*ignore_args, **ignore_kwargs):
            raise OSError(_errno, os.strerror(_errno))
        expected_exec = ['mount.glusterfs']
        fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)])
        self.assertRaises(_exception, _layout._check_mount_glusterfs)
| |
import ctypes
import ctypes.util
import numpy
from numpy.ctypeslib import ndpointer
from ..util import coords
from ..util import _load_extension_libs
# Load the compiled galpy C extension; _ext_loaded flags whether loading
# succeeded (presumably checked by callers before using _lib -- see
# _load_extension_libs for details).
_lib, _ext_loaded= _load_extension_libs.load_libgalpy()
def actionAngleStaeckel_c(pot,delta,R,vR,vT,z,vz,u0=None,order=10):
    """
    NAME:
       actionAngleStaeckel_c
    PURPOSE:
       Use C to calculate actions using the Staeckel approximation
    INPUT:
       pot - Potential or list of such instances
       delta - focal length of prolate spheroidal coordinates
       R, vR, vT, z, vz - coordinates (arrays)
       u0= (None) if set, u0 to use
       order= (10) order of Gauss-Legendre integration of the relevant integrals
    OUTPUT:
       (jr,jz,err)
       jr,jz : array, shape (len(R))
       err - non-zero if error occured
    HISTORY:
       2012-12-01 - Written - Bovy (IAS)
    """
    if u0 is None:
        u0, dummy= coords.Rz_to_uv(R,z,delta=numpy.atleast_1d(delta))
    #Parse the potential into the flat form the C code expects
    from ..orbit.integrateFullOrbit import _parse_pot
    npot, pot_type, pot_args= _parse_pot(pot,potforactions=True)
    #Parse delta
    delta= numpy.atleast_1d(delta)
    ndelta= len(delta)
    #Set up result arrays (filled in-place by the C code)
    jr= numpy.empty(len(R))
    jz= numpy.empty(len(R))
    err= ctypes.c_int(0)
    #Set up the C code
    ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
    actionAngleStaeckel_actionsFunc= _lib.actionAngleStaeckel_actions
    # argtypes must match the C signature of actionAngleStaeckel_actions
    # exactly, in order
    actionAngleStaeckel_actionsFunc.argtypes= [ctypes.c_int,
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ctypes.c_int,
                                               ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ctypes.c_int,
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ctypes.c_int,
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ctypes.POINTER(ctypes.c_int)]
    #Array requirements: C-contiguous, writable float64; first store the old
    #order so Fortran-ordered inputs can be restored below
    f_cont= [R.flags['F_CONTIGUOUS'],
             vR.flags['F_CONTIGUOUS'],
             vT.flags['F_CONTIGUOUS'],
             z.flags['F_CONTIGUOUS'],
             vz.flags['F_CONTIGUOUS'],
             u0.flags['F_CONTIGUOUS'],
             delta.flags['F_CONTIGUOUS']]
    R= numpy.require(R,dtype=numpy.float64,requirements=['C','W'])
    vR= numpy.require(vR,dtype=numpy.float64,requirements=['C','W'])
    vT= numpy.require(vT,dtype=numpy.float64,requirements=['C','W'])
    z= numpy.require(z,dtype=numpy.float64,requirements=['C','W'])
    vz= numpy.require(vz,dtype=numpy.float64,requirements=['C','W'])
    u0= numpy.require(u0,dtype=numpy.float64,requirements=['C','W'])
    delta= numpy.require(delta,dtype=numpy.float64,requirements=['C','W'])
    jr= numpy.require(jr,dtype=numpy.float64,requirements=['C','W'])
    jz= numpy.require(jz,dtype=numpy.float64,requirements=['C','W'])
    #Run the C code
    actionAngleStaeckel_actionsFunc(len(R),
                                    R,
                                    vR,
                                    vT,
                                    z,
                                    vz,
                                    u0,
                                    ctypes.c_int(npot),
                                    pot_type,
                                    pot_args,
                                    ctypes.c_int(ndelta),
                                    delta,
                                    ctypes.c_int(order),
                                    jr,
                                    jz,
                                    ctypes.byref(err))
    #Reset input arrays to Fortran order where that was the original order
    if f_cont[0]: R= numpy.asfortranarray(R)
    if f_cont[1]: vR= numpy.asfortranarray(vR)
    if f_cont[2]: vT= numpy.asfortranarray(vT)
    if f_cont[3]: z= numpy.asfortranarray(z)
    if f_cont[4]: vz= numpy.asfortranarray(vz)
    if f_cont[5]: u0= numpy.asfortranarray(u0)
    if f_cont[6]: delta= numpy.asfortranarray(delta)
    return (jr,jz,err.value)
def actionAngleStaeckel_calcu0(E,Lz,pot,delta):
    """
    NAME:
       actionAngleStaeckel_calcu0
    PURPOSE:
       Use C to calculate u0 in the Staeckel approximation
    INPUT:
       E, Lz - energy and angular momentum
       pot - Potential or list of such instances
       delta - focal length of prolate spheroidal coordinates
    OUTPUT:
       (u0,err)
       u0 : array, shape (len(E))
       err - non-zero if error occured
    HISTORY:
       2012-12-03 - Written - Bovy (IAS)
    """
    #Parse the potential into the flat form the C code expects
    from ..orbit.integrateFullOrbit import _parse_pot
    npot, pot_type, pot_args= _parse_pot(pot,potforactions=True)
    #Set up result arrays (filled in-place by the C code)
    u0= numpy.empty(len(E))
    err= ctypes.c_int(0)
    #Parse delta
    delta= numpy.atleast_1d(delta)
    ndelta= len(delta)
    #Set up the C code
    ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
    actionAngleStaeckel_actionsFunc= _lib.calcu0
    # argtypes must match the C signature of calcu0 exactly, in order
    actionAngleStaeckel_actionsFunc.argtypes= [ctypes.c_int,
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ctypes.c_int,
                                               ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ctypes.c_int,
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ctypes.POINTER(ctypes.c_int)]
    #Array requirements: C-contiguous, writable float64; first store the old
    #order so Fortran-ordered inputs can be restored below
    f_cont= [E.flags['F_CONTIGUOUS'],
             Lz.flags['F_CONTIGUOUS'],
             delta.flags['F_CONTIGUOUS']]
    E= numpy.require(E,dtype=numpy.float64,requirements=['C','W'])
    Lz= numpy.require(Lz,dtype=numpy.float64,requirements=['C','W'])
    delta= numpy.require(delta,dtype=numpy.float64,requirements=['C','W'])
    u0= numpy.require(u0,dtype=numpy.float64,requirements=['C','W'])
    #Run the C code
    actionAngleStaeckel_actionsFunc(len(E),
                                    E,
                                    Lz,
                                    ctypes.c_int(npot),
                                    pot_type,
                                    pot_args,
                                    ctypes.c_int(ndelta),
                                    delta,
                                    u0,
                                    ctypes.byref(err))
    #Reset input arrays to Fortran order where that was the original order
    if f_cont[0]: E= numpy.asfortranarray(E)
    if f_cont[1]: Lz= numpy.asfortranarray(Lz)
    if f_cont[2]: delta= numpy.asfortranarray(delta)
    return (u0,err.value)
def actionAngleFreqStaeckel_c(pot,delta,R,vR,vT,z,vz,u0=None,order=10):
    """
    NAME:
       actionAngleFreqStaeckel_c
    PURPOSE:
       Use C to calculate actions and frequencies
       using the Staeckel approximation
    INPUT:
       pot - Potential or list of such instances
       delta - focal length of prolate spheroidal coordinates
       R, vR, vT, z, vz - coordinates (arrays)
       u0= (None) if set, u0 to use
       order= (10) order of Gauss-Legendre integration of the relevant integrals
    OUTPUT:
       (jr,jz,Omegar,Omegaphi,Omegaz,err)
       jr,jz,Omegar,Omegaphi,Omegaz : array, shape (len(R))
       err - non-zero if error occured
    HISTORY:
       2013-08-23 - Written - Bovy (IAS)
    """
    if u0 is None:
        u0, dummy= coords.Rz_to_uv(R,z,delta=numpy.atleast_1d(delta))
    #Parse the potential into the flat form the C code expects
    from ..orbit.integrateFullOrbit import _parse_pot
    npot, pot_type, pot_args= _parse_pot(pot,potforactions=True)
    #Parse delta
    delta= numpy.atleast_1d(delta)
    ndelta= len(delta)
    #Set up result arrays (filled in-place by the C code)
    jr= numpy.empty(len(R))
    jz= numpy.empty(len(R))
    Omegar= numpy.empty(len(R))
    Omegaphi= numpy.empty(len(R))
    Omegaz= numpy.empty(len(R))
    err= ctypes.c_int(0)
    #Set up the C code
    ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
    actionAngleStaeckel_actionsFunc= _lib.actionAngleStaeckel_actionsFreqs
    # argtypes must match the C signature of
    # actionAngleStaeckel_actionsFreqs exactly, in order
    actionAngleStaeckel_actionsFunc.argtypes= [ctypes.c_int,
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ctypes.c_int,
                                               ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ctypes.c_int,
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ctypes.c_int,
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ctypes.POINTER(ctypes.c_int)]
    #Array requirements: C-contiguous, writable float64; first store the old
    #order so Fortran-ordered inputs can be restored below
    f_cont= [R.flags['F_CONTIGUOUS'],
             vR.flags['F_CONTIGUOUS'],
             vT.flags['F_CONTIGUOUS'],
             z.flags['F_CONTIGUOUS'],
             vz.flags['F_CONTIGUOUS'],
             u0.flags['F_CONTIGUOUS'],
             delta.flags['F_CONTIGUOUS']]
    R= numpy.require(R,dtype=numpy.float64,requirements=['C','W'])
    vR= numpy.require(vR,dtype=numpy.float64,requirements=['C','W'])
    vT= numpy.require(vT,dtype=numpy.float64,requirements=['C','W'])
    z= numpy.require(z,dtype=numpy.float64,requirements=['C','W'])
    vz= numpy.require(vz,dtype=numpy.float64,requirements=['C','W'])
    u0= numpy.require(u0,dtype=numpy.float64,requirements=['C','W'])
    delta= numpy.require(delta,dtype=numpy.float64,requirements=['C','W'])
    jr= numpy.require(jr,dtype=numpy.float64,requirements=['C','W'])
    jz= numpy.require(jz,dtype=numpy.float64,requirements=['C','W'])
    Omegar= numpy.require(Omegar,dtype=numpy.float64,requirements=['C','W'])
    Omegaphi= numpy.require(Omegaphi,dtype=numpy.float64,
                            requirements=['C','W'])
    Omegaz= numpy.require(Omegaz,dtype=numpy.float64,requirements=['C','W'])
    #Run the C code
    actionAngleStaeckel_actionsFunc(len(R),
                                    R,
                                    vR,
                                    vT,
                                    z,
                                    vz,
                                    u0,
                                    ctypes.c_int(npot),
                                    pot_type,
                                    pot_args,
                                    ctypes.c_int(ndelta),
                                    delta,
                                    ctypes.c_int(order),
                                    jr,
                                    jz,
                                    Omegar,
                                    Omegaphi,
                                    Omegaz,
                                    ctypes.byref(err))
    #Reset input arrays to Fortran order where that was the original order
    if f_cont[0]: R= numpy.asfortranarray(R)
    if f_cont[1]: vR= numpy.asfortranarray(vR)
    if f_cont[2]: vT= numpy.asfortranarray(vT)
    if f_cont[3]: z= numpy.asfortranarray(z)
    if f_cont[4]: vz= numpy.asfortranarray(vz)
    if f_cont[5]: u0= numpy.asfortranarray(u0)
    if f_cont[6]: delta= numpy.asfortranarray(delta)
    return (jr,jz,Omegar,Omegaphi,Omegaz,err.value)
def actionAngleFreqAngleStaeckel_c(pot,delta,R,vR,vT,z,vz,phi,
                                   u0=None,order=10):
    """
    NAME:
       actionAngleFreqAngleStaeckel_c
    PURPOSE:
       Use C to calculate actions, frequencies, and angles
       using the Staeckel approximation
    INPUT:
       pot - Potential or list of such instances
       delta - focal length of prolate spheroidal coordinates
       R, vR, vT, z, vz, phi - coordinates (arrays)
       u0= (None) if set, u0 to use
       order= (10) order of Gauss-Legendre integration of the relevant integrals
    OUTPUT:
       (jr,jz,Omegar,Omegaphi,Omegaz,Angler,Anglephi,Anglez,err)
       jr,jz,Omegar,Omegaphi,Omegaz,Angler,Anglephi,Anglez : array, shape (len(R))
       err - non-zero if error occured
    HISTORY:
       2013-08-27 - Written - Bovy (IAS)
    """
    if u0 is None:
        u0, dummy= coords.Rz_to_uv(R,z,delta=numpy.atleast_1d(delta))
    #Parse the potential into the flat form the C code expects
    from ..orbit.integrateFullOrbit import _parse_pot
    npot, pot_type, pot_args= _parse_pot(pot,potforactions=True)
    #Parse delta
    delta= numpy.atleast_1d(delta)
    ndelta= len(delta)
    #Set up result arrays (filled in-place by the C code)
    jr= numpy.empty(len(R))
    jz= numpy.empty(len(R))
    Omegar= numpy.empty(len(R))
    Omegaphi= numpy.empty(len(R))
    Omegaz= numpy.empty(len(R))
    Angler= numpy.empty(len(R))
    Anglephi= numpy.empty(len(R))
    Anglez= numpy.empty(len(R))
    err= ctypes.c_int(0)
    #Set up the C code
    ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
    actionAngleStaeckel_actionsFunc= _lib.actionAngleStaeckel_actionsFreqsAngles
    # argtypes must match the C signature of
    # actionAngleStaeckel_actionsFreqsAngles exactly, in order
    actionAngleStaeckel_actionsFunc.argtypes= [ctypes.c_int,
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ctypes.c_int,
                                               ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ctypes.c_int,
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ctypes.c_int,
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ctypes.POINTER(ctypes.c_int)]
    #Array requirements: C-contiguous, writable float64; first store the old
    #order so Fortran-ordered inputs can be restored below
    f_cont= [R.flags['F_CONTIGUOUS'],
             vR.flags['F_CONTIGUOUS'],
             vT.flags['F_CONTIGUOUS'],
             z.flags['F_CONTIGUOUS'],
             vz.flags['F_CONTIGUOUS'],
             u0.flags['F_CONTIGUOUS'],
             delta.flags['F_CONTIGUOUS']]
    R= numpy.require(R,dtype=numpy.float64,requirements=['C','W'])
    vR= numpy.require(vR,dtype=numpy.float64,requirements=['C','W'])
    vT= numpy.require(vT,dtype=numpy.float64,requirements=['C','W'])
    z= numpy.require(z,dtype=numpy.float64,requirements=['C','W'])
    vz= numpy.require(vz,dtype=numpy.float64,requirements=['C','W'])
    u0= numpy.require(u0,dtype=numpy.float64,requirements=['C','W'])
    delta= numpy.require(delta,dtype=numpy.float64,requirements=['C','W'])
    jr= numpy.require(jr,dtype=numpy.float64,requirements=['C','W'])
    jz= numpy.require(jz,dtype=numpy.float64,requirements=['C','W'])
    Omegar= numpy.require(Omegar,dtype=numpy.float64,requirements=['C','W'])
    Omegaphi= numpy.require(Omegaphi,dtype=numpy.float64,
                            requirements=['C','W'])
    Omegaz= numpy.require(Omegaz,dtype=numpy.float64,requirements=['C','W'])
    Angler= numpy.require(Angler,dtype=numpy.float64,requirements=['C','W'])
    Anglephi= numpy.require(Anglephi,dtype=numpy.float64,
                            requirements=['C','W'])
    Anglez= numpy.require(Anglez,dtype=numpy.float64,requirements=['C','W'])
    #Run the C code
    actionAngleStaeckel_actionsFunc(len(R),
                                    R,
                                    vR,
                                    vT,
                                    z,
                                    vz,
                                    u0,
                                    ctypes.c_int(npot),
                                    pot_type,
                                    pot_args,
                                    ctypes.c_int(ndelta),
                                    delta,
                                    ctypes.c_int(order),
                                    jr,
                                    jz,
                                    Omegar,
                                    Omegaphi,
                                    Omegaz,
                                    Angler,
                                    Anglephi,
                                    Anglez,
                                    ctypes.byref(err))
    #Reset input arrays to Fortran order where that was the original order
    if f_cont[0]: R= numpy.asfortranarray(R)
    if f_cont[1]: vR= numpy.asfortranarray(vR)
    if f_cont[2]: vT= numpy.asfortranarray(vT)
    if f_cont[3]: z= numpy.asfortranarray(z)
    if f_cont[4]: vz= numpy.asfortranarray(vz)
    if f_cont[5]: u0= numpy.asfortranarray(u0)
    if f_cont[6]: delta= numpy.asfortranarray(delta)
    # Entries equal to 9999.99 (apparently the C code's error sentinel) are
    # left untouched; valid azimuthal angles are shifted by phi and wrapped
    # into [0,2pi).  Note on precedence: 'phi % (2pi)' binds first here, but
    # (a + (b % m)) % m == (a + b) % m, so the result is the same.
    badAngle = Anglephi != 9999.99
    Anglephi[badAngle]= (Anglephi[badAngle] + phi[badAngle] % (2.*numpy.pi)) % (2.*numpy.pi)
    Anglephi[Anglephi < 0.]+= 2.*numpy.pi
    return (jr,jz,Omegar,Omegaphi,Omegaz,Angler,
            Anglephi,Anglez,err.value)
def actionAngleUminUmaxVminStaeckel_c(pot,delta,R,vR,vT,z,vz,u0=None):
    """
    NAME:
       actionAngleUminUmaxVminStaeckel_c
    PURPOSE:
       Use C to calculate umin, umax, and vmin using the Staeckel approximation
    INPUT:
       pot - Potential or list of such instances
       delta - focal length of prolate spheroidal coordinates
       R, vR, vT, z, vz - coordinates (arrays)
       u0= (None) if set, u0 to use
    OUTPUT:
       (umin,umax,vmin,err)
       umin,umax,vmin : array, shape (len(R))
       err - non-zero if error occured
    HISTORY:
       2017-12-12 - Written - Bovy (UofT)
    """
    if u0 is None:
        u0, dummy= coords.Rz_to_uv(R,z,delta=numpy.atleast_1d(delta))
    #Parse the potential into the flat form the C code expects
    from ..orbit.integrateFullOrbit import _parse_pot
    npot, pot_type, pot_args= _parse_pot(pot,potforactions=True)
    #Parse delta
    delta= numpy.atleast_1d(delta)
    ndelta= len(delta)
    #Set up result arrays (filled in-place by the C code)
    umin= numpy.empty(len(R))
    umax= numpy.empty(len(R))
    vmin= numpy.empty(len(R))
    err= ctypes.c_int(0)
    #Set up the C code
    ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
    actionAngleStaeckel_actionsFunc= _lib.actionAngleStaeckel_uminUmaxVmin
    # argtypes must match the C signature of
    # actionAngleStaeckel_uminUmaxVmin exactly, in order (no 'order' arg here)
    actionAngleStaeckel_actionsFunc.argtypes= [ctypes.c_int,
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ctypes.c_int,
                                               ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ctypes.c_int,
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                                               ctypes.POINTER(ctypes.c_int)]
    #Array requirements: C-contiguous, writable float64; first store the old
    #order so Fortran-ordered inputs can be restored below
    f_cont= [R.flags['F_CONTIGUOUS'],
             vR.flags['F_CONTIGUOUS'],
             vT.flags['F_CONTIGUOUS'],
             z.flags['F_CONTIGUOUS'],
             vz.flags['F_CONTIGUOUS'],
             u0.flags['F_CONTIGUOUS'],
             delta.flags['F_CONTIGUOUS']]
    R= numpy.require(R,dtype=numpy.float64,requirements=['C','W'])
    vR= numpy.require(vR,dtype=numpy.float64,requirements=['C','W'])
    vT= numpy.require(vT,dtype=numpy.float64,requirements=['C','W'])
    z= numpy.require(z,dtype=numpy.float64,requirements=['C','W'])
    vz= numpy.require(vz,dtype=numpy.float64,requirements=['C','W'])
    u0= numpy.require(u0,dtype=numpy.float64,requirements=['C','W'])
    delta= numpy.require(delta,dtype=numpy.float64,requirements=['C','W'])
    umin= numpy.require(umin,dtype=numpy.float64,requirements=['C','W'])
    umax= numpy.require(umax,dtype=numpy.float64,requirements=['C','W'])
    vmin= numpy.require(vmin,dtype=numpy.float64,requirements=['C','W'])
    #Run the C code
    actionAngleStaeckel_actionsFunc(len(R),
                                    R,
                                    vR,
                                    vT,
                                    z,
                                    vz,
                                    u0,
                                    ctypes.c_int(npot),
                                    pot_type,
                                    pot_args,
                                    ctypes.c_int(ndelta),
                                    delta,
                                    umin,
                                    umax,
                                    vmin,
                                    ctypes.byref(err))
    #Reset input arrays to Fortran order where that was the original order
    if f_cont[0]: R= numpy.asfortranarray(R)
    if f_cont[1]: vR= numpy.asfortranarray(vR)
    if f_cont[2]: vT= numpy.asfortranarray(vT)
    if f_cont[3]: z= numpy.asfortranarray(z)
    if f_cont[4]: vz= numpy.asfortranarray(vz)
    if f_cont[5]: u0= numpy.asfortranarray(u0)
    if f_cont[6]: delta= numpy.asfortranarray(delta)
    return (umin,umax,vmin,err.value)
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from heat.common import exception
from heat.common import grouputils
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine.hot import template
from heat.engine import properties
from heat.engine.resources import stack_resource
from heat.engine import support
from heat.scaling import template as scl_template
class ResourceChain(stack_resource.StackResource):
"""Creates one or more resources with the same configuration.
The types of resources to be created are passed into the chain
through the ``resources`` property. One resource will be created for each
type listed. Each is passed the configuration specified
under ``resource_properties``.
The ``concurrent`` property controls if the resources will be created
concurrently. If omitted or set to false, each resource will be treated
as having a dependency on the resource before it in the list.
"""
support_status = support.SupportStatus(version='6.0.0')
PROPERTIES = (
RESOURCES, CONCURRENT, RESOURCE_PROPERTIES,
) = (
'resources', 'concurrent', 'resource_properties',
)
ATTRIBUTES = (
REFS, ATTR_ATTRIBUTES,
) = (
'refs', 'attributes',
)
properties_schema = {
RESOURCES: properties.Schema(
properties.Schema.LIST,
description=_('The list of resource types to create. This list '
'may contain type names or aliases defined in '
'the resource registry. Specific template names '
'are not supported.'),
required=True,
update_allowed=True
),
CONCURRENT: properties.Schema(
properties.Schema.BOOLEAN,
description=_('If true, the resources in the chain will be '
'created concurrently. If false or omitted, '
'each resource will be treated as having a '
'dependency on the previous resource in the list.'),
default=False,
),
RESOURCE_PROPERTIES: properties.Schema(
properties.Schema.MAP,
description=_('Properties to pass to each resource being created '
'in the chain.'),
)
}
attributes_schema = {
REFS: attributes.Schema(
description=_('A list of resource IDs for the resources in '
'the chain.'),
type=attributes.Schema.LIST
),
ATTR_ATTRIBUTES: attributes.Schema(
description=_('A map of resource names to the specified attribute '
'of each individual resource.'),
type=attributes.Schema.MAP
),
}
def validate_nested_stack(self):
# Check each specified resource type to ensure it's valid
for resource_type in self.properties[self.RESOURCES]:
try:
self.stack.env.get_class_to_instantiate(resource_type)
except exception.EntityNotFound:
# Valid if it's a template resource
pass
# Check the nested template itself
nested_tmpl = self.child_template()
nested_stack_name = '%s-%s' % (self.stack.name, self.name)
try:
nested_stack = self._parse_nested_stack(nested_stack_name,
nested_tmpl,
{})
# nested_stack.strict_validate = False
nested_stack.validate()
except Exception as ex:
msg = (_('Failed to validate nested template: %s')
% six.text_type(ex))
raise exception.StackValidationFailed(message=msg)
def handle_create(self):
return self.create_with_template(self.child_template())
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
return self.update_with_template(self.child_template())
def child_template(self):
resource_types = self.properties[self.RESOURCES]
resource_names = self._resource_names(resource_types)
name_def_tuples = []
for index, rt in enumerate(resource_types):
name = resource_names[index]
depends_on = None
if index > 0 and not self.properties[self.CONCURRENT]:
depends_on = resource_names[index - 1]
t = (name, self._build_resource_definition(name, rt,
depends_on=depends_on))
name_def_tuples.append(t)
nested_template = scl_template.make_template(name_def_tuples)
return nested_template
def child_params(self):
return {}
    def get_attribute(self, key, *path):
        """Resolve an attribute of the chain.

        Supported keys:
          * ``resource.<name>[...]`` -- delegate to the named nested resource.
          * ``refs`` -- the IDs of the nested resources, as a list.
          * ``attributes`` -- map of resource name to the attribute named by
            ``path`` (which is therefore required).
          * anything else -- collect that attribute from every nested
            resource and return the values as a list.
        """
        if key.startswith('resource.'):
            return grouputils.get_nested_attrs(self, key, False, *path)

        resource_types = self.properties[self.RESOURCES]
        names = self._resource_names(resource_types)
        if key == self.REFS:
            vals = [grouputils.get_rsrc_id(self, key, False, n) for n in names]
            return attributes.select_from_attribute(vals, path)
        if key == self.ATTR_ATTRIBUTES:
            # A specific attribute must be named via the path.
            if not path:
                raise exception.InvalidTemplateAttribute(
                    resource=self.name, key=key)
            return dict((n, grouputils.get_rsrc_attr(
                self, key, False, n, *path)) for n in names)

        # Fall back to fetching <key>[.<path>] from each nested resource.
        path = [key] + list(path)
        return [grouputils.get_rsrc_attr(self, key, False, n, *path)
                for n in names]
@staticmethod
def _resource_names(resource_types):
"""Returns a list of unique resource names to create."""
return [six.text_type(i) for i, t in enumerate(resource_types)]
def _build_resource_definition(self, resource_name, resource_type,
depends_on=None):
"""Creates a definition object for one of the types in the chain.
The definition will be built from the given name and type and will
use the properties specified in the chain's resource_properties
property. All types in the chain are given the same set of properties.
:type resource_name: str
:type resource_type: str
:param depends_on: if specified, the new resource will depend on the
resource name specified
:type depends_on: str
:return: resource definition suitable for adding to a template
:rtype: heat.engine.rsrc_defn.ResourceDefinition
"""
resource_def = {
template.RES_TYPE: resource_type,
template.RES_PROPERTIES: self.properties[self.RESOURCE_PROPERTIES],
}
if depends_on is not None:
resource_def[template.RES_DEPENDS_ON] = depends_on
return template.HOTemplate20130523.rsrc_defn_from_snippet(
resource_name, resource_def)
def resource_mapping():
    """Hook to register ResourceChain under its canonical type name."""
    return {'OS::Heat::ResourceChain': ResourceChain}
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfacesOperations(object):
"""NetworkInterfacesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        network_interface_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified network interface.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial DELETE.  The pass-through
            # ``cls`` makes the helper return the raw PipelineResponse that
            # the poller needs.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                network_interface_name=network_interface_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Already consumed by the initial call; don't forward them to the
        # polling method as well.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # The operation returns no body, so only invoke the
            # user-supplied ``cls`` hook if one was given.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a poller from previously saved state.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}  # type: ignore
def get(
self,
resource_group_name, # type: str
network_interface_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkInterface"
"""Gets information about the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterface, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_06_01.models.NetworkInterface
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_interface_name, # type: str
parameters, # type: "_models.NetworkInterface"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkInterface"
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkInterface')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        network_interface_name,  # type: str
        parameters,  # type: "_models.NetworkInterface"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.NetworkInterface"]
        """Creates or updates a network interface.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :param parameters: Parameters supplied to the create or update network interface operation.
        :type parameters: ~azure.mgmt.network.v2017_06_01.models.NetworkInterface
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either NetworkInterface or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_06_01.models.NetworkInterface]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterface"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: send the initial PUT.  The pass-through ``cls``
            # makes the helper return the raw PipelineResponse for polling.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                network_interface_name=network_interface_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Already consumed by the initial call; don't forward to polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('NetworkInterface', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a poller from previously saved state.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}  # type: ignore
    def list_all(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.NetworkInterfaceListResult"]
        """Gets all network interfaces in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_06_01.models.NetworkInterfaceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterfaceListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-06-01"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Build the GET request: the first page uses the operation's URL
            # template; later pages follow the service-provided next_link.
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its own query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Turn one page response into (next page link, iterator of items).
            deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page, raising a mapped error on non-200 responses.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces'}  # type: ignore
    def list(
        self,
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.NetworkInterfaceListResult"]
        """Gets all network interfaces in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_06_01.models.NetworkInterfaceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterfaceListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-06-01"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Build the GET request: the first page uses the operation's URL
            # template; later pages follow the service-provided next_link.
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its own query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Turn one page response into (next page link, iterator of items).
            deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page, raising a mapped error on non-200 responses.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces'}  # type: ignore
def _get_effective_route_table_initial(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.EffectiveRouteListResult"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.EffectiveRouteListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json, text/json"
# Construct URL
url = self._get_effective_route_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_effective_route_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'} # type: ignore
    def begin_get_effective_route_table(
        self,
        resource_group_name,  # type: str
        network_interface_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.EffectiveRouteListResult"]
        """Gets all route tables applied to a network interface.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either EffectiveRouteListResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_06_01.models.EffectiveRouteListResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.EffectiveRouteListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial POST; the pass-through
            # ``cls`` returns the raw PipelineResponse needed by the poller.
            raw_result = self._get_effective_route_table_initial(
                resource_group_name=resource_group_name,
                network_interface_name=network_interface_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Already consumed by the initial call; don't forward to polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a poller from previously saved state.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_effective_route_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'}  # type: ignore
def _list_effective_network_security_groups_initial(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.EffectiveNetworkSecurityGroupListResult"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.EffectiveNetworkSecurityGroupListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json, text/json"
# Construct URL
url = self._list_effective_network_security_groups_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_effective_network_security_groups_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'} # type: ignore
    def begin_list_effective_network_security_groups(
        self,
        resource_group_name,  # type: str
        network_interface_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.EffectiveNetworkSecurityGroupListResult"]
        """Gets all network security groups applied to a network interface.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either EffectiveNetworkSecurityGroupListResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_06_01.models.EffectiveNetworkSecurityGroupListResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.EffectiveNetworkSecurityGroupListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial POST; the pass-through
            # ``cls`` returns the raw PipelineResponse needed by the poller.
            raw_result = self._list_effective_network_security_groups_initial(
                resource_group_name=resource_group_name,
                network_interface_name=network_interface_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Already consumed by the initial call; don't forward to polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a poller from previously saved state.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_list_effective_network_security_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'}  # type: ignore
    def list_virtual_machine_scale_set_vm_network_interfaces(
        self,
        resource_group_name,  # type: str
        virtual_machine_scale_set_name,  # type: str
        virtualmachine_index,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.NetworkInterfaceListResult"]
        """Gets information about all network interfaces in a virtual machine in a virtual machine scale
        set.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_machine_scale_set_name: The name of the virtual machine scale set.
        :type virtual_machine_scale_set_name: str
        :param virtualmachine_index: The virtual machine index.
        :type virtualmachine_index: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_06_01.models.NetworkInterfaceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterfaceListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied status-code -> exception overrides take precedence.
        error_map.update(kwargs.pop('error_map', {}))
        # NOTE: this operation is served by the Compute RP, hence the
        # older pinned api-version.
        api_version = "2017-03-30"
        accept = "application/json, text/json"
        def prepare_request(next_link=None):
            # Build the GET for the first page (templated collection URL)
            # or a follow-up page (next_link used verbatim, no re-templating).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_virtual_machine_scale_set_vm_network_interfaces.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
                    'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page; returns (next page link or None, items iterator).
            deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                # Custom cls hook transforms the raw element list.
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page; anything other than HTTP 200 is an error.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_virtual_machine_scale_set_vm_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces'}  # type: ignore
    def list_virtual_machine_scale_set_network_interfaces(
        self,
        resource_group_name,  # type: str
        virtual_machine_scale_set_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.NetworkInterfaceListResult"]
        """Gets all network interfaces in a virtual machine scale set.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_machine_scale_set_name: The name of the virtual machine scale set.
        :type virtual_machine_scale_set_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_06_01.models.NetworkInterfaceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterfaceListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied status-code -> exception overrides take precedence.
        error_map.update(kwargs.pop('error_map', {}))
        # NOTE: served by the Compute RP, hence the older pinned api-version.
        api_version = "2017-03-30"
        accept = "application/json, text/json"
        def prepare_request(next_link=None):
            # First page uses the templated collection URL; later pages
            # follow next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_virtual_machine_scale_set_network_interfaces.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page; returns (next page link or None, items iterator).
            deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page; anything other than HTTP 200 is an error.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_virtual_machine_scale_set_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces'}  # type: ignore
    def get_virtual_machine_scale_set_network_interface(
        self,
        resource_group_name,  # type: str
        virtual_machine_scale_set_name,  # type: str
        virtualmachine_index,  # type: str
        network_interface_name,  # type: str
        expand=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.NetworkInterface"
        """Get the specified network interface in a virtual machine scale set.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_machine_scale_set_name: The name of the virtual machine scale set.
        :type virtual_machine_scale_set_name: str
        :param virtualmachine_index: The virtual machine index.
        :type virtualmachine_index: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NetworkInterface, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2017_06_01.models.NetworkInterface
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterface"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied status-code -> exception overrides take precedence.
        error_map.update(kwargs.pop('error_map', {}))
        # NOTE: served by the Compute RP, hence the older pinned api-version.
        api_version = "2017-03-30"
        accept = "application/json, text/json"
        # Construct URL
        url = self.get_virtual_machine_scale_set_network_interface.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
            'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if expand is not None:
            # $expand is optional and only sent when requested.
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # Map well-known status codes to typed errors, else a generic ARM error.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('NetworkInterface', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_virtual_machine_scale_set_network_interface.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}'}  # type: ignore
| |
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import uuid
from oslo_config import cfg
from oslo_utils import importutils
import six
from neutron._i18n import _
# Map of selectable OVSDB backend names to their implementing classes;
# the dotted-path values are imported lazily by API.get().
interface_map = {
    'vsctl': 'neutron.agent.ovsdb.impl_vsctl.OvsdbVsctl',
    'native': 'neutron.agent.ovsdb.impl_idl.OvsdbIdl',
}
# Agent configuration options selecting and reaching the OVSDB backend.
OPTS = [
    cfg.StrOpt('ovsdb_interface',
               choices=interface_map.keys(),
               default='vsctl',
               help=_('The interface for interacting with the OVSDB')),
    cfg.StrOpt('ovsdb_connection',
               default='tcp:127.0.0.1:6640',
               help=_('The connection string for the native OVSDB backend. '
                      'Requires the native ovsdb_interface to be enabled.'))
]
# Options live in the [OVS] section of the agent configuration.
cfg.CONF.register_opts(OPTS, 'OVS')
@six.add_metaclass(abc.ABCMeta)
class Command(object):
    """An OVSDB command that can be executed in a transaction
    :attr result: The result of executing the command in a transaction
    """
    @abc.abstractmethod
    def execute(self, **transaction_options):
        """Immediately execute an OVSDB command
        This implicitly creates a transaction with the passed options and then
        executes it, returning the value of the executed transaction
        :param transaction_options: Options to pass to the transaction
        :returns: The result of the executed transaction
        """
@six.add_metaclass(abc.ABCMeta)
class Transaction(object):
    """A batch of OVSDB commands applied together on commit.
    Usable as a context manager: commands added inside the ``with`` block
    are committed when the block exits cleanly, and the commit's return
    value is stored on ``self.result``.
    """
    @abc.abstractmethod
    def commit(self):
        """Commit the transaction to OVSDB"""
    @abc.abstractmethod
    def add(self, command):
        """Append an OVSDB operation to the transaction"""
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, tb):
        # Commit only if the 'with' body raised no exception; exceptions
        # are not suppressed (implicit None return).
        if exc_type is None:
            self.result = self.commit()
@six.add_metaclass(abc.ABCMeta)
class API(object):
    """Abstract OVSDB API.
    Concrete backends (see interface_map) return :class:`Command` objects
    from each method; commands run immediately via Command.execute() or
    are batched in a :class:`Transaction`.
    """
    def __init__(self, context):
        # context is opaque at this layer; it is available to backends.
        self.context = context
    @staticmethod
    def get(context, iface_name=None):
        """Return the configured OVSDB API implementation
        :param context: Context forwarded to the implementation constructor
        :param iface_name: Explicit backend name; defaults to the
                           [OVS] ovsdb_interface config option
        """
        iface = importutils.import_class(
            interface_map[iface_name or cfg.CONF.OVS.ovsdb_interface])
        return iface(context)
    @abc.abstractmethod
    def transaction(self, check_error=False, log_errors=True, **kwargs):
        """Create a transaction
        :param check_error: Allow the transaction to raise an exception?
        :type check_error: bool
        :param log_errors: Log an error if the transaction fails?
        :type log_errors: bool
        :returns: A new transaction
        :rtype: :class:`Transaction`
        """
    @abc.abstractmethod
    def add_br(self, name, may_exist=True, datapath_type=None):
        """Create a command to add an OVS bridge
        :param name: The name of the bridge
        :type name: string
        :param may_exist: Do not fail if bridge already exists
        :type may_exist: bool
        :param datapath_type: The datapath_type of the bridge
        :type datapath_type: string
        :returns: :class:`Command` with no result
        """
    @abc.abstractmethod
    def del_br(self, name, if_exists=True):
        """Create a command to delete an OVS bridge
        :param name: The name of the bridge
        :type name: string
        :param if_exists: Do not fail if the bridge does not exist
        :type if_exists: bool
        :returns: :class:`Command` with no result
        """
    @abc.abstractmethod
    def br_exists(self, name):
        """Create a command to check if an OVS bridge exists
        :param name: The name of the bridge
        :type name: string
        :returns: :class:`Command` with bool result
        """
    @abc.abstractmethod
    def port_to_br(self, name):
        """Create a command to return the name of the bridge with the port
        :param name: The name of the OVS port
        :type name: string
        :returns: :class:`Command` with bridge name result
        """
    @abc.abstractmethod
    def iface_to_br(self, name):
        """Create a command to return the name of the bridge with the interface
        :param name: The name of the OVS interface
        :type name: string
        :returns: :class:`Command` with bridge name result
        """
    @abc.abstractmethod
    def list_br(self):
        """Create a command to return the current list of OVS bridge names
        :returns: :class:`Command` with list of bridge names result
        """
    @abc.abstractmethod
    def br_get_external_id(self, name, field):
        """Create a command to return a field from the Bridge's external_ids
        :param name: The name of the OVS Bridge
        :type name: string
        :param field: The external_ids field to return
        :type field: string
        :returns: :class:`Command` with field value result
        """
    @abc.abstractmethod
    def db_create(self, table, **col_values):
        """Create a command to create new record
        :param table: The OVS table containing the record to be created
        :type table: string
        :param col_values: The columns and their associated values
                           to be set after create
        :type col_values: Dictionary of columns id's and values
        :returns: :class:`Command` with no result
        """
    @abc.abstractmethod
    def db_destroy(self, table, record):
        """Create a command to destroy a record
        :param table: The OVS table containing the record to be destroyed
        :type table: string
        :param record: The record id (name/uuid) to be destroyed
        :type record: uuid/string
        :returns: :class:`Command` with no result
        """
    @abc.abstractmethod
    def db_set(self, table, record, *col_values):
        """Create a command to set fields in a record
        :param table: The OVS table containing the record to be modified
        :type table: string
        :param record: The record id (name/uuid) to be modified
        :type record: string
        :param col_values: The columns and their associated values
        :type col_values: Tuples of (column, value). Values may be atomic
                          values or unnested sequences/mappings
        :returns: :class:`Command` with no result
        """
        # TODO(twilson) Consider handling kwargs for arguments where order
        # doesn't matter. Though that would break the assert_called_once_with
        # unit tests
    @abc.abstractmethod
    def db_clear(self, table, record, column):
        """Create a command to clear a field's value in a record
        :param table: The OVS table containing the record to be modified
        :type table: string
        :param record: The record id (name/uuid) to be modified
        :type record: string
        :param column: The column whose value should be cleared
        :type column: string
        :returns: :class:`Command` with no result
        """
    @abc.abstractmethod
    def db_get(self, table, record, column):
        """Create a command to return a field's value in a record
        :param table: The OVS table containing the record to be queried
        :type table: string
        :param record: The record id (name/uuid) to be queried
        :type record: string
        :param column: The column whose value should be returned
        :type column: string
        :returns: :class:`Command` with the field's value result
        """
    @abc.abstractmethod
    def db_list(self, table, records=None, columns=None, if_exists=False):
        """Create a command to return a list of OVSDB records
        :param table: The OVS table to query
        :type table: string
        :param records: The records to return values from
        :type records: list of record ids (names/uuids)
        :param columns: Limit results to only columns, None means all columns
        :type columns: list of column names or None
        :param if_exists: Do not fail if the record does not exist
        :type if_exists: bool
        :returns: :class:`Command` with [{'column', value}, ...] result
        """
    @abc.abstractmethod
    def db_find(self, table, *conditions, **kwargs):
        """Create a command to return find OVSDB records matching conditions
        :param table: The OVS table to query
        :type table: string
        :param conditions:The conditions to satisfy the query
        :type conditions: 3-tuples containing (column, operation, match)
                          Type of 'match' parameter MUST be identical to column
                          type
                          Examples:
                              atomic: ('tag', '=', 7)
                              map: ('external_ids' '=', {'iface-id': 'xxx'})
                              field exists?
                                  ('external_ids', '!=', {'iface-id', ''})
                              set contains?:
                                  ('protocols', '{>=}', 'OpenFlow13')
                          See the ovs-vsctl man page for more operations
        :param columns: Limit results to only columns, None means all columns
        :type columns: list of column names or None
        :returns: :class:`Command` with [{'column', value}, ...] result
        """
    @abc.abstractmethod
    def set_controller(self, bridge, controllers):
        """Create a command to set an OVS bridge's OpenFlow controllers
        :param bridge: The name of the bridge
        :type bridge: string
        :param controllers: The controller strings
        :type controllers: list of strings, see ovs-vsctl manpage for format
        :returns: :class:`Command` with no result
        """
    @abc.abstractmethod
    def del_controller(self, bridge):
        """Create a command to clear an OVS bridge's OpenFlow controllers
        :param bridge: The name of the bridge
        :type bridge: string
        :returns: :class:`Command` with no result
        """
    @abc.abstractmethod
    def get_controller(self, bridge):
        """Create a command to return an OVS bridge's OpenFlow controllers
        :param bridge: The name of the bridge
        :type bridge: string
        :returns: :class:`Command` with list of controller strings result
        """
    @abc.abstractmethod
    def set_fail_mode(self, bridge, mode):
        """Create a command to set an OVS bridge's failure mode
        :param bridge: The name of the bridge
        :type bridge: string
        :param mode: The failure mode
        :type mode: "secure" or "standalone"
        :returns: :class:`Command` with no result
        """
    @abc.abstractmethod
    def add_port(self, bridge, port, may_exist=True):
        """Create a command to add a port to an OVS bridge
        :param bridge: The name of the bridge
        :type bridge: string
        :param port: The name of the port
        :type port: string
        :param may_exist: Do not fail if the port already exists
        :type may_exist: bool
        :returns: :class:`Command` with no result
        """
    @abc.abstractmethod
    def del_port(self, port, bridge=None, if_exists=True):
        """Create a command to delete a port an OVS port
        :param port: The name of the port
        :type port: string
        :param bridge: Only delete port if it is attached to this bridge
        :type bridge: string
        :param if_exists: Do not fail if the port does not exist
        :type if_exists: bool
        :returns: :class:`Command` with no result
        """
    @abc.abstractmethod
    def list_ports(self, bridge):
        """Create a command to list the names of ports on a bridge
        :param bridge: The name of the bridge
        :type bridge: string
        :returns: :class:`Command` with list of port names result
        """
    @abc.abstractmethod
    def list_ifaces(self, bridge):
        """Create a command to list the names of interfaces on a bridge
        :param bridge: The name of the bridge
        :type bridge: string
        :returns: :class:`Command` with list of interfaces names result
        """
class TimeoutException(Exception):
    """Raised when an OVSDB operation does not complete in time."""
    pass
def val_to_py(val):
    """Convert a json ovsdb return value to a native python object.

    OVSDB's JSON encoding wraps non-atomic values in 2-element lists
    tagged "uuid", "set" or "map" (RFC 7047); anything else is returned
    unchanged.
    """
    # NOTE: isinstance(val, collections.Sequence) was wrong here: the
    # collections alias is removed in Python 3.10 (AttributeError) and it
    # also matches plain strings. OVSDB JSON composite values are always
    # lists, so test for list/tuple explicitly.
    if isinstance(val, (list, tuple)) and len(val) == 2:
        if val[0] == "uuid":
            return uuid.UUID(val[1])
        elif val[0] == "set":
            return [val_to_py(x) for x in val[1]]
        elif val[0] == "map":
            return {val_to_py(x): val_to_py(y) for x, y in val[1]}
    return val
def py_to_val(pyval):
    """Convert python value to ovs-vsctl value argument"""
    # Booleans must be rendered as the lowercase ovs-vsctl keywords.
    if isinstance(pyval, bool):
        return 'true' if pyval else 'false'
    # An empty string needs explicit quoting on the vsctl command line.
    if pyval == '':
        return '""'
    # Everything else is passed through untouched.
    return pyval
| |
from tastypie import fields
from tastypie.resources import ModelResource, Resource, ALL, Bundle
from tastypie.authentication import ApiKeyAuthentication
from tastypie.authorization import Authorization
from tastypie.serializers import Serializer
from snappybouncer.models import Conversation, UserAccount, Ticket
from snappybouncer.tasks import (send_helpdesk_response,
send_helpdesk_response_jembi)
import logging
import json
import re
import urlparse
from HTMLParser import HTMLParser
from django.core.exceptions import ObjectDoesNotExist
from django.conf.urls import url
logger = logging.getLogger(__name__)
# ModelResource access using standard format
class UserAccountResource(ModelResource):
    """Read-only API resource exposing UserAccount records."""
    class Meta:
        queryset = UserAccount.objects.all()
        resource_name = 'useraccount'
        # GET only; accounts are created/managed outside this API.
        list_allowed_methods = ['get']
        include_resource_uri = True
        always_return_data = True
        authentication = ApiKeyAuthentication()
        authorization = Authorization()
        filtering = {
            'key': ALL,
        }
class ConversationResource(ModelResource):
    """Read-only API resource exposing Conversation records."""
    user_account = fields.ToOneField(UserAccountResource, 'user_account')
    class Meta:
        queryset = Conversation.objects.all()
        resource_name = 'conversation'
        # GET only; conversations are created elsewhere.
        list_allowed_methods = ['get']
        include_resource_uri = True
        always_return_data = True
        authentication = ApiKeyAuthentication()
        authorization = Authorization()
        filtering = {
            'key': ALL,
            'user_account': ALL
        }
    def prepend_urls(self):
        # Additionally allow detail lookup by conversation key:
        # /conversation/key/<key>/ dispatches to the detail view.
        return [
            url(r"^(?P<resource_name>%s)/key/(?P<key>[\w\d_.-]+)/$" %
                self._meta.resource_name,
                self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
        ]
class TicketResource(ModelResource):
    """API resource for reading and creating helpdesk Ticket records."""
    conversation = fields.ToOneField(ConversationResource, 'conversation')
    class Meta:
        queryset = Ticket.objects.all()
        resource_name = 'ticket'
        # Tickets may be created through the API as well as listed.
        list_allowed_methods = ['get', 'post']
        include_resource_uri = True
        always_return_data = True
        authentication = ApiKeyAuthentication()
        authorization = Authorization()
        filtering = {
            'contact_key': ALL,
            'msisdn': ALL,
            'user_account': ALL
        }
# Resource custom API for WebHooks
class urlencodeSerializer(Serializer):
    """Serializer that also accepts form-encoded webhook POST bodies."""
    formats = ['json', 'jsonp', 'xml', 'yaml', 'html', 'plist', 'urlencode']
    content_types = {
        'json': 'application/json',
        'jsonp': 'text/javascript',
        'xml': 'application/xml',
        'yaml': 'text/yaml',
        'html': 'text/html',
        'plist': 'application/x-plist',
        'urlencode': 'application/x-www-form-urlencoded',
    }
    def from_urlencode(self, data, options=None):
        """ handles basic formencoded url posts """
        parsed = urlparse.parse_qs(data)
        # Collapse single-element value lists down to the bare value.
        return {k: v if len(v) > 1 else v[0]
                for k, v in parsed.iteritems()}
    def to_urlencode(self, content):
        # Serializing back out to urlencode is intentionally unsupported.
        pass
class MLStripper(HTMLParser):
    """HTMLParser subclass that keeps only the text content it is fed."""
    def __init__(self):
        # NOTE(review): deliberately skips HTMLParser.__init__ and calls
        # reset() directly to initialize parser state -- relies on
        # HTMLParser internals; confirm if the Python version changes.
        self.reset()
        self.fed = []
    def handle_data(self, d):
        # Collect each run of text found between tags.
        self.fed.append(d)
    def get_data(self):
        # Concatenate the collected text runs into one string.
        return ''.join(self.fed)
def strip_tags(html):
    """Return *html* with all markup removed, keeping only text content."""
    stripper = MLStripper()
    stripper.feed(html)
    return stripper.get_data()
# We need a generic object to shove data in/get data from.
class WebhookObject(object):
def __init__(self, initial=None):
self.__dict__['_data'] = {}
if hasattr(initial, 'items'):
self.__dict__['_data'] = initial
def __getattr__(self, name):
return self._data.get(name, None)
def __setattr__(self, name, value):
self.__dict__['_data'][name] = value
def to_dict(self):
return self._data
class WebhookResource(Resource):
    """Non-ORM resource receiving webhook POSTs from the Snappy helpdesk.
    Just like a Django ``Form`` or ``Model``, we're defining all the
    fields we're going to handle with the API here.
    """
    event = fields.CharField(attribute='event')
    data = fields.CharField(attribute='data')
    class Meta:
        resource_name = 'listener'
        list_allowed_methods = ['post']
        object_class = WebhookObject
        authentication = ApiKeyAuthentication()
        authorization = Authorization()
        serializer = urlencodeSerializer()
    # The following methods need overriding regardless of the
    # data source.
    def detail_uri_kwargs(self, bundle_or_obj):
        """Return the kwargs (pk) used to build a detail URI."""
        kwargs = {}
        if isinstance(bundle_or_obj, Bundle):
            kwargs['pk'] = bundle_or_obj.obj.uuid
        else:
            kwargs['pk'] = bundle_or_obj.uuid
        return kwargs
    def extract_tag(self, hashtags):
        """Return the first '#'-prefixed tag without its '#', or None.
        e.g. ["@person", "#coffee", "#payment"] -> "coffee"
        """
        for hashtag in hashtags:
            if hashtag[0] == "#":
                return hashtag[1::]
        return None
    def obj_create(self, bundle, **kwargs):
        """Process an incoming webhook event.
        For 'message.outgoing' events: look up the referenced support
        ticket, record the helpdesk response on it, then fan the response
        out to Vumi and Jembi via Celery tasks. Unknown tickets are
        logged, not raised.
        """
        bundle.obj = WebhookObject(initial=kwargs)
        bundle = self.full_hydrate(bundle)
        # React to the specific events
        allowed_events = ['message.outgoing']
        if bundle.obj.event in allowed_events:
            # strips newlines from dodgy json from API - bug logged
            # and turns into a dict
            bundle.obj.data = json.loads(re.sub("\\n", "", bundle.obj.data))
            if bundle.obj.event == 'message.outgoing':
                try:
                    # Get the pre-existing ticket. The lookup lives INSIDE
                    # the try so an unknown support_nonce is handled by the
                    # except clause below; previously it ran before the
                    # try, making the ObjectDoesNotExist handler dead code.
                    ticket = Ticket.objects.get(
                        support_nonce=bundle.obj.data["note"]["ticket"]["nonce"])
                    # Save the snappy ticket data
                    ticket.response = strip_tags(
                        bundle.obj.data["note"]["content"])
                    ticket.support_id = int(
                        bundle.obj.data["note"]["ticket"]["id"])
                    ticket.operator = bundle.obj.data[
                        "note"]["created_by_staff_id"]
                    ticket.tag = self.extract_tag(
                        bundle.obj.data["note"]["ticket"]["tags"])
                    ticket.save()
                    # Send the message out to user via Vumi via Celery
                    send_helpdesk_response.delay(ticket)
                    # Post the ticket info to Jembi
                    send_helpdesk_response_jembi.delay(ticket)
                except ObjectDoesNotExist:
                    logger.error(
                        'Webhook received for unrecognised support ticket',
                        exc_info=True)
        return bundle
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Drivers for volumes.
"""
import time
import os
from xml.etree import ElementTree
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.volume import volume_types
LOG = logging.getLogger("nova.volume.driver")
FLAGS = flags.FLAGS
flags.DEFINE_string('volume_group', 'nova-volumes',
                    'Name for the VG that will contain exported volumes')
flags.DEFINE_string('aoe_eth_dev', 'eth0',
                    'Which device to export the volumes on')
# These two are retry counters with integer defaults that are compared
# against ints (see VolumeDriver._try_execute), so declare them as
# integer flags like the other counters below rather than string flags.
flags.DEFINE_integer('num_shell_tries', 3,
                     'number of times to attempt to run flakey shell commands')
flags.DEFINE_integer('num_iscsi_scan_tries', 3,
                     'number of times to rescan iSCSI target to find volume')
flags.DEFINE_integer('num_shelves',
                     100,
                     'Number of vblade shelves')
flags.DEFINE_integer('blades_per_shelf',
                     16,
                     'Number of vblade blades per shelf')
flags.DEFINE_integer('iscsi_num_targets',
                     100,
                     'Number of iscsi target ids per host')
flags.DEFINE_string('iscsi_target_prefix', 'iqn.2010-10.org.openstack:',
                    'prefix for iscsi volumes')
flags.DEFINE_string('iscsi_ip_prefix', '$my_ip',
                    'discover volumes on the ip that starts with this prefix')
flags.DEFINE_string('rbd_pool', 'rbd',
                    'the rbd pool in which volumes are stored')
class VolumeDriver(object):
"""Executes commands relating to Volumes."""
def __init__(self, execute=utils.execute,
sync_exec=utils.execute, *args, **kwargs):
# NOTE(vish): db is set by Manager
self.db = None
self._execute = execute
self._sync_exec = sync_exec
def _try_execute(self, *command, **kwargs):
# NOTE(vish): Volume commands can partially fail due to timing, but
# running them a second time on failure will usually
# recover nicely.
tries = 0
while True:
try:
self._execute(*command, **kwargs)
return True
except exception.ProcessExecutionError:
tries = tries + 1
if tries >= FLAGS.num_shell_tries:
raise
LOG.exception(_("Recovering from a failed execute. "
"Try number %s"), tries)
time.sleep(tries ** 2)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
out, err = self._execute('vgs', '--noheadings', '-o', 'name',
run_as_root=True)
volume_groups = out.split()
if not FLAGS.volume_group in volume_groups:
raise exception.Error(_("volume group %s doesn't exist")
% FLAGS.volume_group)
def _create_volume(self, volume_name, sizestr):
self._try_execute('lvcreate', '-L', sizestr, '-n',
volume_name, FLAGS.volume_group, run_as_root=True)
def _copy_volume(self, srcstr, deststr, size_in_g):
self._execute('dd', 'if=%s' % srcstr, 'of=%s' % deststr,
'count=%d' % (size_in_g * 1024), 'bs=1M',
run_as_root=True)
def _volume_not_present(self, volume_name):
path_name = '%s/%s' % (FLAGS.volume_group, volume_name)
try:
self._try_execute('lvdisplay', path_name, run_as_root=True)
except Exception as e:
# If the volume isn't present
return True
return False
def _delete_volume(self, volume, size_in_g):
"""Deletes a logical volume."""
# zero out old volumes to prevent data leaking between users
# TODO(ja): reclaiming space should be done lazy and low priority
self._copy_volume('/dev/zero', self.local_path(volume), size_in_g)
self._try_execute('lvremove', '-f', "%s/%s" %
(FLAGS.volume_group,
self._escape_snapshot(volume['name'])),
run_as_root=True)
def _sizestr(self, size_in_g):
if int(size_in_g) == 0:
return '100M'
return '%sG' % size_in_g
# Linux LVM reserves name that starts with snapshot, so that
# such volume name can't be created. Mangle it.
def _escape_snapshot(self, snapshot_name):
if not snapshot_name.startswith('snapshot'):
return snapshot_name
return '_' + snapshot_name
def create_volume(self, volume):
"""Creates a logical volume. Can optionally return a Dictionary of
changes to the volume object to be persisted."""
self._create_volume(volume['name'], self._sizestr(volume['size']))
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
self._create_volume(volume['name'], self._sizestr(volume['size']))
self._copy_volume(self.local_path(snapshot), self.local_path(volume),
snapshot['volume_size'])
def delete_volume(self, volume):
"""Deletes a logical volume."""
if self._volume_not_present(volume['name']):
# If the volume isn't present, then don't attempt to delete
return True
# TODO(yamahata): lvm can't delete origin volume only without
# deleting derived snapshots. Can we do something fancy?
out, err = self._execute('lvdisplay', '--noheading',
'-C', '-o', 'Attr',
'%s/%s' % (FLAGS.volume_group,
volume['name']),
run_as_root=True)
# fake_execute returns None resulting unit test error
if out:
out = out.strip()
if (out[0] == 'o') or (out[0] == 'O'):
raise exception.VolumeIsBusy(volume_name=volume['name'])
self._delete_volume(volume, volume['size'])
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
orig_lv_name = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name'])
self._try_execute('lvcreate', '-L',
self._sizestr(snapshot['volume_size']),
'--name', self._escape_snapshot(snapshot['name']),
'--snapshot', orig_lv_name, run_as_root=True)
    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
            # If the snapshot isn't present, then don't attempt to delete
            return True
        # TODO(yamahata): zeroing out the whole snapshot triggers COW.
        # it's quite slow.
        self._delete_volume(snapshot, snapshot['volume_size'])
def local_path(self, volume):
# NOTE(vish): stops deprecation warning
escaped_group = FLAGS.volume_group.replace('-', '--')
escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
    def ensure_export(self, context, volume):
        """Synchronously recreates an export for a logical volume."""
        # Export handling is transport-specific; concrete drivers override.
        raise NotImplementedError()
    def create_export(self, context, volume):
        """Exports the volume. Can optionally return a Dictionary of changes
        to the volume object to be persisted."""
        # Export handling is transport-specific; concrete drivers override.
        raise NotImplementedError()
    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        # Export handling is transport-specific; concrete drivers override.
        raise NotImplementedError()
    def discover_volume(self, context, volume):
        """Discover volume on a remote host."""
        # Attachment discovery is transport-specific; drivers override.
        raise NotImplementedError()
    def undiscover_volume(self, volume):
        """Undiscover volume on a remote host."""
        # Attachment teardown is transport-specific; drivers override.
        raise NotImplementedError()
    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""
        # Export verification is transport-specific; drivers override.
        raise NotImplementedError()
def get_volume_stats(self, refresh=False):
"""Return the current state of the volume service. If 'refresh' is
True, run the update first."""
return None
class AOEDriver(VolumeDriver):
    """WARNING! Deprecated. This driver will be removed in Essex. Its use
    is not recommended.
    Implements AOE (ATA-over-Ethernet) specific volume commands via the
    vblade-persist utility; shelf/blade slots are allocated from the DB."""
    def __init__(self, *args, **kwargs):
        # Warn at construction time so deployments notice the deprecation.
        LOG.warn(_("AOEDriver is deprecated and will be removed in Essex"))
        super(AOEDriver, self).__init__(*args, **kwargs)
    def ensure_export(self, context, volume):
        # NOTE(vish): we depend on vblade-persist for recreating exports
        pass
    def _ensure_blades(self, context):
        """Ensure that blades have been created in datastore."""
        # Lazily populate the shelf/blade table; export_device_create_safe
        # tolerates duplicates, so re-running this is harmless.
        total_blades = FLAGS.num_shelves * FLAGS.blades_per_shelf
        if self.db.export_device_count(context) >= total_blades:
            return
        for shelf_id in xrange(FLAGS.num_shelves):
            for blade_id in xrange(FLAGS.blades_per_shelf):
                dev = {'shelf_id': shelf_id, 'blade_id': blade_id}
                self.db.export_device_create_safe(context, dev)
    def create_export(self, context, volume):
        """Creates an export for a logical volume."""
        self._ensure_blades(context)
        (shelf_id,
         blade_id) = self.db.volume_allocate_shelf_and_blade(context,
                                                             volume['id'])
        self._try_execute(
            'vblade-persist', 'setup',
            shelf_id,
            blade_id,
            FLAGS.aoe_eth_dev,
            "/dev/%s/%s" %
            (FLAGS.volume_group,
             volume['name']),
            run_as_root=True)
        # NOTE(vish): The standard _try_execute does not work here
        #             because these methods throw errors if other
        #             volumes on this host are in the process of
        #             being created. The good news is the command
        #             still works for the other volumes, so we
        #             just wait a bit for the current volume to
        #             be ready and ignore any errors.
        time.sleep(2)
        self._execute('vblade-persist', 'auto', 'all',
                      check_exit_code=False, run_as_root=True)
        self._execute('vblade-persist', 'start', 'all',
                      check_exit_code=False, run_as_root=True)
    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        (shelf_id,
         blade_id) = self.db.volume_get_shelf_and_blade(context,
                                                        volume['id'])
        self._try_execute('vblade-persist', 'stop',
                          shelf_id, blade_id, run_as_root=True)
        self._try_execute('vblade-persist', 'destroy',
                          shelf_id, blade_id, run_as_root=True)
    def discover_volume(self, context, _volume):
        """Discover volume on a remote host."""
        (shelf_id,
         blade_id) = self.db.volume_get_shelf_and_blade(context,
                                                        _volume['id'])
        self._execute('aoe-discover', run_as_root=True)
        out, err = self._execute('aoe-stat', check_exit_code=False,
                                 run_as_root=True)
        # AoE device names follow the e<shelf>.<blade> convention.
        device_path = 'e%(shelf_id)d.%(blade_id)d' % locals()
        if out.find(device_path) >= 0:
            return "/dev/etherd/%s" % device_path
        else:
            return
    def undiscover_volume(self, _volume):
        """Undiscover volume on a remote host."""
        pass
    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""
        (shelf_id,
         blade_id) = self.db.volume_get_shelf_and_blade(context,
                                                        volume_id)
        cmd = ('vblade-persist', 'ls', '--no-header')
        out, _err = self._execute(*cmd, run_as_root=True)
        exported = False
        # NOTE(review): assumes each 'vblade-persist ls' row has exactly six
        # space-separated fields with shelf first, blade second and the run
        # state last -- confirm against the installed vblade-persist version.
        for line in out.split('\n'):
            param = line.split(' ')
            if len(param) == 6 and param[0] == str(shelf_id) \
                and param[1] == str(blade_id) and param[-1] == "run":
                exported = True
                break
        if not exported:
            # Instance will be terminated in this case.
            desc = _("Cannot confirm exported volume id:%(volume_id)s. "
                     "vblade process for e%(shelf_id)s.%(blade_id)s "
                     "isn't running.") % locals()
            raise exception.ProcessExecutionError(out, _err, cmd=cmd,
                                                  description=desc)
class FakeAOEDriver(AOEDriver):
    """AOE driver stand-in for tests: logs commands instead of running them."""

    def __init__(self, *args, **kwargs):
        # Route both execution hooks through the logging fake.
        super(FakeAOEDriver, self).__init__(execute=self.fake_execute,
                                            sync_exec=self.fake_execute,
                                            *args, **kwargs)

    def check_for_setup_error(self):
        """No setup necessary in fake mode."""
        pass

    @staticmethod
    def fake_execute(cmd, *_args, **_kwargs):
        """Record *cmd* in the debug log and return empty (out, err)."""
        LOG.debug(_("FAKE AOE: %s"), cmd)
        return (None, None)
class ISCSIDriver(VolumeDriver):
    """Executes commands relating to ISCSI volumes.
    We make use of model provider properties as follows:
    :provider_location: if present, contains the iSCSI target information
                        in the same format as an ietadm discovery
                        i.e. '<ip>:<port>,<portal> <target IQN>'
    :provider_auth: if present, contains a space-separated triple:
                    '<auth method> <auth username> <auth password>'.
                    `CHAP` is the only auth_method in use at the moment.
    """
    def ensure_export(self, context, volume):
        """Synchronously recreates an export for a logical volume."""
        try:
            iscsi_target = self.db.volume_get_iscsi_target_num(context,
                                                               volume['id'])
        except exception.NotFound:
            LOG.info(_("Skipping ensure_export. No iscsi_target " +
                       "provisioned for volume: %d"), volume['id'])
            return
        iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
        volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
        # Recreate the target and its LUN; exit codes are ignored because
        # the target may already exist after a service restart.
        self._sync_exec('ietadm', '--op', 'new',
                        "--tid=%s" % iscsi_target,
                        '--params',
                        "Name=%s" % iscsi_name,
                        run_as_root=True,
                        check_exit_code=False)
        self._sync_exec('ietadm', '--op', 'new',
                        "--tid=%s" % iscsi_target,
                        '--lun=0',
                        '--params',
                        "Path=%s,Type=fileio" % volume_path,
                        run_as_root=True,
                        check_exit_code=False)
    def _ensure_iscsi_targets(self, context, host):
        """Ensure that target ids have been created in datastore."""
        host_iscsi_targets = self.db.iscsi_target_count_by_host(context, host)
        if host_iscsi_targets >= FLAGS.iscsi_num_targets:
            return
        # NOTE(vish): Target ids start at 1, not 0.
        for target_num in xrange(1, FLAGS.iscsi_num_targets + 1):
            target = {'host': host, 'target_num': target_num}
            self.db.iscsi_target_create_safe(context, target)
    def create_export(self, context, volume):
        """Creates an export for a logical volume."""
        self._ensure_iscsi_targets(context, volume['host'])
        iscsi_target = self.db.volume_allocate_iscsi_target(context,
                                                            volume['id'],
                                                            volume['host'])
        iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
        volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
        self._execute('ietadm', '--op', 'new',
                      '--tid=%s' % iscsi_target,
                      '--params', 'Name=%s' % iscsi_name, run_as_root=True)
        self._execute('ietadm', '--op', 'new',
                      '--tid=%s' % iscsi_target,
                      '--lun=0', '--params',
                      'Path=%s,Type=fileio' % volume_path, run_as_root=True)
    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        try:
            iscsi_target = self.db.volume_get_iscsi_target_num(context,
                                                               volume['id'])
        except exception.NotFound:
            LOG.info(_("Skipping remove_export. No iscsi_target " +
                       "provisioned for volume: %d"), volume['id'])
            return
        try:
            # ietadm show will exit with an error
            # this export has already been removed
            self._execute('ietadm', '--op', 'show',
                          '--tid=%s' % iscsi_target, run_as_root=True)
        except Exception as e:
            LOG.info(_("Skipping remove_export. No iscsi_target " +
                       "is presently exported for volume: %d"), volume['id'])
            return
        # Delete the LUN first, then the target itself.
        self._execute('ietadm', '--op', 'delete',
                      '--tid=%s' % iscsi_target,
                      '--lun=0', run_as_root=True)
        self._execute('ietadm', '--op', 'delete',
                      '--tid=%s' % iscsi_target, run_as_root=True)
    def _do_iscsi_discovery(self, volume):
        #TODO(justinsb): Deprecate discovery and use stored info
        #NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
        LOG.warn(_("ISCSI provider_location not stored, using discovery"))
        volume_name = volume['name']
        (out, _err) = self._execute('iscsiadm', '-m', 'discovery',
                                    '-t', 'sendtargets', '-p', volume['host'],
                                    run_as_root=True)
        # Return the first discovery line that mentions this volume.
        for target in out.splitlines():
            if FLAGS.iscsi_ip_prefix in target and volume_name in target:
                return target
        return None
    def _get_iscsi_properties(self, volume):
        """Gets iscsi configuration.
        We ideally get saved information in the volume entity, but fall back
        to discovery if need be. Discovery may be completely removed in future
        The properties are:
        :target_discovered: boolean indicating whether discovery was used
        :target_iqn: the IQN of the iSCSI target
        :target_portal: the portal of the iSCSI target
        :auth_method:, :auth_username:, :auth_password:
            the authentication details. Right now, either auth_method is not
            present meaning no authentication, or auth_method == `CHAP`
            meaning use CHAP with the specified credentials.
        """
        properties = {}
        location = volume['provider_location']
        if location:
            # provider_location is the same format as iSCSI discovery output
            properties['target_discovered'] = False
        else:
            location = self._do_iscsi_discovery(volume)
            if not location:
                raise exception.Error(_("Could not find iSCSI export "
                                        " for volume %s") %
                                      (volume['name']))
            LOG.debug(_("ISCSI Discovery: Found %s") % (location))
            properties['target_discovered'] = True
        # Location format: '<ip>:<port>,<portal> <target IQN>'.
        (iscsi_target, _sep, iscsi_name) = location.partition(" ")
        iscsi_portal = iscsi_target.split(",")[0]
        properties['target_iqn'] = iscsi_name
        properties['target_portal'] = iscsi_portal
        auth = volume['provider_auth']
        if auth:
            (auth_method, auth_username, auth_secret) = auth.split()
            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret
        return properties
    def _run_iscsiadm(self, iscsi_properties, iscsi_command):
        """Run iscsiadm against this volume's target/portal and log it."""
        (out, err) = self._execute('iscsiadm', '-m', 'node', '-T',
                                   iscsi_properties['target_iqn'],
                                   '-p', iscsi_properties['target_portal'],
                                   *iscsi_command, run_as_root=True)
        LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
                  (iscsi_command, out, err))
        return (out, err)
    def _iscsiadm_update(self, iscsi_properties, property_key, property_value):
        """Set a single node property on this target via iscsiadm."""
        iscsi_command = ('--op', 'update', '-n', property_key,
                         '-v', property_value)
        return self._run_iscsiadm(iscsi_properties, iscsi_command)
    def discover_volume(self, context, volume):
        """Discover volume on a remote host."""
        iscsi_properties = self._get_iscsi_properties(volume)
        if not iscsi_properties['target_discovered']:
            self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
        # Configure CHAP credentials before login when present.
        if iscsi_properties.get('auth_method'):
            self._iscsiadm_update(iscsi_properties,
                                  "node.session.auth.authmethod",
                                  iscsi_properties['auth_method'])
            self._iscsiadm_update(iscsi_properties,
                                  "node.session.auth.username",
                                  iscsi_properties['auth_username'])
            self._iscsiadm_update(iscsi_properties,
                                  "node.session.auth.password",
                                  iscsi_properties['auth_password'])
        self._run_iscsiadm(iscsi_properties, ("--login", ))
        self._iscsiadm_update(iscsi_properties, "node.startup", "automatic")
        mount_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" %
                        (iscsi_properties['target_portal'],
                         iscsi_properties['target_iqn']))
        # The /dev/disk/by-path/... node is not always present immediately
        # TODO(justinsb): This retry-with-delay is a pattern, move to utils?
        tries = 0
        while not os.path.exists(mount_device):
            if tries >= FLAGS.num_iscsi_scan_tries:
                raise exception.Error(_("iSCSI device not found at %s") %
                                      (mount_device))
            LOG.warn(_("ISCSI volume not yet found at: %(mount_device)s. "
                       "Will rescan & retry. Try number: %(tries)s") %
                     locals())
            # The rescan isn't documented as being necessary(?), but it helps
            self._run_iscsiadm(iscsi_properties, ("--rescan", ))
            tries = tries + 1
            if not os.path.exists(mount_device):
                # Back off quadratically between rescans.
                time.sleep(tries ** 2)
        if tries != 0:
            LOG.debug(_("Found iSCSI node %(mount_device)s "
                        "(after %(tries)s rescans)") %
                      locals())
        return mount_device
    def undiscover_volume(self, volume):
        """Undiscover volume on a remote host."""
        iscsi_properties = self._get_iscsi_properties(volume)
        # Disable auto-login, log out, then forget the node record.
        self._iscsiadm_update(iscsi_properties, "node.startup", "manual")
        self._run_iscsiadm(iscsi_properties, ("--logout", ))
        self._run_iscsiadm(iscsi_properties, ('--op', 'delete'))
    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""
        tid = self.db.volume_get_iscsi_target_num(context, volume_id)
        try:
            self._execute('ietadm', '--op', 'show',
                          '--tid=%(tid)d' % locals(), run_as_root=True)
        except exception.ProcessExecutionError, e:
            # Instances remount read-only in this case.
            # /etc/init.d/iscsitarget restart and rebooting nova-volume
            # is better since ensure_export() works at boot time.
            logging.error(_("Cannot confirm exported volume "
                            "id:%(volume_id)s.") % locals())
            raise
class FakeISCSIDriver(ISCSIDriver):
    """Test double for ISCSIDriver: logs calls instead of executing them."""

    def __init__(self, *args, **kwargs):
        # Route both execution hooks through the logging fake.
        super(FakeISCSIDriver, self).__init__(execute=self.fake_execute,
                                              sync_exec=self.fake_execute,
                                              *args, **kwargs)

    def check_for_setup_error(self):
        """No setup necessary in fake mode."""
        pass

    def discover_volume(self, context, volume):
        """Return a deterministic fake device path for *volume*."""
        volume_id = volume['id']
        return "/dev/disk/by-path/volume-id-%d" % volume_id

    def undiscover_volume(self, volume):
        """Nothing to undo in fake mode."""
        pass

    @staticmethod
    def fake_execute(cmd, *_args, **_kwargs):
        """Record *cmd* in the debug log and return empty (out, err)."""
        LOG.debug(_("FAKE ISCSI: %s"), cmd)
        return (None, None)
class RBDDriver(VolumeDriver):
    """Implements RADOS block device (RBD) volume commands."""

    def check_for_setup_error(self):
        """Raise if the configured RBD pool does not exist."""
        (stdout, stderr) = self._execute('rados', 'lspools')
        pools = stdout.split("\n")
        # Idiomatic membership test (was: 'if not FLAGS.rbd_pool in pools').
        if FLAGS.rbd_pool not in pools:
            raise exception.Error(_("rbd has no pool %s") %
                                  FLAGS.rbd_pool)

    def create_volume(self, volume):
        """Creates a logical volume.

        rbd sizes are expressed in MB; a zero-sized request gets a 100 MB
        volume so creation still succeeds (mirrors the LVM _sizestr rule).
        """
        if int(volume['size']) == 0:
            size = 100
        else:
            size = int(volume['size']) * 1024
        self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
                          '--size', size, 'create', volume['name'])

    def delete_volume(self, volume):
        """Deletes a logical volume."""
        self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
                          'rm', volume['name'])

    def create_snapshot(self, snapshot):
        """Creates an rbd snapshot."""
        self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
                          'snap', 'create', '--snap', snapshot['name'],
                          snapshot['volume_name'])

    def delete_snapshot(self, snapshot):
        """Deletes an rbd snapshot."""
        self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
                          'snap', 'rm', '--snap', snapshot['name'],
                          snapshot['volume_name'])

    def local_path(self, volume):
        """Returns the path of the rbd volume."""
        # This is the same as the remote path
        # since qemu accesses it directly.
        return "rbd:%s/%s" % (FLAGS.rbd_pool, volume['name'])

    def ensure_export(self, context, volume):
        """Synchronously recreates an export for a logical volume."""
        # RBD volumes need no export step; qemu talks to the cluster.
        pass

    def create_export(self, context, volume):
        """Exports the volume."""
        pass

    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        pass

    def discover_volume(self, context, volume):
        """Discover volume on a remote host."""
        return "rbd:%s/%s" % (FLAGS.rbd_pool, volume['name'])

    def undiscover_volume(self, volume):
        """Undiscover volume on a remote host."""
        pass
class SheepdogDriver(VolumeDriver):
    """Executes commands relating to Sheepdog Volumes"""
    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met"""
        try:
            # 'collie cluster info' begins its output with the cluster
            # status; anything other than 'running' means it is unusable.
            (out, err) = self._execute('collie', 'cluster', 'info')
            if not out.startswith('running'):
                raise exception.Error(_("Sheepdog is not working: %s") % out)
        except exception.ProcessExecutionError:
            raise exception.Error(_("Sheepdog is not working"))
    def create_volume(self, volume):
        """Creates a sheepdog volume"""
        self._try_execute('qemu-img', 'create',
                          "sheepdog:%s" % volume['name'],
                          self._sizestr(volume['size']))
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a sheepdog volume from a snapshot."""
        # '-b' bases the new image on the snapshot; the backing spec uses
        # the sheepdog:<volume>:<snapshot> syntax.
        self._try_execute('qemu-img', 'create', '-b',
                          "sheepdog:%s:%s" % (snapshot['volume_name'],
                                              snapshot['name']),
                          "sheepdog:%s" % volume['name'])
    def delete_volume(self, volume):
        """Deletes a logical volume"""
        self._try_execute('collie', 'vdi', 'delete', volume['name'])
    def create_snapshot(self, snapshot):
        """Creates a sheepdog snapshot"""
        self._try_execute('qemu-img', 'snapshot', '-c', snapshot['name'],
                          "sheepdog:%s" % snapshot['volume_name'])
    def delete_snapshot(self, snapshot):
        """Deletes a sheepdog snapshot"""
        self._try_execute('collie', 'vdi', 'delete', snapshot['volume_name'],
                          '-s', snapshot['name'])
    def local_path(self, volume):
        # Sheepdog volumes are reached through qemu's sheepdog protocol,
        # so the "local" path is a protocol URI rather than a device node.
        return "sheepdog:%s" % volume['name']
    def ensure_export(self, context, volume):
        """Safely and synchronously recreates an export for a logical volume"""
        pass
    def create_export(self, context, volume):
        """Exports the volume"""
        pass
    def remove_export(self, context, volume):
        """Removes an export for a logical volume"""
        pass
    def discover_volume(self, context, volume):
        """Discover volume on a remote host"""
        return "sheepdog:%s" % volume['name']
    def undiscover_volume(self, volume):
        """Undiscover volume on a remote host"""
        pass
class LoggingVolumeDriver(VolumeDriver):
    """Logs and records calls, for unit tests."""
    def check_for_setup_error(self):
        pass
    def create_volume(self, volume):
        self.log_action('create_volume', volume)
    def delete_volume(self, volume):
        self.log_action('delete_volume', volume)
    def local_path(self, volume):
        # This driver has no backing device, so there is no local path.
        print "local_path not implemented"
        raise NotImplementedError()
    def ensure_export(self, context, volume):
        self.log_action('ensure_export', volume)
    def create_export(self, context, volume):
        self.log_action('create_export', volume)
    def remove_export(self, context, volume):
        self.log_action('remove_export', volume)
    def discover_volume(self, context, volume):
        self.log_action('discover_volume', volume)
    def undiscover_volume(self, volume):
        self.log_action('undiscover_volume', volume)
    def check_for_export(self, context, volume_id):
        self.log_action('check_for_export', volume_id)
    # Class-level record of every logged call, shared by all instances.
    _LOGS = []
    @staticmethod
    def clear_logs():
        """Reset the shared call log (call between tests)."""
        LoggingVolumeDriver._LOGS = []
    @staticmethod
    def log_action(action, parameters):
        """Logs the command."""
        LOG.debug(_("LoggingVolumeDriver: %s") % (action))
        log_dictionary = {}
        if parameters:
            log_dictionary = dict(parameters)
        log_dictionary['action'] = action
        LOG.debug(_("LoggingVolumeDriver: %s") % (log_dictionary))
        LoggingVolumeDriver._LOGS.append(log_dictionary)
    @staticmethod
    def all_logs():
        """Return the full list of recorded call dictionaries."""
        return LoggingVolumeDriver._LOGS
    @staticmethod
    def logs_like(action, **kwargs):
        """Return recorded entries for *action* whose fields match kwargs."""
        matches = []
        for entry in LoggingVolumeDriver._LOGS:
            if entry['action'] != action:
                continue
            match = True
            for k, v in kwargs.iteritems():
                if entry.get(k) != v:
                    match = False
                    break
            if match:
                matches.append(entry)
        return matches
class ZadaraBEDriver(ISCSIDriver):
    """Performs actions to configure Zadara BE module.

    Non-VSA volumes are delegated to the parent ISCSIDriver; FE (front-end)
    VSA volumes are no-ops here; BE (back-end) VSA drives are managed via
    the zadara_sncfg helper binary.
    """

    def _is_vsa_volume(self, volume):
        """True when *volume* is a front-end VSA volume."""
        return volume_types.is_vsa_volume(volume['volume_type_id'])

    def _is_vsa_drive(self, volume):
        """True when *volume* is a back-end VSA drive."""
        return volume_types.is_vsa_drive(volume['volume_type_id'])

    def _not_vsa_volume_or_drive(self, volume):
        """Returns True if volume is not VSA BE volume."""
        if not volume_types.is_vsa_object(volume['volume_type_id']):
            LOG.debug(_("\tVolume %s is NOT VSA volume"), volume['name'])
            return True
        else:
            return False

    def check_for_setup_error(self):
        """No setup necessary for Zadara BE."""
        pass

    # ------------------------------------------------------------------
    # Volume Driver methods
    # ------------------------------------------------------------------

    def create_volume(self, volume):
        """Creates BE volume."""
        if self._not_vsa_volume_or_drive(volume):
            return super(ZadaraBEDriver, self).create_volume(volume)

        if self._is_vsa_volume(volume):
            LOG.debug(_("\tFE VSA Volume %s creation - do nothing"),
                        volume['name'])
            return

        if int(volume['size']) == 0:
            sizestr = '0'  # indicates full-partition
        else:
            sizestr = '%s' % (int(volume['size']) << 30)  # size in bytes

        # Set the qos-str to default type sas
        qosstr = 'SAS_1000'
        volume_type = volume_types.get_volume_type(None,
                                                   volume['volume_type_id'])
        if volume_type is not None:
            qosstr = volume_type['extra_specs']['drive_type'] + \
                ("_%s" % volume_type['extra_specs']['drive_size'])

        # Find the destination VSA id in the volume metadata.
        vsa_id = None
        for i in volume.get('volume_metadata'):
            if i['key'] == 'to_vsa_id':
                vsa_id = i['value']
                break

        try:
            self._sync_exec('/var/lib/zadara/bin/zadara_sncfg',
                            'create_qospart',
                            '--qos', qosstr,
                            '--pname', volume['name'],
                            '--psize', sizestr,
                            '--vsaid', vsa_id,
                            run_as_root=True,
                            check_exit_code=0)
        except exception.ProcessExecutionError:
            LOG.debug(_("VSA BE create_volume for %s failed"), volume['name'])
            raise

        LOG.debug(_("VSA BE create_volume for %s succeeded"), volume['name'])

    def delete_volume(self, volume):
        """Deletes BE volume."""
        if self._not_vsa_volume_or_drive(volume):
            return super(ZadaraBEDriver, self).delete_volume(volume)

        if self._is_vsa_volume(volume):
            LOG.debug(_("\tFE VSA Volume %s deletion - do nothing"),
                        volume['name'])
            return

        try:
            self._sync_exec('/var/lib/zadara/bin/zadara_sncfg',
                            'delete_partition',
                            '--pname', volume['name'],
                            run_as_root=True,
                            check_exit_code=0)
        except exception.ProcessExecutionError:
            # Deletion is best-effort: log and return so cleanup of other
            # resources can proceed.
            LOG.debug(_("VSA BE delete_volume for %s failed"), volume['name'])
            return

        # Fixed typo in log message ("suceeded" -> "succeeded").
        LOG.debug(_("VSA BE delete_volume for %s succeeded"), volume['name'])

    def local_path(self, volume):
        """Return a path/URI through which the volume can be reached."""
        if self._not_vsa_volume_or_drive(volume):
            return super(ZadaraBEDriver, self).local_path(volume)

        if self._is_vsa_volume(volume):
            LOG.debug(_("\tFE VSA Volume %s local path call - call discover"),
                        volume['name'])
            return super(ZadaraBEDriver, self).discover_volume(None, volume)

        raise exception.Error(_("local_path not supported"))

    def ensure_export(self, context, volume):
        """Ensure BE export for a volume."""
        if self._not_vsa_volume_or_drive(volume):
            return super(ZadaraBEDriver, self).ensure_export(context, volume)

        if self._is_vsa_volume(volume):
            LOG.debug(_("\tFE VSA Volume %s ensure export - do nothing"),
                        volume['name'])
            return

        try:
            iscsi_target = self.db.volume_get_iscsi_target_num(context,
                                                               volume['id'])
        except exception.NotFound:
            LOG.info(_("Skipping ensure_export. No iscsi_target " +
                       "provisioned for volume: %d"), volume['id'])
            return

        try:
            ret = self._common_be_export(context, volume, iscsi_target)
        except exception.ProcessExecutionError:
            # Best-effort at startup: failure to re-export is not fatal.
            return
        return ret

    def create_export(self, context, volume):
        """Create BE export for a volume."""
        if self._not_vsa_volume_or_drive(volume):
            return super(ZadaraBEDriver, self).create_export(context, volume)

        if self._is_vsa_volume(volume):
            LOG.debug(_("\tFE VSA Volume %s create export - do nothing"),
                        volume['name'])
            return

        self._ensure_iscsi_targets(context, volume['host'])
        iscsi_target = self.db.volume_allocate_iscsi_target(context,
                                                            volume['id'],
                                                            volume['host'])
        try:
            ret = self._common_be_export(context, volume, iscsi_target)
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
            # are not swallowed; callers still see the expected type.
            raise exception.ProcessExecutionError
        return ret

    def remove_export(self, context, volume):
        """Removes BE export for a volume."""
        if self._not_vsa_volume_or_drive(volume):
            return super(ZadaraBEDriver, self).remove_export(context, volume)

        if self._is_vsa_volume(volume):
            LOG.debug(_("\tFE VSA Volume %s remove export - do nothing"),
                        volume['name'])
            return

        try:
            iscsi_target = self.db.volume_get_iscsi_target_num(context,
                                                               volume['id'])
        except exception.NotFound:
            LOG.info(_("Skipping remove_export. No iscsi_target " +
                       "provisioned for volume: %d"), volume['id'])
            return

        try:
            self._sync_exec('/var/lib/zadara/bin/zadara_sncfg',
                            'remove_export',
                            '--pname', volume['name'],
                            '--tid', iscsi_target,
                            run_as_root=True,
                            check_exit_code=0)
        except exception.ProcessExecutionError:
            LOG.debug(_("VSA BE remove_export for %s failed"), volume['name'])
            return

    def create_snapshot(self, snapshot):
        """Nothing required for snapshot."""
        # BUG FIX: previously referenced the undefined name 'volume'
        # (NameError at runtime); the parameter is 'snapshot'.
        # NOTE(review): assumes the snapshot dict carries 'volume_type_id'
        # like a volume dict does -- confirm against callers.
        if self._not_vsa_volume_or_drive(snapshot):
            return super(ZadaraBEDriver, self).create_snapshot(snapshot)

    def delete_snapshot(self, snapshot):
        """Nothing required to delete a snapshot."""
        # BUG FIX: same undefined-name fix as create_snapshot above.
        if self._not_vsa_volume_or_drive(snapshot):
            return super(ZadaraBEDriver, self).delete_snapshot(snapshot)

    # ------------------------------------------------------------------
    # Internal BE Volume methods
    # ------------------------------------------------------------------

    def _common_be_export(self, context, volume, iscsi_target):
        """
        Common logic that asks zadara_sncfg to setup iSCSI target/lun for
        this volume
        """
        (out, err) = self._sync_exec(
                            '/var/lib/zadara/bin/zadara_sncfg',
                            'create_export',
                            '--pname', volume['name'],
                            '--tid', iscsi_target,
                            run_as_root=True,
                            check_exit_code=0)

        result_xml = ElementTree.fromstring(out)
        response_node = result_xml.find("Sn")
        if response_node is None:
            msg = "Malformed response from zadara_sncfg"
            raise exception.Error(msg)

        sn_ip = response_node.findtext("SnIp")
        sn_iqn = response_node.findtext("IqnName")
        iscsi_portal = sn_ip + ":3260," + ("%s" % iscsi_target)

        # provider_location format matches ietadm discovery output:
        # '<ip>:<port>,<portal> <target IQN>'.
        model_update = {}
        model_update['provider_location'] = ("%s %s" %
                                             (iscsi_portal,
                                              sn_iqn))
        return model_update

    def _get_qosgroup_summary(self):
        """gets the list of qosgroups from Zadara BE"""
        try:
            (out, err) = self._sync_exec(
                                '/var/lib/zadara/bin/zadara_sncfg',
                                'get_qosgroups_xml',
                                run_as_root=True,
                                check_exit_code=0)
        except exception.ProcessExecutionError:
            LOG.debug(_("Failed to retrieve QoS info"))
            return {}

        qos_groups = {}
        result_xml = ElementTree.fromstring(out)
        for element in result_xml.findall('QosGroup'):
            qos_group = {}
            # get the name of the group.
            # If we cannot find it, forget this element
            group_name = element.findtext("Name")
            if not group_name:
                continue

            # loop through all child nodes & fill up attributes of this group
            # (iterating the element directly replaces getchildren(), which
            # was removed in Python 3.9)
            for child in element:
                # two types of elements - property of qos-group & sub property
                # classify them accordingly
                if child.text:
                    qos_group[child.tag] = int(child.text) \
                        if child.text.isdigit() else child.text
                else:
                    subelement = {}
                    for subchild in child:
                        subelement[subchild.tag] = int(subchild.text) \
                            if subchild.text.isdigit() else subchild.text
                    qos_group[child.tag] = subelement

            # Now add this group to the master qos_groups
            qos_groups[group_name] = qos_group
        return qos_groups

    def get_volume_stats(self, refresh=False):
        """Return the current state of the volume service.

        :param refresh: when True, run the update first.
        """
        drive_info = self._get_qosgroup_summary()
        return {'drive_qos_info': drive_info}
| |
import unittest
from os.path import join
from sys import version as py_version
from unittest import mock
from pythonforandroid.recommendations import (
check_ndk_api,
check_ndk_version,
check_target_api,
read_ndk_version,
check_python_version,
print_recommendations,
MAX_NDK_VERSION,
RECOMMENDED_NDK_VERSION,
RECOMMENDED_TARGET_API,
MIN_NDK_API,
MIN_NDK_VERSION,
NDK_DOWNLOAD_URL,
ARMEABI_MAX_TARGET_API,
MIN_TARGET_API,
UNKNOWN_NDK_MESSAGE,
PARSE_ERROR_NDK_MESSAGE,
READ_ERROR_NDK_MESSAGE,
ENSURE_RIGHT_NDK_MESSAGE,
NDK_LOWER_THAN_SUPPORTED_MESSAGE,
UNSUPPORTED_NDK_API_FOR_ARMEABI_MESSAGE,
CURRENT_NDK_VERSION_MESSAGE,
RECOMMENDED_NDK_VERSION_MESSAGE,
TARGET_NDK_API_GREATER_THAN_TARGET_API_MESSAGE,
OLD_NDK_API_MESSAGE,
NEW_NDK_MESSAGE,
OLD_API_MESSAGE,
MIN_PYTHON_MAJOR_VERSION,
MIN_PYTHON_MINOR_VERSION,
PY2_ERROR_TEXT,
PY_VERSION_ERROR_TEXT,
)
from pythonforandroid.util import BuildInterruptingException
# True when interpreting under Python 2; several tests below are skipped
# there because `assertLogs` only exists on Python 3.4+.
running_in_py2 = int(py_version[0]) < 3
class TestRecommendations(unittest.TestCase):
    """
    An inherited class of `unittest.TestCase` to test the module
    :mod:`~pythonforandroid.recommendations`.
    """
    def setUp(self):
        # A fake NDK location; tests either mock reads from it or assert
        # the error path for a missing directory.
        self.ndk_dir = "/opt/android/android-ndk"
    @unittest.skipIf(running_in_py2, "`assertLogs` requires Python 3.4+")
    @mock.patch("pythonforandroid.recommendations.read_ndk_version")
    def test_check_ndk_version_greater_than_recommended(self, mock_read_ndk):
        """An NDK newer than the maximum only warns (INFO + two WARNINGs)."""
        mock_read_ndk.return_value.version = [MAX_NDK_VERSION + 1, 0, 5232133]
        with self.assertLogs(level="INFO") as cm:
            check_ndk_version(self.ndk_dir)
        mock_read_ndk.assert_called_once_with(self.ndk_dir)
        self.assertEqual(
            cm.output,
            [
                "INFO:p4a:[INFO]: {}".format(
                    CURRENT_NDK_VERSION_MESSAGE.format(
                        ndk_version=MAX_NDK_VERSION + 1
                    )
                ),
                "WARNING:p4a:[WARNING]: {}".format(
                    RECOMMENDED_NDK_VERSION_MESSAGE.format(
                        recommended_ndk_version=RECOMMENDED_NDK_VERSION
                    )
                ),
                "WARNING:p4a:[WARNING]: {}".format(NEW_NDK_MESSAGE),
            ],
        )
    @mock.patch("pythonforandroid.recommendations.read_ndk_version")
    def test_check_ndk_version_lower_than_recommended(self, mock_read_ndk):
        """An NDK below the minimum supported must abort the build."""
        mock_read_ndk.return_value.version = [MIN_NDK_VERSION - 1, 0, 5232133]
        with self.assertRaises(BuildInterruptingException) as e:
            check_ndk_version(self.ndk_dir)
        self.assertEqual(
            e.exception.args[0],
            NDK_LOWER_THAN_SUPPORTED_MESSAGE.format(
                min_supported=MIN_NDK_VERSION, ndk_url=NDK_DOWNLOAD_URL
            ),
        )
        mock_read_ndk.assert_called_once_with(self.ndk_dir)
    @unittest.skipIf(running_in_py2, "`assertLogs` requires Python 3.4+")
    def test_check_ndk_version_error(self):
        """
        Test that a fake ndk dir gives us three log messages:
        - first should be an `INFO` log
        - the following two should be `WARNING` logs
        """
        with self.assertLogs(level="INFO") as cm:
            check_ndk_version(self.ndk_dir)
        self.assertEqual(
            cm.output,
            [
                "INFO:p4a:[INFO]: {}".format(UNKNOWN_NDK_MESSAGE),
                "WARNING:p4a:[WARNING]: {}".format(
                    READ_ERROR_NDK_MESSAGE.format(ndk_dir=self.ndk_dir)
                ),
                "WARNING:p4a:[WARNING]: {}".format(
                    ENSURE_RIGHT_NDK_MESSAGE.format(
                        min_supported=MIN_NDK_VERSION,
                        rec_version=RECOMMENDED_NDK_VERSION,
                        ndk_url=NDK_DOWNLOAD_URL,
                    )
                ),
            ],
        )
    @mock.patch("pythonforandroid.recommendations.open")
    def test_read_ndk_version(self, mock_open_src_prop):
        """Parsing source.properties should yield the Pkg.Revision value."""
        mock_open_src_prop.side_effect = [
            mock.mock_open(
                read_data="Pkg.Revision = 17.2.4988734"
            ).return_value
        ]
        version = read_ndk_version(self.ndk_dir)
        mock_open_src_prop.assert_called_once_with(
            join(self.ndk_dir, "source.properties")
        )
        assert version == "17.2.4988734"
    @unittest.skipIf(running_in_py2, "`assertLogs` requires Python 3.4+")
    @mock.patch("pythonforandroid.recommendations.open")
    def test_read_ndk_version_error(self, mock_open_src_prop):
        """An empty source.properties logs a parse error and yields None."""
        mock_open_src_prop.side_effect = [
            mock.mock_open(read_data="").return_value
        ]
        with self.assertLogs(level="INFO") as cm:
            version = read_ndk_version(self.ndk_dir)
        self.assertEqual(
            cm.output,
            ["INFO:p4a:[INFO]: {}".format(PARSE_ERROR_NDK_MESSAGE)],
        )
        mock_open_src_prop.assert_called_once_with(
            join(self.ndk_dir, "source.properties")
        )
        assert version is None
    def test_check_target_api_error_arch_armeabi(self):
        """armeabi must not target an API above ARMEABI_MAX_TARGET_API."""
        with self.assertRaises(BuildInterruptingException) as e:
            check_target_api(RECOMMENDED_TARGET_API, "armeabi")
        self.assertEqual(
            e.exception.args[0],
            UNSUPPORTED_NDK_API_FOR_ARMEABI_MESSAGE.format(
                req_ndk_api=RECOMMENDED_TARGET_API,
                max_ndk_api=ARMEABI_MAX_TARGET_API,
            ),
        )
    @unittest.skipIf(running_in_py2, "`assertLogs` requires Python 3.4+")
    def test_check_target_api_warning_target_api(self):
        """A target API below the minimum should log two warnings."""
        # NOTE(review): the hardcoded "Target API 25 < 26" expectation
        # assumes MIN_TARGET_API == 26; it breaks when that constant is
        # bumped -- consider formatting from the constants instead.
        with self.assertLogs(level="INFO") as cm:
            check_target_api(MIN_TARGET_API - 1, MIN_TARGET_API)
        self.assertEqual(
            cm.output,
            [
                "WARNING:p4a:[WARNING]: Target API 25 < 26",
                "WARNING:p4a:[WARNING]: {old_api_msg}".format(
                    old_api_msg=OLD_API_MESSAGE
                ),
            ],
        )
    def test_check_ndk_api_error_android_api(self):
        """
        Given an `android api` greater than an `ndk_api`, we should get an
        `BuildInterruptingException`.
        """
        ndk_api = MIN_NDK_API + 1
        android_api = MIN_NDK_API
        with self.assertRaises(BuildInterruptingException) as e:
            check_ndk_api(ndk_api, android_api)
        self.assertEqual(
            e.exception.args[0],
            TARGET_NDK_API_GREATER_THAN_TARGET_API_MESSAGE.format(
                ndk_api=ndk_api, android_api=android_api
            ),
        )
    @unittest.skipIf(running_in_py2, "`assertLogs` requires Python 3.4+")
    def test_check_ndk_api_warning_old_ndk(self):
        """
        Given an `ndk_api` lower than the minimum supported by p4a, we
        should get a warning logged (not an exception).
        """
        ndk_api = MIN_NDK_API - 1
        android_api = RECOMMENDED_TARGET_API
        with self.assertLogs(level="INFO") as cm:
            check_ndk_api(ndk_api, android_api)
        self.assertEqual(
            cm.output,
            [
                "WARNING:p4a:[WARNING]: {}".format(
                    OLD_NDK_API_MESSAGE.format(MIN_NDK_API)
                )
            ],
        )
    def test_check_python_version(self):
        """With any version info lower than the minimum, we should get a
        BuildInterruptingException with an appropriate message.
        """
        with mock.patch('sys.version_info') as fake_version_info:
            # Major version is Python 2 => exception
            fake_version_info.major = MIN_PYTHON_MAJOR_VERSION - 1
            fake_version_info.minor = MIN_PYTHON_MINOR_VERSION
            with self.assertRaises(BuildInterruptingException) as context:
                check_python_version()
            assert context.exception.message == PY2_ERROR_TEXT
            # Major version too low => exception
            # Using a float valued major version just to test the logic and avoid
            # clashing with the Python 2 check
            fake_version_info.major = MIN_PYTHON_MAJOR_VERSION - 0.1
            fake_version_info.minor = MIN_PYTHON_MINOR_VERSION
            with self.assertRaises(BuildInterruptingException) as context:
                check_python_version()
            assert context.exception.message == PY_VERSION_ERROR_TEXT
            # Minor version too low => exception
            fake_version_info.major = MIN_PYTHON_MAJOR_VERSION
            fake_version_info.minor = MIN_PYTHON_MINOR_VERSION - 1
            with self.assertRaises(BuildInterruptingException) as context:
                check_python_version()
            assert context.exception.message == PY_VERSION_ERROR_TEXT
            # Version high enough => nothing interesting happens
            fake_version_info.major = MIN_PYTHON_MAJOR_VERSION
            fake_version_info.minor = MIN_PYTHON_MINOR_VERSION
            check_python_version()
    def test_print_recommendations(self):
        """
        Simple test that the function actually runs.
        """
        # The main failure mode is if the function tries to print a variable
        # that doesn't actually exist, so simply running to check all the
        # prints work is the most important test.
        print_recommendations()
| |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.dataproc.v1beta2 WorkflowTemplateService API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.cloud.dataproc_v1beta2.gapic import enums
from google.cloud.dataproc_v1beta2.gapic import workflow_template_service_client_config
from google.cloud.dataproc_v1beta2.gapic.transports import workflow_template_service_grpc_transport
from google.cloud.dataproc_v1beta2.proto import clusters_pb2
from google.cloud.dataproc_v1beta2.proto import clusters_pb2_grpc
from google.cloud.dataproc_v1beta2.proto import jobs_pb2
from google.cloud.dataproc_v1beta2.proto import jobs_pb2_grpc
from google.cloud.dataproc_v1beta2.proto import operations_pb2 as proto_operations_pb2
from google.cloud.dataproc_v1beta2.proto import workflow_templates_pb2
from google.cloud.dataproc_v1beta2.proto import workflow_templates_pb2_grpc
from google.longrunning import operations_pb2 as longrunning_operations_pb2
from google.protobuf import duration_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
# Version of the installed ``google-cloud-dataproc`` distribution; reported
# to the service in the user-agent string via ``ClientInfo`` (see below).
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
    'google-cloud-dataproc', ).version
class WorkflowTemplateServiceClient(object):
"""
The API interface for managing Workflow Templates in the
Cloud Dataproc API.
"""
SERVICE_ADDRESS = 'dataproc.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.cloud.dataproc.v1beta2.WorkflowTemplateService'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
WorkflowTemplateServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def region_path(cls, project, region):
"""Return a fully-qualified region string."""
return google.api_core.path_template.expand(
'projects/{project}/regions/{region}',
project=project,
region=region,
)
@classmethod
def workflow_template_path(cls, project, region, workflow_template):
"""Return a fully-qualified workflow_template string."""
return google.api_core.path_template.expand(
'projects/{project}/regions/{region}/workflowTemplates/{workflow_template}',
project=project,
region=region,
workflow_template=workflow_template,
)
def __init__(self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None):
"""Constructor.
Args:
transport (Union[~.WorkflowTemplateServiceGrpcTransport,
Callable[[~.Credentials, type], ~.WorkflowTemplateServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
'The `client_config` argument is deprecated.',
PendingDeprecationWarning,
stacklevel=2)
else:
client_config = workflow_template_service_client_config.config
if channel:
warnings.warn(
'The `channel` argument is deprecated; use '
'`transport` instead.',
PendingDeprecationWarning,
stacklevel=2)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=workflow_template_service_grpc_transport.
WorkflowTemplateServiceGrpcTransport,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.')
self.transport = transport
else:
self.transport = workflow_template_service_grpc_transport.WorkflowTemplateServiceGrpcTransport(
address=self.SERVICE_ADDRESS,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION, )
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME], )
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def create_workflow_template(
self,
parent,
template,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Creates new workflow template.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.WorkflowTemplateServiceClient()
>>>
>>> parent = client.region_path('[PROJECT]', '[REGION]')
>>>
>>> # TODO: Initialize `template`:
>>> template = {}
>>>
>>> response = client.create_workflow_template(parent, template)
Args:
parent (str): Required. The "resource name" of the region, as described in
https://cloud.google.com/apis/design/resource\_names of the form
``projects/{project_id}/regions/{region}``
template (Union[dict, ~google.cloud.dataproc_v1beta2.types.WorkflowTemplate]): Required. The Dataproc workflow template to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'create_workflow_template' not in self._inner_api_calls:
self._inner_api_calls[
'create_workflow_template'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_workflow_template,
default_retry=self.
_method_configs['CreateWorkflowTemplate'].retry,
default_timeout=self.
_method_configs['CreateWorkflowTemplate'].timeout,
client_info=self._client_info,
)
request = workflow_templates_pb2.CreateWorkflowTemplateRequest(
parent=parent,
template=template,
)
return self._inner_api_calls['create_workflow_template'](
request, retry=retry, timeout=timeout, metadata=metadata)
def get_workflow_template(self,
name,
version=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Retrieves the latest workflow template.
Can retrieve previously instantiated template by specifying optional
version parameter.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.WorkflowTemplateServiceClient()
>>>
>>> name = client.workflow_template_path('[PROJECT]', '[REGION]', '[WORKFLOW_TEMPLATE]')
>>>
>>> response = client.get_workflow_template(name)
Args:
name (str): Required. The "resource name" of the workflow template, as described in
https://cloud.google.com/apis/design/resource\_names of the form
``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}``
version (int): Optional. The version of workflow template to retrieve. Only previously
instatiated versions can be retrieved.
If unspecified, retrieves the current version.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_workflow_template' not in self._inner_api_calls:
self._inner_api_calls[
'get_workflow_template'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_workflow_template,
default_retry=self._method_configs['GetWorkflowTemplate'].
retry,
default_timeout=self.
_method_configs['GetWorkflowTemplate'].timeout,
client_info=self._client_info,
)
request = workflow_templates_pb2.GetWorkflowTemplateRequest(
name=name,
version=version,
)
return self._inner_api_calls['get_workflow_template'](
request, retry=retry, timeout=timeout, metadata=metadata)
def instantiate_workflow_template(
self,
name,
version=None,
instance_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Instantiates a template and begins execution.
The returned Operation can be used to track execution of workflow by
polling ``operations.get``. The Operation will complete when entire
workflow is finished.
The running workflow can be aborted via ``operations.cancel``. This will
cause any inflight jobs to be cancelled and workflow-owned clusters to
be deleted.
The ``Operation.metadata`` will be ``WorkflowMetadata``.
On successful completion, ``Operation.response`` will be ``Empty``.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.WorkflowTemplateServiceClient()
>>>
>>> name = client.workflow_template_path('[PROJECT]', '[REGION]', '[WORKFLOW_TEMPLATE]')
>>>
>>> response = client.instantiate_workflow_template(name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): Required. The "resource name" of the workflow template, as described in
https://cloud.google.com/apis/design/resource\_names of the form
``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}``
version (int): Optional. The version of workflow template to instantiate. If specified,
the workflow will be instantiated only if the current version of
the workflow template has the supplied version.
This option cannot be used to instantiate a previous version of
workflow template.
instance_id (str): Optional. A tag that prevents multiple concurrent workflow instances
with the same tag from running. This mitigates risk of concurrent
instances started due to retries.
It is recommended to always set this value to a
`UUID <https://en.wikipedia.org/wiki/Universally_unique_identifier>`__.
The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores
(\_), and hyphens (-). The maximum length is 40 characters.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'instantiate_workflow_template' not in self._inner_api_calls:
self._inner_api_calls[
'instantiate_workflow_template'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.instantiate_workflow_template,
default_retry=self.
_method_configs['InstantiateWorkflowTemplate'].retry,
default_timeout=self.
_method_configs['InstantiateWorkflowTemplate'].timeout,
client_info=self._client_info,
)
request = workflow_templates_pb2.InstantiateWorkflowTemplateRequest(
name=name,
version=version,
instance_id=instance_id,
)
operation = self._inner_api_calls['instantiate_workflow_template'](
request, retry=retry, timeout=timeout, metadata=metadata)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=workflow_templates_pb2.WorkflowMetadata,
)
def update_workflow_template(
self,
template,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Updates (replaces) workflow template. The updated template
must contain version that matches the current server version.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.WorkflowTemplateServiceClient()
>>>
>>> # TODO: Initialize `template`:
>>> template = {}
>>>
>>> response = client.update_workflow_template(template)
Args:
template (Union[dict, ~google.cloud.dataproc_v1beta2.types.WorkflowTemplate]): Required. The updated workflow template.
The ``template.version`` field must match the current version.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'update_workflow_template' not in self._inner_api_calls:
self._inner_api_calls[
'update_workflow_template'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_workflow_template,
default_retry=self.
_method_configs['UpdateWorkflowTemplate'].retry,
default_timeout=self.
_method_configs['UpdateWorkflowTemplate'].timeout,
client_info=self._client_info,
)
request = workflow_templates_pb2.UpdateWorkflowTemplateRequest(
template=template, )
return self._inner_api_calls['update_workflow_template'](
request, retry=retry, timeout=timeout, metadata=metadata)
def list_workflow_templates(
self,
parent,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Lists workflows that match the specified filter in the request.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.WorkflowTemplateServiceClient()
>>>
>>> parent = client.region_path('[PROJECT]', '[REGION]')
>>>
>>> # Iterate over all results
>>> for element in client.list_workflow_templates(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_workflow_templates(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Required. The "resource name" of the region, as described in
https://cloud.google.com/apis/design/resource\_names of the form
``projects/{project_id}/regions/{region}``
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_workflow_templates' not in self._inner_api_calls:
self._inner_api_calls[
'list_workflow_templates'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_workflow_templates,
default_retry=self.
_method_configs['ListWorkflowTemplates'].retry,
default_timeout=self.
_method_configs['ListWorkflowTemplates'].timeout,
client_info=self._client_info,
)
request = workflow_templates_pb2.ListWorkflowTemplatesRequest(
parent=parent,
page_size=page_size,
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_workflow_templates'],
retry=retry,
timeout=timeout,
metadata=metadata),
request=request,
items_field='templates',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def delete_workflow_template(
self,
name,
version=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Deletes a workflow template. It does not cancel in-progress workflows.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.WorkflowTemplateServiceClient()
>>>
>>> name = client.workflow_template_path('[PROJECT]', '[REGION]', '[WORKFLOW_TEMPLATE]')
>>>
>>> client.delete_workflow_template(name)
Args:
name (str): Required. The "resource name" of the workflow template, as described in
https://cloud.google.com/apis/design/resource\_names of the form
``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}``
version (int): Optional. The version of workflow template to delete. If specified,
will only delete the template if the current server version matches
specified version.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'delete_workflow_template' not in self._inner_api_calls:
self._inner_api_calls[
'delete_workflow_template'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_workflow_template,
default_retry=self.
_method_configs['DeleteWorkflowTemplate'].retry,
default_timeout=self.
_method_configs['DeleteWorkflowTemplate'].timeout,
client_info=self._client_info,
)
request = workflow_templates_pb2.DeleteWorkflowTemplateRequest(
name=name,
version=version,
)
self._inner_api_calls['delete_workflow_template'](
request, retry=retry, timeout=timeout, metadata=metadata)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Decorator to overrides the gradient for a function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import tape as tape_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import op_selector
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
# Op type names that identify variable-producing ops when walking a graph
# (used by the variable-discovery helpers later in this module).
VAR_OP_TYPES = [
    "VariableV2",
    "VarHandleOp",
]
def copy_handle_data(source_t, target_t):
  """Copies HandleData for variant and resource type tensors if available.

  The CppShapeInferenceResult::HandleData proto contains information about the
  shapes and types of the element tensors of resource/variant type tensors.
  We need to copy this across function boundaries, i.e., when capturing a
  placeholder or when returning a function tensor as output. If we don't do
  this the element tensors will have unknown shapes, e.g., if a TensorList
  variant tensor is captured as a placeholder, elements popped from that list
  would have unknown shape.

  Args:
    source_t: The tensor to copy HandleData from.
    target_t: The tensor to copy HandleData to.
  """
  # Only resource/variant tensors carry handle data; anything else is a no-op.
  if (target_t.dtype == dtypes.resource or
      target_t.dtype == dtypes.variant):
    # Eager tensors expose handle data directly; graph tensors require a
    # C-API lookup via resource_variable_ops.
    if isinstance(source_t, ops.EagerTensor):
      handle_data = source_t._handle_data  # pylint: disable=protected-access
    else:
      handle_data = resource_variable_ops.get_resource_handle_data(source_t)
    if (handle_data is not None
        and handle_data.is_set
        and handle_data.shape_and_type):
      # pylint: disable=protected-access
      pywrap_tensorflow.SetHandleShapeAndType(target_t.graph._c_graph,
                                              target_t._as_tf_output(),
                                              handle_data.SerializeToString())
      # pylint: enable=protected-access
      # Ensure that shapes and dtypes are propagated.
      # Unknown rank is encoded as -1 and unknown shape as None for the C API.
      shapes, types = zip(*[(pair.shape, pair.dtype)
                            for pair in handle_data.shape_and_type])
      ranks = [len(s.dim) if not s.unknown_rank else -1 for s in shapes]
      shapes = [[d.size for d in s.dim]  # pylint: disable=g-complex-comprehension
                if not s.unknown_rank else None for s in shapes]
      pywrap_tensorflow.TF_GraphSetOutputHandleShapesAndTypes_wrapper(
          target_t._op._graph._c_graph,  # pylint: disable=protected-access
          target_t._as_tf_output(),  # pylint: disable=protected-access
          shapes, ranks, types)
@tf_export("custom_gradient")
def custom_gradient(f=None):
  """Decorator to define a function with a custom gradient.

  This decorator allows fine grained control over the gradients of a sequence
  of operations, e.g. to provide a more efficient or numerically stable
  gradient. For example, `tf.math.log(1 + tf.exp(x))` has a NaN gradient at
  x=100, but the analytically simplified gradient is stable:

  ```python
  @tf.custom_gradient
  def log1pexp(x):
    e = tf.exp(x)
    def grad(dy):
      return dy * (1 - 1 / (1 + e))
    return tf.math.log(1 + e), grad
  ```

  With this definition, the gradient at x=100 is correctly evaluated as 1.0.

  Note on nesting: a `@tf.custom_gradient` defined inside `grad_fn`
  calculates the first-order gradient of `grad_fn` with respect to `dy`
  (used for forward-mode gradients), NOT the second-order derivative of the
  outer function. To fuse higher-order gradients, wrap nested
  `@tf.custom_gradient`s in another function that multiplies the inner
  custom-gradient function into the first-order gradient.

  See also `tf.RegisterGradient`, which registers a gradient for a primitive
  operation; `tf.custom_gradient` instead controls the gradient of a
  sequence of operations. If the decorated function uses `Variable`s, the
  enclosing variable scope must use `ResourceVariable`s.

  Args:
    f: function `f(*x)` that returns a tuple `(y, grad_fn)` where:
       - `x` is a sequence of `Tensor` inputs to the function.
       - `y` is a `Tensor` or sequence of `Tensor` outputs of applying
         TensorFlow operations in `f` to `x`.
       - `grad_fn` is a function `g(*grad_ys)` returning the derivatives of
         the `Tensor`s in `y` with respect to the `Tensor`s in `x`, where
         `grad_ys` holds the initial value gradients for each `Tensor` in
         `y` (i.e. `grad_fn` expresses the Jacobian as the transform
         `grad_ys * J`).
         If `f` uses `Variable`s that are not part of the inputs (e.g. via
         `get_variable`), `grad_fn` must instead have signature
         `g(*grad_ys, variables=None)` and return a 2-tuple
         `(grad_xs, grad_vars)` with one gradient `Tensor` per variable.

  Returns:
    A function `h(x)` which returns the same value as `f(x)[0]` and whose
    gradient (as calculated by `tf.gradients`) is determined by `f(x)[1]`.
  """
  # Called as `@custom_gradient()` / `custom_gradient()` with no function
  # yet: return a decorator that re-enters with `f` bound.
  if f is None:
    return lambda func: custom_gradient(f=func)

  @Bind.decorator
  def decorated(wrapped, args, kwargs):
    """Decorated function with custom gradient."""
    # Dispatch on execution mode at call time, not at decoration time.
    if context.executing_eagerly():
      return _eager_mode_decorator(wrapped, args, kwargs)
    return _graph_mode_decorator(wrapped, args, kwargs)

  return tf_decorator.make_decorator(f, decorated(f))  # pylint: disable=no-value-for-parameter
class Bind(object):
  """Evaluates `d(f, args, kwargs)` when called, while supporting binding `f`.

  >>> @Bind.decorator
  ... def my_decorator(f, args, kwargs):
  ...   print("my_decorator called with", args, kwargs)
  ...   return f(*args, **kwargs)
  >>> class Foo(object):
  ...   @my_decorator
  ...   def bar(self, a, b, c):
  ...     return a * b * c
  >>> Foo.bar(None, 1, 2, c=3)
  my_decorator called with (None, 1, 2) {'c': 3}
  6
  >>> foo = Foo()
  >>> foo.bar(1, 2, c=3)
  my_decorator called with (1, 2) {'c': 3}
  6
  """

  @classmethod
  def decorator(cls, d):
    """Returns a decorator that wraps its target function in a `Bind`."""
    def bind(f):
      return cls(f, d)
    return bind

  def __init__(self, f, d):
    # Wrapped callable and the decorating function applied on every call.
    self._func = f
    self._dec = d

  def __get__(self, instance, owner):
    # Descriptor protocol: when accessed through an instance, bind the
    # wrapped function to that instance and re-wrap it so decoration
    # survives method binding. Class-level access returns the Bind itself.
    if instance is None:
      return self
    bound = self._func.__get__(instance, owner)
    return tf_decorator.make_decorator(bound, Bind(bound, self._dec))

  def __call__(self, *a, **k):
    return self._dec(self._func, a, k)
def get_variable_by_name(var_name):
  """Given a variable name, retrieves a handle on the tensorflow Variable.

  Looks the name up in the GLOBAL_VARIABLES collection, keeping only
  trainable matches. Returns None when the variable exists but is not
  trainable; raises ValueError when nothing matches or the trainable
  match is ambiguous.
  """
  matches = ops.get_collection(
      ops.GraphKeys.GLOBAL_VARIABLES, scope="{}:0".format(var_name))
  if not matches:
    raise ValueError("Unsuccessful at finding variable {}.".format(var_name))
  # Filter out non-trainable variables.
  trainable = [v for v in matches if v.trainable]
  if not trainable:
    # The variable is not trainable.
    return None
  if len(trainable) > 1:
    raise ValueError(
        "Unsuccessful at finding trainable variable {}. "
        "Number of candidates: {}. "
        "Candidates: {}".format(var_name, len(trainable), trainable))
  return trainable[0]
def get_dependent_variables(input_ops, output_ops):
  """Finds variables involved in the subgraph b/w input_ops and output_ops."""
  # Wrap the outputs in identities; this avoids the edge-case when
  # input_ops == output_ops.
  wrapped_outputs = nest.map_structure(gen_array_ops.identity, output_ops)
  walked_ops = op_selector.get_backward_walk_ops(
      seed_ops=nest.flatten(wrapped_outputs),
      stop_at_ts=nest.flatten(input_ops),
      inclusive=False,
      only_differentiable=True)
  # Map variable-typed ops back to their tf.Variable handles, dropping
  # names that do not resolve to a trainable variable.
  dependent_vars = []
  for walked_op in walked_ops:
    if walked_op.type not in VAR_OP_TYPES:
      continue
    candidate = get_variable_by_name(walked_op.name)
    if candidate is not None:
      dependent_vars.append(candidate)
  return dependent_vars
def _graph_mode_decorator(f, args, kwargs):
  """Implement custom gradient decorator for graph mode.

  Runs `f` under a tape, registers a custom gradient function under a unique
  op name, and routes the results through `IdentityN` with a gradient
  override so that `tf.gradients` calls `grad_fn` instead of the default
  gradients of the ops inside `f`.

  Args:
    f: function returning `(result, grad_fn)` as described in
      `custom_gradient`.
    args: positional inputs to `f`; each is converted to a tensor.
    kwargs: must be empty; keyword arguments are unsupported in graph mode.

  Returns:
    The forward result of `f`, re-packed through the gradient-overridden
    identities.
  """
  # TODO(rsepassi): Add support for kwargs
  if kwargs:
    raise ValueError(
        "The custom_gradient decorator currently supports keywords "
        "arguments only when eager execution is enabled.")
  # Unique name so each decorated call gets its own registered gradient.
  name = "CustomGradient-%s" % ops.uid()
  args = [ops.convert_to_tensor(x) for x in args]
  # Checking global and local variables attempts to ensure that no non-resource
  # Variables are added to the graph.
  current_var_scope = variable_scope.get_variable_scope()
  before_vars = set(
      [v.experimental_ref() for v in current_var_scope.global_variables() +
       current_var_scope.local_variables()])
  # Record the forward pass on a tape so we can later discover which
  # variables were read by `f`.
  with backprop.GradientTape() as tape:
    result, grad_fn = f(*args)
  after_vars = set(
      [v.experimental_ref() for v in current_var_scope.global_variables() +
       current_var_scope.local_variables()])
  # Variables created *during* `f` must be resource variables, otherwise the
  # tape/gradient machinery cannot track them.
  new_vars = after_vars - before_vars
  new_vars_list = [v.deref() for v in new_vars]
  for v in new_vars_list:
    if not resource_variable_ops.is_resource_variable(v):
      raise TypeError(
          "All variables used by a function wrapped with @custom_gradient must "
          "be `ResourceVariable`s. Ensure that no `variable_scope` is created "
          "with `use_resource=False`.")
  # The variables that grad_fn needs to return gradients for are the set of
  # variables used that are *not* part of the inputs.
  inputs = args
  variables_in_tape = frozenset([
      v.experimental_ref() for v in tape.watched_variables()
  ]) - frozenset(v.experimental_ref() for v in inputs)
  # Also include variables found by walking the graph between inputs and
  # outputs (catches reads the tape may not have watched).
  variables_in_subgraph = frozenset([
      v.experimental_ref()
      for v in get_dependent_variables(input_ops=inputs, output_ops=result)
  ])
  variables = list(
      [v.deref() for v in variables_in_subgraph.union(variables_in_tape)])
  # If variables were used, grad_fn must be able to receive them via the
  # `variables` keyword argument (explicitly or through **kwargs).
  grad_argspec = tf_inspect.getfullargspec(grad_fn)
  variables_in_signature = ("variables" in grad_argspec.args or
                            grad_argspec.varkw)
  if variables and not variables_in_signature:
    raise TypeError("If using @custom_gradient with a function that "
                    "uses variables, then grad_fn must accept a keyword "
                    "argument 'variables'.")
  if variables_in_signature and not variables:
    # User seems to intend to use variables but none were captured.
    if not variable_scope.get_variable_scope().use_resource:
      raise TypeError("If using @custom_gradient with a function that "
                      "uses variables, the enclosing variable scope must "
                      "have use_resource=True.")
    else:
      logging.warn("@custom_gradient grad_fn has 'variables' in signature, but "
                   "no ResourceVariables were used on the forward pass.")
  flat_result = nest.flatten(result)
  flat_result_len = len(flat_result)
  # One IdentityN over outputs + inputs + variables ties everything to the
  # registered gradient below.
  all_tensors = flat_result + args + variables
  def tape_grad_fn(*result_grads):
    """Custom grad fn wrapper."""
    # Only the gradients of the outputs are fed to the user's grad_fn.
    result_grads = result_grads[:flat_result_len]
    if variables:
      input_grads, variable_grads = grad_fn(*result_grads, variables=variables)
      if len(variable_grads) != len(variables):
        raise ValueError("Must return gradient for each variable from "
                         "@custom_gradient grad_fn.")
    else:
      input_grads = grad_fn(*result_grads)
      variable_grads = []
    # Need to return one value per input to the IdentityN, so pad the
    # gradients of the inputs of the custom_gradient function with the
    # gradients of the outputs as well.
    input_grads = nest.flatten(input_grads)
    return ([None] * flat_result_len) + input_grads + variable_grads
  @ops.RegisterGradient(name)
  def internal_grad_fn(unused_op, *result_grads):  # pylint: disable=unused-variable
    """Custom grad fn wrapper."""
    return tape_grad_fn(*result_grads)
  original_tensors = all_tensors
  # Swap the default IdentityN gradient for the one registered above.
  with ops.get_default_graph().gradient_override_map({"IdentityN": name}):
    all_tensors = array_ops.identity_n(all_tensors)
  original_tensors = [ops.convert_to_tensor(x) for x in original_tensors]
  # Propagate handle data for happier shape inference for resource variables.
  for i, t in enumerate(original_tensors):
    if t.dtype == dtypes.resource and hasattr(t, "_handle_data"):
      all_tensors[i]._handle_data = t._handle_data  # pylint: disable=protected-access
  # Record for eager/function tapes too, not just graph gradients.
  tape_lib.record_operation(
      f.__name__, all_tensors, original_tensors, tape_grad_fn)
  for ot, t in zip(original_tensors, all_tensors):
    copy_handle_data(ot, t)
  # Only the output slice of the identities is handed back to the caller.
  return nest.pack_sequence_as(
      structure=result, flat_sequence=all_tensors[:flat_result_len])
def _eager_mode_decorator(f, args, kwargs):
  """Implement custom gradient decorator for eager mode.

  Runs `f` under a tape to discover watched variables, then records a custom
  backward function on the tape so that `GradientTape.gradient` routes
  through `grad_fn`.

  Args:
    f: function returning `(result, grad_fn)` as described in
      `custom_gradient`.
    args: positional inputs to `f`.
    kwargs: keyword inputs to `f` (supported in eager mode).

  Returns:
    The forward result of `f`, flattened through identities and re-packed.
  """
  with backprop.GradientTape() as tape:
    result, grad_fn = f(*args, **kwargs)
  all_inputs = list(args) + list(kwargs.values())
  # The variables that grad_fn needs to return gradients for are the set of
  # variables used that are *not* part of the inputs.
  variables = [
      v.deref()  # pylint: disable=g-complex-comprehension
      for v in set(v.experimental_ref() for v in tape.watched_variables())
      if all(v.deref() is not i for i in all_inputs)
  ]
  # grad_fn must accept `variables` (explicitly or via **kwargs) whenever
  # variables were captured.
  grad_argspec = tf_inspect.getfullargspec(grad_fn)
  if (variables and ("variables" not in grad_argspec.args) and
      not grad_argspec.varkw):
    raise TypeError("If using @custom_gradient with a function that "
                    "uses variables, then grad_fn must accept a keyword "
                    "argument 'variables'.")
  flat_result = nest.flatten(result)
  # TODO(apassos) consider removing the identity below.
  flat_result = [gen_array_ops.identity(x) for x in flat_result]
  input_tensors = [ops.convert_to_tensor(x) for x
                   in list(args) + list(variables)]
  recorded_inputs = input_tensors
  arg_count = len(args)
  def actual_grad_fn(*result_grads):
    """Custom grad fn wrapper."""
    if variables:
      input_grads, variable_grads = grad_fn(*result_grads, variables=variables)
      if len(variable_grads) != len(variables):
        raise ValueError("Must return gradient for each variable from "
                         "@custom_gradient grad_fn.")
    else:
      input_grads = grad_fn(*result_grads)
      variable_grads = []
    # The user must supply exactly one gradient per positional input.
    flat_grads = nest.flatten(input_grads)
    if len(flat_grads) != arg_count:
      raise ValueError(
          "custom_gradient function expected to return", arg_count,
          "gradients but returned", len(flat_grads), "instead.")
    return nest.flatten(input_grads) + variable_grads
  # Register the custom backward function on the tape for these tensors.
  tape_lib.record_operation(f.__name__, flat_result, recorded_inputs,
                            actual_grad_fn)
  flat_result = list(flat_result)
  return nest.pack_sequence_as(result, flat_result)
@tf_export("recompute_grad")
def recompute_grad(f):
  """An eager-compatible version of recompute_grad.

  For f(*args, **kwargs), this supports gradients with respect to args, or to
  gradients with respect to any variables residing in the kwarg 'variables'.
  Note that for keras layer and model objects, this is handled automatically.

  Warning: If `f` was originally a tf.keras Model or Layer object, `g` will not
  be able to access the member variables of that object, because `g` returns
  through the wrapper function `inner`. When recomputing gradients through
  objects that inherit from keras, we suggest keeping a reference to the
  underlying object around for the purpose of accessing these variables.

  Args:
    f: function `f(*x)` that returns a `Tensor` or sequence of `Tensor`
      outputs.

  Returns:
    A function `g` that wraps `f`, but which recomputes `f` on the backwards
    pass of a gradient call.
  """
  # TODO(cdfreeman) Add is_recomputing functionality from graph mode version
  @custom_gradient
  def inner(*args, **kwargs):
    """Inner function closure for calculating gradients."""
    result = f(*args, **kwargs)
    def grad(dresult, variables=None):
      """Gradient function calculation for inner function."""
      # Re-run the forward pass under a fresh tape instead of keeping the
      # original activations alive - this is the memory/compute trade-off.
      with backprop.GradientTape() as t:
        t.watch(args)
        if variables is not None:
          t.watch(variables)
        # Ordering the recompute after `dresult` keeps the forward
        # re-execution on the backward pass.
        with ops.control_dependencies([dresult]):
          result = f(*args, **kwargs)
      kw_vars = []
      if variables is not None:
        kw_vars = list(variables)
      # Seed the backward pass with the incoming gradient `dresult`.
      grads = t.gradient(
          result, list(args) + kw_vars, output_gradients=[dresult])
      # Split into (gradients w.r.t. args, gradients w.r.t. variables).
      return grads[:len(args)], grads[len(args):]
    return result, grad
  return inner
@tf_export("grad_pass_through")
def grad_pass_through(f):
  """Creates a grad-pass-through op with the forward behavior provided in f.

  The returned function behaves exactly like `f` on the forward pass, but on
  the backward pass the original op is replaced by an identity: incoming
  gradients flow straight through to the inputs, and any variables the
  wrapped op touches receive no gradient at all.

  For example:

  ```python
  x = tf.Variable(1.0, name="x")
  z = tf.Variable(3.0, name="z")
  with tf.GradientTape() as tape:
    # y will evaluate to 9.0
    y = tf.grad_pass_through(x.assign)(z**2)
  # grads will evaluate to 6.0
  grads = tape.gradient(y, z)
  ```

  Another example is a 'differentiable' moving average approximation, where
  gradients are allowed to flow into the last value fed to the moving average,
  but the moving average is still used for the forward pass:

  ```python
  x = ...  # Some scalar value
  # A moving average object, we don't need to know how this is implemented
  moving_average = MovingAverage()
  with backprop.GradientTape() as tape:
    # mavg_x will evaluate to the current running average value
    mavg_x = tf.grad_pass_through(moving_average)(x)
  grads = tape.gradient(mavg_x, x)  # grads will evaluate to 1.0
  ```

  Args:
    f: function `f(*x)` that returns a `Tensor` or nested structure of
      `Tensor` outputs.

  Returns:
    A function `h(x)` which returns the same values as `f(x)` and whose
    gradients are the same as those of an identity function.
  """
  @custom_gradient
  def _grad_pass_through_op(*args, **kwargs):
    def grad(*dy, **grad_kwargs):
      captured_variables = grad_kwargs.get("variables")
      if captured_variables is None:
        # No variables captured: pass the upstream gradients through as-is.
        return dy
      # Variables involved in the wrapped op will not receive gradients.
      return dy, [None] * len(captured_variables)
    return f(*args, **kwargs), grad
  return tf_decorator.make_decorator(f, _grad_pass_through_op)
| |
"""A connector for Twitch."""
import asyncio
import hashlib
import hmac
import json
import logging
import os
import re
import secrets
import aiohttp
from voluptuous import Required
from opsdroid.connector import Connector, register_event
from opsdroid.const import (
TWITCH_API_ENDPOINT,
TWITCH_IRC_MESSAGE_REGEX,
TWITCH_JSON,
TWITCH_OAUTH_ENDPOINT,
TWITCH_WEBHOOK_ENDPOINT,
)
from opsdroid.events import BanUser, DeleteMessage, JoinRoom, LeaveRoom, Message
from . import events as twitch_event
# Schema used by opsdroid to validate this connector's configuration.
CONFIG_SCHEMA = {
    Required("code"): str,  # OAuth authorization code from step one of the flow
    Required("client-id"): str,  # Client ID of the Twitch app created for opsdroid
    Required("client-secret"): str,  # Client secret of that Twitch app
    Required("channel"): str,  # Twitch channel the bot should join
    "redirect": str,  # OAuth redirect URI (defaults to http://localhost)
    "forward-url": str,  # Webhook callback base URL when web.base-url is unset
    "always-listening": bool,  # Keep listening even when the stream is offline
}
# Module-level logger for this connector.
_LOGGER = logging.getLogger(__name__)
class ConnectorTwitch(Connector):
"""A connector for Twitch."""
    def __init__(self, config, opsdroid=None):
        """Set up all the needed things for the connector.

        Reads the connector configuration, prepares the IRC websocket
        settings and generates a fresh secret used to validate Twitch
        webhook callbacks.

        Args:
            config (dict): Connector configuration (see ``CONFIG_SCHEMA``).
            opsdroid (OpsDroid): Running opsdroid instance; used for the web
                server base url and for parsing events.
        """
        super().__init__(config, opsdroid=opsdroid)
        _LOGGER.debug(_("Starting Twitch connector."))
        self.name = config.get("name", "twitch")
        self.opsdroid = opsdroid
        # With "always-listening" we treat the stream as live from the start;
        # otherwise listening starts on the "stream changed" webhook event.
        self.is_live = config.get("always-listening", False)
        self.default_target = config["channel"]
        # OAuth token is obtained/loaded later, in connect().
        self.token = None
        self.code = config["code"]
        self.client_id = config["client-id"]
        self.client_secret = config["client-secret"]
        self.redirect = config.get("redirect", "http://localhost")
        self.bot_name = config.get("bot-name", "opsdroid")
        self.websocket = None
        self.webhook_lease_seconds = config.get(
            "webhook-lease-seconds", 60 * 60 * 24
        )  # default 1 day
        self.user_id = None
        # Random secret registered with Twitch on webhook subscription; Twitch
        # signs payloads with it (see validate_request).
        self.webhook_secret = secrets.token_urlsafe(18)
        # TODO: Allow usage of SSL connection
        self.server = "ws://irc-ws.chat.twitch.tv"
        self.port = "80"
        self.loop = asyncio.get_event_loop()
        # Counts consecutive reconnection attempts for exponential backoff.
        self.reconnections = 0
        self.auth_file = TWITCH_JSON
        try:
            self.base_url = opsdroid.config["web"]["base-url"]
        except KeyError:
            self.base_url = config.get("forward-url")
async def validate_request(self, request, secret):
"""Compute sha256 hash of request and secret.
Twitch suggests that we should always validate the requests made to our webhook callback url,
that way we protect ourselves from received an event that wasn't sent by Twitch. After sending
``hub.secret`` on our webhook subscribe, Twitch will use that secret to send the ``x-hub-signature``
header, that is the hash that we should compare with our own computed one, if they don't match
then the request is not valid and shouldn't be parsed.
"""
signature = request.headers.get("x-hub-signature")
if signature:
signature = signature.replace("sha256=", "")
payload = await request.read()
computed_hash = hmac.new(
secret.encode(), msg=payload, digestmod=hashlib.sha256
).hexdigest()
return signature == computed_hash
async def get_user_id(self, channel, token, client_id):
"""Call twitch api to get broadcaster user id.
A lot of webhooks expect you to pass your user id in order to get the
notification when a user subscribes or folllows the broadcaster
channel.
Since we are calling the Twitch API to get our ``self.user_id`` on connect,
we will use this method to handle when a token has expired, so if we get a
401 status back from Twitch we will raise a ClientResponseError and send back
the status and the message Unauthorized, that way we can refresh the oauth token
on connect if the exception is raised.
Args:
channel (string): Channel that we wish to get the broadcaster id from.
token (string): OAuth token obtained from previous authentication.
client_id (string): Client ID obtained from creating a Twitch App to iteract with opsdroid.
Return:
string: Broadcaster/user id received from Twitch
Raises:
ConnectionError: Raised exception if we got an unauthorized code from twitch. Our
oauth token probably expired.
"""
async with aiohttp.ClientSession() as session:
response = await session.get(
f"{TWITCH_API_ENDPOINT}/users",
headers={"Authorization": f"Bearer {token}", "Client-ID": client_id},
params={"login": channel},
)
if response.status == 401:
raise ConnectionError("Unauthorized")
if response.status >= 400:
_LOGGER.warning(
_("Unable to receive broadcaster id - Error: %s, %s."),
response.status,
response.text,
)
response = await response.json()
return response["data"][0]["id"]
async def send_message(self, message):
"""Send message throught websocket.
To send a message to the Twitch IRC server through websocket we need to use the
same style, we will always send the command `PRIVMSG` and the channel we want to
send the message to. The message also comes after :.
Args:
message(string): Text message that should be sent to Twitch chat.
"""
await self.websocket.send_str(f"PRIVMSG #{self.default_target} :{message}")
def save_authentication_data(self, data):
"""Save data obtained from requesting authentication token."""
with open(self.auth_file, "w") as file:
json.dump(data, file)
def get_authorization_data(self):
"""Open file containing authentication data."""
with open(self.auth_file, "r") as file:
data = json.load(file)
return data
async def request_oauth_token(self):
"""Call Twitch and requests new oauth token.
This method assumes that the user already has the code obtained from
following the first oauth step which is making a get request to the
twitch api endpoint: ``https://id.twitch.tv/oauth2/authorize`` and passing
the needed client id, redirect uri and needed scopes to work with the bot.
This method is the second - and final step - when trying to get the oauth token.
We use the code that the user obtained on step one - check documentation - and
make a post request to Twitch to get the ``access_token`` and ``refresh_token`` so
we can refresh the access_token when needed. Note that the refresh_token doesn't
change with each refresh.
"""
async with aiohttp.ClientSession() as session:
params = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"grant_type": "authorization_code",
"redirect_uri": self.redirect,
"code": self.code,
}
resp = await session.post(TWITCH_OAUTH_ENDPOINT, params=params)
data = await resp.json()
try:
self.token = data["access_token"]
self.save_authentication_data(data)
except KeyError:
_LOGGER.warning(_("Unable to request oauth token - %s"), data)
async def refresh_token(self):
"""Attempt to refresh the oauth token.
Twitch oauth tokens expire after a day, so we need to do a post request to twitch
to get a new token when ours expires. The refresh token is already saved on the ``twitch.json``
file so we can just open that file, get the appropriate token and then update the file with the
new received data.
"""
_LOGGER.warning(_("Oauth token expired, attempting to refresh token."))
refresh_token = self.get_authorization_data()
async with aiohttp.ClientSession() as session:
params = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"grant_type": "refresh_token",
"redirect_uri": self.redirect,
"refresh_token": refresh_token["refresh_token"],
}
resp = await session.post(TWITCH_OAUTH_ENDPOINT, params=params)
data = await resp.json()
self.token = data["access_token"]
self.save_authentication_data(data)
async def send_handshake(self):
"""Send needed data to the websockets to be able to make a connection.
If we try to connect to Twitch with an expired oauth token, we need to
request a new token. The problem is that Twitch doesn't close the websocket
and will only notify the user that the login authentication failed after
we sent the ``PASS`` , ``NICK`` and ``JOIN`` command to the websocket.
So we need to send the initial commands to Twitch, await for a status with
``await self.websockets.recv()`` and there will be our notification that the
authentication failed in the form of ``:tmi.twitch.tv NOTICE * :Login authentication failed``
This method was created to prevent us from having to copy the same commands
and send them to the websocket. If there is an authentication issue, then we
will have to send the same commands again - just with a new token.
"""
await self.websocket.send_str(f"PASS oauth:{self.token}")
await self.websocket.send_str(f"NICK {self.bot_name}")
await self.websocket.send_str(f"JOIN #{self.default_target}")
await self.websocket.send_str("CAP REQ :twitch.tv/commands")
await self.websocket.send_str("CAP REQ :twitch.tv/tags")
await self.websocket.send_str("CAP REQ :twitch.tv/membership")
    async def connect_websocket(self):
        """Connect to the irc chat through websockets.

        Our connect method will attempt to make a connection to Twitch through
        the websockets server. If the connection is made, any sort of failure
        received from the websocket will be in the form of a ``NOTICE``,
        unless Twitch closes the websocket connection.

        In this method we attempt to connect to the websocket and use the
        previously saved oauth token to join a twitch channel.

        Once we are logged in and on a Twitch channel, we will request access
        to special features from Twitch.

        The ``commands`` request is used to allow us to send special commands
        to the Twitch IRC server.

        The ``tags`` request is used to receive more information with each
        message received from twitch. Tags enable us to get metadata such as
        message ids.

        The ``membership`` request is used to get notifications when an user
        enters the chat server (it doesn't mean that the user is watching the
        streamer) and also when a user leaves the chat channel.
        """
        _LOGGER.info(_("Connecting to Twitch IRC Server."))
        async with aiohttp.ClientSession() as session:
            # heartbeat=600 makes aiohttp ping the server every 10 minutes to
            # keep the connection alive.
            async with session.ws_connect(
                f"{self.server}:{self.port}", heartbeat=600
            ) as websocket:
                self.websocket = websocket
                await self.send_handshake()
                # Blocks here consuming chat messages until the socket closes.
                await self.get_messages_loop()
async def webhook(self, topic, mode):
"""Subscribe to a specific webhook.
Twitch has different webhooks that you can subscribe to, when you subscribe to a
particular webhook, a ``post`` request needs to be made containing a ``JSON`` payload,
that tells Twitch what subscription you are attempting to do.
When you submit the ``post`` request to ``TWITCH_WEBHOOK_ENDPOINT`` , twitch will send back
a ``get`` request to your ``callback`` url (``hub.callback`` ) with a challenge. Twitch will
then await for a response containing only the challenge in plain text.
With this in mind, that is the reason why we open two routes (``get`` and ``post`` ) that link
to ``/connector/<connector name>``.
The ``hub.topic`` represents the webhook that we want to suscribe from twitch.
The ``hub.lease_seconds`` defines the number of seconds until the subscription expires, maximum
is 864000 seconds (10 days), but we will set up a day as our expiration since our app oauth
tokens seem to expire after a day.
Args:
topic (string): Twitch webhook url to subscribe/unsubscribe to.
mode (string): subscribe or unsuscribe to the webhook.
"""
_LOGGER.info(_("Attempting to connect to webhook %s."), topic)
if topic == "follows":
topic = f"{TWITCH_API_ENDPOINT}/users/follows?to_id={self.user_id}&first=1"
if topic == "stream changed":
topic = f"{TWITCH_API_ENDPOINT}/streams?user_id={self.user_id}"
if topic == "subscribers":
topic = f"{TWITCH_API_ENDPOINT}/subscriptions/events?broadcaster_id={self.user_id}&first=1"
headers = {"Client-ID": self.client_id, "Authorization": f"Bearer {self.token}"}
async with aiohttp.ClientSession() as session:
payload = {
"hub.callback": f"{self.base_url}/connector/{self.name}",
"hub.mode": mode,
"hub.topic": topic,
"hub.lease_seconds": self.webhook_lease_seconds,
"hub.secret": self.webhook_secret,
}
response = await session.post(
TWITCH_WEBHOOK_ENDPOINT, headers=headers, json=payload
)
if response.status >= 400:
_LOGGER.debug(_("Error: %s - %s"), response.status, response.text)
async def handle_challenge(self, request):
"""Challenge handler for get request made by Twitch.
Upon subscription to a Twitch webhook, Twitch will do a get request to the
``callback`` url provided to check if the url exists. Twitch will do a get request
with a challenge and expects the ``callback`` url to return that challenge in plain-text
back to Twitch.
This is what we are doing here, we are getting ``hub.challenge`` from the request and return
it in plain-text, if we can't find that challenge we will return a status code 500.
Args:
request (aiohttp.web.Request): Request made to the get route created for webhook subscription.
Returns:
aiohttp.web.Response: if request contains ``hub.challenge`` we return it, otherwise return status 500.
"""
challenge = request.rel_url.query.get("hub.challenge")
if challenge:
return aiohttp.web.Response(text=challenge)
_LOGGER.debug(_("Failed to get challenge from GET Request made by Twitch."))
return aiohttp.web.Response(status=500)
    async def twitch_webhook_handler(self, request):
        """Handle event from Twitch webhooks.

        This method will handle events when they are pushed to the webhook
        post route. Each webhook will send a different kind of payload so we
        can handle each event and trigger the right opsdroid event for the
        received payload.

        For follow events the payload will contain ``from_id`` (broadcaster
        id), ``from_username`` (broadcaster username), ``to_id`` (follower
        id), ``to_name`` (follower name) and ``followed_at`` (timestamp).

        For stream changes a lot more things are returned but we only really
        care about ``type`` (if live/offline) and ``title`` (stream title).

        For subscription events we will want to know ``event_type``,
        ``timestamp``, ``event_data.plan_name``, ``event_data.is_gift``,
        ``event_data.tier``, ``event_data.username`` and
        ``event_data.gifter_name``.

        Args:
            request (aiohttp.web.Request): Request made to the post route created for webhook subscription.

        Return:
            aiohttp.web.Response: Send a ``received`` message and status 200 - Twitch will keep sending the event if it doesn't get the 200 status code.
        """
        # Reject payloads whose HMAC signature doesn't match our secret.
        valid = await self.validate_request(request, self.webhook_secret)
        payload = await request.json()
        if valid:
            try:
                # Twitch wraps the event in a one-element "data" list;
                # unpacking raises ValueError when the list is empty.
                [data] = payload.get("data")
                _LOGGER.debug(_("Got event from Twitch - %s") % data)
                # "followed_at" is only present on follow events.
                if data.get("followed_at"):
                    _LOGGER.debug(_("Follower event received by Twitch."))
                    user_followed = twitch_event.UserFollowed(
                        follower=data["from_name"],
                        followed_at=data["followed_at"],
                        connector=self,
                    )
                    await self.opsdroid.parse(user_followed)
                # "started_at" is only present on stream-change events when
                # the broadcast goes live.
                if data.get("started_at"):
                    _LOGGER.debug(_("Broadcaster went live event received by Twitch."))
                    self.is_live = True
                    await self.listen()
                    stream_started = twitch_event.StreamStarted(
                        title=data["title"],
                        viewers=data["viewer_count"],
                        started_at=data["started_at"],
                        connector=self,
                    )
                    await self.opsdroid.parse(stream_started)
                if data.get("event_type") == "subscriptions.notification":
                    _LOGGER.debug(_("Subscriber event received by Twitch."))
                    user_subscription = twitch_event.UserSubscribed(
                        user=data["event_data"]["user_name"],
                        message=data["event_data"]["message"],
                    )
                    await self.opsdroid.parse(user_subscription)
                if data.get("event_type") == "subscriptions.subscribe":
                    _LOGGER.debug(_("Subscriber event received by Twitch."))
                    user_subscription = twitch_event.UserSubscribed(
                        user=data["event_data"]["user_name"], message=None
                    )
                    await self.opsdroid.parse(user_subscription)
                if data.get("event_type") == "subscriptions.subscribe" and data[
                    "event_data"
                ].get("is_gift"):
                    _LOGGER.debug(_("Gifted subscriber event received by Twitch."))
                    gifted_subscription = twitch_event.UserGiftedSubscription(
                        gifter_name=data["event_data"]["gifter_name"],
                        gifted_named=data["event_data"]["user_name"],
                    )
                    await self.opsdroid.parse(gifted_subscription)
            except ValueError:
                # When the stream goes offline, Twitch will return ```data: []```
                # that will raise ValueError since it can't unpack empty list
                stream_ended = twitch_event.StreamEnded(connector=self)
                await self.opsdroid.parse(stream_ended)
                if not self.config.get("always-listening"):
                    self.is_live = False
                    # NOTE(review): disconnect_websockets is not visible here;
                    # if it is a coroutine this call is missing an `await` and
                    # would never run - confirm against its definition.
                    self.disconnect_websockets()
            return aiohttp.web.Response(text=json.dumps("Received"), status=200)
        return aiohttp.web.Response(text=json.dumps("Unauthorized"), status=401)
    async def connect(self):
        """Connect to Twitch services.

        Within our connect method we do a quick check to see if the file
        ``twitch.json`` exists in the application folder; if this file doesn't
        exist we assume that it's the first time the user is running opsdroid
        and we do the first request for the oauth token.

        If this file exists then we just need to read from the file, get the
        token in the file and attempt to connect to the websockets and
        subscribe to the Twitch events webhook.
        """
        if not os.path.isfile(self.auth_file):
            _LOGGER.info(
                _("No previous authorization data found, requesting new oauth token.")
            )
            await self.request_oauth_token()
        else:
            _LOGGER.info(
                _(
                    "Found previous authorization data, getting oauth token and attempting to connect."
                )
            )
            self.token = self.get_authorization_data()["access_token"]
        try:
            self.user_id = await self.get_user_id(
                self.default_target, self.token, self.client_id
            )
        except ConnectionError:
            # A 401 from Twitch means the saved token expired - refresh it
            # and retry once.
            await self.refresh_token()
            self.user_id = await self.get_user_id(
                self.default_target, self.token, self.client_id
            )
        # Setup routes for webhooks subscription
        self.opsdroid.web_server.web_app.router.add_get(
            f"/connector/{self.name}", self.handle_challenge
        )
        self.opsdroid.web_server.web_app.router.add_post(
            f"/connector/{self.name}", self.twitch_webhook_handler
        )
        # Subscribe to the Twitch webhooks we care about.
        await self.webhook("follows", "subscribe")
        await self.webhook("stream changed", "subscribe")
        await self.webhook("subscribers", "subscribe")
async def listen(self):
"""Listen method of the connector.
Every connector has to implement the listen method. When an
infinite loop is running, it becomes hard to cancel this task.
So we are creating a task and set it on a variable so we can
cancel the task.
If we need to reconnect to Twitch, Twitch will allow us to reconnect
immediatly on the first reconnect and then expects us to wait exponentially
to reconnect to the websocket.
"""
while self.is_live:
try:
await self.connect_websocket()
except ConnectionError as e:
_LOGGER.debug(e)
await asyncio.sleep(2 ** self.reconnections)
self.reconnections += 1
await self.connect_websocket()
    async def get_messages_loop(self):
        """Listen for and parse messages.

        Since we are using aiohttp websockets support we need to manually send
        a pong response every time Twitch asks for it. We also need to handle
        if the connection was closed and if it was closed but we are still
        live, then a ConnectionError exception is raised so we can attempt to
        reconnect to the chat server again.
        """
        async for msg in self.websocket:
            if msg.type == aiohttp.WSMsgType.TEXT:
                # Twitch PINGs arrive as plain text frames; answer to keep
                # the connection open.
                if "PING" in msg.data:
                    await self.websocket.send_str("PONG :tmi.twitch.tv")
                await self._handle_message(msg.data)
            if msg.type == aiohttp.WSMsgType.CLOSED:
                await self.websocket.close()
                # Only treat the close as an error while the stream is live;
                # listen() catches this and reconnects with backoff.
                if self.is_live:
                    raise ConnectionError(
                        "Connection to Twitch Chat Server dropped, reconnecting..."
                    )
async def _handle_message(self, message):
"""Handle message from websocket connection.
The message that we get from Twitch contains a lot of metadata, so we are using
regex named groups to get only the data that we need in order to parse a message
received.
We also need to check if whatever we received from the websocket is indeed a text
message or an event that we need to parse. We do a few checks to decide what should
be done with the message.
If opsdroid is running for a long time, the OAuth token will expire and the connection
to the websockets will send us back a ``:tmi.twitch.tv NOTICE * :Login authentication failed``
so if we receive that NOTICE we will attempt to refresh the token.
Twitch websockets send all the messages as strings, this includes PINGs, that means we will
keep getting PINGs as long as our connection is active, these messages tell us nothing important
so we made the decision to just hide them from the logs.
Args:
message (string): Message received from websocket.
"""
_LOGGER.debug(_("Got message from Twitch Connector chat - %s"), message)
chat_message = re.match(TWITCH_IRC_MESSAGE_REGEX, message)
join_event = re.match(r":(?P<user>.*)!.*JOIN", message)
left_event = re.match(r":(?P<user>.*)!.*PART ", message)
authentication_failed = re.match(
r":tmi.twitch.tv NOTICE \* :Login authentication failed", message
)
if authentication_failed:
self.refresh_token()
raise ConnectionError(
"OAuth token expire, need to reconnect to the chat service."
)
if chat_message:
text_message = Message(
text=chat_message.group("message").rstrip(),
user=chat_message.group("user"),
user_id=chat_message.group("user_id"),
raw_event=message,
target=f"#{self.default_target}",
event_id=chat_message.group("message_id"),
connector=self,
)
await self.opsdroid.parse(text_message)
if join_event:
joined_chat = JoinRoom(
user=join_event.group("user"),
raw_event=message,
target=f"#{self.default_target}",
connector=self,
)
await self.opsdroid.parse(joined_chat)
if left_event:
left_chat = LeaveRoom(
user=left_event.group("user"),
raw_event=message,
target=f"#{self.default_target}",
connector=self,
)
await self.opsdroid.parse(left_chat)
@register_event(Message)
async def _send_message(self, message):
"""Send message to twitch.
This method sends a text message to the chat service. We can't use the
default ``send`` method because we are also using different kinds of events
within this connector.
"""
_LOGGER.debug(_("Attempting to send %s to websocket!"), message.text)
await self.send_message(message.text)
@register_event(DeleteMessage)
async def remove_message(self, event):
"""Remove message from the chat.
This event is used when we need to remove a specific message from the chat
service. We need to pass the message id to remove a specific message. So this
method is calling the ``/delete`` method together with the message id to remove
that message.
"""
_LOGGER.debug(
_("DeleteMessage event fired - message with the id %s removed from chat"),
event.linked_event.event_id,
)
await self.send_message(f"/delete {event.linked_event.event_id}")
@register_event(BanUser)
async def ban_user(self, event):
"""Ban user from the channel.
This event will be used when we need to ban a specific user from the chat channel.
Banning a user will also remove all the messages sent by that user, so we don't need
to worry about removing a lot of mensages.
"""
_LOGGER.debug(
_("Ban event fired - user %s was banned from channel"), event.user
)
await self.send_message(f"/ban {event.user}")
@register_event(twitch_event.CreateClip)
async def create_clip(self):
"""Create clip from broadcast.
We send a post request to twitch to create a clip from the broadcast, Twitch will
return a response containing a clip ``id`` and ``edit_url`` . TWitch mentions that the
way to check if the clip was created successfully is by making a ``get`` request
to the ``clips`` API enpoint and query by the ``id`` obtained from the previous
request.
"""
async with aiohttp.ClientSession() as session:
headers = {
"Client-ID": self.client_id,
"Authorization": f"Bearer {self.token}",
}
resp = await session.post(
f"{TWITCH_API_ENDPOINT}/clips?broadcaster_id={self.user_id}",
headers=headers,
)
response = await resp.json()
clip_data = await session.get(
f"{TWITCH_API_ENDPOINT}/clips?id={response['data'][0]['id']}",
headers=headers,
)
if clip_data.status == 200:
resp = await clip_data.json()
[data] = resp.get("data")
_LOGGER.debug(_("Twitch clip created successfully."))
await self.send_message(data["embed_url"])
return
_LOGGER.debug(_("Failed to create Twitch clip %s"), response)
@register_event(twitch_event.UpdateTitle)
async def update_stream_title(self, event):
"""Update Twitch title.
To update your channel details you need to use Twitch API V5(kraken). The so called "New Twitch API"
doesn't have an enpoint to update the channel. To update your channel details you need to do a put
request and pass your title into the url.
Args:
event (twitch.events.UpdateTitle): opsdroid event containing ``status`` (your title).
"""
async with aiohttp.ClientSession() as session:
headers = {
"client-id": self.client_id,
"Authorization": f"Bearer {self.token}",
"Content-Type": "application/json",
}
param = {"title": event.status, "broadcaster_id": self.user_id}
resp = await session.patch(
f"{TWITCH_API_ENDPOINT}/channels",
headers=headers,
params=param,
)
if resp.status == 204:
_LOGGER.debug(_("Twitch channel title updated to %s"), event.status)
return
_LOGGER.debug(
_("Failed to update Twitch channel title. Error %s - %s"),
resp.status,
resp.message,
)
async def disconnect_websockets(self):
"""Disconnect from the websocket."""
self.is_live = False
close_method = getattr(self.websocket, "close", None)
if callable(close_method):
asyncio.ensure_future(close_method(), loop=self.loop)
self.websocket = None
async def disconnect(self):
"""Disconnect from twitch.
Before opsdroid exists we will want to disconnect the Twitch connector, we need to
do some clean up. We first set the while loop flag to False to stop the loop and then
try to unsubscribe from all the webhooks that we subscribed to on connect - we want to
do that because when we start opsdroid and the ``connect`` method is called we will send
another subscribe request to Twitch. After we will send a ``PART`` command to leave the
channel that we joined on connect.
Finally we try to close the websocket connection.
"""
if self.is_live:
await self.disconnect_websockets()
await self.webhook("follows", "unsubscribe")
await self.webhook("stream changed", "unsubscribe")
await self.webhook("subscribers", "unsubscribe")
return
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tempfile
from typing import Any, Generator, Optional, Tuple, Union
import yaml
from cached_property import cached_property
from kubernetes import client, config, watch
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
def _load_body_to_dict(body):
    """Parse a YAML resource definition into a dictionary.

    :param body: YAML document describing a Kubernetes resource
    :type body: str
    :return: the parsed definition
    :rtype: dict
    :raises AirflowException: when ``body`` is not valid YAML
    """
    try:
        body_dict = yaml.safe_load(body)
    except yaml.YAMLError as e:
        # Chain the original parser error so its location details survive.
        raise AirflowException("Exception when loading resource definition: %s\n" % e) from e
    return body_dict
class KubernetesHook(BaseHook):
    """
    Creates Kubernetes API connection.

    - use in cluster configuration by using ``extra__kubernetes__in_cluster`` in connection
    - use custom config by providing path to the file using ``extra__kubernetes__kube_config_path``
    - use custom configuration by providing content of kubeconfig file via
      ``extra__kubernetes__kube_config`` in connection
    - use default config by providing no extras

    This hook checks for a configuration option in the above order. Once an option is present it
    will use this configuration.

    .. seealso::
        For more information about Kubernetes connection:
        :ref:`apache-airflow:howto/connection:kubernetes`

    :param conn_id: the connection to Kubernetes cluster
    :type conn_id: str
    :param client_configuration: optional ``kubernetes.client.Configuration`` applied when a
        kubeconfig file is loaded (not used for in-cluster configuration)
    :type client_configuration: Optional[client.Configuration]
    """

    def __init__(
        self, conn_id: str = "kubernetes_default", client_configuration: Optional[client.Configuration] = None
    ) -> None:
        super().__init__()
        self.conn_id = conn_id
        self.client_configuration = client_configuration

    def get_conn(self) -> Any:
        """Returns kubernetes api session for use with requests"""
        connection = self.get_connection(self.conn_id)
        extras = connection.extra_dejson
        # The three configuration sources below are mutually exclusive.
        in_cluster = extras.get("extra__kubernetes__in_cluster")
        kubeconfig_path = extras.get("extra__kubernetes__kube_config_path")
        kubeconfig = extras.get("extra__kubernetes__kube_config")
        num_selected_configuration = len([o for o in [in_cluster, kubeconfig, kubeconfig_path] if o])
        if num_selected_configuration > 1:
            raise AirflowException(
                "Invalid connection configuration. Options extra__kubernetes__kube_config_path, "
                "extra__kubernetes__kube_config, extra__kubernetes__in_cluster are mutually exclusive. "
                "You can only use one option at a time."
            )
        if in_cluster:
            self.log.debug("loading kube_config from: in_cluster configuration")
            config.load_incluster_config()
            return client.ApiClient()
        if kubeconfig_path is not None:
            self.log.debug("loading kube_config from: %s", kubeconfig_path)
            config.load_kube_config(
                config_file=kubeconfig_path, client_configuration=self.client_configuration
            )
            return client.ApiClient()
        if kubeconfig is not None:
            # The kubeconfig *content* is written to a temporary file because the
            # kubernetes client only loads configuration from a file path.
            with tempfile.NamedTemporaryFile() as temp_config:
                self.log.debug("loading kube_config from: connection kube_config")
                temp_config.write(kubeconfig.encode())
                temp_config.flush()
                config.load_kube_config(
                    config_file=temp_config.name, client_configuration=self.client_configuration
                )
            return client.ApiClient()
        # No extras at all: fall back to the default kubeconfig lookup.
        self.log.debug("loading kube_config from: default file")
        config.load_kube_config(client_configuration=self.client_configuration)
        return client.ApiClient()

    @cached_property
    def api_client(self) -> Any:
        """Cached Kubernetes API client (created on first access)"""
        return self.get_conn()

    def create_custom_object(
        self, group: str, version: str, plural: str, body: Union[str, dict], namespace: Optional[str] = None
    ):
        """
        Creates custom resource definition object in Kubernetes

        :param group: api group
        :type group: str
        :param version: api version
        :type version: str
        :param plural: api plural
        :type plural: str
        :param body: crd object definition (YAML string or already-parsed dict)
        :type body: Union[str, dict]
        :param namespace: kubernetes namespace; defaults to the connection's namespace
        :type namespace: str
        :raises AirflowException: if the Kubernetes API rejects the request
        """
        api = client.CustomObjectsApi(self.api_client)
        if namespace is None:
            namespace = self.get_namespace()
        if isinstance(body, str):
            # Accept raw YAML as well as a pre-built dictionary.
            body = _load_body_to_dict(body)
        try:
            response = api.create_namespaced_custom_object(
                group=group, version=version, namespace=namespace, plural=plural, body=body
            )
            self.log.debug("Response: %s", response)
            return response
        except client.rest.ApiException as e:
            raise AirflowException("Exception when calling -> create_custom_object: %s\n" % e)

    def get_custom_object(
        self, group: str, version: str, plural: str, name: str, namespace: Optional[str] = None
    ):
        """
        Get custom resource definition object from Kubernetes

        :param group: api group
        :type group: str
        :param version: api version
        :type version: str
        :param plural: api plural
        :type plural: str
        :param name: crd object name
        :type name: str
        :param namespace: kubernetes namespace; defaults to the connection's namespace
        :type namespace: str
        :raises AirflowException: if the object cannot be fetched
        """
        api = client.CustomObjectsApi(self.api_client)
        if namespace is None:
            namespace = self.get_namespace()
        try:
            response = api.get_namespaced_custom_object(
                group=group, version=version, namespace=namespace, plural=plural, name=name
            )
            return response
        except client.rest.ApiException as e:
            raise AirflowException("Exception when calling -> get_custom_object: %s\n" % e)

    def get_namespace(self) -> str:
        """Returns the namespace defined in the connection (or "default")"""
        connection = self.get_connection(self.conn_id)
        extras = connection.extra_dejson
        namespace = extras.get("extra__kubernetes__namespace", "default")
        return namespace

    def get_pod_log_stream(
        self,
        pod_name: str,
        container: Optional[str] = "",
        namespace: Optional[str] = None,
    ) -> Tuple[watch.Watch, Generator[str, None, None]]:
        """
        Retrieves a log stream for a container in a kubernetes pod.

        Returns the watcher together with the stream so the caller can stop
        the watch when done.

        :param pod_name: pod name
        :type pod_name: str
        :param container: container name
        :param namespace: kubernetes namespace; defaults to the connection's namespace
        :type namespace: str
        """
        api = client.CoreV1Api(self.api_client)
        watcher = watch.Watch()
        return (
            watcher,
            watcher.stream(
                api.read_namespaced_pod_log,
                name=pod_name,
                container=container,
                namespace=namespace if namespace else self.get_namespace(),
            ),
        )

    def get_pod_logs(
        self,
        pod_name: str,
        container: Optional[str] = "",
        namespace: Optional[str] = None,
    ):
        """
        Retrieves a container's log from the specified pod.

        ``_preload_content=False`` returns the raw response object so the
        log can be streamed rather than loaded into memory at once.

        :param pod_name: pod name
        :type pod_name: str
        :param container: container name
        :param namespace: kubernetes namespace; defaults to the connection's namespace
        :type namespace: str
        """
        api = client.CoreV1Api(self.api_client)
        return api.read_namespaced_pod_log(
            name=pod_name,
            container=container,
            _preload_content=False,
            namespace=namespace if namespace else self.get_namespace(),
        )
| |
"""
-------------------------------------------------------
views
holds all the views
-------------------------------------------------------
Author: Dallas Fraser
ID: 110242560
Email: fras2560@mylaurier.ca
Version: 2014-09-18
-------------------------------------------------------
"""
from flask import render_template, json, request, make_response
from inducer import app
from inducer.container import induced_subgraph, k_vertex
from pprint import PrettyPrinter
from inducer.helper import convert_to_networkx, convert_to_d3, text_to_d3
from inducer.helper import complement, join, d3_to_text
from inducer.dsatur import inducer_coloring as dcoloring
from inducer.backtracking import inducer_coloring as coloring
from inducer.EvenHoleFree import even_hole_free
from os.path import join as filepath
from os import getcwd
from inducer.clique_cutset import clique_cutset
from inducer.strong_stable_set import strong_stable_set
from inducer.critical import critical as is_critical
from inducer.isk4 import ISK4Free
pp = PrettyPrinter(indent=5)
# Upload whitelist: only plain-text graph files are accepted.
ALLOWED_EXTENSIONS = {'txt'}


def allowed_file(filename):
    """Return True when ``filename`` has a whitelisted extension.

    The comparison is case-insensitive, so uploads such as ``GRAPH.TXT``
    are accepted as well (the original check rejected upper-case
    extensions).
    """
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route("/")
def index():
return render_template('new_finder.html')
@app.route("/strong_stable_set", methods=["POST"])
def sss():
graph = json.loads(request.data)
g = convert_to_networkx(graph)
subgraph = strong_stable_set(g)
if subgraph is None:
subgraph = {'success': False}
else:
subgraph = convert_to_d3(subgraph)
subgraph['success'] = True
return json.dumps(subgraph)
@app.route("/critical", methods=["POST"])
def critical():
graph = json.loads(request.data)
g = convert_to_networkx(graph)
return json.dumps(is_critical(g))
@app.route("/clique_cutset", methods=["POST"])
def cutset():
graph = json.loads(request.data)
g = convert_to_networkx(graph)
subgraph = clique_cutset(g)
if subgraph is None:
subgraph = {'success': False}
else:
subgraph = convert_to_d3(subgraph)
subgraph['success'] = True
return json.dumps(subgraph)
@app.route("/evenholefree", methods=["POST"])
def ehf():
graph = json.loads(request.data)
g = convert_to_networkx(graph['G'])
subgraph = even_hole_free(g)
if subgraph is None:
subgraph = {'success': False}
else:
subgraph = convert_to_d3(subgraph)
subgraph['success'] = True
return json.dumps(subgraph)
@app.route("/isk4free", methods=["POST"])
def isk4():
graph = json.loads(request.data)
g = convert_to_networkx(graph['G'])
subgraph = ISK4Free(g).free()
if subgraph is None:
subgraph = {'success': False}
else:
subgraph = convert_to_d3(subgraph)
subgraph['success'] = True
return json.dumps(subgraph)
@app.route("/contains", methods=["POST"])
def contains():
print(request.data)
graphs = json.loads(request.data)
g = convert_to_networkx(graphs['G'])
h = convert_to_networkx(graphs['H'])
subgraph = induced_subgraph(g, h)
if subgraph is None:
subgraph = {'success': False}
else:
subgraph = convert_to_d3(subgraph)
subgraph['success'] = True
return json.dumps(subgraph)
@app.route("/loadGraph", methods=["POST"])
def load_graph():
file = request.files['file']
result = {'graph': None, 'success': False}
if file and allowed_file(file.filename):
content = (file.read()).decode("UTF-8")
print(content)
lines = content.replace("\r", "")
lines = lines.split("\n")
print(lines)
result['graph'] = text_to_d3(lines)
if result['graph'] is not None:
result['success'] = True
return json.dumps(result)
@app.route("/complement", methods=["POST"])
def complement_graph():
graph = json.loads(request.data)
g = convert_to_networkx(graph)
co_g = complement(g)
co_g = convert_to_d3(co_g)
return json.dumps(co_g)
@app.route("/k_vertex", methods=["POST"])
def k():
graphs = json.loads(request.data)
g = convert_to_networkx(graphs['G'])
subgraphs = []
for subgraph in graphs['subgraphs']:
subgraphs.append(convert_to_networkx(subgraph))
k_vertexes = k_vertex(g, subgraphs)
return json.dumps(k_vertexes)
@app.route("/join", methods=["POST"])
def join_graphs():
graphs = json.loads(request.data)
g = convert_to_networkx(graphs["G"])
h = convert_to_networkx(graphs["H"])
f = join(g, h)
f = convert_to_d3(f)
return json.dumps(f)
@app.route("/save_file", methods=["POST"])
def save_graph():
graph = json.loads(request.data)
graph = d3_to_text(graph)
fp = filepath(getcwd(), app.config['UPLOAD_FOLDER'], "graph.txt")
print(fp)
with open(fp, 'w') as f:
for line in graph:
f.write(line + "\n")
print(line)
return json.dumps("graph.txt")
@app.route("/<file_name>")
def getFile(file_name):
fp = filepath(getcwd(), app.config['UPLOAD_FOLDER'], file_name)
result = ""
with open(fp, "r") as f:
for line in f:
result += line
print(result)
response = make_response(result)
text = "attachment; filename=outbound.txt"
response.headers["Content-Disposition"] = text
return response
@app.route("/coloring", methods=["POST"])
def find_coloring():
print(request.data)
graph = json.loads(request.data)
graph = convert_to_networkx(graph)
colored = coloring(graph)
return json.dumps(colored)
@app.route("/dcoloring", methods=["POST"])
def find_dcoloring():
print(request.data)
graph = json.loads(request.data)
graph = convert_to_networkx(graph)
colored = dcoloring(graph)
return json.dumps(colored)
| |
import yt
import numpy as np
from galaxy_analysis.plot.plot_styles import *
fsize = 22
import matplotlib.pyplot as plt
from collections import Iterable, OrderedDict
import glob
import os
import h5py
import deepdish as dd
# parallel
from multiprocessing import Pool
from contextlib import closing
import itertools
# --- internal ---
from galaxy_analysis import Galaxy
from galaxy_analysis.utilities import utilities as utilities
#from galaxy_analysis.utilities import functions
#from galaxy_analysis.static_data import ISM
MAX_NUM = 700
GLOBAL_DR = 20.0 * yt.units.pc # fix this
# function to do this for a single data set
def stellar_environment(ds, data, dead_only = True, write_to_file = True,
                        output_file = 'stellar_environment.dat', dR = GLOBAL_DR):
    """
    Compute properties of the local ISM around star particles.

    Gas within ``dR`` of each selected star is sampled and summarized
    (min/max/median/weighted-average density, weighted-average temperature,
    enclosed gas mass).  The result is a dictionary keyed by particle ID;
    each entry holds ``env`` (local gas statistics), ``c_prop`` (creation
    properties) and ``s_prop`` (current star properties).

    NOTE(review): despite their names, ``write_to_file`` and
    ``output_file`` are currently unused - the file-writing code at the
    bottom of the loop is commented out and the caller does the writing.

    NOTE(review): with ``dead_only`` the selection keeps stars whose
    remaining model lifetime is between 0 and 1 Myr (about to die), not
    stars that are already dead - confirm this is the intended cut.
    """
    #
    t_now = ds.current_time.convert_to_units('Myr').value
    # finest cell size of the simulation; used for the default sampling radius
    min_dx = ds.length_unit.to('pc') / ( ds.domain_dimensions[0] * 2.0**ds.max_level)
    if dR is None:
        # match the star feedback stencil of the run (plus half a cell)
        dR = (ds.parameters['IndividualStarFeedbackStencilSize'] + 0.5) * min_dx
    # gather properties for all stars
    pid = data['particle_index'].value
    ptype = data['particle_type'].value
    M_o = data['birth_mass'].value # initial mass of particle
    M_p = data['particle_mass'].convert_to_units('Msun').value
    t_o = data['creation_time'].convert_to_units('Myr').value
    lifetime = data[('io','particle_model_lifetime')].convert_to_units('Myr').value # MS star lifetime
    r_cyl = data['particle_position_cylindrical_radius'].convert_to_units('pc').value
    z_cyl = data['particle_position_z'].convert_to_units('pc').value - ds.domain_center[2].to('pc').value
    age = t_now - t_o
    dynamical_time = data['dynamical_time'].convert_to_units('Myr').value
    # gas cell coordinates and particle coordinates
    x = data['x'].convert_to_units('pc')
    y = data['y'].convert_to_units('pc')
    z = data['z'].convert_to_units('pc')
    px = data['particle_position_x'].convert_to_units('pc')
    py = data['particle_position_y'].convert_to_units('pc')
    pz = data['particle_position_z'].convert_to_units('pc')
    # now compute the environment properties for all stars
    prop = {}
    if dead_only:
        # stars with 0 < remaining lifetime <= 1 Myr (see docstring note)
        loop_indexes = np.where( ( (lifetime-age) > 0) * ( (lifetime-age) <= 1.0) )[0]
    else:
        loop_indexes = np.arange(np.size(pid)) # all stars
    print(np.size(pid), np.size(loop_indexes))
    for i in loop_indexes:
        #if (dead_only and (lifetime - age > 1.0)): # skip for stars still alive
        #    continue
        ID = int(pid[i])
        # distance from this star to every gas cell in the selection
        r = np.sqrt( (x-px[i])**2 + (y-py[i])**2 + (z-pz[i])**2 )
        select = r <= dR
        #print i, pid[i], np.size(r[select])
        if np.size(r[select]) == 0:
            # no cell center inside dR; fall back to the nearest cell(s)
            select = r <= np.min(r)
            #print '---', i, pid[i], np.size(r[select]), np.min(r), dR
        M = (data['cell_mass'].to('Msun'))[select]
        V = (data['cell_volume'].to('cm**(3)'))[select]
        n = (data['number_density'])[select]
        T = (data['temperature'])[select]
        prop[ID] = {}
        # volume- and mass-weighted statistics of the selected gas
        prop[ ID ]['env'] = {'n_min' : np.min(n), 'n_max' : np.max(n),
                             'n_v_avg' : np.sum(n*V)/np.sum(V), 'n_med' : np.median(n),
                             'T_m_avg' : np.sum(T*M)/np.sum(M), 'T_v_avg' : np.sum(T*V)/np.sum(V),
                             'M_tot' : np.sum(M)}
        prop[ID]['c_prop'] = {'M_o' : M_o[i], 'lifetime' : lifetime[i],
                              't_o' : t_o[i]}
        prop[ID]['s_prop'] = {'age' : age[i], 'r_cyl' : r_cyl[i], 'z_cyl' : z_cyl[i],
                              'M' : M_p[i], 'ptype' : ptype[i], 'dyn_time' : dynamical_time[i]}
        # if write_to_file:
        #     file.write("%i %i"%(ID,ptype[i]))
        #
        #     results = [M_o[i], r_cyl[i], z_cyl[i], t_now, t_o[i], lifetime[i], age[i],
        #                prop[ID]['env']['n_min'], prop[ID]['env']['n_max'],
        #                prop[ID]['env']['n_v_avg'], prop[ID]['env']['n_med'], prop[ID]['env']['T_m_avg'],
        #                prop[ID]['env']['T_v_avg'], prop[ID]['env']['M_tot']]
        #
        #     for val in results:
        #         file.write(" %4.4E"%(val))
        #
        #     file.write("\n")
    return prop
def _parallel_loop(dsname):
    """Pool worker: compute stellar environment statistics for one dataset.

    Returns ``{group_name: {...}}`` so the parent process can merge the
    result straight into the output structure.
    """
    group_name = dsname.rsplit('/')[1]
    print("starting computation on ", group_name)
    galaxy = Galaxy(group_name)
    stats = {'Time': galaxy.ds.current_time.convert_to_units('Myr').value}
    # one entry per star particle, keyed by particle ID
    for key, value in stellar_environment(galaxy.ds, galaxy.df).items():
        stats[key] = value
    del galaxy
    print("ending computation on ", group_name)
    return {group_name: stats}
def compute_stats_all_datasets(overwrite = False,
                               dir = './', outfile = 'stellar_environment.h5',
                               write_to_text = True, text_file = 'stellar_environment.dat',
                               nproc = 24, dR = None):
    """Compute local ISM statistics around stars for every dataset on disk.

    Results accumulate in an HDF5 file (one group per dataset) and,
    optionally, in a flat text table.  Work is farmed out to ``nproc``
    processes unless ``nproc == 1``.

    Parameters
    ----------
    overwrite : bool
        Recompute datasets already present in the output file.
    dir : str
        Directory holding the HDF5 output file.
    outfile : str
        Name of the HDF5 output file (prefixed with ``dR`` when given).
    write_to_text : bool
        Also append one line per star to ``text_file``.
    text_file : str
        Name of the text output table (prefixed with ``dR`` when given).
    nproc : int
        Number of worker processes.
    dR : yt quantity or None
        Sampling radius; None lets each dataset pick its default.
    """
    if not (dR is None):
        # Encode the sampling radius in the file names so runs with
        # different radii do not clobber each other.
        outfile = "%4.4f"%(dR.value) + outfile
        text_file = "%4.4f"%(dR.value) + text_file
    hdf5_filename = dir + outfile
    if not os.path.isfile(hdf5_filename) or overwrite:
        # create an empty file so dd.io.load below always succeeds
        hf = h5py.File(hdf5_filename, 'w')
        hf.close()
    hf = dd.io.load(hdf5_filename)
    ds_list = np.sort( glob.glob('./DD????/DD????'))
    # Skip every output from before the first star particle forms.
    for i, dsname in enumerate(ds_list):
        ds = yt.load(dsname)
        if ds.parameters['NumberOfParticles'] > 0:
            start_index = i
            del(ds)
            break
        del(ds)
    times = np.zeros(np.size(ds_list))
    ds_list = np.array(ds_list[start_index:])
    times = np.array(times[start_index:])
    # cap the number of datasets processed in one run
    ds_list = ds_list[:np.min([np.size(ds_list),MAX_NUM])]
    times = times[:np.min([np.size(ds_list),MAX_NUM])]
    if write_to_text:
        # Fix: the handle no longer shadows the ``file`` builtin.
        if not os.path.exists(text_file):
            text_output = open(text_file,'w')
            text_output.write("#PID ptype M_o M r z t t_o lifetime age dyn_time n_min n_max n_v_avg n_med T_m_avg T_v_avg M_tot\n")
        else:
            text_output = open(text_file,'a')

    def _write(_file, prop, ID, t_now):
        # One row per star: integer identifiers first, then float columns
        # in the order of the header written above.
        _file.write("%i %i"%(ID, prop['s_prop']['ptype']))
        results = [prop['c_prop']['M_o'], prop['s_prop']['M'],
                   prop['s_prop']['r_cyl'],
                   prop['s_prop']['z_cyl'], t_now, prop['c_prop']['t_o'], prop['c_prop']['lifetime'], prop['s_prop']['age'],
                   prop['s_prop']['dyn_time'], prop['env']['n_min'], prop['env']['n_max'],
                   prop['env']['n_v_avg'], prop['env']['n_med'], prop['env']['T_m_avg'],
                   prop['env']['T_v_avg'], prop['env']['M_tot']]
        for val in results:
            # Fix: previously wrote to the outer file handle from the
            # enclosing scope instead of the ``_file`` parameter.
            _file.write(" %4.4E"%(val))
        _file.write("\n")
        return

    if nproc == 1:
        # serial path: process datasets one at a time
        for i, dsname in enumerate(ds_list):
            groupname = dsname.rsplit('/')[1]
            gal = Galaxy(groupname)
            hf[groupname] = {}
            g = hf[groupname]
            g['Time'] = gal.ds.current_time.convert_to_units('Myr').value
            data = stellar_environment(gal.ds, gal.df, dR = dR)
            for k in data.keys():
                g[k] = data[k]
            if write_to_text:
                for PID in g.keys():
                    if PID == 'Time':
                        continue
                    _write(text_output, g[PID], PID, g['Time'])
            del(gal)
            print("ending computation on ", groupname)
    else: # parallel
        if not overwrite:
            # skip datasets whose group already exists in the output
            ds_list = [x for x in ds_list if ( not any( [x.rsplit('/')[1] in y for y in hf.keys() ]))]
        # Run nproc datasets per batch so progress can be merged after every
        # batch; this bounds peak memory when operating on many datasets.
        for sub_list in itertools.zip_longest(*(iter(ds_list),) * nproc):
            sub_list = [s for s in sub_list if s is not None] # drop zip_longest fill values
            reduced_nproc = np.min( [len(sub_list), nproc] ) # only run on needed processors
            pool = Pool(reduced_nproc)
            results = pool.map_async(_parallel_loop, sub_list)
            pool.close() # no more processes
            pool.join() # wait and join running processes
            # gather results and add to output
            for r in results.get():
                # Fix: dict views are not indexable in Python 3
                # (``r.keys()[0]`` raised TypeError); merge the one-entry
                # result dict instead.
                hf.update(r)
            if write_to_text:
                for dsname in sub_list:
                    k = dsname.split('/')[1]
                    for PID in hf[k].keys():
                        if PID == 'Time':
                            continue
                        _write(text_output, hf[k][PID], PID, hf[k]['Time'])
            del(results)
    if write_to_text:
        text_output.close()
    dd.io.save(hdf5_filename, hf)
    return
if __name__ == "__main__":
# do things here
compute_stats_all_datasets(nproc = 28, dR = GLOBAL_DR)
| |
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
import warnings
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from megaman.utils.validation import check_array, check_symmetric, DataConversionWarning
from megaman.utils.testing import (assert_no_warnings, assert_warns,
ignore_warnings, assert_raise_message)
@ignore_warnings
def test_check_array():
    """Exercise check_array across dense/sparse inputs, dtypes, orders and
    copy semantics.

    Fix: ``np.float``/``np.int``/``np.bool`` were deprecated aliases of the
    builtins and were removed in numpy >= 1.24; the builtins they aliased
    are used instead, which keeps every checked dtype identical.
    """
    # accept_sparse == None: raise error on sparse inputs
    X = [[1, 2], [3, 4]]
    X_csr = sp.csr_matrix(X)
    assert_raises(TypeError, check_array, X_csr)
    # ensure_2d
    assert_warns(DeprecationWarning, check_array, [0, 1, 2])
    X_array = check_array([0, 1, 2])
    assert_equal(X_array.ndim, 2)
    X_array = check_array([0, 1, 2], ensure_2d=False)
    assert_equal(X_array.ndim, 1)
    # don't allow ndim > 3
    X_ndim = np.arange(8).reshape(2, 2, 2)
    assert_raises(ValueError, check_array, X_ndim)
    check_array(X_ndim, allow_nd=True)  # doesn't raise
    # force_all_finite
    X_inf = np.arange(4).reshape(2, 2).astype(float)
    X_inf[0, 0] = np.inf
    assert_raises(ValueError, check_array, X_inf)
    check_array(X_inf, force_all_finite=False)  # no raise
    # nan check
    X_nan = np.arange(4).reshape(2, 2).astype(float)
    X_nan[0, 0] = np.nan
    assert_raises(ValueError, check_array, X_nan)
    check_array(X_inf, force_all_finite=False)  # no raise
    # dtype and order enforcement.
    X_C = np.arange(4).reshape(2, 2).copy("C")
    X_F = X_C.copy("F")
    X_int = X_C.astype(int)
    X_float = X_C.astype(float)
    Xs = [X_C, X_F, X_int, X_float]
    dtypes = [np.int32, int, float, np.float32, None, bool, object]
    orders = ['C', 'F', None]
    copys = [True, False]
    for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
        X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
        if dtype is not None:
            assert_equal(X_checked.dtype, dtype)
        else:
            assert_equal(X_checked.dtype, X.dtype)
        if order == 'C':
            assert_true(X_checked.flags['C_CONTIGUOUS'])
            assert_false(X_checked.flags['F_CONTIGUOUS'])
        elif order == 'F':
            assert_true(X_checked.flags['F_CONTIGUOUS'])
            assert_false(X_checked.flags['C_CONTIGUOUS'])
        if copy:
            assert_false(X is X_checked)
        else:
            # doesn't copy if it was already good
            if (X.dtype == X_checked.dtype and
                    X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
                    and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
                assert_true(X is X_checked)
    # allowed sparse != None
    X_csc = sp.csc_matrix(X_C)
    X_coo = X_csc.tocoo()
    X_dok = X_csc.todok()
    X_int = X_csc.astype(int)
    X_float = X_csc.astype(float)
    Xs = [X_csc, X_coo, X_dok, X_int, X_float]
    accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
    for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
                                                 copys):
        with warnings.catch_warnings(record=True) as w:
            X_checked = check_array(X, dtype=dtype,
                                    accept_sparse=accept_sparse, copy=copy)
        if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
            # these combinations are documented to warn
            message = str(w[0].message)
            messages = ["object dtype is not supported by sparse matrices",
                        "Can't check dok sparse matrix for nan or inf."]
            assert_true(message in messages)
        else:
            assert_equal(len(w), 0)
        if dtype is not None:
            assert_equal(X_checked.dtype, dtype)
        else:
            assert_equal(X_checked.dtype, X.dtype)
        if X.format in accept_sparse:
            # no change if allowed
            assert_equal(X.format, X_checked.format)
        else:
            # got converted to the first accepted format
            assert_equal(X_checked.format, accept_sparse[0])
        if copy:
            assert_false(X is X_checked)
        else:
            # doesn't copy if it was already good
            if (X.dtype == X_checked.dtype and X.format == X_checked.format):
                assert_true(X is X_checked)
    # other input formats: convert lists to arrays
    X_dense = check_array([[1, 2], [3, 4]])
    assert_true(isinstance(X_dense, np.ndarray))
    # raise on too deep lists
    assert_raises(ValueError, check_array, X_ndim.tolist())
    check_array(X_ndim.tolist(), allow_nd=True)  # doesn't raise
def test_check_array_dtype_stability():
    """Integer lists must stay integer-typed after validation."""
    ints = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    for kwargs in ({}, {"ensure_2d": False}):
        assert_equal(check_array(ints, **kwargs).dtype.kind, "i")
def test_check_array_dtype_warning():
    """Dtype-conversion warnings: integer input converted to float64 warns
    only when ``warn_on_dtype`` is set; float data is accepted as-is or
    copied on request.

    Fix: removed the unused local ``y``.
    """
    X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    X_float64 = np.asarray(X_int_list, dtype=np.float64)
    X_float32 = np.asarray(X_int_list, dtype=np.float32)
    X_int64 = np.asarray(X_int_list, dtype=np.int64)
    X_csr_float64 = sp.csr_matrix(X_float64)
    X_csr_float32 = sp.csr_matrix(X_float32)
    X_csc_float32 = sp.csc_matrix(X_float32)
    X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
    integer_data = [X_int64, X_csc_int32]
    float64_data = [X_float64, X_csr_float64]
    float32_data = [X_float32, X_csr_float32, X_csc_float32]
    for X in integer_data:
        # silent conversion unless warn_on_dtype is requested
        X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
                                       accept_sparse=True)
        assert_equal(X_checked.dtype, np.float64)
        X_checked = assert_warns(DataConversionWarning, check_array, X,
                                 dtype=np.float64,
                                 accept_sparse=True, warn_on_dtype=True)
        assert_equal(X_checked.dtype, np.float64)
    for X in float64_data:
        # already float64: never warns, with or without warn_on_dtype
        X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
                                       accept_sparse=True, warn_on_dtype=True)
        assert_equal(X_checked.dtype, np.float64)
        X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
                                       accept_sparse=True, warn_on_dtype=False)
        assert_equal(X_checked.dtype, np.float64)
    for X in float32_data:
        # float32 matches the accepted dtype list, so input is kept
        X_checked = assert_no_warnings(check_array, X,
                                       dtype=[np.float64, np.float32],
                                       accept_sparse=True)
        assert_equal(X_checked.dtype, np.float32)
        assert_true(X_checked is X)
        X_checked = assert_no_warnings(check_array, X,
                                       dtype=[np.float64, np.float32],
                                       accept_sparse=['csr', 'dok'],
                                       copy=True)
        assert_equal(X_checked.dtype, np.float32)
        assert_false(X_checked is X)
    # csc is not in the accepted list, so even copy=False must convert
    X_checked = assert_no_warnings(check_array, X_csc_float32,
                                   dtype=[np.float64, np.float32],
                                   accept_sparse=['csr', 'dok'],
                                   copy=False)
    assert_equal(X_checked.dtype, np.float32)
    assert_false(X_checked is X_csc_float32)
    assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
    """Validate the error and warning messages for degenerate input shapes."""
    # An empty list is treated as 2D by default, so it fails the minimum
    # feature count.
    expected = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
    assert_raise_message(ValueError, expected, check_array, [[]])
    # Treated as a 1D collection when ensure_2d=False, the minimum sample
    # count breaks instead.
    expected = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
    assert_raise_message(ValueError, expected, check_array, [], ensure_2d=False)
    # A scalar cannot be a collection at all.
    expected = "Singleton array array(42) cannot be considered a valid collection."
    assert_raise_message(TypeError, expected, check_array, 42, ensure_2d=False)
    # ...unless forced to look like a single-sample, single-feature 2D array.
    checked = assert_warns(DeprecationWarning, check_array, [42],
                           ensure_2d=True)
    assert_array_equal(np.array([[42]]), checked)
def test_check_symmetric():
    """check_symmetric rejects non-square input, warns/raises on asymmetric
    input, and symmetrizes while preserving the sparse format."""
    expected_sym = np.array([[0, 1], [1, 2]])
    not_square = np.ones(2)
    asym = np.array([[0, 2], [0, 2]])
    builders = {'dense': lambda a: a,
                'dok': sp.dok_matrix,
                'csr': sp.csr_matrix,
                'csc': sp.csc_matrix,
                'coo': sp.coo_matrix,
                'lil': sp.lil_matrix,
                'bsr': sp.bsr_matrix}
    # non-square input is rejected outright
    assert_raises(ValueError, check_symmetric, not_square)
    for arr_format, build in builders.items():
        arr = build(asym)
        # asymmetric input warns by default and raises on request
        assert_warns(UserWarning, check_symmetric, arr)
        assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
        # silent mode symmetrizes; sparse results keep their format
        output = check_symmetric(arr, raise_warning=False)
        if sp.issparse(output):
            assert_equal(output.format, arr_format)
            assert_array_equal(output.toarray(), expected_sym)
        else:
            assert_array_equal(output, expected_sym)
| |
import argparse
import os
import random
from fnmatch import fnmatchcase
import sys
import re
import shlex
import string
import pkg_resources
import itertools
import pluggy
import tox.interpreters
from tox import hookspecs
import py
import tox
# True when running on native Windows
iswin32 = sys.platform == "win32"
# factor name -> default interpreter executable for that factor
default_factors = {'jython': 'jython', 'pypy': 'pypy', 'pypy3': 'pypy3',
                   'py': sys.executable}
# register pyXY factors, e.g. default_factors['py27'] == 'python2.7'
for version in '26,27,32,33,34,35,36'.split(','):
    default_factors['py' + version] = 'python%s.%s' % tuple(version)
# decorator marking functions that implement tox plugin hooks
hookimpl = pluggy.HookimplMarker("tox")
def get_plugin_manager():
    """Create the tox plugin manager with built-in and entry-point plugins."""
    # initialize plugin manager
    pm = pluggy.PluginManager("tox")
    pm.add_hookspecs(hookspecs)
    # built-in plugins: this config module and the interpreters module
    pm.register(tox.config)
    pm.register(tox.interpreters)
    # external plugins advertised through the "tox" setuptools entry point
    pm.load_setuptools_entrypoints("tox")
    pm.check_pending()
    return pm
class Parser:
    """ command line and ini-parser control object.

    Collects argparse arguments and the per-testenv ini attribute
    declarations contributed by plugins via the tox_addoption hook.
    """

    def __init__(self):
        self.argparser = argparse.ArgumentParser(
            description="tox options", add_help=False)
        # testenv attribute declarations (VenvAttribute or duck-typed objects)
        self._testenv_attr = []

    def add_argument(self, *args, **kwargs):
        """ add argument to command line parser. This takes the
        same arguments that ``argparse.ArgumentParser.add_argument``.
        """
        return self.argparser.add_argument(*args, **kwargs)

    def add_testenv_attribute(self, name, type, help, default=None, postprocess=None):
        """ add an ini-file variable for "testenv" section.

        Types are specified as strings like "bool", "line-list", "string", "argv", "path",
        "argvlist".

        The ``postprocess`` function will be called for each testenv
        like ``postprocess(testenv_config=testenv_config, value=value)``
        where ``value`` is the value as read from the ini (or the default value)
        and ``testenv_config`` is a :py:class:`tox.config.TestenvConfig` instance
        which will receive all ini-variables as object attributes.
        Any postprocess function must return a value which will then be set
        as the final value in the testenv section.
        """
        self._testenv_attr.append(VenvAttribute(name, type, default, help, postprocess))

    def add_testenv_attribute_obj(self, obj):
        """ add an ini-file variable as an object.

        This works as the ``add_testenv_attribute`` function but expects
        "name", "type", "help", and "postprocess" attributes on the object.
        """
        # duck-typing check: obj must offer the VenvAttribute interface
        assert hasattr(obj, "name")
        assert hasattr(obj, "type")
        assert hasattr(obj, "help")
        assert hasattr(obj, "postprocess")
        self._testenv_attr.append(obj)

    def _parse_args(self, args):
        # internal: run the underlying argparse parser on the given args
        return self.argparser.parse_args(args)

    def _format_help(self):
        # internal: render argparse help text (used for "tox --help")
        return self.argparser.format_help()
class VenvAttribute:
    """Plain record describing one per-testenv ini attribute declaration."""

    def __init__(self, name, type, default, help, postprocess):
        self.name = name
        self.type = type
        self.help = help
        self.default = default
        self.postprocess = postprocess
class DepOption:
    """Testenv attribute turning "deps" ini lines into DepConfig objects."""

    name = "deps"
    type = "line-list"
    help = "each line specifies a dependency in pip/setuptools format."
    default = ()

    def postprocess(self, testenv_config, value):
        """Resolve optional :indexserver: prefixes and --force-dep overrides."""
        config = testenv_config.config
        processed = []
        for depline in value:
            match = re.match(r":(\w+):\s*(\S+)", depline)
            if match:
                index_name, requirement = match.groups()
                server = config.indexserver[index_name]
            else:
                requirement = depline.strip()
                server = None
            requirement = self._replace_forced_dep(requirement, config)
            processed.append(DepConfig(requirement, server))
        return processed

    def _replace_forced_dep(self, name, config):
        """
        Override the given dependency config name taking --force-dep-version
        option into account.

        :param name: dep config, for example ["pkg==1.0", "other==2.0"].
        :param config: Config instance
        :return: the new dependency that should be used for virtual environments
        """
        forced = config.option.force_dep or ()
        for candidate in forced:
            if self._is_same_dep(candidate, name):
                return candidate
        return name

    @classmethod
    def _is_same_dep(cls, dep1, dep2):
        """
        Returns True if both dependency definitions refer to the
        same package, even if versions differ.
        """
        parse = pkg_resources.Requirement.parse
        return parse(dep1).project_name == parse(dep2).project_name
class PosargsOption:
    # testenv attribute: when true, positional args that exist as paths are
    # rewritten relative to the testenv changedir
    name = "args_are_paths"
    type = "bool"
    default = True
    help = "treat positional args in commands as paths"

    def postprocess(self, testenv_config, value):
        """Optionally rewrite positional args as changedir-relative paths and
        register them as the {posargs} substitution list."""
        config = testenv_config.config
        args = config.option.args
        if args:
            if value:
                args = []
                for arg in config.option.args:
                    if arg:
                        # check if arg is an existing path on the invocation cwd
                        origpath = config.invocationcwd.join(arg, abs=True)
                        if origpath.check():
                            arg = testenv_config.changedir.bestrelpath(origpath)
                    args.append(arg)
            testenv_config._reader.addsubstitutions(args)
        return value
class InstallcmdOption:
    """Testenv attribute validating the configured install command."""

    name = "install_command"
    type = "argv"
    default = "pip install {opts} {packages}"
    help = "install command for dependencies and package under test."

    def postprocess(self, testenv_config, value):
        """Ensure the install command can actually receive the packages."""
        if '{packages}' in value:
            return value
        raise tox.exception.ConfigError(
            "'install_command' must contain '{packages}' substitution")
def parseconfig(args=None):
    """Parse command line options and the tox ini file into a Config object.

    :param list[str] args: Optional list of arguments; defaults to sys.argv[1:].
    :rtype: :class:`Config`
    :raise SystemExit: toxinit file is not found
    """
    pm = get_plugin_manager()
    if args is None:
        args = sys.argv[1:]
    # prepare command line options
    parser = Parser()
    pm.hook.tox_addoption(parser=parser)
    # parse command line options
    option = parser._parse_args(args)
    interpreters = tox.interpreters.Interpreters(hook=pm.hook)
    config = Config(pluginmanager=pm, option=option, interpreters=interpreters)
    config._parser = parser
    config._testenv_attr = parser._testenv_attr
    # parse ini file
    basename = config.option.configfile
    if os.path.isabs(basename):
        inipath = py.path.local(basename)
    else:
        # search upwards from the current dir for the config file
        for path in py.path.local().parts(reverse=True):
            inipath = path.join(basename)
            if inipath.check():
                break
        else:
            feedback("toxini file %r not found" % (basename), sysexit=True)
    try:
        parseini(config, inipath)
    except tox.exception.InterpreterNotFound:
        exn = sys.exc_info()[1]
        # Use stdout to match test expectations
        py.builtin.print_("ERROR: " + str(exn))
    # post process config object
    pm.hook.tox_configure(config=config)
    return config
def feedback(msg, sysexit=False):
    """Print an ERROR line to stderr; optionally abort with exit status 1."""
    py.builtin.print_("ERROR: " + msg, file=sys.stderr)
    if not sysexit:
        return
    raise SystemExit(1)
class VersionAction(argparse.Action):
    """Argparse action: report the tox version and its origin, then exit 0."""

    def __call__(self, argparser, *args, **kwargs):
        py.builtin.print_(
            "%s imported from %s" % (tox.__version__, tox.__file__))
        raise SystemExit(0)
class CountAction(argparse.Action):
    """Argparse action counting flag repetitions, starting at 0 on first use."""

    def __call__(self, parser, namespace, values, option_string=None):
        _unset = object()
        current = getattr(namespace, self.dest, _unset)
        if current is _unset:
            # first occurrence initialises the counter
            setattr(namespace, self.dest, 0)
        else:
            setattr(namespace, self.dest, int(current) + 1)
@hookimpl
def tox_addoption(parser):
    """Hook implementation registering tox core command line options and the
    built-in per-testenv ini attributes (with their postprocess helpers)."""
    # formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--version", nargs=0, action=VersionAction,
                        dest="version",
                        help="report version information to stdout.")
    parser.add_argument("-h", "--help", action="store_true", dest="help",
                        help="show help about options")
    parser.add_argument("--help-ini", "--hi", action="store_true", dest="helpini",
                        help="show help about ini-names")
    parser.add_argument("-v", nargs=0, action=CountAction, default=0,
                        dest="verbosity",
                        help="increase verbosity of reporting output.")
    parser.add_argument("--showconfig", action="store_true",
                        help="show configuration information for all environments. ")
    parser.add_argument("-l", "--listenvs", action="store_true",
                        dest="listenvs", help="show list of test environments")
    parser.add_argument("-c", action="store", default="tox.ini",
                        dest="configfile",
                        help="use the specified config file name.")
    parser.add_argument("-e", action="append", dest="env",
                        metavar="envlist",
                        help="work against specified environments (ALL selects all).")
    parser.add_argument("--notest", action="store_true", dest="notest",
                        help="skip invoking test commands.")
    parser.add_argument("--sdistonly", action="store_true", dest="sdistonly",
                        help="only perform the sdist packaging activity.")
    parser.add_argument("--installpkg", action="store", default=None,
                        metavar="PATH",
                        help="use specified package for installation into venv, instead of "
                             "creating an sdist.")
    parser.add_argument("--develop", action="store_true", dest="develop",
                        help="install package in the venv using 'setup.py develop' via "
                             "'pip -e .'")
    parser.add_argument('-i', action="append",
                        dest="indexurl", metavar="URL",
                        help="set indexserver url (if URL is of form name=url set the "
                             "url for the 'name' indexserver, specifically)")
    parser.add_argument("--pre", action="store_true", dest="pre",
                        help="install pre-releases and development versions of dependencies. "
                             "This will pass the --pre option to install_command "
                             "(pip by default).")
    parser.add_argument("-r", "--recreate", action="store_true",
                        dest="recreate",
                        help="force recreation of virtual environments")
    parser.add_argument("--result-json", action="store",
                        dest="resultjson", metavar="PATH",
                        help="write a json file with detailed information "
                             "about all commands and results involved.")
    # We choose 1 to 4294967295 because it is the range of PYTHONHASHSEED.
    parser.add_argument("--hashseed", action="store",
                        metavar="SEED", default=None,
                        help="set PYTHONHASHSEED to SEED before running commands. "
                             "Defaults to a random integer in the range [1, 4294967295] "
                             "([1, 1024] on Windows). "
                             "Passing 'noset' suppresses this behavior.")
    parser.add_argument("--force-dep", action="append",
                        metavar="REQ", default=None,
                        help="Forces a certain version of one of the dependencies "
                             "when configuring the virtual environment. REQ Examples "
                             "'pytest<2.7' or 'django>=1.6'.")
    parser.add_argument("--sitepackages", action="store_true",
                        help="override sitepackages setting to True in all envs")
    parser.add_argument("--skip-missing-interpreters", action="store_true",
                        help="don't fail tests for missing interpreters")
    parser.add_argument("args", nargs="*",
                        help="additional arguments available to command positional substitution")

    # add various core venv interpreter attributes
    parser.add_testenv_attribute(
        name="envdir", type="path", default="{toxworkdir}/{envname}",
        help="venv directory")
    parser.add_testenv_attribute(
        name="envtmpdir", type="path", default="{envdir}/tmp",
        help="venv temporary directory")
    parser.add_testenv_attribute(
        name="envlogdir", type="path", default="{envdir}/log",
        help="venv log directory")

    # postprocess: deprecated PIP_DOWNLOAD_CACHE support
    def downloadcache(testenv_config, value):
        if value:
            # env var, if present, takes precedence
            downloadcache = os.environ.get("PIP_DOWNLOAD_CACHE", value)
            return py.path.local(downloadcache)

    parser.add_testenv_attribute(
        name="downloadcache", type="string", default=None, postprocess=downloadcache,
        help="(deprecated) set PIP_DOWNLOAD_CACHE.")
    parser.add_testenv_attribute(
        name="changedir", type="path", default="{toxinidir}",
        help="directory to change to when running commands")
    parser.add_testenv_attribute_obj(PosargsOption())
    parser.add_testenv_attribute(
        name="skip_install", type="bool", default=False,
        help="Do not install the current package. This can be used when "
             "you need the virtualenv management but do not want to install "
             "the current package")
    parser.add_testenv_attribute(
        name="ignore_errors", type="bool", default=False,
        help="if set to True all commands will be executed irrespective of their "
             "result error status.")

    # postprocess: the --recreate command line flag overrides the ini value
    def recreate(testenv_config, value):
        if testenv_config.config.option.recreate:
            return True
        return value

    parser.add_testenv_attribute(
        name="recreate", type="bool", default=False, postprocess=recreate,
        help="always recreate this test environment.")

    # postprocess: inject PYTHONHASHSEED unless the user set it explicitly
    def setenv(testenv_config, value):
        setenv = value
        config = testenv_config.config
        if "PYTHONHASHSEED" not in setenv and config.hashseed is not None:
            setenv['PYTHONHASHSEED'] = config.hashseed
        return setenv

    parser.add_testenv_attribute(
        name="setenv", type="dict", postprocess=setenv,
        help="list of X=Y lines with environment variable settings")

    # postprocess: expand passenv glob patterns against the real environment
    def passenv(testenv_config, value):
        # Flatten the list to deal with space-separated values.
        value = list(
            itertools.chain.from_iterable(
                [x.split(' ') for x in value]))
        passenv = set(["PATH", "PIP_INDEX_URL", "LANG"])
        # read in global passenv settings
        p = os.environ.get("TOX_TESTENV_PASSENV", None)
        if p is not None:
            passenv.update(x for x in p.split() if x)
        # we ensure that tmp directory settings are passed on
        # we could also set it to the per-venv "envtmpdir"
        # but this leads to very long paths when run with jenkins
        # so we just pass it on by default for now.
        if sys.platform == "win32":
            passenv.add("SYSTEMDRIVE")  # needed for pip6
            passenv.add("SYSTEMROOT")  # needed for python's crypto module
            passenv.add("PATHEXT")  # needed for discovering executables
            passenv.add("TEMP")
            passenv.add("TMP")
        else:
            passenv.add("TMPDIR")
        for spec in value:
            for name in os.environ:
                if fnmatchcase(name.upper(), spec.upper()):
                    passenv.add(name)
        return passenv

    parser.add_testenv_attribute(
        name="passenv", type="line-list", postprocess=passenv,
        help="environment variables needed during executing test commands "
             "(taken from invocation environment). Note that tox always "
             "passes through some basic environment variables which are "
             "needed for basic functioning of the Python system. "
             "See --showconfig for the eventual passenv setting.")
    parser.add_testenv_attribute(
        name="whitelist_externals", type="line-list",
        help="each lines specifies a path or basename for which tox will not warn "
             "about it coming from outside the test environment.")
    parser.add_testenv_attribute(
        name="platform", type="string", default=".*",
        help="regular expression which must match against ``sys.platform``. "
             "otherwise testenv will be skipped.")

    # postprocess: the --sitepackages command line flag overrides the ini value
    def sitepackages(testenv_config, value):
        return testenv_config.config.option.sitepackages or value

    parser.add_testenv_attribute(
        name="sitepackages", type="bool", default=False, postprocess=sitepackages,
        help="Set to ``True`` if you want to create virtual environments that also "
             "have access to globally installed packages.")

    # postprocess: the --pre command line flag overrides the ini value
    def pip_pre(testenv_config, value):
        return testenv_config.config.option.pre or value

    parser.add_testenv_attribute(
        name="pip_pre", type="bool", default=False, postprocess=pip_pre,
        help="If ``True``, adds ``--pre`` to the ``opts`` passed to "
             "the install command. ")

    # postprocess: --installpkg disables develop mode; --develop enables it
    def develop(testenv_config, value):
        option = testenv_config.config.option
        return not option.installpkg and (value or option.develop)

    parser.add_testenv_attribute(
        name="usedevelop", type="bool", postprocess=develop, default=False,
        help="install package in develop/editable mode")

    # postprocess: derive the interpreter from the env name factors if unset
    def basepython_default(testenv_config, value):
        if value is None:
            for f in testenv_config.factors:
                if f in default_factors:
                    return default_factors[f]
            return sys.executable
        return str(value)

    parser.add_testenv_attribute(
        name="basepython", type="string", default=None, postprocess=basepython_default,
        help="executable name or path of interpreter used to create a "
             "virtual test environment.")
    parser.add_testenv_attribute_obj(InstallcmdOption())
    parser.add_testenv_attribute_obj(DepOption())
    parser.add_testenv_attribute(
        name="commands", type="argvlist", default="",
        help="each line specifies a test command and can use substitution.")
class Config(object):
    """Global tox configuration object shared by all test environments."""

    def __init__(self, pluginmanager, option, interpreters):
        # directory tox was invoked from; used to resolve relative paths
        self.invocationcwd = py.path.local()
        self.pluginmanager = pluginmanager
        self.interpreters = interpreters
        #: option namespace containing all parsed command line options
        self.option = option
        #: dictionary containing envname to envconfig mappings
        self.envconfigs = {}

    @property
    def homedir(self):
        """User home directory; falls back to toxinidir when undetectable."""
        home = get_homedir()
        return self.toxinidir if home is None else home  # XXX good idea?
class TestenvConfig:
    """Configuration of a single test environment.

    In addition to the core attributes set here, every per-testenv ini
    attribute is added dynamically as an instance attribute (see
    "tox --help-ini" for an overview).
    """

    def __init__(self, envname, config, factors, reader):
        #: test environment name
        self.envname = envname
        #: global tox config object
        self.config = config
        #: set of factors derived from the environment name
        self.factors = factors
        # section reader used to register substitutions for this env
        self._reader = reader

    @property
    def envbindir(self):
        """ path to directory where scripts/binaries reside. """
        posix_layout = (sys.platform != "win32"
                        or "jython" in self.basepython
                        or "pypy" in self.basepython)
        if posix_layout:
            return self.envdir.join("bin")
        return self.envdir.join("Scripts")

    @property
    def envpython(self):
        """ path to python/jython executable. """
        name = "jython" if "jython" in str(self.basepython) else "python"
        return self.envbindir.join(name)

    # no @property to avoid early calling (see callable(subst[key]) checks)
    def envsitepackagesdir(self):
        """ return sitepackagesdir of the virtualenv environment.
        (only available during execution, not parsing)
        """
        self.getsupportedinterpreter()  # for throwing exceptions
        return self.config.interpreters.get_sitepackagesdir(
            info=self.python_info,
            envdir=self.envdir)

    @property
    def python_info(self):
        """ return interpreter information for this env's basepython. """
        return self.config.interpreters.get_info(envconfig=self)

    def getsupportedinterpreter(self):
        """ return the interpreter executable, raising for unsupported setups. """
        win32_jython = (sys.platform == "win32" and self.basepython
                        and "jython" in self.basepython)
        if win32_jython:
            raise tox.exception.UnsupportedInterpreter(
                "Jython/Windows does not support installing scripts")
        info = self.config.interpreters.get_info(envconfig=self)
        if not info.executable:
            raise tox.exception.InterpreterNotFound(self.basepython)
        if not info.version_info:
            raise tox.exception.InvocationError(
                'Failed to get version_info for %s: %s' % (info.name, info.err))
        if info.version_info < (2, 6):
            raise tox.exception.UnsupportedInterpreter(
                "python2.5 is not supported anymore, sorry")
        return info.executable
# prefix of per-testenv ini sections, e.g. [testenv:py27]
testenvprefix = "testenv:"
def get_homedir():
    """Return the user's home directory (py.path.local) or None on failure."""
    try:
        return py.path.local._gethomedir()
    except Exception:
        # e.g. no home directory resolvable in this environment
        return None
def make_hashseed():
    """Return a random PYTHONHASHSEED value as a decimal string.

    Windows limits the seed to [1, 1024]; elsewhere the full
    PYTHONHASHSEED range [1, 4294967295] is used.
    """
    upper_bound = 1024 if sys.platform == 'win32' else 4294967295
    return str(random.randint(1, upper_bound))
class parseini:
    """Parse the tox ini file and populate the given Config object.

    Instantiating this class performs the whole parse as a side effect
    (historic API; the lowercase class name is kept for compatibility).
    """

    def __init__(self, config, inipath):
        config.toxinipath = inipath
        config.toxinidir = config.toxinipath.dirpath()
        self._cfg = py.iniconfig.IniConfig(config.toxinipath)
        config._cfg = self._cfg
        self.config = config
        # under Jenkins/Hudson read [tox:jenkins] with [tox] as fallback
        ctxname = getcontextname()
        if ctxname == "jenkins":
            reader = SectionReader("tox:jenkins", self._cfg, fallbacksections=['tox'])
            distshare_default = "{toxworkdir}/distshare"
        elif not ctxname:
            reader = SectionReader("tox", self._cfg)
            distshare_default = "{homedir}/.tox/distshare"
        else:
            raise ValueError("invalid context")
        # resolve the PYTHONHASHSEED policy from --hashseed
        if config.option.hashseed is None:
            hashseed = make_hashseed()
        elif config.option.hashseed == 'noset':
            hashseed = None
        else:
            hashseed = config.option.hashseed
        config.hashseed = hashseed
        reader.addsubstitutions(toxinidir=config.toxinidir,
                                homedir=config.homedir)
        config.toxworkdir = reader.getpath("toxworkdir", "{toxinidir}/.tox")
        config.minversion = reader.getstring("minversion", None)
        # command line flag wins; otherwise fall back to the ini setting
        if not config.option.skip_missing_interpreters:
            config.option.skip_missing_interpreters = \
                reader.getbool("skip_missing_interpreters", False)
        # determine indexserver dictionary
        config.indexserver = {'default': IndexServerConfig('default')}
        prefix = "indexserver"
        for line in reader.getlist(prefix):
            name, url = map(lambda x: x.strip(), line.split("=", 1))
            config.indexserver[name] = IndexServerConfig(name, url)
        # -i command line options override ini indexservers; name "ALL"
        # overrides every server at once
        override = False
        if config.option.indexurl:
            for urldef in config.option.indexurl:
                m = re.match(r"\W*(\w+)=(\S+)", urldef)
                if m is None:
                    url = urldef
                    name = "default"
                else:
                    name, url = m.groups()
                if not url:
                    url = None
                if name != "ALL":
                    config.indexserver[name].url = url
                else:
                    override = url
        # let ALL override all existing entries
        if override:
            for name in config.indexserver:
                config.indexserver[name] = IndexServerConfig(name, override)
        # global directories, each made available as a substitution
        reader.addsubstitutions(toxworkdir=config.toxworkdir)
        config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
        reader.addsubstitutions(distdir=config.distdir)
        config.distshare = reader.getpath("distshare", distshare_default)
        reader.addsubstitutions(distshare=config.distshare)
        config.sdistsrc = reader.getpath("sdistsrc", None)
        config.setupdir = reader.getpath("setupdir", "{toxinidir}")
        config.logdir = config.toxworkdir.join("log")
        config.envlist, all_envs = self._getenvdata(reader)
        # factors used in config or predefined
        known_factors = self._list_section_factors("testenv")
        known_factors.update(default_factors)
        known_factors.add("python")
        # factors stated in config envlist
        stated_envlist = reader.getstring("envlist", replace=False)
        if stated_envlist:
            for env in _split_env(stated_envlist):
                known_factors.update(env.split('-'))
        # configure testenvs: an env needs an explicit section or be fully
        # composed of known factors
        for name in all_envs:
            section = testenvprefix + name
            factors = set(name.split('-'))
            if section in self._cfg or factors <= known_factors:
                config.envconfigs[name] = \
                    self.make_envconfig(name, section, reader._subs, config)
        # skip sdist by default when every active env uses develop installs
        all_develop = all(name in config.envconfigs
                          and config.envconfigs[name].usedevelop
                          for name in config.envlist)
        config.skipsdist = reader.getbool("skipsdist", all_develop)

    def _list_section_factors(self, section):
        """Collect factor names used in conditional lines of *section*."""
        factors = set()
        if section in self._cfg:
            for _, value in self._cfg[section].items():
                # conditional settings look like "factor1-factor2: value"
                exprs = re.findall(r'^([\w{}\.,-]+)\:\s+', value, re.M)
                factors.update(*mapcat(_split_factor_expr, exprs))
        return factors

    def make_envconfig(self, name, section, subs, config):
        """Create and populate a TestenvConfig for the testenv *name*."""
        factors = set(name.split('-'))
        reader = SectionReader(section, self._cfg, fallbacksections=["testenv"],
                               factors=factors)
        vc = TestenvConfig(config=config, envname=name, factors=factors, reader=reader)
        reader.addsubstitutions(**subs)
        reader.addsubstitutions(envname=name)
        for env_attr in config._testenv_attr:
            atype = env_attr.type
            if atype in ("bool", "path", "string", "dict", "argv", "argvlist"):
                meth = getattr(reader, "get" + atype)
                res = meth(env_attr.name, env_attr.default)
            elif atype == "space-separated-list":
                res = reader.getlist(env_attr.name, sep=" ")
            elif atype == "line-list":
                res = reader.getlist(env_attr.name, sep="\n")
            else:
                raise ValueError("unknown type %r" % (atype,))
            if env_attr.postprocess:
                res = env_attr.postprocess(testenv_config=vc, value=res)
            setattr(vc, env_attr.name, res)
            if atype == "path":
                # make path attributes available to later substitutions
                reader.addsubstitutions(**{env_attr.name: res})
            if env_attr.name == "install_command":
                reader.addsubstitutions(envbindir=vc.envbindir, envpython=vc.envpython,
                                        envsitepackagesdir=vc.envsitepackagesdir)
        return vc

    def _getenvdata(self, reader):
        """Determine the active envlist and the set of all known envs."""
        # precedence: -e option > TOXENV env var > ini "envlist"
        envstr = self.config.option.env \
            or os.environ.get("TOXENV") \
            or reader.getstring("envlist", replace=False) \
            or []
        envlist = _split_env(envstr)
        # collect section envs
        all_envs = set(envlist) - set(["ALL"])
        for section in self._cfg:
            if section.name.startswith(testenvprefix):
                all_envs.add(section.name[len(testenvprefix):])
        if not all_envs:
            all_envs.add("python")
        if not envlist or "ALL" in envlist:
            envlist = sorted(all_envs)
        return envlist, all_envs
def _split_env(env):
    """Normalise an envlist specification into a list of env names.

    *env* is a list when "-e" was used multiple times (action="append");
    otherwise it is a comma- and/or newline-separated string.
    """
    if not isinstance(env, list):
        # newlines act like commas in an envlist string
        env = [env.replace('\n', ',')]
    return mapcat(_expand_envstr, env)
def _split_factor_expr(expr):
    """Expand a factor expression into a list of factor-name sets."""
    return [set(partial.split('-')) for partial in _expand_envstr(expr)]
def _expand_envstr(envstr):
    """Expand brace groups: "py{27,34}-{a,b}" -> all four combinations."""
    # split by commas not in groups
    tokens = re.split(r'((?:\{[^}]+\})+)|,', envstr)
    envlist = [''.join(group).strip()
               for nonempty, group in itertools.groupby(tokens, key=bool)
               if nonempty]

    def expand(env):
        parts = [tok.split(',') for tok in re.split(r'\{([^}]+)\}', env)]
        return [''.join(combo) for combo in itertools.product(*parts)]

    return mapcat(expand, envlist)
def mapcat(f, seq):
    """Map *f* over *seq* and concatenate the resulting iterables into a list."""
    result = []
    for item in seq:
        result.extend(f(item))
    return result
class DepConfig:
    """A dependency plus the (optional) index server to install it from."""

    def __init__(self, name, indexserver=None):
        self.name = name
        self.indexserver = indexserver

    def __str__(self):
        server = self.indexserver
        if server:
            if server.name == "default":
                return self.name
            # non-default servers keep the :name: prefix notation
            return ":%s:%s" % (server.name, self.name)
        return str(self.name)

    __repr__ = __str__
class IndexServerConfig:
    """Named package index server with an optional URL."""

    def __init__(self, name, url=None):
        self.url = url
        self.name = name
#: Check value matches substitution form
#: of referencing value from other section. E.g. {[base]commands}
# NOTE: raw string — the pattern contains \[, \s and \S which are invalid
# escape sequences in a plain string literal (DeprecationWarning on py3).
is_section_substitution = re.compile(r"{\[[^{}\s]+\]\S+?}").match
#: General {sub_type:value} / {value} substitution reference; sub_type
#: selects special handling (e.g. "env"), value is the substitution key.
RE_ITEM_REF = re.compile(
    r'''
    (?<!\\)[{]
    (?:(?P<sub_type>[^[:{}]+):)? # optional sub_type for special rules
    (?P<substitution_value>[^{}]*)  # substitution key
    [}]
    ''',
    re.VERBOSE)
class SectionReader:
    """Read values from one ini section, with fallback sections, factor
    filtering and ``{...}`` substitution support.

    :param str section_name: primary section to look values up in.
    :param cfgparser: mapping-like ini config (section -> key -> raw string).
    :param fallbacksections: extra sections consulted when a key is missing.
    :param factors: active factor names used to filter conditional lines.
    """

    def __init__(self, section_name, cfgparser, fallbacksections=None, factors=()):
        self.section_name = section_name
        self._cfg = cfgparser
        self.fallbacksections = fallbacksections or []
        self.factors = factors
        # substitution key -> value mapping used for {key} references
        self._subs = {}
        # stack of (section, key) pairs; guards against recursive substitution
        self._subststack = []

    def addsubstitutions(self, _posargs=None, **kw):
        """Register substitution values; positional args are kept separately."""
        self._subs.update(kw)
        if _posargs:
            self.posargs = _posargs

    def getpath(self, name, defaultpath):
        """Return the value of *name* as a path anchored at toxinidir."""
        toxinidir = self._subs['toxinidir']
        path = self.getstring(name, defaultpath)
        if path is not None:
            return toxinidir.join(path, abs=True)

    def getlist(self, name, sep="\n"):
        """Return the value of *name* split on *sep*; empty entries dropped."""
        s = self.getstring(name, None)
        if s is None:
            return []
        return [x.strip() for x in s.split(sep) if x.strip()]

    def getdict(self, name, default=None, sep="\n"):
        """Parse the value of *name* as KEY=VALUE lines into a dict."""
        s = self.getstring(name, None)
        if s is None:
            return default or {}
        value = {}
        for line in s.split(sep):
            if line.strip():
                key, rest = line.split('=', 1)
                value[key.strip()] = rest.strip()
        return value

    def getbool(self, name, default=None):
        """Return the value of *name* as a bool.

        :raise KeyError: when neither a value nor a default is available.
        :raise tox.exception.ConfigError: when the value is not True/False.
        """
        s = self.getstring(name, default)
        if not s:
            s = default
        if s is None:
            raise KeyError("no config value [%s] %s found" % (
                self.section_name, name))
        if not isinstance(s, bool):
            if s.lower() == "true":
                s = True
            elif s.lower() == "false":
                s = False
            else:
                # bugfix: the %r placeholder previously had no argument, so
                # the raw format string leaked into the error message
                raise tox.exception.ConfigError(
                    "boolean value %r needs to be 'True' or 'False'" % (s,))
        return s

    def getargvlist(self, name, default=""):
        """Return the value of *name* parsed as a list of argv lists."""
        s = self.getstring(name, default, replace=False)
        return _ArgvlistReader.getargvlist(self, s)

    def getargv(self, name, default=""):
        """Return the first argv produced by :meth:`getargvlist`."""
        return self.getargvlist(name, default)[0]

    def getstring(self, name, default=None, replace=True):
        """Look *name* up in the section and fallbacks, apply factor
        filtering, and (when *replace* is true) expand substitutions."""
        x = None
        for s in [self.section_name] + self.fallbacksections:
            try:
                x = self._cfg[s][name]
                break
            except KeyError:
                continue
        if x is None:
            x = default
        else:
            x = self._apply_factors(x)
        if replace and x and hasattr(x, 'replace'):
            # track the key currently being resolved to detect recursion
            self._subststack.append((self.section_name, name))
            try:
                x = self._replace(x)
            finally:
                assert self._subststack.pop() == (self.section_name, name)
        # print "getstring", self.section_name, name, "returned", repr(x)
        return x

    def _apply_factors(self, s):
        """Keep only lines whose factor condition matches active factors."""
        def factor_line(line):
            m = re.search(r'^([\w{}\.,-]+)\:\s+(.+)', line)
            if not m:
                return line
            expr, line = m.groups()
            if any(fs <= self.factors for fs in _split_factor_expr(expr)):
                return line

        lines = s.strip().splitlines()
        return '\n'.join(filter(None, map(factor_line, lines)))

    def _replace_env(self, match):
        """Expand a {env:NAME} or {env:NAME:default} substitution."""
        match_value = match.group('substitution_value')
        if not match_value:
            raise tox.exception.ConfigError(
                'env: requires an environment variable name')
        default = None
        envkey_split = match_value.split(':', 1)
        # bugfix: compare ints with "==" instead of identity ("is"); the
        # old code only worked because of CPython's small-int caching
        if len(envkey_split) == 2:
            envkey, default = envkey_split
        else:
            envkey = match_value
        if envkey not in os.environ and default is None:
            raise tox.exception.ConfigError(
                "substitution env:%r: unknown environment variable %r" %
                (envkey, envkey))
        return os.environ.get(envkey, default)

    def _substitute_from_other_section(self, key):
        """Expand a {[section]key} reference by reading the other section."""
        if key.startswith("[") and "]" in key:
            i = key.find("]")
            section, item = key[1:i], key[i + 1:]
            if section in self._cfg and item in self._cfg[section]:
                if (section, item) in self._subststack:
                    raise ValueError('%s already in %s' % (
                        (section, item), self._subststack))
                x = str(self._cfg[section][item])
                self._subststack.append((section, item))
                try:
                    return self._replace(x)
                finally:
                    self._subststack.pop()
        raise tox.exception.ConfigError(
            "substitution key %r not found" % key)

    def _replace_substitution(self, match):
        """Expand a plain {key} substitution from registered values."""
        sub_key = match.group('substitution_value')
        val = self._subs.get(sub_key, None)
        if val is None:
            val = self._substitute_from_other_section(sub_key)
        if py.builtin.callable(val):
            # lazily computed substitution values (e.g. envsitepackagesdir)
            val = val()
        return str(val)

    def _replace_match(self, match):
        """Dispatch one regex match to the right substitution handler."""
        g = match.groupdict()
        # special case: opts and packages. Leave {opts} and
        # {packages} intact, they are replaced manually in
        # _venv.VirtualEnv.run_install_command.
        sub_value = g['substitution_value']
        if sub_value in ('opts', 'packages'):
            return '{%s}' % sub_value
        handlers = {
            'env': self._replace_env,
            None: self._replace_substitution,
        }
        try:
            sub_type = g['sub_type']
        except KeyError:
            raise tox.exception.ConfigError(
                "Malformed substitution; no substitution type provided")
        try:
            handler = handlers[sub_type]
        except KeyError:
            raise tox.exception.ConfigError("No support for the %s substitution type" % sub_type)
        return handler(match)

    def _replace(self, x):
        """Expand all {...} substitutions found in *x*."""
        if '{' in x:
            return RE_ITEM_REF.sub(self._replace_match, x)
        return x
class _ArgvlistReader:
    @classmethod
    def getargvlist(cls, reader, value):
        """Parse ``commands`` argvlist multiline string.

        :param SectionReader reader: reader used to expand substitutions.
        :param str value: Content stored by key.

        :rtype: list[list[str]]
        :raise :class:`tox.exception.ConfigError`:
            line-continuation ends nowhere while resolving for specified section
        """
        commands = []
        current_command = ""
        for line in value.splitlines():
            line = line.rstrip()
            # strip trailing "#" comments
            i = line.find("#")
            if i != -1:
                line = line[:i].rstrip()
            if not line:
                continue
            # a trailing backslash continues the command on the next line
            if line.endswith("\\"):
                current_command += " " + line[:-1]
                continue
            current_command += line
            if is_section_substitution(current_command):
                # whole command is a {[section]key} reference: expand and
                # parse its content recursively
                replaced = reader._replace(current_command)
                commands.extend(cls.getargvlist(reader, replaced))
            else:
                commands.append(cls.processcommand(reader, current_command))
            current_command = ""
        else:
            # loop exhausted: a dangling continuation means a config error
            if current_command:
                raise tox.exception.ConfigError(
                    "line-continuation ends nowhere while resolving for [%s] %s" %
                    (reader.section_name, "commands"))
        return commands

    @classmethod
    def processcommand(cls, reader, command):
        """Expand substitutions in one command string and split it to argv."""
        posargs = getattr(reader, "posargs", None)
        # Iterate through each word of the command substituting as
        # appropriate to construct the new command string. This
        # string is then broken up into exec argv components using
        # shlex.
        newcommand = ""
        for word in CommandParser(command).words():
            if word == "{posargs}" or word == "[]":
                if posargs:
                    newcommand += " ".join(posargs)
                continue
            elif word.startswith("{posargs:") and word.endswith("}"):
                if posargs:
                    newcommand += " ".join(posargs)
                    continue
                else:
                    # no posargs given: fall back to the default inside {posargs:...}
                    word = word[9:-1]
            new_arg = ""
            new_word = reader._replace(word)
            # replace twice to resolve substitutions produced by the first pass
            new_word = reader._replace(new_word)
            new_arg += new_word
            newcommand += new_arg
        # Construct shlex object that will not escape any values,
        # use all values as is in argv.
        shlexer = shlex.shlex(newcommand, posix=True)
        shlexer.whitespace_split = True
        shlexer.escape = ''
        shlexer.commenters = ''
        argv = list(shlexer)
        return argv
class CommandParser(object):
    """Split a command string into words, keeping {...} substitutions and
    whitespace runs intact as separate words."""

    class State(object):
        def __init__(self):
            self.word = ''
            self.depth = 0          # nesting level of {...} substitutions
            self.yield_words = []

    def __init__(self, command):
        self.command = command

    def words(self):
        state = CommandParser.State()

        def word_ended():
            # a word boundary is reached when we switch between whitespace
            # and non-whitespace, enter an unescaped substitution, or have
            # just closed one at depth 0
            w = state.word
            if ch in string.whitespace and w and w[-1] not in string.whitespace:
                return True
            if ch == '{' and state.depth == 0 and not w.endswith('\\'):
                return True
            if state.depth == 0 and w and w[-1] == '}':
                return True
            if ch not in string.whitespace and w and w.strip() == '':
                return True
            return False

        def flush():
            pending, state.word = state.word, ''
            if pending:
                state.yield_words.append(pending)

        def flush_if_ended():
            if word_ended():
                flush()

        for ch in self.command:
            if ch in string.whitespace:
                # inside a substitution, whitespace never ends a word
                if state.depth == 0:
                    flush_if_ended()
                state.word += ch
            elif ch == '{':
                flush_if_ended()
                state.word += ch
                state.depth += 1
            elif ch == '}':
                state.word += ch
                state.depth -= 1
            else:
                flush_if_ended()
                state.word += ch
        if state.word.strip():
            flush()
        return state.yield_words
def getcontextname():
    """Return "jenkins" when running under Jenkins/Hudson CI, else None."""
    for envvar in ('JENKINS_URL', 'HUDSON_URL'):
        if envvar in os.environ:
            return 'jenkins'
    return None
| |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
    basestring  # succeeds on Python 2, where basestring is a builtin
except NameError:
    basestring = str  # Python 3 has no basestring: alias it to str
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
class bitforex(Exchange):
    """ccxt async exchange adapter for Bitforex (spot markets only).

    NOTE: this module is generated (see the header notice); behavioral
    changes belong in the ccxt source transpiler inputs, not here.
    """
    def describe(self):
        """Return the static exchange metadata merged over the base description."""
        return self.deep_extend(super(bitforex, self).describe(), {
            'id': 'bitforex',
            'name': 'Bitforex',
            'countries': ['CN'],
            'rateLimit': 500,  # https://github.com/ccxt/ccxt/issues/5054
            'version': 'v1',
            'has': {
                'CORS': None,
                'spot': True,
                'margin': False,
                'swap': None,  # has but unimplemented
                'future': False,
                'option': False,
                'cancelOrder': True,
                'createOrder': True,
                'fetchBalance': True,
                'fetchBorrowRate': False,
                'fetchBorrowRateHistories': False,
                'fetchBorrowRateHistory': False,
                'fetchBorrowRates': False,
                'fetchBorrowRatesPerSymbol': False,
                'fetchClosedOrders': True,
                'fetchMarkets': True,
                'fetchMyTrades': None,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchOrders': None,
                'fetchTicker': True,
                'fetchTickers': None,
                'fetchTrades': True,
            },
            'timeframes': {
                '1m': '1min',
                '5m': '5min',
                '15m': '15min',
                '30m': '30min',
                '1h': '1hour',
                '2h': '2hour',
                '4h': '4hour',
                '12h': '12hour',
                '1d': '1day',
                '1w': '1week',
                '1M': '1month',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/51840849/87295553-1160ec00-c50e-11ea-8ea0-df79276a9646.jpg',
                'api': 'https://api.bitforex.com',
                'www': 'https://www.bitforex.com',
                'doc': 'https://github.com/githubdev2020/API_Doc_en/wiki',
                'fees': 'https://help.bitforex.com/en_us/?cat=13',
                'referral': 'https://www.bitforex.com/en/invitationRegister?inviterId=1867438',
            },
            'api': {
                # numbers are rate-limit weights per endpoint
                'public': {
                    'get': {
                        'api/v1/market/symbols': 20,
                        'api/v1/market/ticker': 4,
                        'api/v1/market/depth': 4,
                        'api/v1/market/trades': 20,
                        'api/v1/market/kline': 20,
                    },
                },
                'private': {
                    'post': {
                        'api/v1/fund/mainAccount': 1,
                        'api/v1/fund/allAccount': 30,
                        'api/v1/trade/placeOrder': 1,
                        'api/v1/trade/placeMultiOrder': 10,
                        'api/v1/trade/cancelOrder': 1,
                        'api/v1/trade/cancelMultiOrder': 20,
                        'api/v1/trade/cancelAllOrder': 20,
                        'api/v1/trade/orderInfo': 1,
                        'api/v1/trade/multiOrderInfo': 10,
                        'api/v1/trade/orderInfos': 20,
                    },
                },
            },
            'fees': {
                'trading': {
                    'tierBased': False,
                    'percentage': True,
                    'maker': self.parse_number('0.001'),
                    'taker': self.parse_number('0.001'),
                },
                'funding': {
                    'tierBased': False,
                    'percentage': True,
                    'deposit': {},
                    'withdraw': {},
                },
            },
            # exchange-specific ticker symbols remapped to unique codes
            'commonCurrencies': {
                'BKC': 'Bank Coin',
                'CAPP': 'Crypto Application Token',
                'CREDIT': 'TerraCredit',
                'CTC': 'Culture Ticket Chain',
                'EWT': 'EcoWatt Token',
                'IQ': 'IQ.Cash',
                'MIR': 'MIR COIN',
                'NOIA': 'METANOIA',
                'TON': 'To The Moon',
            },
            # exchange error codes mapped to unified ccxt exceptions
            'exceptions': {
                '1000': OrderNotFound,  # {"code":"1000","success":false,"time":1643047898676,"message":"The order does not exist or the status is wrong"}
                '1003': BadSymbol,  # {"success":false,"code":"1003","message":"Param Invalid:param invalid -symbol:symbol error"}
                '1013': AuthenticationError,
                '1016': AuthenticationError,
                '1017': PermissionDenied,  # {"code":"1017","success":false,"time":1602670594367,"message":"IP not allow"}
                '1019': BadSymbol,  # {"code":"1019","success":false,"time":1607087743778,"message":"Symbol Invalid"}
                '3002': InsufficientFunds,
                '4002': InvalidOrder,  # {"success":false,"code":"4002","message":"Price unreasonable"}
                '4003': InvalidOrder,  # {"success":false,"code":"4003","message":"amount too small"}
                '4004': OrderNotFound,
                '10204': DDoSProtection,
            },
        })
    async def fetch_markets(self, params={}):
        """Fetch all spot markets and return them as unified market structures."""
        response = await self.publicGetApiV1MarketSymbols(params)
        #
        # {
        #     "data": [
        #         {
        #             "amountPrecision":4,
        #             "minOrderAmount":3.0E-4,
        #             "pricePrecision":2,
        #             "symbol":"coin-usdt-btc"
        #         },
        #         ...
        #     ]
        # }
        #
        data = response['data']
        result = []
        for i in range(0, len(data)):
            market = data[i]
            id = self.safe_string(market, 'symbol')
            # market id format is "coin-<quote>-<base>"
            symbolParts = id.split('-')
            baseId = symbolParts[2]
            quoteId = symbolParts[1]
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            result.append({
                'id': id,
                'symbol': base + '/' + quote,
                'base': base,
                'quote': quote,
                'settle': None,
                'baseId': baseId,
                'quoteId': quoteId,
                'settleId': None,
                'type': 'spot',
                'spot': True,
                'margin': False,
                'swap': False,
                'future': False,
                'option': False,
                'active': True,
                'contract': False,
                'linear': None,
                'inverse': None,
                'contractSize': None,
                'expiry': None,
                'expiryDateTime': None,
                'strike': None,
                'optionType': None,
                'precision': {
                    'amount': self.safe_integer(market, 'amountPrecision'),
                    'price': self.safe_integer(market, 'pricePrecision'),
                },
                'limits': {
                    'leverage': {
                        'min': None,
                        'max': None,
                    },
                    'amount': {
                        'min': self.safe_number(market, 'minOrderAmount'),
                        'max': None,
                    },
                    'price': {
                        'min': None,
                        'max': None,
                    },
                    'cost': {
                        'min': None,
                        'max': None,
                    },
                },
                'info': market,
            })
        return result
    def parse_trade(self, trade, market=None):
        """Convert a raw exchange trade dict into a unified trade structure."""
        #
        # fetchTrades(public) v1
        #
        #     {
        #         "price":57594.53,
        #         "amount":0.3172,
        #         "time":1637329685322,
        #         "direction":1,
        #         "tid":"1131019666"
        #     }
        #
        #     {
        #         "price":57591.33,
        #         "amount":0.002,
        #         "time":1637329685322,
        #         "direction":1,
        #         "tid":"1131019639"
        #     }
        #
        market = self.safe_market(None, market)
        timestamp = self.safe_integer(trade, 'time')
        id = self.safe_string(trade, 'tid')
        orderId = None
        priceString = self.safe_string(trade, 'price')
        amountString = self.safe_string(trade, 'amount')
        sideId = self.safe_integer(trade, 'direction')
        side = self.parse_side(sideId)
        return self.safe_trade({
            'info': trade,
            'id': id,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': market['symbol'],
            'type': None,
            'side': side,
            'price': priceString,
            'amount': amountString,
            'cost': None,
            'order': orderId,
            'fee': None,
            'takerOrMaker': None,
        }, market)
    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch recent public trades for a symbol. `since` is ignored by the
        exchange endpoint and only used to filter the parsed result."""
        await self.load_markets()
        request = {
            'symbol': self.market_id(symbol),
        }
        if limit is not None:
            request['size'] = limit
        market = self.market(symbol)
        response = await self.publicGetApiV1MarketTrades(self.extend(request, params))
        #
        # {
        #  "data":
        #      [
        #          {
        #              "price":57594.53,
        #              "amount":0.3172,
        #              "time":1637329685322,
        #              "direction":1,
        #              "tid":"1131019666"
        #          }
        #      ],
        #  "success": True,
        #  "time": 1637329688475
        # }
        #
        return self.parse_trades(response['data'], market, since, limit)
    def parse_balance(self, response):
        """Convert the raw allAccount response into a unified balance structure."""
        data = response['data']
        result = {'info': response}
        for i in range(0, len(data)):
            balance = data[i]
            currencyId = self.safe_string(balance, 'currency')
            code = self.safe_currency_code(currencyId)
            account = self.account()
            # 'frozen' = locked in open orders, 'active' = available, 'fix' = total
            account['used'] = self.safe_string(balance, 'frozen')
            account['free'] = self.safe_string(balance, 'active')
            account['total'] = self.safe_string(balance, 'fix')
            result[code] = account
        return self.safe_balance(result)
    async def fetch_balance(self, params={}):
        """Fetch all account balances (private endpoint)."""
        await self.load_markets()
        response = await self.privatePostApiV1FundAllAccount(params)
        return self.parse_balance(response)
    def parse_ticker(self, ticker, market=None):
        """Convert a raw ticker dict into a unified ticker structure."""
        #
        #     {
        #         "buy":7.04E-7,
        #         "date":1643371198598,
        #         "high":7.48E-7,
        #         "last":7.28E-7,
        #         "low":7.10E-7,
        #         "sell":7.54E-7,
        #         "vol":9877287.2874
        #     }
        #
        symbol = self.safe_symbol(None, market)
        timestamp = self.safe_integer(ticker, 'date')
        return self.safe_ticker({
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_string(ticker, 'high'),
            'low': self.safe_string(ticker, 'low'),
            'bid': self.safe_string(ticker, 'buy'),
            'bidVolume': None,
            'ask': self.safe_string(ticker, 'sell'),
            'askVolume': None,
            'vwap': None,
            'open': None,
            'close': self.safe_string(ticker, 'last'),
            'last': self.safe_string(ticker, 'last'),
            'previousClose': None,
            'change': None,
            'percentage': None,
            'average': None,
            'baseVolume': self.safe_string(ticker, 'vol'),
            'quoteVolume': None,
            'info': ticker,
        }, market, False)
    async def fetch_ticker(self, symbol, params={}):
        """Fetch the latest ticker for a single symbol."""
        await self.load_markets()
        market = self.markets[symbol]
        request = {
            'symbol': market['id'],
        }
        response = await self.publicGetApiV1MarketTicker(self.extend(request, params))
        ticker = self.safe_value(response, 'data')
        #
        #     {
        #         "data":{
        #             "buy":37082.83,
        #             "date":1643388686660,
        #             "high":37487.83,
        #             "last":37086.79,
        #             "low":35544.44,
        #             "sell":37090.52,
        #             "vol":690.9776
        #         },
        #         "success":true,
        #         "time":1643388686660
        #     }
        #
        return self.parse_ticker(ticker, market)
    def parse_ohlcv(self, ohlcv, market=None):
        """Convert a raw kline dict into the unified [t, o, h, l, c, v] list."""
        #
        #     {
        #         "close":0.02505143,
        #         "currencyVol":0,
        #         "high":0.02506422,
        #         "low":0.02505143,
        #         "open":0.02506095,
        #         "time":1591508940000,
        #         "vol":51.1869
        #     }
        #
        return [
            self.safe_integer(ohlcv, 'time'),
            self.safe_number(ohlcv, 'open'),
            self.safe_number(ohlcv, 'high'),
            self.safe_number(ohlcv, 'low'),
            self.safe_number(ohlcv, 'close'),
            self.safe_number(ohlcv, 'vol'),
        ]
    async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Fetch OHLCV candles. `since` is ignored by the endpoint and only
        used to filter the parsed result."""
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'symbol': market['id'],
            'ktype': self.timeframes[timeframe],
        }
        if limit is not None:
            request['size'] = limit  # default 1, max 600
        response = await self.publicGetApiV1MarketKline(self.extend(request, params))
        #
        #     {
        #         "data":[
        #             {"close":0.02505143,"currencyVol":0,"high":0.02506422,"low":0.02505143,"open":0.02506095,"time":1591508940000,"vol":51.1869},
        #             {"close":0.02503914,"currencyVol":0,"high":0.02506687,"low":0.02503914,"open":0.02505358,"time":1591509000000,"vol":9.1082},
        #             {"close":0.02505172,"currencyVol":0,"high":0.02507466,"low":0.02503895,"open":0.02506371,"time":1591509060000,"vol":63.7431},
        #         ],
        #         "success":true,
        #         "time":1591509427131
        #     }
        #
        data = self.safe_value(response, 'data', [])
        return self.parse_ohlcvs(data, market, timeframe, since, limit)
    async def fetch_order_book(self, symbol, limit=None, params={}):
        """Fetch the order book for a symbol, optionally limited in depth."""
        await self.load_markets()
        marketId = self.market_id(symbol)
        request = {
            'symbol': marketId,
        }
        if limit is not None:
            request['size'] = limit
        response = await self.publicGetApiV1MarketDepth(self.extend(request, params))
        data = self.safe_value(response, 'data')
        timestamp = self.safe_integer(response, 'time')
        return self.parse_order_book(data, symbol, timestamp, 'bids', 'asks', 'price', 'amount')
    def parse_order_status(self, status):
        """Map a numeric exchange order state to a unified status string."""
        statuses = {
            '0': 'open',
            '1': 'open',
            '2': 'closed',
            '3': 'canceled',
            '4': 'canceled',
        }
        # fall back to the raw value for unknown states
        return statuses[status] if (status in statuses) else status
    def parse_side(self, sideId):
        """Map the exchange direction code (1=buy, 2=sell) to a side string."""
        if sideId == 1:
            return 'buy'
        elif sideId == 2:
            return 'sell'
        else:
            return None
    def parse_order(self, order, market=None):
        """Convert a raw order dict into a unified order structure.

        NOTE(review): assumes `market` is always supplied by the caller —
        all call sites in this class pass it; a None market would raise here.
        """
        id = self.safe_string(order, 'orderId')
        timestamp = self.safe_number(order, 'createTime')
        lastTradeTimestamp = self.safe_number(order, 'lastTime')
        symbol = market['symbol']
        sideId = self.safe_integer(order, 'tradeType')
        side = self.parse_side(sideId)
        type = None
        price = self.safe_string(order, 'orderPrice')
        average = self.safe_string(order, 'avgPrice')
        amount = self.safe_string(order, 'orderAmount')
        filled = self.safe_string(order, 'dealAmount')
        status = self.parse_order_status(self.safe_string(order, 'orderState'))
        # fees are charged in the received currency: base for buys, quote for sells
        feeSide = 'base' if (side == 'buy') else 'quote'
        feeCurrency = market[feeSide]
        fee = {
            'cost': self.safe_number(order, 'tradeFee'),
            'currency': feeCurrency,
        }
        return self.safe_order({
            'info': order,
            'id': id,
            'clientOrderId': None,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': lastTradeTimestamp,
            'symbol': symbol,
            'type': type,
            'timeInForce': None,
            'postOnly': None,
            'side': side,
            'price': price,
            'stopPrice': None,
            'cost': None,
            'average': average,
            'amount': amount,
            'filled': filled,
            'remaining': None,
            'status': status,
            'fee': fee,
            'trades': None,
        }, market)
    async def fetch_order(self, id, symbol=None, params={}):
        """Fetch a single order by id; the exchange requires the symbol."""
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'symbol': self.market_id(symbol),
            'orderId': id,
        }
        response = await self.privatePostApiV1TradeOrderInfo(self.extend(request, params))
        order = self.parse_order(response['data'], market)
        return order
    async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch open orders for a symbol (exchange state=0)."""
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'symbol': self.market_id(symbol),
            'state': 0,
        }
        response = await self.privatePostApiV1TradeOrderInfos(self.extend(request, params))
        return self.parse_orders(response['data'], market, since, limit)
    async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch closed orders for a symbol (exchange state=1)."""
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'symbol': self.market_id(symbol),
            'state': 1,
        }
        response = await self.privatePostApiV1TradeOrderInfos(self.extend(request, params))
        return self.parse_orders(response['data'], market, since, limit)
    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place an order; returns the raw response plus the new order id.

        NOTE(review): `type` is not sent to the exchange — the endpoint only
        takes price/amount/tradeType, so this effectively places limit orders.
        """
        await self.load_markets()
        sideId = None
        if side == 'buy':
            sideId = 1
        elif side == 'sell':
            sideId = 2
        request = {
            'symbol': self.market_id(symbol),
            'price': price,
            'amount': amount,
            'tradeType': sideId,
        }
        response = await self.privatePostApiV1TradePlaceOrder(self.extend(request, params))
        data = response['data']
        return {
            'info': response,
            'id': self.safe_string(data, 'orderId'),
        }
    async def cancel_order(self, id, symbol=None, params={}):
        """Cancel an order by id; returns the raw result and a success flag."""
        await self.load_markets()
        request = {
            'orderId': id,
        }
        if symbol is not None:
            request['symbol'] = self.market_id(symbol)
        results = await self.privatePostApiV1TradeCancelOrder(self.extend(request, params))
        success = results['success']
        returnVal = {'info': results, 'success': success}
        return returnVal
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the signed (private) or plain (public) request.

        Private requests are form-encoded with accessKey + nonce and an
        HMAC-SHA256 `signData` computed over '/<path>?<payload>'.
        """
        url = self.urls['api'] + '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            self.check_required_credentials()
            payload = self.urlencode({'accessKey': self.apiKey})
            query['nonce'] = self.milliseconds()
            if query:
                # parameters must be sorted before signing
                payload += '&' + self.urlencode(self.keysort(query))
            # message = '/' + 'api/' + self.version + '/' + path + '?' + payload
            message = '/' + path + '?' + payload
            signature = self.hmac(self.encode(message), self.encode(self.secret))
            body = payload + '&signData=' + signature
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Raise a unified exception when the JSON body reports success=false."""
        if not isinstance(body, basestring):
            return  # fallback to default error handler
        if (body[0] == '{') or (body[0] == '['):
            feedback = self.id + ' ' + body
            success = self.safe_value(response, 'success')
            if success is not None:
                if not success:
                    code = self.safe_string(response, 'code')
                    self.throw_exactly_matched_exception(self.exceptions, code, feedback)
                    raise ExchangeError(feedback)
| |
import errno
import os
from functools import wraps
import re
import click
import sys
import logbook
import pandas as pd
from catalyst.marketplace.marketplace import Marketplace
from six import text_type
from catalyst.data import bundles as bundles_module
from catalyst.exchange.exchange_bundle import ExchangeBundle
from catalyst.exchange.utils.exchange_utils import delete_algo_folder
from catalyst.utils.cli import Date, Timestamp
from catalyst.utils.run_algo import _run, load_extensions
from catalyst.exchange.utils.bundle_utils import EXCHANGE_NAMES
from catalyst.utils.remote import remote_backtest, get_remote_status
try:
    __IPYTHON__  # defined by the IPython runtime when running inside it
except NameError:
    __IPYTHON__ = False  # plain CPython: mark IPython-only features off
@click.group()
@click.option(
    '-e',
    '--extension',
    multiple=True,
    help='File or module path to a catalyst extension to load.',
)
@click.option(
    '--strict-extensions/--non-strict-extensions',
    is_flag=True,
    help='If --strict-extensions is passed then catalyst will not run '
         'if it cannot load all of the specified extensions. If this is '
         'not passed or --non-strict-extensions is passed then the '
         'failure will be logged but execution will continue.',
)
@click.option(
    '--default-extension/--no-default-extension',
    is_flag=True,
    default=True,
    # NOTE(review): this help text describes the --no- variant; the flag
    # defaults to True (the default extension IS loaded).
    help="Don't load the default catalyst extension.py file "
         "in $CATALYST_HOME.",
)
@click.version_option()
def main(extension, strict_extensions, default_extension):
    """Top level catalyst entry point.
    """
    # install a logbook handler before performing any other operations
    logbook.StderrHandler().push_application()
    # load user extensions so they can register bundles/exchanges before
    # any subcommand runs
    load_extensions(
        default_extension,
        extension,
        strict_extensions,
        os.environ,
    )
def extract_option_object(option):
    """Convert a click.option call into a click.Option object.

    Parameters
    ----------
    option : decorator
        A click.option decorator.

    Returns
    -------
    option_object : click.Option
        The option object that this decorator will create.
    """
    def _placeholder():
        pass
    # Applying the decorator records the Option on the function's
    # __click_params__ list without registering a command.
    decorated = option(_placeholder)
    return decorated.__click_params__[0]
def ipython_only(option):
    """Mark that an option should only be exposed in IPython.

    Parameters
    ----------
    option : decorator
        A click.option decorator.

    Returns
    -------
    ipython_only_dec : decorator
        A decorator that correctly applies the argument even when not
        using IPython mode.
    """
    if __IPYTHON__:
        # Running under IPython: expose the option unchanged.
        return option

    # Outside IPython: drop the option from the CLI but keep the command
    # callback's signature satisfied by forcing the keyword to None.
    argname = extract_option_object(option).name

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            kwargs[argname] = None
            return func(*args, **kwargs)
        return wrapper

    return decorator
@main.command()
@click.option(
    '-f',
    '--algofile',
    default=None,
    type=click.File('r'),
    help='The file that contains the algorithm to run.',
)
@click.option(
    '-t',
    '--algotext',
    help='The algorithm script to run.',
)
@click.option(
    '-D',
    '--define',
    multiple=True,
    help="Define a name to be bound in the namespace before executing"
         " the algotext. For example '-Dname=value'. The value may be"
         " any python expression. These are evaluated in order so they"
         " may refer to previously defined names.",
)
@click.option(
    '--data-frequency',
    type=click.Choice({'daily', 'minute'}),
    default='daily',
    show_default=True,
    help='The data frequency of the simulation.',
)
@click.option(
    '--capital-base',
    type=float,
    show_default=True,
    help='The starting capital for the simulation.',
)
@click.option(
    '-b',
    '--bundle',
    default='poloniex',
    metavar='BUNDLE-NAME',
    show_default=True,
    help='The data bundle to use for the simulation.',
)
@click.option(
    '--bundle-timestamp',
    type=Timestamp(),
    default=pd.Timestamp.utcnow(),
    show_default=False,
    help='The date to lookup data on or before.\n'
         '[default: <current-time>]'
)
@click.option(
    '-s',
    '--start',
    type=Date(tz='utc', as_timestamp=True),
    help='The start date of the simulation.',
)
@click.option(
    '-e',
    '--end',
    type=Date(tz='utc', as_timestamp=True),
    help='The end date of the simulation.',
)
@click.option(
    '-o',
    '--output',
    default='-',
    metavar='FILENAME',
    show_default=True,
    help="The location to write the perf data. If this is '-' the perf"
         " will be written to stdout.",
)
@click.option(
    '--print-algo/--no-print-algo',
    is_flag=True,
    default=False,
    help='Print the algorithm to stdout.',
)
@ipython_only(click.option(
    '--local-namespace/--no-local-namespace',
    is_flag=True,
    default=None,
    help='Should the algorithm methods be resolved in the local namespace.'
))
@click.option(
    '-x',
    '--exchange-name',
    help='The name of the targeted exchange.',
)
@click.option(
    '-n',
    '--algo-namespace',
    help='A label assigned to the algorithm for data storage purposes.'
)
@click.option(
    '-c',
    '--quote-currency',
    help='The quote currency used to calculate statistics '
         '(e.g. usd, btc, eth).',
)
@click.pass_context
def run(ctx,
        algofile,
        algotext,
        define,
        data_frequency,
        capital_base,
        bundle,
        bundle_timestamp,
        start,
        end,
        output,
        print_algo,
        local_namespace,
        exchange_name,
        algo_namespace,
        quote_currency):
    """Run a backtest for the given algorithm.

    Validates the CLI arguments (exactly one algorithm source, a full
    date range, exchange, quote currency and capital base), then
    delegates to catalyst.utils.run_algo._run in backtest mode and
    writes the resulting performance data according to --output.
    """
    # exactly one of --algofile / --algotext must be provided
    if (algotext is not None) == (algofile is not None):
        ctx.fail(
            "must specify exactly one of '-f' / '--algofile' or"
            " '-t' / '--algotext'",
        )
    # check that the start and end dates are passed correctly
    if start is None and end is None:
        # check both at the same time to avoid the case where a user
        # does not pass either of these and then passes the first only
        # to be told they need to pass the second argument also
        ctx.fail(
            "must specify dates with '-s' / '--start' and '-e' / '--end'"
            " in backtest mode",
        )
    if start is None:
        ctx.fail("must specify a start date with '-s' / '--start'"
                 " in backtest mode")
    if end is None:
        ctx.fail("must specify an end date with '-e' / '--end'"
                 " in backtest mode")
    if exchange_name is None:
        ctx.fail("must specify an exchange name '-x'")
    if quote_currency is None:
        ctx.fail("must specify a quote currency with '-c' in backtest mode")
    if capital_base is None:
        ctx.fail("must specify a capital base with '--capital-base'")
    click.echo('Running in backtesting mode.', sys.stdout)
    perf = _run(
        initialize=None,
        handle_data=None,
        before_trading_start=None,
        analyze=None,
        algofile=algofile,
        algotext=algotext,
        defines=define,
        data_frequency=data_frequency,
        capital_base=capital_base,
        data=None,
        bundle=bundle,
        bundle_timestamp=bundle_timestamp,
        start=start,
        end=end,
        output=output,
        print_algo=print_algo,
        local_namespace=local_namespace,
        environ=os.environ,
        live=False,
        exchange=exchange_name,
        algo_namespace=algo_namespace,
        quote_currency=quote_currency,
        analyze_live=None,
        live_graph=False,
        simulate_orders=True,
        auth_aliases=None,
        stats_output=None,
    )
    # '--' is an internal sentinel meaning "return perf without writing"
    if output == '--':
        pass
    elif output == '-':
        click.echo(str(perf), sys.stdout)
    elif output != os.devnull:  # make the catalyst magic not write any data
        perf.to_pickle(output)
    return perf
def catalyst_magic(line, cell=None):
    """The catalyst IPython cell magic.

    Parameters
    ----------
    line : str
        The magic's argument line, parsed as CLI arguments for `run`.
    cell : str, optional
        The cell body used as the algorithm text; None in line-magic mode.
    """
    load_extensions(
        default=True,
        extensions=[],
        strict=True,
        environ=os.environ,
    )
    try:
        return run.main(
            # put our overrides at the start of the parameter list so that
            # users may pass values with higher precedence
            [
                '--algotext', cell,
                '--output', os.devnull,  # don't write the results by default
            ] + ([
                # these options are set when running in line magic mode
                # set a non None algo text to use the ipython user_ns
                '--algotext', '',
                '--local-namespace',
            ] if cell is None else []) + line.split(),
            '%s%%catalyst' % ((cell or '') and '%'),
            # don't use system exit and propogate errors to the caller
            standalone_mode=False,
        )
    except SystemExit as e:
        # https://github.com/mitsuhiko/click/pull/533
        # even in standalone_mode=False `--help` really wants to kill us ;_;
        if e.code:
            raise ValueError('main returned non-zero status code: %d' % e.code)
@main.command()
@click.option(
    '-f',
    '--algofile',
    default=None,
    type=click.File('r'),
    help='The file that contains the algorithm to run.',
)
@click.option(
    '--capital-base',
    type=float,
    show_default=True,
    help='The amount of capital (in quote_currency) allocated to trading.',
)
@click.option(
    '-t',
    '--algotext',
    help='The algorithm script to run.',
)
@click.option(
    '-D',
    '--define',
    multiple=True,
    help="Define a name to be bound in the namespace before executing"
         " the algotext. For example '-Dname=value'. The value may be"
         " any python expression. These are evaluated in order so they"
         " may refer to previously defined names.",
)
@click.option(
    '-o',
    '--output',
    default='-',
    metavar='FILENAME',
    show_default=True,
    help="The location to write the perf data. If this is '-' the perf will"
         " be written to stdout.",
)
@click.option(
    '--print-algo/--no-print-algo',
    is_flag=True,
    default=False,
    help='Print the algorithm to stdout.',
)
@ipython_only(click.option(
    '--local-namespace/--no-local-namespace',
    is_flag=True,
    default=None,
    help='Should the algorithm methods be resolved in the local namespace.'
))
@click.option(
    '-x',
    '--exchange-name',
    help='The name of the targeted exchange.',
)
@click.option(
    '-n',
    '--algo-namespace',
    help='A label assigned to the algorithm for data storage purposes.'
)
@click.option(
    '-c',
    '--quote-currency',
    help='The quote currency used to calculate statistics '
         '(e.g. usd, btc, eth).',
)
@click.option(
    '-s',
    '--start',
    type=Date(tz='utc', as_timestamp=False),
    help='An optional future start date at '
         'which the algorithm will start at live',
)
@click.option(
    '-e',
    '--end',
    type=Date(tz='utc', as_timestamp=False),
    help='An optional end date at which to stop the execution.',
)
@click.option(
    '--live-graph/--no-live-graph',
    is_flag=True,
    default=False,
    help='Display live graph.',
)
@click.option(
    '--simulate-orders/--no-simulate-orders',
    is_flag=True,
    default=True,
    help='Simulating orders enable the paper trading mode. No orders will be '
         'sent to the exchange unless set to false.',
)
@click.option(
    '--auth-aliases',
    default=None,
    help='Authentication file aliases for the specified exchanges. By default,'
         'each exchange uses the "auth.json" file in the exchange folder. '
         'Specifying an "auth2" alias would use "auth2.json". It should be '
         'specified like this: "[exchange_name],[alias],..." For example, '
         '"binance,auth2" or "binance,auth2,bittrex,auth2".',
)
@click.pass_context
def live(ctx,
         algofile,
         capital_base,
         algotext,
         define,
         output,
         print_algo,
         local_namespace,
         exchange_name,
         algo_namespace,
         quote_currency,
         start,
         end,
         live_graph,
         auth_aliases,
         simulate_orders):
    """Trade live with the given algorithm.

    Validates the CLI arguments (exactly one algorithm source, exchange,
    algo namespace, quote currency, capital base), then delegates to
    catalyst.utils.run_algo._run with live=True. Paper trading is the
    default (--simulate-orders).
    """
    # exactly one of --algofile / --algotext must be provided
    if (algotext is not None) == (algofile is not None):
        ctx.fail(
            "must specify exactly one of '-f' / '--algofile' or"
            " '-t' / '--algotext'",
        )
    if exchange_name is None:
        ctx.fail("must specify an exchange name '-x'")
    if algo_namespace is None:
        ctx.fail("must specify an algorithm name '-n' in live execution mode")
    if quote_currency is None:
        ctx.fail("must specify a quote currency '-c' in live execution mode")
    if capital_base is None:
        ctx.fail("must specify a capital base with '--capital-base'")
    if simulate_orders:
        click.echo('Running in paper trading mode.', sys.stdout)
    else:
        click.echo('Running in live trading mode.', sys.stdout)
    perf = _run(
        initialize=None,
        handle_data=None,
        before_trading_start=None,
        analyze=None,
        algofile=algofile,
        algotext=algotext,
        defines=define,
        data_frequency=None,
        capital_base=capital_base,
        data=None,
        bundle=None,
        bundle_timestamp=None,
        start=start,
        end=end,
        output=output,
        print_algo=print_algo,
        local_namespace=local_namespace,
        environ=os.environ,
        live=True,
        exchange=exchange_name,
        algo_namespace=algo_namespace,
        quote_currency=quote_currency,
        live_graph=live_graph,
        analyze_live=None,
        simulate_orders=simulate_orders,
        auth_aliases=auth_aliases,
        stats_output=None,
    )
    if output == '-':
        click.echo(str(perf), sys.stdout)
    elif output != os.devnull:  # make the catalyst magic not write any data
        perf.to_pickle(output)
    return perf
@main.command(name='remote-run')
@click.option(
    '-f',
    '--algofile',
    default=None,
    type=click.File('r'),
    help='The file that contains the algorithm to run.',
)
@click.option(
    '-t',
    '--algotext',
    help='The algorithm script to run.',
)
@click.option(
    '-D',
    '--define',
    multiple=True,
    help="Define a name to be bound in the namespace before executing"
         " the algotext. For example '-Dname=value'. The value may be"
         " any python expression. These are evaluated in order so they"
         " may refer to previously defined names.",
)
@click.option(
    '--data-frequency',
    type=click.Choice({'daily', 'minute'}),
    default='daily',
    show_default=True,
    help='The data frequency of the simulation.',
)
@click.option(
    '--capital-base',
    type=float,
    show_default=True,
    help='The starting capital for the simulation.',
)
@click.option(
    '-b',
    '--bundle',
    default='poloniex',
    metavar='BUNDLE-NAME',
    show_default=True,
    help='The data bundle to use for the simulation.',
)
@click.option(
    '--bundle-timestamp',
    type=Timestamp(),
    default=pd.Timestamp.utcnow(),
    show_default=False,
    help='The date to lookup data on or before.\n'
         '[default: <current-time>]'
)
@click.option(
    '-s',
    '--start',
    type=Date(tz='utc', as_timestamp=True),
    help='The start date of the simulation.',
)
@click.option(
    '-e',
    '--end',
    type=Date(tz='utc', as_timestamp=True),
    help='The end date of the simulation.',
)
@click.option(
    '-m',
    '--mail',
    show_default=True,
    help='an E-mail address to send the results to',
)
@click.option(
    '--print-algo/--no-print-algo',
    is_flag=True,
    default=False,
    help='Print the algorithm to stdout.',
)
@ipython_only(click.option(
    '--local-namespace/--no-local-namespace',
    is_flag=True,
    default=None,
    help='Should the algorithm methods be resolved in the local namespace.'
))
@click.option(
    '-x',
    '--exchange-name',
    help='The name of the targeted exchange.',
)
@click.option(
    '-n',
    '--algo-namespace',
    help='A label assigned to the algorithm for data storage purposes.'
)
@click.option(
    '-c',
    '--quote-currency',
    help='The quote currency used to calculate statistics '
         '(e.g. usd, btc, eth).',
)
@click.pass_context
def remote_run(ctx,
               algofile,
               algotext,
               define,
               data_frequency,
               capital_base,
               bundle,
               bundle_timestamp,
               start,
               end,
               mail,
               print_algo,
               local_namespace,
               exchange_name,
               algo_namespace,
               quote_currency):
    """Run a backtest for the given algorithm on the cloud.

    Performs the same validations as `run` plus a basic e-mail check,
    then submits the job via catalyst.utils.remote.remote_backtest and
    prints/returns the resulting algo id.
    """
    # exactly one of --algofile / --algotext must be provided
    if (algotext is not None) == (algofile is not None):
        ctx.fail(
            "must specify exactly one of '-f' / '--algofile' or"
            " '-t' / '--algotext'",
        )
    # check that the start and end dates are passed correctly
    if start is None and end is None:
        # check both at the same time to avoid the case where a user
        # does not pass either of these and then passes the first only
        # to be told they need to pass the second argument also
        ctx.fail(
            "must specify dates with '-s' / '--start' and '-e' / '--end'"
            " in backtest mode",
        )
    if start is None:
        ctx.fail("must specify a start date with '-s' / '--start'"
                 " in backtest mode")
    if end is None:
        ctx.fail("must specify an end date with '-e' / '--end'"
                 " in backtest mode")
    if exchange_name is None:
        ctx.fail("must specify an exchange name '-x'")
    if quote_currency is None:
        ctx.fail("must specify a quote currency with '-c' in backtest mode")
    if capital_base is None:
        ctx.fail("must specify a capital base with '--capital-base'")
    # lightweight sanity check only; not full RFC 5322 validation
    if mail is None or not re.match(r"[^@]+@[^@]+\.[^@]+", mail):
        ctx.fail("must specify a valid email with '--mail'")
    algo_id = remote_backtest(
        initialize=None,
        handle_data=None,
        before_trading_start=None,
        analyze=None,
        algofile=algofile,
        algotext=algotext,
        defines=define,
        data_frequency=data_frequency,
        capital_base=capital_base,
        data=None,
        bundle=bundle,
        bundle_timestamp=bundle_timestamp,
        start=start,
        end=end,
        output='--',
        print_algo=print_algo,
        local_namespace=local_namespace,
        environ=os.environ,
        live=False,
        exchange=exchange_name,
        algo_namespace=algo_namespace,
        quote_currency=quote_currency,
        analyze_live=None,
        live_graph=False,
        simulate_orders=True,
        auth_aliases=None,
        stats_output=None,
        mail=mail,
    )
    print(algo_id)
    return algo_id
@main.command(name='remote-status')
@click.option(
    '-i',
    '--algo-id',
    show_default=True,
    help='The algo id of your running algorithm on the cloud',
)
@click.option(
    '-d',
    '--data-output',
    default='-',
    metavar='FILENAME',
    show_default=True,
    help="The location to write the perf data, if it exists. If this is '-' "
         "the perf will be written to stdout.",
)
@click.option(
    '-l',
    '--log-output',
    default='-',
    metavar='FILENAME',
    show_default=True,
    help="The location to write the current logging. "
         "If this is '-' the log will be written to stdout.",
)
@click.pass_context
def remote_status(ctx, algo_id, data_output, log_output):
    """
    Get the status of a running algorithm on the cloud

    Writes the remote log to --log-output and, if performance data is
    available, writes it to --data-output; returns (status, perf) when
    the service responds with a tuple, otherwise the raw response.
    """
    if algo_id is None:
        ctx.fail("must specify an id of your running algorithm with '--id'")
    status_response = get_remote_status(
        algo_id=algo_id
    )
    # a tuple response means the job is known: (status, perf, log)
    if isinstance(status_response, tuple):
        status, perf, log = status_response
        if log_output == '-':
            click.echo(str(log), sys.stderr)
        elif log_output != os.devnull:
            with open(log_output, 'w') as file:
                file.write(log)
        if perf is not None:
            if data_output == '-':
                click.echo('the performance data is:\n' +
                           str(perf), sys.stdout)
            elif data_output != os.devnull:
                # make the catalyst magic not write any data
                perf.to_pickle(data_output)
        print(status)
        return status, perf
    else:
        print(status_response)
        return status_response
@main.command(name='ingest-exchange')
@click.option(
    '-x',
    '--exchange-name',
    help='The name of the exchange bundle to ingest.',
)
@click.option(
    '-f',
    '--data-frequency',
    type=click.Choice({'daily', 'minute', 'daily,minute', 'minute,daily'}),
    default='daily',
    show_default=True,
    help='The data frequency of the desired OHLCV bars.',
)
@click.option(
    '-s',
    '--start',
    default=None,
    type=Date(tz='utc', as_timestamp=True),
    help='The start date of the data range. (default: one year from end date)',
)
@click.option(
    '-e',
    '--end',
    default=None,
    type=Date(tz='utc', as_timestamp=True),
    help='The end date of the data range. (default: today)',
)
@click.option(
    '-i',
    '--include-symbols',
    default=None,
    help='A list of symbols to ingest (optional comma separated list)',
)
@click.option(
    '--exclude-symbols',
    default=None,
    help='A list of symbols to exclude from the ingestion '
         '(optional comma separated list)',
)
@click.option(
    '--csv',
    default=None,
    help='The path of a CSV file containing the data. If specified, start, '
         'end, include-symbols and exclude-symbols will be ignored. Instead,'
         'all data in the file will be ingested.',
)
@click.option(
    '--show-progress/--no-show-progress',
    default=True,
    help='Print progress information to the terminal.'
)
@click.option(
    '--verbose/--no-verbose`',
    default=False,
    help='Show a progress indicator for every currency pair.'
)
@click.option(
    '--validate/--no-validate`',
    default=False,
    help='Report potential anomalies found in data bundles.'
)
@click.pass_context
def ingest_exchange(ctx, exchange_name, data_frequency, start, end,
                    include_symbols, exclude_symbols, csv, show_progress,
                    verbose, validate):
    """
    Ingest data for the given exchange.

    Validates the exchange name (skipped when ingesting from a CSV) and
    delegates to ExchangeBundle.ingest with the selected symbol filters,
    date range and reporting flags.
    """
    if exchange_name is None:
        ctx.fail("must specify an exchange name '-x'")
    # a CSV ingestion bypasses the supported-exchange check
    if not csv and exchange_name not in EXCHANGE_NAMES:
        ctx.fail(
            "ingest-exchange does not support {}, "
            "please choose exchange from: {}".format(
                exchange_name,
                EXCHANGE_NAMES))
    exchange_bundle = ExchangeBundle(exchange_name)
    click.echo('Trying to ingest exchange bundle {}...'.format(exchange_name),
               sys.stdout)
    exchange_bundle.ingest(
        data_frequency=data_frequency,
        include_symbols=include_symbols,
        exclude_symbols=exclude_symbols,
        start=start,
        end=end,
        show_progress=show_progress,
        show_breakdown=verbose,
        show_report=validate,
        csv=csv
    )
@main.command(name='clean-algo')
@click.option(
    '-n',
    '--algo-namespace',
    # BUG FIX: help text previously read 'the algorithm to for which'.
    help='The label of the algorithm for which to clean the state.'
)
@click.pass_context
def clean_algo(ctx, algo_namespace):
    """Clean the stored state of a given algorithm.
    """
    click.echo(
        'Cleaning algo state: {}'.format(algo_namespace),
        sys.stdout
    )
    delete_algo_folder(algo_namespace)
    click.echo('Done', sys.stdout)
@main.command(name='clean-exchange')
@click.option(
    '-x',
    '--exchange-name',
    # BUG FIX: help text said 'to ingest' (copy-pasted from ingest-exchange);
    # this command removes bundles.
    help='The name of the exchange bundle to clean.',
)
@click.option(
    '-f',
    '--data-frequency',
    type=click.Choice({'daily', 'minute'}),
    default=None,
    help='The bundle data frequency to remove. If not specified, it will '
         'remove both daily and minute bundles.',
)
@click.pass_context
def clean_exchange(ctx, exchange_name, data_frequency):
    """Clean up bundles from 'ingest-exchange'.
    """
    if exchange_name is None:
        ctx.fail("must specify an exchange name '-x'")

    exchange_bundle = ExchangeBundle(exchange_name)
    click.echo('Cleaning exchange bundle {}...'.format(exchange_name),
               sys.stdout)
    exchange_bundle.clean(
        data_frequency=data_frequency,
    )
    click.echo('Done', sys.stdout)
@main.command()
@click.option(
    '-b',
    '--bundle',
    metavar='BUNDLE-NAME',
    default=None,
    show_default=False,
    help='The data bundle to ingest.',
)
@click.option(
    '-x',
    '--exchange-name',
    help='The name of the exchange bundle to ingest.',
)
@click.option(
    '-c',
    '--compile-locally',
    is_flag=True,
    default=False,
    help='Download dataset from source and compile bundle locally.',
)
@click.option(
    '--assets-version',
    type=int,
    multiple=True,
    help='Version of the assets db to which to downgrade.',
)
@click.option(
    '--show-progress/--no-show-progress',
    default=True,
    help='Print progress information to the terminal.'
)
@click.pass_context
def ingest(ctx, bundle, exchange_name, compile_locally, assets_version,
           show_progress):
    """Ingest the data for the given bundle.
    """
    # Stamp this ingestion with the current UTC time, then hand everything
    # off to the bundles machinery.
    ingestion_time = pd.Timestamp.utcnow()
    bundles_module.ingest(
        bundle,
        os.environ,
        ingestion_time,
        assets_version,
        show_progress,
        compile_locally,
    )
@main.command()
@click.option(
    '-b',
    '--bundle',
    default='poloniex',
    metavar='BUNDLE-NAME',
    show_default=True,
    help='The data bundle to clean.',
)
@click.option(
    '-x',
    '--exchange_name',
    metavar='EXCHANGE-NAME',
    show_default=True,
    help='The exchange bundle name to clean.',
)
@click.option(
    '-e',
    '--before',
    type=Timestamp(),
    help='Clear all data before TIMESTAMP.'
    ' This may not be passed with -k / --keep-last',
)
@click.option(
    '-a',
    '--after',
    type=Timestamp(),
    help='Clear all data after TIMESTAMP'
    ' This may not be passed with -k / --keep-last',
)
@click.option(
    '-k',
    '--keep-last',
    type=int,
    metavar='N',
    help='Clear all but the last N downloads.'
    ' This may not be passed with -e / --before or -a / --after',
)
def clean(bundle, exchange_name, before, after, keep_last):
    """Clean up bundles from 'ingest'.
    """
    # BUG FIX: '--exchange_name' is declared above, so click passes it as a
    # keyword argument; the previous signature omitted it, which raised
    # TypeError on every invocation of this command. The parameter is
    # accepted here (currently unused by bundles_module.clean).
    bundles_module.clean(
        bundle,
        before,
        after,
        keep_last,
    )
@main.command()
def bundles():
    """List all of the available data bundles.
    """
    for name in sorted(bundles_module.bundles):
        # bundles whose name starts with '.' are test fixtures; hide them
        if name.startswith('.'):
            continue

        try:
            timestamps = [
                text_type(ing)
                for ing in bundles_module.ingestions_for_bundle(name)
            ]
        except OSError as err:
            # A missing ingestion directory simply means "never ingested";
            # any other OS error is a real failure.
            if err.errno != errno.ENOENT:
                raise
            timestamps = []

        # Whether the directory was absent or empty, print a single
        # placeholder row so every bundle appears in the listing.
        for stamp in timestamps or ["<no ingestions>"]:
            click.echo("%s %s" % (name, stamp), sys.stdout)
@main.group()
@click.pass_context
def marketplace(ctx):
    """Access the Enigma Data Marketplace to:\n
    - Register and Publish new datasets (seller-side)\n
    - Subscribe and Ingest premium datasets (buyer-side)\n
    """
    # Pure command group: no shared setup is performed here; each subcommand
    # constructs its own Marketplace client.
    pass
@marketplace.command()
@click.pass_context
def ls(ctx):
    """List all available datasets.
    """
    click.echo('Listing of available data sources on the marketplace:',
               sys.stdout)
    # The Marketplace client does the fetching and printing.
    Marketplace().list()
@marketplace.command()
@click.option(
    '--dataset',
    default=None,
    help='The name of the dataset to ingest from the Data Marketplace.',
)
@click.pass_context
def subscribe(ctx, dataset):
    """Subscribe to an existing dataset.
    """
    # Subscription (including any payment flow) is handled by the client.
    Marketplace().subscribe(dataset)
@marketplace.command()  # noqa: F811
@click.option(
    '--dataset',
    default=None,
    help='The name of the dataset to ingest from the Data Marketplace.',
)
@click.option(
    '-f',
    '--data-frequency',
    type=click.Choice({'daily', 'minute', 'daily,minute', 'minute,daily'}),
    default='daily',
    show_default=True,
    help='The data frequency of the desired OHLCV bars.',
)
@click.option(
    '-s',
    '--start',
    default=None,
    type=Date(tz='utc', as_timestamp=True),
    help='The start date of the data range. (default: one year from end date)',
)
@click.option(
    '-e',
    '--end',
    default=None,
    type=Date(tz='utc', as_timestamp=True),
    help='The end date of the data range. (default: today)',
)
@click.pass_context
def ingest(ctx, dataset, data_frequency, start, end):
    """Ingest a dataset (requires subscription).
    """
    # The subscription check happens inside the Marketplace client.
    Marketplace().ingest(dataset, data_frequency, start, end)
@marketplace.command()  # noqa: F811
@click.option(
    '--dataset',
    default=None,
    # BUG FIX: help said 'to ingest' (copy-pasted from the ingest command);
    # this command removes local data.
    help='The name of the dataset to clean from the Data Marketplace.',
)
@click.pass_context
def clean(ctx, dataset):
    """Clean/Remove local data for a given dataset.
    """
    marketplace = Marketplace()
    marketplace.clean(dataset)
@marketplace.command()
@click.pass_context
def register(ctx):
    """Register a new dataset.
    """
    # Registration is interactive; the client drives the prompts.
    Marketplace().register()
@marketplace.command()
@click.option(
    '--dataset',
    default=None,
    # BUG FIX: help said 'to ingest' (copy-pasted from the ingest command);
    # this command only queries the withdrawable amount.
    help='The name of the dataset to get the withdraw amount for.',
)
@click.pass_context
def get_withdraw_amount(ctx, dataset):
    """Get withdraw amount owner is entitled to.
    """
    marketplace = Marketplace()
    marketplace.get_withdraw_amount(dataset)
@marketplace.command()
@click.option(
    '--dataset',
    default=None,
    # BUG FIX: help said 'to ingest' (copy-pasted from the ingest command);
    # this command withdraws funds.
    help='The name of the dataset to withdraw funds for.',
)
@click.pass_context
def withdraw(ctx, dataset):
    """Withdraw amount you are entitled to.
    """
    marketplace = Marketplace()
    marketplace.withdraw(dataset)
@marketplace.command()
@click.option(
    '--dataset',
    default=None,
    help='The name of the Marketplace dataset to publish data for.',
)
@click.option(
    '--datadir',
    default=None,
    help='The folder that contains the CSV data files to publish.',
)
@click.option(
    '--watch/--no-watch',
    is_flag=True,
    default=False,
    help='Whether to watch the datadir for live data.',
)
@click.pass_context
def publish(ctx, dataset, datadir, watch):
    """Publish data for a registered dataset.
    """
    # Build the client first (matches the original call order), then
    # validate that both required options were supplied.
    mkt = Marketplace()

    if dataset is None:
        ctx.fail("must specify a dataset to publish data for "
                 " with '--dataset'\n")

    if datadir is None:
        ctx.fail("must specify a datadir where to find the files to publish "
                 " with '--datadir'\n")

    mkt.publish(dataset, datadir, watch)
# Script entry point: dispatch to the click command group.
if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
'''
Authors: Gert Mulder, Tim Hessels
UNESCO-IHE 2016
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Products/ETref
'''
# import general python modules
import os
import gdal
import numpy as np
import pandas as pd
import subprocess
import osr
import netCDF4
import glob
# import WA+ modules
from wa.General import data_conversions as DC
from wa.General import raster_conversions as RC
from wa.Products.ETref.SlopeInfluence_ETref import SlopeInfluence
def CollectLANDSAF(SourceLANDSAF, Dir, Startdate, Enddate, latlim, lonlim):
    """
    This function collects and clips LANDSAF data, then derives the net and
    clear-sky shortwave radiation maps on the DEM grid for every day in the
    requested period.
    Keyword arguments:
    SourceLANDSAF -- 'C:/' path to the LANDSAF source data (The directory includes SIS and SID)
    Dir -- 'C:/' path to the WA map
    Startdate -- 'yyyy-mm-dd'
    Enddate -- 'yyyy-mm-dd'
    latlim -- [ymin, ymax] (values must be between -60 and 60)
    lonlim -- [xmin, xmax] (values must be between -180 and 180)
    """
    # Make an array of the days of which the ET is taken
    Dates = pd.date_range(Startdate,Enddate,freq = 'D')
    # Create output directories for the clipped SIS and SID rasters.
    SISdir = os.path.join(Dir,'Landsaf_Clipped','SIS')
    if os.path.exists(SISdir) is False:
        os.makedirs(SISdir)
    SIDdir= os.path.join(Dir,'Landsaf_Clipped','SID')
    if os.path.exists(SIDdir) is False:
        os.makedirs(SIDdir)
    # Convert/clip the raw LANDSAF nc files into daily GeoTIFFs first; the
    # loop below reprojects those files onto the DEM grid.
    ShortwaveBasin(SourceLANDSAF, Dir, latlim, lonlim, Dates=[Startdate,Enddate])
    DEMmap_str=os.path.join(Dir,'HydroSHED','DEM','DEM_HydroShed_m_3s.tif')
    geo_out, proj, size_X, size_Y = RC.Open_array_info(DEMmap_str)
    # Open DEM map and clamp negative (below-sea-level/nodata) cells to zero.
    demmap = RC.Open_tiff_array(DEMmap_str)
    demmap[demmap<0]=0
    # Build cell-center lat/lon arrays from the geotransform
    # (offset by half a pixel: dlat is negative for north-up rasters).
    dlat = geo_out[5]
    dlon = geo_out[1]
    lat = geo_out[3] + (np.arange(size_Y)+0.5)*dlat
    lon = geo_out[0] + (np.arange(size_X)+0.5)*dlon
    for date in Dates:
        # day of year, used by the solar-geometry computation
        day=date.dayofyear
        Horizontal, Sloping, sinb, sinb_hor, fi, slope, ID = SlopeInfluence(demmap,lat,lon,day)
        SIDname = os.path.join(SIDdir,'SAF_SID_Daily_W-m2_' + date.strftime('%Y-%m-%d') + '.tif')
        SISname = os.path.join(SISdir,'SAF_SIS_Daily_W-m2_' + date.strftime('%Y-%m-%d') + '.tif')
        # Reproject the direct irradiance (SID) onto the DEM grid.
        SIDdest = RC.reproject_dataset_example(SIDname,DEMmap_str,method = 3)
        SIDdata=SIDdest.GetRasterBand(1).ReadAsArray()
        # Reproject the global irradiance (SIS) onto the DEM grid.
        SISdest = RC.reproject_dataset_example(SISname,DEMmap_str,method = 3)
        SISdata=SISdest.GetRasterBand(1).ReadAsArray()
        # Combine direct (terrain-corrected by Sloping/Horizontal) and diffuse
        # components. NOTE(review): the '*86400/1e6' factor appears to apply
        # only to the SIS term because of operator precedence — confirm this
        # matches the intended unit conversion.
        Short_Wave_Net = SIDdata * (Sloping/Horizontal)+SISdata *86400/1e6
        # Clear-sky shortwave from the slope-corrected irradiance with an
        # elevation-dependent transmissivity (0.75 + 2e-5 * elevation).
        Short_Wave = Sloping
        Short_Wave_Clear = Short_Wave *(0.75 + demmap * 2 * 10**-5)
        # Create the output directories (once per run is enough, but the
        # checks are cheap).
        PathClear= os.path.join(Dir,'Landsaf_Clipped','Shortwave_Clear_Sky')
        if os.path.exists(PathClear) is False:
            os.makedirs(PathClear)
        PathNet= os.path.join(Dir,'Landsaf_Clipped','Shortwave_Net')
        if os.path.exists(PathNet) is False:
            os.makedirs(PathNet)
        # Build the output file names for the net and clear-sky maps.
        nameFileNet='ShortWave_Net_Daily_W-m2_' + date.strftime('%Y-%m-%d') + '.tif'
        nameNet= os.path.join(PathNet,nameFileNet)
        nameFileClear='ShortWave_Clear_Daily_W-m2_' + date.strftime('%Y-%m-%d') + '.tif'
        nameClear= os.path.join(PathClear,nameFileClear)
        # Save net and clear short wave radiation as GeoTIFFs on the DEM grid.
        DC.Save_as_tiff(nameNet, Short_Wave_Net, geo_out, proj)
        DC.Save_as_tiff(nameClear, Short_Wave_Clear, geo_out, proj)
    return
def ShortwaveBasin(SourceLANDSAF, Dir, latlim, lonlim, Dates = ['2000-01-01','2013-12-31']):
    """
    This function creates short wave maps based on the SIS and SID
    Keyword arguments:
    SourceLANDSAF -- 'C:/' path to the LANDSAF source data (The directory includes SIS and SID)
    Dir -- 'C:/' path to the WA map
    latlim -- [ymin, ymax] (values must be between -60 and 60)
    lonlim -- [xmin, xmax] (values must be between -180 and 180)
    Dates -- ['yyyy-mm-dd','yyyy-mm-dd']
    """
    # NOTE(review): the mutable default for Dates is safe here because it is
    # rebound (never mutated), but a tuple default would be more robust.
    # Produces shortwave radiation grids for a particular basin or particular bounds
    Types = ['SIS','SID']
    Dates = pd.date_range(Dates[0],Dates[1],freq='D')
    for Type in Types:
        for Date in Dates:
            SAFdir = os.path.join(SourceLANDSAF, Type)
            OutPath = os.path.join(Dir, 'Landsaf_Clipped', Type, 'SAF_' + Type + '_EuropeAfrica_day_W-m2_' + Date.strftime('%Y-%m-%d') + '.tif')
            if os.path.exists(SAFdir) is False:
                os.mkdir(SAFdir)
            # Convert the packed nc file for this day into a full-extent tiff.
            Transform(SourceLANDSAF, OutPath, Type, Dates = [Date.strftime('%Y-%m-%d'),Date.strftime('%Y-%m-%d')])
            # Locate gdal_translate via the WA_PATHS environment variable
            # (first ';'-separated entry is the GDAL binary directory).
            WA_env_paths = os.environ["WA_PATHS"].split(';')
            GDAL_env_path = WA_env_paths[0]
            GDAL_TRANSLATE_PATH = os.path.join(GDAL_env_path, 'gdal_translate.exe')
            # Define output name of the clipped daily raster.
            nameOut= os.path.join(Dir,'Landsaf_Clipped',Type,'SAF_' + Type + '_daily_W-m2_' + Date.strftime('%Y-%m-%d') + '.tif')
            # Build the gdal_translate command; the 0.1-degree margin pads the
            # clip window around the requested bounds.
            fullCmd = ' '.join(['"%s" -projwin %s %s %s %s' % (GDAL_TRANSLATE_PATH, lonlim[0]-0.1, latlim[1]+0.1, lonlim[1]+0.1, latlim[0]-0.1), '-of GTiff', OutPath, nameOut]) # -r {nearest}
            # Run the clip synchronously and wait for it to finish.
            process = subprocess.Popen(fullCmd)
            process.wait()
            print 'Landsaf ' + Type + ' file for ' + Date.strftime('%Y-%m-%d') + ' created.'
            # The full-extent intermediate is no longer needed once clipped.
            os.remove(OutPath)
def Transform(SourceLANDSAF, OutPath, Type, Dates = ['2000-01-01','2013-12-31']):
    """
    This function creates short wave maps based on the SIS and SID
    This function converts packed nc files to gtiff file of comparable file size
    Keyword arguments:
    SourceLANDSAF -- 'C:/' path to the LANDSAF source data (The directory includes SIS and SID)
    Dir -- 'C:/' path to the WA map
    latlim -- [ymin, ymax] (values must be between -60 and 60)
    lonlim -- [xmin, xmax] (values must be between -180 and 180)
    Dates -- ['yyyy-mm-dd','yyyy-mm-dd']
    """
    # Work inside the SIS/SID source directory so the globs below match.
    path = os.path.join(SourceLANDSAF,Type)
    os.chdir(path)
    Dates = pd.date_range(Dates[0],Dates[1],freq='D')
    # WGS84 geographic projection for the output GeoTIFFs.
    srs = osr.SpatialReference()
    srs.SetWellKnownGeogCS("WGS84")
    projection = srs.ExportToWkt()
    driver = gdal.GetDriverByName("GTiff")
    for Date in Dates:
        # Find the gzipped nc file for this day; SIS and SID use different
        # file-name prefixes.
        if Type == 'SIS':
            ZipFile = glob.glob('SISdm%s*.nc.gz' % Date.strftime('%Y%m%d'))[0]
            File = os.path.splitext(ZipFile)[0]
        elif Type == 'SID':
            ZipFile = glob.glob('*dm%s*.nc.gz' % Date.strftime('%Y%m%d'))[0]
            File = os.path.splitext(ZipFile)[0]
        # Unpack with 7z ('-aoa' = overwrite existing without prompting);
        # assumes 7z is on the PATH.
        fullCmd = ''.join("7z x %s -o%s -aoa" %(os.path.join(path,ZipFile),path))
        process = subprocess.Popen(fullCmd)
        process.wait()
        # Read the first time slice of the radiation variable from the nc file.
        NC = netCDF4.Dataset(File,'r+',format='NETCDF4')
        Data = NC[Type][0,:,:]
        # Upper-left corner of the grid; the 0.025 offset is half the 0.05
        # degree cell size (cell centers -> cell edges).
        lon = NC.variables['lon'][:][0] - 0.025
        lat = NC.variables['lat'][:][-1] + 0.025
        geotransform = [lon,0.05,0,lat,0,-0.05]
        dst_ds = driver.Create(OutPath, int(np.size(Data,1)), int(np.size(Data,0)), 1, gdal.GDT_Float32, ['COMPRESS=DEFLATE'])
        # set the reference info
        dst_ds.SetProjection(projection)
        dst_ds.SetGeoTransform(geotransform)
        dst_ds.GetRasterBand(1).SetNoDataValue(-1)
        # The nc grid is south-up; flip so the tiff matches the north-up
        # geotransform above.
        dst_ds.GetRasterBand(1).WriteArray(np.flipud(Data))
        NC.close()
        # Release GDAL/netCDF handles before deleting the unpacked file.
        del dst_ds, NC, Data
        os.remove(File)
| |
"""Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Balazs Kegl <balazs.kegl@gmail.com>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
import warnings
from inspect import signature
from contextlib import suppress
from math import log
import numpy as np
from scipy.special import expit
from scipy.special import xlogy
from scipy.optimize import fmin_bfgs
from .preprocessing import LabelEncoder
from .base import (BaseEstimator, ClassifierMixin, RegressorMixin, clone,
MetaEstimatorMixin)
from .preprocessing import label_binarize, LabelBinarizer
from .utils import check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted, check_consistent_length
from .utils.validation import _check_sample_weight
from .pipeline import Pipeline
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .model_selection import check_cv
from .utils.validation import _deprecate_positional_args
class CalibratedClassifierCV(ClassifierMixin,
                             MetaEstimatorMixin,
                             BaseEstimator):
    """Probability calibration with isotonic regression or logistic regression.
    This class uses cross-validation to both estimate the parameters of a
    classifier and subsequently calibrate a classifier. For each cv split it
    fits a copy of the base estimator to the training folds, and calibrates it
    using the testing fold. For prediction, predicted probabilities are
    averaged across these individual calibrated classifiers.
    Already fitted classifiers can be calibrated via the parameter cv="prefit".
    In this case, no cross-validation is used and all provided data is used
    for calibration. The user has to take care manually that data for model
    fitting and calibration are disjoint.
    The calibration is based on the :term:`decision_function` method of the
    `base_estimator` if it exists, else on :term:`predict_proba`.
    Read more in the :ref:`User Guide <calibration>`.
    Parameters
    ----------
    base_estimator : estimator instance, default=None
        The classifier whose output need to be calibrated to provide more
        accurate `predict_proba` outputs. The default classifier is
        a :class:`~sklearn.svm.LinearSVC`.
    method : {'sigmoid', 'isotonic'}, default='sigmoid'
        The method to use for calibration. Can be 'sigmoid' which
        corresponds to Platt's method (i.e. a logistic regression model) or
        'isotonic' which is a non-parametric approach. It is not advised to
        use isotonic calibration with too few calibration samples
        ``(<<1000)`` since it tends to overfit.
    cv : integer, cross-validation generator, iterable or "prefit", \
            default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 5-fold cross-validation,
        - integer, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`~sklearn.model_selection.StratifiedKFold` is used. If ``y`` is
        neither binary nor multiclass, :class:`~sklearn.model_selection.KFold`
        is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
        If "prefit" is passed, it is assumed that `base_estimator` has been
        fitted already and all data is used for calibration.
        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.
    Attributes
    ----------
    classes_ : array, shape (n_classes)
        The class labels.
    calibrated_classifiers_ : list (len() equal to cv or 1 if cv == "prefit")
        The list of calibrated classifiers, one for each cross-validation
        split, which has been fitted on training folds and
        calibrated on the testing fold.
    Examples
    --------
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.naive_bayes import GaussianNB
    >>> from sklearn.calibration import CalibratedClassifierCV
    >>> X, y = make_classification(n_samples=100, n_features=2,
    ...                            n_redundant=0, random_state=42)
    >>> base_clf = GaussianNB()
    >>> calibrated_clf = CalibratedClassifierCV(base_estimator=base_clf, cv=3)
    >>> calibrated_clf.fit(X, y)
    CalibratedClassifierCV(base_estimator=GaussianNB(), cv=3)
    >>> len(calibrated_clf.calibrated_classifiers_)
    3
    >>> calibrated_clf.predict_proba(X)[:5, :]
    array([[0.110..., 0.889...],
           [0.072..., 0.927...],
           [0.928..., 0.071...],
           [0.928..., 0.071...],
           [0.071..., 0.928...]])
    >>> from sklearn.model_selection import train_test_split
    >>> X, y = make_classification(n_samples=100, n_features=2,
    ...                            n_redundant=0, random_state=42)
    >>> X_train, X_calib, y_train, y_calib = train_test_split(
    ...        X, y, random_state=42
    ... )
    >>> base_clf = GaussianNB()
    >>> base_clf.fit(X_train, y_train)
    GaussianNB()
    >>> calibrated_clf = CalibratedClassifierCV(
    ...     base_estimator=base_clf,
    ...     cv="prefit"
    ... )
    >>> calibrated_clf.fit(X_calib, y_calib)
    CalibratedClassifierCV(base_estimator=GaussianNB(), cv='prefit')
    >>> len(calibrated_clf.calibrated_classifiers_)
    1
    >>> calibrated_clf.predict_proba([[-0.5, 0.5]])
    array([[0.936..., 0.063...]])
    References
    ----------
    .. [1] Obtaining calibrated probability estimates from decision trees
           and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
    .. [2] Transforming Classifier Scores into Accurate Multiclass
           Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
    .. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
           Regularized Likelihood Methods, J. Platt, (1999)
    .. [4] Predicting Good Probabilities with Supervised Learning,
           A. Niculescu-Mizil & R. Caruana, ICML 2005
    """
    @_deprecate_positional_args
    def __init__(self, base_estimator=None, *, method='sigmoid', cv=None):
        self.base_estimator = base_estimator
        self.method = method
        self.cv = cv
    def fit(self, X, y, sample_weight=None):
        """Fit the calibrated model
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,)
            Target values.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
        Returns
        -------
        self : object
            Returns an instance of self.
        """
        X, y = indexable(X, y)
        self.calibrated_classifiers_ = []
        if self.base_estimator is None:
            # we want all classifiers that don't expose a random_state
            # to be deterministic (and we don't want to expose this one).
            base_estimator = LinearSVC(random_state=0)
        else:
            base_estimator = self.base_estimator
        if self.cv == "prefit":
            # "prefit": the estimator is already fitted; all of (X, y) is
            # used purely for calibration.
            # Set `n_features_in_` attribute
            if isinstance(self.base_estimator, Pipeline):
                check_is_fitted(self.base_estimator[-1])
            else:
                check_is_fitted(self.base_estimator)
            with suppress(AttributeError):
                self.n_features_in_ = base_estimator.n_features_in_
            self.classes_ = self.base_estimator.classes_
            calibrated_classifier = _CalibratedClassifier(
                base_estimator, method=self.method)
            calibrated_classifier.fit(X, y, sample_weight)
            self.calibrated_classifiers_.append(calibrated_classifier)
        else:
            X, y = self._validate_data(
                X, y, accept_sparse=['csc', 'csr', 'coo'],
                force_all_finite=False, allow_nd=True
            )
            # Derive the class labels from y.
            le = LabelBinarizer().fit(y)
            self.classes_ = le.classes_
            # Check that each cross-validation fold can have at least one
            # example per class
            if isinstance(self.cv, int):
                n_folds = self.cv
            elif hasattr(self.cv, "n_splits"):
                n_folds = self.cv.n_splits
            else:
                n_folds = None
            if n_folds and np.any([np.sum(y == class_) < n_folds
                                   for class_ in self.classes_]):
                raise ValueError(f"Requesting {n_folds}-fold cross-validation "
                                 f"but provided less than {n_folds} examples "
                                 "for at least one class.")
            cv = check_cv(self.cv, y, classifier=True)
            fit_parameters = signature(base_estimator.fit).parameters
            base_estimator_supports_sw = "sample_weight" in fit_parameters
            if sample_weight is not None:
                sample_weight = _check_sample_weight(sample_weight, X)
                if not base_estimator_supports_sw:
                    estimator_name = type(base_estimator).__name__
                    warnings.warn("Since %s does not support sample_weights, "
                                  "sample weights will only be used for the "
                                  "calibration itself." % estimator_name)
            # One calibrated classifier per CV split: fit a clone on the
            # training fold, calibrate on the test fold; predictions are
            # averaged across splits in predict_proba.
            for train, test in cv.split(X, y):
                this_estimator = clone(base_estimator)
                if sample_weight is not None and base_estimator_supports_sw:
                    this_estimator.fit(X[train], y[train],
                                       sample_weight=sample_weight[train])
                else:
                    this_estimator.fit(X[train], y[train])
                calibrated_classifier = _CalibratedClassifier(
                    this_estimator, method=self.method, classes=self.classes_)
                sw = None if sample_weight is None else sample_weight[test]
                calibrated_classifier.fit(X[test], y[test], sample_weight=sw)
                self.calibrated_classifiers_.append(calibrated_classifier)
        return self
    def predict_proba(self, X):
        """Posterior probabilities of classification
        This function returns posterior probabilities of classification
        according to each class on an array of test vectors X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The samples.
        Returns
        -------
        C : array, shape (n_samples, n_classes)
            The predicted probas.
        """
        check_is_fitted(self)
        X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
                        force_all_finite=False)
        # Compute the arithmetic mean of the predictions of the calibrated
        # classifiers
        mean_proba = np.zeros((X.shape[0], len(self.classes_)))
        for calibrated_classifier in self.calibrated_classifiers_:
            proba = calibrated_classifier.predict_proba(X)
            mean_proba += proba
        mean_proba /= len(self.calibrated_classifiers_)
        return mean_proba
    def predict(self, X):
        """Predict the target of new samples. The predicted class is the
        class that has the highest probability, and can thus be different
        from the prediction of the uncalibrated classifier.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The samples.
        Returns
        -------
        C : array, shape (n_samples,)
            The predicted class.
        """
        check_is_fitted(self)
        # argmax over the averaged calibrated probabilities.
        return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
    def _more_tags(self):
        # Tag metadata consumed by sklearn's common estimator checks.
        return {
            '_xfail_checks': {
                'check_sample_weights_invariance(kind=zeros)':
                'zero sample_weight is not equivalent to removing samples',
            }
        }
class _CalibratedClassifier:
    """Probability calibration with isotonic regression or sigmoid.
    It assumes that base_estimator has already been fit, and trains the
    calibration on the input set of the fit function. Note that this class
    should not be used as an estimator directly. Use CalibratedClassifierCV
    with cv="prefit" instead.
    Parameters
    ----------
    base_estimator : instance BaseEstimator
        The classifier whose output decision function needs to be calibrated
        to offer more accurate predict_proba outputs. No default value since
        it has to be an already fitted estimator.
    method : 'sigmoid' | 'isotonic'
        The method to use for calibration. Can be 'sigmoid' which
        corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach based on isotonic regression.
    classes : array-like, shape (n_classes,), optional
            Contains unique classes used to fit the base estimator.
            if None, then classes is extracted from the given target values
            in fit().
    See also
    --------
    CalibratedClassifierCV
    References
    ----------
    .. [1] Obtaining calibrated probability estimates from decision trees
           and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
    .. [2] Transforming Classifier Scores into Accurate Multiclass
           Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
    .. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
           Regularized Likelihood Methods, J. Platt, (1999)
    .. [4] Predicting Good Probabilities with Supervised Learning,
           A. Niculescu-Mizil & R. Caruana, ICML 2005
    """
    @_deprecate_positional_args
    def __init__(self, base_estimator, *, method='sigmoid', classes=None):
        self.base_estimator = base_estimator
        self.method = method
        self.classes = classes
    def _preproc(self, X):
        """Return the uncalibrated scores for X and the positions of the
        base estimator's classes within ``self.classes_``.
        Uses decision_function when available, else predict_proba (keeping
        only the positive-class column in the binary case).
        """
        n_classes = len(self.classes_)
        if hasattr(self.base_estimator, "decision_function"):
            df = self.base_estimator.decision_function(X)
            if df.ndim == 1:
                # binary decision_function is 1d; make it a column
                df = df[:, np.newaxis]
        elif hasattr(self.base_estimator, "predict_proba"):
            df = self.base_estimator.predict_proba(X)
            if n_classes == 2:
                df = df[:, 1:]
        else:
            raise RuntimeError('classifier has no decision_function or '
                               'predict_proba method.')
        # Map the base estimator's classes onto indices in self.classes_
        # (the estimator may have seen only a subset of the classes).
        idx_pos_class = self.label_encoder_.\
            transform(self.base_estimator.classes_)
        return df, idx_pos_class
    def fit(self, X, y, sample_weight=None):
        """Calibrate the fitted model
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,)
            Target values.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
        Returns
        -------
        self : object
            Returns an instance of self.
        """
        self.label_encoder_ = LabelEncoder()
        if self.classes is None:
            self.label_encoder_.fit(y)
        else:
            self.label_encoder_.fit(self.classes)
        self.classes_ = self.label_encoder_.classes_
        Y = label_binarize(y, classes=self.classes_)
        df, idx_pos_class = self._preproc(X)
        self.calibrators_ = []
        # Fit one calibrator per class column (one-vs-rest).
        for k, this_df in zip(idx_pos_class, df.T):
            if self.method == 'isotonic':
                calibrator = IsotonicRegression(out_of_bounds='clip')
            elif self.method == 'sigmoid':
                calibrator = _SigmoidCalibration()
            else:
                raise ValueError('method should be "sigmoid" or '
                                 '"isotonic". Got %s.' % self.method)
            calibrator.fit(this_df, Y[:, k], sample_weight)
            self.calibrators_.append(calibrator)
        return self
    def predict_proba(self, X):
        """Posterior probabilities of classification
        This function returns posterior probabilities of classification
        according to each class on an array of test vectors X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The samples.
        Returns
        -------
        C : array, shape (n_samples, n_classes)
            The predicted probas. Can be exact zeros.
        """
        n_classes = len(self.classes_)
        proba = np.zeros((X.shape[0], n_classes))
        df, idx_pos_class = self._preproc(X)
        for k, this_df, calibrator in \
                zip(idx_pos_class, df.T, self.calibrators_):
            if n_classes == 2:
                # binary case: the single calibrator predicts the positive
                # class, which lives in column 1
                k += 1
            proba[:, k] = calibrator.predict(this_df)
        # Normalize the probabilities
        if n_classes == 2:
            proba[:, 0] = 1. - proba[:, 1]
        else:
            proba /= np.sum(proba, axis=1)[:, np.newaxis]
        # XXX : for some reason all probas can be 0
        proba[np.isnan(proba)] = 1. / n_classes
        # Deal with cases where the predicted probability minimally exceeds 1.0
        proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
        return proba
def _sigmoid_calibration(df, y, sample_weight=None):
    """Probability Calibration with sigmoid method (Platt 2000)
    Parameters
    ----------
    df : ndarray, shape (n_samples,)
        The decision function or predict proba for the samples.
    y : ndarray, shape (n_samples,)
        The targets.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights. If None, then samples are equally weighted.
    Returns
    -------
    a : float
        The slope.
    b : float
        The intercept.
    References
    ----------
    Platt, "Probabilistic Outputs for Support Vector Machines"
    """
    F = column_or_1d(df)  # F follows Platt's notation
    y = column_or_1d(y)

    # Bayesian priors (see Platt end of section 2.2): counts of negative
    # and positive examples.
    prior0 = float(np.sum(y <= 0))
    prior1 = y.shape[0] - prior0

    # Smoothed targets instead of hard 0/1 labels, per Platt.
    T = np.where(y > 0,
                 (prior1 + 1.) / (prior1 + 2.),
                 1. / (prior0 + 2.))
    T1 = 1. - T

    def objective(AB):
        # Negative log-likelihood with P = sigmoid(-(A * F + B))
        # (from Platt, beginning of Section 2.2).
        P = expit(-(AB[0] * F + AB[1]))
        loss = -(xlogy(T, P) + xlogy(T1, 1. - P))
        if sample_weight is None:
            return loss.sum()
        return (sample_weight * loss).sum()

    def grad(AB):
        # Analytic gradient of the objective with respect to (A, B).
        P = expit(-(AB[0] * F + AB[1]))
        residual = T - P
        if sample_weight is not None:
            residual = residual * sample_weight
        return np.array([np.dot(residual, F), np.sum(residual)])

    # Platt's suggested starting point for (A, B).
    AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
    AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
    return AB_[0], AB_[1]
class _SigmoidCalibration(RegressorMixin, BaseEstimator):
    """Sigmoid regression model.
    Attributes
    ----------
    a_ : float
        The slope.
    b_ : float
        The intercept.
    """
    def fit(self, X, y, sample_weight=None):
        """Fit the model using X, y as training data.
        Parameters
        ----------
        X : array-like, shape (n_samples,)
            Training data.
        y : array-like, shape (n_samples,)
            Training target.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
        Returns
        -------
        self : object
            Returns an instance of self.
        """
        # Flatten both inputs to 1d, then make them jointly indexable
        # before handing off to the Platt-scaling solver.
        X, y = indexable(column_or_1d(X), column_or_1d(y))
        self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
        return self

    def predict(self, T):
        """Predict new data by linear interpolation.
        Parameters
        ----------
        T : array-like, shape (n_samples,)
            Data to predict from.
        Returns
        -------
        T_ : array, shape (n_samples,)
            The predicted data.
        """
        scores = column_or_1d(T)
        # Apply the fitted sigmoid: 1 / (1 + exp(a * T + b)).
        return expit(-(self.a_ * scores + self.b_))
@_deprecate_positional_args
def calibration_curve(y_true, y_prob, *, normalize=False, n_bins=5,
                      strategy='uniform'):
    """Compute true and predicted probabilities for a calibration curve.

    The method assumes the inputs come from a binary classifier, and
    discretize the [0, 1] interval into bins. Calibration curves may
    also be referred to as reliability diagrams.

    Read more in the :ref:`User Guide <calibration>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        True targets.
    y_prob : array-like of shape (n_samples,)
        Probabilities of the positive class.
    normalize : bool, default=False
        Whether y_prob needs to be normalized into the [0, 1] interval, i.e.
        is not a proper probability. If True, the smallest value in y_prob
        is linearly mapped onto 0 and the largest one onto 1.
    n_bins : int, default=5
        Number of bins to discretize the [0, 1] interval. A bigger number
        requires more data. Bins with no samples (i.e. without
        corresponding values in `y_prob`) will not be returned, thus the
        returned arrays may have less than `n_bins` values.
    strategy : {'uniform', 'quantile'}, default='uniform'
        Strategy used to define the widths of the bins.

        uniform
            The bins have identical widths.
        quantile
            The bins have the same number of samples and depend on `y_prob`.

    Returns
    -------
    prob_true : ndarray of shape (n_bins,) or smaller
        The proportion of samples whose class is the positive class, in each
        bin (fraction of positives).
    prob_pred : ndarray of shape (n_bins,) or smaller
        The mean predicted probability in each bin.

    References
    ----------
    Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
    Probabilities With Supervised Learning, in Proceedings of the 22nd
    International Conference on Machine Learning (ICML).
    See section 4 (Qualitative Analysis of Predictions).

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.calibration import calibration_curve
    >>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1])
    >>> y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.])
    >>> prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=3)
    >>> prob_true
    array([0. , 0.5, 1. ])
    >>> prob_pred
    array([0.2  , 0.525, 0.85 ])
    """
    y_true = column_or_1d(y_true)
    y_prob = column_or_1d(y_prob)
    check_consistent_length(y_true, y_prob)

    if normalize:
        # Linearly rescale the predictions onto [0, 1].
        low, high = y_prob.min(), y_prob.max()
        y_prob = (y_prob - low) / (high - low)
    elif y_prob.min() < 0 or y_prob.max() > 1:
        raise ValueError("y_prob has values outside [0, 1] and normalize is "
                         "set to False.")

    labels = np.unique(y_true)
    if len(labels) > 2:
        raise ValueError("Only binary classification is supported. "
                         "Provided labels %s." % labels)
    # Encode the two classes as 0/1.
    y_true = label_binarize(y_true, classes=labels)[:, 0]

    if strategy == 'quantile':
        # Bin edges follow the empirical distribution of the predictions.
        bins = np.percentile(y_prob, np.linspace(0, 1, n_bins + 1) * 100)
        bins[-1] += 1e-8  # make the topmost edge inclusive
    elif strategy == 'uniform':
        bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
    else:
        raise ValueError("Invalid entry to 'strategy' input. Strategy "
                         "must be either 'quantile' or 'uniform'.")

    # Assign each prediction to a bin and accumulate per-bin statistics.
    bin_ids = np.digitize(y_prob, bins) - 1
    n_edges = len(bins)
    pred_sums = np.bincount(bin_ids, weights=y_prob, minlength=n_edges)
    true_sums = np.bincount(bin_ids, weights=y_true, minlength=n_edges)
    counts = np.bincount(bin_ids, minlength=n_edges)

    # Empty bins are dropped from the output.
    nonempty = counts != 0
    prob_true = true_sums[nonempty] / counts[nonempty]
    prob_pred = pred_sums[nonempty] / counts[nonempty]
    return prob_true, prob_pred
| |
#
# The Python Imaging Library.
# $Id$
#
# EPS file handling
#
# History:
# 1995-09-01 fl Created (0.1)
# 1996-05-18 fl Don't choke on "atend" fields, Ghostscript interface (0.2)
# 1996-08-22 fl Don't choke on floating point BoundingBox values
# 1996-08-23 fl Handle files from Macintosh (0.3)
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
# 2003-09-07 fl Check gs.close status (from Federico Di Gregorio) (0.5)
# 2014-05-07 e Handling of EPS with binary preview and fixed resolution
# resizing
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import re
import io
import sys
from PIL import Image, ImageFile, _binary
__version__ = "0.5"
#
# --------------------------------------------------------------------
# Helpers for 32-bit little-endian integers (DOS EPS binary header).
i32 = _binary.i32le
o32 = _binary.o32le
# DSC comment with a value part: "%%Key: value".
split = re.compile(r"^%%([^:]*):[ \t]*(.*)[ \t]*$")
# DSC comment with no value part, e.g. "%%EndComments".
field = re.compile(r"^%[%!\w]([^:]*)[ \t]*$")
# None: not on Windows; False: Windows but no Ghostscript found;
# otherwise the name of the Ghostscript executable to use.
gs_windows_binary = None
if sys.platform.startswith('win'):
    import shutil
    if hasattr(shutil, 'which'):
        which = shutil.which
    else:
        # Python < 3.3
        import distutils.spawn
        which = distutils.spawn.find_executable
    # Probe the PATH for a Ghostscript executable; remember the first hit.
    for binary in ('gswin32c', 'gswin64c', 'gs'):
        if which(binary) is not None:
            gs_windows_binary = binary
            break
    else:
        # for-else: loop completed without a break, nothing was found
        gs_windows_binary = False
def has_ghostscript():
    """Return True if a Ghostscript interpreter appears to be available.

    On Windows this trusts the PATH probe done at import time
    (``gs_windows_binary``); elsewhere it tries to run ``gs --version``.
    """
    if gs_windows_binary:
        return True
    if not sys.platform.startswith('win'):
        import subprocess
        try:
            gs = subprocess.Popen(['gs', '--version'], stdout=subprocess.PIPE)
            gs.stdout.read()
            # Close the pipe and reap the child so it does not linger as
            # a zombie process (the original never waited on it).
            gs.stdout.close()
            gs.wait()
            return True
        except OSError:
            # no ghostscript
            pass
    return False
def Ghostscript(tile, size, fp, scale=1):
    """Render an image using Ghostscript.

    :param tile: the single-entry ``tile`` list of an EpsImageFile; its
        data field carries ``(length, bbox)``.
    :param size: (width, height) of the output image in pixels.
    :param fp: file object positioned on the EPS data.
    :param scale: integer supersampling factor for hi-res rendering.
    :returns: a PIL core image object (ppm).
    """
    # Unpack decoder tile
    decoder, tile, offset, data = tile[0]
    length, bbox = data

    # Hack to support hi-res rendering
    scale = int(scale) or 1
    size = (size[0] * scale, size[1] * scale)
    # resolution is dependent on bbox and size
    res = (float((72.0 * size[0]) / (bbox[2] - bbox[0])),
           float((72.0 * size[1]) / (bbox[3] - bbox[1])))

    import os
    import subprocess
    import tempfile

    out_fd, outfile = tempfile.mkstemp()
    os.close(out_fd)

    infile_temp = None
    if hasattr(fp, 'name') and os.path.exists(fp.name):
        # Ghostscript can read the named file directly; no copy needed.
        infile = fp.name
    else:
        # fp is an in-memory / unnamed stream: spool it to a temp file.
        # BUG FIX: the copy below must only run in this branch -- when fp
        # has a usable name, infile_temp is None and open(None, 'wb')
        # would raise TypeError.
        in_fd, infile_temp = tempfile.mkstemp()
        os.close(in_fd)
        infile = infile_temp

        # ignore length and offset!
        # ghostscript can read it
        # copy whole file to read in ghostscript
        with open(infile_temp, 'wb') as f:
            # fetch length of fp
            fp.seek(0, 2)
            fsize = fp.tell()
            # ensure start position
            # go back
            fp.seek(0)
            lengthfile = fsize
            while lengthfile > 0:
                s = fp.read(min(lengthfile, 100 * 1024))
                if not s:
                    break
                lengthfile -= len(s)
                f.write(s)

    # Build ghostscript command
    command = ["gs",
               "-q",                         # quiet mode
               "-g%dx%d" % size,             # set output geometry (pixels)
               "-r%fx%f" % res,              # set input DPI (dots per inch)
               "-dNOPAUSE -dSAFER",          # don't pause between pages,
                                             # safe mode
               "-sDEVICE=ppmraw",            # ppm driver
               "-sOutputFile=%s" % outfile,  # output file
               "-c", "%d %d translate" % (-bbox[0], -bbox[1]),
                                             # adjust for image origin
               "-f", infile,                 # input file
               ]

    if gs_windows_binary is not None:
        if not gs_windows_binary:
            raise WindowsError('Unable to locate Ghostscript on paths')
        command[0] = gs_windows_binary

    # push data through ghostscript
    try:
        gs = subprocess.Popen(command, stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE)
        gs.stdin.close()
        status = gs.wait()
        if status:
            raise IOError("gs failed (status %d)" % status)
        im = Image.core.open_ppm(outfile)
    finally:
        # Best-effort cleanup of temp files; only swallow filesystem
        # errors (was a bare except that could hide anything).
        try:
            os.unlink(outfile)
            if infile_temp:
                os.unlink(infile_temp)
        except OSError:
            pass
    return im
class PSFile(object):
    """
    Wrapper for bytesio object that treats either CR or LF as end of line.
    """

    def __init__(self, fp):
        self.fp = fp
        # One byte of look-ahead read past the previous line terminator.
        self.char = None

    def seek(self, offset, whence=0):
        # The look-ahead byte belongs to the old position; drop it.
        self.char = None
        self.fp.seek(offset, whence)

    def readline(self):
        """Return the next line, decoded as latin-1, without its ending."""
        # Start with any byte consumed past the previous terminator.
        buffered = self.char or b""
        self.char = None
        byte = self.fp.read(1)
        # EOF yields b"", which is "in" any bytes, so it ends the loop too.
        while byte not in b"\r\n":
            buffered += byte
            byte = self.fp.read(1)
        # line endings can be 1 or 2 of \r \n, in either order
        self.char = self.fp.read(1)
        if self.char in b"\r\n":
            self.char = None
        return buffered.decode('latin-1')
def _accept(prefix):
return prefix[:4] == b"%!PS" or \
(len(prefix) >= 4 and i32(prefix) == 0xC6D3D0C5)
##
# Image plugin for Encapsulated Postscript. This plugin supports only
# a few variants of this format.
class EpsImageFile(ImageFile.ImageFile):
    """EPS File Parser for the Python Imaging Library.

    Parses the DSC comment header of an (Encapsulated) PostScript file to
    determine mode and size; actual rasterization is delegated to
    Ghostscript in load().
    """
    format = "EPS"
    format_description = "Encapsulated Postscript"
    # %ImageData band-count -> PIL mode
    mode_map = {1: "L", 2: "LAB", 3: "RGB"}
    def _open(self):
        """Read the EPS header and set mode/size/tile (no pixel decoding)."""
        # Locate start/length of the PostScript section (skips a possible
        # DOS EPS binary preview).
        (length, offset) = self._find_offset(self.fp)
        # Rewrap the open file pointer in something that will
        # convert line endings and decode to latin-1.
        try:
            if bytes is str:
                # Python2, no encoding conversion necessary
                fp = open(self.fp.name, "Ur")
            else:
                # Python3, can use bare open command.
                fp = open(self.fp.name, "Ur", encoding='latin-1')
        except:
            # NOTE(review): bare except -- meant to catch the missing
            # .name attribute, but hides every other error as well.
            # Expect this for bytesio/stringio
            fp = PSFile(self.fp)
        # go to offset - start of "%!PS"
        fp.seek(offset)
        box = None
        self.mode = "RGB"
        # Placeholder until BoundingBox / %ImageData provides the real size.
        self.size = 1, 1  # FIXME: huh?
        #
        # Load EPS header
        s = fp.readline().strip('\r\n')
        while s:
            # DSC comment lines are limited to 255 characters.
            if len(s) > 255:
                raise SyntaxError("not an EPS file")
            try:
                m = split.match(s)
            except re.error as v:
                raise SyntaxError("not an EPS file")
            if m:
                # "%%Key: value" style comment.
                k, v = m.group(1, 2)
                self.info[k] = v
                if k == "BoundingBox":
                    try:
                        # Note: The DSC spec says that BoundingBox
                        # fields should be integers, but some drivers
                        # put floating point values there anyway.
                        box = [int(float(i)) for i in v.split()]
                        self.size = box[2] - box[0], box[3] - box[1]
                        self.tile = [("eps", (0, 0) + self.size, offset,
                                      (length, box))]
                    except:
                        # NOTE(review): bare except -- a malformed
                        # BoundingBox is silently ignored.
                        pass
            else:
                # "%%Key" style comment without a value part.
                m = field.match(s)
                if m:
                    k = m.group(1)
                    if k == "EndComments":
                        break
                    if k[:8] == "PS-Adobe":
                        # e.g. "PS-Adobe-3.0" -> info["PS-Adobe"] = "3.0"
                        self.info[k[:8]] = k[9:]
                    else:
                        self.info[k] = ""
                elif s[0] == '%':
                    # handle non-DSC Postscript comments that some
                    # tools mistakenly put in the Comments section
                    pass
                else:
                    raise IOError("bad EPS header")
            s = fp.readline().strip('\r\n')
            if s[:1] != "%":
                break
        #
        # Scan for an "ImageData" descriptor
        while s[:1] == "%":
            if len(s) > 255:
                raise SyntaxError("not an EPS file")
            if s[:11] == "%ImageData:":
                # Encoded bitmapped image: "width height bits bands ...".
                x, y, bi, mo = s[11:].split(None, 7)[:4]
                # Only 8 bits per band is supported here.
                if int(bi) != 8:
                    break
                try:
                    self.mode = self.mode_map[int(mo)]
                except:
                    # NOTE(review): bare except -- an unknown band count
                    # keeps the default mode and stops scanning.
                    break
                self.size = int(x), int(y)
                return
            s = fp.readline().strip('\r\n')
            if not s:
                break
        if not box:
            raise IOError("cannot determine EPS bounding box")
    def _find_offset(self, fp):
        """Return (length, offset) of the PostScript data within fp.

        Handles both plain "%!PS" files and the DOS EPS binary format
        whose little-endian 4-byte magic is 0xC6D3D0C5.
        """
        s = fp.read(160)
        if s[:4] == b"%!PS":
            # for HEAD without binary preview
            fp.seek(0, 2)
            length = fp.tell()
            offset = 0
        elif i32(s[0:4]) == 0xC6D3D0C5:
            # FIX for: Some EPS file not handled correctly / issue #302
            # EPS can contain binary data
            # or start directly with latin coding
            # more info see:
            # http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf
            offset = i32(s[4:8])
            length = i32(s[8:12])
        else:
            raise SyntaxError("not an EPS file")
        return (length, offset)
    def load(self, scale=1):
        """Rasterize the EPS via Ghostscript; scale supersamples the output."""
        # Load EPS via Ghostscript
        if not self.tile:
            return
        self.im = Ghostscript(self.tile, self.size, self.fp, scale)
        self.mode = self.im.mode
        self.size = self.im.size
        # Clear the tile list so load() is a no-op on subsequent calls.
        self.tile = []
    def load_seek(self, *args, **kwargs):
        # we can't incrementally load, so force ImageFile.parser to
        # use our custom load method by defining this method.
        pass
#
# --------------------------------------------------------------------
def _save(im, fp, filename, eps=1):
    """EPS Writer for the Python Imaging Library.

    Writes im to fp as (Encapsulated) PostScript; when eps is true a DSC
    header is emitted as well. Pixel data is written by the "eps" encoder.
    """
    #
    # make sure image data is available
    im.load()
    #
    # determine postscript image mode
    # operator = (bits per band, number of bands, PostScript operator)
    if im.mode == "L":
        operator = (8, 1, "image")
    elif im.mode == "RGB":
        operator = (8, 3, "false 3 colorimage")
    elif im.mode == "CMYK":
        operator = (8, 4, "false 4 colorimage")
    else:
        raise ValueError("image mode is not supported")
    class NoCloseStream(object):
        # Proxy that ignores close() so the TextIOWrapper below cannot
        # close the caller's file when it is garbage collected.
        def __init__(self, fp):
            self.fp = fp
        def __getattr__(self, name):
            return getattr(self.fp, name)
        def close(self):
            pass
    # Keep the raw (binary) stream for the pixel encoder; on Python 3 the
    # textual header goes through a latin-1 text wrapper instead.
    base_fp = fp
    if fp != sys.stdout:
        fp = NoCloseStream(fp)
        if sys.version_info[0] > 2:
            fp = io.TextIOWrapper(fp, encoding='latin-1')
    if eps:
        #
        # write EPS header
        fp.write("%!PS-Adobe-3.0 EPSF-3.0\n")
        fp.write("%%Creator: PIL 0.1 EpsEncode\n")
        # fp.write("%%CreationDate: %s"...)
        fp.write("%%%%BoundingBox: 0 0 %d %d\n" % im.size)
        fp.write("%%Pages: 1\n")
        fp.write("%%EndComments\n")
        fp.write("%%Page: 1 1\n")
        fp.write("%%ImageData: %d %d " % im.size)
        fp.write("%d %d 0 1 1 \"%s\"\n" % operator)
    #
    # image header
    fp.write("gsave\n")
    fp.write("10 dict begin\n")
    fp.write("/buf %d string def\n" % (im.size[0] * operator[1]))
    fp.write("%d %d scale\n" % im.size)
    fp.write("%d %d 8\n" % im.size)  # <= bits
    fp.write("[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1]))
    fp.write("{ currentfile buf readhexstring pop } bind\n")
    fp.write(operator[2] + "\n")
    # Flush the text header before the encoder writes binary data to the
    # underlying stream, so the two stay in order.
    if hasattr(fp, "flush"):
        fp.flush()
    ImageFile._save(im, base_fp, [("eps", (0, 0)+im.size, 0, None)])
    fp.write("\n%%%%EndBinary\n")
    fp.write("grestore end\n")
    if hasattr(fp, "flush"):
        fp.flush()
#
# --------------------------------------------------------------------
# Register this plugin with PIL: opener (with sniff function), saver,
# recognised extensions and MIME type.
Image.register_open(EpsImageFile.format, EpsImageFile, _accept)
Image.register_save(EpsImageFile.format, _save)
Image.register_extension(EpsImageFile.format, ".ps")
Image.register_extension(EpsImageFile.format, ".eps")
Image.register_mime(EpsImageFile.format, "application/postscript")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.