"""
:mod:`pyvx.vx` --- C-like Python API
==========================================
The functions specified by the `OpenVX`_ standard are provided in the form of two
modules, :mod:`pyvx.vx` that provides the vxXxx functions and :mod:`pyvx.vxu`
that provides the vxuXxx functions. Please refer to the `OpenVX specification`_
for a description of the API. The module names vx and vxu are used instead of a
vx/vxu prefix on all symbols. The initial example on page 12 of the
specification would look like this in Python:
.. code-block:: python
from pyvx import vx
context = vx.CreateContext()
images = [
vx.CreateImage(context, 640, 480, vx.DF_IMAGE_UYVY),
vx.CreateImage(context, 640, 480, vx.DF_IMAGE_S16),
vx.CreateImage(context, 640, 480, vx.DF_IMAGE_U8),
]
graph = vx.CreateGraph(context)
virts = [
vx.CreateVirtualImage(graph, 0, 0, vx.DF_IMAGE_VIRT),
vx.CreateVirtualImage(graph, 0, 0, vx.DF_IMAGE_VIRT),
vx.CreateVirtualImage(graph, 0, 0, vx.DF_IMAGE_VIRT),
vx.CreateVirtualImage(graph, 0, 0, vx.DF_IMAGE_VIRT),
]
vx.ChannelExtractNode(graph, images[0], vx.CHANNEL_Y, virts[0])
vx.Gaussian3x3Node(graph, virts[0], virts[1])
vx.Sobel3x3Node(graph, virts[1], virts[2], virts[3])
vx.MagnitudeNode(graph, virts[2], virts[3], images[1])
vx.PhaseNode(graph, virts[2], virts[3], images[2])
status = vx.VerifyGraph(graph)
    print(status)
if status == vx.SUCCESS:
status = vx.ProcessGraph(graph)
else:
print("Verification failed.")
vx.ReleaseContext(context)
For a compact example of how to call all the functions in the API, check
out `test_vx.py`_.
.. _`OpenVX`: https://www.khronos.org/openvx
.. _`OpenVX specification`: https://www.khronos.org/registry/vx/specs/OpenVX_1.0_Provisional_Specifications.zip
.. _`test_vx.py`: https://github.com/hakanardo/pyvx/tree/master/test/test_vx.py
The API is kept as
close as possible to the C API, but the few changes listed below were
made, mostly due to the use of pointers in C.

* The vx prefix is removed from each function name. The module name
  fills a similar role in python.
* The *ReleaseXxx* and *RemoveNode* functions take a normal object (as
returned by the
corresponding CreateXxx) as argument and not a pointer to a pointer.
* Out arguments passed in as pointers are returned instead. The
  returned tuple will contain the original return value as its
  first value, followed by the output arguments in the same
  order as they appear in the C signature (see the example after
  this list).
* In/Out arguments are passed in as values and then returned in the
  same manner as the out arguments.
* Any python object implementing the buffer interface can be passed
instead of pointers to blocks of data. This includes both
*array.array* and *numpy.ndarray* objects.
* Python buffer objects are returned instead of pointers to blocks
of data.
* *QueryXxx* functions have the signature
.. code-block:: python
(status, value) = vx.QueryXxx(context, attribute, c_type, python_type=None)
where *c_type* is a string specifying the type of the attribute,
for example "vx_uint32", and *python_type* can be set to *str* for
string-valued attributes.
* *SetXxxAttribute* functions have the signature
.. code-block:: python
status = vx.SetXxxAttribute(context, attribute, value, c_type=None)
where *c_type* is a string specifying the type of the attribute,
for example "vx_uint32".
* *CreateUniformImage* has the signature
.. code-block:: python
image = vx.CreateUniformImage(context, width, height, color, value, c_type)
  where *value* is a python *int* and *c_type* a string specifying
  its type, for example "vx_uint32".
* Normal python functions can be used instead of function pointers.
* *LoadKernels* can load python modules if it is passed a string that
is the name of an importable python module. In that case it will
import *PublishKernels* from it and call
*PublishKernels(context)*.
* *CreateScalar* and *WriteScalarValue* take a python int as value.
* Objects are not implicitly cast to/from references. Use
  :func:`pyvx.vx.reference` and :func:`pyvx.vx.from_reference` instead.
* The typedefed structures called vx_xxx_t can be allocated using
vx.xxx_t(...). See below.
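As an illustration of the calling conventions above, here is a hedged sketch
(the attribute enum ``IMAGE_ATTRIBUTE_WIDTH``, the accessor ``READ_ONLY`` and
the *array* buffer are assumptions made for this example, not taken from the
text above), reusing the ``images`` created earlier:

.. code-block:: python

    import array

    # Out arguments are returned after the status value.
    status, rect = vx.GetValidRegionImage(images[2])

    # QueryXxx returns (status, value); the attribute's C type is a string.
    status, width = vx.QueryImage(images[2], vx.IMAGE_ATTRIBUTE_WIDTH, "vx_uint32")

    # Any object implementing the buffer interface stands in for a data pointer.
    data = array.array('B', [0] * 640 * 480)
    status, addr, buf = vx.AccessImagePatch(images[2], rect, 0, None, data, vx.READ_ONLY)
    vx.CommitImagePatch(images[2], rect, 0, addr, buf)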
"""
from weakref import WeakKeyDictionary
import sys
from pyvx.types import *
keep_alive = WeakKeyDictionary()
_reference_types = {ffi.typeof(s)
for s in ['vx_context', 'vx_image', 'vx_graph', 'vx_node', 'vx_scalar',
'vx_delay', 'vx_lut', 'vx_distribution', 'vx_threshold', 'vx_kernel',
'vx_matrix', 'vx_convolution', 'vx_pyramid', 'vx_remap', 'vx_array',
'vx_parameter', 'vx_reference']}
def _get_attribute(func, ref, attribute, c_type, python_type):
if ffi.typeof(c_type).kind != 'array':
val = ffi.new(c_type + '*')
status = func(ref, attribute, val, ffi.sizeof(c_type))
val = val[0]
else:
val = ffi.new(c_type)
status = func(ref, attribute, val, ffi.sizeof(c_type))
if python_type is str:
val = ffi.string(val).decode("utf8")
elif python_type is not None:
val = python_type(val)
return status, val
def _set_attribute(func, ref, attribute, value, c_type):
if c_type is not None:
assert ffi.typeof(c_type).kind == 'primitive'
value = ffi.new(c_type + '*', value)
s = ffi.sizeof(ffi.typeof(value).item)
return func(ref, attribute, value, s)
def _enum2ctype(data_type):
    data_type_name = ffi.string(ffi.cast("enum vx_type_e", data_type))
    if isinstance(data_type_name, bytes):
        # ffi.string may return bytes under Python 3
        data_type_name = data_type_name.decode("utf8")
    assert data_type_name.startswith('VX_TYPE_')
    return 'vx_' + data_type_name[8:].lower()
def _callback(ctype, callback, parent, error):
callback = ffi.callback(ctype, error=error)(callback)
keep_alive.setdefault(parent, []).append(callback)
return callback
# CONTEXT
def ReleaseContext(context):
c = ffi.new('vx_context *', context)
return lib.vxReleaseContext(c)
def QueryContext(context, attribute, c_type, python_type=None):
return _get_attribute(lib.vxQueryContext, context, attribute, c_type, python_type)
def SetContextAttribute(context, attribute, value, c_type=None):
return _set_attribute(lib.vxSetContextAttribute, context, attribute, value, c_type)
# IMAGE
def ReleaseImage(image):
ref = ffi.new('vx_image *', image)
return lib.vxReleaseImage(ref)
def QueryImage(image, attribute, c_type, python_type=None):
return _get_attribute(lib.vxQueryImage, image, attribute, c_type, python_type)
def SetImageAttribute(image, attribute, value, c_type=None):
return _set_attribute(lib.vxSetImageAttribute, image, attribute, value, c_type)
def CreateUniformImage(context, width, height, color, value, c_type):
if ffi.typeof(c_type).kind != 'array':
c_type += '*'
value = ffi.new(c_type, value)
return lib.vxCreateUniformImage(context, width, height, color, value)
def CreateImageFromHandle(context, color, addrs, ptrs, import_type):
if not isinstance(addrs, (tuple, list)):
addrs = (addrs,)
if not isinstance(ptrs, (tuple, list)):
ptrs = (ptrs,)
addrs = ffi.new('vx_imagepatch_addressing_t[]', [a[0] for a in addrs])
ptrs = ffi.new('void *[]', [ffi.from_buffer(p) for p in ptrs])
return lib.vxCreateImageFromHandle(context, color, addrs, ptrs, import_type)
def AccessImagePatch(image, rect, plane_index, addr, ptr, usage):
if addr is None:
addr = ffi.new('vx_imagepatch_addressing_t *')
if ptr is not None:
ptr = ffi.from_buffer(ptr)
ptr_p = ffi.new('void **', ptr)
size = ComputeImagePatchSize(image, rect, plane_index)
status = lib.vxAccessImagePatch(image, rect, plane_index, addr, ptr_p, usage)
return status, addr, ffi.buffer(ptr_p[0], size)
def CommitImagePatch(image, rect, plane_index, addr, ptr):
ptr = ffi.from_buffer(ptr)
return lib.vxCommitImagePatch(image, rect, plane_index, addr, ptr)
def FormatImagePatchAddress1d(ptr, index, addr):
ptr = ffi.from_buffer(ptr)
p = lib.vxFormatImagePatchAddress1d(ptr, index, addr)
return ffi.buffer(p, addr.stride_x)
def FormatImagePatchAddress2d(ptr, x, y, addr):
ptr = ffi.from_buffer(ptr)
p = lib.vxFormatImagePatchAddress2d(ptr, x, y, addr)
return ffi.buffer(p, addr.stride_x)
def GetValidRegionImage(image):
rect = rectangle_t(0,0,0,0)
status = lib.vxGetValidRegionImage(image, rect)
return status, rect
# KERNEL
def ReleaseKernel(kernel):
ref = ffi.new('vx_kernel *', kernel)
return lib.vxReleaseKernel(ref)
def QueryKernel(kernel, attribute, c_type, python_type=None):
return _get_attribute(lib.vxQueryKernel, kernel, attribute, c_type, python_type)
def SetKernelAttribute(kernel, attribute, value, c_type=None):
return _set_attribute(lib.vxSetKernelAttribute, kernel, attribute, value, c_type)
def AddKernel(context, name, enumeration, func_ptr, numParams, input, output, init, deinit):
func_ptr = _callback("vx_kernel_f", func_ptr, context, FAILURE)
input = _callback("vx_kernel_input_validate_f", input, context, FAILURE)
output = _callback("vx_kernel_output_validate_f", output, context, FAILURE)
if init is None:
init = ffi.NULL
else:
init = _callback("vx_kernel_initialize_f", init, context, FAILURE)
if deinit is None:
deinit = ffi.NULL
else:
deinit = _callback("vx_kernel_deinitialize_f", deinit, context, FAILURE)
return lib.vxAddKernel(context, name, enumeration, func_ptr, numParams, input, output, init, deinit)
def SetMetaFormatAttribute(meta, attribute, value, c_type=None):
return _set_attribute(lib.vxSetMetaFormatAttribute, meta, attribute, value, c_type)
def LoadKernels(context, module):
if sys.version_info > (3,) and not isinstance(module, bytes):
s = lib.vxLoadKernels(context, bytes(module, "utf8"))
else:
s = lib.vxLoadKernels(context, module)
if s == SUCCESS:
return s
try:
d = {}
exec("import %s as mod" % module, d)
mod = d['mod']
except ImportError:
return FAILURE
return mod.PublishKernels(context)
# GRAPH
def ReleaseGraph(graph):
ref = ffi.new('vx_graph *', graph)
return lib.vxReleaseGraph(ref)
def QueryGraph(graph, attribute, c_type, python_type=None):
return _get_attribute(lib.vxQueryGraph, graph, attribute, c_type, python_type)
def SetGraphAttribute(graph, attribute, value, c_type=None):
return _set_attribute(lib.vxSetGraphAttribute, graph, attribute, value, c_type)
# NODE
def ReleaseNode(node):
ref = ffi.new('vx_node *', node)
return lib.vxReleaseNode(ref)
def QueryNode(node, attribute, c_type, python_type=None):
return _get_attribute(lib.vxQueryNode, node, attribute, c_type, python_type)
def SetNodeAttribute(node, attribute, value, c_type=None):
return _set_attribute(lib.vxSetNodeAttribute, node, attribute, value, c_type)
def RemoveNode(node):
    ref = ffi.new('vx_node *', node)
    return lib.vxRemoveNode(ref)
def AssignNodeCallback(node, callback):
if callback is not None:
callback = _callback("vx_nodecomplete_f", callback, node, ACTION_ABANDON)
else:
callback = ffi.NULL
return lib.vxAssignNodeCallback(node, callback)
# PARAMETER
def ReleaseParameter(parameter):
ref = ffi.new('vx_parameter *', parameter)
return lib.vxReleaseParameter(ref)
def QueryParameter(parameter, attribute, c_type, python_type=None):
return _get_attribute(lib.vxQueryParameter, parameter, attribute, c_type, python_type)
# SCALAR
def CreateScalar(context, data_type, value):
ptr = ffi.new(_enum2ctype(data_type) + '*', value)
return lib.vxCreateScalar(context, data_type, ptr)
def ReleaseScalar(scalar):
ref = ffi.new('vx_scalar *', scalar)
return lib.vxReleaseScalar(ref)
def QueryScalar(scalar, attribute, c_type, python_type=None):
return _get_attribute(lib.vxQueryScalar, scalar, attribute, c_type, python_type)
def ReadScalarValue(scalar):
s, data_type = QueryScalar(scalar, SCALAR_ATTRIBUTE_TYPE, "vx_enum")
ptr = ffi.new(_enum2ctype(data_type) + '*')
s = lib.vxReadScalarValue(scalar, ptr)
return s, ptr[0]
def WriteScalarValue(scalar, value):
s, data_type = QueryScalar(scalar, SCALAR_ATTRIBUTE_TYPE, "vx_enum")
ptr = ffi.new(_enum2ctype(data_type) + '*', value)
return lib.vxWriteScalarValue(scalar, ptr)
# REFERENCE
def reference(reference):
"""
Cast the object *reference* into a "vx_reference" object.
"""
if ffi.typeof(reference) not in _reference_types:
raise TypeError("Can't cast %r to vx_reference" % reference)
return ffi.cast('vx_reference', reference)
def from_reference(ref):
"""
Cast the "vx_reference" object *ref* into it's specific type (i.e.
"vx_image" or "vx_graqph" or ...).
"""
s, data_type = QueryReference(ref, REF_ATTRIBUTE_TYPE, 'vx_enum')
return ffi.cast(_enum2ctype(data_type), ref)
def QueryReference(reference, attribute, c_type, python_type=None):
return _get_attribute(lib.vxQueryReference, reference, attribute, c_type, python_type)
# DELAY
def ReleaseDelay(delay):
ref = ffi.new('vx_delay *', delay)
return lib.vxReleaseDelay(ref)
def QueryDelay(delay, attribute, c_type, python_type=None):
return _get_attribute(lib.vxQueryDelay, delay, attribute, c_type, python_type)
# LOGGING
def RegisterLogCallback(context, callback, reentrant):
def wrapper(context, ref, status, string):
callback(context, ref, status, ffi.string(string))
cb = _callback('vx_log_callback_f', wrapper, context, None)
lib.vxRegisterLogCallback(context, cb, reentrant)
# LUT
def ReleaseLUT(lut):
ref = ffi.new('vx_lut *', lut)
return lib.vxReleaseLUT(ref)
def QueryLUT(lut, attribute, c_type, python_type=None):
return _get_attribute(lib.vxQueryLUT, lut, attribute, c_type, python_type)
def AccessLUT(lut, ptr, usage):
if ptr is not None:
ptr = ffi.from_buffer(ptr)
ptr_p = ffi.new('void **', ptr)
status = lib.vxAccessLUT(lut, ptr_p, usage)
_, size = QueryLUT(lut, LUT_ATTRIBUTE_COUNT, 'vx_size')
return (status, ffi.buffer(ptr_p[0], size))
def CommitLUT(lut, ptr):
ptr = ffi.from_buffer(ptr)
return lib.vxCommitLUT(lut, ptr)
# DISTRIBUTION
def ReleaseDistribution(distribution):
ref = ffi.new('vx_distribution *', distribution)
return lib.vxReleaseDistribution(ref)
def QueryDistribution(distribution, attribute, c_type, python_type=None):
return _get_attribute(lib.vxQueryDistribution, distribution, attribute, c_type, python_type)
def AccessDistribution(distribution, ptr, usage):
if ptr is not None:
ptr = ffi.from_buffer(ptr)
ptr_p = ffi.new('void **', ptr)
status = lib.vxAccessDistribution(distribution, ptr_p, usage)
_, size = QueryDistribution(distribution, DISTRIBUTION_ATTRIBUTE_SIZE, 'vx_size')
return (status, ffi.buffer(ptr_p[0], size))
def CommitDistribution(distribution, ptr):
ptr = ffi.from_buffer(ptr)
return lib.vxCommitDistribution(distribution, ptr)
# THRESHOLD
def ReleaseThreshold(threshold):
ref = ffi.new('vx_threshold *', threshold)
return lib.vxReleaseThreshold(ref)
def QueryThreshold(threshold, attribute, c_type, python_type=None):
return _get_attribute(lib.vxQueryThreshold, threshold, attribute, c_type, python_type)
def SetThresholdAttribute(threshold, attribute, value, c_type=None):
return _set_attribute(lib.vxSetThresholdAttribute, threshold, attribute, value, c_type)
# MATRIX
def ReleaseMatrix(matrix):
ref = ffi.new('vx_matrix *', matrix)
return lib.vxReleaseMatrix(ref)
def QueryMatrix(matrix, attribute, c_type, python_type=None):
return _get_attribute(lib.vxQueryMatrix, matrix, attribute, c_type, python_type)
def ReadMatrix(mat, array):
array = ffi.from_buffer(array)
return lib.vxReadMatrix(mat, array)
def WriteMatrix(mat, array):
array = ffi.from_buffer(array)
return lib.vxWriteMatrix(mat, array)
# CONVOLUTION
def ReleaseConvolution(convolution):
ref = ffi.new('vx_convolution *', convolution)
return lib.vxReleaseConvolution(ref)
def QueryConvolution(convolution, attribute, c_type, python_type=None):
return _get_attribute(lib.vxQueryConvolution, convolution, attribute, c_type, python_type)
def SetConvolutionAttribute(convolution, attribute, value, c_type=None):
return _set_attribute(lib.vxSetConvolutionAttribute, convolution, attribute, value, c_type)
def WriteConvolutionCoefficients(conv, array):
array = ffi.from_buffer(array)
return lib.vxWriteConvolutionCoefficients(conv, array)
def ReadConvolutionCoefficients(conv, array):
array = ffi.from_buffer(array)
return lib.vxReadConvolutionCoefficients(conv, array)
# PYRAMID
def ReleasePyramid(pyramid):
ref = ffi.new('vx_pyramid *', pyramid)
return lib.vxReleasePyramid(ref)
def QueryPyramid(pyramid, attribute, c_type, python_type=None):
return _get_attribute(lib.vxQueryPyramid, pyramid, attribute, c_type, python_type)
# REMAP
def ReleaseRemap(remap):
ref = ffi.new('vx_remap *', remap)
return lib.vxReleaseRemap(ref)
def QueryRemap(remap, attribute, c_type, python_type=None):
return _get_attribute(lib.vxQueryRemap, remap, attribute, c_type, python_type)
def GetRemapPoint(table, dst_x, dst_y):
src_x = ffi.new('vx_float32 *')
src_y = ffi.new('vx_float32 *')
status = lib.vxGetRemapPoint(table, dst_x, dst_y, src_x, src_y)
return status, src_x[0], src_y[0]
# ARRAY
def ReleaseArray(array):
ref = ffi.new('vx_array *', array)
return lib.vxReleaseArray(ref)
def QueryArray(array, attribute, c_type, python_type=None):
return _get_attribute(lib.vxQueryArray, array, attribute, c_type, python_type)
def FormatArrayPointer(ptr, index, stride):
if sys.version_info > (3,):
return memoryview(ptr)[index * stride:]
else:
return buffer(ptr, index * stride)
def ArrayItem(type, ptr, index, stride):
return ffi.cast(type + '*', ffi.from_buffer(FormatArrayPointer(ptr, index, stride)))
def AddArrayItems(arr, count, ptr, stride):
if not isinstance(ptr, ffi.CData):
ptr = ffi.from_buffer(ptr)
return lib.vxAddArrayItems(arr, count, ptr, stride)
def AccessArrayRange(arr, start, end, stride, ptr, usage):
if ptr is not None:
ptr = ffi.from_buffer(ptr)
ptr_p = ffi.new('void **', ptr)
stride_p = ffi.new('vx_size *', stride)
status = lib.vxAccessArrayRange(arr, start, end, stride_p, ptr_p, usage)
_, item_size = QueryArray(arr, ARRAY_ATTRIBUTE_ITEMSIZE, 'vx_size')
return (status, stride_p[0], ffi.buffer(ptr_p[0], item_size * (end - start + 1)))
def CommitArrayRange(arr, start, end, ptr):
ptr = ffi.from_buffer(ptr)
return lib.vxCommitArrayRange(arr, start, end, ptr)
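# Hedged sketch of a kernel module loadable via LoadKernels(context, "mykernels"),
# as described in the module docstring (the module name and return value are
# illustrative assumptions, not part of pyvx itself):
#
#     # mykernels.py
#     from pyvx import vx
#
#     def PublishKernels(context):
#         # register custom kernels on the context here, e.g. with vx.AddKernel,
#         # and return a status value
#         return vx.SUCCESS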
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Overall Model for the linear program.
"""
import pulp
import time
from collections import defaultdict
# C Libraries
import pandas as pd
class SPDModel(object):
"""SPDModel
Container for setting up, solving, and organising the results
    of a Simulation. Contains three primary API methods:
    creating the Linear Program, solving the LP and parsing the results.
Usage:
------
solver = SPDModel(SystemOperator)
solver.create_lp()
solver.solve_lp()
solver.parse_result()
"""
def __init__(self, ISO):
super(SPDModel, self).__init__()
self.ISO = ISO
ISO.SPD = self
def full_run(self, solver=pulp.PULP_CBC_CMD()):
""" Convenience function to compile a full run """
self.create_lp()
self.solve_lp(solver)
def create_lp(self):
""" Publically exposed API
Creates the Linear program including applying the objective
function and adding all of the necessary constraints.
This exists as a wrapper around a number of hidden functions.
"""
self._setup_lp()
self._create_variables()
self._obj_function()
self._nodal_demand()
self._energy_offers()
self._reserve_offers()
self._transmission_offer()
self._reserve_proportion()
self._reserve_combined()
self._generator_risk()
self._transmission_risk()
self._reserve_dispatch()
def write_lp(self, fName=None):
""" Write the Linear Program to a file """
self.lp.writeLP(fName)
def solve_lp(self, solver=pulp.COIN_CMD()):
""" Solve the Linear Program including the time taken to solve it """
begin = time.time()
self.lp.solve(solver)
self.solution_time = time.time() - begin
def _setup_lp(self):
""" Setup a Linear Program from a defined ISO instance
Contains several convenience mappings to shorten line lengths
"""
# Set up a Linear Program
self.lp = pulp.LpProblem("SPD Dispatch", pulp.LpMinimize)
self.addC = self.lp.addConstraint
self.SUM = pulp.lpSum
self.lpDict = pulp.LpVariable.dicts
return self
def _create_variables(self):
""" Create all of the variables necessary to solve the Linear Program
This maps the variables from the ISO to the requisite Linear Program
Variables
Returns:
--------
energy_total_offers
reserve_total_offers
branch_flow
nodal_injection
reserve_zone_risk
"""
self.energy_offers = self.lpDict("Energy_Total",
self.ISO.energy_station_names, 0)
self.reserve_offers = self.lpDict("Reserve_Total",
self.ISO.reserve_station_names, 0)
self.branch_flow = self.lpDict("Transmission_Total",
self.ISO.branch_names)
self.nodal_injection = self.lpDict("Nodal_Injection",
self.ISO.node_names)
self.reserve_zone_risk = self.lpDict("Reserve_Risk",
self.ISO.reserve_zone_names, 0)
def _obj_function(self):
""" Objective Function
min \sum_i p_{g,i}g_{i} + \sum_j p_{r,j}r_{j}
"""
# Unpack the necessary variables
eoffers = self.energy_offers
eprices = self.ISO.energy_station_price
roffers = self.reserve_offers
rprices = self.ISO.reserve_station_price
enames = self.ISO.energy_station_names
rnames = self.ISO.reserve_station_names
# Set the objective function
self.lp.setObjective(self.SUM(
[eoffers[i] * eprices[i] for i in enames]) +
self.SUM([roffers[j] * rprices[j]
for j in rnames]))
def _nodal_demand(self):
""" Nodal Demand constraints
Injection_{n} = \sum_{j} g_{j(n)} - d_{n}
Injection_{n} = \sum_{t} f_{t(n)} * d_{t(n)}
"""
# Unpack variables
node_inj = self.nodal_injection
nodal_demand = self.ISO.nodal_demand
nodal_stations = self.ISO.nodal_stations
node_names = self.ISO.node_names
flow_map = self.ISO.node_flow_map
flow_dir = self.ISO.node_flow_direction
energy_offer = self.energy_offers
branch_flow = self.branch_flow
# Introduce a buffer to ensure the duals work
eps = 0.00000001
for node in node_names:
n1 = '_'.join([node, 'Energy_Price'])
n2 = '_'.join([node, 'Nodal_Transmission'])
# Net Injections from Energy and Demand
self.addC(node_inj[node] == self.SUM([energy_offer[i]
for i in nodal_stations[node]]
) - nodal_demand[node] - eps,
n1)
# Net Injection from transmission
self.addC(node_inj[node] == self.SUM([branch_flow[t] *
flow_dir[node][t] for t in flow_map[node]]), n2)
def _energy_offers(self):
"""Energy offer constraints
g_{i} \le g_{max, i}
"""
# Unpack variables
eoffers = self.energy_offers
enames = self.ISO.energy_station_names
ecapacity = self.ISO.energy_station_capacity
# Introduce a buffer to ensure the duals work
eps = 0.00000001
for i in enames:
name = '_'.join([i, 'Total_Energy'])
self.addC(eoffers[i] <= ecapacity[i] + eps, name)
def _reserve_offers(self):
""" Reserve Offer constraints
r_{j} \le r_{max, j}
"""
# Unpack variables
roffers = self.reserve_offers
rnames = self.ISO.reserve_station_names
rcapacity = self.ISO.reserve_station_capacity
# Introduce a buffer to ensure the duals work
eps = 0.00000001
for i in rnames:
name = '_'.join([i, "Total_Reserve"])
self.addC(roffers[i] <= rcapacity[i] + eps, name)
def _transmission_offer(self):
""" Transmission Offer constraints
f_{t} \le f_{max, t}
f_{t} \ge -f_{max, t}
"""
bflows = self.branch_flow
bnames = self.ISO.branch_names
bcapacity = self.ISO.branch_capacity
eps = 0.00000001
for i in bnames:
n1 = '_'.join([i, 'Pos_flow'])
n2 = '_'.join([i, 'Neg_flow'])
self.addC(bflows[i] <= bcapacity[i], n1)
self.addC(bflows[i] >= bcapacity[i] * -1, n2)
def _reserve_proportion(self):
""" Reserve Proportion Constraints
r_{i} \le k_{i}g_{i}
"""
# Unpack Variables
spin_stations = self.ISO.reserve_spinning_stations
roffers = self.reserve_offers
eoffers = self.energy_offers
rprop = self.ISO.reserve_station_proportion
eps = 0.00000001
for i in spin_stations:
name = '_'.join([i, 'Reserve_Proportion'])
self.addC(roffers[i] <= rprop[i] * eoffers[i], name)
def _reserve_combined(self):
""" Reserve total capacity constraints
r_{i} + g_{i} \le g_{capacity, i}
"""
spin_stations = self.ISO.reserve_spinning_stations
roffers = self.reserve_offers
eoffers = self.energy_offers
tot_capacity = self.ISO.total_station_capacity
eps = 0.00000001
for i in spin_stations:
name = '_'.join([i, 'Total_Capacity'])
self.addC(roffers[i] + eoffers[i] <= tot_capacity[i] + eps, name)
def _generator_risk(self):
""" Risk for generators
Risk_{r} \ge g_{i(r)}
"""
rzones = self.ISO.reserve_zone_names
rzone_risk = self.reserve_zone_risk
rzone_stations = self.ISO.reserve_zone_generators
eoffers = self.energy_offers
station_risk = self.ISO.energy_station_risk
# Introduce a buffer to ensure the duals work
eps = 0.00000001
for i in rzones:
for j in rzone_stations[i]:
if station_risk[j]:
name = '_'.join([i, j, 'Generator_Risk'])
self.addC(rzone_risk[i] >= eoffers[j] + eps, name)
def _transmission_risk(self):
""" Risk for a Transmission line
Risk_{r} \ge f_{t(r)} * d_{t(r)}
"""
rzones = self.ISO.reserve_zone_names
rzone_risk = self.reserve_zone_risk
bflow = self.branch_flow
bflow_dir = self.ISO.reserve_zone_flow_direction
bflow_map = self.ISO.reserve_zone_flow_map
# Introduce a buffer to ensure the duals work
eps = 0.00000001
for i in rzones:
for j in bflow_map[i]:
name = '_'.join([i, j, "Transmission_Risk"])
self.addC(rzone_risk[i] >= bflow[j] * bflow_dir[i][j] + eps,
name)
def _reserve_dispatch(self):
""" Total Reserve Dispatch
\sum_{j(r)} r_{j} \ge Risk_{r}
"""
rzones = self.ISO.reserve_zone_names
rzone_risk = self.reserve_zone_risk
rzone_stations = self.ISO.reserve_zone_reserve
roffer = self.reserve_offers
# Introduce a buffer to ensure the duals work
eps = 0.00000001
for i in rzones:
name = '_'.join([i, 'Reserve_Price'])
self.addC(self.SUM([roffer[j]
for j in rzone_stations[i]]
) >= rzone_risk[i] + eps, name)
if __name__ == '__main__':
pass
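    # A minimal, self-contained sketch of the pulp pattern used throughout this
    # module (an lpSum objective plus named constraints). It does not use
    # SPDModel or an ISO instance; station names and numbers are illustrative.
    toy = pulp.LpProblem("Toy_Dispatch", pulp.LpMinimize)
    gen = pulp.LpVariable.dicts("Energy_Total", ["g1", "g2"], 0)
    price = {"g1": 10.0, "g2": 25.0}
    toy.setObjective(pulp.lpSum([gen[i] * price[i] for i in price]))
    toy.addConstraint(gen["g1"] + gen["g2"] >= 100.0, "Demand")
    toy.addConstraint(gen["g1"] <= 60.0, "g1_Capacity")
    toy.solve(pulp.PULP_CBC_CMD(msg=0))
    print({i: gen[i].value() for i in price})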
from __future__ import division, print_function
import numpy as np
from scipy.signal import fftconvolve, convolve
import itertools
"""
08/31/17
Author: Rex McArthur
Creates a class of n-dim Chebyshev polynomials. Tracks the leading term and
coefficients, and includes basic operations (+, *, scalar multiplication, etc.).
Assumes GRevLex ordering, but should be extended.
"""
class MultiCheb(object):
"""
_____ params _______
dim: int, number of variables, dimension of chebyshev system
terms: int, highest term of single variable chebyshev polynomials
    coeff: list(terms**dim) or np.array([terms,] * dim), coefficients in given ordering
    order: string, monomial ordering desired for Groebner calculations
    lead_term: list, the index of the current leading coefficient
_____ methods ______
next_step:
input- Current: list, current location in ordering
output- the next step in ordering
"""
def __init__(self, coeff, order='grevlex', lead_term=None):
'''
terms, int- number of chebyshev polynomials each variable can have. Each dimension will have term terms
dim, int- number of different variables, how many dim our tensor will be
order, string- how you want to order your polynomials. Grevlex is default
'''
self.coeff = coeff
self.dim = self.coeff.ndim
self.terms = np.prod(self.coeff.shape)
self.order = order
self.shape = self.coeff.shape
self.max_term = np.max(self.shape) -1
if lead_term is None:
self.update_lead_term()
else:
self.lead_term = lead_term
def next_step(self, current):
'''
Used to calculate next step in the grevlex generator
'''
for i in range(self.dim-1, 0, -1):
i = int(i)
if i!= self.dim-1 and current[i] == 0:
break
elif i!= 0 and current[i] < self.max_term and current[i-1] > 0:
current[i] += 1
current[i-1] -= 1
return current
if len(current.nonzero()) > 0:
##### This is the problem ######
first_z = -1 * next(j for j,v in enumerate(current[::-1]) if v==0) - 1
            # Finds the first non-zero after a zero and iterates from there to
            # create the next high state
first_nz_after_z = -1*next(i for i,v in enumerate(current[first_z::-1]) if v!=0) -1
j = first_z + first_nz_after_z + 1
current[j] -= 1
current[j+1:] = self._calc_high_state(current[j+1:], self.state_sum-np.sum(current[:j+1]))
return current
raise ValueError("Condition not covered in step func")
def grevlex_gen(self, current=None):
'''
yields grevlex ordering co-ordinates in order to find
        the leading coefficient
        #TODO: Currently this requires square matrices; make it work for non-square ones.
'''
self.state_sum = sum(np.array(self.shape)-1)
        if current is None:
current = np.array(self.shape) -1
print('state sum', self.state_sum)
        print('shape', self.shape)
print('max term', self.max_term)
print('current', current)
low_state = self._calc_low_state(current)
self.state_sum = np.sum(current)
last_i = np.zeros_like(current)
last_i[-1] = 1
yield current
while True:
if all(current == last_i):
yield np.zeros_like(current)
return
elif all(current == low_state):
#print('Current -- lw_state')
#print('State Sum: {}'.format(self.state_sum))
#raw_input()
self.state_sum -= 1
current = self._calc_high_state(current, self.state_sum)
low_state = self._calc_low_state(current)
yield current
else:
current = self.next_step(current)
yield current
def _calc_low_state(self,current):
#print(self.shape)
max_term = np.max(self.shape) -1
#print('max_term')
#print(max_term)
if self.state_sum < max_term:
low_state = np.zeros_like(current)
low_state[-1] = self.state_sum
return low_state
else:
#print('State sum: {}'.format(self.state_sum))
#print('terms: {}'.format(self.dim))
#print(self.shape)
#raw_input()
slots = int(self.state_sum//max_term)
remainder = self.state_sum % max_term
low_state = np.zeros_like(current)
low_state[-slots:] = (self.shape[0]-1)*np.ones(1)
if remainder != 0:
low_state[-slots - 1] = remainder
return low_state.astype(int)
def _calc_high_state(self, current, sum_val):
max_term = np.max(self.shape) -1
slots = int(sum_val//max_term)
remainder = sum_val % max_term
high_state = np.zeros_like(current)
high_state[:slots] = (max_term)*np.ones(1)
if remainder != 0:
high_state[slots] = remainder
return high_state.astype(int)
def update_lead_term(self,start = None):
#print('Updating Leading Coeff...')
if self.order == 'grevlex':
gen = self.grevlex_gen()
for idx in gen:
if self.coeff[tuple(idx)] != 0:
self.lead_term = idx
self.lead_coeff = self.coeff[tuple(idx)]
break
#print('Leading Coeff is {}'.format(self.lead_term))
def __lt__(self, other):
'''
        Magic method for determining which polynomial is smaller
'''
if sum(self.lead_term) < sum(other.lead_term):
return True
elif sum(self.lead_term) > sum(other.lead_term):
return False
else:
            for i in range(len(self.lead_term)):
if self.lead_term[i] < other.lead_term[i]:
return True
if self.lead_term[i] > other.lead_term[i]:
return False
if self.coeff[tuple(self.lead_term)] < other.coeff[tuple(other.lead_term)]:
return True
def __gt__(self, other):
'''
        Magic method for determining which polynomial is larger
'''
if sum(self.lead_term) < sum(other.lead_term):
return False
elif sum(self.lead_term) > sum(other.lead_term):
return True
else:
            for i in range(len(self.lead_term)):
if self.lead_term[i] < other.lead_term[i]:
return False
if self.lead_term[i] > other.lead_term[i]:
return True
if self.coeff[tuple(self.lead_term)] < other.coeff[tuple(other.lead_term)]:
return False
def __add__(self,other):
'''
Here we add an addition method
'''
return MultiCheb(self.coeff + other.coeff)
def __sub__(self,other):
'''
Here we subtract the two polys coeffs
'''
return MultiCheb(self.coeff - other.coeff)
def match_size(self,a,b):
'''
Matches the size of the polynomials
'''
        new_shape = [max(i,j) for i,j in itertools.zip_longest(a.shape, b.shape)]
add_a = [i-j for i,j in zip(new_shape, a.shape)]
add_b = [i-j for i,j in zip(new_shape, b.shape)]
add_a_list = np.zeros((2,len(new_shape)))
add_b_list = np.zeros((2,len(new_shape)))
add_a_list[:,1] = add_a
add_b_list[:,1] = add_b
a = MultiCheb(np.pad(a.coeff,add_a_list.astype(int),'constant'))
b = MultiCheb(np.pad(b.coeff,add_b_list.astype(int),'constant'))
return a,b
def __mul__(self,other):
'''
Multiply by convolving intelligently
CURRENTLY ONLY DOING 2-D support
Manually make 1, 3D support then add n-dim support
'''
# Check and see if same size
if self.shape != other.shape:
new_self, new_other = self.match_size(self,other)
else:
new_self, new_other = self, other
c = new_other.coeff[::-1, ::-1]
p1 = convolve(new_self.coeff,new_other.coeff)
temp = convolve(new_self.coeff,c)
half = len(p1)//2
p2 = temp[:half+1,:][::-1] + temp[half:,:]
p2[0,:] = p2[0,:]/2.
p2 = p2[:,:half+1][:, ::-1] + p2[:,half:]
p2[:,0] = p2[:,0]/2.
p_z = np.zeros_like(p1)
p_z[:half+1, :half+1] = p2
new_coeff = .5*(p1 + p_z)
        new_coeff = np.around(new_coeff, 6)
#TODO: You can use the lead_term kwarg to save some time
return MultiCheb(new_coeff)
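if __name__ == '__main__':
    # Minimal illustrative sketch (not part of the original module): build two
    # small 2-D Chebyshev coefficient tensors and exercise the basic operations.
    a = MultiCheb(np.array([[0., 1.], [2., 3.]]))
    b = MultiCheb(np.array([[1., 0.], [0., 1.]]))
    print('lead term of a:', a.lead_term)        # highest grevlex index with a non-zero coefficient
    print('coefficients of a + b:', (a + b).coeff)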
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import numpy as np
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.vasp import Vasprun
from pymatgen.analysis.defects.core import DefectEntry, Vacancy
from pymatgen.analysis.defects.corrections import FreysoldtCorrection,\
BandFillingCorrection, BandEdgeShiftingCorrection
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", 'test_files')
class DefectsCorrectionsTest(PymatgenTest):
def test_freysoldt(self):
struc = PymatgenTest.get_structure("VO2")
struc.make_supercell(3)
struc = struc
vac = Vacancy(struc, struc.sites[0], charge=-3)
abc = struc.lattice.abc
axisdata = [np.arange(0., lattval, 0.2) for lattval in abc]
bldata = [np.array([1. for u in np.arange(0., lattval, 0.2)]) for lattval in abc]
dldata = [
np.array([(-1 - np.cos(2 * np.pi * u / lattval)) for u in np.arange(0., lattval, 0.2)]) for lattval in abc
]
params = {'axis_grid': axisdata, 'bulk_planar_averages': bldata, 'defect_planar_averages': dldata}
fc = FreysoldtCorrection(15)
#test electrostatic correction
es_corr = fc.perform_es_corr(struc.lattice, -3)
self.assertAlmostEqual(es_corr, 0.975893)
#test potential alignment method
pot_corr = fc.perform_pot_corr(axisdata[0], bldata[0], dldata[0], struc.lattice, -3, vac.site.coords, 0)
self.assertAlmostEqual(pot_corr, 2.836369987722345)
#test entry full correction method
de = DefectEntry(vac, 0., corrections={}, parameters=params, entry_id=None)
val = fc.get_correction(de)
self.assertAlmostEqual(val['freysoldt_electrostatic'], 0.975893)
self.assertAlmostEqual(val['freysoldt_potential_alignment'], 4.4700574)
#test the freysoldt plotter and that plot metadata exists
pltsaver = []
for ax in range(3):
pltsaver.append(fc.plot(axis=ax))
self.assertAlmostEqual(len(pltsaver), 3)
#check that uncertainty metadata exists
for ax in range(3):
self.assertAlmostEqual(set(fc.metadata['pot_corr_uncertainty_md'][ax].keys()), set(['potcorr', 'stats']))
#test a specified axis from entry
fc = FreysoldtCorrection(15, axis=[1])
val = fc.get_correction(de)
self.assertAlmostEqual(val['freysoldt_potential_alignment'], 5.2869010593283132)
#test a different charge
# for electrostatic correction
es_corr = fc.perform_es_corr(struc.lattice, 2)
self.assertAlmostEqual(es_corr, 0.43373)
# for potential alignment method
pot_corr = fc.perform_pot_corr(axisdata[0], bldata[0], dldata[0], struc.lattice, 2, vac.site.coords, 0)
self.assertAlmostEqual(pot_corr, -2.1375685936497768)
#test an input anisotropic dielectric constant
fc = FreysoldtCorrection([[1., 2., 3.], [0., 3., 5.], [4., 10., 8.]])
self.assertAlmostEqual(fc.dielectric, 4.)
val = fc.get_correction(de)
self.assertAlmostEqual(val['freysoldt_electrostatic'], 3.659599)
self.assertAlmostEqual(val['freysoldt_potential_alignment'], 3.3605255195745087)
#test potalign being added to defect entry
self.assertAlmostEqual(de.parameters['potalign'], 1.1201751731915028)
#test that metadata entries exist in defect entry
self.assertTrue('freysoldt_meta' in de.parameters.keys())
self.assertAlmostEqual(
set(de.parameters['freysoldt_meta'].keys()), set(['pot_plot_data', 'pot_corr_uncertainty_md']))
#test a charge of zero
vac = Vacancy(struc, struc.sites[0], charge=0)
de = DefectEntry(vac, 0., corrections={}, parameters=params, entry_id=None)
val = fc.get_correction(de)
self.assertAlmostEqual(val['freysoldt_electrostatic'], 0.)
self.assertAlmostEqual(val['freysoldt_potential_alignment'], 0.)
def test_bandfilling(self):
v = Vasprun(os.path.join(test_dir, 'vasprun.xml'))
eigenvalues = v.eigenvalues.copy()
kptweights = v.actual_kpoints_weights
potalign = 0.
vbm = v.eigenvalue_band_properties[2]
cbm = v.eigenvalue_band_properties[1]
params = {
'eigenvalues': eigenvalues,
'kpoint_weights': kptweights,
'potalign': potalign,
'vbm': vbm,
'cbm': cbm
}
bfc = BandFillingCorrection()
struc = PymatgenTest.get_structure("VO2")
struc.make_supercell(3)
vac = Vacancy(struc, struc.sites[0], charge=-3)
#test trivial performing bandfilling correction
bf_corr = bfc.perform_bandfill_corr(eigenvalues, kptweights, potalign, vbm, cbm)
self.assertAlmostEqual(bf_corr, 0.)
self.assertFalse(bfc.metadata['occupied_def_levels'])
self.assertFalse(bfc.metadata['unoccupied_def_levels'])
self.assertFalse(bfc.metadata['total_occupation_defect_levels'])
self.assertFalse(bfc.metadata['num_elec_cbm'])
self.assertFalse(bfc.metadata['num_hole_vbm'])
self.assertFalse(bfc.metadata['potalign'])
#test trivial full entry bandfill evaluation
de = DefectEntry(vac, 0., corrections={}, parameters=params, entry_id=None)
corr = bfc.get_correction(de)
self.assertAlmostEqual(corr['bandfilling'], 0.)
#modify the eigenvalue list to have free holes
hole_eigenvalues = {}
for spinkey, spinset in eigenvalues.items():
hole_eigenvalues[spinkey] = []
for kptset in spinset:
hole_eigenvalues[spinkey].append([])
for eig in kptset:
if (eig[0] < vbm) and (eig[0] > vbm - .8):
hole_eigenvalues[spinkey][-1].append([eig[0], 0.5])
else:
hole_eigenvalues[spinkey][-1].append(eig)
hole_bf_corr = bfc.perform_bandfill_corr(hole_eigenvalues, kptweights, potalign, vbm, cbm)
self.assertAlmostEqual(hole_bf_corr, -0.41138336)
self.assertAlmostEqual(bfc.metadata['num_hole_vbm'], 0.8125000649)
self.assertFalse(bfc.metadata['num_elec_cbm'])
#modify the eigenvalue list to have free electrons
elec_eigenvalues = {}
for spinkey, spinset in eigenvalues.items():
elec_eigenvalues[spinkey] = []
for kptset in spinset:
elec_eigenvalues[spinkey].append([])
for eig in kptset:
if (eig[0] > cbm) and (eig[0] < cbm + .2):
elec_eigenvalues[spinkey][-1].append([eig[0], 0.5])
else:
elec_eigenvalues[spinkey][-1].append(eig)
elec_bf_corr = bfc.perform_bandfill_corr(elec_eigenvalues, kptweights, potalign, vbm, cbm)
self.assertAlmostEqual(elec_bf_corr, -0.0903187572254)
self.assertAlmostEqual(bfc.metadata['num_elec_cbm'], 0.8541667349)
self.assertFalse(bfc.metadata['num_hole_vbm'])
#modify the potalignment and introduce new occupied defect levels from vbm states
potalign = -0.1
bf_corr = bfc.perform_bandfill_corr(eigenvalues, kptweights, potalign, vbm, cbm)
self.assertAlmostEqual(bfc.metadata['num_hole_vbm'], 0.)
self.assertAlmostEqual(bf_corr, 0.)
occu = [[1.457, 0.0833333], [1.5204, 0.0833333], [1.53465, 0.0833333], [1.5498, 0.0416667]]
self.assertArrayAlmostEqual(
list(sorted(bfc.metadata['occupied_def_levels'], key=lambda x: x[0])), list(
sorted(occu, key=lambda x: x[0])))
self.assertAlmostEqual(bfc.metadata['total_occupation_defect_levels'], 0.29166669)
self.assertFalse(bfc.metadata['unoccupied_def_levels'])
def test_bandedgeshifting(self):
struc = PymatgenTest.get_structure("VO2")
struc.make_supercell(3)
struc = struc
vac = Vacancy(struc, struc.sites[0], charge=-3)
besc = BandEdgeShiftingCorrection()
params = {'hybrid_cbm': 1., 'hybrid_vbm': -1., 'vbm': -0.5, 'cbm': 0.6, 'num_hole_vbm': 0., 'num_elec_cbm': 0.}
de = DefectEntry(vac, 0., corrections={}, parameters=params, entry_id=None)
#test with no free carriers
corr = besc.get_correction(de)
self.assertEqual(corr['vbm_shift_correction'], 1.5)
self.assertEqual(corr['elec_cbm_shift_correction'], 0.)
self.assertEqual(corr['hole_vbm_shift_correction'], 0.)
#test with free holes
de.parameters.update({'num_hole_vbm': 1.})
corr = besc.get_correction(de)
self.assertEqual(corr['vbm_shift_correction'], 1.5)
self.assertEqual(corr['elec_cbm_shift_correction'], 0.)
self.assertEqual(corr['hole_vbm_shift_correction'], 0.5)
#test with free electrons
de.parameters.update({'num_hole_vbm': 0., 'num_elec_cbm': 1.})
corr = besc.get_correction(de)
self.assertEqual(corr['vbm_shift_correction'], 1.5)
self.assertEqual(corr['elec_cbm_shift_correction'], 0.4)
self.assertEqual(corr['hole_vbm_shift_correction'], 0.)
if __name__ == "__main__":
unittest.main()
# -*- coding: utf-8 -*-
from flask import Flask, jsonify, request
from configparser import ConfigParser
from uuid import getnode as get_mac
from sys import platform as ostype
from datetime import datetime
from threading import Thread
import traceback
import platform
import cpuinfo
import psutil
import socket
import time
from bin.boot_time import BootTime
from bin.load_avg import LoadAvg
from bin.network import Network
from bin.memory import Memory
from bin.disk import Disk
from bin.cpu import CPU
# version
VERSION = '1.0'
# global attributes
os_type = ''
ram_percent = 0
ram_used = 0
ram_total = 0
swap_percent = 0
swap_used = 0
swap_total = 0
cpu_percent = 0
boot_time = ''
network_list = []
disk_list = []
load_1min = ''
load_5min = ''
load_15min = ''
# convert human sizes to bytes
def convert_bytes(byts):
try:
byts = byts.lower()
if byts.endswith('kb'):
return int(byts[0:-2]) * 1024
elif byts.endswith('mb'):
return int(byts[0:-2]) * 1024 * 1024
elif byts.endswith('gb'):
return int(byts[0:-2]) * 1024 * 1024 * 1024
        # anything else is invalid input, so raise an error
raise IOError('Invalid input. Correct format: #kb/#mb/#gb like 10gb or 5mb')
except Exception as error:
raise Exception('Invalid input. Correct format: #kb/#mb/#gb like 10gb or 5mb. An error ' +
repr(error) + ' occurred.')
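# Example (illustrative): convert_bytes('5mb') == 5242880 and convert_bytes('10gb') == 10737418240.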
# load config
config = ConfigParser()
config.read('config.ini')
err_type = ''
log_file = ''
flsk_host = ''
flsk_port = 0
try:
# log values
err_type = 'Log > Name'
log_file = config.get('Log', 'Name', fallback='agent.log')
# flask values
err_type = 'Flask > Host'
flsk_host = config.get('Flask', 'Host', fallback='0.0.0.0')
err_type = 'Flask > Port'
flsk_port = config.getint('Flask', 'Port', fallback=5000)
except IOError as e:
print('CONFIG ERROR: Unable to load values from \"{}\"! STACKTRACE: \n{}'.format(err_type, traceback.format_exc()))
print('CONFIG ERROR: Force closing program...')
exit()
# prepare logging
logger = None
try:
logger = open(log_file, 'a')
except IOError as e:
print('FILE ERROR: Unable to open log file! STACKTRACE: \n{}'.format(traceback.format_exc()))
print('FILE ERROR: Force closing program...')
exit()
# perform logging
LOG_FORMAT = '{} | {:6s} | {:6s} | {}'
def log(level, typ, message):
try:
print(LOG_FORMAT.format(datetime.now().strftime('%Y-%m-%d %X'),
level,
typ,
message))
logger.write(LOG_FORMAT.format(datetime.now().strftime('%Y-%m-%d %X'),
level,
typ,
message) + '\n')
logger.flush()
except IOError as ex:
print(LOG_FORMAT.format(datetime.now().strftime('%Y-%m-%d %X'),
'ERROR',
'AGENT',
'Unable to log to file! STACKTRACE: \n{}'.format(traceback.format_exc())))
# setup variables
smemory = Memory()
scpu = CPU()
net = Network()
load = LoadAvg()
boot = BootTime()
sdisk = Disk()
app = Flask(__name__)
# display system hardware specs
@app.route('/specs')
def web_specs():
# retrieve current system hardware specs
operating_system = platform.platform()
try:
cpu_brand = cpuinfo.get_cpu_info()['brand']
cpu_cores = '{} cores @ {}'.format(cpuinfo.get_cpu_info()['count'],
cpuinfo.get_cpu_info()['hz_advertised'])
except KeyError:
cpu_brand = 'unknown'
cpu_cores = '{} cores total'.format(cpuinfo.get_cpu_info()['count'])
    total_ram = '{} GB'.format(round(psutil.virtual_memory().total / 1024 / 1024 / 1024))
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 1))
is_linux, load_1m, load_5m, load_15m = load.get_load()
if not is_linux:
load_1m = 'NULL'
load_5m = 'NULL'
load_15m = 'NULL'
# create json data
json_data = {
'version': VERSION,
'hostname': socket.gethostname(),
'ip': s.getsockname()[0],
'mac': ':'.join(("%012X" % get_mac())[i:i+2] for i in range(0, 12, 2)),
'os': operating_system,
'cpu_brand': cpu_brand,
'cpu_cores': cpu_cores,
'ram': total_ram,
'boot': boot_time,
'load': {
'onemin': load_1m,
'fivemin': load_5m,
'fifteenmin': load_15m
}
}
log('INFO', 'AGENT', 'Retrieved hardware specs for IP: {}'.format(request.remote_addr))
# print json data
return jsonify(json_data)
# display current specs
@app.route('/now')
def web_now():
# create json object
json_data = {
'version': VERSION,
'os': os_type,
'ram': {
'percent': ram_percent,
'used': ram_used,
'total': ram_total,
},
'swap': {
'percent': swap_percent,
'used': swap_used,
'total': swap_total
},
'cpu': {
'percent': cpu_percent
},
'boot': {
'timestamp': boot_time
},
'load': {
'onemin': load_1min,
'fivemin': load_5min,
'fifteenmin': load_15min
},
'disks': []
}
for disk in disk_list:
json_data['disks'].append(disk.__dict__)
log('INFO', 'AGENT', 'Retrieved now status for IP: {}'.format(request.remote_addr))
# print json data
return jsonify(json_data)
# display full system specs
@app.route('/all')
def web_all():
# create json object
json_data = {
'version': VERSION,
'os': os_type,
'memory': {
'ram': {
'percent': ram_percent,
'used': ram_used,
'total': ram_total,
},
'swap': {
'percent': swap_percent,
'used': swap_used,
'total': swap_total
}
},
'cpu': {
'percent': cpu_percent
},
'network': [],
'load': {
'onemin': load_1min,
'fivemin': load_5min,
'fifteenmin': load_15min
}
}
for nic in network_list:
json_data['network'].append(nic.__dict__)
log('INFO', 'AGENT', 'Retrieved all status for IP: {}'.format(request.remote_addr))
# print json data
return jsonify(json_data)
# auto update values
def specs_updater():
global os_type, ram_percent, ram_used, ram_total, swap_percent, swap_used, swap_total, cpu_percent, boot_time, \
network_list, disk_list, load_1min, load_5min, load_15min
while True:
# update OS type
if ostype == 'linux' or ostype == 'linux2':
os_type = 'linux'
elif ostype == 'darwin':
os_type = 'apple'
elif ostype == 'win32':
os_type = 'windows'
elif 'bsd' in ostype:
os_type = 'desktop'
else:
os_type = 'question'
# update RAM info
ram_percent, ram_used, ram_total = smemory.get_memory_usage()
# update swap info
swap_percent, swap_used, swap_total = smemory.get_swap_usage()
# update cpu info
cpu_percent = scpu.get_usage()
# update boot time
boot_time = boot.get_boot_time()
# update network info
network_list = net.get_nic_status()
# update disk info
disk_list = sdisk.get_disks()
is_linux, load_1min, load_5min, load_15min = load.get_load()
if not is_linux:
load_1min = 'NULL'
load_5min = 'NULL'
load_15min = 'NULL'
time.sleep(1)
# start flask process
if __name__ == '__main__':
log('INFO', 'AGENT', 'Starting program...')
log('INFO', 'AGENT', 'Starting auto updater...')
thd = Thread(target=specs_updater)
thd.daemon = True
thd.start()
log('INFO', 'AGENT', 'Auto updater started!')
# wait 2 seconds so the updater can go through its first job
time.sleep(2)
# start Flask service
log('INFO', 'AGENT', 'Now listening for HTTP requests...')
app.run(host=flsk_host, port=flsk_port, threaded=True)
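# Usage sketch (illustrative, not part of the agent itself): once the service is
# running, the JSON endpoints can be polled with any HTTP client, for example:
#
#     import requests   # third-party dependency, assumed for this sketch only
#     print(requests.get('http://127.0.0.1:5000/now').json()['cpu']['percent'])
#
# /specs returns static hardware information, /now the current readings and
# /all the combined memory, CPU, network and load data.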
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.functions.activation import sigmoid
from chainer.functions.activation import tanh
from chainer.functions.array import concat
from chainer.functions.array import split_axis
from chainer.functions.connection import linear
from chainer.functions.connection import n_step_rnn
from chainer.utils import argument
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cuda.cuda.cudnn
class NStepGRU(n_step_rnn.BaseNStepRNN):
def __init__(self, n_layers, states, lengths, **kwargs):
n_step_rnn.BaseNStepRNN.__init__(
self, n_layers, states, lengths,
rnn_dir='uni', rnn_mode='gru', **kwargs)
class NStepBiGRU(n_step_rnn.BaseNStepRNN):
def __init__(self, n_layers, states, lengths, **kwargs):
n_step_rnn.BaseNStepRNN.__init__(
self, n_layers, states, lengths,
rnn_dir='bi', rnn_mode='gru', **kwargs)
def n_step_gru(
n_layers, dropout_ratio, hx, ws, bs, xs, **kwargs):
"""n_step_gru(n_layers, dropout_ratio, hx, ws, bs, xs)
Stacked Uni-directional Gated Recurrent Unit function.
This function calculates stacked Uni-directional GRU with sequences.
This function gets an initial hidden state :math:`h_0`, an input
sequence :math:`x`, weight matrices :math:`W`, and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` for each time :math:`t`
from input :math:`x_t`.
.. math::
r_t &= \\sigma(W_0 x_t + W_3 h_{t-1} + b_0 + b_3) \\\\
z_t &= \\sigma(W_1 x_t + W_4 h_{t-1} + b_1 + b_4) \\\\
h'_t &= \\tanh(W_2 x_t + b_2 + r_t \\cdot (W_5 h_{t-1} + b_5)) \\\\
h_t &= (1 - z_t) \\cdot h'_t + z_t \\cdot h_{t-1}
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Six weight matrices and six bias vectors are
    required for each layer. So, when :math:`S` layers exist, you need to
    prepare :math:`6S` weight matrices and :math:`6S` bias vectors.
    If the number of layers ``n_layers`` is greater than :math:`1`, the input
    of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
    Note that all input variables except those of the first layer may have a
    different shape from the first layer's.
.. warning::
``train`` and ``use_cudnn`` arguments are not supported anymore since
v2.
Instead, use ``chainer.using_config('train', train)`` and
``chainer.using_config('use_cudnn', use_cudnn)`` respectively.
See :func:`chainer.using_config`.
Args:
n_layers(int): Number of layers.
dropout_ratio(float): Dropout ratio.
hx (chainer.Variable): Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units.
ws (list of list of chainer.Variable): Weight matrices. ``ws[i]``
represents weights for i-th layer.
Each ``ws[i]`` is a list containing six matrices.
``ws[i][j]`` is corresponding with ``W_j`` in the equation.
Only ``ws[0][j]`` where ``0 <= j < 3`` is ``(I, N)`` shape as they
are multiplied with input variables. All other matrices has
``(N, N)`` shape.
bs (list of list of chainer.Variable): Bias vectors. ``bs[i]``
            represents biases for the i-th layer.
Each ``bs[i]`` is a list containing six vectors.
``bs[i][j]`` is corresponding with ``b_j`` in the equation.
Shape of each matrix is ``(N,)`` where ``N`` is dimension of
hidden units.
xs (list of chainer.Variable): A list of :class:`~chainer.Variable`
holding input values. Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
            When sequences have different lengths, sort sequences in descending
            order by length, and transpose the sorted sequence.
            :func:`~chainer.functions.transpose_sequence` transposes a list
            of :func:`~chainer.Variable` holding sequences.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
Returns:
        tuple: This function returns a tuple containing two elements,
        ``hy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is same as ``hx``.
- ``ys`` is a list of :class:`~chainer.Variable` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
mini-batch size for time ``t``, and ``N`` is size of hidden
          units. Note that ``B_t`` is the mini-batch size of ``xs[t]``.
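    .. admonition:: Example

        A minimal, illustrative sketch (not from the original documentation;
        the shapes and random initialisation are assumptions chosen for
        brevity). The input and hidden sizes are both 4 so that every weight
        matrix has shape ``(4, 4)``:

        .. code-block:: python

            import numpy as np
            import chainer.functions as F

            n_layers, n_units = 1, 4
            xs = [np.random.rand(2, n_units).astype(np.float32)
                  for _ in range(3)]                     # three steps, B_t = 2
            hx = np.zeros((n_layers, 2, n_units), dtype=np.float32)
            ws = [[np.random.rand(n_units, n_units).astype(np.float32)
                   for _ in range(6)]]
            bs = [[np.zeros(n_units, dtype=np.float32) for _ in range(6)]]
            hy, ys = F.n_step_gru(n_layers, 0.0, hx, ws, bs, xs)
            assert hy.shape == (n_layers, 2, n_units)
            assert len(ys) == 3 and ys[0].shape == (2, n_units)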
"""
return n_step_gru_base(n_layers, dropout_ratio, hx, ws, bs, xs,
use_bi_direction=False, **kwargs)
def n_step_bigru(
n_layers, dropout_ratio, hx, ws, bs, xs, **kwargs):
"""n_step_bigru(n_layers, dropout_ratio, hx, ws, bs, xs)
Stacked Bi-directional Gated Recurrent Unit function.
This function calculates stacked Bi-directional GRU with sequences.
This function gets an initial hidden state :math:`h_0`, an input
sequence :math:`x`, weight matrices :math:`W`, and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` for each time :math:`t`
from input :math:`x_t`.
.. math::
r^{f}_t &= \\sigma(W^{f}_0 x_t + W^{f}_3 h_{t-1} + b^{f}_0 + b^{f}_3)
\\\\
z^{f}_t &= \\sigma(W^{f}_1 x_t + W^{f}_4 h_{t-1} + b^{f}_1 + b^{f}_4)
\\\\
h^{f'}_t &= \\tanh(W^{f}_2 x_t + b^{f}_2 + r^{f}_t \\cdot (W^{f}_5
h_{t-1} + b^{f}_5)) \\\\
h^{f}_t &= (1 - z^{f}_t) \\cdot h^{f'}_t + z^{f}_t \\cdot h_{t-1}
\\\\
r^{b}_t &= \\sigma(W^{b}_0 x_t + W^{b}_3 h_{t-1} + b^{b}_0 + b^{b}_3)
\\\\
z^{b}_t &= \\sigma(W^{b}_1 x_t + W^{b}_4 h_{t-1} + b^{b}_1 + b^{b}_4)
\\\\
h^{b'}_t &= \\tanh(W^{b}_2 x_t + b^{b}_2 + r^{b}_t \\cdot (W^{b}_5
h_{t-1} + b^{b}_5)) \\\\
h^{b}_t &= (1 - z^{b}_t) \\cdot h^{b'}_t + z^{b}_t \\cdot h_{t-1}
\\\\
h_t &= [h^{f}_t; h^{b}_t] \\\\
where :math:`W^{f}` is weight matrices for forward-GRU, :math:`W^{b}` is
weight matrices for backward-GRU.
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Six weight matrices and six bias vectors are
    required for each layer. So, when :math:`S` layers exist, you need to
    prepare :math:`6S` weight matrices and :math:`6S` bias vectors.
    If the number of layers ``n_layers`` is greater than :math:`1`, the input
    of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
    Note that all input variables except those of the first layer may have a
    different shape from the first layer's.
.. warning::
``train`` and ``use_cudnn`` arguments are not supported anymore since
v2.
Instead, use ``chainer.using_config('train', train)`` and
``chainer.using_config('use_cudnn', use_cudnn)`` respectively.
See :func:`chainer.using_config`.
Args:
n_layers(int): Number of layers.
dropout_ratio(float): Dropout ratio.
hx (chainer.Variable): Variable holding stacked hidden states.
Its shape is ``(2S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units.
ws (list of list of chainer.Variable): Weight matrices. ``ws[i]``
represents weights for i-th layer.
Each ``ws[i]`` is a list containing six matrices.
``ws[i][j]`` is corresponding with ``W_j`` in the equation.
Only ``ws[0][j]`` where ``0 <= j < 3`` is ``(I, N)`` shape as they
are multiplied with input variables. All other matrices has
``(N, N)`` shape.
bs (list of list of chainer.Variable): Bias vectors. ``bs[i]``
            represents biases for the i-th layer.
Each ``bs[i]`` is a list containing six vectors.
``bs[i][j]`` is corresponding with ``b_j`` in the equation.
Shape of each matrix is ``(N,)`` where ``N`` is dimension of
hidden units.
xs (list of chainer.Variable): A list of :class:`~chainer.Variable`
holding input values. Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
            When sequences have different lengths, sort sequences in descending
            order by length, and transpose the sorted sequence.
            :func:`~chainer.functions.transpose_sequence` transposes a list
            of :func:`~chainer.Variable` holding sequences.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
use_bi_direction (bool): If ``True``, this function uses
Bi-direction GRU.
Returns:
        tuple: This function returns a tuple containing two elements,
        ``hy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is same as ``hx``.
- ``ys`` is a list of :class:`~chainer.Variable` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
mini-batch size for time ``t``, and ``N`` is size of hidden
          units. Note that ``B_t`` is the mini-batch size of ``xs[t]``.
"""
return n_step_gru_base(n_layers, dropout_ratio, hx, ws, bs, xs,
use_bi_direction=True, **kwargs)
def n_step_gru_base(n_layers, dropout_ratio, hx, ws, bs, xs,
use_bi_direction, **kwargs):
"""n_step_gru_base(n_layers, dropout_ratio, hx, ws, bs, xs, use_bi_direction)
Base function for Stack GRU/BiGRU functions.
This function is used at :func:`chainer.functions.n_step_bigru` and
:func:`chainer.functions.n_step_gru`.
This function's behavior depends on argument ``use_bi_direction``.
.. warning::
``train`` and ``use_cudnn`` arguments are not supported anymore since
v2.
Instead, use ``chainer.using_config('train', train)`` and
``chainer.using_config('use_cudnn', use_cudnn)`` respectively.
See :func:`chainer.using_config`.
Args:
n_layers(int): Number of layers.
dropout_ratio(float): Dropout ratio.
hx (chainer.Variable): Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units. Because of bi-direction, the
first dimension length is ``2S``.
ws (list of list of chainer.Variable): Weight matrices. ``ws[i]``
represents weights for i-th layer.
Each ``ws[i]`` is a list containing six matrices.
``ws[i][j]`` is corresponding with ``W_j`` in the equation.
Only ``ws[0][j]`` where ``0 <= j < 3`` is ``(I, N)`` shape as they
are multiplied with input variables. All other matrices has
``(N, N)`` shape.
bs (list of list of chainer.Variable): Bias vectors. ``bs[i]``
            represents biases for the i-th layer.
Each ``bs[i]`` is a list containing six vectors.
``bs[i][j]`` is corresponding with ``b_j`` in the equation.
Shape of each matrix is ``(N,)`` where ``N`` is dimension of
hidden units.
        xs (list of chainer.Variable): A list of :class:`~chainer.Variable`
            holding input values. Each element ``xs[t]`` holds the input value
            for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
            mini-batch size for time ``t``, and ``I`` is the size of the input
            units. Note that this function supports variable-length sequences.
            When sequences have different lengths, sort them in descending
            order of length and transpose the sorted sequences.
            :func:`~chainer.functions.transpose_sequence` transposes a list
            of :class:`~chainer.Variable` objects holding sequences.
            So ``xs`` needs to satisfy
            ``xs[t].shape[0] >= xs[t + 1].shape[0]``.
        use_bi_direction (bool): If ``True``, this function uses
            bi-directional GRU.
.. seealso::
:func:`chainer.functions.n_step_rnn`
:func:`chainer.functions.n_step_birnn`
""" # NOQA
if kwargs:
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config',
use_cudnn='use_cudnn argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
xp = backend.get_array_module(hx, hx.data)
if xp is not numpy and chainer.should_use_cudnn('>=auto', 5000):
states = cuda.get_cudnn_dropout_states()
states.set_dropout_ratio(dropout_ratio)
lengths = [len(x) for x in xs]
xs = chainer.functions.concat(xs, axis=0)
w = n_step_rnn.cudnn_rnn_weight_concat(
n_layers, states, use_bi_direction, 'gru', ws, bs)
if use_bi_direction:
rnn = NStepBiGRU
else:
rnn = NStepGRU
hy, ys = rnn(n_layers, states, lengths)(hx, w, xs)
sections = numpy.cumsum(lengths[:-1])
ys = chainer.functions.split_axis(ys, sections, 0)
return hy, ys
else:
hy, _, ys = n_step_rnn.n_step_rnn_impl(
_gru, n_layers, dropout_ratio, hx, None, ws, bs, xs,
use_bi_direction)
return hy, ys
def _gru(x, h, c, w, b):
xw = concat.concat([w[0], w[1], w[2]], axis=0)
hw = concat.concat([w[3], w[4], w[5]], axis=0)
xb = concat.concat([b[0], b[1], b[2]], axis=0)
hb = concat.concat([b[3], b[4], b[5]], axis=0)
gru_x = linear.linear(x, xw, xb)
gru_h = linear.linear(h, hw, hb)
W_r_x, W_z_x, W_x = split_axis.split_axis(gru_x, 3, axis=1)
U_r_h, U_z_h, U_x = split_axis.split_axis(gru_h, 3, axis=1)
r = sigmoid.sigmoid(W_r_x + U_r_h)
z = sigmoid.sigmoid(W_z_x + U_z_h)
h_bar = tanh.tanh(W_x + r * U_x)
return (1 - z) * h_bar + z * h, None
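# A minimal NumPy sketch (not part of the original module) of the single GRU
# step that _gru above computes, assuming Chainer's Linear convention of
# (out, in) shaped weights: w[0..2]/b[0..2] act on the input x, while
# w[3..5]/b[3..5] act on the previous hidden state h.
def _gru_step_numpy(x, h, w, b):
    import numpy
    def sig(a):
        return 1.0 / (1.0 + numpy.exp(-a))
    r = sig(x.dot(w[0].T) + b[0] + h.dot(w[3].T) + b[3])  # reset gate
    z = sig(x.dot(w[1].T) + b[1] + h.dot(w[4].T) + b[4])  # update gate
    h_bar = numpy.tanh(x.dot(w[2].T) + b[2] + r * (h.dot(w[5].T) + b[5]))
    return (1 - z) * h_bar + z * h  # new hidden state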
|
|
import ugfx, badge, network, gc, time, urequests, appglue, sys
# SHA2017 Badge installer
# V2 Thomas Roos
# V1 Niek Blankers
def draw_msg(msg):
global line_number
try:
line_number
except:
line_number = 0
ugfx.clear(ugfx.WHITE)
ugfx.string(0, 0, 'Still Loading Anyway...', "PermanentMarker22", ugfx.BLACK)
ugfx.set_lut(ugfx.LUT_FASTER)
draw_msg(msg)
else:
ugfx.string(0, 30 + (line_number * 15), msg, "Roboto_Regular12", ugfx.BLACK)
ugfx.flush()
line_number += 1
def connectWiFi():
nw = network.WLAN(network.STA_IF)
if not nw.isconnected():
nw.active(True)
ssid = badge.nvs_get_str('badge', 'wifi.ssid', 'SHA2017-insecure')
password = badge.nvs_get_str('badge', 'wifi.password')
nw.connect(ssid, password) if password else nw.connect(ssid)
draw_msg("Connecting to '"+ssid+"'...")
timeout = 150
while not nw.isconnected():
time.sleep(0.1)
timeout = timeout - 1
if (timeout<1):
draw_msg("Timeout while connecting!")
                nw.active(False)
return False
return True
def show_description(active):
if active:
global text
text.text(packages[options.selected_index()]["description"])
ugfx.flush()
def select_category(active):
if active:
global categories
global options
index = options.selected_index()
if categories[index]["eggs"] > 0:
category = categories[index]["slug"]
list_apps(category)
def list_apps(slug):
global options
global text
global packages
ugfx.input_attach(ugfx.JOY_UP, 0)
ugfx.input_attach(ugfx.JOY_DOWN, 0)
ugfx.input_attach(ugfx.BTN_A, 0)
ugfx.input_attach(ugfx.BTN_B, 0)
ugfx.input_attach(ugfx.BTN_START, 0)
while options.count() > 0:
options.remove_item(0)
text.text("Downloading list of eggs...")
ugfx.flush(ugfx.LUT_FULL)
try:
f = urequests.get("https://badge.sha2017.org/eggs/category/%s/json" % slug, timeout=30)
try:
packages = f.json()
finally:
f.close()
except BaseException as e:
print("[Installer] Failed to download list of eggs:")
sys.print_exception(e)
text.text("Download failed")
ugfx.flush(ugfx.LUT_FULL)
list_categories()
gc.collect()
return
for package in packages:
options.add_item("%s rev. %s" % (package["name"], package["revision"]))
options.selected_index(0)
ugfx.input_attach(ugfx.JOY_UP, show_description)
ugfx.input_attach(ugfx.JOY_DOWN, show_description)
ugfx.input_attach(ugfx.BTN_A, install_app)
ugfx.input_attach(ugfx.BTN_B, lambda pushed: list_categories() if pushed else False)
ugfx.input_attach(ugfx.BTN_START, lambda pushed: appglue.start_app('') if pushed else False)
show_description(True)
gc.collect()
def start_categories(pushed):
if pushed:
list_categories()
def start_app(pushed):
if pushed:
global selected_app
appglue.start_app(selected_app)
def install_app(active):
if active:
global options
global text
global packages
global selected_app
index = options.selected_index()
ugfx.input_attach(ugfx.JOY_UP, 0)
ugfx.input_attach(ugfx.JOY_DOWN, 0)
ugfx.input_attach(ugfx.BTN_A, 0)
ugfx.input_attach(ugfx.BTN_B, 0)
ugfx.input_attach(ugfx.BTN_START, 0)
ugfx.clear(ugfx.BLACK)
ugfx.string(40,25,"Installing:","Roboto_BlackItalic24",ugfx.WHITE)
ugfx.string(100,55, packages[index]["name"],"PermanentMarker22",ugfx.WHITE)
ugfx.flush()
latest = False
import woezel
selected_app = packages[index]["slug"]
try:
woezel.install(selected_app)
except woezel.LatestInstalledError:
latest = True
except:
ugfx.string(160,85,"Failed","Roboto_BlackItalic24",ugfx.WHITE)
ugfx.flush()
time.sleep(4)
list_categories()
return
ugfx.clear(ugfx.WHITE)
if latest:
ugfx.string(40,25,"Already installed:","Roboto_BlackItalic24",ugfx.BLACK)
else:
ugfx.string(40,25,"Installed:","Roboto_BlackItalic24",ugfx.BLACK)
ugfx.string(100,55, packages[index]["name"],"PermanentMarker22",ugfx.BLACK)
ugfx.string(0, 115, "[ A: START | B: BACK ]", "Roboto_Regular12", ugfx.BLACK)
ugfx.input_attach(ugfx.BTN_A, start_app)
ugfx.input_attach(ugfx.BTN_B, start_categories)
ugfx.input_attach(ugfx.BTN_START, lambda pushed: appglue.start_app("") if pushed else False)
ugfx.flush()
gc.collect()
def list_categories():
global options
global text
global categories
try:
categories
except:
ugfx.input_init()
draw_msg('Getting categories')
try:
f = urequests.get("https://badge.sha2017.org/eggs/categories/json", timeout=30)
categories = f.json()
except:
draw_msg('Failed!')
draw_msg('Returning to launcher :(')
appglue.start_app('launcher', False)
f.close()
draw_msg('Done!')
options = ugfx.List(0,0,int(ugfx.width()/2),ugfx.height())
text = ugfx.Textbox(int(ugfx.width()/2),0, int(ugfx.width()/2), ugfx.height())
ugfx.input_attach(ugfx.JOY_UP, lambda pushed: ugfx.flush() if pushed else False)
ugfx.input_attach(ugfx.JOY_DOWN, lambda pushed: ugfx.flush() if pushed else False)
ugfx.input_attach(ugfx.BTN_A, select_category)
ugfx.input_attach(ugfx.BTN_B, lambda pushed: appglue.start_app("launcher", False) if pushed else False)
ugfx.input_attach(ugfx.BTN_START, lambda pushed: appglue.start_app("") if pushed else False)
ugfx.clear(ugfx.WHITE)
ugfx.flush()
while options.count() > 0:
options.remove_item(0)
for category in categories:
options.add_item("%s (%d) >" % (category["name"], category["eggs"]))
options.selected_index(0)
text.text("Install or update eggs from the hatchery here\n\n\n\n")
ugfx.string_box(148,0,148,26, "Hatchery", "Roboto_BlackItalic24", ugfx.BLACK, ugfx.justifyCenter)
ugfx.line(148, 78, 296, 78, ugfx.BLACK)
ugfx.string_box(148,78,148,18, " A: Open category", "Roboto_Regular12", ugfx.BLACK, ugfx.justifyLeft)
ugfx.string_box(148,92,148,18, " B: Return to home", "Roboto_Regular12", ugfx.BLACK, ugfx.justifyLeft)
ugfx.line(148, 110, 296, 110, ugfx.BLACK)
ugfx.string_box(148,110,148,18, " badge.sha2017.org", "Roboto_Regular12", ugfx.BLACK, ugfx.justifyLeft)
ugfx.flush(ugfx.LUT_FULL)
gc.collect()
if not connectWiFi():
draw_msg('Returning to launcher :(')
appglue.start_app('launcher', False)
else:
list_categories()
|
|
#!/usr/bin/env python
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command line tool for creating test data for Ceilometer.
Usage:
Generate test data, e.g. for the default time span:
source .tox/py27/bin/activate
./tools/make_test_data.py --user 1 --project 1 --resource 1 --counter cpu_util \
    --volume 20
"""
import argparse
import datetime
import logging
import random
import sys
import uuid
from oslo_config import cfg
from oslo_utils import timeutils
from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer import storage
def make_test_data(name, meter_type, unit, volume, random_min,
random_max, user_id, project_id, resource_id, start,
end, interval, resource_metadata=None, source='artificial'):
resource_metadata = resource_metadata or {'display_name': 'toto',
'host': 'tata',
'image_ref_url': 'test',
'instance_flavor_id': 'toto',
}
# Compute start and end timestamps for the new data.
if isinstance(start, datetime.datetime):
timestamp = start
else:
timestamp = timeutils.parse_strtime(start)
if not isinstance(end, datetime.datetime):
end = timeutils.parse_strtime(end)
increment = datetime.timedelta(minutes=interval)
print('Adding new samples for meter %s.' % (name))
# Generate samples
n = 0
total_volume = volume
while timestamp <= end:
if (random_min >= 0 and random_max >= 0):
# If there is a random element defined, we will add it to
# user given volume.
if isinstance(random_min, int) and isinstance(random_max, int):
total_volume += random.randint(random_min, random_max)
else:
total_volume += random.uniform(random_min, random_max)
c = sample.Sample(name=name,
type=meter_type,
unit=unit,
volume=total_volume,
user_id=user_id,
project_id=project_id,
resource_id=resource_id,
timestamp=timestamp.isoformat(),
resource_metadata=resource_metadata,
source=source,
)
data = utils.meter_message_from_counter(
c, cfg.CONF.publisher.telemetry_secret)
# timestamp should be string when calculating signature, but should be
# datetime object when calling record_metering_data.
data['timestamp'] = timestamp
yield data
n += 1
timestamp = timestamp + increment
if (meter_type == 'gauge' or meter_type == 'delta'):
# For delta and gauge, we don't want to increase the value
# in time by random element. So we always set it back to
# volume.
total_volume = volume
print('Added %d new samples for meter %s.' % (n, name))
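# A hypothetical illustration (not part of the original tool): make_test_data()
# is a generator, so a handful of samples can be produced in memory without a
# metering database. cfg.CONF must already be initialised (as main() does below)
# because signing each sample uses the configured telemetry secret.
def _example_generate_samples():
    now = datetime.datetime.utcnow()
    return list(make_test_data(
        name='cpu_util', meter_type='gauge', unit='%', volume=20,
        random_min=0, random_max=5, user_id='1', project_id='1',
        resource_id='example-resource',
        start=now - datetime.timedelta(hours=1), end=now, interval=10))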
def record_test_data(conn, *args, **kwargs):
for data in make_test_data(*args, **kwargs):
conn.record_metering_data(data)
def get_parser():
parser = argparse.ArgumentParser(
description='generate metering data',
)
parser.add_argument(
'--interval',
default=10,
type=int,
help='The period between samples, in minutes.',
)
parser.add_argument(
'--start',
default=31,
        help='Number of days to step back from now, or a date in the past '
             '("YYYY-MM-DDTHH:MM:SS" format), defining the start of the '
             'timestamp range.',
)
parser.add_argument(
'--end',
default=2,
        help='Number of days to step forward from now, or a date in the '
             'future ("YYYY-MM-DDTHH:MM:SS" format), defining the end of the '
             'timestamp range.',
)
parser.add_argument(
'--type',
choices=('gauge', 'cumulative'),
default='gauge',
dest='meter_type',
help='Counter type.',
)
parser.add_argument(
'--unit',
default=None,
help='Counter unit.',
)
parser.add_argument(
'--project',
dest='project_id',
help='Project id of owner.',
)
parser.add_argument(
'--user',
dest='user_id',
help='User id of owner.',
)
parser.add_argument(
'--random_min',
        help='The minimum random amount added to the given volume.',
type=int,
default=0,
)
parser.add_argument(
'--random_max',
        help='The maximum random amount added to the given volume.',
type=int,
default=0,
)
parser.add_argument(
'--resource',
dest='resource_id',
default=str(uuid.uuid4()),
help='The resource id for the meter data.',
)
parser.add_argument(
'--counter',
default='instance',
dest='name',
help='The counter name for the meter data.',
)
parser.add_argument(
'--volume',
help='The amount to attach to the meter.',
type=int,
default=1,
)
return parser
def main():
cfg.CONF([], project='ceilometer')
args = get_parser().parse_args()
# Set up logging to use the console
console = logging.StreamHandler(sys.stderr)
console.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
console.setFormatter(formatter)
root_logger = logging.getLogger('')
root_logger.addHandler(console)
root_logger.setLevel(logging.DEBUG)
# Connect to the metering database
conn = storage.get_connection_from_config(cfg.CONF)
# Find the user and/or project for a real resource
if not (args.user_id or args.project_id):
for r in conn.get_resources():
if r.resource_id == args.resource_id:
args.user_id = r.user_id
args.project_id = r.project_id
break
# Compute the correct time span
format = '%Y-%m-%dT%H:%M:%S'
try:
start = datetime.datetime.utcnow() - datetime.timedelta(
days=int(args.start))
except ValueError:
try:
start = datetime.datetime.strptime(args.start, format)
except ValueError:
raise
try:
end = datetime.datetime.utcnow() + datetime.timedelta(
days=int(args.end))
except ValueError:
try:
end = datetime.datetime.strptime(args.end, format)
except ValueError:
raise
args.start = start
args.end = end
record_test_data(conn=conn, **args.__dict__)
return 0
if __name__ == '__main__':
main()
|
|
# coding=utf-8
# Copyright 2013 Janusz Skonieczny
from importlib import import_module
import logging
from flask_oauth import OAuthRemoteApp
from flask_babel import gettext as _
DEFAULT_PROPERTIES = ("user_id", "display_name", "first_name", "last_name", "email", "image_url")
class BaseProvider(OAuthRemoteApp):
def __init__(self, *args, **kwargs):
super(BaseProvider, self).__init__(None, *args, **kwargs)
def get_profile(self, raw_data):
raise NotImplementedError()
class ExternalProfile(object):
def __init__(self, profile_id, data, raw_data):
self.id = profile_id
self.data = data
self.raw_data = raw_data
class Twitter(BaseProvider):
def __init__(self, *args, **kwargs):
defaults = {
'name': 'Twitter',
'base_url': 'http://api.twitter.com/1/',
'request_token_url': 'https://api.twitter.com/oauth/request_token',
'access_token_url': 'https://api.twitter.com/oauth/access_token',
'authorize_url': 'https://api.twitter.com/oauth/authenticate'
}
defaults.update(kwargs)
super(Twitter, self).__init__(*args, **defaults)
self.tokengetter(lambda: None)
def get_profile(self, raw_data):
logging.debug("data: %s" % raw_data)
import twitter
api = twitter.Api(consumer_key=self.consumer_key,
consumer_secret=self.consumer_secret,
access_token_key=raw_data['oauth_token'],
access_token_secret=raw_data['oauth_token_secret'],
cache=None)
profile = api.VerifyCredentials()
name_split = profile.name.split(" ", 1)
data = {
'provider': self.name,
'profile_id': str(profile.id),
'username': profile.screen_name,
"email": None, # twitter does not provide email
'access_token': raw_data['oauth_token'],
'secret': raw_data['oauth_token_secret'],
"first_name": name_split[0],
"last_name": name_split[1] if len(name_split) > 1 else None,
'cn': profile.name,
'profile_url': "http://twitter.com/{}".format(profile.screen_name),
'image_url': profile.profile_image_url
}
return ExternalProfile(str(profile.id), data, raw_data)
class Google(BaseProvider):
def __init__(self, *args, **kwargs):
defaults = {
'name': 'Google',
'base_url': 'https://www.google.com/accounts/',
'authorize_url': 'https://accounts.google.com/o/oauth2/auth',
'access_token_url': 'https://accounts.google.com/o/oauth2/token',
'request_token_url': None,
'access_token_method': 'POST',
'access_token_params': {
'grant_type': 'authorization_code'
},
'request_token_params': {
'response_type': 'code',
'scope': 'https://www.googleapis.com/auth/plus.me email'
}
}
defaults.update(kwargs)
super(Google, self).__init__(*args, **defaults)
def get_profile(self, raw_data):
access_token = raw_data['access_token']
import oauth2client.client as googleoauth
import apiclient.discovery as googleapi
import httplib2
credentials = googleoauth.AccessTokenCredentials(
access_token=access_token,
user_agent=''
)
http = httplib2.Http()
http = credentials.authorize(http)
api = googleapi.build('plus', 'v1', http=http)
profile = api.people().get(userId='me').execute()
name = profile.get('name')
data = {
'provider': "Google",
'profile_id': profile['id'],
'username': None,
"email": profile.get('emails')[0]["value"],
'access_token': access_token,
'secret': None,
"first_name": name.get("givenName"),
"last_name": name.get("familyName"),
'cn': profile.get('displayName'),
'profile_url': profile.get('url'),
'image_url': profile.get('image', {}).get("url")
}
return ExternalProfile(str(profile['id']), data, raw_data)
class Facebook(BaseProvider):
def __init__(self, *args, **kwargs):
defaults = {
'name': 'Facebook',
'base_url': 'https://graph.facebook.com/',
'request_token_url': None,
'access_token_url': '/oauth/access_token',
'authorize_url': 'https://www.facebook.com/dialog/oauth',
'request_token_params': {
'scope': 'email'
}
}
defaults.update(kwargs)
super(Facebook, self).__init__(*args, **defaults)
def get_profile(self, raw_data):
access_token = raw_data['access_token']
import facebook
graph = facebook.GraphAPI(access_token)
profile = graph.get_object("me")
profile_id = profile['id']
data = {
"provider": "Facebook",
"profile_id": profile_id,
"username": profile.get('username'),
"email": profile.get('email'),
"access_token": access_token,
"secret": None,
"first_name": profile.get('first_name'),
"last_name": profile.get('last_name'),
"cn": profile.get('name'),
"profile_url": "http://facebook.com/profile.php?id={}".format(profile_id),
"image_url": "http://graph.facebook.com/{}/picture".format(profile_id),
}
return ExternalProfile(profile_id, data, raw_data)
class Github(BaseProvider):
def __init__(self, *args, **kwargs):
defaults = {
'name': 'Github',
'base_url': 'https://github.com/',
'authorize_url': 'https://github.com/login/oauth/authorize',
'access_token_url': 'https://github.com/login/oauth/access_token',
'request_token_url': None,
'request_token_params': {
'response_type': 'code',
'scope': 'user:email'
}
}
defaults.update(kwargs)
super(Github, self).__init__(*args, **defaults)
def get_profile(self, raw_data):
logging.debug("raw_data: %s" % raw_data)
access_token = raw_data['access_token']
import requests
import json
r = requests.get('https://api.github.com/user?access_token={}'.format(access_token))
if not r.ok:
raise Exception(_("Could not load profile data from Github API"))
profile = json.loads(r.text or r.content)
r = requests.get('https://api.github.com/user/emails?access_token={}'.format(access_token))
if not r.ok:
raise Exception(_("Could not load emails data from from Github API"))
emails = json.loads(r.text or r.content)
        name_split = (profile.get('name') or "").split(" ", 1)
data = {
"provider": "Github",
"profile_id": str(profile["id"]),
"username": profile.get('login'),
"email": emails[0].get("email"),
"access_token": access_token,
"secret": None,
"first_name": name_split[0],
"last_name": name_split[1] if len(name_split) > 1 else None,
"cn": profile.get('name'),
"profile_url": profile["html_url"],
"image_url": profile["avatar_url"],
}
return ExternalProfile(str(profile['id']), data, raw_data)
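# A hypothetical sketch showing how a further OAuth2 provider could follow the
# same pattern as the classes above: configure endpoint defaults in __init__ and
# map the provider's payload onto the common fields in get_profile(). The URLs
# and field names below are placeholders, not a real provider configuration.
class ExampleProvider(BaseProvider):
    def __init__(self, *args, **kwargs):
        defaults = {
            'name': 'Example',
            'base_url': 'https://example.invalid/api/',
            'authorize_url': 'https://example.invalid/oauth/authorize',
            'access_token_url': 'https://example.invalid/oauth/token',
            'request_token_url': None,
            'request_token_params': {
                'response_type': 'code',
                'scope': 'profile email'
            }
        }
        defaults.update(kwargs)
        super(ExampleProvider, self).__init__(*args, **defaults)
    def get_profile(self, raw_data):
        profile = raw_data.get('profile', {})
        data = {
            'provider': self.name,
            'profile_id': str(profile.get('id')),
            'username': profile.get('login'),
            'email': profile.get('email'),
            'access_token': raw_data.get('access_token'),
            'secret': None,
            'first_name': profile.get('first_name'),
            'last_name': profile.get('last_name'),
            'cn': profile.get('name'),
            'profile_url': profile.get('url'),
            'image_url': profile.get('avatar'),
        }
        return ExternalProfile(data['profile_id'], data, raw_data)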
|
|
from inflection import camelize
from pytest import raises
from interfax.client import InterFAX
try:
from urllib.parse import urlunsplit, urlencode
except ImportError:
from urllib import urlencode
from urlparse import urlunsplit
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
class TestInterFAX(object):
def setup_method(self, method):
self.client = InterFAX('username', 'password')
def teardown_method(self, method):
del self.client
def test___init__(self, fake):
username = fake.pystr()
password = fake.pystr()
timeout = fake.pyfloat()
client = InterFAX(username, password)
assert client.username == username
assert client.password == password
client = InterFAX(username, password)
assert client.username == username
assert client.password == password
def test__init__environ(self, fake):
username = fake.pystr()
password = fake.pystr()
environ = {
'INTERFAX_USERNAME': username,
'INTERFAX_PASSWORD': password
}
with patch.dict('os.environ', environ):
client = InterFAX()
assert client.username == username
assert client.password == password
def test___init__errors(self, fake):
username = fake.pystr()
password = fake.pystr()
with raises(TypeError):
InterFAX(username=username)
with raises(TypeError):
InterFAX(password=password)
def test_inbound(self):
assert self.client.inbound.client == self.client
def test_outbound(self):
assert self.client.outbound.client == self.client
def test_account(self):
assert self.client.account.client == self.client
def test_documents(self):
assert self.client.documents.client == self.client
def test_files(self):
assert self.client.files.client == self.client
def test_deliver(self, fax_number, fake):
self.client.outbound = Mock()
files = fake.pytuple(10, True, str)
kwargs = fake.pydict()
result = self.client.deliver(fax_number, files, **kwargs)
self.client.outbound.deliver.assert_called_with(fax_number, files,
**kwargs)
assert result == self.client.outbound.deliver.return_value
def test_get(self, fake):
path = fake.pystr()
params = fake.pydict()
valid_keys = fake.pytuple(10, True, str)
self.client._request = Mock()
self.client._url_for = Mock()
kwargs = fake.pydict()
result = self.client.get(path, params, valid_keys, **kwargs)
self.client._url_for.assert_called_with(path, params, valid_keys)
url = self.client._url_for.return_value
self.client._request.assert_called_with('GET', url, **kwargs)
assert result == self.client._request.return_value
def test_post(self, fake):
path = fake.pystr()
params = fake.pydict()
valid_keys = fake.pytuple(10, True, str)
kwargs = fake.pydict()
self.client._request = Mock()
self.client._url_for = Mock()
result = self.client.post(path, params, valid_keys, **kwargs)
self.client._url_for.assert_called_with(path, params, valid_keys)
url = self.client._url_for.return_value
self.client._request.assert_called_with('POST', url, **kwargs)
assert result == self.client._request.return_value
def test_delete(self, fake):
path = fake.pystr()
kwargs = fake.pydict()
self.client._request = Mock()
self.client._url_for = Mock()
result = self.client.delete(path, **kwargs)
self.client._url_for.assert_called_with(path)
url = self.client._url_for.return_value
self.client._request.assert_called_with('DELETE', url, **kwargs)
assert result == self.client._request.return_value
def test__request(self, fake):
url = fake.uri()
method = fake.pystr()
kwargs = fake.pydict()
self.client._parse_response = Mock()
self.client.timeout = fake.pyfloat()
with patch('interfax.client.request') as request:
self.client._request(method, url, **kwargs)
kwargs.setdefault('headers', {})
kwargs['headers']['User-Agent'] = self.client.USER_AGENT
kwargs['auth'] = (self.client.username, self.client.password)
request.assert_called_with(method, url, **kwargs)
self.client._parse_response.assert_called_with(request.return_value)
def test__url_for(self, fake, params):
path = fake.pystr()
keys = list(params.keys())
result = self.client._url_for(path, params, keys)
camel = dict([(camelize(k, False), v) for k, v in params.items()])
assert result == urlunsplit(('https', self.client.DOMAIN, path,
urlencode(camel), None))
keys.pop()
with raises(TypeError):
self.client._url_for(path, params, keys)
def test__parse_response(self, fake):
url = fake.uri()
parse = self.client._parse_response
# json
response = Mock()
response.ok = True
response.headers = {}
assert parse(response) == response.json.return_value
# redirect
response = Mock()
response.ok = True
response.headers = {'location': url}
assert parse(response) == url
# binary
response = Mock()
response.ok = True
response.headers = {}
response.json.side_effect = Exception
assert parse(response) == response.content
# error
response = Mock()
response.ok = False
parse(response)
response.raise_for_status.assert_called_with()
def test_user_agent(self):
assert self.client.USER_AGENT.startswith('InterFAX Python ')
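# A hypothetical, concrete illustration (kept outside the collected tests) of
# the camelization behaviour that test__url_for above checks with fake data:
# snake_case query keys become camelCase before being urlencoded into the URL.
def _url_for_camelization_example():
    client = InterFAX('username', 'password')
    url = client._url_for('/outbound/faxes', {'fax_number': '+111'},
                          ['fax_number'])
    assert urlencode({'faxNumber': '+111'}) in url
    return url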
|
|
data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'0', # 0x60
'1', # 0x61
'2', # 0x62
'3', # 0x63
'4', # 0x64
'5', # 0x65
'6', # 0x66
'7', # 0x67
'8', # 0x68
'9', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'', # 0x6f
'', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'', # 0x74
'', # 0x75
'', # 0x76
'', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'', # 0x7d
'', # 0x7e
'', # 0x7f
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'', # 0xcd
'', # 0xce
'', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'', # 0xd4
'', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
'', # 0xff
)
|
|
#!/usr/bin/env python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom widgets used for form fields.
"""
__authors__ = [
'"Pawel Solyga" <pawel.solyga@gmail.com>',
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from django import forms
from django.forms import util
from django.forms import widgets
from django.utils import encoding
from django.utils import html
from django.utils import simplejson
from django.utils import safestring
from soc.logic import dicts
class ReadOnlyInput(forms.widgets.Input):
"""Read only input widget.
"""
input_type = 'text'
def render(self, name, value, attrs=None):
"""Render ReadOnlyInput widget as HTML.
"""
attrs['readonly'] = 'readonly'
attrs['class'] = 'plaintext'
return super(ReadOnlyInput, self).render(name, value, attrs)
class PlainTextWidget(forms.widgets.Widget):
"""Read only input widget.
"""
def render(self, name, value, attrs=None):
"""Render ReadOnlyInput widget as HTML.
"""
escaped = html.escape(encoding.force_unicode(value))
return html.linebreaks(escaped) if escaped.find("\n") >= 0 else escaped
class HTMLTextWidget(forms.widgets.Widget):
"""Read only HTML input widget.
"""
def render(self, name, value, attrs=None):
"""Render HTMLText widget as HTML text.
"""
attrs['readonly'] = 'readonly'
attrs['class'] = 'plaintext'
return safestring.mark_safe(value)
class FullTinyMCE(forms.widgets.Textarea):
"""TinyMCE widget.
  Requires tiny_mce_src.js to be included in your template. The widget can be
  customized by overriding or adding extra options to the mce_settings
  dictionary.
  You can set the TinyMCE widget for a particular form field using the code
  below:
class ExampleForm(helper.forms.BaseForm):
content = forms.fields.CharField(widget=helper.widgets.TinyMCE())
You can include tiny_mce.js in your template using:
{% block scripts %}
<script type="text/javascript" src="/tiny_mce/tiny_mce.js"></script>
{% endblock %}
"""
features1 = ("bold,italic,underline,strikethrough,|,"
"fontsizeselect,forecolor,|,"
"bullist,numlist,outdent,indent")
features2 = ("undo,redo,|,"
"justifyleft,justifycenter,justifyright,|,"
"link,unlink,anchor,code")
DEF_MCE_SETTINGS = {
'mode': "exact",
'theme': "advanced",
'theme_advanced_buttons1': features1,
'theme_advanced_buttons2': features2,
'theme_advanced_buttons3': '',
'theme_advanced_resizing': True,
'theme_advanced_toolbar_location': "top",
'theme_advanced_statusbar_location': "bottom",
'theme_advanced_path' : False,
'theme_advanced_toolbar_align': "left",
'relative_urls': 0,
'remove_script_host': 0,
}
TINY_MCE_HTML_FMT = u'''\
<textarea %(attrs)s>%(value)s</textarea>
<script type="text/javascript">
tinyMCE.init(%(settings_json)s)
</script>'''
def __init__(self, mce_settings=None, *args, **kwargs):
"""Initialize TinyMCE widget with default or customized settings.
Args:
mce_settings: dict with TinyMCE widget settings
*args, **kwargs: passed through to parent __init__() constructor
"""
super(FullTinyMCE, self).__init__(*args, **kwargs)
    self.mce_settings = dict(self.DEF_MCE_SETTINGS)
    if mce_settings:
      self.mce_settings.update(mce_settings)
def render(self, name, value, attrs=None):
"""Render TinyMCE widget as HTML.
"""
from soc.logic.models.user import logic as user_logic
user = user_logic.getCurrentUser()
if user and user.disable_tinymce:
return super(FullTinyMCE, self).render(name, value, attrs)
if value is None:
value = ''
value = encoding.smart_unicode(value)
final_attrs = self.build_attrs(attrs, name=name)
self.mce_settings['elements'] = "id_%s" % name
# convert mce_settings from dict to JSON
mce_json = simplejson.JSONEncoder().encode(self.mce_settings)
return safestring.mark_safe(self.TINY_MCE_HTML_FMT %
{'attrs': widgets.flatatt(final_attrs),
'value': html.escape(value),
'settings_json': mce_json})
class TinyMCE(FullTinyMCE):
"""Regular version of TinyMce
"""
def __init__(self, *args, **kwargs):
"""
"""
super(TinyMCE, self).__init__(*args, **kwargs)
keys = ['mode', 'theme', 'theme_advanced_toolbar_location',
'theme_advanced_toolbar_align', 'relative_urls',
'remove_script_host']
self.mce_settings = dicts.filter(self.mce_settings, keys)
class ReferenceField(forms.CharField):
"""Widget for selecting a reference to an Entity.
"""
def __init__(self, reference_url, filter=None, filter_fields=None,
group=None, example_text=None, *args, **kwargs):
"""Initializes the widget with the specified url and filter.
"""
self.rf = {}
self.rf['reference_url'] = reference_url
self.rf['filter'] = filter if filter else []
self.rf['filter_fields'] = filter_fields if filter_fields else {}
self.group = group if group else "0. "
self.example_text = example_text if example_text else ""
super(ReferenceField, self).__init__(*args, **kwargs)
class AgreementField(widgets.Widget):
"""Widget for selecting a reference to an Entity.
"""
HTML_CODE = """
<span style="width:450px" colspan="4">
<div id="ToS" style="overflow:auto;height:500px">
%(text)s
</div>
</span>
%(url)s
"""
def __init__(self, *args, **kwargs):
self.text = "No Agreement Text Specified"
self.url = ""
super(AgreementField, self).__init__(*args, **kwargs)
def render(self, name, value, attrs=None):
"""HTML renderer for Agreement field.
"""
url_text = '<a href="%s" target="_blank">Full Text (Printable Page)</a>'
url = url_text % self.url if self.url else ""
result = self.HTML_CODE % {'url': url, 'text': self.text}
return result
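# A hypothetical usage sketch (not part of the original module): wiring some of
# the widgets above into a Django form, in the same spirit as the TinyMCE
# example in the FullTinyMCE docstring. The field names here are invented.
class ExampleProfileForm(forms.Form):
  """Example form combining a read-only field with a TinyMCE editor.
  """
  link_id = forms.CharField(widget=ReadOnlyInput())
  bio = forms.CharField(widget=TinyMCE(), required=False)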
|
|
#!/usr/bin/python
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
DOCUMENTATION = """
---
module: eos_mlag_interface
short_description: Manage MLAG interfaces in EOS
description:
- The eos_mlag_interface module manages the MLAG interfaces on Arista
EOS nodes. This module is fully stateful and all configuration of
resources is idempotent unless otherwise specified.
version_added: 1.0.0
category: MLAG
author: Arista EOS+
requirements:
- Arista EOS 4.13.7M or later with command API enabled
- Python Client for eAPI 0.3.0 or later
notes:
- All configuration is idempotent unless otherwise specified
- Supports eos metaparameters for using the eAPI transport
- Supports stateful resource configuration.
options:
name:
description:
      - The interface name associated with this resource. The interface
name must be the full interface identifier. Valid interfaces
match Po*
required: true
default: null
choices: []
aliases: []
version_added: 1.0.0
mlag_id:
description:
- Configures the interface mlag setting to the specified value. The
        mlag setting is any valid number from 1 to 2000. An MLAG identifier
cannot be used on more than one interface.
required: false
default: null
choices: []
aliases: []
version_added: 1.0.0
"""
EXAMPLES = """
- name: Ensure Ethernet1 is configured with mlag id 10
eos_mlag_interface: name=Ethernet1 state=present mlag_id=10
- name: Ensure Ethernet10 is not configured as mlag
eos_mlag_interface: name=Ethernet10 state=absent
"""
#<<EOS_COMMON_MODULE_START>>
import syslog
import collections
from ansible.module_utils.basic import *
try:
import pyeapi
PYEAPI_AVAILABLE = True
except ImportError:
PYEAPI_AVAILABLE = False
DEFAULT_SYSLOG_PRIORITY = syslog.LOG_NOTICE
DEFAULT_CONNECTION = 'localhost'
TRANSPORTS = ['socket', 'http', 'https', 'http_local']
class EosAnsibleModule(AnsibleModule):
meta_args = {
'config': dict(),
'username': dict(),
'password': dict(),
'host': dict(),
'connection': dict(default=DEFAULT_CONNECTION),
'transport': dict(choices=TRANSPORTS),
'port': dict(),
'debug': dict(type='bool', default='false'),
'logging': dict(type='bool', default='true')
}
stateful_args = {
'state': dict(default='present', choices=['present', 'absent']),
}
def __init__(self, stateful=True, *args, **kwargs):
kwargs['argument_spec'].update(self.meta_args)
self._stateful = stateful
if stateful:
kwargs['argument_spec'].update(self.stateful_args)
super(EosAnsibleModule, self).__init__(*args, **kwargs)
self.result = dict(changed=False, changes=dict())
self._debug = kwargs.get('debug') or self.boolean(self.params['debug'])
self._logging = kwargs.get('logging') or self.params['logging']
self.log('DEBUG flag is %s' % self._debug)
self.debug('pyeapi_version', self.check_pyeapi())
self.debug('stateful', self._stateful)
self.debug('params', self.params)
self._attributes = self.map_argument_spec()
self.validate()
self._node = self.connect()
self._instance = None
self.desired_state = self.params['state'] if self._stateful else None
self.exit_after_flush = kwargs.get('exit_after_flush')
@property
def instance(self):
if self._instance:
return self._instance
func = self.func('instance')
if not func:
self.fail('Module does not support "instance"')
try:
self._instance = func(self)
except Exception as exc:
self.fail('instance[error]: %s' % exc.message)
self.log("called instance: %s" % self._instance)
return self._instance
@property
def attributes(self):
return self._attributes
@property
def node(self):
if self._node:
return self._node
self._node = self.connect()
return self._node
def check_pyeapi(self):
if not PYEAPI_AVAILABLE:
self.fail('Unable to import pyeapi, is it installed?')
return pyeapi.__version__
def map_argument_spec(self):
"""map_argument_spec maps only the module argument spec to attrs
        This method will map the argument spec minus the meta_args to attrs
and return the attrs. This returns a dict object that includes only
the original argspec plus the stateful_args (if self._stateful=True)
Returns:
dict: Returns a dict object that includes the original
argument_spec plus stateful_args with values minus meta_args
"""
keys = set(self.params).difference(self.meta_args)
attrs = dict()
attrs = dict([(k, self.params[k]) for k in self.params if k in keys])
if 'CHECKMODE' in attrs:
del attrs['CHECKMODE']
return attrs
def validate(self):
for key, value in self.attributes.iteritems():
func = self.func('validate_%s' % key)
if func:
self.attributes[key] = func(value)
def create(self):
if not self.check_mode:
func = self.func('create')
if not func:
self.fail('Module must define "create" function')
return self.invoke(func, self)
def remove(self):
if not self.check_mode:
func = self.func('remove')
if not func:
                self.fail('Module must define "remove" function')
return self.invoke(func, self)
def flush(self, exit_after_flush=False):
self.exit_after_flush = exit_after_flush
if self.desired_state == 'present' or not self._stateful:
if self.instance.get('state') == 'absent':
changed = self.create()
self.result['changed'] = changed or True
self.refresh()
changeset = self.attributes.viewitems() - self.instance.viewitems()
if self._debug:
self.debug('desired_state', self.attributes)
self.debug('current_state', self.instance)
changes = self.update(changeset)
if changes:
self.result['changes'] = changes
self.result['changed'] = True
self._attributes.update(changes)
flush = self.func('flush')
if flush:
self.invoke(flush, self)
elif self.desired_state == 'absent' and self._stateful:
if self.instance.get('state') == 'present':
changed = self.remove()
self.result['changed'] = changed or True
elif self._stateful:
if self.desired_state != self.instance.get('state'):
changed = self.invoke(self.instance.get('state'))
self.result['changed'] = changed or True
self.refresh()
self.result['instance'] = self.instance
if self.exit_after_flush:
self.exit()
def update(self, changeset):
changes = dict()
for key, value in changeset:
if value is not None:
changes[key] = value
func = self.func('set_%s' % key)
if func and not self.check_mode:
try:
self.invoke(func, self)
except Exception as exc:
self.fail(exc.message)
return changes
def connect(self):
if self.params['config']:
pyeapi.load_config(self.params['config'])
config = dict()
if self.params['connection']:
config = pyeapi.config_for(self.params['connection'])
if not config:
msg = 'Connection name "%s" not found' % self.params['connection']
self.fail(msg)
if self.params['username']:
config['username'] = self.params['username']
if self.params['password']:
config['password'] = self.params['password']
if self.params['transport']:
config['transport'] = self.params['transport']
if self.params['port']:
config['port'] = self.params['port']
if self.params['host']:
config['host'] = self.params['host']
if 'transport' not in config:
self.fail('Connection must define a transport')
connection = pyeapi.client.make_connection(**config)
node = pyeapi.client.Node(connection, **config)
try:
resp = node.enable('show version')
self.debug('eos_version', resp[0]['result']['version'])
self.debug('eos_model', resp[0]['result']['modelName'])
except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError):
self.fail('unable to connect to %s' % node)
else:
self.log('Connected to node %s' % node)
self.debug('node', str(node))
return node
def config(self, commands):
self.result['changed'] = True
if not self.check_mode:
self.node.config(commands)
def api(self, module):
return self.node.api(module)
def func(self, name):
return globals().get(name)
def invoke(self, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exc:
self.fail(exc.message)
def invoke_function(self, name, *args, **kwargs):
func = self.func(name)
if func:
return self.invoke(func, *args, **kwargs)
def fail(self, msg):
self.invoke_function('on_fail', self)
self.log('ERROR: %s' % msg, syslog.LOG_ERR)
self.fail_json(msg=msg)
def exit(self):
self.invoke_function('on_exit', self)
self.log('Module completed successfully')
self.exit_json(**self.result)
def refresh(self):
self._instance = None
def debug(self, key, value):
if self._debug:
if 'debug' not in self.result:
self.result['debug'] = dict()
self.result['debug'][key] = value
def log(self, message, priority=None):
if self._logging:
syslog.openlog('ansible-eos')
priority = priority or DEFAULT_SYSLOG_PRIORITY
syslog.syslog(priority, str(message))
@classmethod
def add_state(cls, name):
cls.stateful_args['state']['choices'].append(name)
#<<EOS_COMMON_MODULE_END>>
def instance(module):
""" Returns an instance of Mlag interface config from the node
"""
name = module.attributes['name']
response = module.node.api('mlag').get()
result = response['interfaces'].get(name)
_instance = dict(name=name, state='absent')
if result:
_instance['state'] = 'present'
_instance['mlag_id'] = result['mlag_id']
return _instance
def create(module):
"""Creates a new Mlag interface on the node
"""
name = module.attributes['name']
module.log('Invoked create for eos_mlag_interface[%s]' % name)
set_mlag_id(module)
def remove(module):
"""Removes an existing Mlag interface on the node
"""
name = module.attributes['name']
module.log('Invoked remove for eos_mlag_interface[%s]' % name)
set_mlag_id(module)
def set_mlag_id(module):
"""Configures the interface mlag id attribute
"""
value = module.attributes['mlag_id']
name = module.attributes['name']
module.log('Invoked set_mlag_id for eos_mlag_interface[%s] '
'with value %s' % (name, value))
module.node.api('mlag').set_mlag_id(name, value)
def main():
""" The main module routine called when the module is run by Ansible
"""
argument_spec = dict(
name=dict(required=True),
mlag_id=dict(),
)
module = EosAnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
module.flush(True)
main()
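# A small illustration (not part of the module) of how flush() above derives its
# changeset: a set difference over dict items keeps only the attributes whose
# desired value differs from what is currently configured on the node. The
# values are made up; flush() uses viewitems() on Python 2, items() is shown
# here for readability.
def _example_changeset():
    desired = {'name': 'Port-Channel10', 'mlag_id': '10'}
    current = {'name': 'Port-Channel10', 'mlag_id': None, 'state': 'present'}
    changeset = set(desired.items()) - set(current.items())
    # only non-None differences are applied, mirroring update()
    return dict((k, v) for k, v in changeset if v is not None)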
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test decoding scripts via decodescript RPC command."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import *
from io import BytesIO
class DecodeScriptTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def decodescript_script_sig(self):
signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
push_signature = '48' + signature
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
# below are test cases for all of the standard transaction types
# 1) P2PK scriptSig
# the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
rpc_result = self.nodes[0].decodescript(push_signature)
assert_equal(signature, rpc_result['asm'])
# 2) P2PKH scriptSig
rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
assert_equal(signature + ' ' + public_key, rpc_result['asm'])
# 3) multisig scriptSig
# this also tests the leading portion of a P2SH multisig scriptSig
# OP_0 <A sig> <B sig>
rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])
# 4) P2SH scriptSig
# an empty P2SH redeemScript is valid and makes for a very simple test case.
# thus, such a spending scriptSig would just need to pass the outer redeemScript
# hash test and leave true on the top of the stack.
rpc_result = self.nodes[0].decodescript('5100')
assert_equal('OP_NAME_NEW 0', rpc_result['asm'])
# 5) null data scriptSig - no such thing because null data scripts can not be spent.
# thus, no test case for that standard transaction type is here.
def decodescript_script_pub_key(self):
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
public_key_hash = '5dd1d3a048119c27b28293056724d9522f26d945'
push_public_key_hash = '14' + public_key_hash
uncompressed_public_key = '04b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb25e01fc8fde47c96c98a4f3a8123e33a38a50cf9025cc8c4494a518f991792bb7'
push_uncompressed_public_key = '41' + uncompressed_public_key
p2wsh_p2pk_script_hash = 'd8590cf8ea0674cf3d49fd7ca249b85ef7485dea62c138468bddeb20cd6519f7'
# below are test cases for all of the standard transaction types
# 1) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_public_key + 'ac')
assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm'])
# P2PK is translated to P2WPKH
assert_equal('0 ' + public_key_hash, rpc_result['segwit']['asm'])
# 2) P2PKH scriptPubKey
# OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
# P2PKH is translated to P2WPKH
assert_equal('0 ' + public_key_hash, rpc_result['segwit']['asm'])
# 3) multisig scriptPubKey
# <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
multisig_script = '52' + push_public_key + push_public_key + push_public_key + '53ae'
rpc_result = self.nodes[0].decodescript(multisig_script)
assert_equal('OP_NAME_FIRSTUPDATE ' + public_key + ' ' + public_key + ' ' + public_key + ' OP_NAME_UPDATE OP_CHECKMULTISIG', rpc_result['asm'])
# multisig in P2WSH
multisig_script_hash = bytes_to_hex_str(sha256(hex_str_to_bytes(multisig_script)))
assert_equal('0 ' + multisig_script_hash, rpc_result['segwit']['asm'])
# 4) P2SH scriptPubKey
# OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
# push_public_key_hash here should actually be the hash of a redeem script.
# but this works the same for purposes of this test.
rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
        # P2SH does not work in segwit scripts. decodescript should not return a result for it.
assert 'segwit' not in rpc_result
# 5) null data scriptPubKey
# use a signature look-alike here to make sure that we do not decode random data as a signature.
# this matters if/when signature sighash decoding comes along.
# would want to make sure that no such decoding takes place in this case.
signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
# OP_RETURN <data>
rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm'])
# 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here.
# OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY.
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
#
# OP_IF
# <receiver-pubkey> OP_CHECKSIGVERIFY
# OP_ELSE
# <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP
# OP_ENDIF
# <sender-pubkey> OP_CHECKSIG
#
# lock until block 500,000
cltv_script = '63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac'
rpc_result = self.nodes[0].decodescript(cltv_script)
assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
# CLTV script in P2WSH
cltv_script_hash = bytes_to_hex_str(sha256(hex_str_to_bytes(cltv_script)))
assert_equal('0 ' + cltv_script_hash, rpc_result['segwit']['asm'])
# 7) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_uncompressed_public_key + 'ac')
assert_equal(uncompressed_public_key + ' OP_CHECKSIG', rpc_result['asm'])
# uncompressed pubkeys are invalid for checksigs in segwit scripts.
# decodescript should not return a P2WPKH equivalent.
assert 'segwit' not in rpc_result
# 8) multisig scriptPubKey with an uncompressed pubkey
# <m> <A pubkey> <B pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# the purpose of this test is to check that a segwit script is not returned for bare multisig scripts
# with an uncompressed pubkey in them.
rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_uncompressed_public_key +'52ae')
assert_equal('OP_NAME_FIRSTUPDATE ' + public_key + ' ' + uncompressed_public_key + ' OP_NAME_FIRSTUPDATE OP_CHECKMULTISIG', rpc_result['asm'])
# uncompressed pubkeys are invalid for checksigs in segwit scripts.
# decodescript should not return a P2WPKH equivalent.
assert 'segwit' not in rpc_result
# 9) P2WPKH scriptpubkey
# 0 <PubKeyHash>
rpc_result = self.nodes[0].decodescript('00' + push_public_key_hash)
assert_equal('0 ' + public_key_hash, rpc_result['asm'])
# segwit scripts do not work nested into each other.
# a nested segwit script should not be returned in the results.
assert 'segwit' not in rpc_result
# 10) P2WSH scriptpubkey
# 0 <ScriptHash>
# even though this hash is of a P2PK script which is better used as bare P2WPKH, it should not matter
# for the purpose of this test.
rpc_result = self.nodes[0].decodescript('0020' + p2wsh_p2pk_script_hash)
assert_equal('0 ' + p2wsh_p2pk_script_hash, rpc_result['asm'])
# segwit scripts do not work nested into each other.
# a nested segwit script should not be returned in the results.
assert 'segwit' not in rpc_result
def decoderawtransaction_asm_sighashtype(self):
"""Test decoding scripts via RPC command "decoderawtransaction".
        This test is grouped with the "decodescript" tests because both exercise the same "asm" script decoding.
"""
# this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])
# this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
# it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
# verify that we have not altered scriptPubKey decoding.
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
txSave = CTransaction()
txSave.deserialize(BytesIO(hex_str_to_bytes(tx)))
# make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])
# verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
# some more full-transaction tests of various specific scriptSigs. these are used
# instead of tests in decodescript_script_sig because the decodescript RPC
# works specifically on scriptPubKeys (argh!).
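# The scriptSig sliced below starts with OP_0 (hex '00'), then a 0x48 push
# opcode (push 72 bytes), a 71-byte DER signature and a 1-byte sighash type:
# push_signature keeps the push opcode plus the 72-byte signature, signature
# drops the push opcode, and der_signature drops the trailing sighash byte
# ('01' decodes as [ALL], '82' as [NONE|ANYONECANPAY]).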
push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:(0x48*2+4)]
signature = push_signature[2:]
der_signature = signature[:-2]
signature_sighash_decoded = der_signature + '[ALL]'
signature_2 = der_signature + '82'
push_signature_2 = '48' + signature_2
signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'
# 1) P2PK scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# make sure that the sighash decodes come out correctly for a more complex / lesser used case.
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 2) multisig scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 3) test a scriptSig that contains more than just push operations.
# in fact, it contains an OP_RETURN with data specially crafted to cause an improper decode if the code does not catch it.
txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101')
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
def run_test(self):
self.decodescript_script_sig()
self.decodescript_script_pub_key()
self.decoderawtransaction_asm_sighashtype()
if __name__ == '__main__':
DecodeScriptTest().main()
|
|
from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import assert_in, assert_less, assert_greater
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # alef with a hamza below
expected = '\u0627' # simple alef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # alef with a hamza below
expected = '' # alef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error defaults to 'strict', so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
# Check the old interface
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
charset='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("charset" in str(w[0].message).lower())
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indices():
vocab = {"pizza": 0, "beer": 0}
try:
vect = CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
vect = CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), stoplist)
def test_countvectorizer_empty_vocabulary():
try:
CountVectorizer(vocabulary=[])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
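# smooth_idf adds one to the document frequencies (as if an extra document
# contained every term exactly once), which keeps the idf weights finite even
# for terms that appear in every document.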
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
# First we need to verify that numpy here provides div 0 warnings
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
with warnings.catch_warnings(record=True) as w:
tfidf = tr.fit_transform(X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
assert_equal(len(w), 1)
# For Python 3 compatibility
if hasattr(w[0].message, 'args'):
assert_true("divide by zero" in w[0].message.args[0])
else:
assert_true("divide by zero" in w[0].message)
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
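# With sublinear_tf the raw term frequencies are replaced by 1 + log(tf), so
# the transformed values below are 1, 1 + log(2) and 1 + log(3): they grow
# logarithmically rather than linearly with the counts.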
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
# build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
# check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
# words that are highly frequent across the complete corpus are likely
# to be uninformative (either real stop words or extraction
# artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
assert_false(tv.fixed_vocabulary)
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
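# HashingVectorizer maps tokens straight to column indices with a hash
# function instead of fitting a vocabulary, so transform() can be called
# without a prior fit and the vectorizer itself is stateless.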
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_vectorizer_max_df():
test_data = ['abc', 'dea'] # the letter a occurs in both strings
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 5)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # 'a' is ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # the others remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 1)
# max_df given as an absolute count: terms present in more than one document are ignored
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # 'a' is ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # the others remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 1)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat'] # the letter a occurs in all three strings
vect = CountVectorizer(analyzer='char', max_df=1.0, min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # 'c' is ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # only e, a remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = .5
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # 'c' is ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # only e, a remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# simulate iterables
train_data = iter(data[1:-1])
test_data = iter([data[0], data[-1]])
# label junk food as -1, the others as +1
y = np.ones(len(data))
y[:6] = -1
y_train = y[1:-1]
y_test = np.array([y[0], y[-1]])
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('l1', 'l2')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# cross-validation doesn't work if the length of the data is not known,
# hence use lists instead of iterators
pred = grid_search.fit(list(train_data), y_train).predict(list(test_data))
assert_array_equal(pred, y_test)
# on this toy dataset all candidate models converge to 100% accuracy, so the
# first parameter setting tried (the unigram representation) is kept as the
# best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# simulate iterables
train_data = iter(data[1:-1])
test_data = iter([data[0], data[-1]])
# label junk food as -1, the others as +1
y = np.ones(len(data))
y[:6] = -1
y_train = y[1:-1]
y_test = np.array([y[0], y[-1]])
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('l1', 'l2'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# cross-validation doesn't work if the length of the data is not known,
# hence use lists instead of iterators
pred = grid_search.fit(list(train_data), y_train).predict(list(test_data))
assert_array_equal(pred, y_test)
# on this toy dataset all candidate models converge to 100% accuracy, so the
# first parameter setting tried (the unigram representation) is kept as the
# best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary)
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
assert_true(vect.fixed_vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
|
|
import json
import requests
class Buzzdata(object):
"""
Buzzdata API Client.
"""
class Error(Exception):
def __init__(self, response):
self.code = response.status_code
json = response.json
if json:
self.message = json['message']
else:
self.message = response.text
def __str__(self):
return "Buzzdata API Error: %s (%r)" % (self.message, self.code)
def __init__(self, access_token=None, base_url="https://buzzdata.com"):
"""
Create a new client instance.
**Parameters**:
:param base_url: a hive server's base URL, such as
'https://buzzdata.com' (default),
'https://myhive.buzzdata.com' and so on.
:param access_token: an OAuth2 access token.
If not present, most of the calls will fail with an
"Invalid OAuth Request" error.
**Obtaining an OAuth2 token**
Acquiring, managing and storing OAuth2 tokens is out of the
scope of this module. Please use a custom solution or a third
party module.
For development purposes you can do something like this:
from requests_oauth2 import OAuth2
consumer_key = '...'
consumer_secret = '...'
base_url = 'https://buzzdata.com'
scope = 'api_access'
redirect_uri = 'http://localhost:8080/foobar'
handler = OAuth2(consumer_key, consumer_secret,
base_url, redirect_uri)
authorize_url = handler.authorize_url(scope, response_type='token')
print authorize_url
It should print a URL similar to this:
https://buzzdata.com/oauth/authorize?response_type=token&...
Copy the URL and paste it into the browser. Confirm authorization.
When redirected to the `redirect_uri`, copy the token and pass it
to your client. It's a good idea to use a local HTTP server or a
service like http://requestb.in/ for intercepting token redirects.
"""
self.api_url = base_url + '/api'
if access_token is None:
params = None
else:
params = {'access_token': access_token}
self.client = requests.session(params=params)
# general info
def licenses(self):
return self._get("licenses")
def topics(self):
return self._get("topics")
def search(self, query):
return self._get("search", term=query)
# users
def user_info(self, username):
return self._get(username)['user']
def create_user(self, username, email, password):
return self._post("users", **form('user',
username=username,
email=email,
password=password))
# datarooms
def list_datarooms(self, username):
return self._get("%s/datasets/list" % username)
def create_dataroom(self, username, name, readme="", license="cc0",
topics=[], public=False):
result = self._post("%s/datasets" % username,
**form('dataset',
name=name,
readme=readme,
license=license,
topics=list(topics),
public=public))
return result['dataset']
def dataroom_overview(self, dataroom_id):
return self._get(dataroom_id)['dataset']
def delete_dataroom(self, dataroom_id):
return self._delete(dataroom_id)
# visualizations
def list_visualizations(self, dataroom_id):
return [dict(vis, id="%s/visualizations/%s" % (dataroom_id, vis['uuid']))
for vis in self._get("%s/visualizations" % dataroom_id)]
def create_visualization_from_url(self, dataroom_id, url, title=""):
return self._post("%s/visualizations/url" % dataroom_id,
url=url,
title=title)
def create_visualization_from_image(self, dataroom_id,
image_file, file_name, title=""):
return self._post("%s/visualizations/image" % dataroom_id,
files=dict(image=(file_name, image_file)),
title=title)
def delete_visualization(self, visualization_id):
return self._delete(visualization_id)
# datafiles
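# Datafiles are addressed by (dataroom_id, datafile_uuid) tuples, as returned
# by create_datafile(); stages extend this to (dataroom_id, datafile_uuid,
# stage_id). The "%s/%s"-style format strings below rely on this convention,
# e.g. datafile_history(('user/dataset', 'abcd1234')) requests
# 'data_files/abcd1234/history' (the uuid here is illustrative).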
def list_datafiles(self, dataroom_id):
return self._get("%s/list_datafiles" % dataroom_id)
def create_datafile(self, dataroom_id, datafile_name):
result = self._post("%s/create_datafile" % dataroom_id,
data_file_name=datafile_name)
return (dataroom_id, result['datafile_uuid'])
def datafile_history(self, datafile_id):
return self._get("data_files/%s/history" % datafile_id[1])
def new_upload_request(self, datafile_id):
result = self._post("%s/upload_request?datafile_uuid=%s" % datafile_id)
return result['upload_request']
def upload_datafile(self, datafile_id, file, file_name, release_notes=""):
upload_request = self.new_upload_request(datafile_id)
# Prepare our request
post_url = upload_request.pop('url')
upload_request['release_notes'] = release_notes
return requests.post(post_url,
files={'file': (file_name, file)},
data=upload_request)
def get_download_url(self, datafile_id, version=None, type='CSV'):
type = type.upper()
if type not in ('CSV', 'XLS', 'XLSX'):
raise ValueError("Unknown file type '%s'" % type)
result = self._post("%s/%s/download_request" % datafile_id,
type=type,
version=version)
return result['download_request']['url']
def download_data(self, datafile_id):
return requests.get(self.get_download_url(datafile_id))
# staging
def create_stage(self, datafile_id):
result = self._post("%s/%s/stage" % datafile_id)
return datafile_id + (result['id'],)
def insert_rows(self, stage_id, rows):
return self._post("%s/%s/stage/%s/rows" % stage_id,
rows=json.dumps(rows))
def update_row(self, stage_id, row_number, row):
return self._put("%s/%s/stage/%s/rows/%d" % (stage_id + (row_number,)),
row=json.dumps(row))
def delete_row(self, stage_id, row_number):
return self._delete("%s/%s/stage/%s/rows/%d" % (stage_id + (row_number,)))
def commit_stage(self, stage_id):
return self._post("%s/%s/stage/%s/commit" % stage_id)
def rollback_stage(self, stage_id):
return self._post("%s/%s/stage/%s/rollback" % stage_id)
# private
def _request(self, method, path, params, data=None, files=None):
response = method(self.api_url + '/' + path,
params=params,
data=data,
files=files)
if response.status_code > 400:
raise Buzzdata.Error(response)
return response.json
def _get(self, path, **params):
return self._request(self.client.get, path, params)
def _delete(self, path, **params):
return self._request(self.client.delete, path, params)
def _put(self, path, **data):
return self._request(self.client.put, path, {}, data=data)
def _post(self, path, files=None, **data):
return self._request(self.client.post, path, {}, data=data, files=files)
def form(formname, **fields):
return dict(('%s[%s]' % (formname, fieldname), value)
for fieldname, value in fields.items())
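# form() flattens keyword arguments into the nested (Rails-style) form field
# names used by the API calls above. For example (values are illustrative):
#
# form('user', username='jdoe', email='jdoe@example.com')
# == {'user[username]': 'jdoe', 'user[email]': 'jdoe@example.com'}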
|
|
# Copyright 2004-2017 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This module is intended to be used as a singleton object.
# Its purpose is to store in one place all of the data that would
# be too annoying to lug around otherwise.
import renpy.display
# The basepath.
basepath = None
# A list of paths that we search to load things. This is searched for
# everything that can be loaded, before archives are used.
searchpath = [ ]
# The options that were read off the command line.
args = None
# The game's script.
script = None
# A stack of execution contexts.
contexts = [ ]
# The interface that the game uses to interact with the user.
interface = None
# Are we inside lint?
lint = False
# The RollbackLog that keeps track of changes to the game state
# and to the store.
log = None
# Some useful additional information about program execution that
# can be added to the exception.
exception_info = ''
# Used to store style information.
style = None
# The set of statements we've seen in this session.
seen_session = { }
# The number of entries in persistent._seen_translates that are also in
# the current game.
seen_translates_count = 0
# The number of new translates we've seen today.
new_translates_count = 0
# True if we're in the first interaction after a rollback or rollforward.
after_rollback = False
# Code that's run after the init code.
post_init = [ ]
# Should we attempt to run in a mode that uses less memory?
less_memory = False
# Should we attempt to run in a mode that minimizes the number
# of screen updates?
less_updates = False
# Should we never show the mouse?
less_mouse = False
# Should we avoid using imagedissolve?
less_imagedissolve = False
# The persistent data that's kept from session to session
persistent = None
# The current preferences.
preferences = None
class ExceptionInfo(object):
"""
Context manager that sets exception_info iff an exception occurs.
`s`
A percent-format string to use.
`args`
The arguments that are percent-formatted with `s`.
"""
def __init__(self, s, args):
self.s = s
self.args = args
def __enter__(self):
return
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type:
renpy.game.exception_info = self.s % self.args
return False
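# A minimal usage sketch (the function and variable names are illustrative):
# wrapping code in ExceptionInfo records what was being done if it fails, so
# the formatted message ends up in exception_info for the traceback report:
#
#     with ExceptionInfo("While loading the script file %r.", (filename,)):
#         load_script(filename)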
class RestartContext(Exception):
"""
Restarts the current context. If `label` is given, calls that label
in the restarted context.
"""
def __init__(self, label):
self.label = label
class RestartTopContext(Exception):
"""
Restarts the top context. If `label` is given, calls that label
in the restarted context.
"""
def __init__(self, label):
self.label = label
class FullRestartException(Exception):
"""
An exception of this type forces a hard restart, completely
destroying the store and config and so on.
"""
def __init__(self, reason="end_game"): # W0231
self.reason = reason
class UtterRestartException(Exception):
"""
An exception of this type forces an even harder restart, causing
Ren'Py and the script to be reloaded.
"""
class QuitException(Exception):
"""
An exception of this class will let us force a safe quit, from
anywhere in the program.
`relaunch`
If given, the program will run another copy of itself, with the
same arguments.
`status`
The status code Ren'Py will return to the operating system.
"""
def __init__(self, relaunch=False, status=0):
Exception.__init__(self)
self.relaunch = relaunch
self.status = status
class JumpException(Exception):
"""
This should be raised with a label as the only argument. This causes
the current statement to terminate, and execution to be transferred
to the named label.
"""
class JumpOutException(Exception):
"""
This should be raised with a label as the only argument. This exits
the current context, and then raises a JumpException.
"""
class CallException(Exception):
"""
Raise this exception to cause the current statement to terminate,
and control to be transferred to the named label.
"""
from_current = False
def __init__(self, label, args, kwargs, from_current=False):
Exception.__init__(self)
self.label = label
self.args = args
self.kwargs = kwargs
self.from_current = from_current
def __reduce__(self):
return (CallException, (self.label, self.args, self.kwargs, self.from_current))
class EndReplay(Exception):
"""
Raise this exception to end the current replay (the current call to
call_replay).
"""
class ParseErrorException(Exception):
"""
This is raised when a parse error occurs, after it has been
reported to the user.
"""
# A tuple of exceptions that should not be caught by the
# exception reporting mechanism.
CONTROL_EXCEPTIONS = (
RestartContext,
RestartTopContext,
FullRestartException,
UtterRestartException,
QuitException,
JumpException,
JumpOutException,
CallException,
EndReplay,
ParseErrorException,
KeyboardInterrupt,
)
def context(index=-1):
"""
Return the current execution context, or the context at the
given index if one is specified.
"""
return contexts[index]
def invoke_in_new_context(callable, *args, **kwargs): # @ReservedAssignment
"""
:doc: label
This function creates a new context, and invokes the given Python
callable (function) in that context. When the function returns
or raises an exception, control returns to the original context.
It's generally used to call a Python function that needs to display
information to the player (like a confirmation prompt) from inside
an event handler.
A context maintains the state of the display (including what screens
and images are being shown) and the audio system. Both are restored
when the context returns.
Additional arguments and keyword arguments are passed to the
callable.
A context created with this function cannot execute Ren'Py script.
Functions that would change the flow of Ren'Py script, like
:func:`renpy.jump`, are handled by the outer context. If you want
to call Ren'Py script rather than a Python function, use
:func:`renpy.call_in_new_context` instead.
"""
context = renpy.execution.Context(False, contexts[-1], clear=True)
contexts.append(context)
if renpy.display.interface is not None:
renpy.display.interface.enter_context()
try:
return callable(*args, **kwargs)
except renpy.game.JumpOutException as e:
contexts[-2].force_checkpoint = True
raise renpy.game.JumpException(e.args[0])
finally:
context.pop_all_dynamic()
contexts.pop()
contexts[-1].do_deferred_rollback()
if interface and interface.restart_interaction and contexts:
contexts[-1].scene_lists.focused = None
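# A minimal usage sketch (the screen name "are_you_sure" is illustrative, not
# a stock Ren'Py screen):
#
#     def ask_player():
#         return renpy.call_screen("are_you_sure")
#
#     answer = renpy.invoke_in_new_context(ask_player)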
def call_in_new_context(label, *args, **kwargs):
"""
:doc: label
This creates a new context, and then starts executing Ren'Py script
from the given label in that context. Rollback is disabled in the
new context, and saving/loading will occur in the top level
context.
Use this to begin a second interaction with the user while
inside an interaction.
"""
context = renpy.execution.Context(False, contexts[-1], clear=True)
contexts.append(context)
if renpy.display.interface is not None:
renpy.display.interface.enter_context()
if args:
renpy.store._args = args
else:
renpy.store._args = None
if kwargs:
renpy.store._kwargs = renpy.python.RevertableDict(kwargs)
else:
renpy.store._kwargs = None
try:
context.goto_label(label)
return renpy.execution.run_context(False)
except renpy.game.JumpOutException as e:
contexts[-2].force_checkpoint = True
raise renpy.game.JumpException(e.args[0])
finally:
contexts.pop()
contexts[-1].do_deferred_rollback()
if interface and interface.restart_interaction and contexts:
contexts[-1].scene_lists.focused = None
def call_replay(label, scope={}):
"""
:doc: replay
Calls a label as a memory.
Keyword arguments are used to set the initial values of variables in the
memory context.
"""
renpy.game.log.complete()
old_log = renpy.game.log
renpy.game.log = renpy.python.RollbackLog()
sb = renpy.python.StoreBackup()
renpy.python.clean_stores()
context = renpy.execution.Context(True)
contexts.append(context)
if renpy.display.interface is not None:
renpy.display.interface.enter_context()
# This has to be here, to ensure the scope stuff works.
renpy.exports.execute_default_statement()
for k, v in renpy.config.replay_scope.iteritems():
setattr(renpy.store, k, v)
for k, v in scope.iteritems():
setattr(renpy.store, k, v)
renpy.store._in_replay = label
try:
context.goto_label("_start_replay")
renpy.execution.run_context(False)
except EndReplay:
pass
finally:
context.pop_all_dynamic()
contexts.pop()
renpy.game.log = old_log
sb.restore()
if interface and interface.restart_interaction and contexts:
contexts[-1].scene_lists.focused = None
renpy.config.skipping = None
if renpy.config.after_replay_callback:
renpy.config.after_replay_callback()
# Type information.
if False:
script = renpy.script.Script()
interface = renpy.display.core.Interface()
log = renpy.python.RollbackLog()
preferences = renpy.preferences.Preferences()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fast-Fourier Transform ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import tensor_util as _tensor_util
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import manip_ops
from tensorflow.python.ops import math_ops as _math_ops
from tensorflow.python.util.tf_export import tf_export
def _infer_fft_length_for_rfft(input_tensor, fft_rank):
"""Infers the `fft_length` argument for a `rank` RFFT from `input_tensor`."""
# A TensorShape for the inner fft_rank dimensions.
fft_shape = input_tensor.get_shape()[-fft_rank:]
# If any dim is unknown, fall back to tensor-based math.
if not fft_shape.is_fully_defined():
return _array_ops.shape(input_tensor)[-fft_rank:]
# Otherwise, return a constant.
return _ops.convert_to_tensor(fft_shape.as_list(), _dtypes.int32)
def _infer_fft_length_for_irfft(input_tensor, fft_rank):
"""Infers the `fft_length` argument for a `rank` IRFFT from `input_tensor`."""
# A TensorShape for the inner fft_rank dimensions.
fft_shape = input_tensor.get_shape()[-fft_rank:]
# If any dim is unknown, fall back to tensor-based math.
if not fft_shape.is_fully_defined():
fft_length = _array_ops.unstack(_array_ops.shape(input_tensor)[-fft_rank:])
fft_length[-1] = _math_ops.maximum(0, 2 * (fft_length[-1] - 1))
return _array_ops.stack(fft_length)
# Otherwise, return a constant.
fft_length = fft_shape.as_list()
if fft_length:
fft_length[-1] = max(0, 2 * (fft_length[-1] - 1))
return _ops.convert_to_tensor(fft_length, _dtypes.int32)
def _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length, is_reverse=False):
"""Pads `input_tensor` to `fft_length` on its inner-most `fft_rank` dims."""
fft_shape = _tensor_util.constant_value_as_shape(fft_length)
# Edge case: skip padding empty tensors.
if (input_tensor.shape.ndims is not None and
any(dim.value == 0 for dim in input_tensor.shape.dims)):
return input_tensor
# If we know the shapes ahead of time, we can either skip or pre-compute the
# appropriate paddings. Otherwise, fall back to computing paddings in
# TensorFlow.
if fft_shape.is_fully_defined() and input_tensor.shape.ndims is not None:
# Slice the last FFT-rank dimensions from input_tensor's shape.
input_fft_shape = input_tensor.shape[-fft_shape.ndims:]
if input_fft_shape.is_fully_defined():
# In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
if is_reverse:
fft_shape = fft_shape[:-1].concatenate(
fft_shape.dims[-1].value // 2 + 1)
paddings = [[0, max(fft_dim.value - input_dim.value, 0)]
for fft_dim, input_dim in zip(
fft_shape.dims, input_fft_shape.dims)]
if any(pad > 0 for _, pad in paddings):
outer_paddings = [[0, 0]] * max((input_tensor.shape.ndims -
fft_shape.ndims), 0)
return _array_ops.pad(input_tensor, outer_paddings + paddings)
return input_tensor
# If we can't determine the paddings ahead of time, then we have to pad. If
# the paddings end up as zero, tf.pad has a special-case that does no work.
input_rank = _array_ops.rank(input_tensor)
input_fft_shape = _array_ops.shape(input_tensor)[-fft_rank:]
outer_dims = _math_ops.maximum(0, input_rank - fft_rank)
outer_paddings = _array_ops.zeros([outer_dims], fft_length.dtype)
# In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
if is_reverse:
fft_length = _array_ops.concat([fft_length[:-1],
fft_length[-1:] // 2 + 1], 0)
fft_paddings = _math_ops.maximum(0, fft_length - input_fft_shape)
paddings = _array_ops.concat([outer_paddings, fft_paddings], 0)
paddings = _array_ops.stack([_array_ops.zeros_like(paddings), paddings],
axis=1)
return _array_ops.pad(input_tensor, paddings)
def _rfft_wrapper(fft_fn, fft_rank, default_name):
"""Wrapper around gen_spectral_ops.rfft* that infers fft_length argument."""
def _rfft(input_tensor, fft_length=None, name=None):
"""Wrapper around gen_spectral_ops.rfft* that infers fft_length argument."""
with _ops.name_scope(name, default_name,
[input_tensor, fft_length]) as name:
input_tensor = _ops.convert_to_tensor(input_tensor,
preferred_dtype=_dtypes.float32)
if input_tensor.dtype not in (_dtypes.float32, _dtypes.float64):
raise ValueError(
"RFFT requires tf.float32 or tf.float64 inputs, got: %s" %
input_tensor)
real_dtype = input_tensor.dtype
if real_dtype == _dtypes.float32:
complex_dtype = _dtypes.complex64
else:
assert real_dtype == _dtypes.float64
complex_dtype = _dtypes.complex128
input_tensor.shape.with_rank_at_least(fft_rank)
if fft_length is None:
fft_length = _infer_fft_length_for_rfft(input_tensor, fft_rank)
else:
fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length)
fft_length_static = _tensor_util.constant_value(fft_length)
if fft_length_static is not None:
fft_length = fft_length_static
return fft_fn(input_tensor, fft_length, Tcomplex=complex_dtype, name=name)
_rfft.__doc__ = fft_fn.__doc__
return _rfft
def _irfft_wrapper(ifft_fn, fft_rank, default_name):
"""Wrapper around gen_spectral_ops.irfft* that infers fft_length argument."""
def _irfft(input_tensor, fft_length=None, name=None):
"""Wrapper irfft* that infers fft_length argument."""
with _ops.name_scope(name, default_name,
[input_tensor, fft_length]) as name:
input_tensor = _ops.convert_to_tensor(input_tensor,
preferred_dtype=_dtypes.complex64)
input_tensor.shape.with_rank_at_least(fft_rank)
if input_tensor.dtype not in (_dtypes.complex64, _dtypes.complex128):
raise ValueError(
"IRFFT requires tf.complex64 or tf.complex128 inputs, got: %s" %
input_tensor)
complex_dtype = input_tensor.dtype
real_dtype = complex_dtype.real_dtype
if fft_length is None:
fft_length = _infer_fft_length_for_irfft(input_tensor, fft_rank)
else:
fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length,
is_reverse=True)
fft_length_static = _tensor_util.constant_value(fft_length)
if fft_length_static is not None:
fft_length = fft_length_static
return ifft_fn(input_tensor, fft_length, Treal=real_dtype, name=name)
_irfft.__doc__ = ifft_fn.__doc__
return _irfft
# FFT/IFFT 1/2/3D are exported via
# third_party/tensorflow/core/api_def/python_api/
fft = gen_spectral_ops.fft
ifft = gen_spectral_ops.ifft
fft2d = gen_spectral_ops.fft2d
ifft2d = gen_spectral_ops.ifft2d
fft3d = gen_spectral_ops.fft3d
ifft3d = gen_spectral_ops.ifft3d
rfft = _rfft_wrapper(gen_spectral_ops.rfft, 1, "rfft")
tf_export("signal.rfft", v1=["signal.rfft", "spectral.rfft"])(rfft)
irfft = _irfft_wrapper(gen_spectral_ops.irfft, 1, "irfft")
tf_export("signal.irfft", v1=["signal.irfft", "spectral.irfft"])(irfft)
rfft2d = _rfft_wrapper(gen_spectral_ops.rfft2d, 2, "rfft2d")
tf_export("signal.rfft2d", v1=["signal.rfft2d", "spectral.rfft2d"])(rfft2d)
irfft2d = _irfft_wrapper(gen_spectral_ops.irfft2d, 2, "irfft2d")
tf_export("signal.irfft2d", v1=["signal.irfft2d", "spectral.irfft2d"])(irfft2d)
rfft3d = _rfft_wrapper(gen_spectral_ops.rfft3d, 3, "rfft3d")
tf_export("signal.rfft3d", v1=["signal.rfft3d", "spectral.rfft3d"])(rfft3d)
irfft3d = _irfft_wrapper(gen_spectral_ops.irfft3d, 3, "irfft3d")
tf_export("signal.irfft3d", v1=["signal.irfft3d", "spectral.irfft3d"])(irfft3d)
def _fft_size_for_grad(grad, rank):
return _math_ops.reduce_prod(_array_ops.shape(grad)[-rank:])
@_ops.RegisterGradient("FFT")
def _fft_grad(_, grad):
size = _math_ops.cast(_fft_size_for_grad(grad, 1), grad.dtype)
return ifft(grad) * size
@_ops.RegisterGradient("IFFT")
def _ifft_grad(_, grad):
rsize = _math_ops.cast(
1. / _math_ops.cast(_fft_size_for_grad(grad, 1), grad.dtype.real_dtype),
grad.dtype)
return fft(grad) * rsize
@_ops.RegisterGradient("FFT2D")
def _fft2d_grad(_, grad):
size = _math_ops.cast(_fft_size_for_grad(grad, 2), grad.dtype)
return ifft2d(grad) * size
@_ops.RegisterGradient("IFFT2D")
def _ifft2d_grad(_, grad):
rsize = _math_ops.cast(
1. / _math_ops.cast(_fft_size_for_grad(grad, 2), grad.dtype.real_dtype),
grad.dtype)
return fft2d(grad) * rsize
@_ops.RegisterGradient("FFT3D")
def _fft3d_grad(_, grad):
size = _math_ops.cast(_fft_size_for_grad(grad, 3), grad.dtype)
return ifft3d(grad) * size
@_ops.RegisterGradient("IFFT3D")
def _ifft3d_grad(_, grad):
rsize = _math_ops.cast(
1. / _math_ops.cast(_fft_size_for_grad(grad, 3), grad.dtype.real_dtype),
grad.dtype)
return fft3d(grad) * rsize
def _rfft_grad_helper(rank, irfft_fn):
"""Returns a gradient function for an RFFT of the provided rank."""
# Can't happen because we don't register a gradient for RFFT3D.
assert rank in (1, 2), "Gradient for RFFT3D is not implemented."
def _grad(op, grad):
"""A gradient function for RFFT with the provided `rank` and `irfft_fn`."""
fft_length = op.inputs[1]
complex_dtype = grad.dtype
real_dtype = complex_dtype.real_dtype
input_shape = _array_ops.shape(op.inputs[0])
is_even = _math_ops.cast(1 - (fft_length[-1] % 2), complex_dtype)
def _tile_for_broadcasting(matrix, t):
expanded = _array_ops.reshape(
matrix,
_array_ops.concat([
_array_ops.ones([_array_ops.rank(t) - 2], _dtypes.int32),
_array_ops.shape(matrix)
], 0))
return _array_ops.tile(
expanded, _array_ops.concat([_array_ops.shape(t)[:-2], [1, 1]], 0))
def _mask_matrix(length):
"""Computes t_n = exp(sqrt(-1) * pi * n^2 / line_len)."""
# TODO(rjryan): Speed up computation of twiddle factors using the
# following recurrence relation and cache them across invocations of RFFT.
#
# t_n = exp(sqrt(-1) * pi * n^2 / line_len)
# for n = 0, 1,..., line_len-1.
# For n > 2, use t_n = t_{n-1}^2 / t_{n-2} * t_1^2
a = _array_ops.tile(
_array_ops.expand_dims(_math_ops.range(length), 0), (length, 1))
b = _array_ops.transpose(a, [1, 0])
return _math_ops.exp(
-2j * np.pi * _math_ops.cast(a * b, complex_dtype) /
_math_ops.cast(length, complex_dtype))
def _ymask(length):
"""A sequence of [1+0j, -1+0j, 1+0j, -1+0j, ...] with length `length`."""
return _math_ops.cast(1 - 2 * (_math_ops.range(length) % 2),
complex_dtype)
y0 = grad[..., 0:1]
if rank == 1:
ym = grad[..., -1:]
extra_terms = y0 + is_even * ym * _ymask(input_shape[-1])
elif rank == 2:
# Create a mask matrix for y0 and ym.
base_mask = _mask_matrix(input_shape[-2])
# Tile base_mask to match y0 in shape so that we can batch-matmul the
# inner 2 dimensions.
tiled_mask = _tile_for_broadcasting(base_mask, y0)
y0_term = _math_ops.matmul(tiled_mask, _math_ops.conj(y0))
extra_terms = y0_term
ym = grad[..., -1:]
ym_term = _math_ops.matmul(tiled_mask, _math_ops.conj(ym))
inner_dim = input_shape[-1]
ym_term = _array_ops.tile(
ym_term,
_array_ops.concat([
_array_ops.ones([_array_ops.rank(grad) - 1], _dtypes.int32),
[inner_dim]
], 0)) * _ymask(inner_dim)
extra_terms += is_even * ym_term
# The gradient of RFFT is the IRFFT of the incoming gradient times a scaling
# factor, plus some additional terms to make up for the components dropped
# due to Hermitian symmetry.
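# Concretely, the lines below compute
#   grad_x = 0.5 * (IRFFT(grad, fft_length) * N + Re(extra_terms)),
# where N is the product of the inner `rank` dimensions of the forward input.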
input_size = _math_ops.cast(
_fft_size_for_grad(op.inputs[0], rank), real_dtype)
the_irfft = irfft_fn(grad, fft_length)
return 0.5 * (the_irfft * input_size + _math_ops.real(extra_terms)), None
return _grad
def _irfft_grad_helper(rank, rfft_fn):
"""Returns a gradient function for an IRFFT of the provided rank."""
# Can't happen because we don't register a gradient for IRFFT3D.
assert rank in (1, 2), "Gradient for IRFFT3D is not implemented."
def _grad(op, grad):
"""A gradient function for IRFFT with the provided `rank` and `rfft_fn`."""
# Generate a simple mask like [1.0, 2.0, ..., 2.0, 1.0] for even-length FFTs
# and [1.0, 2.0, ..., 2.0] for odd-length FFTs. To reduce extra ops in the
# graph we special-case the situation where the FFT length and last
# dimension of the input are known at graph construction time.
fft_length = op.inputs[1]
fft_length_static = _tensor_util.constant_value(fft_length)
if fft_length_static is not None:
fft_length = fft_length_static
real_dtype = grad.dtype
    if real_dtype == _dtypes.float32:
      complex_dtype = _dtypes.complex64
    elif real_dtype == _dtypes.float64:
      complex_dtype = _dtypes.complex128
    else:
      raise ValueError("Unsupported dtype for IRFFT gradient: %s" % real_dtype)
is_odd = _math_ops.mod(fft_length[-1], 2)
input_last_dimension = _array_ops.shape(op.inputs[0])[-1]
mask = _array_ops.concat(
[[1.0], 2.0 * _array_ops.ones(
[input_last_dimension - 2 + is_odd], real_dtype),
_array_ops.ones([1 - is_odd], real_dtype)], 0)
rsize = _math_ops.reciprocal(_math_ops.cast(
_fft_size_for_grad(grad, rank), real_dtype))
# The gradient of IRFFT is the RFFT of the incoming gradient times a scaling
# factor and a mask. The mask scales the gradient for the Hermitian
# symmetric components of the RFFT by a factor of two, since these
# components are de-duplicated in the RFFT.
the_rfft = rfft_fn(grad, fft_length)
return the_rfft * _math_ops.cast(rsize * mask, complex_dtype), None
return _grad
@tf_export("signal.fftshift")
def fftshift(x, axes=None, name=None):
"""Shift the zero-frequency component to the center of the spectrum.
This function swaps half-spaces for all axes listed (defaults to all).
Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.
@compatibility(numpy)
Equivalent to numpy.fft.fftshift.
https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.fftshift.html
@end_compatibility
For example:
```python
x = tf.signal.fftshift([ 0., 1., 2., 3., 4., -5., -4., -3., -2., -1.])
x.numpy() # array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.])
```
Args:
x: `Tensor`, input tensor.
    axes: `int` or shape `tuple`, optional. Axes over which to shift. Defaults to
      None, which shifts all axes.
name: An optional name for the operation.
Returns:
A `Tensor`, The shifted tensor.
"""
with _ops.name_scope(name, "fftshift") as name:
x = _ops.convert_to_tensor(x)
if axes is None:
axes = tuple(range(x.shape.ndims))
shift = _array_ops.shape(x) // 2
elif isinstance(axes, int):
shift = _array_ops.shape(x)[axes] // 2
else:
shift = _array_ops.gather(_array_ops.shape(x), axes) // 2
return manip_ops.roll(x, shift, axes, name)
@tf_export("signal.ifftshift")
def ifftshift(x, axes=None, name=None):
"""The inverse of fftshift.
Although identical for even-length x,
the functions differ by one sample for odd-length x.
@compatibility(numpy)
Equivalent to numpy.fft.ifftshift.
https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.ifftshift.html
@end_compatibility
For example:
```python
x = tf.signal.ifftshift([[ 0., 1., 2.],[ 3., 4., -4.],[-3., -2., -1.]])
x.numpy() # array([[ 4., -4., 3.],[-2., -1., -3.],[ 1., 2., 0.]])
```
Args:
x: `Tensor`, input tensor.
    axes: `int` or shape `tuple`, optional. Axes over which to calculate. Defaults
      to None, which shifts all axes.
name: An optional name for the operation.
Returns:
A `Tensor`, The shifted tensor.
"""
with _ops.name_scope(name, "ifftshift") as name:
x = _ops.convert_to_tensor(x)
if axes is None:
axes = tuple(range(x.shape.ndims))
shift = -(_array_ops.shape(x) // 2)
elif isinstance(axes, int):
shift = -(_array_ops.shape(x)[axes] // 2)
else:
shift = -(_array_ops.gather(_array_ops.shape(x), axes) // 2)
return manip_ops.roll(x, shift, axes, name)
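# Illustrative note (a hedged sketch, not part of the library): for odd-length
# inputs the two shifts are not interchangeable. With x = [0., 1., 2., 3., 4.]:
#   tf.signal.fftshift(x)   -> [3., 4., 0., 1., 2.]
#   tf.signal.ifftshift(x)  -> [2., 3., 4., 0., 1.]
# so ifftshift(fftshift(x)) recovers x, whereas applying fftshift twice does not.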
_ops.RegisterGradient("RFFT")(_rfft_grad_helper(1, irfft))
_ops.RegisterGradient("IRFFT")(_irfft_grad_helper(1, rfft))
_ops.RegisterGradient("RFFT2D")(_rfft_grad_helper(2, irfft2d))
_ops.RegisterGradient("IRFFT2D")(_irfft_grad_helper(2, rfft2d))
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import unittest
import pytest
from airflow.models import Connection
from airflow.providers.dingding.hooks.dingding import DingdingHook
from airflow.utils import db
class TestDingdingHook(unittest.TestCase):
conn_id = 'dingding_conn_id_test'
def setUp(self):
db.merge_conn(
Connection(
conn_id=self.conn_id,
conn_type='dingding',
host='https://oapi.dingtalk.com',
password='you_token_here',
)
)
def test_get_endpoint_conn_id(self):
hook = DingdingHook(dingding_conn_id=self.conn_id)
endpoint = hook._get_endpoint()
assert 'robot/send?access_token=you_token_here' == endpoint
def test_build_text_message_not_remind(self):
config = {
'dingding_conn_id': self.conn_id,
'message_type': 'text',
'message': 'Airflow dingding text message remind no one',
'at_mobiles': False,
'at_all': False,
}
expect = {
'msgtype': 'text',
'text': {'content': 'Airflow dingding text message remind no one'},
'at': {'atMobiles': False, 'isAtAll': False},
}
hook = DingdingHook(**config)
message = hook._build_message()
assert json.dumps(expect) == message
def test_build_text_message_remind_specific(self):
config = {
'dingding_conn_id': self.conn_id,
'message_type': 'text',
'message': 'Airflow dingding text message remind specific users',
'at_mobiles': ['1234', '5768'],
'at_all': False,
}
expect = {
'msgtype': 'text',
'text': {'content': 'Airflow dingding text message remind specific users'},
'at': {'atMobiles': ['1234', '5768'], 'isAtAll': False},
}
hook = DingdingHook(**config)
message = hook._build_message()
assert json.dumps(expect) == message
def test_build_text_message_remind_all(self):
config = {
'dingding_conn_id': self.conn_id,
'message_type': 'text',
'message': 'Airflow dingding text message remind all user in group',
'at_all': True,
}
expect = {
'msgtype': 'text',
'text': {'content': 'Airflow dingding text message remind all user in group'},
'at': {'atMobiles': None, 'isAtAll': True},
}
hook = DingdingHook(**config)
message = hook._build_message()
assert json.dumps(expect) == message
def test_build_markdown_message_remind_specific(self):
msg = {
'title': 'Airflow dingding markdown message',
'text': '# Markdown message title\ncontent content .. \n### sub-title\n'
'',
}
config = {
'dingding_conn_id': self.conn_id,
'message_type': 'markdown',
'message': msg,
'at_mobiles': ['1234', '5678'],
'at_all': False,
}
expect = {
'msgtype': 'markdown',
'markdown': msg,
'at': {'atMobiles': ['1234', '5678'], 'isAtAll': False},
}
hook = DingdingHook(**config)
message = hook._build_message()
assert json.dumps(expect) == message
def test_build_markdown_message_remind_all(self):
msg = {
'title': 'Airflow dingding markdown message',
'text': '# Markdown message title\ncontent content .. \n### sub-title\n'
'',
}
config = {
'dingding_conn_id': self.conn_id,
'message_type': 'markdown',
'message': msg,
'at_all': True,
}
expect = {'msgtype': 'markdown', 'markdown': msg, 'at': {'atMobiles': None, 'isAtAll': True}}
hook = DingdingHook(**config)
message = hook._build_message()
assert json.dumps(expect) == message
def test_build_link_message(self):
msg = {
'title': 'Airflow dingding link message',
'text': 'Airflow official documentation link',
'messageUrl': 'https://airflow.apache.org',
'picURL': 'https://airflow.apache.org/_images/pin_large.png',
}
config = {'dingding_conn_id': self.conn_id, 'message_type': 'link', 'message': msg}
expect = {'msgtype': 'link', 'link': msg}
hook = DingdingHook(**config)
message = hook._build_message()
assert json.dumps(expect) == message
def test_build_single_action_card_message(self):
msg = {
'title': 'Airflow dingding single actionCard message',
'text': 'Airflow dingding single actionCard message\n'
'\n'
            'This is an official logo on the Airflow website.',
'hideAvatar': '0',
'btnOrientation': '0',
'singleTitle': 'read more',
'singleURL': 'https://airflow.apache.org',
}
config = {'dingding_conn_id': self.conn_id, 'message_type': 'actionCard', 'message': msg}
expect = {'msgtype': 'actionCard', 'actionCard': msg}
hook = DingdingHook(**config)
message = hook._build_message()
assert json.dumps(expect) == message
def test_build_multi_action_card_message(self):
msg = {
'title': 'Airflow dingding multi actionCard message',
'text': 'Airflow dingding multi actionCard message\n'
'\n'
'Airflow documentation and GitHub',
'hideAvatar': '0',
'btnOrientation': '0',
'btns': [
{'title': 'Airflow Documentation', 'actionURL': 'https://airflow.apache.org'},
{'title': 'Airflow GitHub', 'actionURL': 'https://github.com/apache/airflow'},
],
}
config = {'dingding_conn_id': self.conn_id, 'message_type': 'actionCard', 'message': msg}
expect = {'msgtype': 'actionCard', 'actionCard': msg}
hook = DingdingHook(**config)
message = hook._build_message()
assert json.dumps(expect) == message
def test_build_feed_card_message(self):
msg = {
"links": [
{
"title": "Airflow DAG feed card",
"messageURL": "https://airflow.apache.org/docs/apache-airflow/stable/ui.html",
"picURL": "https://airflow.apache.org/_images/dags.png",
},
{
"title": "Airflow tree feed card",
"messageURL": "https://airflow.apache.org/docs/apache-airflow/stable/ui.html",
"picURL": "https://airflow.apache.org/_images/tree.png",
},
{
"title": "Airflow graph feed card",
"messageURL": "https://airflow.apache.org/docs/apache-airflow/stable/ui.html",
"picURL": "https://airflow.apache.org/_images/graph.png",
},
]
}
config = {'dingding_conn_id': self.conn_id, 'message_type': 'feedCard', 'message': msg}
expect = {'msgtype': 'feedCard', 'feedCard': msg}
hook = DingdingHook(**config)
message = hook._build_message()
assert json.dumps(expect) == message
def test_send_not_support_type(self):
config = {
'dingding_conn_id': self.conn_id,
'message_type': 'not_support_type',
'message': 'Airflow dingding text message remind no one',
}
hook = DingdingHook(**config)
with pytest.raises(ValueError):
hook.send()
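# --- Hedged usage sketch (not part of the test suite) -------------------------
# Based only on the constructor arguments and the send() call exercised above;
# the connection id mirrors the one created in setUp(). Defined, never called.
def _example_send_text_message():
    hook = DingdingHook(
        dingding_conn_id='dingding_conn_id_test',
        message_type='text',
        message='Job finished',
        at_all=True,
    )
    hook.send()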
|
|
# Code in this file is copied and adapted from
# https://github.com/openai/evolution-strategies-starter and from
# https://github.com/modestyachts/ARS
from collections import namedtuple
import logging
import numpy as np
import time
import ray
from ray.rllib.agents import Trainer, with_common_config
from ray.rllib.agents.ars.ars_tf_policy import ARSTFPolicy
from ray.rllib.agents.es import optimizers, utils
from ray.rllib.agents.es.es import validate_config
from ray.rllib.agents.es.es_tf_policy import rollout
from ray.rllib.env.env_context import EnvContext
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.utils.annotations import override
from ray.rllib.utils import FilterManager
logger = logging.getLogger(__name__)
Result = namedtuple("Result", [
"noise_indices", "noisy_returns", "sign_noisy_returns", "noisy_lengths",
"eval_returns", "eval_lengths"
])
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    "action_noise_std": 0.0,  # std deviation of noise added to actions
    "noise_stdev": 0.02,  # std deviation of parameter noise
    "num_rollouts": 32,  # number of perturbs to try
    "rollouts_used": 32,  # number of perturbs to keep in gradient estimate
    "num_workers": 2,  # number of remote rollout workers (actors)
    "sgd_stepsize": 0.01,  # sgd step-size
    "observation_filter": "MeanStdFilter",  # running mean/std observation filter
    "noise_size": 250000000,  # number of float32 entries in the shared noise table
    "eval_prob": 0.03,  # probability of evaluating the parameter rewards
    "report_length": 10,  # how many of the last rewards we average over
    "offset": 0,  # reward offset passed to rollout (see Worker.rollout below)
})
# __sphinx_doc_end__
# yapf: enable
@ray.remote
def create_shared_noise(count):
"""Create a large array of noise to be shared by all workers."""
seed = 123
noise = np.random.RandomState(seed).randn(count).astype(np.float32)
return noise
class SharedNoiseTable:
def __init__(self, noise):
self.noise = noise
assert self.noise.dtype == np.float32
def get(self, i, dim):
return self.noise[i:i + dim]
def sample_index(self, dim):
return np.random.randint(0, len(self.noise) - dim + 1)
def get_delta(self, dim):
idx = self.sample_index(dim)
return idx, self.get(idx, dim)
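# --- Hedged usage sketch (not part of this module) ---------------------------
# How the shared noise table is used to build antithetic perturbations,
# mirroring Worker.do_rollouts() below. Defined, never called.
def _example_perturbation(noise_table, params, noise_stdev):
    # Sample an offset into the table and read a slice the size of the policy.
    index, delta = noise_table.get_delta(params.size)
    perturbation = noise_stdev * delta
    # Evaluate the same direction with positive and negative signs.
    theta_plus = params + perturbation
    theta_minus = params - perturbation
    return index, theta_plus, theta_minus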
@ray.remote
class Worker:
def __init__(self,
config,
env_creator,
noise,
worker_index,
min_task_runtime=0.2):
self.min_task_runtime = min_task_runtime
self.config = config
self.config["single_threaded"] = True
self.noise = SharedNoiseTable(noise)
env_context = EnvContext(config["env_config"] or {}, worker_index)
self.env = env_creator(env_context)
from ray.rllib import models
self.preprocessor = models.ModelCatalog.get_preprocessor(self.env)
policy_cls = get_policy_class(config)
self.policy = policy_cls(self.env.observation_space,
self.env.action_space, config)
@property
def filters(self):
return {DEFAULT_POLICY_ID: self.policy.observation_filter}
def sync_filters(self, new_filters):
for k in self.filters:
self.filters[k].sync(new_filters[k])
def get_filters(self, flush_after=False):
return_filters = {}
for k, f in self.filters.items():
return_filters[k] = f.as_serializable()
if flush_after:
f.clear_buffer()
return return_filters
def rollout(self, timestep_limit, add_noise=False):
rollout_rewards, rollout_fragment_length = rollout(
self.policy,
self.env,
timestep_limit=timestep_limit,
add_noise=add_noise,
offset=self.config["offset"])
return rollout_rewards, rollout_fragment_length
def do_rollouts(self, params, timestep_limit=None):
# Set the network weights.
self.policy.set_flat_weights(params)
noise_indices, returns, sign_returns, lengths = [], [], [], []
eval_returns, eval_lengths = [], []
# Perform some rollouts with noise.
        while len(noise_indices) == 0:
if np.random.uniform() < self.config["eval_prob"]:
# Do an evaluation run with no perturbation.
self.policy.set_flat_weights(params)
rewards, length = self.rollout(timestep_limit, add_noise=False)
eval_returns.append(rewards.sum())
eval_lengths.append(length)
else:
# Do a regular run with parameter perturbations.
noise_index = self.noise.sample_index(self.policy.num_params)
perturbation = self.config["noise_stdev"] * self.noise.get(
noise_index, self.policy.num_params)
# These two sampling steps could be done in parallel on
# different actors letting us update twice as frequently.
self.policy.set_flat_weights(params + perturbation)
rewards_pos, lengths_pos = self.rollout(timestep_limit)
self.policy.set_flat_weights(params - perturbation)
rewards_neg, lengths_neg = self.rollout(timestep_limit)
noise_indices.append(noise_index)
returns.append([rewards_pos.sum(), rewards_neg.sum()])
sign_returns.append(
[np.sign(rewards_pos).sum(),
np.sign(rewards_neg).sum()])
lengths.append([lengths_pos, lengths_neg])
return Result(
noise_indices=noise_indices,
noisy_returns=returns,
sign_noisy_returns=sign_returns,
noisy_lengths=lengths,
eval_returns=eval_returns,
eval_lengths=eval_lengths)
def get_policy_class(config):
if config["framework"] == "torch":
from ray.rllib.agents.ars.ars_torch_policy import ARSTorchPolicy
policy_cls = ARSTorchPolicy
else:
policy_cls = ARSTFPolicy
return policy_cls
class ARSTrainer(Trainer):
"""Large-scale implementation of Augmented Random Search in Ray."""
_name = "ARS"
_default_config = DEFAULT_CONFIG
@override(Trainer)
def _init(self, config, env_creator):
validate_config(config)
env_context = EnvContext(config["env_config"] or {}, worker_index=0)
env = env_creator(env_context)
policy_cls = get_policy_class(config)
self.policy = policy_cls(env.observation_space, env.action_space,
config)
self.optimizer = optimizers.SGD(self.policy, config["sgd_stepsize"])
self.rollouts_used = config["rollouts_used"]
self.num_rollouts = config["num_rollouts"]
self.report_length = config["report_length"]
# Create the shared noise table.
logger.info("Creating shared noise table.")
noise_id = create_shared_noise.remote(config["noise_size"])
self.noise = SharedNoiseTable(ray.get(noise_id))
# Create the actors.
logger.info("Creating actors.")
self.workers = [
Worker.remote(config, env_creator, noise_id, idx + 1)
for idx in range(config["num_workers"])
]
self.episodes_so_far = 0
self.reward_list = []
self.tstart = time.time()
@override(Trainer)
def get_policy(self, policy=DEFAULT_POLICY_ID):
if policy != DEFAULT_POLICY_ID:
raise ValueError("ARS has no policy '{}'! Use {} "
"instead.".format(policy, DEFAULT_POLICY_ID))
return self.policy
@override(Trainer)
def step(self):
config = self.config
theta = self.policy.get_flat_weights()
assert theta.dtype == np.float32
assert len(theta.shape) == 1
# Put the current policy weights in the object store.
theta_id = ray.put(theta)
        # Use the actors to do rollouts. Note that we pass in the ID of the
        # policy weights.
results, num_episodes, num_timesteps = self._collect_results(
theta_id, config["num_rollouts"])
all_noise_indices = []
all_training_returns = []
all_training_lengths = []
all_eval_returns = []
all_eval_lengths = []
# Loop over the results.
for result in results:
all_eval_returns += result.eval_returns
all_eval_lengths += result.eval_lengths
all_noise_indices += result.noise_indices
all_training_returns += result.noisy_returns
all_training_lengths += result.noisy_lengths
assert len(all_eval_returns) == len(all_eval_lengths)
assert (len(all_noise_indices) == len(all_training_returns) ==
len(all_training_lengths))
self.episodes_so_far += num_episodes
# Assemble the results.
eval_returns = np.array(all_eval_returns)
eval_lengths = np.array(all_eval_lengths)
noise_indices = np.array(all_noise_indices)
noisy_returns = np.array(all_training_returns)
noisy_lengths = np.array(all_training_lengths)
# keep only the best returns
# select top performing directions if rollouts_used < num_rollouts
max_rewards = np.max(noisy_returns, axis=1)
if self.rollouts_used > self.num_rollouts:
self.rollouts_used = self.num_rollouts
percentile = 100 * (1 - (self.rollouts_used / self.num_rollouts))
idx = np.arange(max_rewards.size)[
max_rewards >= np.percentile(max_rewards, percentile)]
noise_idx = noise_indices[idx]
noisy_returns = noisy_returns[idx, :]
# Compute and take a step.
g, count = utils.batched_weighted_sum(
noisy_returns[:, 0] - noisy_returns[:, 1],
(self.noise.get(index, self.policy.num_params)
for index in noise_idx),
batch_size=min(500, noisy_returns[:, 0].size))
g /= noise_idx.size
# scale the returns by their standard deviation
if not np.isclose(np.std(noisy_returns), 0.0):
g /= np.std(noisy_returns)
assert (g.shape == (self.policy.num_params, )
and g.dtype == np.float32)
# Compute the new weights theta.
theta, update_ratio = self.optimizer.update(-g)
# Set the new weights in the local copy of the policy.
self.policy.set_flat_weights(theta)
# update the reward list
if len(all_eval_returns) > 0:
self.reward_list.append(eval_returns.mean())
# Now sync the filters
FilterManager.synchronize({
DEFAULT_POLICY_ID: self.policy.observation_filter
}, self.workers)
info = {
"weights_norm": np.square(theta).sum(),
"weights_std": np.std(theta),
"grad_norm": np.square(g).sum(),
"update_ratio": update_ratio,
"episodes_this_iter": noisy_lengths.size,
"episodes_so_far": self.episodes_so_far,
}
result = dict(
episode_reward_mean=np.mean(
self.reward_list[-self.report_length:]),
episode_len_mean=eval_lengths.mean(),
timesteps_this_iter=noisy_lengths.sum(),
info=info)
return result
@override(Trainer)
def cleanup(self):
# workaround for https://github.com/ray-project/ray/issues/1516
for w in self.workers:
w.__ray_terminate__.remote()
@override(Trainer)
def compute_action(self, observation, *args, **kwargs):
action = self.policy.compute_actions(observation, update=True)[0]
if kwargs.get("full_fetch"):
return action, [], {}
return action
def _collect_results(self, theta_id, min_episodes):
num_episodes, num_timesteps = 0, 0
results = []
while num_episodes < min_episodes:
logger.debug(
"Collected {} episodes {} timesteps so far this iter".format(
num_episodes, num_timesteps))
rollout_ids = [
worker.do_rollouts.remote(theta_id) for worker in self.workers
]
# Get the results of the rollouts.
for result in ray.get(rollout_ids):
results.append(result)
# Update the number of episodes and the number of timesteps
# keeping in mind that result.noisy_lengths is a list of lists,
# where the inner lists have length 2.
num_episodes += sum(len(pair) for pair in result.noisy_lengths)
num_timesteps += sum(
sum(pair) for pair in result.noisy_lengths)
return results, num_episodes, num_timesteps
def __getstate__(self):
return {
"weights": self.policy.get_flat_weights(),
"filter": self.policy.observation_filter,
"episodes_so_far": self.episodes_so_far,
}
def __setstate__(self, state):
self.episodes_so_far = state["episodes_so_far"]
self.policy.set_flat_weights(state["weights"])
self.policy.observation_filter = state["filter"]
FilterManager.synchronize({
DEFAULT_POLICY_ID: self.policy.observation_filter
}, self.workers)
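# --- Hedged usage sketch (not part of this module) ---------------------------
# Rough outline of driving the trainer, assuming a local Ray runtime, a Gym
# environment name, and the common RLlib Trainer API (config=/env= keyword
# arguments and a train() method). Defined, never called.
def _example_train(num_iterations=3):
    ray.init()
    config = DEFAULT_CONFIG.copy()
    config["num_workers"] = 2
    trainer = ARSTrainer(config=config, env="CartPole-v0")
    for _ in range(num_iterations):
        result = trainer.train()
        print(result["episode_reward_mean"])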
|
|
#!/usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for mb.py."""
import json
import StringIO
import os
import sys
import unittest
import mb
class FakeMBW(mb.MetaBuildWrapper):
def __init__(self, win32=False):
super(FakeMBW, self).__init__()
# Override vars for test portability.
if win32:
self.chromium_src_dir = 'c:\\fake_src'
self.default_config = 'c:\\fake_src\\tools\\mb\\mb_config.pyl'
self.platform = 'win32'
self.executable = 'c:\\python\\python.exe'
self.sep = '\\'
else:
self.chromium_src_dir = '/fake_src'
self.default_config = '/fake_src/tools/mb/mb_config.pyl'
self.executable = '/usr/bin/python'
self.platform = 'linux2'
self.sep = '/'
self.files = {}
self.calls = []
self.cmds = []
self.cross_compile = None
self.out = ''
self.err = ''
self.rmdirs = []
def ExpandUser(self, path):
return '$HOME/%s' % path
def Exists(self, path):
return self.files.get(path) is not None
def MaybeMakeDirectory(self, path):
self.files[path] = True
def PathJoin(self, *comps):
return self.sep.join(comps)
def ReadFile(self, path):
return self.files[path]
def WriteFile(self, path, contents, force_verbose=False):
self.files[path] = contents
def Call(self, cmd, env=None, buffer_output=True):
if env:
self.cross_compile = env.get('GYP_CROSSCOMPILE')
self.calls.append(cmd)
if self.cmds:
return self.cmds.pop(0)
return 0, '', ''
def Print(self, *args, **kwargs):
sep = kwargs.get('sep', ' ')
end = kwargs.get('end', '\n')
f = kwargs.get('file', sys.stdout)
if f == sys.stderr:
self.err += sep.join(args) + end
else:
self.out += sep.join(args) + end
def TempFile(self, mode='w'):
return FakeFile(self.files)
def RemoveFile(self, path):
del self.files[path]
def RemoveDirectory(self, path):
self.rmdirs.append(path)
files_to_delete = [f for f in self.files if f.startswith(path)]
for f in files_to_delete:
self.files[f] = None
class FakeFile(object):
def __init__(self, files):
self.name = '/tmp/file'
self.buf = ''
self.files = files
def write(self, contents):
self.buf += contents
def close(self):
self.files[self.name] = self.buf
TEST_CONFIG = """\
{
'common_dev_configs': ['gn_debug'],
'configs': {
'gyp_rel_bot': ['gyp', 'rel', 'goma'],
'gn_debug': ['gn', 'debug', 'goma'],
'gyp_debug': ['gyp', 'debug'],
'gn_rel_bot': ['gn', 'rel', 'goma'],
'private': ['gyp', 'rel', 'fake_feature1'],
'unsupported': ['gn', 'fake_feature2'],
},
'masters': {
'chromium': {},
'fake_master': {
'fake_builder': 'gyp_rel_bot',
'fake_gn_builder': 'gn_rel_bot',
'fake_gyp_builder': 'gyp_debug',
},
},
'mixins': {
'fake_feature1': {
'gn_args': 'enable_doom_melon=true',
'gyp_crosscompile': True,
'gyp_defines': 'doom_melon=1',
},
'fake_feature2': {
'gn_args': 'enable_doom_melon=false',
'gyp_defaults': 'doom_melon=0',
},
'gyp': {'type': 'gyp'},
'gn': {'type': 'gn'},
'goma': {
'gn_args': 'use_goma=true goma_dir="$(goma_dir)"',
'gyp_defines': 'goma=1 gomadir=$(goma_dir)',
},
'rel': {
'gn_args': 'is_debug=false',
},
'debug': {
'gn_args': 'is_debug=true',
},
},
'private_configs': ['private'],
'unsupported_configs': ['unsupported'],
}
"""
TEST_BAD_CONFIG = """\
{
'common_dev_configs': ['gn_rel_bot_1'],
'configs': {
'gn_rel_bot_1': ['gn', 'rel', 'chrome_with_codecs'],
'gn_rel_bot_2': ['gn', 'rel', 'bad_nested_config'],
},
'masters': {
'chromium': {
'a': 'gn_rel_bot_1',
'b': 'gn_rel_bot_2',
},
},
'mixins': {
'gn': {'type': 'gn'},
'chrome_with_codecs': {
'gn_args': 'proprietary_codecs=true',
},
'bad_nested_config': {
'mixins': ['chrome_with_codecs'],
},
'rel': {
'gn_args': 'is_debug=false',
},
},
'private_configs': ['private'],
'unsupported_configs': ['unsupported'],
}
"""
TEST_BAD_CONFIG_ERR = """\
mb config file /fake_src/tools/mb/mb_config.pyl has problems:
Config "gn_rel_bot_1" used by a bot is also listed in "common_dev_configs".
Unknown config "unsupported" referenced from "unsupported_configs".
Unknown config "private" referenced from "private_configs".
Public artifact builder "a" can not contain the "chrome_with_codecs" mixin.
Public artifact builder "b" can not contain the "chrome_with_codecs" mixin."""
class UnitTest(unittest.TestCase):
def fake_mbw(self, files=None, win32=False):
mbw = FakeMBW(win32=win32)
mbw.files.setdefault(mbw.default_config, TEST_CONFIG)
if files:
for path, contents in files.items():
mbw.files[path] = contents
return mbw
def check(self, args, mbw=None, files=None, out=None, err=None, ret=None,
exception=None):
if not mbw:
mbw = self.fake_mbw(files)
mbw.ParseArgs(args)
actual_ret = None
if exception is not None:
self.assertRaisesRegexp(Exception, exception, mbw.args.func)
else:
actual_ret = mbw.args.func()
self.assertEqual(actual_ret, ret)
if out is not None:
self.assertEqual(mbw.out, out)
if err is not None:
self.assertEqual(mbw.err, err)
return mbw
def test_clobber(self):
files = {
'/fake_src/out/Debug': None,
'/fake_src/out/Debug/mb_type': None,
}
mbw = self.fake_mbw(files)
# The first time we run this, the build dir doesn't exist, so no clobber.
self.check(['gen', '-c', 'gn_debug', '//out/Debug'], mbw=mbw, ret=0)
self.assertEqual(mbw.rmdirs, [])
self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gn')
# The second time we run this, the build dir exists and matches, so no
# clobber.
self.check(['gen', '-c', 'gn_debug', '//out/Debug'], mbw=mbw, ret=0)
self.assertEqual(mbw.rmdirs, [])
self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gn')
# Now we switch build types; this should result in a clobber.
self.check(['gen', '-c', 'gyp_debug', '//out/Debug'], mbw=mbw, ret=0)
self.assertEqual(mbw.rmdirs, ['/fake_src/out/Debug'])
self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gyp')
# Now we delete mb_type; this checks the case where the build dir
# exists but wasn't populated by mb; this should also result in a clobber.
del mbw.files['/fake_src/out/Debug/mb_type']
self.check(['gen', '-c', 'gyp_debug', '//out/Debug'], mbw=mbw, ret=0)
self.assertEqual(mbw.rmdirs,
['/fake_src/out/Debug', '/fake_src/out/Debug'])
self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gyp')
def test_gn_analyze(self):
files = {'/tmp/in.json': """{\
"files": ["foo/foo_unittest.cc"],
"test_targets": ["foo_unittests", "bar_unittests"],
"additional_compile_targets": []
}"""}
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (
0, 'out/Default/foo_unittests\n', '')
self.check(['analyze', '-c', 'gn_debug', '//out/Default',
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
self.assertEqual(out, {
'status': 'Found dependency',
'compile_targets': ['foo_unittests'],
'test_targets': ['foo_unittests']
})
def test_gn_analyze_all(self):
files = {'/tmp/in.json': """{\
"files": ["foo/foo_unittest.cc"],
"test_targets": ["bar_unittests"],
"additional_compile_targets": ["all"]
}"""}
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (
0, 'out/Default/foo_unittests\n', '')
self.check(['analyze', '-c', 'gn_debug', '//out/Default',
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
self.assertEqual(out, {
'status': 'Found dependency (all)',
'compile_targets': ['all', 'bar_unittests'],
'test_targets': ['bar_unittests'],
})
def test_gn_analyze_missing_file(self):
files = {'/tmp/in.json': """{\
"files": ["foo/foo_unittest.cc"],
"test_targets": ["bar_unittests"],
"additional_compile_targets": []
}"""}
mbw = self.fake_mbw(files)
mbw.cmds = [
(0, '', ''),
(1, 'The input matches no targets, configs, or files\n', ''),
(1, 'The input matches no targets, configs, or files\n', ''),
]
self.check(['analyze', '-c', 'gn_debug', '//out/Default',
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
self.assertEqual(out, {
'status': 'No dependency',
'compile_targets': [],
'test_targets': [],
})
def test_gn_gen(self):
self.check(['gen', '-c', 'gn_debug', '//out/Default', '-g', '/goma'],
ret=0,
out=('/fake_src/buildtools/linux64/gn gen //out/Default '
'\'--args=is_debug=true use_goma=true goma_dir="/goma"\' '
'--check\n'))
mbw = self.fake_mbw(win32=True)
self.check(['gen', '-c', 'gn_debug', '-g', 'c:\\goma', '//out/Debug'],
mbw=mbw, ret=0,
out=('c:\\fake_src\\buildtools\\win\\gn.exe gen //out/Debug '
'"--args=is_debug=true use_goma=true goma_dir=\\"'
'c:\\goma\\"" --check\n'))
def test_gn_gen_fails(self):
mbw = self.fake_mbw()
mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '')
self.check(['gen', '-c', 'gn_debug', '//out/Default'], mbw=mbw, ret=1)
def test_gn_gen_swarming(self):
files = {
'/tmp/swarming_targets': 'base_unittests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'base_unittests': {"
" 'label': '//base:base_unittests',"
" 'type': 'raw',"
" 'args': [],"
"}}\n"
),
'/fake_src/out/Default/base_unittests.runtime_deps': (
"base_unittests\n"
),
}
mbw = self.fake_mbw(files)
self.check(['gen',
'-c', 'gn_debug',
'--swarming-targets-file', '/tmp/swarming_targets',
'//out/Default'], mbw=mbw, ret=0)
self.assertIn('/fake_src/out/Default/base_unittests.isolate',
mbw.files)
self.assertIn('/fake_src/out/Default/base_unittests.isolated.gen.json',
mbw.files)
def test_gn_isolate(self):
files = {
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'base_unittests': {"
" 'label': '//base:base_unittests',"
" 'type': 'raw',"
" 'args': [],"
"}}\n"
),
'/fake_src/out/Default/base_unittests.runtime_deps': (
"base_unittests\n"
),
}
self.check(['isolate', '-c', 'gn_debug', '//out/Default', 'base_unittests'],
files=files, ret=0)
# test running isolate on an existing build_dir
files['/fake_src/out/Default/args.gn'] = 'is_debug = True\n'
self.check(['isolate', '//out/Default', 'base_unittests'],
files=files, ret=0)
files['/fake_src/out/Default/mb_type'] = 'gn\n'
self.check(['isolate', '//out/Default', 'base_unittests'],
files=files, ret=0)
def test_gn_run(self):
files = {
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'base_unittests': {"
" 'label': '//base:base_unittests',"
" 'type': 'raw',"
" 'args': [],"
"}}\n"
),
'/fake_src/out/Default/base_unittests.runtime_deps': (
"base_unittests\n"
),
}
self.check(['run', '-c', 'gn_debug', '//out/Default', 'base_unittests'],
files=files, ret=0)
def test_gn_lookup(self):
self.check(['lookup', '-c', 'gn_debug'], ret=0)
def test_gn_lookup_goma_dir_expansion(self):
self.check(['lookup', '-c', 'gn_rel_bot', '-g', '/foo'], ret=0,
out=("/fake_src/buildtools/linux64/gn gen _path_ "
"'--args=is_debug=false use_goma=true "
"goma_dir=\"/foo\"'\n" ))
def test_gyp_analyze(self):
mbw = self.check(['analyze', '-c', 'gyp_rel_bot', '//out/Release',
'/tmp/in.json', '/tmp/out.json'],
ret=0)
self.assertIn('analyzer', mbw.calls[0])
def test_gyp_crosscompile(self):
mbw = self.fake_mbw()
self.check(['gen', '-c', 'private', '//out/Release'], mbw=mbw, ret=0)
self.assertTrue(mbw.cross_compile)
def test_gyp_gen(self):
self.check(['gen', '-c', 'gyp_rel_bot', '-g', '/goma', '//out/Release'],
ret=0,
out=("GYP_DEFINES='goma=1 gomadir=/goma'\n"
"python build/gyp_chromium -G output_dir=out\n"))
mbw = self.fake_mbw(win32=True)
self.check(['gen', '-c', 'gyp_rel_bot', '-g', 'c:\\goma', '//out/Release'],
mbw=mbw, ret=0,
out=("set GYP_DEFINES=goma=1 gomadir='c:\\goma'\n"
"python build\\gyp_chromium -G output_dir=out\n"))
def test_gyp_gen_fails(self):
mbw = self.fake_mbw()
mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '')
self.check(['gen', '-c', 'gyp_rel_bot', '//out/Release'], mbw=mbw, ret=1)
def test_gyp_lookup_goma_dir_expansion(self):
self.check(['lookup', '-c', 'gyp_rel_bot', '-g', '/foo'], ret=0,
out=("GYP_DEFINES='goma=1 gomadir=/foo'\n"
"python build/gyp_chromium -G output_dir=_path_\n"))
def test_help(self):
orig_stdout = sys.stdout
try:
sys.stdout = StringIO.StringIO()
self.assertRaises(SystemExit, self.check, ['-h'])
self.assertRaises(SystemExit, self.check, ['help'])
self.assertRaises(SystemExit, self.check, ['help', 'gen'])
finally:
sys.stdout = orig_stdout
def test_validate(self):
self.check(['validate'], ret=0)
def test_bad_validate(self):
mbw = self.fake_mbw()
mbw.files[mbw.default_config] = TEST_BAD_CONFIG
self.check(['validate'], mbw=mbw, exception=TEST_BAD_CONFIG_ERR)
if __name__ == '__main__':
unittest.main()
|
|
try:
import unittest2 as unittest
except ImportError:
import unittest
from graphite.render.attime import parseTimeReference, parseATTime, parseTimeOffset, getUnitString
from datetime import datetime, timedelta
from django.utils import timezone
from .base import TestCase
import pytz
import mock
def mockDateTime(year, month, day, hour, minute, second):
class MockedDateTime(datetime):
@classmethod
def now(cls, tzinfo=None):
if tzinfo:
return tzinfo.localize(cls(year, month, day, hour, minute, second))
return cls(year, month, day, hour, minute, second)
return MockedDateTime
@mock.patch('graphite.render.attime.datetime', mockDateTime(2015, 3, 8, 12, 0, 0))
class ATTimeTimezoneTests(TestCase):
default_tz = timezone.get_current_timezone()
specified_tz = pytz.timezone("America/Los_Angeles")
MOCK_DATE = specified_tz.localize(datetime(2015, 1, 1, 11, 00))
def test_should_return_absolute_time(self):
time_string = '12:0020150308'
expected_time = self.default_tz.localize(datetime.strptime(time_string,'%H:%M%Y%m%d'))
actual_time = parseATTime(time_string)
self.assertEqual(actual_time, expected_time)
def test_absolute_time_should_respect_tz(self):
time_string = '12:0020150308'
expected_time = self.specified_tz.localize(datetime.strptime(time_string, '%H:%M%Y%m%d'))
actual_time = parseATTime(time_string, self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_should_return_absolute_time_short(self):
time_string = '9:0020150308'
expected_time = self.default_tz.localize(datetime.strptime(time_string,'%H:%M%Y%m%d'))
actual_time = parseATTime(time_string)
self.assertEqual(actual_time, expected_time)
def test_absolute_time_should_respect_tz_short(self):
time_string = '9:0020150308'
expected_time = self.specified_tz.localize(datetime.strptime(time_string, '%H:%M%Y%m%d'))
actual_time = parseATTime(time_string, self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_absolute_time_YYYYMMDD(self):
time_string = '20150110'
expected_time = self.specified_tz.localize(datetime.strptime(time_string, '%Y%m%d'))
actual_time = parseATTime(time_string, self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_midnight(self):
expected_time = self.specified_tz.localize(datetime.strptime("0:00_20150308", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight", self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_offset_with_tz(self):
expected_time = self.specified_tz.localize(datetime.strptime("1:00_20150308", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight+1h", self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_relative_day_with_tz(self):
expected_time = self.specified_tz.localize(datetime.strptime("0:00_20150309", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight_tomorrow", self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_relative_day_and_offset_with_tz(self):
expected_time = self.specified_tz.localize(datetime.strptime("3:00_20150309", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight_tomorrow+3h", self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_should_return_current_time(self):
expected_time = self.default_tz.localize(datetime.strptime("12:00_20150308", '%H:%M_%Y%m%d'))
actual_time = parseATTime("now")
self.assertEqual(actual_time, expected_time)
def test_now_should_respect_tz(self):
expected_time = self.specified_tz.localize(datetime.strptime("12:00_20150308", '%H:%M_%Y%m%d'))
actual_time = parseATTime("now", self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_relative_time_in_alternate_zone(self):
expected_time = self.specified_tz.localize(datetime.strptime("11:00_20150308", '%H:%M_%Y%m%d'))
actual_time = parseATTime("-1h", self.specified_tz)
self.assertEqual(actual_time.hour, expected_time.hour)
def test_should_handle_dst_boundary(self):
expected_time = self.specified_tz.localize(datetime.strptime("04:00_20150308", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight+3h", self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_parse_naive_datetime(self):
time_ref = parseATTime(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50), self.specified_tz)
expected = self.specified_tz.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_zone_aware_datetime(self):
time_ref = parseATTime(self.specified_tz.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50)), self.specified_tz)
expected = self.specified_tz.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
@mock.patch('graphite.render.attime.datetime', mockDateTime(2015, 1, 1, 11, 0, 0))
class parseTimeReferenceTest(TestCase):
zone = pytz.utc
MOCK_DATE = zone.localize(datetime(2015, 1, 1, 11, 00))
def test_parse_empty_return_now(self):
time_ref = parseTimeReference('')
self.assertEquals(time_ref, self.MOCK_DATE)
def test_parse_None_return_now(self):
time_ref = parseTimeReference(None)
self.assertEquals(time_ref, self.MOCK_DATE)
def test_parse_random_string_raise_Exception(self):
with self.assertRaises(Exception):
parseTimeReference("random")
def test_parse_now_return_now(self):
time_ref = parseTimeReference("now")
self.assertEquals(time_ref, self.MOCK_DATE)
def test_parse_colon_raises_ValueError(self):
with self.assertRaises(ValueError):
parseTimeReference(":")
def test_parse_naive_datetime(self):
time_ref = parseTimeReference(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_zone_aware_datetime(self):
time_ref = parseTimeReference(self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50)))
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_hour_return_hour_of_today(self):
time_ref = parseTimeReference("8:50")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_hour_am(self):
time_ref = parseTimeReference("8:50am")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_hour_pm(self):
time_ref = parseTimeReference("8:50pm")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 20, 50))
self.assertEquals(time_ref, expected)
def test_parse_hour_only_am(self):
time_ref = parseTimeReference("8am")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 0))
self.assertEquals(time_ref, expected)
def test_parse_hour_only_pm(self):
time_ref = parseTimeReference("10pm")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 22, 0))
self.assertEquals(time_ref, expected)
def test_parse_noon(self):
time_ref = parseTimeReference("noon")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 12, 0))
self.assertEquals(time_ref, expected)
def test_parse_midnight(self):
time_ref = parseTimeReference("midnight")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_teatime(self):
time_ref = parseTimeReference("teatime")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 16, 0))
self.assertEquals(time_ref, expected)
def test_parse_yesterday(self):
time_ref = parseTimeReference("yesterday")
expected = self.zone.localize(datetime(2014, 12, 31, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_today(self):
time_ref = parseTimeReference("today")
expected = self.zone.localize(datetime(2015, 1, 1, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_tomorrow(self):
time_ref = parseTimeReference("tomorrow")
expected = self.zone.localize(datetime(2015, 1, 2, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MM_slash_DD_slash_YY(self):
time_ref = parseTimeReference("02/25/15")
expected = self.zone.localize(datetime(2015, 2, 25, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MM_slash_DD_slash_YYYY(self):
time_ref = parseTimeReference("02/25/2015")
expected = self.zone.localize(datetime(2015, 2, 25, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_YYYYMMDD(self):
time_ref = parseTimeReference("20140606")
expected = self.zone.localize(datetime(2014, 6, 6, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MonthName_DayOfMonth_onedigits(self):
time_ref = parseTimeReference("january8")
expected = self.zone.localize(datetime(2015, 1, 8, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MonthName_DayOfMonth_twodigits(self):
time_ref = parseTimeReference("january10")
expected = self.zone.localize(datetime(2015, 1, 10, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MonthName_DayOfMonth_threedigits_raise_ValueError(self):
with self.assertRaises(ValueError):
parseTimeReference("january800")
def test_parse_MonthName_without_DayOfMonth_raise_Exception(self):
with self.assertRaises(Exception):
parseTimeReference("january")
def test_parse_monday_return_monday_before_now(self):
time_ref = parseTimeReference("monday")
expected = self.zone.localize(datetime(2014, 12, 29, 0, 0))
self.assertEquals(time_ref, expected)
@mock.patch('graphite.render.attime.datetime', mockDateTime(2010, 3, 30, 00, 0, 0))
class parseTimeReferenceTestBug551771(TestCase):
zone = pytz.utc
def test_parse_MM_slash_DD_slash_YY(self):
time_ref = parseTimeReference("02/23/10")
expected = self.zone.localize(datetime(2010, 2, 23, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_YYYYMMDD(self):
time_ref = parseTimeReference("20100223")
expected = self.zone.localize(datetime(2010, 2, 23, 0, 0))
self.assertEquals(time_ref, expected)
class parseTimeOffsetTest(TestCase):
def test_parse_None_returns_empty_timedelta(self):
time_ref = parseTimeOffset(None)
expected = timedelta(0)
self.assertEquals(time_ref, expected)
def test_parse_integer_raises_TypeError(self):
with self.assertRaises(TypeError):
parseTimeOffset(1)
def test_parse_string_starting_neither_with_minus_nor_digit_raises_KeyError(self):
with self.assertRaises(KeyError):
parseTimeOffset("Something")
def test_parse_m_as_unit_raises_Exception(self):
with self.assertRaises(Exception):
parseTimeOffset("1m")
def test_parse_digits_only_raises_exception(self):
with self.assertRaises(Exception):
parseTimeOffset("10")
def test_parse_alpha_only_raises_KeyError(self):
with self.assertRaises(KeyError):
parseTimeOffset("month")
def test_parse_minus_only_returns_zero(self):
time_ref = parseTimeOffset("-")
expected = timedelta(0)
self.assertEquals(time_ref, expected)
def test_parse_plus_only_returns_zero(self):
time_ref = parseTimeOffset("+")
expected = timedelta(0)
self.assertEquals(time_ref, expected)
def test_parse_ten_days(self):
time_ref = parseTimeOffset("10days")
expected = timedelta(10)
self.assertEquals(time_ref, expected)
def test_parse_zero_days(self):
time_ref = parseTimeOffset("0days")
expected = timedelta(0)
self.assertEquals(time_ref, expected)
def test_parse_minus_ten_days(self):
time_ref = parseTimeOffset("-10days")
expected = timedelta(-10)
self.assertEquals(time_ref, expected)
def test_parse_five_seconds(self):
time_ref = parseTimeOffset("5seconds")
expected = timedelta(seconds=5)
self.assertEquals(time_ref, expected)
def test_parse_five_minutes(self):
time_ref = parseTimeOffset("5minutes")
expected = timedelta(minutes=5)
self.assertEquals(time_ref, expected)
def test_parse_five_hours(self):
time_ref = parseTimeOffset("5hours")
expected = timedelta(hours=5)
self.assertEquals(time_ref, expected)
def test_parse_five_weeks(self):
time_ref = parseTimeOffset("5weeks")
expected = timedelta(weeks=5)
self.assertEquals(time_ref, expected)
def test_parse_one_month_returns_thirty_days(self):
time_ref = parseTimeOffset("1month")
expected = timedelta(30)
self.assertEquals(time_ref, expected)
def test_parse_two_months_returns_sixty_days(self):
time_ref = parseTimeOffset("2months")
expected = timedelta(60)
self.assertEquals(time_ref, expected)
def test_parse_twelve_months_returns_360_days(self):
time_ref = parseTimeOffset("12months")
expected = timedelta(360)
self.assertEquals(time_ref, expected)
def test_parse_one_year_returns_365_days(self):
time_ref = parseTimeOffset("1year")
expected = timedelta(365)
self.assertEquals(time_ref, expected)
def test_parse_two_years_returns_730_days(self):
time_ref = parseTimeOffset("2years")
expected = timedelta(730)
self.assertEquals(time_ref, expected)
class getUnitStringTest(TestCase):
def test_get_seconds(self):
test_cases = ['s', 'se', 'sec', 'second', 'seconds']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'seconds')
def test_get_minutes(self):
test_cases = ['min', 'minute', 'minutes']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'minutes')
def test_get_hours(self):
test_cases = ['h', 'ho', 'hour', 'hours']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'hours')
def test_get_days(self):
test_cases = ['d', 'da', 'day', 'days']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'days')
def test_get_weeks(self):
test_cases = ['w', 'we', 'week', 'weeks']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'weeks')
def test_get_months(self):
test_cases = ['mon', 'month', 'months']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'months')
def test_get_years(self):
test_cases = ['y', 'ye', 'year', 'years']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'years')
def test_m_raises_Exception(self):
with self.assertRaises(Exception):
_ = getUnitString("m")
def test_integer_raises_Exception(self):
with self.assertRaises(Exception):
_ = getUnitString(1)
@mock.patch('graphite.render.attime.datetime', mockDateTime(2016, 2, 29, 00, 0, 0))
class parseATTimeTestLeapYear(TestCase):
zone = pytz.utc
def test_parse_last_year(self):
time_ref = parseATTime("-1year")
expected = self.zone.localize(datetime(2015, 3, 1, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_last_leap_year(self):
time_ref = parseATTime("-4years")
expected = self.zone.localize(datetime(2012, 3, 1, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_last_month(self):
time_ref = parseATTime("-1month")
expected = self.zone.localize(datetime(2016, 1, 30, 0, 0))
self.assertEquals(time_ref, expected)
@mock.patch('graphite.render.attime.datetime',mockDateTime(2013, 2, 28, 00, 0, 0))
class parseATTimeTestLeapYear2(TestCase):
zone = pytz.utc
def test_parse_last_year(self):
time_ref = parseATTime("-1year")
expected = self.zone.localize(datetime(2012, 2, 29, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_last_leap_year(self):
time_ref = parseATTime("-4years")
expected = self.zone.localize(datetime(2009, 3, 1, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_last_month(self):
time_ref = parseATTime("-1month")
expected = self.zone.localize(datetime(2013, 1, 29, 0, 0))
self.assertEquals(time_ref, expected)
class parseATTimeTest(TestCase):
zone = pytz.utc
MOCK_DATE = zone.localize(datetime(2015, 1, 1, 11, 00))
@unittest.expectedFailure
def test_parse_noon_plus_yesterday(self):
time_ref = parseATTime("noon+yesterday")
expected = datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day - 1, 12, 00)
self.assertEquals(time_ref, expected)
class parseATTimeTestNow(TestCase):
default_tz = timezone.get_current_timezone()
specified_tz = pytz.timezone("America/Los_Angeles")
now = '11:0020171013'
MOCK_DATE = specified_tz.localize(datetime(2015, 1, 1, 11, 00))
def test_should_return_absolute_time(self):
time_string = '12:0020150308'
expected_time = self.default_tz.localize(datetime.strptime(time_string,'%H:%M%Y%m%d'))
actual_time = parseATTime(time_string, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_absolute_time_should_respect_tz(self):
time_string = '12:0020150308'
expected_time = self.specified_tz.localize(datetime.strptime(time_string, '%H:%M%Y%m%d'))
actual_time = parseATTime(time_string, self.specified_tz, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_should_return_absolute_time_short(self):
time_string = '9:0020150308'
expected_time = self.default_tz.localize(datetime.strptime(time_string,'%H:%M%Y%m%d'))
actual_time = parseATTime(time_string, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_absolute_time_should_respect_tz_short(self):
time_string = '9:0020150308'
expected_time = self.specified_tz.localize(datetime.strptime(time_string, '%H:%M%Y%m%d'))
actual_time = parseATTime(time_string, self.specified_tz, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_absolute_time_YYYYMMDD(self):
time_string = '20150110'
expected_time = self.specified_tz.localize(datetime.strptime(time_string, '%Y%m%d'))
actual_time = parseATTime(time_string, self.specified_tz, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_midnight(self):
expected_time = self.specified_tz.localize(datetime.strptime("0:00_20171013", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight", self.specified_tz, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_offset_with_tz(self):
expected_time = self.specified_tz.localize(datetime.strptime("1:00_20171013", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight+1h", self.specified_tz, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_relative_day_with_tz(self):
expected_time = self.specified_tz.localize(datetime.strptime("0:00_20171014", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight_tomorrow", self.specified_tz, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_relative_day_and_offset_with_tz(self):
expected_time = self.specified_tz.localize(datetime.strptime("3:00_20171014", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight_tomorrow+3h", self.specified_tz, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_should_return_current_time(self):
expected_time = self.default_tz.localize(datetime.strptime("11:00_20171013", '%H:%M_%Y%m%d'))
actual_time = parseATTime("now", now=self.now)
self.assertEqual(actual_time, expected_time)
def test_now_should_respect_tz(self):
expected_time = self.specified_tz.localize(datetime.strptime("11:00_20171013", '%H:%M_%Y%m%d'))
actual_time = parseATTime("now", self.specified_tz, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_relative_time_in_alternate_zone(self):
expected_time = self.specified_tz.localize(datetime.strptime("10:00_20171013", '%H:%M_%Y%m%d'))
actual_time = parseATTime("-1h", self.specified_tz, now=self.now)
self.assertEqual(actual_time.hour, expected_time.hour)
def test_parse_naive_datetime(self):
time_ref = parseATTime(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50), self.specified_tz, now=self.now)
expected = self.specified_tz.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_zone_aware_datetime(self):
time_ref = parseATTime(self.specified_tz.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50)), self.specified_tz, now=self.now)
expected = self.specified_tz.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
class parseTimeReferenceTestNow(TestCase):
zone = pytz.utc
MOCK_DATE = zone.localize(datetime(2015, 1, 1, 11, 00))
now = zone.localize(datetime(2015, 1, 1, 11, 00))
def test_parse_empty_return_now(self):
time_ref = parseTimeReference('', now=self.now)
self.assertEquals(time_ref, self.MOCK_DATE)
def test_parse_None_return_now(self):
time_ref = parseTimeReference(None, now=self.now)
self.assertEquals(time_ref, self.MOCK_DATE)
def test_parse_random_string_raise_Exception(self):
with self.assertRaises(Exception):
parseTimeReference("random", now=self.now)
def test_parse_now_return_now(self):
time_ref = parseTimeReference("now", now=self.now)
self.assertEquals(time_ref, self.MOCK_DATE)
def test_parse_colon_raises_ValueError(self):
with self.assertRaises(ValueError):
parseTimeReference(":", now=self.now)
def test_parse_naive_datetime(self):
time_ref = parseTimeReference(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50), now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_zone_aware_datetime(self):
time_ref = parseTimeReference(self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50)), now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_hour_return_hour_of_today(self):
time_ref = parseTimeReference("8:50", now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_hour_am(self):
time_ref = parseTimeReference("8:50am", now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_hour_pm(self):
time_ref = parseTimeReference("8:50pm", now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 20, 50))
self.assertEquals(time_ref, expected)
def test_parse_hour_only_am(self):
time_ref = parseTimeReference("8am", now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 0))
self.assertEquals(time_ref, expected)
def test_parse_hour_only_pm(self):
time_ref = parseTimeReference("10pm", now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 22, 0))
self.assertEquals(time_ref, expected)
def test_parse_noon(self):
time_ref = parseTimeReference("noon", now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 12, 0))
self.assertEquals(time_ref, expected)
def test_parse_midnight(self):
time_ref = parseTimeReference("midnight", now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_teatime(self):
time_ref = parseTimeReference("teatime", now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 16, 0))
self.assertEquals(time_ref, expected)
def test_parse_yesterday(self):
time_ref = parseTimeReference("yesterday", now=self.now)
expected = self.zone.localize(datetime(2014, 12, 31, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_today(self):
time_ref = parseTimeReference("today", now=self.now)
expected = self.zone.localize(datetime(2015, 1, 1, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_tomorrow(self):
time_ref = parseTimeReference("tomorrow", now=self.now)
expected = self.zone.localize(datetime(2015, 1, 2, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MM_slash_DD_slash_YY(self):
time_ref = parseTimeReference("02/25/15", now=self.now)
expected = self.zone.localize(datetime(2015, 2, 25, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MM_slash_DD_slash_YYYY(self):
time_ref = parseTimeReference("02/25/2015", now=self.now)
expected = self.zone.localize(datetime(2015, 2, 25, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_YYYYMMDD(self):
time_ref = parseTimeReference("20140606", now=self.now)
expected = self.zone.localize(datetime(2014, 6, 6, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MonthName_DayOfMonth_onedigits(self):
time_ref = parseTimeReference("january8", now=self.now)
expected = self.zone.localize(datetime(2015, 1, 8, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MonthName_DayOfMonth_twodigits(self):
time_ref = parseTimeReference("january10", now=self.now)
expected = self.zone.localize(datetime(2015, 1, 10, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MonthName_DayOfMonth_threedigits_raise_ValueError(self):
with self.assertRaises(ValueError):
parseTimeReference("january800", now=self.now)
def test_parse_MonthName_without_DayOfMonth_raise_Exception(self):
with self.assertRaises(Exception):
parseTimeReference("january", now=self.now)
def test_parse_monday_return_monday_before_now(self):
time_ref = parseTimeReference("monday", now=self.now)
expected = self.zone.localize(datetime(2014, 12, 29, 0, 0))
self.assertEquals(time_ref, expected)
|
|
"""Support for Ecobee Thermostats."""
import logging
import voluptuous as vol
from homeassistant.components import ecobee
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
DOMAIN, STATE_COOL, STATE_HEAT, STATE_AUTO, STATE_IDLE,
ATTR_TARGET_TEMP_LOW, ATTR_TARGET_TEMP_HIGH, SUPPORT_TARGET_TEMPERATURE,
SUPPORT_AWAY_MODE, SUPPORT_HOLD_MODE, SUPPORT_OPERATION_MODE,
SUPPORT_TARGET_HUMIDITY_LOW, SUPPORT_TARGET_HUMIDITY_HIGH,
SUPPORT_AUX_HEAT, SUPPORT_TARGET_TEMPERATURE_HIGH, SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE_LOW)
from homeassistant.const import (
ATTR_ENTITY_ID, STATE_ON, STATE_OFF, ATTR_TEMPERATURE, TEMP_FAHRENHEIT)
import homeassistant.helpers.config_validation as cv
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
ATTR_FAN_MIN_ON_TIME = 'fan_min_on_time'
ATTR_RESUME_ALL = 'resume_all'
DEFAULT_RESUME_ALL = False
TEMPERATURE_HOLD = 'temp'
VACATION_HOLD = 'vacation'
AWAY_MODE = 'awayMode'
SERVICE_SET_FAN_MIN_ON_TIME = 'ecobee_set_fan_min_on_time'
SERVICE_RESUME_PROGRAM = 'ecobee_resume_program'
SET_FAN_MIN_ON_TIME_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_FAN_MIN_ON_TIME): vol.Coerce(int),
})
RESUME_PROGRAM_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_RESUME_ALL, default=DEFAULT_RESUME_ALL): cv.boolean,
})
SUPPORT_FLAGS = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_AWAY_MODE |
SUPPORT_HOLD_MODE | SUPPORT_OPERATION_MODE |
SUPPORT_TARGET_HUMIDITY_LOW | SUPPORT_TARGET_HUMIDITY_HIGH |
SUPPORT_AUX_HEAT | SUPPORT_TARGET_TEMPERATURE_HIGH |
SUPPORT_TARGET_TEMPERATURE_LOW | SUPPORT_FAN_MODE)
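# For illustration: Home Assistant tests individual capabilities against this
# bitmask with a bitwise AND, so for this platform
#   bool(SUPPORT_FLAGS & SUPPORT_AWAY_MODE)            # -> True
#   bool(SUPPORT_FLAGS & SUPPORT_TARGET_TEMPERATURE)   # -> True
# while a flag not ORed in above would yield False.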
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Ecobee Thermostat Platform."""
if discovery_info is None:
return
data = ecobee.NETWORK
hold_temp = discovery_info['hold_temp']
_LOGGER.info(
"Loading ecobee thermostat component with hold_temp set to %s",
hold_temp)
devices = [Thermostat(data, index, hold_temp)
for index in range(len(data.ecobee.thermostats))]
add_entities(devices)
def fan_min_on_time_set_service(service):
"""Set the minimum fan on time on the target thermostats."""
entity_id = service.data.get(ATTR_ENTITY_ID)
fan_min_on_time = service.data[ATTR_FAN_MIN_ON_TIME]
if entity_id:
target_thermostats = [device for device in devices
if device.entity_id in entity_id]
else:
target_thermostats = devices
for thermostat in target_thermostats:
thermostat.set_fan_min_on_time(str(fan_min_on_time))
thermostat.schedule_update_ha_state(True)
def resume_program_set_service(service):
"""Resume the program on the target thermostats."""
entity_id = service.data.get(ATTR_ENTITY_ID)
resume_all = service.data.get(ATTR_RESUME_ALL)
if entity_id:
target_thermostats = [device for device in devices
if device.entity_id in entity_id]
else:
target_thermostats = devices
for thermostat in target_thermostats:
thermostat.resume_program(resume_all)
thermostat.schedule_update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_SET_FAN_MIN_ON_TIME, fan_min_on_time_set_service,
schema=SET_FAN_MIN_ON_TIME_SCHEMA)
hass.services.register(
DOMAIN, SERVICE_RESUME_PROGRAM, resume_program_set_service,
schema=RESUME_PROGRAM_SCHEMA)
class Thermostat(ClimateDevice):
"""A thermostat class for Ecobee."""
def __init__(self, data, thermostat_index, hold_temp):
"""Initialize the thermostat."""
self.data = data
self.thermostat_index = thermostat_index
self.thermostat = self.data.ecobee.get_thermostat(
self.thermostat_index)
self._name = self.thermostat['name']
self.hold_temp = hold_temp
self.vacation = None
self._climate_list = self.climate_list
self._operation_list = ['auto', 'auxHeatOnly', 'cool',
'heat', 'off']
self._fan_list = ['auto', 'on']
self.update_without_throttle = False
def update(self):
"""Get the latest state from the thermostat."""
if self.update_without_throttle:
self.data.update(no_throttle=True)
self.update_without_throttle = False
else:
self.data.update()
self.thermostat = self.data.ecobee.get_thermostat(
self.thermostat_index)
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def name(self):
"""Return the name of the Ecobee Thermostat."""
return self.thermostat['name']
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def current_temperature(self):
"""Return the current temperature."""
return self.thermostat['runtime']['actualTemperature'] / 10.0
@property
def target_temperature_low(self):
"""Return the lower bound temperature we try to reach."""
if self.current_operation == STATE_AUTO:
return self.thermostat['runtime']['desiredHeat'] / 10.0
return None
@property
def target_temperature_high(self):
"""Return the upper bound temperature we try to reach."""
if self.current_operation == STATE_AUTO:
return self.thermostat['runtime']['desiredCool'] / 10.0
return None
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self.current_operation == STATE_AUTO:
return None
if self.current_operation == STATE_HEAT:
return self.thermostat['runtime']['desiredHeat'] / 10.0
if self.current_operation == STATE_COOL:
return self.thermostat['runtime']['desiredCool'] / 10.0
return None
@property
def fan(self):
"""Return the current fan status."""
if 'fan' in self.thermostat['equipmentStatus']:
return STATE_ON
return STATE_OFF
@property
def current_fan_mode(self):
"""Return the fan setting."""
return self.thermostat['runtime']['desiredFanMode']
@property
def current_hold_mode(self):
"""Return current hold mode."""
mode = self._current_hold_mode
return None if mode == AWAY_MODE else mode
@property
def fan_list(self):
"""Return the available fan modes."""
return self._fan_list
@property
def _current_hold_mode(self):
events = self.thermostat['events']
for event in events:
if event['running']:
if event['type'] == 'hold':
if event['holdClimateRef'] == 'away':
if int(event['endDate'][0:4]) - \
int(event['startDate'][0:4]) <= 1:
# A temporary hold from away climate is a hold
return 'away'
# A permanent hold from away climate
return AWAY_MODE
if event['holdClimateRef'] != "":
# Any other hold based on climate
return event['holdClimateRef']
# Any hold not based on a climate is a temp hold
return TEMPERATURE_HOLD
if event['type'].startswith('auto'):
# All auto modes are treated as holds
return event['type'][4:].lower()
if event['type'] == 'vacation':
self.vacation = event['name']
return VACATION_HOLD
return None
@property
def current_operation(self):
"""Return current operation."""
if self.operation_mode == 'auxHeatOnly' or \
self.operation_mode == 'heatPump':
return STATE_HEAT
return self.operation_mode
@property
def operation_list(self):
"""Return the operation modes list."""
return self._operation_list
@property
def operation_mode(self):
"""Return current operation ie. heat, cool, idle."""
return self.thermostat['settings']['hvacMode']
@property
def mode(self):
"""Return current mode, as the user-visible name."""
cur = self.thermostat['program']['currentClimateRef']
climates = self.thermostat['program']['climates']
current = list(filter(lambda x: x['climateRef'] == cur, climates))
return current[0]['name']
@property
def fan_min_on_time(self):
"""Return current fan minimum on time."""
return self.thermostat['settings']['fanMinOnTime']
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
# Move these to Thermostat Device and make them global
status = self.thermostat['equipmentStatus']
operation = None
if status == '':
operation = STATE_IDLE
elif 'Cool' in status:
operation = STATE_COOL
elif 'auxHeat' in status:
operation = STATE_HEAT
elif 'heatPump' in status:
operation = STATE_HEAT
else:
operation = status
return {
"actual_humidity": self.thermostat['runtime']['actualHumidity'],
"fan": self.fan,
"climate_mode": self.mode,
"operation": operation,
"equipment_running": status,
"climate_list": self.climate_list,
"fan_min_on_time": self.fan_min_on_time
}
@property
def is_away_mode_on(self):
"""Return true if away mode is on."""
return self._current_hold_mode == AWAY_MODE
@property
def is_aux_heat_on(self):
"""Return true if aux heater."""
return 'auxHeat' in self.thermostat['equipmentStatus']
def turn_away_mode_on(self):
"""Turn away mode on by setting it on away hold indefinitely."""
if self._current_hold_mode != AWAY_MODE:
self.data.ecobee.set_climate_hold(self.thermostat_index, 'away',
'indefinite')
self.update_without_throttle = True
def turn_away_mode_off(self):
"""Turn away off."""
if self._current_hold_mode == AWAY_MODE:
self.data.ecobee.resume_program(self.thermostat_index)
self.update_without_throttle = True
def set_hold_mode(self, hold_mode):
"""Set hold mode (away, home, temp, sleep, etc.)."""
hold = self.current_hold_mode
if hold == hold_mode:
# no change, so no action required
return
if hold_mode == 'None' or hold_mode is None:
if hold == VACATION_HOLD:
self.data.ecobee.delete_vacation(
self.thermostat_index, self.vacation)
else:
self.data.ecobee.resume_program(self.thermostat_index)
else:
if hold_mode == TEMPERATURE_HOLD:
self.set_temp_hold(self.current_temperature)
else:
self.data.ecobee.set_climate_hold(
self.thermostat_index, hold_mode, self.hold_preference())
self.update_without_throttle = True
def set_auto_temp_hold(self, heat_temp, cool_temp):
"""Set temperature hold in auto mode."""
if cool_temp is not None:
cool_temp_setpoint = cool_temp
else:
cool_temp_setpoint = (
self.thermostat['runtime']['desiredCool'] / 10.0)
if heat_temp is not None:
heat_temp_setpoint = heat_temp
else:
heat_temp_setpoint = (
                self.thermostat['runtime']['desiredHeat'] / 10.0)
self.data.ecobee.set_hold_temp(self.thermostat_index,
cool_temp_setpoint, heat_temp_setpoint,
self.hold_preference())
_LOGGER.debug("Setting ecobee hold_temp to: heat=%s, is=%s, "
"cool=%s, is=%s", heat_temp,
isinstance(heat_temp, (int, float)), cool_temp,
isinstance(cool_temp, (int, float)))
self.update_without_throttle = True
def set_fan_mode(self, fan_mode):
"""Set the fan mode. Valid values are "on" or "auto"."""
if (fan_mode.lower() != STATE_ON) and (fan_mode.lower() != STATE_AUTO):
error = "Invalid fan_mode value: Valid values are 'on' or 'auto'"
_LOGGER.error(error)
return
cool_temp = self.thermostat['runtime']['desiredCool'] / 10.0
heat_temp = self.thermostat['runtime']['desiredHeat'] / 10.0
self.data.ecobee.set_fan_mode(self.thermostat_index, fan_mode,
cool_temp, heat_temp,
self.hold_preference())
_LOGGER.info("Setting fan mode to: %s", fan_mode)
def set_temp_hold(self, temp):
"""Set temperature hold in modes other than auto.
Ecobee API: It is good practice to set the heat and cool hold
temperatures to be the same, if the thermostat is in either heat, cool,
auxHeatOnly, or off mode. If the thermostat is in auto mode, an
additional rule is required. The cool hold temperature must be greater
than the heat hold temperature by at least the amount in the
heatCoolMinDelta property.
https://www.ecobee.com/home/developer/api/examples/ex5.shtml
"""
if self.current_operation == STATE_HEAT or self.current_operation == \
STATE_COOL:
heat_temp = temp
cool_temp = temp
else:
delta = self.thermostat['settings']['heatCoolMinDelta'] / 10
heat_temp = temp - delta
cool_temp = temp + delta
self.set_auto_temp_hold(heat_temp, cool_temp)
def set_temperature(self, **kwargs):
"""Set new target temperature."""
low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW)
high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH)
temp = kwargs.get(ATTR_TEMPERATURE)
if self.current_operation == STATE_AUTO and \
(low_temp is not None or high_temp is not None):
self.set_auto_temp_hold(low_temp, high_temp)
elif temp is not None:
self.set_temp_hold(temp)
else:
_LOGGER.error(
"Missing valid arguments for set_temperature in %s", kwargs)
def set_humidity(self, humidity):
"""Set the humidity level."""
self.data.ecobee.set_humidity(self.thermostat_index, humidity)
def set_operation_mode(self, operation_mode):
"""Set HVAC mode (auto, auxHeatOnly, cool, heat, off)."""
self.data.ecobee.set_hvac_mode(self.thermostat_index, operation_mode)
self.update_without_throttle = True
def set_fan_min_on_time(self, fan_min_on_time):
"""Set the minimum fan on time."""
self.data.ecobee.set_fan_min_on_time(
self.thermostat_index, fan_min_on_time)
self.update_without_throttle = True
def resume_program(self, resume_all):
"""Resume the thermostat schedule program."""
self.data.ecobee.resume_program(
self.thermostat_index, 'true' if resume_all else 'false')
self.update_without_throttle = True
def hold_preference(self):
"""Return user preference setting for hold time."""
# Values returned from thermostat are 'useEndTime4hour',
# 'useEndTime2hour', 'nextTransition', 'indefinite', 'askMe'
default = self.thermostat['settings']['holdAction']
if default == 'nextTransition':
return default
# add further conditions if other hold durations should be
# supported; note that this should not include 'indefinite'
# as an indefinite away hold is interpreted as away_mode
return 'nextTransition'
@property
def climate_list(self):
"""Return the list of climates currently available."""
climates = self.thermostat['program']['climates']
return list(map((lambda x: x['name']), climates))
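# A worked example (hypothetical numbers) of the hold rule applied by
# Thermostat.set_temp_hold() when the thermostat is not in plain heat or cool
# mode: with a requested hold of 72 F and heatCoolMinDelta == 50 (presumably
# tenths of a degree, i.e. 5 F, as elsewhere in the API), the setpoints are
# spread around the target so the cool hold stays above the heat hold by at
# least the minimum delta.
#
#   delta = 50 / 10          # -> 5
#   heat_temp = 72 - delta   # -> 67
#   cool_temp = 72 + delta   # -> 77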
|
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
repo.py
---------------------
Date : November 2013
Copyright : (C) 2013-2016 Boundless, http://boundlessgeo.com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'November 2013'
__copyright__ = '(C) 2013-2016 Boundless, http://boundlessgeo.com'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import re
from commitish import Commitish
from tag import Tag
import geogig
from geogigexception import GeoGigException
from feature import Feature
from tree import Tree
from utils import mkdir
from py4jconnector import Py4JCLIConnector
from geogigserverconnector import GeoGigServerConnector
import tempfile
import datetime
def _resolveref(ref):
'''
    Tries to resolve the passed object into a string representing a commit reference
(a SHA-1, branch name, or something like HEAD~1)
This should be called by all commands using references, so they can accept both
strings and Commitish objects indistinctly
'''
if ref is None:
return None
if isinstance(ref, Commitish):
return ref.ref
elif isinstance(ref, basestring):
return ref
else:
return str(ref)
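# For example (hypothetical refs), strings and Commitish objects resolve to
# the same value:
#   _resolveref('HEAD~1')                      # -> 'HEAD~1'
#   _resolveref(Commitish(repo, 'HEAD~1'))     # -> 'HEAD~1'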
SHA_MATCHER = re.compile(r"\b([a-f0-9]{40})\b")
class Repository(object):
_logcache = None
def __init__(self, url, connector=None, init=False, initParams=None):
'''
url: The url of the repository. Only file paths are supported so far. Remote repos are not supported
connector: the connector to use to communicate with the repository
init: True if the repository should be initialized
'''
self.url = url
self.connector = Py4JCLIConnector() if connector is None else connector
if init:
try:
mkdir(url)
except Exception, e:
raise GeoGigException("Cannot create repository folder.\nCheck that path is correct and you have permission")
self.connector.setRepository(self)
try:
self.connector.checkisrepo()
isAlreadyRepo = True
except GeoGigException, e:
isAlreadyRepo = False
if init:
if isAlreadyRepo:
raise GeoGigException("Cannot init, the folder is already a geogig repository")
else:
self.init(initParams)
self.connector.checkisrepo()
self.cleancache()
@staticmethod
def newrepofromclone(url, path, connector=None, username=None, password=None):
'''
Clones a given repository into a local folder and returns a repository object representing it
url: the url of the repo to clone
path: the path to clone the repo into
connector: the connector to use to communicate with the repository
'''
connector = Py4JCLIConnector() if connector is None else connector
connector.clone(url, path, username, password)
return Repository(path, connector)
def createdat(self):
'''Returns the creation date of this repository'''
return self.connector.createdat()
def cleancache(self):
self._logcache = None
def description(self):
'''Returns the description of this repository'''
#TODO
return ''
def revparse(self, rev):
'''Returns the SHA-1 of a given element, represented as a string'''
if SHA_MATCHER.match(rev) is not None:
return rev
else:
return self.connector.revparse(rev)
@property
def head(self):
'''Returns a Commitish representing the current HEAD'''
return self.connector.head()
@property
def index(self):
'''Returns a Commitish representing the index'''
return Commitish(self, geogig.STAGE_HEAD)
@property
def workingtree(self):
'''Returns a Commitish representing workingtree'''
return Commitish(self, geogig.WORK_HEAD)
@property
def master(self):
'''Returns a Commitish representing the master branch'''
return Commitish(self, geogig.MASTER)
def isdetached(self):
        '''Returns true if the repo has a detached HEAD'''
return self.head.id == self.head.ref
def synced(self, branch=geogig.HEAD, credentials=None):
'''
Returns a tuple with number of (ahead, behind) commits between this repo and a remote
It uses the passed branch or, if not passed, the current branch
        If the repository is headless, or if no remote is defined, it will throw an exception
It uses the "origin" remote if it exists, otherwise it uses the first remote available.
If the remote requires authentication, a tuple of (username,password) must be passed
in the credentials parameter
'''
if (branch == geogig.HEAD and self.isdetached()):
raise GeoGigException("Cannot use current branch. The repository has a detached HEAD")
remotes = self.remotes
if remotes:
if "origin" in remotes:
remote = remotes["origin"]
remotename = "origin"
else:
remotename = remotes.keys()[0]
remote = remotes.values()[0]
else:
raise GeoGigException("No remotes defined")
if isremoteurl(remote):
repo = Repository(remote, GeoGigServerConnector(credentials))
else:
conn = self.connector.__class__()
repo = Repository(remote[len("file:/"):], conn)
localtip = self.revparse(branch)
remotetip = repo.revparse(branch)
if remotetip == localtip:
return 0, 0
if remotetip == geogig.NULL_ID:
log = self.log(branch)
push = len(log)
pull = 0
else:
trackedbranchhead = self.revparse("refs/remotes/" + remotename + "/" + branch)
log = self.log(branch, trackedbranchhead)
push = len(log)
log = repo.log(branch, trackedbranchhead)
pull = len(log)
return push, pull
def mergemessage(self):
'''
Return the merge message if the repo is in a merge operation stopped due to conflicts.
Returns an empty string if it is not the case
'''
return self.connector.mergemessage()
def log(self, tip=None, sincecommit=None, until=None, since=None, path=None, n=None):
'''
Returns a list of Commit starting from the passed tip ref, or HEAD if there is no passed ref,
and up to the sincecommit, if passed, or to first commit in the history if not.
If a path is passed, it only returns commits in which that path was modified
Date limits can be passed using the since and until parameters
A maximum number of commits can be set using the n parameter
'''
tip = tip or geogig.HEAD
if path is not None or tip != geogig.HEAD or n is not None or since is not None or until is not None or sincecommit is not None:
return self.connector.log(_resolveref(tip), _resolveref(sincecommit), _resolveref(until), _resolveref(since), path, n)
if self._logcache is None:
self._logcache = self.connector.log(_resolveref(tip), _resolveref(sincecommit), _resolveref(until), _resolveref(since), path, n)
return self._logcache
def commitatdate(self, t):
'''Returns a Commit corresponding to a given instant, which is passed as a datetime.datetime'''
epoch = datetime.datetime.utcfromtimestamp(0)
delta = t - epoch
milisecs = int(delta.total_seconds()) * 1000
log = self.connector.log(geogig.HEAD, until=str(milisecs), n=1)
if log:
return log[0]
else:
raise GeoGigException("Invalid date for this repository")
@property
def trees(self):
return self._trees()
def _trees(self, ref=geogig.HEAD, path=None, recursive=False):
'''Returns a set of Tree objects with all the trees for the passed ref and path'''
return [e for e in self.children(ref, path, recursive) if isinstance(e, Tree)]
def features(self, ref=geogig.HEAD, path=None, recursive=False):
'''Returns a set of Feature objects with all the features for the passed ref and path'''
return [e for e in self.children(ref, path, recursive) if isinstance(e, Feature)]
def children(self, ref=geogig.HEAD, path=None, recursive=False):
'''Returns a set of Tree and Feature objects with all the children for the passed ref and path'''
return self.connector.children(_resolveref(ref), path, recursive)
@property
def branches(self):
''' Returns a dict with branch names as keys and branch refs as values'''
return self.connector.branches()
@property
def tags(self):
'''Returns a dict with tag names as keys and tag objects as values'''
tags = self.connector.tags()
tags = {k: Tag(self, v, k) for k, v in tags.iteritems()}
return tags
def clone(self, path):
'''Clones this repo in the specified path. Returns a reference to the cloned repo'''
url = self.url.replace('\\', '/')
self.connector.clone(url, path)
return Repository(path, self.connector.__class__(), False)
def createbranch(self, ref, name, force=False, checkout=False):
'''Creates a new branch in the repo. Returns the commitish representing the branch'''
if checkout:
self.cleancache()
return self.connector.createbranch(_resolveref(ref), name, force, checkout)
def deletebranch(self, name, remote=False):
'''Deletes the passed branch'''
self.connector.deletebranch(name, remote)
def createtag(self, ref, name, message):
'''Creates a new tag, with the passed message'''
self.connector.createtag(_resolveref(ref), name, message)
def deletetag(self, name):
'''Deletes the passed tag'''
self.connector.deletetag(name)
def diff(self, refa=geogig.HEAD, refb=geogig.WORK_HEAD, path=None):
'''Returns a list of DiffEntry representing the changes between 2 commits.
If a path is passed, it only shows changes corresponding to that path'''
return self.connector.diff(_resolveref(refa), _resolveref(refb), path)
def difftreestats(self, refa=geogig.HEAD, refb=geogig.WORK_HEAD):
'''Returns a dict with tree changes statistics for the passed refs. Keys are paths, values are tuples
in the form (added, deleted, modified) corresponding to changes made to that path'''
return self.connector.difftreestats(_resolveref(refa), _resolveref(refb))
def treediff(self, path, refa=geogig.HEAD, refb=geogig.WORK_HEAD):
'''Returns a tuple attributes, features with a description of features changed between the specified refs
Attributes is a dict with attribute names as keys and the description of the attribute as value
Features is a list, with each element being another list representing a feature and the changes
        in it between the two specified versions.
The length of this list is the same as the one of attributes dictionary
The value for an attribute is a tuple of (change_type, old value, new value) in case the change for the
attribute is a modification, or (change_type, value), if the change is a removal, addition or
unmodified'''
return self.connector.treediff(path, _resolveref(refa), _resolveref(refb))
def unstaged(self):
'''Returns a list of diffEntry with the differences between staging area and working tree'''
return self.diff(geogig.STAGE_HEAD, geogig.WORK_HEAD)
def staged(self):
'''Returns a list of diffEntry with the differences between HEAD and Staging area'''
return self.diff(geogig.HEAD, geogig.STAGE_HEAD)
def notindatabase(self):
'''Returns a list of diffEntry with the differences between HEAD and Working Tree'''
return self.diff(geogig.HEAD, geogig.WORK_HEAD)
def conflicts(self):
'''Returns a dict of tuples. Keys are paths, values are tuples with the 3 versions
defining a conflict, as Feature objects'''
conflicts = {}
_conflicts = self.connector.conflicts()
for path, c in _conflicts.iteritems():
c = tuple(Feature(self, ref, path) for ref in c)
conflicts[path] = c
return conflicts
def checkout(self, ref, paths=None, force=False):
'''Checks out the passed ref into the working tree.
If a path list is passed, it will just checkout those paths.
If force is True, it will check out even if the working tree is not clean'''
self.connector.checkout(_resolveref(ref), paths, force)
self.cleancache()
def updatepathtoref(self, ref, paths):
'''
Updates the element in the passed paths to the version corresponding to the passed ref.
If the path is conflicted (unmerged), it will also resolve the conflict
'''
ref = _resolveref(ref)
for path in paths:
self.connector.reset(ref, path=path)
return self.connector.checkout(ref, paths)
def solveconflict(self, path, attributes):
'''
Solves a conflict at the specified path with a new feature defined by the passed attributes.
Attributes are passed in a dict with attribute names as keys and attribute values as values.
This can be used only with features containing one and only one geometry attribute
'''
self.reset(geogig.HEAD, path=path)
self.insertfeature(path, attributes)
self.add([path])
def solveconflicts(self, paths, version=geogig.OURS):
'''
Solves the specified paths with one of the corresponding existing versions (ours or theirs)
Version is specified using geogig.OURS or geogig.THEIRS
'''
self.connector.solveconflicts(paths, version)
def add(self, paths=[]):
'''Adds the passed paths to the staging area. If no paths are passed, it will add all the unstaged ones'''
self.connector.add(paths)
def addandcommit(self, message, paths=[]):
self.add(paths)
return self.commit(message, paths)
def commit(self, message, paths=[]):
'''
Creates a new commit with the changes in the specified paths.
If no paths are passed, it will commit all staged features
Raises an UnconfiguredUserException if there is no user configured and it cannot commit
'''
self.connector.commit(message, paths)
self.cleancache()
# TODO: maybe add the commit instead of invalidating the whole cache
def commitfromgeopkg(self, geopkg, message):
'''
Creates a new commit from the audited tables in a geopackage
'''
self.connector.commitfromgeopkg(geopkg, message)
def blame(self, path):
'''
Returns authorship information for the passed path
It is returned as a dict, with attribute names as keys.
Values are tuples of (value, commitid, authorname)
'''
return self.connector.blame(path)
def count(self, ref, path):
'''Returns the count of objects in a given path'''
output = self.show(_resolveref(ref) + ":" + path)
return int(output.split("\n")[1][5:].strip())
def feature(self, ref, path):
'''Returns a Feature object corresponding to the passed ref and path'''
return Feature(self, ref, path)
def featuredata(self, ref, path):
'''
Returns the attributes of a given feature, as a dict with attributes
names as keys and tuples of (attribute_value, attribute_type_name) as values.
Values are converted to appropriate types when possible, otherwise they are stored
as the string representation of the attribute
'''
data = self.connector.featuredata(_resolveref(ref), path)
if len(data) == 0:
raise GeoGigException("The specified feature does not exist")
return data
def featuretype(self, ref, tree):
'''Returns the featuretype of a tree as a dict in the form attrib_name : attrib_type_name'''
return self.connector.featuretype(ref, tree)
def versions(self, path):
'''
        Returns all versions of a given feature.
It returns a dict with Commit objects as keys, and feature data for the corresponding
commit as values. Feature data is another dict with attributes
names as keys and tuples of (attribute_value, attribute_type_name) as values.
Values are converted to appropriate types when possible, otherwise they are stored
as the string representation of the attribute
'''
entries = self.log(geogig.HEAD, path=path)
refs = [entry.ref + ":" + path for entry in entries]
versions = []
if refs:
features = self.connector.featuresdata(refs)
for entry, ref in zip(entries, refs):
versions.append((entry, features[ref]))
return versions
def featurediff(self, ref, ref2, path):
'''
Returns a dict with attributes that have changed in the specified feature path between the specified refs
Keys are attribute names. Values are tuples of "(oldvalue, newvalue)"
If the feature has been added, oldvalue = None
If the feature has been removed, newvalue = None
Values are converted to appropriate types if possible, otherwise they are stored as strings
'''
return self.connector.featurediff(_resolveref(ref), _resolveref(ref2), path)
def reset(self, ref, mode=geogig.RESET_MODE_HARD, path=None):
'''Resets the current branch to the passed reference'''
self.connector.reset(ref, mode, path)
self.cleancache()
def exportshp(self, ref, path, shapefile, charset=None):
self.connector.exportshp(_resolveref(ref), path, shapefile, charset)
def exportsl(self, ref, path, database, user=None, table=None):
'''Export to a SpatiaLite database'''
self.connector.exportsl(_resolveref(ref), path, database, user, table)
def exportpg(self, ref, path, table, database, user, password=None, schema=None, host=None, port=None, overwrite=False):
self.connector.exportpg(_resolveref(ref), path, table, database, user, password, schema, host, port, overwrite)
def exportgeopkg(self, ref, path, geopkg, interchange=True, overwrite=False):
self.connector.exportgeopkg(_resolveref(ref), path, geopkg, interchange, overwrite)
def importgeojson(self, geojsonfile, add=False, dest=None, idAttribute=None, geomName=None, force=False):
self.connector.importgeojson(geojsonfile, add, dest, idAttribute, geomName, force)
def importshp(self, shpfile, add=False, dest=None, idAttribute=None, force=False, charset=None):
self.connector.importshp(shpfile, add, dest, idAttribute, force, charset)
def importgeopkg(self, geopkg, table, dest):
self.connector.importgeopkg(geopkg, table, dest)
def importpg(self, database, user=None, password=None, table=None, schema=None,
host=None, port=None, add=False, dest=None, force=False, idAttribute=None):
self.connector.importpg(database, user, password, table,
schema, host, port, add, dest, force, idAttribute)
def importsl(self, database, table, add=False, dest=None):
self.connector.importsl(database, table, add, dest)
def exportdiffs(self, commit1, commit2, path, filepath, old=False, overwrite=False, charset=None):
        '''Exports the differences in a given tree between two commits, creating a shapefile
        with the changed features corresponding to the newest of them, or the oldest if old = True'''
self.connector.exportdiffs(_resolveref(commit1), _resolveref(commit2), path, filepath, old, overwrite, charset)
def insertfeature(self, path, attributes):
'''
Inserts a feature to the working tree.
The attributes are passed in a dict with attribute names as keys and attribute values as values.
There must be one and only one geometry attribute, with a Geometry object.
It will overwrite any feature in the same path, so this can be used to add a new feature or to
modify an existing one
'''
self.connector.insertfeatures({path: attributes})
def insertfeatures(self, features):
'''
Inserts a set of features into the working tree.
Features are passed in a dict with paths as keys and attributes as values
The attributes for each feature are passed in a dict with attribute names as keys and attribute values as values.
        There must be one and only one geometry attribute, with a Geometry object.
It will overwrite any feature in the same path, so this can be used to add new features or to
modify existing ones
'''
self.connector.insertfeatures(features)
def removefeatures(self, paths):
'''Removes the passed features paths from the working tree and index, so they are no longer versioned'''
self.connector.removepaths(paths)
def removetrees(self, paths):
'''Removes the passed tree paths from the working tree and index, so they are no longer versioned'''
self.connector.removepaths(paths, True)
def commonancestor(self, refa, refb):
'''
Returns the common ancestor of the two passed references as a commitish object
Returns None if no common ancestor exists for the passed references
'''
return self.connector.commonancestor(refa, refb)
def merge(self, ref, nocommit=False, message=None):
'''Merges the passed ref into the current branch'''
self.connector.merge(_resolveref(ref), nocommit, message)
self.cleancache()
def rebase(self, ref):
'''Rebases the current branch using the passed ref'''
self.connector.rebase(_resolveref(ref))
self.cleancache()
def abort(self):
'''
Abort a merge or rebase operation, if it was stopped due to conflicts
Does nothing if the repo is not in a conflicted state
'''
self.connector.abort()
def continue_(self):
'''
Continues a rebase operation that was stopped due to conflicts
Raises a GeoGigException if the repo is not clean and cannot continue the operation
Does nothing if the repo is not in a conflicted state caused by a rebase operation
'''
self.connector.continue_()
def cherrypick(self, ref):
'''Cherrypicks a commit into the current branch'''
self.connector.cherrypick(_resolveref(ref))
self.cleancache()
@property
def remotes(self):
'''Returns a dict with remote names as keys and remote urls as values'''
return self.connector.remotes()
def addremote(self, name, url, username, password):
'''Adds a new remote'''
self.connector.addremote(name, url, username, password)
def removeremote(self, name):
'''Removes a remote'''
self.connector.removeremote(name)
def ismerging(self):
'''Returns true if the repo is in the middle of a merge stopped due to conflicts'''
return self.connector.ismerging()
def isrebasing(self):
'''Returns true if the repo is in the middle of a rebase stopped due to conflicts'''
return self.connector.isrebasing()
def downloadosm(self, osmurl, bbox, mappingorfile=None):
'''Downloads from a OSM server using the overpass API.
The bbox parameter defines the extent of features to download.
Accepts a mapping object or a string with the path to a mapping file'''
mappingfile = None
if mappingorfile is not None:
mappingfile = self._mapping(mappingorfile)
self.connector.downloadosm(osmurl, bbox, mappingfile)
self.cleancache()
def _mapping(self, mappingorfile):
if isinstance(mappingorfile, basestring):
return mappingorfile
else:
try:
f = tempfile.NamedTemporaryFile(delete=False)
f.write(mappingorfile.asjson())
f.close()
return f.name
finally:
f.close()
def importosm(self, osmfile, add=False, mappingorfile=None):
'''
Imports an osm file.
Accepts a mapping object or a string with the path to a mapping file to define an import mapping
'''
mappingfile = None
if mappingorfile is not None:
mappingfile = self._mapping(mappingorfile)
self.connector.importosm(osmfile, add, mappingfile)
def exportosm(self, osmfile, ref=None, bbox=None):
'''
Exports the OSM data in the repository to an OSM XML file
A bounding box can be passed to be used as a filter.
It is passed as a tuple of 4 elements containing the boundary coordinates in the form (S, W, N, E)
'''
self.connector.exportosm(osmfile, _resolveref(ref), bbox)
def exportosmchangeset(self, osmfile, changesetid=None, refa=None, refb=None):
'''
Exports the difference between the osm data in two commits as a osm changeset.
An alternative changeset id can be used to replace negative ids if they exist
'''
self.connector.exportosmchangeset(osmfile, changesetid, _resolveref(refa), _resolveref(refb))
def maposm(self, mappingorfile):
'''Applies a mapping to the OSM data in the repo.
The mapping can be passed as a file path to a mapping file, or as a OSMMapping object'''
mappingfile = self._mapping(mappingorfile)
self.connector.maposm(mappingfile)
def show(self, ref):
'''Returns the description of an element, as printed by the GeoGig show command'''
return self.connector.show(_resolveref(ref))
def config(self, param, value, global_=False):
        '''Configures a geogig parameter with the passed value'''
return self.connector.config(param, value, global_)
def getconfig(self, param):
'''Returns the current value for a given parameter'''
return self.connector.getconfig(param)
def pull(self, remote=geogig.ORIGIN, branch=None, rebase=False):
'''
Pulls from the specified remote and specified branch.
If no branch is provided, it will use the name of the current branch, unless the repo is headless.
        In that case, an exception will be raised
If rebase == True, it will do a rebase instead of a merge
'''
if branch is None and self.isdetached():
raise GeoGigException("HEAD is detached. Cannot pull")
branch = branch or self.head.ref
self.connector.pull(remote, branch, rebase)
self.cleancache()
def push(self, remote, branch=None, all=False):
'''
Pushes to the specified remote and specified branch.
If no branch is provided, it will use the name of the current branch, unless the repo is headless.
        In that case, an exception will be raised.
if all == True, it will push all branches and ignore the branch.
'''
if branch is None and self.isdetached():
raise GeoGigException("HEAD is detached. Cannot push")
branch = branch or self.head.ref
return self.connector.push(remote, branch, all)
def init(self, initParams=None):
'''
Inits the repository.
Init params is a dict of paramName : paramValues to be supplied to the init command
'''
self.connector.init(initParams)
def isremoteurl(url):
# This code snippet has been taken from the Django source code
regex = re.compile(
r'^https?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return url is not None and regex.search(url)
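# A minimal usage sketch (hypothetical paths and data) for the Repository
# class above: create a repository, import a shapefile, commit it and walk the
# history. Connector setup and error handling are omitted.
#
#   repo = Repository('/tmp/parks_repo', init=True)
#   repo.config('user.name', 'Jane Doe')
#   repo.config('user.email', 'jane@example.com')
#   repo.importshp('/tmp/parks.shp', dest='parks')
#   repo.addandcommit('Import parks layer')
#   for commit in repo.log():
#       print commit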
|
|
#!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from optparse import OptionParser
from os.path import exists
from swift.cli.ringbuilder import main as rb_main
import json
import pickle
import sys
import threading
USAGE = "usage: %prog -f <swift_ring.contents> -r <managed_region>"
DEVICE_KEY = "%(ip)s:%(port)d/%(device)s"
class RingValidationError(Exception):
pass
def create_buildfile(build_file, part_power, repl, min_part_hours,
update=False, data=None, validate=False):
if update:
# build file exists, so lets just update the existing build file
if not data:
data = get_build_file_data(build_file)
if data is None:
data = {}
if repl != data.get('replicas') and not validate:
run_and_wait(rb_main, ["swift-ring-builder", build_file,
"set_replicas", repl])
if min_part_hours != data.get('min_part_hours') and not validate:
run_and_wait(rb_main, ["swift-ring-builder", build_file,
"set_min_part_hours", min_part_hours])
if part_power != data.get('part_power'):
raise RingValidationError('Part power cannot be changed! '
'you must rebuild the ring if you need '
'to change it.\nRing part power: %s '
'Inventory part power: %s'
% (data.get('part_power'), part_power))
elif not validate:
run_and_wait(rb_main, ["swift-ring-builder", build_file, "create",
part_power, repl, min_part_hours])
def change_host_weight(build_file, host_search_str, weight):
run_and_wait(rb_main, ["swift-ring-builder", build_file, "set_weight",
host_search_str, weight])
def remove_host_from_ring(build_file, host):
run_and_wait(rb_main, ["swift-ring-builder", build_file, "remove",
host])
def update_host_in_ring(build_file, new_host, old_host, validate=False):
if new_host.get('zone', 0) != old_host['zone']:
devstr = DEVICE_KEY % new_host
raise RingValidationError('Cannot update zone on %s, this can only be '
'done when the drive is added' % devstr)
if new_host.get('region', 1) != old_host['region']:
devstr = DEVICE_KEY % new_host
raise RingValidationError('Cannot update region on %s, this can only '
'be done when the drive is added' % devstr)
try:
r_ip = new_host.get('repl_ip', new_host['ip'])
r_port = new_host.get('repl_port', new_host['port'])
weight = new_host.get('weight')
old_r_ip = old_host['replication_ip']
old_r_port = old_host['replication_port']
if r_ip != old_r_ip or r_port != old_r_port:
host_d = {'r_ip': r_ip, 'r_port': r_port}
host_d.update(new_host)
host_str = (
"%(ip)s:%(port)dR%(r_ip)s:%(r_port)d/%(device)s" % host_d
)
if not validate:
run_and_wait(rb_main, ["swift-ring-builder", build_file,
"set_info", DEVICE_KEY % new_host,
host_str])
except Exception as ex:
raise RingValidationError(ex)
if weight != old_host['weight'] and not validate:
change_host_weight(build_file, DEVICE_KEY % new_host, weight)
def add_host_to_ring(build_file, host, validate=False):
host_str = ""
try:
if host.get('region') is not None:
host_str += 'r%(region)d' % host
host_str += "z%d" % (host.get('zone'))
host_str += "-%(ip)s:%(port)d" % host
if host.get('repl_ip'):
r_ip = host['repl_ip']
r_port = host.get('repl_port', host['port'])
host_str += "R%s:%d" % (r_ip, r_port)
elif host.get('repl_port'):
r_ip = host.get('repl_ip', host['ip'])
r_port = host['repl_port']
host_str += "R%s:%d" % (r_ip, r_port)
host_str += "/%(device)s" % host
weight = host.get('weight')
except Exception as ex:
raise RingValidationError(ex)
if not validate:
run_and_wait(rb_main, ["swift-ring-builder", build_file, 'add',
host_str, str(weight)])
def run_and_wait(func, *args):
t = threading.Thread(target=func, args=args)
t.start()
return t.join()
def get_build_file_data(build_file):
build_file_data = None
if exists(build_file):
try:
with open(build_file) as bf_stream:
build_file_data = pickle.load(bf_stream)
except Exception as ex:
print("Error: failed to load build file '%s': %s" % (build_file,
ex))
build_file_data = None
return build_file_data
def build_ring(build_name, repl, min_part_hours, part_power, hosts,
region=None, validate=False):
# Create the build file
build_file = "%s.builder" % build_name
build_file_data = get_build_file_data(build_file)
update = build_file_data is not None
create_buildfile(
build_file,
part_power,
repl,
min_part_hours,
update,
data=build_file_data,
validate=validate
)
old_hosts = {}
if update:
for i, dev in enumerate(build_file_data['devs']):
if dev is not None:
if region is None or int(region) == int(dev['region']):
old_hosts[DEVICE_KEY % dev] = i
for host in hosts:
host_key = DEVICE_KEY % host
if region is None or int(region) == int(host['region']):
if host_key in old_hosts:
old_host = build_file_data['devs'][old_hosts[host_key]]
update_host_in_ring(build_file, host, old_host,
validate=validate)
old_hosts.pop(host_key)
else:
add_host_to_ring(build_file, host, validate=validate)
if old_hosts and not validate:
# There are still old hosts, these hosts must've been removed
for host in old_hosts:
remove_host_from_ring(build_file, host)
# Rebalance ring
if not validate:
if not hosts:
run_and_wait(
rb_main, ["swift-ring-builder", build_file, "write_ring"]
)
else:
run_and_wait(
rb_main, ["swift-ring-builder", build_file, "rebalance"]
)
def main(setup, region):
# load the json file
try:
with open(setup) as json_stream:
_contents_file = json.load(json_stream)
except Exception as ex:
print("Failed to load json string %s" % ex)
return 1
hosts = _contents_file['drives']
kargs = {'validate': True, 'hosts': hosts, 'region': region}
ring_call = [
_contents_file['builder_file'],
_contents_file['repl_number'],
_contents_file['min_part_hours'],
_contents_file['part_power']
]
try:
build_ring(*ring_call, **kargs)
except RingValidationError as ex:
print(ex)
return 2
# If the validation passes lets go ahead and build the rings.
kargs.pop('validate')
build_ring(*ring_call, **kargs)
if __name__ == "__main__":
parser = OptionParser(USAGE)
parser.add_option(
"-f",
"--file",
dest="setup",
help="Specify the swift ring contents file.",
metavar="FILE"
)
parser.add_option(
"-r",
"--region",
help="Specify the region to manage for the ring file.",
dest="region",
type='int',
metavar="REGION"
)
options, _args = parser.parse_args(sys.argv[1:])
if options.setup and not exists(options.setup):
print("Swift ring contents file not found or doesn't exist")
parser.print_help()
sys.exit(1)
sys.exit(main(options.setup, options.region))
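# A minimal sketch (hypothetical values) of the JSON contents file consumed by
# main() above; the drive entries use the keys read by add_host_to_ring() and
# update_host_in_ring(), with repl_ip/repl_port optional:
#
# {
#     "builder_file": "object",
#     "repl_number": 3,
#     "min_part_hours": 1,
#     "part_power": 10,
#     "drives": [
#         {"region": 1, "zone": 1, "ip": "10.0.0.1", "port": 6000,
#          "device": "sdb", "weight": 100.0},
#         {"region": 1, "zone": 2, "ip": "10.0.0.2", "port": 6000,
#          "device": "sdb", "weight": 100.0, "repl_ip": "10.1.0.2"}
#     ]
# }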
|
|
#!/usr/bin/env python2
#
# Manager thread: drives master election, the heartbeat/dead-node workers and
# node list synchronization.
#
from __future__ import division # Python 3 forward compatibility
from __future__ import print_function # Python 3 forward compatibility
import logging
import sys
import threading
import time
import timer
import util
import udp_listener
class Manager(threading.Thread):
def __init__(self, a_context):
threading.Thread.__init__(self)
self._logger = logging.getLogger('nodechecker.manager')
self._ctx = a_context
self._continue = True
self._udp_listener = udp_listener.UDPSocketListener(self._ctx)
# self._hb_sender = timer.HeartBeatSender(self._ctx.heartbeat_period,
# [self._ctx])
self._hb_sender = None
#self._dead_node_scanner = timer.DeadNodeScanner(self._ctx)
self._dead_node_scanner = None
def run(self):
self._continue = True
self._udp_listener.start()
self._sync_collections(self._ctx.node_list)
self._loop_forever()
# print("Thread:" + str(thread.get_ident()) + ' ' + 'EXIT Manager.run() ')
def shutdown(self):
self._logger.debug("ENTER shutdown()")
self._continue = False
self._stop_master_workers()
self._stop_udp_listener()
self._logger.debug("EXIT shutdown()")
def _loop_forever(self):
index = 0
self._assign_master(self._ctx.this_node)
while self._continue:
index = self._master_election(index)
# print("Thread:" + str(thread.get_ident()) + ' ' + 'EXIT Manager._loop_forever() ')
'''
def _do_shutdown(self, ex_info=None, exit_status=1, message="Shutting down"):
        self._stop_master_workers()
        util.log_message(message, ex_info)
        #print("Shutting down " + str(ex_info))
'''
def _stop_master_workers(self):
self._logger.debug("ENTER _stop_master_workers()")
self._logger.debug("Stopping hb_sender)")
if self._hb_sender and self._hb_sender.isAlive():
self._logger.debug("Stopping hb_sender ......")
self._hb_sender.cancel()
self._hb_sender.join()
self._logger.debug("Stopping hb_sender DONE")
# if self._udp_listener.isAlive():
# self._udp_listener.shutdown()
# self._udp_listener.join()
self._logger.debug("Stopping dead_node_scanner")
if self._dead_node_scanner and self._dead_node_scanner.isAlive():
self._logger.debug("Stopping dead_node_scanner.....")
self._dead_node_scanner.cancel()
self._dead_node_scanner.join()
self._logger.debug("Stopping dead_node_scanner.....DONE")
self._logger.debug("EXIT _stop_master_workers()")
def _stop_udp_listener(self):
if self._udp_listener.isAlive():
self._udp_listener.shutdown()
self._udp_listener.join()
def _master_election(self, index):
print('_master_election ENTER')
new_index = index
try:
my_pos = self._ctx.active_node_list.index(self._ctx.this_node)
print("_master_election My position in the list:" + str(my_pos) + " index:" + str(index))
# logger.debug("My position in the list is %d, a = %d" % (my_pos, index))
count = self._get_master_count()
# In case that master has changed, assign a new master to self
if self._ctx.this_node.role == "SLAVE" and self._ctx.master_list:
if self._ctx.my_master not in self._ctx.master_list:
self._assign_master(self._ctx.master_list[0])
# If there is not enough of masters, and own ranking on the list
# equals index, then become master
if count == "TOO_LOW":
print("too low")
if index == my_pos:
print("'_master_election _become_a_master")
self._become_a_master()
new_index = (index + 1) % len(self._ctx.active_node_list)
print("index:" + str(index))
# In case that there is enough or too many masters, become
# slave
else:
self._become_a_slave()
except:
self._logger.debug("_master_election - shutdown")
util.log_message("_master_election - shutdown", sys.exc_info())
self.shutdown()
return new_index
def _get_master_count(self, heartbeat_periods=1):
"""Listens to master heartbeat signals.
Depending on of number of received signals, a decision is made on
how to proceed:
- In case of too small number of signals, the node attempts to be
itself a master.
- In case of too big number of signals, if the node is a slave, it
checks if it should itself run as a slave.
"""
self._logger.debug("_get_master_count ENTER")
ret = "FINE"
self._ctx.heartbeats_received = 0
self._ctx.master_list[:] = []
# Sleep, count masters when awake
self._logger.debug("_get_master_count sleep")
time.sleep(heartbeat_periods * self._ctx.heartbeat_period)
print("_get_master_count awake")
self._logger.debug("_get_master_count role: " + self._ctx.this_node.role)
self._ctx.resource_lock.acquire()
try:
if self._ctx.this_node.role == "MASTER":
expected_masters = 0
else:
expected_masters = 1
self._logger.debug("master list length:" + str(len(self._ctx.master_list)))
self._logger.debug(" expected masters" + str(expected_masters))
if len(self._ctx.master_list) < expected_masters:
ret = "TOO_LOW"
elif len(self._ctx.master_list) > expected_masters:
ret = "TOO_HIGH"
else:
ret = "FINE"
# if self._ctx.this_node.role == "SLAVE" and self._ctx.master_list:
# if self._ctx.my_master not in self._ctx.master_list:
# self.assign_master(self._ctx.master_list[0])
except:
# print("_get_master_count exception: " + sys.exc_info())
self._logger.debug("STRANGE")
util.log_exception(sys.exc_info())
finally:
self._ctx.resource_lock.release()
self._logger.debug("_get_master_count EXIT returning " + str(ret))
return ret
def _assign_master(self, new_master):
# global my_master, node_manager
self._logger.info("Setting node %s configuration to SLAVE, master name is %s"
% (self._ctx.this_node.hostname, new_master.hostname))
self._ctx.my_master = new_master
self._ctx.node_manager.configure_node_as_slave(
self._ctx.this_node.ip_address, self._ctx.RRD_HTTP_SERVER_PORT,
self._ctx.my_master.ip_address,
self._ctx.RRD_HTTP_SERVER_PORT)
def _become_a_master(self):
"""Triggers actions needed to prepare the node for running
in MASTER role. Runs the master loop.
"""
# global Thread, node_manager
print("_become_a_master,ENTER role: " + self._ctx.this_node.role)
if self._ctx.this_node.role != "MASTER":
self._ctx.this_node.role = "MASTER"
self._logger.info("This node became a MASTER")
print("_become_a_master() starting _dead_node_scanner")
self._dead_node_scanner = timer.DeadNodeScanner(self._ctx)
self._dead_node_scanner.start()
print("_become_a_master() starting _hb_sender")
self._hb_sender = timer.HeartBeatSender(self._ctx.heartbeat_period, [self._ctx])
self._hb_sender.start()
print("****hbsender - role**" + self._hb_sender._ctx.this_node.role)
print("****hbsender - role**" + self._hb_sender._ctx.this_node.to_json())
print("_become_a_master() configure_node_as_master")
self._ctx.node_manager.configure_node_as_master(self._ctx.this_node.ip_address)
print("_become_a_master store_list_to_file")
# master nodes use active_node_list file
util.store_list_to_file(self._ctx.active_node_list, self._ctx.active_node_list_file,
self._ctx.this_node.group_name)
print("_become_a_master exiting if")
else:
print("*******role is MASTER")
print("_become_a_master _master_loop")
self._master_loop()
print("_become_a_master EXIT")
print(self._ctx.this_node.role)
def _become_a_slave(self):
self._logger.info("Trying to become a SLAVE")
if self._ctx.this_node.role == "MASTER":
self._stop_master_workers()
self._ctx.this_node.role = "SLAVE"
if self._ctx.master_list:
self._assign_master(self._ctx.master_list[0])
else:
util.log_message("Unable to set a master for the node, master list empty", sys.exc_info())
self.shutdown()
self._slave_loop(self._ctx.node_list)
def _continue_as_master(self):
"""Returns True if a node should continue in master role"""
try:
ret = True
my_pos = self._ctx.node_list.index(self._ctx.this_node)
for m in self._ctx.master_list:
master_pos = self._ctx.node_list.index(m)
if master_pos < my_pos:
ret = False
break
self._logger.info("Continuing as master: %s" % str(ret))
except ValueError:
self._logger.debug("Active node list: %s" % self._ctx.active_node_list)
self._logger.debug("Master list: %s" % self._ctx.master_list)
self._logger.debug("Master: %s" % m)
util.log_exception(sys.exc_info())
return ret
def _master_loop(self):
# global node_list
# global active_node_list
# global dead_node_set
print("_master_loop ENTER")
self._logger.info("Master Loop start")
while self._continue:
print("_master_loop while loop start")
# 1) Check number of masters - this includes sleeping
if self._get_master_count(1) == "TOO_HIGH":
if not self._continue_as_master():
break
# 2) Read node list file, update own node collections if needed
self._ctx.resource_lock.acquire()
print("_master_loop _sync_collections")
node_list_changed = self._sync_collections(self._ctx.node_list)[0]
# 3) Process notifications
#mail_sender.send_notifications(ntf_reader.get_notifications(node_list))
self._ctx.ntf_manager.process_notifications(self._ctx.ntf_reader.get_notifications(self._ctx.node_list))
# 4) Send and store changes
if node_list_changed:
util.send(self._ctx.this_node,
self._ctx.active_node_list,
util.json_from_list(self._ctx.active_node_list, 'active_node_list'))
util.store_list_to_file(self._ctx.active_node_list, self._ctx.active_node_list_file,
self._ctx.this_node.group_name)
# 5) release lock
self._ctx.resource_lock.release()
# Can not continue as master
if self._continue: self._become_a_slave()
def _slave_loop(self, a_node_list):
self._logger.info("Slave Loop start")
while self._continue:
try:
self._sync_collections(a_node_list)
if self._get_master_count(self._ctx.node_master_timeout) == "TOO_LOW":
break
if self._ctx.my_master != self._ctx.master_list[0]:
self._assign_master(self._ctx.master_list[0])
except:
util.log_exception(sys.exc_info())
self.shutdown()
# TODO who calls this?
def _wait_for_machine_configured(self, file_reader):
"""In case of nosql and bigdata CMT is changing hostname, wait for that
action being complete"""
total_sleep_time = 0
wait_for_conf = False
for n in self._ctx.node_list:
machine_type = file_reader.read_attribute(n.ip_address, 'MACHINE_TYPE')
if machine_type == 'manager':
wait_for_conf = True
break
if wait_for_conf:
while True:
if util.get_hostname() != self._ctx.this_node.hostname:
self._logger.debug("Sleep")
total_sleep_time += self._ctx.CMT_CONF_WAIT
if total_sleep_time >= self._ctx.MAX_CMT_CONF_WAIT:
util.log_exception("Waiting for machine configurtion took too long")
self.shutdown()
time.sleep(self._ctx.CMT_CONF_WAIT)
else:
# sleep once more before the exit: to make sure that hostname
# change propagated
time.sleep(self._ctx.CMT_CONF_WAIT)
break
'''
Update active_node_list based on a freshly read nodelist.conf file.
That file is updated by an admin or the puppet master, which know which nodes
should be in the cluster.
Returns a flag indicating whether the active node list changed, together with
the freshly read node_list.
'''
def _sync_collections(self, a_node_list):
"""Read a_node_list, and update active_node_list and dead_node_list,
if needed"""
active_nodes_changed = False
try:
a_node_list[:] = self._ctx.nodelist_reader.read_node_list(self._ctx.this_node, self._ctx.mode)
# Check if cluster scaled out, or just created
# Fetch new nodes and add them to active_node_list
nodes = [n for n in a_node_list if n not in self._ctx.active_node_list and
n.ip_address not in self._ctx.dead_node_set]
for m in nodes:
self._ctx.active_node_list.append(m)
if nodes:
active_nodes_changed = True
else:
active_nodes_changed = False
# Check if cluster scaled in
# Remove node from active_node_list, if the node is not present any more
# in the cluster
nodes = [n for n in self._ctx.active_node_list if n not in a_node_list]
for m in nodes:
self._ctx.active_node_list.remove(m)
if nodes:
active_nodes_changed = True
# Remove node from dead_node_set, if the node is not present any more
# in the cluster
nodes = [ip for ip in self._ctx.dead_node_set
if not util.find_node_by_ip(ip, a_node_list)]
for m in nodes:
self._ctx.dead_node_set.remove(m)
except ValueError:
self._logger.debug("Failed to sync collections with the node list")
util.log_exception(sys.exc_info())
return active_nodes_changed, a_node_list
# TODO: who should use this?
def _set_master(self):
if not self._ctx.node_list:
util.log_exception(sys.exc_info(), "Unable to set a master for the node")
self.shutdown()
self._assign_master(self._ctx.node_list[0])
|
|
import math
import sys
import numpy as np
import pygame
import freqshow
import ui
# Spectrogram colors.
def lerp(x, x0, x1, y0, y1):
return y0 + (y1 - y0)*((x - x0)/(x1 - x0))
def rgb_lerp(x, x0, x1, c0, c1):
return (math.floor(lerp(x, x0, x1, float(c0[0]), float(c1[0]))),
math.floor(lerp(x, x0, x1, float(c0[1]), float(c1[1]))),
math.floor(lerp(x, x0, x1, float(c0[2]), float(c1[2]))))
def gradient_func(colors):
grad_width = 1.0 / (len(colors)-1.0)
def _fun(value):
if value <= 0.0:
return colors[0]
elif value >= 1.0:
return colors[-1]
else:
pos = int(value / grad_width)
c0 = colors[pos]
c1 = colors[pos+1]
x = (value % grad_width)/grad_width
return rgb_lerp(x, 0.0, 1.0, c0, c1)
return _fun
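# Illustrative usage sketch of the gradient helpers above (hypothetical color
# stops; the waterfall view actually uses freqshow.WATERFALL_GRAD):
#
#   grad = gradient_func([(0, 0, 0), (255, 0, 0), (255, 255, 255)])
#   grad(0.0)    # -> (0, 0, 0), values <= 0.0 clamp to the first stop
#   grad(0.25)   # -> about (127, 0, 0), interpolated between the first two stops
#   grad(1.0)    # -> (255, 255, 255), values >= 1.0 clamp to the last stop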
def clamp(x, x0, x1):
if x > x1:
return x1
elif x < x0:
return x0
else:
return x
class ViewBase(object):
def render(self, screen):
pass
def click(self, location):
pass
class MessageDialog(ViewBase):
def __init__(self, model, text, accept, cancel=None):
self.accept = accept
self.cancel = cancel
self.buttons = ui.ButtonGrid(model.width, model.height, 4, 5)
self.buttons.add(3, 4, 'Kabul', click=self.accept_click,
bg_color=freqshow.ACCEPT_BG)
if cancel is not None:
self.buttons.add(0, 4, 'Iptal', click=self.cancel_click,
bg_color=freqshow.CANCEL_BG)
self.label = ui.render_text(text, size=freqshow.NUM_FONT,
fg=freqshow.BUTTON_FG, bg=freqshow.MAIN_BG)
self.label_rect = ui.align(self.label.get_rect(),
(0, 0, model.width, model.height))
def render(self, screen):
screen.fill(freqshow.MAIN_BG)
self.buttons.render(screen)
screen.blit(self.label, self.label_rect)
def click(self, location):
self.buttons.click(location)
def accept_click(self, button):
self.accept()
def cancel_click(self, button):
self.cancel()
# Number buttons for the value-entry screen.
class NumberDialog(ViewBase):
def __init__(self, model, label_text, unit_text, initial='0', accept=None,
cancel=None, has_auto=False, allow_negative=False):
self.value = str(initial)
self.unit_text = unit_text
self.model = model
self.accept = accept
self.cancel = cancel
self.buttons = ui.ButtonGrid(model.width, model.height, 4, 5)
self.buttons.add(0, 1, '1', font_size=freqshow.NUM_FONT, click=self.number_click)
self.buttons.add(1, 1, '2', font_size=freqshow.NUM_FONT, click=self.number_click)
self.buttons.add(2, 1, '3', font_size=freqshow.NUM_FONT, click=self.number_click)
self.buttons.add(0, 2, '4', font_size=freqshow.NUM_FONT, click=self.number_click)
self.buttons.add(1, 2, '5', font_size=freqshow.NUM_FONT, click=self.number_click)
self.buttons.add(2, 2, '6', font_size=freqshow.NUM_FONT, click=self.number_click)
self.buttons.add(0, 3, '7', font_size=freqshow.NUM_FONT, click=self.number_click)
self.buttons.add(1, 3, '8', font_size=freqshow.NUM_FONT, click=self.number_click)
self.buttons.add(2, 3, '9', font_size=freqshow.NUM_FONT, click=self.number_click)
self.buttons.add(1, 4, '0', font_size=freqshow.NUM_FONT, click=self.number_click)
self.buttons.add(2, 4, '.', font_size=freqshow.NUM_FONT, click=self.decimal_click)
self.buttons.add(0, 4, 'Sil', click=self.delete_click)
if not allow_negative:
self.buttons.add(3, 1, 'Temizle', click=self.clear_click)
else:
self.buttons.add(3, 1, '+/-', click=self.posneg_click)
self.buttons.add(3, 3, 'Iptal', click=self.cancel_click,
bg_color=freqshow.CANCEL_BG)
self.buttons.add(3, 4, 'Kabul', click=self.accept_click,
bg_color=freqshow.ACCEPT_BG)
if has_auto:
self.buttons.add(3, 2, 'Auto', click=self.auto_click)
self.input_rect = (0, 0, self.model.width, self.buttons.row_size)
self.label = ui.render_text(label_text, size=freqshow.MAIN_FONT,
fg=freqshow.INPUT_FG, bg=freqshow.INPUT_BG)
self.label_pos = ui.align(self.label.get_rect(), self.input_rect,
horizontal=ui.ALIGN_LEFT, hpad=10)
def render(self, screen):
screen.fill(freqshow.MAIN_BG)
screen.fill(freqshow.INPUT_BG, self.input_rect)
screen.blit(self.label, self.label_pos)
value_label = ui.render_text('{0} {1}'.format(self.value, self.unit_text),
size=freqshow.NUM_FONT, fg=freqshow.INPUT_FG, bg=freqshow.INPUT_BG)
screen.blit(value_label, ui.align(value_label.get_rect(), self.input_rect,
horizontal=ui.ALIGN_RIGHT, hpad=-10))
self.buttons.render(screen)
def click(self, location):
self.buttons.click(location)
def auto_click(self, button):
self.value = 'Auto'
def clear_click(self, button):
self.value = '0'
def delete_click(self, button):
if self.value == 'Auto':
return
elif len(self.value) > 1:
self.value = self.value[:-1]
else:
self.value = '0'
def cancel_click(self, button):
if self.cancel is not None:
self.cancel()
def accept_click(self, button):
if self.accept is not None:
self.accept(self.value)
def decimal_click(self, button):
if self.value == 'Auto':
self.value = '0.'
elif self.value.find('.') == -1:
self.value += '.'
def number_click(self, button):
if self.value == '0' or self.value == 'Auto':
self.value = button.text
else:
self.value += button.text
def posneg_click(self, button):
if self.value == 'Auto':
return
else:
if self.value[0] == '-':
self.value = self.value[1:]
else:
self.value = '-' + self.value
class SettingsList(ViewBase):
def __init__(self, model, controller):
self.model = model
self.controller = controller
centerfreq_text = 'Merkezi Frekans: {0:0.2f} MHz'.format(model.get_center_freq())
samplerate_text = 'Sample Rate: {0:0.2f} MHz'.format(model.get_sample_rate())
gain_text = 'Kazanc: {0} dB'.format(model.get_gain())
min_text = 'Min: {0} dB'.format(model.get_min_string())
max_text = 'Max: {0} dB'.format(model.get_max_string())
# Create the buttons
self.buttons = ui.ButtonGrid(model.width, model.height, 4, 5)
self.buttons.add(0, 0, centerfreq_text, colspan=4, click=self.centerfreq_click)
self.buttons.add(0, 1, samplerate_text, colspan=4, click=self.sample_click)
self.buttons.add(0, 2, gain_text, colspan=4, click=self.gain_click)
self.buttons.add(0, 3, min_text, colspan=2, click=self.min_click)
self.buttons.add(2, 3, max_text, colspan=2, click=self.max_click)
self.buttons.add(0, 4, 'Geri', click=self.controller.change_to_main)
def render(self, screen):
screen.fill(freqshow.MAIN_BG)
self.buttons.render(screen)
def click(self, location):
self.buttons.click(location)
# Button click handlers
def centerfreq_click(self, button):
self.controller.number_dialog('Frekans:', 'MHz',
initial='{0:0.2f}'.format(self.model.get_center_freq()),
accept=self.centerfreq_accept)
def centerfreq_accept(self, value):
self.model.set_center_freq(float(value))
self.controller.waterfall.clear_waterfall()
self.controller.change_to_settings()
def sample_click(self, button):
self.controller.number_dialog('Sample Rate:', 'MHz',
initial='{0:0.2f}'.format(self.model.get_sample_rate()),
accept=self.sample_accept)
def sample_accept(self, value):
self.model.set_sample_rate(float(value))
self.controller.waterfall.clear_waterfall()
self.controller.change_to_settings()
def gain_click(self, button):
self.controller.number_dialog('Kazanc:', 'dB',
initial=self.model.get_gain(), accept=self.gain_accept,
has_auto=True)
def gain_accept(self, value):
self.model.set_gain(value)
self.controller.waterfall.clear_waterfall()
self.controller.change_to_settings()
def min_click(self, button):
self.controller.number_dialog('Min:', 'dB',
initial=self.model.get_min_string(), accept=self.min_accept,
has_auto=True, allow_negative=True)
def min_accept(self, value):
self.model.set_min_intensity(value)
self.controller.waterfall.clear_waterfall()
self.controller.change_to_settings()
def max_click(self, button):
self.controller.number_dialog('Max:', 'dB',
initial=self.model.get_max_string(), accept=self.max_accept,
has_auto=True, allow_negative=True)
def max_accept(self, value):
self.model.set_max_intensity(value)
self.controller.waterfall.clear_waterfall()
self.controller.change_to_settings()
# Main program screen
class SpectrogramBase(ViewBase):
def __init__(self, model, controller):
self.model = model
self.controller = controller
self.buttons = ui.ButtonGrid(model.width, model.height, 4, 5)
self.buttons.add(0, 0, 'Ayarlar', click=self.controller.change_to_settings)
self.buttons.add(1, 0, 'Mod', click=self.controller.toggle_main, colspan=2)
self.buttons.add(3, 0, 'Cikis', click=self.quit_click,
bg_color=freqshow.CANCEL_BG)
self.overlay_enabled = True
def render_spectrogram(self, screen):
raise NotImplementedError
def render_hash(self, screen, x, size=5, padding=2):
y = self.model.height - self.buttons.row_size + padding
pygame.draw.lines(screen, freqshow.BUTTON_FG, False,
[(x, y), (x-size, y+size), (x+size, y+size), (x, y), (x, y+2*size)])
def render(self, screen):
screen.fill(freqshow.MAIN_BG)
if self.overlay_enabled:
spect_rect = (0, self.buttons.row_size, self.model.width,
self.model.height-2*self.buttons.row_size)
self.render_spectrogram(screen.subsurface(spect_rect))
self.render_hash(screen, 0)
self.render_hash(screen, self.model.width/2)
self.render_hash(screen, self.model.width-1)
bottom_row = (0, self.model.height-self.buttons.row_size,
self.model.width, self.buttons.row_size)
freq = self.model.get_center_freq()
bandwidth = self.model.get_sample_rate()
label = ui.render_text('{0:0.2f} MHz'.format(freq-bandwidth/2.0),
size=freqshow.MAIN_FONT)
screen.blit(label, ui.align(label.get_rect(), bottom_row,
horizontal=ui.ALIGN_LEFT))
label = ui.render_text('{0:0.2f} MHz'.format(freq),
size=freqshow.MAIN_FONT)
screen.blit(label, ui.align(label.get_rect(), bottom_row,
horizontal=ui.ALIGN_CENTER))
label = ui.render_text('{0:0.2f} MHz'.format(freq+bandwidth/2.0),
size=freqshow.MAIN_FONT)
screen.blit(label, ui.align(label.get_rect(), bottom_row,
horizontal=ui.ALIGN_RIGHT))
label = ui.render_text('{0:0.0f} dB'.format(self.model.min_intensity),
size=freqshow.MAIN_FONT)
screen.blit(label, ui.align(label.get_rect(), spect_rect,
horizontal=ui.ALIGN_LEFT, vertical=ui.ALIGN_BOTTOM))
label = ui.render_text('{0:0.0f} dB'.format(self.model.max_intensity),
size=freqshow.MAIN_FONT)
screen.blit(label, ui.align(label.get_rect(), spect_rect,
horizontal=ui.ALIGN_LEFT, vertical=ui.ALIGN_TOP))
self.buttons.render(screen)
else:
self.render_spectrogram(screen)
def click(self, location):
mx, my = location
if my > self.buttons.row_size and my < 4*self.buttons.row_size:
self.overlay_enabled = not self.overlay_enabled
else:
self.buttons.click(location)
def quit_click(self, button):
self.controller.message_dialog('Cikis',
accept=self.quit_accept)
def quit_accept(self):
sys.exit(0)
class WaterfallSpectrogram(SpectrogramBase):
def __init__(self, model, controller):
super(WaterfallSpectrogram, self).__init__(model, controller)
self.color_func = gradient_func(freqshow.WATERFALL_GRAD)
self.waterfall = pygame.Surface((model.width, model.height))
def clear_waterfall(self):
self.waterfall.fill(freqshow.MAIN_BG)
def render_spectrogram(self, screen):
freqs = self.model.get_data()
self.waterfall.scroll(0, -1)
freqs = (freqs-self.model.min_intensity)/self.model.range
x, y, width, height = screen.get_rect()
wx, wy, wwidth, wheight = self.waterfall.get_rect()
offset = wheight - height
self.waterfall.lock()
for i in range(width):
power = clamp(freqs[i], 0.0, 1.0)
self.waterfall.set_at((i, wheight-1), self.color_func(power))
self.waterfall.unlock()
screen.blit(self.waterfall, (0, 0), area=(0, offset, width, height))
class InstantSpectrogram(SpectrogramBase):
def __init__(self, model, controller):
super(InstantSpectrogram, self).__init__(model, controller)
def render_spectrogram(self, screen):
freqs = self.model.get_data()
x, y, width, height = screen.get_rect()
freqs = height-np.floor(((freqs-self.model.min_intensity)/self.model.range)*height)
screen.fill(freqshow.MAIN_BG)
ylast = freqs[0]
for i in range(1, width):
y = freqs[i]
pygame.draw.line(screen, freqshow.INSTANT_LINE, (i-1, ylast), (i, y))
ylast = y
|
|
# coding: utf-8
# In[ ]:
from functools import wraps
from collections import OrderedDict
import re
import numpy as np
import pandas as pd
from IPython.core.display import display, HTML
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.interpolate import interp1d
from sklearn.metrics import make_scorer
from sklearn import cross_validation as cv
from sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin
from sklearn.manifold import TSNE
from sklearn import tree
from sklearn.ensemble import ExtraTreesClassifier
def random_subset(X, y, dims, n_shuffle=10, seed=42):
"""Selects a random subset of X and y according to the dimensions
Params:
X: n x d pandas dataframe
y: n x 1 pandas dataframe
dims: list of tuples
n_shuffle: run n_shuffle shuffle operations on the set of indices
seed: seed the random number generator
Returns:
X', y': sampled dataframes
Example:
Select only 75% of the values with target 0, and all values
where target is 1
$ dims = [(0, 0.75), (1, 1.0)]
"""
np.random.seed(seed)
idx = []
for target, factor in dims:
if (0 <= factor < 1.0):
n_samples = int(len(y[y == target]) * factor)
idx_sub = np.random.choice(y.index[y == target], n_samples, replace=False)
else:
idx_sub = y.index[y == target]
# Stack the indices together
idx = np.hstack((idx, idx_sub))
for i in range(n_shuffle):
np.random.shuffle(idx)
return X.loc[idx.astype(int)], y[idx.astype(int)]
def truncate(value, max_length=100, suffix="...", pre=5):
if len(value) > max_length:
return value[0:pre] + suffix + value[pre+len(suffix)+1:max_length+1]
else:
return value
def score(*args, **kwargs):
"""Decorator, that transform a function to a scorer.
A scorer has the arguments estimator, X, y_true, sample_weight=None
"""
decorator_args = args
decorator_kwargs = kwargs
def score_decorator(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
func_args = args
func_kwargs = kwargs
scorer = make_scorer(func, *decorator_args, **decorator_kwargs)
return scorer(*func_args, **func_kwargs)
return func_wrapper
return score_decorator
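# Illustrative usage sketch for the decorator above (hypothetical metric, not
# part of this module); the decorated function gains the
# (estimator, X, y_true) calling convention of a scikit-learn scorer:
#
#   @score(greater_is_better=True)
#   def my_accuracy(y_true, y_pred):
#       return np.mean(y_true == y_pred)
#
#   # my_accuracy(fitted_estimator, X_test, y_test) -> float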
def folds(y, n_folds=4, stratified=False, random_state=42, shuffle=True, **kwargs):
if stratified:
return cv.StratifiedKFold(y, n_folds=n_folds, shuffle=shuffle, random_state=random_state, **kwargs)
return cv.KFold(n=len(y), n_folds=n_folds, shuffle=shuffle, random_state=random_state, **kwargs)
def cross_val(estimator, X, y, n_jobs=-1, n_folds=4, proba=False, **kwargs):
# Extract values from pandas DF
if hasattr(X, 'values'):
X = X.values
if hasattr(y, 'values'):
y = y.values
# Return Cross validation score
if proba is True:
estimator.predict = lambda *args, **kwargs: estimator.predict_proba(*args, **kwargs)[:, 1]
return cv.cross_val_score(estimator, X, y, cv=folds(y, n_folds=n_folds), n_jobs=n_jobs, **kwargs)
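# Illustrative usage sketch (hypothetical estimator and data; relies on the
# pre-0.18 sklearn.cross_validation API imported above):
#
#   from sklearn.linear_model import LogisticRegression
#   scores = cross_val(LogisticRegression(), X_train, y_train, n_folds=4)
#   print(scores.mean(), scores.std())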
class BaseTransform(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Transform Interface"""
def __init__(self):
pass
def fit(self, X, y=None, **fit_params):
return self
def transform(self, X):
return X
class PandasTransform(BaseTransform):
def __init__(self):
pass
def transform(self, X):
return X.values
class Log1pTransform(BaseTransform):
def __init__(self, columns=None):
self.columns = columns
def transform(self, X):
if self.columns:
for column in self.columns:
X[column] = np.log1p(X[column])
return X
else:
return np.log1p(X)
def inverse_transform(self, X):
if self.columns:
for column in self.columns:
X[column] = np.expm1(X[column])
return X
else:
return np.expm1(X)
class NanPreProcessor(TransformerMixin):
"""Fills NaN with the most frequent value for object columns and with the
column median for numeric columns
@source: https://www.kaggle.com/cbrogan/titanic/xgboost-example-python/code
@based: http://stackoverflow.com/a/25562948"""
def fit(self, X, y=None):
self.fill = pd.Series([X[c].value_counts().index[0]
if X[c].dtype == np.dtype('O') else X[c].median() for c in X], index=X.columns)
return self
def transform(self, X, y=None):
return X.fillna(self.fill)
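# Illustrative usage sketch (hypothetical DataFrame `df`):
#
#   df_filled = NanPreProcessor().fit_transform(df)
#   # object columns: NaN -> most frequent value; numeric columns: NaN -> median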
def tsne_plot(X, y, title="", metric='l1', random_state=0, legend_loc='upper left', n_samples=None, n_components=2):
"""Plots the first 2 components of the t-distributed Stochastic Neighbor Embedding
References:
* http://blog.kaggle.com/2012/11/02/t-distributed-stochastic-neighbor-embedding-wins-merck-viz-challenge/"""
if n_samples:
# Get the shape of the training set
n_samples_orig, n_features = np.shape(X)
# Select n_samples random indices
rnd_indices = np.random.choice(n_samples_orig, n_samples)
X = X[rnd_indices]
y = y[rnd_indices]
# Create a t-SNE model
model = TSNE(n_components=n_components, random_state=random_state, metric=metric)
X_trans = model.fit_transform(X)
# Get a list of unique labels
labels = np.unique(y)
# This is only needed to adjust the size of the figure
# because otherwise it is really small
plt.figure(figsize=(15, 15), dpi=120)
# Get a list of color values
colors = cm.rainbow(np.linspace(0, 1, len(labels) * 2))
# Loop over labels
# enumerate also return the index from the list
for i, label in enumerate(labels):
# Get a feature vector with the matching label
# and add a scatter plot with the dataset
plt.scatter(X_trans[y == label][:,0], X_trans[y == label][:,1], c=colors[i], label=label)
# Add a legend
plt.legend(loc=legend_loc)
# Add axis labels
plt.xlabel("1st component")
plt.ylabel("2nd component")
# Add a title
plt.title(title)
# Render the plot
plt.show()
def duplicate_columns(data):
"""Find columns that are a duplicate of other columns
Params:
data pd.DataFrame
Returns:
list of column labels
"""
correlation = data.corr()
# Create a diagonal condition to filter the correlation of a column with itself
diag_mask = np.zeros(correlation.shape, dtype='bool')
np.fill_diagonal(diag_mask, True)
# Creates a mask of equal columns
equal_mask = np.isclose(correlation.mask(cond=diag_mask).abs().values, 1.0)
original_columns = set()
duplicate_columns = set()
# Iterate through the columns
for col in np.unique(correlation[equal_mask].index):
# Get all perfectly correlated cols
cols = list(correlation[col][np.isclose(correlation.ix[col].abs(), 1.0)].index)
# Sort by length
cols.sort(key=len)
# Find the original col
for c in cols:
if c in original_columns:
original_col = c
break
else:
original_col = cols[0]
original_columns.add(original_col)
# Remove the original column
cols.remove(original_col)
# Add the column to the duplicate cols
for c in cols:
duplicate_columns.add(c)
return list(duplicate_columns)
def zero_var_columns(data):
"""Find columns containing zero variance data
Params:
data pd.DataFrame
Returns:
list of column labels
"""
u = data.apply(lambda x: len(x.unique()))
return list(u[u == 1].index.values)
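# Illustrative usage sketch (hypothetical DataFrame `df`): drop perfectly
# correlated duplicates and constant columns before modelling.
#
#   to_drop = set(duplicate_columns(df)) | set(zero_var_columns(df))
#   df = df.drop(list(to_drop), axis=1)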
class Table(object):
def __init__(self, max_col_width=30):
self.values = OrderedDict()
self.size = 0
self.max_col_width = max_col_width
def add_column(self, label, values):
if label in self.values:
raise ValueError('Duplicate Column')
self.values[label] = values
self.size = max(len(values), self.size)
def max_length(self, col):
return max(max(list(map(lambda c: len(str(c)), self.values[col]))), len(col))
def html(self):
output = ""
output += "<table>"
output += "<thead>"
output += "<tr>"
for col in self.values:
output += '<th>{name:s}</th>'.format(name=col)
output += "</tr>"
output += "</thead>"
output += "<tbody>"
for r in range(self.size):
output += "<tr>"
for col in self.values:
output += '<td>{name:s}</td>'.format(name=str(self.values[col][r]))
output += "</tr>"
output += "</tbody>"
output += "</table>"
return output
def __str__(self):
col_sep = " |"
output = ""
dim = {col: min(self.max_length(col), self.max_col_width) for col in self.values}
for col in self.values:
output += ' {name:{fill}<{width}s}'.format(name=truncate(col, dim[col]), fill=" ", width=dim[col])
output += col_sep
output += "\n"
for col in self.values:
output += ' {name:{fill}<{width}s}'.format(name="", fill="-", width=dim[col])
output += col_sep
output += "\n"
for r in range(self.size):
for col in self.values:
output += ' {name:{fill}<{width}s}'.format(name=truncate(str(self.values[col][r]), dim[col]), fill=' ', width=dim[col])
output += col_sep
output += "\n"
return output
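# Illustrative usage sketch for the Table helper above (hypothetical values):
#
#   t = Table()
#   t.add_column('feature', ['age', 'income'])
#   t.add_column('NaN count', [0, 12])
#   print(t)                  # plain-text rendering via __str__
#   display(HTML(t.html()))   # HTML rendering inside a notebook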
def get_categoric_columns(data):
return data.select_dtypes(include=['object', 'category']).columns
def get_numeric_columns(data):
return data.select_dtypes(exclude=['object', 'category']).columns
def pretty_stats(data, stat=None, target_key=None):
"""Generate a pretty statistic about the dataframe *data*"""
cat_columns = get_categoric_columns(data)
num_columns = get_numeric_columns(data)
if not stat or stat == 'general':
table = Table()
table.add_column('property', values=[
'Number of features',
'Number of categorical features',
'Number of numerical features',
'Number of Samples',
])
table.add_column('values', values=[
len(data.columns),
len(cat_columns),
len(num_columns),
len(data),
])
display(HTML('<h1>General</h1>'))
display(HTML(table.html()))
if target_key and (not stat or stat == 'target'):
table = Table()
aggregate = data.groupby([target_key]).agg({data.columns[0]:len})
table.add_column('target', values=aggregate.index.values)
table.add_column('count', values=aggregate.values.flatten())
display(HTML('<h1>Distribution per Target</h1>'))
display(HTML(table.html()))
if not stat or stat == 'distribution':
table = Table()
num_data = data[num_columns]
distribution = num_data.describe()
table.add_column('feature', values=list(num_data.columns))
table.add_column('Unique', values=num_data.apply(lambda x: len(x.unique())))
table.add_column('NaN', values=num_data.isnull().sum().values)
table.add_column('min', values=distribution.ix['min'].values)
table.add_column('min count', values=num_data[num_data == num_data.min()].count())
table.add_column('mean', values=distribution.ix['mean'].values)
table.add_column('max', values=distribution.ix['max'].values)
table.add_column('max count', values=num_data[num_data == num_data.max()].count())
display(HTML('<h1>Distribution of Numerical Values</h1>'))
display(HTML(table.html()))
table = Table()
cat_data = data[cat_columns]
table.add_column('feature', values=list(cat_data.columns))
table.add_column('Num Categories', values=cat_data.apply(lambda x: len(x.unique())))
table.add_column('Categories', values=cat_data.apply(lambda x: list(set(x))))
table.add_column('NaN', values=cat_data.isnull().sum().values)
display(HTML('<h1>Distribution of Categorical Features</h1>'))
display(HTML(table.html()))
if not stat or stat == 'correlation':
table = Table()
num_data = data[num_columns]
correlation = num_data.corr()
# Create a diagonal condition to filter the correlation of a column with itself
diag_mask = np.zeros(correlation.shape, dtype='bool')
np.fill_diagonal(diag_mask, True)
table.add_column('feature', values=list(num_data.columns))
table.add_column('highest value', values=correlation.mask(cond=diag_mask).abs().max(skipna=True).values)
table.add_column('correlated with', values=correlation.mask(cond=diag_mask).abs().idxmax(skipna=True).values)
table.add_column('mean', values=correlation.mask(cond=diag_mask).abs().mean().values)
display(HTML('<h1>Correlation of Numerical Features</h1>'))
display(HTML(table.html()))
def feature_importance(X, y, criterion='entropy', n_estimators=250, random_state=0):
clf = ExtraTreesClassifier(n_estimators=n_estimators, random_state=random_state, criterion=criterion)
clf = clf.fit(X, y)
importances = clf.feature_importances_
std = np.std([tree.feature_importances_ for tree in clf.estimators_], axis=0)
indices = np.argsort(importances)[::-1]
return pd.DataFrame({"column":X.columns, "importance":importances, "std": std}).set_index(indices)
def plot_feature_importance(X, y, **kwargs):
importances = feature_importance(X, y, **kwargs).sort(columns="importance", ascending=False)
# Plot the feature importances of the forest
plt.figure(figsize=(15, 5), dpi=120)
plt.title("Feature importances")
plt.bar(range(len(importances)), importances['importance'].values, color="r", yerr=importances['std'].values, align="center")
plt.xticks(range(len(importances)), importances.column.values)
plt.xticks(rotation=90)
plt.xlim([-1, len(importances)])
plt.show()
def split_dummies(data, train, col):
dummies_train = pd.get_dummies(train[col], prefix=col)
dummies = pd.get_dummies(data[col], prefix=col)
for d_col in dummies_train.columns:
data[d_col] = dummies[d_col].values
print("Created dummies for %s: " % col, dummies_train.columns)
data.drop(col, axis=1, inplace=True)
return data
def split_most_common(data, train, col):
mc_mask = np.isclose(data[col], train[col].value_counts().index[0])
data[col + '_mc'] = mc_mask.astype(int)
data[col + '_log'] = normalize(data.loc[~mc_mask, col].map(np.log))
data.set_value(mc_mask, col + '_log', 0)
data[col + '_log'].fillna(0, inplace=True)
print("Created features for %s: " % col, col + '_mc', col + '_log')
data.drop(col, axis=1, inplace=True)
return data
def normalize(data):
return data.apply(lambda x: (x - np.mean(x)) / (np.max(x) - np.min(x)))
def minmax(data):
xmin = data.min()
return (data - xmin) / (data.max() - xmin)
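# Illustrative sketch of the two scalings (hypothetical DataFrame/Series):
#
#   normalize(df[['income']])   # column-wise: mean-centred, divided by the range
#   minmax(df['income'])        # rescaled into the [0, 1] interval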
def target_hist(data, X, y, bins=100, figsize=(15, 5), density=False):
# setting up the axes
fig = plt.figure(figsize=figsize, dpi=120)
targets = np.unique(y)
colors = cm.rainbow(np.linspace(0, 1, len(targets)))
width = None
_bins = np.linspace(np.min(X), np.max(X), bins, endpoint=True)
s = np.asarray(list(range(len(targets)))) - (len(targets) - 1) * 0.5
# now plot
for i, t in enumerate(targets):
h, b = np.histogram(X[y == t], bins=_bins, density=density)
center = (b[:-1] + b[1:]) / 2
if width is None:
width = np.abs(center[0] - center[1]) / len(targets) * 0.8
# f = interp1d(center, h, kind='cubic', fill_value=0, bounds_error=False)
# x = np.linspace(np.min(center), np.max(center), num=len(center)*10, endpoint=True)
# plt.plot(x, f(x), label=t)
offset = s[i] * width
plt.bar(center + offset, h, width=width, align='center', color=colors[i], label=t, alpha=0.75)
# show
plt.legend()
plt.show()
def feature_hists(data, bins=20, figsize=(15, 5)):
num_data = data[get_numeric_columns(data)]
uniques = num_data.apply(lambda x: len(x.unique()))
bin_options = {col: min(bins, uniques[col]) for col in num_data.columns}
for col in get_categoric_columns(data):
plt.figure(figsize=figsize, dpi=120)
data[col].value_counts().plot(kind='bar')
plt.title(col)
for col in num_data.columns:
plt.figure(figsize=figsize, dpi=120)
plt.title(col)
data[col].plot(kind='hist', alpha=0.5, bins=bin_options[col])
|
|
import json
import re
import uuid
from typing import Any, Dict, List, Optional
# This import verifies that the dependencies are available.
from pyhive import hive # noqa: F401
from pyhive.sqlalchemy_hive import HiveDate, HiveDecimal, HiveTimestamp
from datahub.ingestion.extractor import schema_util
from datahub.ingestion.source.sql.sql_common import (
BasicSQLAlchemyConfig,
SQLAlchemySource,
register_custom_type,
)
from datahub.metadata.com.linkedin.pegasus2avro.schema import (
DateTypeClass,
NullTypeClass,
NumberTypeClass,
SchemaField,
TimeTypeClass,
)
register_custom_type(HiveDate, DateTypeClass)
register_custom_type(HiveTimestamp, TimeTypeClass)
register_custom_type(HiveDecimal, NumberTypeClass)
class HiveConfig(BasicSQLAlchemyConfig):
# defaults
scheme = "hive"
# Hive SQLAlchemy connector returns views as tables.
# See https://github.com/dropbox/PyHive/blob/b21c507a24ed2f2b0cf15b0b6abb1c43f31d3ee0/pyhive/sqlalchemy_hive.py#L270-L273.
# Disabling views helps us prevent this duplication.
include_views = False
class HiveSource(SQLAlchemySource):
_COMPLEX_TYPE = re.compile("^(struct|map|array|uniontype)")
def __init__(self, config, ctx):
super().__init__(config, ctx, "hive")
@classmethod
def create(cls, config_dict, ctx):
config = HiveConfig.parse_obj(config_dict)
return cls(config, ctx)
def get_schema_names(self, inspector):
assert isinstance(self.config, HiveConfig)
# This condition restricts the ingestion to the specified database.
if self.config.database:
return [self.config.database]
else:
return super().get_schema_names(inspector)
def get_schema_fields_for_column(
self,
dataset_name: str,
column: Dict[Any, Any],
pk_constraints: Optional[Dict[Any, Any]] = None,
) -> List[SchemaField]:
fields = super().get_schema_fields_for_column(
dataset_name, column, pk_constraints
)
if self._COMPLEX_TYPE.match(fields[0].nativeDataType) and isinstance(
fields[0].type.type, NullTypeClass
):
assert len(fields) == 1
field = fields[0]
# Get avro schema for subfields along with parent complex field
avro_schema = self.get_avro_schema_from_native_data_type(
field.nativeDataType, column["name"]
)
newfields = schema_util.avro_schema_to_mce_fields(
json.dumps(avro_schema), default_nullable=True
)
# First field is the parent complex field
newfields[0].nullable = field.nullable
newfields[0].description = field.description
newfields[0].isPartOfKey = field.isPartOfKey
return newfields
return fields
def get_avro_schema_from_native_data_type(
self, column_type: str, column_name: str
) -> Dict[str, Any]:
# Below Record structure represents the dataset level
# Inner fields represent the complex field (struct/array/map/union)
return {
"type": "record",
"name": "__struct_",
"fields": [
{"name": column_name, "type": _parse_datatype_string(column_type)}
],
}
_BRACKETS = {"(": ")", "[": "]", "{": "}", "<": ">"}
_all_atomic_types = {
"string": "string",
"int": "int",
"integer": "int",
"double": "double",
"double precision": "double",
"binary": "string",
"boolean": "boolean",
"float": "float",
"tinyint": "int",
"smallint": "int",
"int": "int",
"bigint": "long",
"varchar": "string",
"char": "string",
}
_FIXED_DECIMAL = re.compile(r"(decimal|numeric)(\(\s*(\d+)\s*,\s*(\d+)\s*\))?")
_FIXED_STRING = re.compile(r"(var)?char\(\s*(\d+)\s*\)")
def _parse_datatype_string(s, **kwargs):
s = s.strip()
if s.startswith("array<"):
if s[-1] != ">":
raise ValueError("'>' should be the last char, but got: %s" % s)
return {
"type": "array",
"items": _parse_datatype_string(s[6:-1]),
"native_data_type": s,
}
elif s.startswith("map<"):
if s[-1] != ">":
raise ValueError("'>' should be the last char, but got: %s" % s)
parts = _ignore_brackets_split(s[4:-1], ",")
if len(parts) != 2:
raise ValueError(
"The map type string format is: 'map<key_type,value_type>', "
+ "but got: %s" % s
)
kt = _parse_datatype_string(parts[0])
vt = _parse_datatype_string(parts[1])
# keys are assumed to be strings in avro map
return {
"type": "map",
"values": vt,
"native_data_type": s,
"key_type": kt,
"key_native_data_type": parts[0],
}
elif s.startswith("uniontype<"):
if s[-1] != ">":
raise ValueError("'>' should be the last char, but got: %s" % s)
parts = _ignore_brackets_split(s[10:-1], ",")
t = []
ustruct_seqn = 0
for part in parts:
if part.startswith("struct<"):
# ustruct_seqn defines sequence number of struct in union
t.append(_parse_datatype_string(part, ustruct_seqn=ustruct_seqn))
ustruct_seqn += 1
else:
t.append(_parse_datatype_string(part))
return t
elif s.startswith("struct<"):
if s[-1] != ">":
raise ValueError("'>' should be the last char, but got: %s" % s)
return _parse_struct_fields_string(s[7:-1], **kwargs)
elif ":" in s:
return _parse_struct_fields_string(s, **kwargs)
else:
return _parse_basic_datatype_string(s)
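# Illustrative sketch of the Hive-type -> Avro-schema mapping implemented above
# (outputs abbreviated; struct record names embed a generated uuid):
#
#   _parse_datatype_string("array<int>")
#   # -> {"type": "array", "items": {"type": "int", ...}, "native_data_type": "array<int>"}
#   _parse_datatype_string("map<string,bigint>")
#   # -> {"type": "map", "values": {"type": "long", ...}, "key_type": {...}, ...}
#   _parse_datatype_string("struct<name:string,age:int>")
#   # -> {"type": "record", "name": "__struct_<uuid>", "fields": [...], ...}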
def _parse_struct_fields_string(s, **kwargs):
parts = _ignore_brackets_split(s, ",")
fields = []
for part in parts:
name_and_type = _ignore_brackets_split(part, ":")
if len(name_and_type) != 2:
raise ValueError(
"The struct field string format is: 'field_name:field_type', "
+ "but got: %s" % part
)
field_name = name_and_type[0].strip()
if field_name.startswith("`"):
if field_name[-1] != "`":
raise ValueError("'`' should be the last char, but got: %s" % s)
field_name = field_name[1:-1]
field_type = _parse_datatype_string(name_and_type[1])
fields.append({"name": field_name, "type": field_type})
if kwargs.get("ustruct_seqn") is not None:
struct_name = "__structn_{}_{}".format(
kwargs["ustruct_seqn"], str(uuid.uuid4()).replace("-", "")
)
else:
struct_name = "__struct_{}".format(str(uuid.uuid4()).replace("-", ""))
return {
"type": "record",
"name": struct_name,
"fields": fields,
"native_data_type": "struct<{}>".format(s),
}
def _parse_basic_datatype_string(s):
if s in _all_atomic_types.keys():
return {
"type": _all_atomic_types[s],
"native_data_type": s,
"_nullable": True,
}
elif _FIXED_STRING.match(s):
m = _FIXED_STRING.match(s)
return {"type": "string", "native_data_type": s, "_nullable": True}
elif _FIXED_DECIMAL.match(s):
m = _FIXED_DECIMAL.match(s)
if m.group(2) is not None: # type: ignore
return {
"type": "bytes",
"logicalType": "decimal",
"precision": int(m.group(3)), # type: ignore
"scale": int(m.group(4)), # type: ignore
"native_data_type": s,
"_nullable": True,
}
else:
return {
"type": "bytes",
"logicalType": "decimal",
"native_data_type": s,
"_nullable": True,
}
elif s == "date":
return {
"type": "int",
"logicalType": "date",
"native_data_type": s,
"_nullable": True,
}
elif s == "timestamp":
return {
"type": "int",
"logicalType": "timestamp-millis",
"native_data_type": s,
"_nullable": True,
}
else:
return {"type": "null", "native_data_type": s, "_nullable": True}
def _ignore_brackets_split(s, separator):
"""
Splits the given string by given separator, but ignore separators inside brackets pairs, e.g.
given "a,b" and separator ",", it will return ["a", "b"], but given "a<b,c>, d", it will return
["a<b,c>", "d"].
"""
parts = []
buf = ""
level = 0
for c in s:
if c in _BRACKETS.keys():
level += 1
buf += c
elif c in _BRACKETS.values():
if level == 0:
raise ValueError("Brackets are not correctly paired: %s" % s)
level -= 1
buf += c
elif c == separator and level > 0:
buf += c
elif c == separator:
parts.append(buf)
buf = ""
else:
buf += c
if len(buf) == 0:
raise ValueError("The %s cannot be the last char: %s" % (separator, s))
parts.append(buf)
return parts
|
|
#!/usr/bin/env python
#
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Install *_incremental.apk targets as well as their dependent files."""
import argparse
import glob
import logging
import os
import posixpath
import shutil
import sys
import zipfile
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
import devil_chromium
from devil.android import apk_helper
from devil.android import device_utils
from devil.android.sdk import version_codes
from devil.utils import reraiser_thread
from devil.utils import run_tests_helper
from pylib import constants
from pylib.utils import time_profile
prev_sys_path = list(sys.path)
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir, 'gyp'))
from util import build_utils
sys.path = prev_sys_path
def _DeviceCachePath(device):
file_name = 'device_cache_%s.json' % device.adb.GetDeviceSerial()
return os.path.join(constants.GetOutDirectory(), file_name)
def _TransformDexPaths(paths):
"""Given paths like ["/a/b/c", "/a/c/d"], returns ["b.c", "c.d"]."""
if len(paths) == 1:
return [os.path.basename(paths[0])]
prefix_len = len(os.path.commonprefix(paths))
return [p[prefix_len:].replace(os.sep, '.') for p in paths]
def _Execute(concurrently, *funcs):
"""Calls all functions in |funcs| concurrently or in sequence."""
timer = time_profile.TimeProfile()
if concurrently:
reraiser_thread.RunAsync(funcs)
else:
for f in funcs:
f()
timer.Stop(log=False)
return timer
def _GetDeviceIncrementalDir(package):
"""Returns the device path to put incremental files for the given package."""
return '/data/local/tmp/incremental-app-%s' % package
def _HasClasses(jar_path):
"""Returns whether the given jar contains classes.dex."""
with zipfile.ZipFile(jar_path) as jar:
return 'classes.dex' in jar.namelist()
def Uninstall(device, package, enable_device_cache=False):
"""Uninstalls and removes all incremental files for the given package."""
main_timer = time_profile.TimeProfile()
device.Uninstall(package)
if enable_device_cache:
# Uninstall is rare, so just wipe the cache in this case.
cache_path = _DeviceCachePath(device)
if os.path.exists(cache_path):
os.unlink(cache_path)
device.RunShellCommand(['rm', '-rf', _GetDeviceIncrementalDir(package)],
check_return=True)
logging.info('Uninstall took %s seconds.', main_timer.GetDelta())
def Install(device, apk, split_globs=None, native_libs=None, dex_files=None,
enable_device_cache=False, use_concurrency=True,
show_proguard_warning=False, permissions=(),
allow_downgrade=True):
"""Installs the given incremental apk and all required supporting files.
Args:
device: A DeviceUtils instance.
apk: The path to the apk, or an ApkHelper instance.
split_globs: Glob patterns for any required apk splits (optional).
native_libs: List of app's native libraries (optional).
dex_files: List of .dex.jar files that comprise the app's Dalvik code.
enable_device_cache: Whether to enable on-device caching of checksums.
use_concurrency: Whether to speed things up using multiple threads.
show_proguard_warning: Whether to print a warning about Proguard not being
enabled after installing.
permissions: A list of the permissions to grant, or None to grant all
non-blacklisted permissions in the manifest.
allow_downgrade: Whether to allow installing an APK whose version code is
lower than the one already on the device.
"""
main_timer = time_profile.TimeProfile()
install_timer = time_profile.TimeProfile()
push_native_timer = time_profile.TimeProfile()
push_dex_timer = time_profile.TimeProfile()
apk = apk_helper.ToHelper(apk)
apk_package = apk.GetPackageName()
device_incremental_dir = _GetDeviceIncrementalDir(apk_package)
# Install .apk(s) if any of them have changed.
def do_install():
install_timer.Start()
if split_globs:
splits = []
for split_glob in split_globs:
splits.extend((f for f in glob.glob(split_glob)))
device.InstallSplitApk(apk, splits, reinstall=True,
allow_cached_props=True, permissions=permissions,
allow_downgrade=allow_downgrade)
else:
device.Install(apk, reinstall=True, permissions=permissions,
allow_downgrade=allow_downgrade)
install_timer.Stop(log=False)
# Push .so and .dex files to the device (if they have changed).
def do_push_files():
if native_libs:
push_native_timer.Start()
with build_utils.TempDir() as temp_dir:
device_lib_dir = posixpath.join(device_incremental_dir, 'lib')
for path in native_libs:
# Note: Can't use symlinks as they don't work when
# "adb push parent_dir" is used (like we do here).
shutil.copy(path, os.path.join(temp_dir, os.path.basename(path)))
device.PushChangedFiles([(temp_dir, device_lib_dir)],
delete_device_stale=True)
push_native_timer.Stop(log=False)
if dex_files:
push_dex_timer.Start()
# Put all .dex files to be pushed into a temporary directory so that we
# can use delete_device_stale=True.
with build_utils.TempDir() as temp_dir:
device_dex_dir = posixpath.join(device_incremental_dir, 'dex')
# Ensure no two files have the same name.
transformed_names = _TransformDexPaths(dex_files)
for src_path, dest_name in zip(dex_files, transformed_names):
# Binary targets with no extra classes create .dex.jar without a
# classes.dex (which Android chokes on).
if _HasClasses(src_path):
shutil.copy(src_path, os.path.join(temp_dir, dest_name))
device.PushChangedFiles([(temp_dir, device_dex_dir)],
delete_device_stale=True)
push_dex_timer.Stop(log=False)
def check_selinux():
# Marshmallow has no filesystem access whatsoever. It might be possible to
# get things working on Lollipop, but attempts so far have failed.
# http://crbug.com/558818
has_selinux = device.build_version_sdk >= version_codes.LOLLIPOP
if has_selinux and apk.HasIsolatedProcesses():
raise Exception('Cannot use incremental installs on Android L+ without '
'first disabling isolated processes.\n'
'To do so, use GN arg:\n'
' disable_incremental_isolated_processes=true')
cache_path = _DeviceCachePath(device)
def restore_cache():
if not enable_device_cache:
logging.info('Ignoring device cache')
return
if os.path.exists(cache_path):
logging.info('Using device cache: %s', cache_path)
with open(cache_path) as f:
device.LoadCacheData(f.read())
# Delete the cached file so that any exceptions cause it to be cleared.
os.unlink(cache_path)
else:
logging.info('No device cache present: %s', cache_path)
def save_cache():
with open(cache_path, 'w') as f:
f.write(device.DumpCacheData())
logging.info('Wrote device cache: %s', cache_path)
# Create 2 lock files:
# * install.lock tells the app to pause on start-up (until we release it).
# * firstrun.lock is used by the app to pause all secondary processes until
# the primary process finishes loading the .dex / .so files.
def create_lock_files():
# Creates or zeros out lock files.
cmd = ('D="%s";'
'mkdir -p $D &&'
'echo -n >$D/install.lock 2>$D/firstrun.lock')
device.RunShellCommand(cmd % device_incremental_dir, check_return=True)
# The firstrun.lock is released by the app itself.
def release_installer_lock():
device.RunShellCommand('echo > %s/install.lock' % device_incremental_dir,
check_return=True)
# Concurrency here speeds things up quite a bit, but DeviceUtils hasn't
# been designed for multi-threading. Enabling only because this is a
# developer-only tool.
setup_timer = _Execute(
use_concurrency, create_lock_files, restore_cache, check_selinux)
_Execute(use_concurrency, do_install, do_push_files)
finalize_timer = _Execute(use_concurrency, release_installer_lock, save_cache)
logging.info(
'Took %s seconds (setup=%s, install=%s, libs=%s, dex=%s, finalize=%s)',
main_timer.GetDelta(), setup_timer.GetDelta(), install_timer.GetDelta(),
push_native_timer.GetDelta(), push_dex_timer.GetDelta(),
finalize_timer.GetDelta())
if show_proguard_warning:
logging.warning('Target had proguard enabled, but incremental install uses '
'non-proguarded .dex files. Performance characteristics '
'may differ.')
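# Illustrative programmatic usage sketch (hypothetical device and paths; main()
# below builds the equivalent call from command-line flags):
#
#   device = device_utils.DeviceUtils.HealthyDevices()[0]
#   Install(device, 'out/Debug/apks/Foo_incremental.apk',
#           dex_files=['out/Debug/foo.dex.jar'], enable_device_cache=True)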
def main():
parser = argparse.ArgumentParser()
parser.add_argument('apk_path',
help='The path to the APK to install.')
parser.add_argument('--split',
action='append',
dest='splits',
help='A glob matching the apk splits. '
'Can be specified multiple times.')
parser.add_argument('--native_lib',
dest='native_libs',
help='Path to native library (repeatable)',
action='append',
default=[])
parser.add_argument('--dex-file',
dest='dex_files',
help='Path to dex files (repeatable)',
action='append',
default=[])
parser.add_argument('-d', '--device', dest='device',
help='Target device for apk to install on.')
parser.add_argument('--uninstall',
action='store_true',
default=False,
help='Remove the app and all side-loaded files.')
parser.add_argument('--output-directory',
help='Path to the root build directory.')
parser.add_argument('--no-threading',
action='store_false',
default=True,
dest='threading',
help='Do not install and push concurrently')
parser.add_argument('--no-cache',
action='store_false',
default=True,
dest='cache',
help='Do not use cached information about what files are '
'currently on the target device.')
parser.add_argument('--show-proguard-warning',
action='store_true',
default=False,
help='Print a warning about proguard being disabled')
parser.add_argument('--dont-even-try',
help='Prints this message and exits.')
parser.add_argument('-v',
'--verbose',
dest='verbose_count',
default=0,
action='count',
help='Verbose level (multiple times for more)')
parser.add_argument('--disable-downgrade',
action='store_false',
default=True,
dest='allow_downgrade',
help='Disable install of apk with lower version number '
'than the version already on the device.')
args = parser.parse_args()
run_tests_helper.SetLogLevel(args.verbose_count)
constants.SetBuildType('Debug')
if args.output_directory:
constants.SetOutputDirectory(args.output_directory)
devil_chromium.Initialize(output_directory=constants.GetOutDirectory())
if args.dont_even_try:
logging.fatal(args.dont_even_try)
return 1
# Retries are annoying when commands fail for legitimate reasons. Might want
# to enable them if this is ever used on bots though.
device = device_utils.DeviceUtils.HealthyDevices(
device_arg=args.device,
default_retries=0,
enable_device_files_cache=True)[0]
apk = apk_helper.ToHelper(args.apk_path)
if args.uninstall:
Uninstall(device, apk.GetPackageName(), enable_device_cache=args.cache)
else:
Install(device, apk, split_globs=args.splits, native_libs=args.native_libs,
dex_files=args.dex_files, enable_device_cache=args.cache,
use_concurrency=args.threading,
show_proguard_warning=args.show_proguard_warning,
allow_downgrade=args.allow_downgrade)
if __name__ == '__main__':
sys.exit(main())
|
|
# This is the initial proof of concept for the core functions of the PDX Pool Schedule Scraper.
# Fairly disorganized, as I'm just playing with examples here.
# 31 Jan 2015 - Created by Emily Cain
# thanks to the authors of "Automate the Boring Stuff with Python", which was my starting point for this project.
# https://automatetheboringstuff.com/chapter11/
import requests, bs4, re
class PageScraper:
def __init__(self, url=None, file=None):
"""Keyword Arguments:
url - a string indicating an URL on the web
file - if using own HTML file, pass in a File object
"""
if url:
self.url = url
self.get_page_from_url(url)
if file:
self.file = file
self.get_page_from_file(file)
if not file and not url:
raise ValueError('need url or file')
class TableScraper:
def __init__(self, table):
"""Keyword Arguments:
table - in the form beautiful_soup.select(selector)[optional index]
"""
self.table = table
self.list_of_lists = []
def __str__(self):
if self.table.get("id"):
return "table id=" + str(self.table.get("id"))
else:
return "table without id"
#this puts a text string in the list of lists to be processed later. What if I could do the conversion to a dictionary of times upfront?
def scrape_row(self, row):
    cells = row.select('td')
    for i in range(0, len(cells), 1):
        # if this column's list hasn't been created yet, do so
        try:
            self.list_of_lists[i]
        except IndexError:
            self.list_of_lists.append([])
        # will eventually be making a list_of_dicts instead
        try:
            # replace this with a function that creates a dictionary entry as below.
            self.list_of_lists[i].append(str(cells[i].text.encode('UTF-8')))
        except:
            self.list_of_lists[i].append('err')
def scrape_cell(self, cell):
    pass
# eventually I will take some data from this page...
# url = "https://www.portlandoregon.gov/parks/60939"
# res = requests.get(url)
# pool_soup = bs4.BeautifulSoup(res.text)
# ... but for now I will just use a sample file drawn from that page.
example_file = open('sample-table.html')
example_soup = bs4.BeautifulSoup(example_file)
########################
# list of lists method #
########################
list_of_lists = []
for row in example_soup.select('table')[0].select('tr'):
i = 0
for cell in row.select('td'):
# if list hasn't been populated with lists, do so
try:
list_of_lists[i]
except IndexError:
list_of_lists.append([])
# put cell data into the list. encode properly to avoid errors
try:
list_of_lists[i].append(str(cell.text.encode('UTF-8')))
except:
list_of_lists[i].append('err')
i += 1
print 'here is the list of lists'
for item in list_of_lists:
print '----------------------------'
print item
#####################
# add to dictionary #
#####################
days_dict = {
'Monday' : [],
'Tuesday' : [],
'Wednesday' : [],
'Thursday' : [],
'Friday' : [],
'Saturday' : [],
'Sunday' : [],
}
days_list = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
day_number_dict = {
0: [],
1: [],
2: [],
3: [],
4: [],
5: [],
6: [],
}
# for sublist in list_of_lists:
# schedule_day = ''
# for string in sublist:
# for day_of_week in days_list:
# if day_of_week in string:
# schedule_day = day_of_week
# if schedule_day != '':
# days_dict[schedule_day] = string
for j in range(0, len(list_of_lists), 1):
sublist = list_of_lists[j]
day_of_week = days_list[j]
print 'using', day_of_week, 'as key for', sublist
for string in sublist:
days_dict[day_of_week].append(string)
for key in days_dict:
print '----------'
print key
print days_dict[key]
print 'now attempting to parse time values'
from time import strftime, strptime
def parse_inner(txt):
times = re.findall('\d{1,2}:\d{1,2}', txt)
if len(times) == 2:
start_time_str = times[0]
end_time_str = times[1]
start_am = txt.split(start_time_str, 1)[1][0:2].lower() == 'am'
start_pm = txt.split(start_time_str, 1)[1][0:2].lower() == 'pm'
end_am = txt.split(end_time_str, 1)[1][0:2].lower() == 'am'
end_pm = txt.split(end_time_str, 1)[1][0:2].lower() == 'pm'
am_pm = ''
# do it this way because the times are not consistently marked
if start_am and end_pm:
am_pm = 'midday'
start_time = strptime(start_time_str, '%H:%M')
end_time = strptime(end_time_str+'pm', '%I:%M%p')
elif start_pm and end_am:
am_pm = 'midnight'
start_time = strptime(start_time_str+'pm', '%I:%M%p')
end_time = strptime(end_time_str, '%H:%M')
elif start_am or end_am:
am_pm = 'AM'
start_time = strptime(start_time_str, '%H:%M')
end_time = strptime(end_time_str, '%H:%M')
elif start_pm or end_pm:
am_pm = 'PM'
start_time = strptime(start_time_str+'pm', '%I:%M%p')
end_time = strptime(end_time_str+'pm', '%I:%M%p')
return {
'start_time': start_time,
'end_time' : end_time,
'am_pm' : am_pm,
'activity': txt.split(times[1])[-1].split('\n')[1],
}
return None
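# Illustrative sketch of the cell format parse_inner() expects (hypothetical
# text; real cells come from the scraped schedule table):
#
#   parse_inner('7:00am-8:30am\nLap Swim')
#   # -> {'start_time': <7:00>, 'end_time': <8:30>, 'am_pm': 'AM', 'activity': 'Lap Swim'}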
# add some functionality to find am/pm/md
day_time_dict = {}
for key in days_dict:
day_time_dict[key] = []
for string in days_dict[key]:
if parse_inner(string) is not None:
day_time_dict[key].append(parse_inner(string))
for day in day_time_dict:
print day
for entry in day_time_dict[day]:
print '%s starts at %s and ends at %s.' % (
entry['activity'],
strftime('%I:%M%p', entry['start_time']),
strftime('%I:%M%p', entry['end_time'])
)
print 'thanks for playing!'
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter construction support.
This module contains a base class for all converters, as well as supporting
structures. These structures are referred to as contexts.
The class hierarchy is as follows:
<your converter>
[extends] converter.Base
[extends] transformer.Base
[extends] gast.NodeTransformer
[uses] transformer.SourceInfo
[uses] converter.EntityContext
[uses] converter.ProgramContext
[uses] transformer.SourceInfo
converter.Base is a specialization of transformer.Base for AutoGraph. It's a
very lightweight subclass that adds a `ctx` attribute holding the corresponding
EntityContext object (see below). Note that converters are not reusable, and
`visit` will raise an error if called more than once.
converter.EntityContext contains mutable state associated with an entity that
the converter processes.
converter.ProgramContext contains mutable state across related entities. For
example, when converting several functions that call one another, the
ProgramContext should be shared across these entities.
Below is the overall flow at conversion:
program_ctx = ProgramContext(<entities to convert>, <global settings>, ...)
while <program_ctx has more entities to convert>:
entity, source_info = <get next entity from program_ctx>
entity_ctx = EntityContext(program_ctx, source_info)
for <each ConverterClass>:
converter = ConverterClass(entity_ctx)
# May update entity_ctx and program_ctx
entity = converter.visit(entity)
<add entity's dependencies to program_ctx>
Note that pyct contains a small number of transformers used for static analysis.
These implement transformer.Base, rather than converter.Base, to avoid a
dependency on AutoGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import live_values
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.autograph.pyct.static_analysis import type_info
from tensorflow.python.util.tf_export import tf_export
# TODO(mdan): These contexts can be refactored into first class objects.
# For example, we could define Program and Entity abstractions that hold on
# to the actual entity and have conversion methods.
# TODO(mdan): Add a test specific to this converter.
# TODO(mdan): Remove when updating the API.
@tf_export('autograph.experimental.Verbosity')
class Verbosity(enum.IntEnum):
"""Represents conversion verbosity levels.
Attributes:
BRIEF: No logging, minimal error messages.
VERBOSE: Detailed logging of generated code, detailed error messages.
"""
BRIEF = 0
VERBOSE = 1
@tf_export('autograph.experimental.Feature')
class Feature(enum.Enum):
"""Represents conversion options that can be toggled on or off.
Attributes:
ALL: Enable all features.
AUTO_CONTROL_DEPS: Insertion of control dependencies in the generated code.
ASSERT_STATEMENTS: Convert Tensor-dependent assert statements to tf.Assert.
BUILTIN_FUNCTIONS: Convert builtin functions applied to Tensors to
their TF counterparts.
ERROR_REWRITING: Rewrite errors that occur in the generated code to
indicate the source code to which the failing code corresponds.
LISTS: Convert list idioms, like initializers, slices, append, etc.
LOGICAL_EXPRESSIONS: Convert data-dependent logical expressions applied to
Tensors to their TF counterparts.
NAME_SCOPES: Insert name scopes that name ops according to context, like the
function they were defined in.
"""
ALL = 'ALL'
AUTO_CONTROL_DEPS = 'AUTO_CONTROL_DEPS'
ASSERT_STATEMENTS = 'ASSERT_STATEMENTS'
BUILTIN_FUNCTIONS = 'BUILTIN_FUNCTIONS'
ERROR_REWRITING = 'ERROR_REWRITING'
LISTS = 'LISTS'
LOGICAL_EXPRESSIONS = 'LOGICAL_EXPRESSIONS'
NAME_SCOPES = 'NAME_SCOPES'
@classmethod
def all(cls):
"""Returns a tuple that enables all options."""
return tuple(cls.__members__.values())
@classmethod
def all_but(cls, exclude):
"""Returns a tuple that enables all but the excluded options."""
if not isinstance(exclude, (list, tuple, set)):
exclude = (exclude,)
return tuple(set(cls.all()) - set(exclude) - {cls.ALL})
class ConversionOptions(object):
"""Immutable container for global conversion flags.
Attributes:
recursive: bool, whether to recursively convert any user functions or
classes that the converted function may use.
force_conversion: bool, whether to force converting the target entity. When
force_conversion is turned off, the converter may decide to return the
function as-is.
optional_features: Union[Feature, Set[Feature]], controls the use of
optional features in the conversion process. See Feature for available
options.
"""
def __init__(self,
recursive=False,
force_conversion=False,
internal_convert_user_code=True,
optional_features=Feature.ALL):
self.recursive = recursive
self.force_conversion = force_conversion
# TODO(mdan): Rename to conversion_recursion_depth?
self.internal_convert_user_code = internal_convert_user_code
if optional_features is None:
optional_features = ()
elif isinstance(optional_features, Feature):
optional_features = (optional_features,)
optional_features = frozenset(optional_features)
self.optional_features = optional_features
def uses(self, feature):
return (Feature.ALL in self.optional_features or
feature in self.optional_features)
def to_ast(self, internal_convert_user_code=None):
"""Returns a representation of this object as an AST node.
The AST node encodes a constructor that would create an object with the
same contents.
Args:
      internal_convert_user_code: Optional[bool], allows overriding the
corresponding value.
Returns:
ast.Node
"""
template = """
ag__.ConversionOptions(
recursive=recursive_val,
force_conversion=force_conversion_val,
optional_features=optional_features_val,
internal_convert_user_code=internal_convert_user_code_val)
"""
def list_of_features(values):
return parser.parse_expression('({})'.format(', '.join(
'ag__.{}'.format(str(v)) for v in values)))
if internal_convert_user_code is None:
internal_convert_user_code = self.internal_convert_user_code
expr_ast = templates.replace(
template,
recursive_val=parser.parse_expression(str(self.recursive)),
force_conversion_val=parser.parse_expression(
str(self.force_conversion)),
internal_convert_user_code_val=parser.parse_expression(
str(internal_convert_user_code)),
optional_features_val=list_of_features(self.optional_features))
return expr_ast[0].value
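# Illustrative sketch (not part of the original module): constructing
# ConversionOptions and querying features with `uses`. Values are
# hypothetical and for demonstration only.
def _example_conversion_options():
  opts = ConversionOptions(recursive=True,
                           optional_features=(Feature.LISTS,))
  assert opts.uses(Feature.LISTS)            # explicitly enabled
  assert not opts.uses(Feature.NAME_SCOPES)  # not enabled, and ALL not set
  return opts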
class ProgramContext(object):
"""ProgramContext keeps track of converting function hierarchies.
This object is mutable, and is updated during conversion. Not thread safe.
Attributes:
options: ConversionOptions
autograph_module: Module, a reference to the autograph module. This needs to
be specified by the caller to avoid circular dependencies.
required_imports: str, containing an import statement on each line. These
are all the imports necessary for the compiled code to run, in addition to
the closures of each entity, which are attached dynamically.
"""
def __init__(
self,
options,
autograph_module,
):
self.options = options
self.autograph_module = autograph_module
@property
def required_imports(self):
"""Returns a block containing all imports required by the converted code."""
# TODO(mdan): Check that these don't clobber one another.
return '\n'.join(config.COMPILED_IMPORT_STATEMENTS)
class EntityContext(transformer.Context):
"""Tracks the conversion of a single entity.
This object is mutable, and is updated during conversion. Not thread safe.
Attributes:
namer: Namer
info: transformer.EntityInfo
program: ProgramContext
"""
def __init__(self, namer, entity_info, program_ctx):
super(EntityContext, self).__init__(entity_info)
self.namer = namer
self.program = program_ctx
class Base(transformer.Base):
"""All converters should inherit from this class.
Attributes:
ctx: EntityContext
"""
def __init__(self, ctx):
super(Base, self).__init__(ctx)
self._used = False
self._ast_depth = 0
def get_definition_directive(self, node, directive, arg, default):
"""Returns the unique directive argument for a symbol.
See lang/directives.py for details on directives.
Example:
# Given a directive in the code:
ag.foo_directive(bar, baz=1)
# One can write for an AST node Name(id='bar'):
get_definition_directive(node, ag.foo_directive, 'baz')
Args:
node: ast.AST, the node representing the symbol for which the directive
argument is needed.
directive: Callable[..., Any], the directive to search.
arg: str, the directive argument to return.
default: Any
Raises:
ValueError: if conflicting annotations have been found
"""
defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
if not defs:
return default
arg_values_found = []
for def_ in defs:
if (directive in def_.directives and arg in def_.directives[directive]):
arg_values_found.append(def_.directives[directive][arg])
if not arg_values_found:
return default
if len(arg_values_found) == 1:
return arg_values_found[0]
# If multiple annotations reach the symbol, they must all match. If they do,
# return any of them.
first_value = arg_values_found[0]
for other_value in arg_values_found[1:]:
if not ast_util.matches(first_value, other_value):
qn = anno.getanno(node, anno.Basic.QN)
raise ValueError('%s has ambiguous annotations for %s(%s): %s, %s' %
(qn, directive.__name__, arg,
compiler.ast_to_source(other_value).strip(),
compiler.ast_to_source(first_value).strip()))
return first_value
def visit(self, node):
if not self._ast_depth:
if self._used:
raise ValueError('converter objects cannot be reused')
self._used = True
self._ast_depth += 1
try:
return super(Base, self).visit(node)
finally:
self._ast_depth -= 1
class AnnotatedDef(reaching_definitions.Definition):
def __init__(self):
super(AnnotatedDef, self).__init__()
self.directives = {}
class AgAnno(enum.Enum):
"""Annotation labels specific to AutoGraph. See anno.py."""
DIRECTIVES = 'User directives associated with the annotated statement.'
def __repr__(self):
return self.name
def standard_analysis(node, context, is_initial=False):
"""Performs a complete static analysis of the given code.
Args:
node: ast.AST
context: converter.EntityContext
is_initial: bool, whether this is the initial analysis done on the input
source code
Returns:
ast.AST, same as node, with the static analysis annotations added
"""
# TODO(mdan): Clear static analysis here.
# TODO(mdan): Consider not running all analyses every time.
# TODO(mdan): Don't return a node because it's modified by reference.
graphs = cfg.build(node)
node = qual_names.resolve(node)
node = activity.resolve(node, context, None)
node = reaching_definitions.resolve(node, context, graphs, AnnotatedDef)
node = liveness.resolve(node, context, graphs)
node = live_values.resolve(node, context, config.PYTHON_LITERALS)
node = type_info.resolve(node, context)
# This second call allows resolving first-order class attributes.
node = live_values.resolve(node, context, config.PYTHON_LITERALS)
if is_initial:
anno.dup(
node,
{
anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS,
},
)
return node
def apply_(node, context, converter_module):
"""Applies a converter to an AST.
Args:
node: ast.AST
context: converter.EntityContext
converter_module: converter.Base
Returns:
ast.AST, the result of applying converter to node
"""
node = standard_analysis(node, context)
node = converter_module.transform(node, context)
return node
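# Illustrative sketch (not part of the original module): the typical flow a
# caller would follow with the helpers above. All arguments are hypothetical
# and would be supplied by the conversion machinery.
def _example_apply(node, converter_module, namer, entity_info, program_ctx):
  # Bundle the per-entity state, then run static analysis and the transform.
  ctx = EntityContext(namer, entity_info, program_ctx)
  return apply_(node, ctx, converter_module)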
|
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 00:51:20 2015
Module to transform raw VTK data into VTK data rotated along the mother-bud axis.
@author: sweel
"""
import os
import os.path as op
import numpy as np
from mayavi import mlab
import pandas as pd
from tvtk.api import tvtk
from mombud.vtk_viz import vtkvizfuncs as vz
import wrappers as wr
# pylint: disable=C0103
datadir = op.join(os.getcwd(), 'data')
rawdir = op.join(os.getcwd(), 'output')
# filelist and graph list
vtkF = wr.ddwalk(op.join(rawdir, 'normSkel'),
'*skeleton.vtk', start=5, stop=-13)
mombud = wr.swalk(op.join(datadir, 'csv'),
'YP*csv', stop=-4)
filekeys = {item: vtkF[media][item] for media
in sorted(vtkF.keys()) for item
in sorted(vtkF[media].keys())}
DataSize = pd.read_table(op.join(datadir, 'csv', 'Results.txt'))
df = DataSize.ix[:, 1:]
df['cell'] = df.ix[:, 'Label'].apply(lambda x: x.partition(':')[2])
df['vol'] = 4. / 3 * np.pi * (df.Major * .055 / 2) * (df.Minor * .055 / 2) ** 2
# Draw cell using cellplot and edgeplot
if __name__ == "__main__":
dfmb = pd.DataFrame(columns=['base', 'neck', 'tip', 'media'])
mlab.close(all=True)
for key in sorted(mombud.keys()[-5:-3]):
df1 = pd.read_csv(op.join(datadir, 'csv', '%s.csv' % key),
header=0,
names=['x', 'y', 'z'],
index_col=0)
tip = np.array(df1.ix['tip'])
base = np.array(df1.ix['base'])
neck = np.array(df1.ix['neck'])
filekey = key
df2 = vz.getelipspar(filekey, df)
df2 = df2.sort_values('vol')
df2.reset_index(drop=True, inplace=True)
df2.index = ['bud', 'mom']
df2['center'] = zip((df2.X - 25) * .055, (225 - df2.Y) * .055)
figone = mlab.figure(figure=filekey,
size=(800, 600),
bgcolor=(0., 0., 0.))
figone.scene.disable_render = True
vtkobj, tubeout = vz.cellplot(figone, filekeys[filekey])
xmin, xmax, ymin, ymax, zmin, zmax = vtkobj.outputs[0].bounds
# zposition of center slice
try:
zp = df1.ix['centerpt'][0]
except KeyError:
zp = (zmax - zmin) / 2
vz.adjustlut(tubeout)
vz.drawelips('mom', df2, zpos=zp)
vz.drawelips('bud', df2, zpos=zp)
# get orientation vector defining mom bud axis
tr, rot, scale1 = vz.arrowvect(base, tip, neck)
arrsource = tvtk.ArrowSource(shaft_radius=.01,
shaft_resolution=18,
tip_length=.15,
tip_radius=.05,
tip_resolution=18)
        transformPD = tvtk.TransformPolyDataFilter(input=arrsource.output,
                                                   transform=tr)
        # All the transformation objects
        # ccw 90 rotation and TR to mother-bud coord system (for 2nd arrow)
ccw90 = np.eye(4)
ccw90[0, 0] = 0
ccw90[1, 1] = 0
ccw90[0, 1] = -1
ccw90[1, 0] = 1
trans1 = tvtk.Transform()
trans1.set_matrix(ccw90.flatten())
trans1.scale(1 / 3., 1 / 3., 1 / 3.)
trans1.post_multiply()
trans1.concatenate(tr)
        # inverse transform from mother-bud coords to Cartesian coords
trans2 = tvtk.Transform()
rot.transpose()
trans2.translate(-base)
trans2.post_multiply() # translate, THEN rotate
trans2.concatenate(rot)
trans2.translate([-1, 0, 0])
# transform to scale and translate default arrowsource
trans3 = tvtk.Transform()
trans3.scale(scale1, scale1, scale1)
trans3.post_multiply()
trans3.translate([-1, 0, 0])
# transform for second arrow (rotates 90ccw) at origin
trans4 = tvtk.Transform()
trans4.scale(scale1 / 3, scale1 / 3, scale1 / 3)
trans4.post_multiply()
trans4.concatenate(ccw90.flatten())
trans4.translate([-1, 0, 0])
# Draw all the transformed data
# mother bud axis arrow in mother bud coord system
arr_mombud = mlab.pipeline.surface(transformPD.output,
figure=figone,
opacity=.33)
# second arrow, perpendicular to arr_mombud
a2act = mlab.pipeline.surface(arrsource.output,
figure=figone,
opacity=.33)
a2act.actor.actor.user_transform = trans1
tippt = tvtk.SphereSource(center=tip, radius=.15)
mlab.pipeline.surface(tippt.output,
figure=figone,
color=(.3, 1., .3),
opacity=.33)
basept = tvtk.SphereSource(center=base, radius=.15)
mlab.pipeline.surface(basept.output,
figure=figone,
color=(.1, .3, 1),
opacity=.33)
neckpt = tvtk.SphereSource(center=neck, radius=.15)
mlab.pipeline.surface(neckpt.output,
figure=figone,
color=(1, .1, .1),
opacity=.33)
cell_t = tvtk.TransformPolyDataFilter(input=vtkobj.outputs[0],
transform=trans2).output
mom_t, _ = vz.drawelips('mom', df2, zpos=zp)
bud_t, _ = vz.drawelips('bud', df2, zpos=zp)
mom_t.actor.actor.user_transform = trans2
bud_t.actor.actor.user_transform = trans2
# transform the arrows and spheres in mombud axis coords back to origin
arr_mombud_t = mlab.pipeline.surface(arrsource.output,
figure=figone,
opacity=0.33)
arr_mombud_t.actor.actor.user_transform = trans3
a2act_t = mlab.pipeline.surface(arrsource.output,
figure=figone,
opacity=0.33)
a2act_t.actor.actor.user_transform = trans4
base_t = mlab.pipeline.surface(basept.output,
figure=figone,
color=(.1, .3, 1),
opacity=0.33)
tip_t = mlab.pipeline.surface(tippt.output,
figure=figone,
opacity=0.33,
color=(.3, 1., .3))
neck_t = mlab.pipeline.surface(neckpt.output,
figure=figone,
color=(1, .1, .1),
opacity=.33)
neck_t.actor.actor.user_transform = trans2
base_t.actor.actor.user_transform = trans2
tip_t.actor.actor.user_transform = trans2
dftemp = pd.Series({'base': base_t.actor.actor.center,
'neck': neck_t.actor.actor.center,
'tip': tip_t.actor.actor.center,
'media': key[:3],
'bud': df2.ix['bud', 'vol'],
'mom': df2.ix['mom', 'vol']},
name=key)
# mlab.close(all=True)
dfmb = dfmb.append(dftemp)
# THIS IS THE TRANSFORMED CELL VTK POLYDATA THAT WE WANT!!
cell_t2 = mlab.pipeline.surface(cell_t, figure=figone)
cell_t2.actor.mapper.scalar_visibility = True
cell_t2.module_manager.lut_data_mode = 'point data'
vz.adjustlut(cell_t2)
t2tube = mlab.pipeline.tube(cell_t2, figure=figone)
t2tube.filter.radius = .07
t2surfTube = mlab.pipeline.surface(t2tube)
t2surfTube.actor.mapper.scalar_visibility = True
vz.adjustlut(t2surfTube)
figone.scene.disable_render = False
mlab.view(0, 0, 180)
mlab.view(distance='auto')
# rotated vtk coordinate files
# w = tvtk.PolyDataWriter(input=cell_t, file_name='%s.vtk' % key)
# w.write()
# with open(op.join(datadir,
# 'transformedData',
# 'mombudtrans.pkl'), 'wb') as output:
# pickle.dump(dfmb, output)
|
|
"""Utilities to help with developing using bcbio inside of docker.
"""
import copy
import datetime
import glob
import math
import os
import shutil
import subprocess
import sys
import boto
import numpy
import yaml
from bcbio import utils
from bcbio.distributed import objectstore
from bcbio.pipeline import genome
from bcbio.provenance import do
from bcbiovm.aws import common
from bcbiovm.docker import defaults, install, manage, mounts
# default information about docker container
DOCKER = {"port": 8085,
"biodata_dir": "/usr/local/share/bcbio-nextgen",
"work_dir": "/mnt/work",
"image_url": "https://s3.amazonaws.com/bcbio_nextgen/bcbio-nextgen-docker-image.gz"}
def add_biodata_args(parser):
"""Add standard arguments for preparing biological data to a command line arg parser.
"""
parser.add_argument("--genomes", help="Genomes to download",
action="append", default=[],
choices=["GRCh37", "hg19", "mm10", "mm9", "rn5", "canFam3", "dm3", "Zv9", "phix",
"sacCer3", "xenTro3", "TAIR10", "WBcel235"])
parser.add_argument("--aligners", help="Aligner indexes to download",
action="append", default=[],
choices=["bowtie", "bowtie2", "bwa", "novoalign", "star", "ucsc"])
return parser
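# Illustrative sketch (not part of the original module): wiring
# add_biodata_args into a standalone argparse parser. The argument values
# below are hypothetical.
def _example_biodata_parser():
    import argparse
    parser = argparse.ArgumentParser(description="biodata example")
    parser = add_biodata_args(parser)
    args = parser.parse_args(["--genomes", "GRCh37", "--aligners", "bwa"])
    assert args.genomes == ["GRCh37"] and args.aligners == ["bwa"]
    return args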
def setup_cmd(subparsers):
parser = subparsers.add_parser("devel", help="Utilities to help with develping using bcbion inside of docker")
psub = parser.add_subparsers(title="[devel commands]")
iparser = psub.add_parser("setup_install", help="Run a python setup.py install inside of the current directory")
iparser.add_argument("-i", "--image", help="Image name to write updates to",
default=install.DEFAULT_IMAGE)
iparser.set_defaults(func=_run_setup_install)
sparser = psub.add_parser("system", help="Update bcbio system file with a given core and memory/core target")
sparser.add_argument("cores", help="Target cores to use for multi-core processes")
sparser.add_argument("memory", help="Target memory per core, in Mb (1000 = 1Gb)")
sparser.set_defaults(func=_run_system_update)
dparser = psub.add_parser("biodata", help="Upload pre-prepared biological data to cache")
dparser.add_argument("--prepped", help="Start with an existing set of cached data to output directory.")
dparser = add_biodata_args(dparser)
dparser.set_defaults(func=_run_biodata_upload)
dbparser = psub.add_parser("dockerbuild", help="Build docker image and export to S3")
dbparser.add_argument("-b", "--bucket", default="bcbio_nextgen",
help="S3 bucket to upload the gzipped docker image to")
dbparser.add_argument("-t", "--buildtype", default="full", choices=["full", "code"],
help=("Type of docker build to do. full is all code and third party tools. "
"code is only bcbio-nextgen code."))
dbparser.add_argument("-d", "--rundir", default="/tmp/bcbio-docker-build",
help="Directory to run docker build in")
parser.add_argument("-q", "--quiet", dest="verbose", action="store_false", default=True,
help="Quiet output when running Ansible playbooks")
dbparser.set_defaults(func=_run_docker_build)
# ## Install code to docker image
def _run_setup_install(args):
"""Install python code from a bcbio-nextgen development tree inside of docker.
"""
bmounts = ["-v", "%s:%s" % (os.getcwd(), "/tmp/bcbio-nextgen")]
cmd = ["docker", "run", "-i", "-d", "--net=host"] + bmounts + [args.image] + \
["bash", "-l", "-c",
("rm -rf /usr/local/share/bcbio-nextgen/anaconda/lib/python2.7/site-packages/bcbio && "
"cd /tmp/bcbio-nextgen && "
"/usr/local/share/bcbio-nextgen/anaconda/bin/python setup.py install")]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
cid = process.communicate()[0].strip()
do.run(["docker", "attach", "--no-stdin", cid], "Running in docker container: %s" % cid,
log_stdout=True)
subprocess.check_call(["docker", "commit", cid, args.image])
subprocess.check_call(["docker", "rm", cid], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print("Updated bcbio-nextgen install in docker container: %s" % args.image)
# ## Update bcbio_system.yaml
def _run_system_update(args):
"""Update bcbio_system.yaml file with a given target of cores and memory.
"""
mem_types = set(["memory", "jvm_opts"])
args = defaults.update_check_args(args, "Could not do upgrade of bcbio_system.yaml")
system_file = os.path.join(args.datadir, "galaxy", "bcbio_system.yaml")
with open(system_file) as in_handle:
config = yaml.safe_load(in_handle)
out = copy.deepcopy(config)
mems = []
for attrs in config.get("resources", {}).itervalues():
for key, value in attrs.iteritems():
if key in mem_types:
mems.append((key, value))
common_mem = _calculate_common_memory(mems)
for prog, attrs in config.get("resources", {}).iteritems():
for key, value in attrs.iteritems():
if key == "cores":
out['resources'][prog][key] = int(args.cores)
elif key in mem_types:
out["resources"][prog][key] = _update_memory(key, value, args.memory,
common_mem)
bak_file = system_file + ".bak%s" % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
shutil.move(system_file, bak_file)
with open(system_file, "w") as out_handle:
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
def _get_cur_mem(key, val):
if key == "memory":
cur_mem = val
elif key == "jvm_opts":
cur_mem = val[1].replace("-Xmx", "")
cur_val = int(cur_mem[:-1])
cur_mod = cur_mem[-1:]
if cur_mod.lower() == "g":
cur_val = cur_val * 1000
else:
assert cur_mod.lower() == "m"
return cur_val, cur_mod
def _calculate_common_memory(kvs):
"""Get the median memory specification, in megabytes.
"""
mems = []
for key, val in kvs:
cur_val, _ = _get_cur_mem(key, val)
mems.append(cur_val)
return numpy.median(mems)
def _update_memory(key, cur, target, common_mem):
"""Update memory specifications to match target.
Handles JVM options and both megabyte and gigabyte specifications.
`target` is in megabytes. Does not adjust down memory that is more
than 1.5x the current common memory setting, assuming these are pre-set for
higher memory requirements.
"""
mod_swap = {"G": "M", "g": "m"}
cur_mem, orig_mod = _get_cur_mem(key, cur)
if cur_mem >= common_mem * 1.5:
return cur
else:
new_val = "%s%s" % (target, mod_swap.get(orig_mod, orig_mod))
if key == "jvm_opts":
out = cur
out[-1] = "-Xmx%s" % new_val
else:
out = new_val
return out
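# Illustrative sketch (not part of the original module): how the memory
# helpers above behave on hypothetical values.
def _example_memory_update():
    # "2G" is normalized to megabytes (gigabyte modifier preserved).
    assert _get_cur_mem("memory", "2G") == (2000, "G")
    # With a common memory of 2000Mb, a 2G entry is rescaled to the 3000Mb
    # target; the gigabyte modifier is swapped to megabytes.
    assert _update_memory("memory", "2G", 3000, 2000) == "3000M"
    # Entries at or above 1.5x the common memory are assumed intentional
    # and left untouched.
    assert _update_memory("memory", "8G", 3000, 2000) == "8G"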
# ## Build docker images
def _run_docker_build(args):
playbook = os.path.join(common.ANSIBLE_BASE, "bcbio_vm_docker_local.yml")
inventory_path = os.path.join(common.ANSIBLE_BASE, "standard_hosts.txt")
def _setup_args(args, cluster_config):
return {"bcbio_bucket": args.bucket, "docker_buildtype": args.buildtype,
"bcbio_dir": args.rundir}
common.run_ansible_pb(inventory_path, playbook, args, _setup_args)
# ## Upload pre-built biological data
def _run_biodata_upload(args):
"""Manage preparation of biodata on a local machine, uploading to S3 in pieces.
"""
args = defaults.update_check_args(args, "biodata not uploaded")
args = install.docker_image_arg(args)
for gbuild in args.genomes:
print("Preparing %s" % gbuild)
if args.prepped:
for target in ["samtools"] + args.aligners:
genome.download_prepped_genome(gbuild, {}, target, False, args.prepped)
print("Downloaded prepped %s to %s. Edit and re-run without --prepped to upload"
% (gbuild, args.prepped))
return
cl = ["upgrade", "--genomes", gbuild]
for a in args.aligners:
cl += ["--aligners", a]
dmounts = mounts.prepare_system(args.datadir, DOCKER["biodata_dir"])
manage.run_bcbio_cmd(args.image, dmounts, cl)
print("Uploading %s" % gbuild)
gdir = _get_basedir(args.datadir, gbuild)
basedir, genomedir = os.path.split(gdir)
assert genomedir == gbuild
with utils.chdir(basedir):
all_dirs = sorted(os.listdir(gbuild))
_upload_biodata(gbuild, "seq", all_dirs)
for aligner in args.aligners:
_upload_biodata(gbuild, genome.REMAP_NAMES.get(aligner, aligner), all_dirs)
def _upload_biodata(gbuild, target, all_dirs):
"""Upload biodata for a specific genome build and target to S3.
"""
if target == "seq":
want_dirs = set(["rnaseq", "seq", "variation", "vep", "snpeff"])
target_dirs = [x for x in all_dirs if (x.startswith("rnaseq-") or x in want_dirs)]
else:
target_dirs = [x for x in all_dirs if x == target]
target_dirs = [os.path.join(gbuild, x) for x in target_dirs]
fname = objectstore.BIODATA_INFO["s3"].format(build=gbuild, target=target)
remotef = objectstore.parse_remote(fname)
conn = objectstore.connect(fname)
bucket = conn.get_bucket(remotef.bucket)
key = bucket.get_key(remotef.key)
if not key:
keyname = remotef.key
bucketname = remotef.bucket
target_dirs = " ".join(target_dirs)
cmd = ("tar -cvpf - {target_dirs} | pigz -c | "
"gof3r put --no-md5 -k {keyname} -b {bucketname} "
"-m x-amz-storage-class:REDUCED_REDUNDANCY -m x-amz-acl:public-read")
do.run(cmd.format(**locals()), "Upload pre-prepared genome data: %s %s" % (gbuild, target))
def _get_basedir(datadir, target_genome):
"""Retrieve base directory for uploading.
"""
genome_dir = os.path.join(datadir, "genomes")
for dirname in glob.glob(os.path.join(genome_dir, "*", "*")):
if dirname.endswith("/%s" % target_genome):
return dirname
|
|
# -*- coding: utf-8 -*-
"""Platforms.
Utilities dealing with platform specifics: signals, daemonization,
users, groups, and so on.
"""
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import math
import numbers
import os
import platform as _platform
import signal as _signal
import sys
import warnings
from collections import namedtuple
from contextlib import contextmanager
from billiard.compat import close_open_fds, get_fdmax
# fileno used to be in this module
from kombu.utils.compat import maybe_fileno
from kombu.utils.encoding import safe_str
from .exceptions import SecurityError
from .five import items, reraise, string_t
from .local import try_import
try:
from billiard.process import current_process
except ImportError: # pragma: no cover
current_process = None
_setproctitle = try_import('setproctitle')
resource = try_import('resource')
pwd = try_import('pwd')
grp = try_import('grp')
mputil = try_import('multiprocessing.util')
__all__ = (
'EX_OK', 'EX_FAILURE', 'EX_UNAVAILABLE', 'EX_USAGE', 'SYSTEM',
'IS_macOS', 'IS_WINDOWS', 'SIGMAP', 'pyimplementation', 'LockFailed',
'get_fdmax', 'Pidfile', 'create_pidlock', 'close_open_fds',
'DaemonContext', 'detached', 'parse_uid', 'parse_gid', 'setgroups',
'initgroups', 'setgid', 'setuid', 'maybe_drop_privileges', 'signals',
'signal_name', 'set_process_title', 'set_mp_process_title',
'get_errno_name', 'ignore_errno', 'fd_by_path', 'isatty',
)
# exitcodes
EX_OK = getattr(os, 'EX_OK', 0)
EX_FAILURE = 1
EX_UNAVAILABLE = getattr(os, 'EX_UNAVAILABLE', 69)
EX_USAGE = getattr(os, 'EX_USAGE', 64)
EX_CANTCREAT = getattr(os, 'EX_CANTCREAT', 73)
SYSTEM = _platform.system()
IS_macOS = SYSTEM == 'Darwin'
IS_WINDOWS = SYSTEM == 'Windows'
DAEMON_WORKDIR = '/'
PIDFILE_FLAGS = os.O_CREAT | os.O_EXCL | os.O_WRONLY
PIDFILE_MODE = ((os.R_OK | os.W_OK) << 6) | ((os.R_OK) << 3) | ((os.R_OK))
PIDLOCKED = """ERROR: Pidfile ({0}) already exists.
Seems we're already running? (pid: {1})"""
_range = namedtuple('_range', ('start', 'stop'))
C_FORCE_ROOT = os.environ.get('C_FORCE_ROOT', False)
ROOT_DISALLOWED = """\
Running a worker with superuser privileges when the
worker accepts messages serialized with pickle is a very bad idea!
If you really want to continue then you have to set the C_FORCE_ROOT
environment variable (but please think about this before you do).
User information: uid={uid} euid={euid} gid={gid} egid={egid}
"""
ROOT_DISCOURAGED = """\
You're running the worker with superuser privileges: this is
absolutely not recommended!
Please specify a different user using the --uid option.
User information: uid={uid} euid={euid} gid={gid} egid={egid}
"""
SIGNAMES = {
sig for sig in dir(_signal)
if sig.startswith('SIG') and '_' not in sig
}
SIGMAP = {getattr(_signal, name): name for name in SIGNAMES}
def isatty(fh):
"""Return true if the process has a controlling terminal."""
try:
return fh.isatty()
except AttributeError:
pass
def pyimplementation():
"""Return string identifying the current Python implementation."""
if hasattr(_platform, 'python_implementation'):
return _platform.python_implementation()
elif sys.platform.startswith('java'):
return 'Jython ' + sys.platform
elif hasattr(sys, 'pypy_version_info'):
v = '.'.join(str(p) for p in sys.pypy_version_info[:3])
if sys.pypy_version_info[3:]:
v += '-' + ''.join(str(p) for p in sys.pypy_version_info[3:])
return 'PyPy ' + v
else:
return 'CPython'
class LockFailed(Exception):
"""Raised if a PID lock can't be acquired."""
class Pidfile(object):
"""Pidfile.
This is the type returned by :func:`create_pidlock`.
See Also:
Best practice is to not use this directly but rather use
the :func:`create_pidlock` function instead:
more convenient and also removes stale pidfiles (when
the process holding the lock is no longer running).
"""
#: Path to the pid lock file.
path = None
def __init__(self, path):
self.path = os.path.abspath(path)
def acquire(self):
"""Acquire lock."""
try:
self.write_pid()
except OSError as exc:
reraise(LockFailed, LockFailed(str(exc)), sys.exc_info()[2])
return self
__enter__ = acquire
def is_locked(self):
"""Return true if the pid lock exists."""
return os.path.exists(self.path)
def release(self, *args):
"""Release lock."""
self.remove()
__exit__ = release
def read_pid(self):
"""Read and return the current pid."""
with ignore_errno('ENOENT'):
with open(self.path, 'r') as fh:
line = fh.readline()
if line.strip() == line: # must contain '\n'
raise ValueError(
'Partial or invalid pidfile {0.path}'.format(self))
try:
return int(line.strip())
except ValueError:
raise ValueError(
'pidfile {0.path} contents invalid.'.format(self))
def remove(self):
"""Remove the lock."""
with ignore_errno(errno.ENOENT, errno.EACCES):
os.unlink(self.path)
def remove_if_stale(self):
"""Remove the lock if the process isn't running.
        I.e., the process does not respond to signals.
"""
try:
pid = self.read_pid()
except ValueError as exc:
print('Broken pidfile found - Removing it.', file=sys.stderr)
self.remove()
return True
if not pid:
self.remove()
return True
try:
os.kill(pid, 0)
except os.error as exc:
if exc.errno == errno.ESRCH:
print('Stale pidfile exists - Removing it.', file=sys.stderr)
self.remove()
return True
return False
def write_pid(self):
pid = os.getpid()
content = '{0}\n'.format(pid)
pidfile_fd = os.open(self.path, PIDFILE_FLAGS, PIDFILE_MODE)
pidfile = os.fdopen(pidfile_fd, 'w')
try:
pidfile.write(content)
# flush and sync so that the re-read below works.
pidfile.flush()
try:
os.fsync(pidfile_fd)
except AttributeError: # pragma: no cover
pass
finally:
pidfile.close()
rfh = open(self.path)
try:
if rfh.read() != content:
raise LockFailed(
"Inconsistency: Pidfile content doesn't match at re-read")
finally:
rfh.close()
PIDFile = Pidfile # noqa: E305 XXX compat alias
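# Illustrative sketch (not part of the original module): exercising the
# Pidfile primitives directly against a hypothetical temporary path. Real
# code should normally go through create_pidlock() below instead.
def _example_pidfile(path='/tmp/example-app.pid'):
    pidlock = Pidfile(path)
    pidlock.acquire()           # raises LockFailed if the file already exists
    try:
        assert pidlock.is_locked()
        assert pidlock.read_pid() == os.getpid()
    finally:
        pidlock.release()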
def create_pidlock(pidfile):
"""Create and verify pidfile.
If the pidfile already exists the program exits with an error message,
however if the process it refers to isn't running anymore, the pidfile
is deleted and the program continues.
This function will automatically install an :mod:`atexit` handler
to release the lock at exit, you can skip this by calling
:func:`_create_pidlock` instead.
Returns:
Pidfile: used to manage the lock.
Example:
>>> pidlock = create_pidlock('/var/run/app.pid')
"""
pidlock = _create_pidlock(pidfile)
atexit.register(pidlock.release)
return pidlock
def _create_pidlock(pidfile):
pidlock = Pidfile(pidfile)
if pidlock.is_locked() and not pidlock.remove_if_stale():
print(PIDLOCKED.format(pidfile, pidlock.read_pid()), file=sys.stderr)
raise SystemExit(EX_CANTCREAT)
pidlock.acquire()
return pidlock
def fd_by_path(paths):
"""Return a list of file descriptors.
    This method returns a list of file descriptors corresponding to
    the file paths passed in the ``paths`` argument.
Arguments:
paths: List[str]: List of file paths.
Returns:
List[int]: List of file descriptors.
Example:
>>> keep = fd_by_path(['/dev/urandom', '/my/precious/'])
"""
stats = set()
for path in paths:
try:
fd = os.open(path, os.O_RDONLY)
except OSError:
continue
try:
stats.add(os.fstat(fd)[1:3])
finally:
os.close(fd)
def fd_in_stats(fd):
try:
return os.fstat(fd)[1:3] in stats
except OSError:
return False
return [_fd for _fd in range(get_fdmax(2048)) if fd_in_stats(_fd)]
class DaemonContext(object):
"""Context manager daemonizing the process."""
_is_open = False
def __init__(self, pidfile=None, workdir=None, umask=None,
fake=False, after_chdir=None, after_forkers=True,
**kwargs):
if isinstance(umask, string_t):
# octal or decimal, depending on initial zero.
umask = int(umask, 8 if umask.startswith('0') else 10)
self.workdir = workdir or DAEMON_WORKDIR
self.umask = umask
self.fake = fake
self.after_chdir = after_chdir
self.after_forkers = after_forkers
self.stdfds = (sys.stdin, sys.stdout, sys.stderr)
def redirect_to_null(self, fd):
if fd is not None:
dest = os.open(os.devnull, os.O_RDWR)
os.dup2(dest, fd)
def open(self):
if not self._is_open:
if not self.fake:
self._detach()
os.chdir(self.workdir)
if self.umask is not None:
os.umask(self.umask)
if self.after_chdir:
self.after_chdir()
if not self.fake:
# We need to keep /dev/urandom from closing because
# shelve needs it, and Beat needs shelve to start.
keep = list(self.stdfds) + fd_by_path(['/dev/urandom'])
close_open_fds(keep)
for fd in self.stdfds:
self.redirect_to_null(maybe_fileno(fd))
if self.after_forkers and mputil is not None:
mputil._run_after_forkers()
self._is_open = True
__enter__ = open
def close(self, *args):
if self._is_open:
self._is_open = False
__exit__ = close
def _detach(self):
if os.fork() == 0: # first child
os.setsid() # create new session
if os.fork() > 0: # pragma: no cover
# second child
os._exit(0)
else:
os._exit(0)
return self
def detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0,
workdir=None, fake=False, **opts):
"""Detach the current process in the background (daemonize).
Arguments:
logfile (str): Optional log file.
The ability to write to this file
will be verified before the process is detached.
pidfile (str): Optional pid file.
The pidfile won't be created,
as this is the responsibility of the child. But the process will
exit if the pid lock exists and the pid written is still running.
uid (int, str): Optional user id or user name to change
effective privileges to.
gid (int, str): Optional group id or group name to change
effective privileges to.
umask (str, int): Optional umask that'll be effective in
the child process.
workdir (str): Optional new working directory.
fake (bool): Don't actually detach, intended for debugging purposes.
**opts (Any): Ignored.
Example:
>>> from celery.platforms import detached, create_pidlock
>>> with detached(
... logfile='/var/log/app.log',
... pidfile='/var/run/app.pid',
... uid='nobody'):
... # Now in detached child process with effective user set to nobody,
... # and we know that our logfile can be written to, and that
... # the pidfile isn't locked.
... pidlock = create_pidlock('/var/run/app.pid')
...
... # Run the program
... program.run(logfile='/var/log/app.log')
"""
if not resource:
raise RuntimeError('This platform does not support detach.')
workdir = os.getcwd() if workdir is None else workdir
signals.reset('SIGCLD') # Make sure SIGCLD is using the default handler.
maybe_drop_privileges(uid=uid, gid=gid)
def after_chdir_do():
# Since without stderr any errors will be silently suppressed,
# we need to know that we have access to the logfile.
logfile and open(logfile, 'a').close()
# Doesn't actually create the pidfile, but makes sure it's not stale.
if pidfile:
_create_pidlock(pidfile).release()
return DaemonContext(
umask=umask, workdir=workdir, fake=fake, after_chdir=after_chdir_do,
)
def parse_uid(uid):
"""Parse user id.
Arguments:
uid (str, int): Actual uid, or the username of a user.
Returns:
int: The actual uid.
"""
try:
return int(uid)
except ValueError:
try:
return pwd.getpwnam(uid).pw_uid
except (AttributeError, KeyError):
raise KeyError('User does not exist: {0}'.format(uid))
def parse_gid(gid):
"""Parse group id.
Arguments:
gid (str, int): Actual gid, or the name of a group.
Returns:
int: The actual gid of the group.
"""
try:
return int(gid)
except ValueError:
try:
return grp.getgrnam(gid).gr_gid
except (AttributeError, KeyError):
raise KeyError('Group does not exist: {0}'.format(gid))
def _setgroups_hack(groups):
    # :func:`setgroups` may have a platform-dependent limit,
# and it's not always possible to know in advance what this limit
# is, so we use this ugly hack stolen from glibc.
groups = groups[:]
while 1:
try:
return os.setgroups(groups)
except ValueError: # error from Python's check.
if len(groups) <= 1:
raise
groups[:] = groups[:-1]
except OSError as exc: # error from the OS.
if exc.errno != errno.EINVAL or len(groups) <= 1:
raise
groups[:] = groups[:-1]
def setgroups(groups):
"""Set active groups from a list of group ids."""
max_groups = None
try:
max_groups = os.sysconf('SC_NGROUPS_MAX')
except Exception: # pylint: disable=broad-except
pass
try:
return _setgroups_hack(groups[:max_groups])
except OSError as exc:
if exc.errno != errno.EPERM:
raise
if any(group not in groups for group in os.getgroups()):
# we shouldn't be allowed to change to this group.
raise
def initgroups(uid, gid):
"""Init process group permissions.
Compat version of :func:`os.initgroups` that was first
added to Python 2.7.
"""
if not pwd: # pragma: no cover
return
username = pwd.getpwuid(uid)[0]
if hasattr(os, 'initgroups'): # Python 2.7+
return os.initgroups(username, gid)
groups = [gr.gr_gid for gr in grp.getgrall()
if username in gr.gr_mem]
setgroups(groups)
def setgid(gid):
"""Version of :func:`os.setgid` supporting group names."""
os.setgid(parse_gid(gid))
def setuid(uid):
"""Version of :func:`os.setuid` supporting usernames."""
os.setuid(parse_uid(uid))
def maybe_drop_privileges(uid=None, gid=None):
"""Change process privileges to new user/group.
    If UID and GID are specified, the real user/group is changed.
    If only UID is specified, the real user is changed, and the group is
    changed to the user's primary group.
If only GID is specified, only the group is changed.
"""
if sys.platform == 'win32':
return
if os.geteuid():
# no point trying to setuid unless we're root.
if not os.getuid():
raise SecurityError('contact support')
uid = uid and parse_uid(uid)
gid = gid and parse_gid(gid)
if uid:
_setuid(uid, gid)
else:
gid and setgid(gid)
if uid and not os.getuid() and not os.geteuid():
raise SecurityError('Still root uid after drop privileges!')
if gid and not os.getgid() and not os.getegid():
raise SecurityError('Still root gid after drop privileges!')
def _setuid(uid, gid):
# If GID isn't defined, get the primary GID of the user.
if not gid and pwd:
gid = pwd.getpwuid(uid).pw_gid
# Must set the GID before initgroups(), as setgid()
# is known to zap the group list on some platforms.
# setgid must happen before setuid (otherwise the setgid operation
# may fail because of insufficient privileges and possibly stay
# in a privileged group).
setgid(gid)
initgroups(uid, gid)
# at last:
setuid(uid)
# ... and make sure privileges cannot be restored:
try:
setuid(0)
except OSError as exc:
if exc.errno != errno.EPERM:
raise
# we should get here: cannot restore privileges,
# everything was fine.
else:
raise SecurityError(
'non-root user able to restore privileges after setuid.')
class Signals(object):
"""Convenience interface to :mod:`signals`.
If the requested signal isn't supported on the current platform,
the operation will be ignored.
Example:
>>> from celery.platforms import signals
>>> from proj.handlers import my_handler
>>> signals['INT'] = my_handler
>>> signals['INT']
my_handler
>>> signals.supported('INT')
True
>>> signals.signum('INT')
2
>>> signals.ignore('USR1')
>>> signals['USR1'] == signals.ignored
True
>>> signals.reset('USR1')
>>> signals['USR1'] == signals.default
True
>>> from proj.handlers import exit_handler, hup_handler
>>> signals.update(INT=exit_handler,
... TERM=exit_handler,
... HUP=hup_handler)
"""
ignored = _signal.SIG_IGN
default = _signal.SIG_DFL
if hasattr(_signal, 'setitimer'):
def arm_alarm(self, seconds):
_signal.setitimer(_signal.ITIMER_REAL, seconds)
else: # pragma: no cover
try:
from itimer import alarm as _itimer_alarm # noqa
except ImportError:
def arm_alarm(self, seconds): # noqa
_signal.alarm(math.ceil(seconds))
else: # pragma: no cover
def arm_alarm(self, seconds): # noqa
return _itimer_alarm(seconds) # noqa
def reset_alarm(self):
return _signal.alarm(0)
def supported(self, name):
"""Return true value if signal by ``name`` exists on this platform."""
try:
self.signum(name)
except AttributeError:
return False
else:
return True
def signum(self, name):
"""Get signal number by name."""
if isinstance(name, numbers.Integral):
return name
if not isinstance(name, string_t) \
or not name.isupper():
raise TypeError('signal name must be uppercase string.')
if not name.startswith('SIG'):
name = 'SIG' + name
return getattr(_signal, name)
def reset(self, *signal_names):
"""Reset signals to the default signal handler.
Does nothing if the platform has no support for signals,
or the specified signal in particular.
"""
self.update((sig, self.default) for sig in signal_names)
def ignore(self, *names):
"""Ignore signal using :const:`SIG_IGN`.
Does nothing if the platform has no support for signals,
or the specified signal in particular.
"""
self.update((sig, self.ignored) for sig in names)
def __getitem__(self, name):
return _signal.getsignal(self.signum(name))
def __setitem__(self, name, handler):
"""Install signal handler.
Does nothing if the current platform has no support for signals,
or the specified signal in particular.
"""
try:
_signal.signal(self.signum(name), handler)
except (AttributeError, ValueError):
pass
def update(self, _d_=None, **sigmap):
"""Set signal handlers from a mapping."""
for name, handler in items(dict(_d_ or {}, **sigmap)):
self[name] = handler
signals = Signals()
get_signal = signals.signum # compat
install_signal_handler = signals.__setitem__ # compat
reset_signal = signals.reset # compat
ignore_signal = signals.ignore # compat
def signal_name(signum):
"""Return name of signal from signal number."""
return SIGMAP[signum][3:]
def strargv(argv):
arg_start = 2 if 'manage' in argv[0] else 1
if len(argv) > arg_start:
return ' '.join(argv[arg_start:])
return ''
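# Illustrative sketch (not part of the original module): strargv drops the
# program name (and the extra 'manage' wrapper, when present) and joins the
# remaining argv elements.
def _example_strargv():
    assert strargv(['celery', 'worker', '-l', 'info']) == 'worker -l info'
    assert strargv(['manage.py', 'celery', 'worker']) == 'worker'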
def set_process_title(progname, info=None):
"""Set the :command:`ps` name for the currently running process.
Only works if :pypi:`setproctitle` is installed.
"""
proctitle = '[{0}]'.format(progname)
proctitle = '{0} {1}'.format(proctitle, info) if info else proctitle
if _setproctitle:
_setproctitle.setproctitle(safe_str(proctitle))
return proctitle
if os.environ.get('NOSETPS'): # pragma: no cover
def set_mp_process_title(*a, **k):
"""Disabled feature."""
else:
def set_mp_process_title(progname, info=None, hostname=None): # noqa
"""Set the :command:`ps` name from the current process name.
Only works if :pypi:`setproctitle` is installed.
"""
if hostname:
progname = '{0}: {1}'.format(progname, hostname)
name = current_process().name if current_process else 'MainProcess'
return set_process_title('{0}:{1}'.format(progname, name), info=info)
def get_errno_name(n):
"""Get errno for string (e.g., ``ENOENT``)."""
if isinstance(n, string_t):
return getattr(errno, n)
return n
@contextmanager
def ignore_errno(*errnos, **kwargs):
"""Context manager to ignore specific POSIX error codes.
Takes a list of error codes to ignore: this can be either
the name of the code, or the code integer itself::
>>> with ignore_errno('ENOENT'):
... with open('foo', 'r') as fh:
... return fh.read()
>>> with ignore_errno(errno.ENOENT, errno.EPERM):
... pass
Arguments:
types (Tuple[Exception]): A tuple of exceptions to ignore
(when the errno matches). Defaults to :exc:`Exception`.
"""
types = kwargs.get('types') or (Exception,)
errnos = [get_errno_name(errno) for errno in errnos]
try:
yield
except types as exc:
if not hasattr(exc, 'errno'):
raise
if exc.errno not in errnos:
raise
def check_privileges(accept_content):
uid = os.getuid() if hasattr(os, 'getuid') else 65535
gid = os.getgid() if hasattr(os, 'getgid') else 65535
euid = os.geteuid() if hasattr(os, 'geteuid') else 65535
egid = os.getegid() if hasattr(os, 'getegid') else 65535
if hasattr(os, 'fchown'):
if not all(hasattr(os, attr)
for attr in ['getuid', 'getgid', 'geteuid', 'getegid']):
raise SecurityError('suspicious platform, contact support')
if not uid or not gid or not euid or not egid:
if ('pickle' in accept_content or
'application/x-python-serialize' in accept_content):
if not C_FORCE_ROOT:
try:
print(ROOT_DISALLOWED.format(
uid=uid, euid=euid, gid=gid, egid=egid,
), file=sys.stderr)
finally:
sys.stderr.flush()
os._exit(1)
warnings.warn(RuntimeWarning(ROOT_DISCOURAGED.format(
uid=uid, euid=euid, gid=gid, egid=egid,
)))
|
|
"""Module that defines indexed objects
The classes IndexedBase, Indexed and Idx would represent a matrix element
M[i, j] as in the following graph::
    1) The Indexed class represents the entire indexed object.
               |
            ___|___
           '       '
            M[i, j]
           /   \__\______
           |             |
           |             |
           |     2) The Idx class represents indices; each Idx can
           |        optionally contain information about its range.
           |
    3) IndexedBase represents the `stem' of an indexed object, here `M'.
       The stem used by itself is usually taken to represent the entire
       array.
There can be any number of indices on an Indexed object. No
transformation properties are implemented in these Base objects, but
implicit contraction of repeated indices is supported.
Note that the support for complicated (i.e. non-atomic) integer
expressions as indices is limited. (This should be improved in
future releases.)
Examples
========
To express the above matrix element example you would write:
>>> from sympy.tensor import IndexedBase, Idx
>>> from sympy import symbols
>>> M = IndexedBase('M')
>>> i, j = symbols('i j', cls=Idx)
>>> M[i, j]
M[i, j]
Repeated indices in a product implies a summation, so to express a
matrix-vector product in terms of Indexed objects:
>>> x = IndexedBase('x')
>>> M[i, j]*x[j]
x[j]*M[i, j]
If the indexed objects will be converted to component based arrays, e.g.
with the code printers or the autowrap framework, you also need to provide
(symbolic or numerical) dimensions. This can be done by passing an
optional shape parameter to IndexedBase upon construction:
>>> dim1, dim2 = symbols('dim1 dim2', integer=True)
>>> A = IndexedBase('A', shape=(dim1, 2*dim1, dim2))
>>> A.shape
(dim1, 2*dim1, dim2)
>>> A[i, j, 3].shape
(dim1, 2*dim1, dim2)
If an IndexedBase object has no shape information, it is assumed that the
array is as large as the ranges of its indices:
>>> n, m = symbols('n m', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', n)
>>> M[i, j].shape
(m, n)
>>> M[i, j].ranges
[(0, m - 1), (0, n - 1)]
The above can be compared with the following:
>>> A[i, 2, j].shape
(dim1, 2*dim1, dim2)
>>> A[i, 2, j].ranges
[(0, m - 1), None, (0, n - 1)]
To analyze the structure of indexed expressions, you can use the methods
get_indices() and get_contraction_structure():
>>> from sympy.tensor import get_indices, get_contraction_structure
>>> get_indices(A[i, j, j])
(set([i]), {})
>>> get_contraction_structure(A[i, j, j])
{(j,): set([A[i, j, j]])}
See the appropriate docstrings for a detailed explanation of the output.
"""
# TODO: (some ideas for improvement)
#
# o test and guarantee numpy compatibility
# - implement full support for broadcasting
# - strided arrays
#
# o more functions to analyze indexed expressions
# - identify standard constructs, e.g matrix-vector product in a subexpression
#
# o functions to generate component based arrays (numpy and sympy.Matrix)
# - generate a single array directly from Indexed
# - convert simple sub-expressions
#
# o sophisticated indexing (possibly in subclasses to preserve simplicity)
# - Idx with range smaller than dimension of Indexed
# - Idx with stepsize != 1
# - Idx with step determined by function call
from __future__ import print_function, division
from sympy.core import Expr, Tuple, Symbol, sympify, S
from sympy.core.compatibility import is_sequence, string_types, NotIterable, range
class IndexException(Exception):
pass
class Indexed(Expr):
"""Represents a mathematical object with indices.
>>> from sympy.tensor import Indexed, IndexedBase, Idx
>>> from sympy import symbols
>>> i, j = symbols('i j', cls=Idx)
>>> Indexed('A', i, j)
A[i, j]
It is recommended that Indexed objects are created via IndexedBase:
>>> A = IndexedBase('A')
>>> Indexed('A', i, j) == A[i, j]
True
"""
is_commutative = True
def __new__(cls, base, *args):
from sympy.utilities.misc import filldedent
if not args:
raise IndexException("Indexed needs at least one index.")
if isinstance(base, (string_types, Symbol)):
base = IndexedBase(base)
elif not isinstance(base, IndexedBase):
raise TypeError(filldedent("""
Indexed expects string, Symbol or IndexedBase as base."""))
args = list(map(sympify, args))
return Expr.__new__(cls, base, *args)
@property
def base(self):
"""Returns the IndexedBase of the Indexed object.
Examples
========
>>> from sympy.tensor import Indexed, IndexedBase, Idx
>>> from sympy import symbols
>>> i, j = symbols('i j', cls=Idx)
>>> Indexed('A', i, j).base
A
>>> B = IndexedBase('B')
>>> B == B[i, j].base
True
"""
return self.args[0]
@property
def indices(self):
"""
Returns the indices of the Indexed object.
Examples
========
>>> from sympy.tensor import Indexed, Idx
>>> from sympy import symbols
>>> i, j = symbols('i j', cls=Idx)
>>> Indexed('A', i, j).indices
(i, j)
"""
return self.args[1:]
@property
def rank(self):
"""
Returns the rank of the Indexed object.
Examples
========
>>> from sympy.tensor import Indexed, Idx
>>> from sympy import symbols
>>> i, j, k, l, m = symbols('i:m', cls=Idx)
>>> Indexed('A', i, j).rank
2
>>> q = Indexed('A', i, j, k, l, m)
>>> q.rank
5
>>> q.rank == len(q.indices)
True
"""
return len(self.args) - 1
@property
def shape(self):
"""Returns a list with dimensions of each index.
Dimensions is a property of the array, not of the indices. Still, if
the IndexedBase does not define a shape attribute, it is assumed that
the ranges of the indices correspond to the shape of the array.
>>> from sympy.tensor.indexed import IndexedBase, Idx
>>> from sympy import symbols
>>> n, m = symbols('n m', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', m)
>>> A = IndexedBase('A', shape=(n, n))
>>> B = IndexedBase('B')
>>> A[i, j].shape
(n, n)
>>> B[i, j].shape
(m, m)
"""
from sympy.utilities.misc import filldedent
if self.base.shape:
return self.base.shape
try:
return Tuple(*[i.upper - i.lower + 1 for i in self.indices])
except AttributeError:
raise IndexException(filldedent("""
Range is not defined for all indices in: %s""" % self))
except TypeError:
raise IndexException(filldedent("""
Shape cannot be inferred from Idx with
undefined range: %s""" % self))
@property
def ranges(self):
"""Returns a list of tuples with lower and upper range of each index.
If an index does not define the data members upper and lower, the
corresponding slot in the list contains ``None`` instead of a tuple.
Examples
========
>>> from sympy import Indexed,Idx, symbols
>>> Indexed('A', Idx('i', 2), Idx('j', 4), Idx('k', 8)).ranges
[(0, 1), (0, 3), (0, 7)]
>>> Indexed('A', Idx('i', 3), Idx('j', 3), Idx('k', 3)).ranges
[(0, 2), (0, 2), (0, 2)]
>>> x, y, z = symbols('x y z', integer=True)
>>> Indexed('A', x, y, z).ranges
[None, None, None]
"""
ranges = []
for i in self.indices:
try:
ranges.append(Tuple(i.lower, i.upper))
except AttributeError:
ranges.append(None)
return ranges
def _sympystr(self, p):
indices = list(map(p.doprint, self.indices))
return "%s[%s]" % (p.doprint(self.base), ", ".join(indices))
class IndexedBase(Expr, NotIterable):
"""Represent the base or stem of an indexed object
    The IndexedBase class represents an array that contains elements. The main purpose
of this class is to allow the convenient creation of objects of the Indexed
class. The __getitem__ method of IndexedBase returns an instance of
Indexed. Alone, without indices, the IndexedBase class can be used as a
notation for e.g. matrix equations, resembling what you could do with the
Symbol class. But, the IndexedBase class adds functionality that is not
available for Symbol instances:
- An IndexedBase object can optionally store shape information. This can
      be used to check array conformance and conditions for numpy
broadcasting. (TODO)
- An IndexedBase object implements syntactic sugar that allows easy symbolic
representation of array operations, using implicit summation of
repeated indices.
- The IndexedBase object symbolizes a mathematical structure equivalent
to arrays, and is recognized as such for code generation and automatic
compilation and wrapping.
>>> from sympy.tensor import IndexedBase, Idx
>>> from sympy import symbols
>>> A = IndexedBase('A'); A
A
>>> type(A)
<class 'sympy.tensor.indexed.IndexedBase'>
When an IndexedBase object receives indices, it returns an array with named
axes, represented by an Indexed object:
>>> i, j = symbols('i j', integer=True)
>>> A[i, j, 2]
A[i, j, 2]
>>> type(A[i, j, 2])
<class 'sympy.tensor.indexed.Indexed'>
The IndexedBase constructor takes an optional shape argument. If given,
it overrides any shape information in the indices. (But not the index
ranges!)
>>> m, n, o, p = symbols('m n o p', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', n)
>>> A[i, j].shape
(m, n)
>>> B = IndexedBase('B', shape=(o, p))
>>> B[i, j].shape
(o, p)
"""
is_commutative = True
def __new__(cls, label, shape=None, **kw_args):
if isinstance(label, string_types):
label = Symbol(label)
elif isinstance(label, Symbol):
pass
else:
raise TypeError("Base label should be a string or Symbol.")
obj = Expr.__new__(cls, label, **kw_args)
if is_sequence(shape):
obj._shape = Tuple(*shape)
else:
obj._shape = sympify(shape)
return obj
@property
def args(self):
"""Returns the arguments used to create this IndexedBase object.
Examples
========
>>> from sympy import IndexedBase
>>> from sympy.abc import x, y
>>> IndexedBase('A', shape=(x, y)).args
(A, (x, y))
"""
if self._shape:
return self._args + (self._shape,)
else:
return self._args
def _hashable_content(self):
return Expr._hashable_content(self) + (self._shape,)
def __getitem__(self, indices, **kw_args):
if is_sequence(indices):
# Special case needed because M[*my_tuple] is a syntax error.
if self.shape and len(self.shape) != len(indices):
raise IndexException("Rank mismatch.")
return Indexed(self, *indices, **kw_args)
else:
if self.shape and len(self.shape) != 1:
raise IndexException("Rank mismatch.")
return Indexed(self, indices, **kw_args)
@property
def shape(self):
"""Returns the shape of the IndexedBase object.
Examples
========
>>> from sympy import IndexedBase, Idx, Symbol
>>> from sympy.abc import x, y
>>> IndexedBase('A', shape=(x, y)).shape
(x, y)
Note: If the shape of the IndexedBase is specified, it will override
any shape information given by the indices.
>>> A = IndexedBase('A', shape=(x, y))
>>> B = IndexedBase('B')
>>> i = Idx('i', 2)
>>> j = Idx('j', 1)
>>> A[i, j].shape
(x, y)
>>> B[i, j].shape
(2, 1)
"""
return self._shape
@property
def label(self):
"""Returns the label of the IndexedBase object.
Examples
========
>>> from sympy import IndexedBase
>>> from sympy.abc import x, y
>>> IndexedBase('A', shape=(x, y)).label
A
"""
return self.args[0]
def _sympystr(self, p):
return p.doprint(self.label)
class Idx(Expr):
"""Represents an integer index as an Integer or integer expression.
There are a number of ways to create an Idx object. The constructor
takes two arguments:
``label``
An integer or a symbol that labels the index.
``range``
Optionally you can specify a range as either
- Symbol or integer: This is interpreted as a dimension. Lower and
upper bounds are set to 0 and range - 1, respectively.
- tuple: The two elements are interpreted as the lower and upper
bounds of the range, respectively.
Note: the Idx constructor is rather pedantic in that it only accepts
integer arguments. The only exception is that you can use oo and -oo to
specify an unbounded range. For all other cases, both label and bounds
must be declared as integers, e.g. if n is given as an argument then
n.is_integer must return True.
For convenience, if the label is given as a string it is automatically
converted to an integer symbol. (Note: this conversion is not done for
range or dimension arguments.)
Examples
========
>>> from sympy.tensor import IndexedBase, Idx
>>> from sympy import symbols, oo
>>> n, i, L, U = symbols('n i L U', integer=True)
If a string is given for the label an integer Symbol is created and the
bounds are both None:
>>> idx = Idx('qwerty'); idx
qwerty
>>> idx.lower, idx.upper
(None, None)
Both upper and lower bounds can be specified:
>>> idx = Idx(i, (L, U)); idx
i
>>> idx.lower, idx.upper
(L, U)
When only a single bound is given it is interpreted as the dimension
and the lower bound defaults to 0:
>>> idx = Idx(i, n); idx.lower, idx.upper
(0, n - 1)
>>> idx = Idx(i, 4); idx.lower, idx.upper
(0, 3)
>>> idx = Idx(i, oo); idx.lower, idx.upper
(0, oo)
The label can be a literal integer instead of a string/Symbol:
>>> idx = Idx(2, n); idx.lower, idx.upper
(0, n - 1)
>>> idx.label
2
"""
is_integer = True
def __new__(cls, label, range=None, **kw_args):
from sympy.utilities.misc import filldedent
if isinstance(label, string_types):
label = Symbol(label, integer=True)
label, range = list(map(sympify, (label, range)))
if not label.is_integer:
raise TypeError("Idx object requires an integer label.")
elif is_sequence(range):
if len(range) != 2:
raise ValueError(filldedent("""
Idx range tuple must have length 2, but got %s""" % len(range)))
for bound in range:
if not (bound.is_integer or abs(bound) is S.Infinity):
raise TypeError("Idx object requires integer bounds.")
args = label, Tuple(*range)
elif isinstance(range, Expr):
if not (range.is_integer or range is S.Infinity):
raise TypeError("Idx object requires an integer dimension.")
args = label, Tuple(0, range - 1)
elif range:
raise TypeError(filldedent("""
The range must be an ordered iterable or
integer SymPy expression."""))
else:
args = label,
obj = Expr.__new__(cls, *args, **kw_args)
return obj
@property
def label(self):
"""Returns the label (Integer or integer expression) of the Idx object.
Examples
========
>>> from sympy import Idx, Symbol
>>> Idx(2).label
2
>>> j = Symbol('j', integer=True)
>>> Idx(j).label
j
>>> Idx(j + 1).label
j + 1
"""
return self.args[0]
@property
def lower(self):
"""Returns the lower bound of the Index.
Examples
========
>>> from sympy import Idx
>>> Idx('j', 2).lower
0
>>> Idx('j', 5).lower
0
>>> Idx('j').lower is None
True
"""
try:
return self.args[1][0]
except IndexError:
return
@property
def upper(self):
"""Returns the upper bound of the Index.
Examples
========
>>> from sympy import Idx
>>> Idx('j', 2).upper
1
>>> Idx('j', 5).upper
4
>>> Idx('j').upper is None
True
"""
try:
return self.args[1][1]
except IndexError:
return
def _sympystr(self, p):
return p.doprint(self.label)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable
"""Schedule for dense operator"""
from __future__ import absolute_import as _abs
import tvm
import tvm.autotvm as autotvm
from tvm.contrib import cublas
from .tensor_intrin import dp4a
from ..nn.dense import dense, dense_default
from .. import tag
from .. import generic
from ..util import traverse_inline, get_const_tuple
@autotvm.register_topi_compute(dense, ["cuda", "gpu"], "direct")
def dense_cuda(cfg, data, weight, bias=None, out_dtype=None):
"""Dense operator for cuda backend.
Parameters
----------
data : tvm.Tensor
2-D with shape [batch, in_dim]
weight : tvm.Tensor
2-D with shape [out_dim, in_dim]
bias : tvm.Tensor, optional
1-D with shape [out_dim]
Returns
-------
output : tvm.Tensor
2-D with shape [batch, out_dim]
"""
# pylint: disable=unused-argument
assert len(data.shape) == 2 and len(weight.shape) == 2, \
"only support 2-dim dense"
if bias is not None:
assert len(bias.shape) == 1
if out_dtype is None:
out_dtype = data.dtype
batch, in_dim = data.shape
out_dim, _ = weight.shape
target = tvm.target.current_target()
if "cublas" in target.libs:
assert out_dtype == data.dtype, "Mixed precision not supported."
matmul = cublas.matmul(data, weight, False, True)
if bias is not None:
matmul = tvm.compute((batch, out_dim), \
lambda i, j: matmul[i, j] + bias[j], \
tag=tag.BROADCAST)
return matmul
return dense_default(data, weight, bias, out_dtype)
@autotvm.register_topi_schedule(generic.schedule_dense, ["cuda", "gpu"], "direct")
def schedule_dense(cfg, outs):
"""Schedule for dense operator.
Parameters
----------
outs: Array of Tensor
The computation graph description of dense
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for dense.
"""
# pylint: disable=unused-argument
target = tvm.target.current_target()
if target.target_name == "cuda" and "cublas" in target.libs:
return generic.schedule_extern(outs)
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
s = tvm.create_schedule([x.op for x in outs])
def _schedule(Dense):
num_thread = 64
k = Dense.op.reduce_axis[0]
ko, kf = s[Dense].split(k, factor=num_thread)
DenseF = s.rfactor(Dense, kf)
if Dense.op in s.outputs:
Out = Dense
else:
Out = outs[0].op.output(0)
s[Dense].compute_at(s[Out], s[Out].op.axis[1])
s[Out].bind(s[Out].op.axis[0], tvm.thread_axis("blockIdx.y"))
s[Out].bind(s[Out].op.axis[1], tvm.thread_axis("blockIdx.x"))
tx = s[Dense].op.reduce_axis[0]
thread_x = tvm.thread_axis("threadIdx.x")
s[Dense].bind(tx, thread_x)
s[DenseF].compute_at(s[Dense], tx)
s[Dense].set_store_predicate(thread_x.var.equal(0))
s[Out].set_store_predicate(thread_x.var.equal(0))
scheduled_ops = []
def traverse(OP):
"""Internal travserse function"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(OP.tag):
if OP not in s.outputs:
s[OP].compute_inline()
for tensor in OP.input_tensors:
if tensor.op.input_tensors and tensor.op not in scheduled_ops:
traverse(tensor.op)
# schedule dense
elif OP.tag == 'dense':
Dense = OP.output(0)
_schedule(Dense)
else:
raise RuntimeError("Unsupported operator: %s" % OP.tag)
scheduled_ops.append(OP)
traverse(outs[0].op)
return s
@autotvm.register_topi_compute(dense, ['cuda'], ['int8'])
def dense_int8(cfg, data, weight, bias=None, out_dtype=None):
"""Dense operator for int8 on CUDA"""
if out_dtype is None:
out_dtype = data.dtype
batch, in_dim = get_const_tuple(data.shape)
out_dim, _ = get_const_tuple(weight.shape)
k = tvm.reduce_axis((0, in_dim), name='k')
matmul = tvm.compute((batch, out_dim),
lambda i, j: tvm.sum(data[i, k].astype(out_dtype) *
weight[j, k].astype(out_dtype), axis=[k]),
tag="dense_int8")
cfg.add_flop(batch * in_dim * out_dim * 2)
if bias is not None:
matmul = tvm.compute((batch, out_dim),
lambda i, j: matmul[i, j] + bias[j].astype(out_dtype),
tag=tag.BROADCAST)
cfg.add_flop(batch * out_dim)
return matmul
@autotvm.register_topi_schedule(generic.schedule_dense, ['cuda', 'gpu'], ['int8'])
def schedule_dense_int8(cfg, outs):
s = tvm.create_schedule([x.op for x in outs])
def _callback(op):
if "dense_int8" in op.tag:
_schedule_dense_int8(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
_dp4a = dp4a('shared', 'shared', 'local')
def _schedule_dense_int8(cfg, s, output):
data, weight = s[output].op.input_tensors
batch, in_dim = get_const_tuple(data.shape)
out_dim, _ = get_const_tuple(weight.shape)
in_dim_factor = 4
assert in_dim % in_dim_factor == 0, "Input dimension must be divisible by {}".format(in_dim_factor)
if in_dim % 16 == 0:
in_dim_factor = 16
# create tuning space
cfg.define_split("tile_y", batch, num_outputs=4)
cfg.define_split("tile_x", out_dim, num_outputs=4)
cfg.define_split("tile_k", in_dim // in_dim_factor, num_outputs=2)
cfg.define_knob('auto_unroll_max_step', [0, 512, 1500])
# create cache stage
AA = s.cache_read(data, 'shared', [output])
WW = s.cache_read(weight, 'shared', [output])
CC = s.cache_write(output, 'local')
# handle bias
if output.op not in s.outputs:
s[output].compute_inline()
output = s.outputs[0].output(0)
n, x = s[output].op.axis
# this is the scope to attach global config inside this kernel
kernel_scope, n = s[output].split(n, nparts=1)
ko = CC.op.reduce_axis[0]
ko, ki = s[CC].split(ko, factor=4)
ko, kt = cfg['tile_k'].apply(s, CC, ko)
s[CC].tensorize(ki, _dp4a)
by, vy, ty, yi = cfg['tile_y'].apply(s, output, n)
bx, vx, tx, xi = cfg['tile_x'].apply(s, output, x)
s[output].reorder(by, bx, vy, vx, ty, tx, yi, xi)
s[output].bind(by, tvm.thread_axis('blockIdx.y'))
s[output].bind(bx, tvm.thread_axis('blockIdx.x'))
s[output].bind(vy, tvm.thread_axis('vthread'))
s[output].bind(vx, tvm.thread_axis('vthread'))
s[output].bind(ty, tvm.thread_axis('threadIdx.y'))
s[output].bind(tx, tvm.thread_axis('threadIdx.x'))
n_ty = cfg['tile_y'].size[2]
n_tx = cfg['tile_x'].size[2]
s[CC].compute_at(s[output], tx)
yo, xo = CC.op.axis[:2]
s[CC].reorder(ko, kt, yo, xo, ki)
for load in [AA, WW]:
s[load].compute_at(s[CC], ko)
outer, inner = s[load].split(s[load].op.axis[-1], factor=in_dim_factor)
s[load].vectorize(inner)
fused = s[load].op.axis[:-1] + [outer]
fused = s[load].fuse(*fused)
fused, tx = s[load].split(fused, factor=n_tx)
fused, ty = s[load].split(fused, factor=n_ty)
s[load].bind(tx, tvm.thread_axis('threadIdx.x'))
s[load].bind(ty, tvm.thread_axis('threadIdx.y'))
s[output].pragma(kernel_scope, 'auto_unroll_max_step', cfg['auto_unroll_max_step'].val)
s[output].pragma(kernel_scope, 'unroll_explicit', False)
return s
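# Illustrative usage sketch (not part of the original file; names are assumed
# from the contemporaneous TVM/TOPI API): the compute and schedule registered
# above are normally reached through the generic topi entry points under a
# CUDA target, e.g.:
#
#     import tvm
#     import topi
#
#     A = tvm.placeholder((1, 1024), name='A')
#     W = tvm.placeholder((1000, 1024), name='W')
#     with tvm.target.create("cuda"):
#         C = topi.nn.dense(A, W)
#         s = topi.generic.schedule_dense([C])
#         func = tvm.build(s, [A, W, C], "cuda")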
|
|
from datetime import datetime, timedelta
import json
import urllib2
from django.contrib import messages
from django.contrib.auth import authenticate, logout, login
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect, render_to_response
from django.views.decorators.csrf import csrf_exempt
import tweetpony
from twython import Twython
from mainApp.models import postStatusToFaceBook, MainUser, UserSocialProfile, postStatusToTwitter, SocialMessage
def registration(request):
print(request.user)
return render(request, 'mainApp/registration.html')
def signUpUser(request):
userPass = request.POST['password']
userMail = request.POST['email']
userName = userMail
if userName and userPass and userMail:
user = User.objects.create_user(username=userName, email=userMail, password=userPass)
user.first_name = request.POST['firstName']
user.last_name = request.POST['lastName']
user.save()
mainUser = MainUser(mobile=request.POST['mobile'], user=user)
mainUser.save()
user = authenticate(username=userMail, password=userPass)
login(request, user)
return redirect("/HMator/streamPage")
else:
messages.add_message(request, messages.WARNING, 'Incorrect information. Please try again.')
return render(request, 'mainApp/registration.html', {'viewPage': 'signupbox'})
# request was empty
def signInUser(request):
userMail = request.POST['email']
userPass = request.POST['password']
logout(request)
if userMail and userPass:
user = authenticate(username=userMail, password=userPass)
if user is not None:
# the password verified for the user
if user.is_active:
print(user)
login(request, user)
print("User is valid, active and authenticated")
messages.add_message(request, messages.SUCCESS, 'Welcome! ' + userMail)
return redirect('/HMator/streamPage')
else:
messages.add_message(request, messages.WARNING,
'The password is valid, but the account has been disabled!')
return render(request, 'mainApp/registration.html', {'viewPage': 'loginbox'})
else:
# the authentication system was unable to verify the username and password
messages.add_message(request, messages.WARNING, 'Username or Password is wrong. Please try again.')
return render(request, 'mainApp/registration.html', {'viewPage': 'loginbox'})
else:
messages.add_message(request, messages.WARNING, 'Incorrect information. Please try again.')
return render(request, 'mainApp/registration.html', {'viewPage': 'loginbox'})
def signOut(request):
logout(request)
return redirect("/HMator/")
def streamPage(request):
print(datetime.now().time())
request.session['currentUserId']=request.user.id
if request.user.is_authenticated():
try:
user = request.user
fbSettings = UserSocialProfile.objects.get(serviceType='FACEBOOK', user=user)
twitterSettings = UserSocialProfile.objects.get(serviceType='TWITTER', user=user)
fbPosts = fbSettings.getLatestStreamFacebook()
twitterPosts = twitterSettings.getLatestStreamTwitter()
if request.session.get('statusPost'):
if request.session.get('fbStatus') == 'posted':
messages.add_message(request, messages.SUCCESS, 'Post successfully posted to Facebook.')
if request.session.get('twitterStatus') == 'posted':
messages.add_message(request, messages.SUCCESS, 'Post successfully posted to Twitter.')
else:
messages.add_message(request, messages.WARNING, 'Post failed to post to Facebook.')
request.session['statusPost'] = None
request.session['fbStatus'] = None
request.session['twitterStatus'] = None
context = {'fbSettings': fbSettings, 'fbPosts': fbPosts, 'twitterSettings': twitterSettings,
'twitterPosts': twitterPosts}
return render(request, 'mainApp/streams.html', context)
except UserSocialProfile.DoesNotExist:
messages.add_message(request, messages.INFO, 'Facebook or Twitter account is not connected.')
return render(request, 'mainApp/streams.html')
else:
messages.add_message(request, messages.WARNING, 'Please log in before continuing.')
return redirect("/HMator/")
def connectToFacebook(request):
appId = "Use your api key"
scope = "user_about_me,publish_actions,read_stream"
redirectUrl = "http://127.0.0.1:8000/HMator/facebook"
fbUrl = "http://graph.facebook.com/oauth/authorize?client_id=" + appId + "&redirect_uri=" + redirectUrl + "&scope=" + scope
return HttpResponseRedirect(fbUrl)
def saveFacebookSettings(request):
redirectUrl = "http://127.0.0.1:8000/HMator/facebook"
appId = "Use your api key"
appSecret = "Use your api key"
code = request.GET['code']
facebookAccessTokenUrl = "https://graph.facebook.com/oauth/access_token?client_id=" + appId + "&redirect_uri=" + redirectUrl + "&client_secret=" + appSecret + "&code=" + code
fbResponse = urllib2.urlopen(facebookAccessTokenUrl).read()
accessToken = ((fbResponse.split("="))[1]).split("&")[0]
request.session['accessToken'] = accessToken
print(request.session.get('accessToken'))
return redirect("/HMator/getFacebookInfo")
def getFacebookInfo(request):
if request.session.get('accessToken'):
user = MainUser.objects.get(user=request.user)
accessToken = request.session.get('accessToken')
requestedFields = "first_name,last_name,email,name"
requestUrl = "https://graph.facebook.com/me?fields=" + requestedFields + "&access_token=" + accessToken
result = urllib2.urlopen(requestUrl)
content = json.load(result)
facebookSetting = UserSocialProfile(userSocialId=content['id'], firstName=content['first_name'],
lastName=content['last_name'], accessToken=accessToken,
serviceType='FACEBOOK',
emailAddress=content['email'], fullName=content['name'], user=user)
facebookSetting.save()
return redirect("/HMator/streamPage")
else:
return redirect("/HMator/connectToFacebook")
@csrf_exempt
def autoMaticPostStatus(request):
print(request.POST)
print(request.session.get('currentUserId'))
user = User.objects.get(id=1)
mainUser = MainUser.objects.get(user=user)
print("-------------------------------------------------------------------")
twitterSettings = UserSocialProfile.objects.get(user=mainUser,serviceType='TWITTER')
fbSettings = UserSocialProfile.objects.get(user=mainUser,serviceType='FACEBOOK')
messageList = SocialMessage.objects.filter(user=mainUser)
currentTime = datetime.now().time()
print(currentTime)
print(messageList)
for msg in messageList:
print(msg.messageContent)
if currentTime.hour==msg.messageTime.hour:
print("***********************************************************************")
print(msg.messageContent)
# if fbSettings is not None:
# postStatusToFaceBook(fbSettings.accessToken,msg.messageContent)
# if twitterSettings is not None:
# postStatusToTwitter(twitterSettings.accessToken,twitterSettings.accessTokenSecret,msg.messageContent)
# return 'Post Messages'
def postStatus(request):
statusText = request.POST['status']
if statusText:
try:
if request.POST.get('facebook'):
fbSettings = UserSocialProfile.objects.get(serviceType='FACEBOOK', user=request.user)
if fbSettings and postStatusToFaceBook(fbSettings.accessToken, statusText):
request.session['fbStatus'] = 'posted'
if request.POST.get('twitter'):
twitterSettings = UserSocialProfile.objects.get(serviceType='TWITTER', user=request.user)
if twitterSettings and postStatusToTwitter(twitterSettings.accessToken,
twitterSettings.accessTokenSecret, statusText):
request.session['twitterStatus'] = 'posted'
request.session['statusPost'] = True
return redirect('/HMator/streamPage')
except UserSocialProfile.DoesNotExist:
messages.add_message(request, messages.INFO, 'Facebook or Twitter account is not connected.')
return redirect('/HMator/streamPage')
else:
messages.add_message(request, messages.WARNING,
'Incorrect information. Please retry by submitting your message again.')
return redirect('/HMator/streamPage')
def connectToTwitter(request):
appId = "Use your api key"
appSecret = "Use your api key"
twitter = Twython(appId, appSecret)
auth = twitter.get_authentication_tokens(callback_url='http://127.0.0.1:8000/HMator/saveTwitterSettings/')
OAUTH_TOKEN = auth['oauth_token']
OAUTH_TOKEN_SECRET = auth['oauth_token_secret']
print(OAUTH_TOKEN)
print(OAUTH_TOKEN_SECRET)
request.session['OAUTH_TOKEN'] = OAUTH_TOKEN
request.session['OAUTH_TOKEN_SECRET'] = OAUTH_TOKEN_SECRET
return redirect(auth['auth_url'])
def saveTwitterSettings(request):
if request.session.get('OAUTH_TOKEN_SECRET') and request.session.get('OAUTH_TOKEN'):
user = request.user
appId = "Use your api key"
appSecret = "Use your api key"
OAUTH_TOKEN = request.session.get('OAUTH_TOKEN')
OAUTH_TOKEN_SECRET = request.session.get('OAUTH_TOKEN_SECRET')
oauth_verifier = request.GET['oauth_verifier']
twitter = Twython(appId, appSecret, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
final_step = twitter.get_authorized_tokens(oauth_verifier)
FOAUTH_TOKEN = final_step['oauth_token']
FOAUTH_TOKEN_SECERT = final_step['oauth_token_secret']
request.session['FOAUTH_TOKEN'] = FOAUTH_TOKEN
request.session['FOAUTH_TOKEN_SECERT'] = FOAUTH_TOKEN_SECERT
return redirect("/HMator/getTwitterInfo")
else:
return redirect("/HMator/connectToTwitter")
def getTwitterInfo(request):
if request.session.get('FOAUTH_TOKEN') and request.session.get('FOAUTH_TOKEN_SECERT'):
user = MainUser.objects.get(user=request.user)
accessToken = request.session.get('FOAUTH_TOKEN')
accessTokenSecret = request.session.get('FOAUTH_TOKEN_SECERT')
appId = "Use your api key"
appSecret = "Use your api key"
twitter = Twython(appId, appSecret, accessToken, accessTokenSecret)
content = twitter.verify_credentials()
print(content['id'])
twitterSettings = UserSocialProfile(userSocialId=content['id'], accessToken=accessToken,
accessTokenSecret=accessTokenSecret,
serviceType='TWITTER', fullName=content['name'], user=user)
print(content['name'])
twitterSettings.save()
return redirect("/HMator/streamPage")
else:
return redirect("/HMator/saveTwitterSettings")
def settingsPage(request):
if request.user.is_authenticated():
mainUser = MainUser.objects.get(user=request.user)
userSocialProfileList = UserSocialProfile.objects.filter(user=mainUser)
accountList = []
for userSocialProfile in userSocialProfileList:
accountList.append(userSocialProfile.serviceType)
socialMessages = SocialMessage.objects.filter(user=mainUser)
return render(request, 'mainApp/settings.html', {'user': request.user, 'accountList': accountList,'socialMessages':socialMessages})
else:
messages.add_message(request, messages.WARNING,
'Please log in to continue.')
return redirect('/HMator/')
def updateUserSettings(request):
mainUser = MainUser.objects.get(id=request.POST['userId'])
if mainUser:
print(request.POST['firstName'])
mainUser.user.first_name = request.POST['firstName']
mainUser.user.last_name = request.POST['lastName']
mainUser.mobile = request.POST['phoneNumber']
mainUser.user.save(force_update=True)
mainUser.save(force_update=True)
messages.add_message(request, messages.INFO,
'User settings updated successfully.')
return redirect("/HMator/settingsPage")
else:
messages.add_message(request, messages.WARNING,
'Something went wrong. Please try again.')
return redirect("/HMator/settingsPage")
def saveSocialMessages(request):
mainUser = MainUser.objects.get(user=request.user)
messageTextList = request.POST.getlist('messageText')
messageHourList = request.POST.getlist('messageHour')
messageMinList = request.POST.getlist('messageMin')
existMessages = SocialMessage.objects.filter(user=mainUser)
for msg in existMessages:
msg.delete()
for idx,messageText in enumerate(messageTextList):
if messageHourList[idx]!='' and messageText!='':
socialMessage = SocialMessage(user=mainUser,messageContent=messageText,messageTime=datetime.now().replace(hour=int(messageHourList[idx]),minute=int(messageMinList[idx])).time())
socialMessage.save()
messages.add_message(request, messages.INFO,
'Messages saved successfully.')
return redirect("/HMator/settingsPage")
def deleteAllSocialMessage(request):
user = MainUser.objects.get(user = request.user)
existMessages = SocialMessage.objects.filter(user=user)
for msg in existMessages:
msg.delete()
return render_to_response('mainApp/socialMessageList.html')
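# Illustrative sketch (not part of the original file): one possible URL
# configuration for these views, inferred from the redirect targets used above
# ("/HMator/streamPage", "/HMator/facebook", ...). The app module path and the
# Django 1.x url() style are assumptions.
#
#     from django.conf.urls import url
#     from mainApp import views
#
#     urlpatterns = [
#         url(r'^$', views.registration),
#         url(r'^signUpUser$', views.signUpUser),
#         url(r'^signInUser$', views.signInUser),
#         url(r'^streamPage$', views.streamPage),
#         url(r'^connectToFacebook$', views.connectToFacebook),
#         url(r'^facebook$', views.saveFacebookSettings),
#         url(r'^getFacebookInfo$', views.getFacebookInfo),
#         url(r'^connectToTwitter$', views.connectToTwitter),
#         url(r'^saveTwitterSettings/$', views.saveTwitterSettings),
#         url(r'^getTwitterInfo$', views.getTwitterInfo),
#         url(r'^settingsPage$', views.settingsPage),
#         # ... plus the remaining views (postStatus, signOut, etc.)
#     ]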
|
|
"""
Testing signals emitted on changing m2m relations.
"""
from django.db import models
from django.test import TestCase
from .models import Part, Car, SportsCar, Person
class ManyToManySignalsTest(TestCase):
def m2m_changed_signal_receiver(self, signal, sender, **kwargs):
message = {
'instance': kwargs['instance'],
'action': kwargs['action'],
'reverse': kwargs['reverse'],
'model': kwargs['model'],
}
if kwargs['pk_set']:
message['objects'] = list(
kwargs['model'].objects.filter(pk__in=kwargs['pk_set'])
)
self.m2m_changed_messages.append(message)
def setUp(self):
self.m2m_changed_messages = []
self.vw = Car.objects.create(name='VW')
self.bmw = Car.objects.create(name='BMW')
self.toyota = Car.objects.create(name='Toyota')
self.wheelset = Part.objects.create(name='Wheelset')
self.doors = Part.objects.create(name='Doors')
self.engine = Part.objects.create(name='Engine')
self.airbag = Part.objects.create(name='Airbag')
self.sunroof = Part.objects.create(name='Sunroof')
self.alice = Person.objects.create(name='Alice')
self.bob = Person.objects.create(name='Bob')
self.chuck = Person.objects.create(name='Chuck')
self.daisy = Person.objects.create(name='Daisy')
def tearDown(self):
# disconnect all signal handlers
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Car.default_parts.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Car.optional_parts.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Person.fans.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Person.friends.through
)
def test_m2m_relations_add_remove_clear(self):
expected_messages = []
# Install a listener on one of the two m2m relations.
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Car.optional_parts.through
)
# Test the add, remove and clear methods on both sides of the
# many-to-many relation
# adding a default part to our car - no signal listener installed
self.vw.default_parts.add(self.sunroof)
# Now install a listener
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Car.default_parts.through
)
self.vw.default_parts.add(self.wheelset, self.doors, self.engine)
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# give the BMW and Toyota some doors as well
self.doors.car_set.add(self.bmw, self.toyota)
expected_messages.append({
'instance': self.doors,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
expected_messages.append({
'instance': self.doors,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# remove the engine from the self.vw and the airbag (which is not set
# but is returned)
self.vw.default_parts.remove(self.engine, self.airbag)
expected_messages.append({
'instance': self.vw,
'action': 'pre_remove',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.engine],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_remove',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.engine],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# give the self.vw some optional parts (second relation to same model)
self.vw.optional_parts.add(self.airbag, self.sunroof)
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.sunroof],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.sunroof],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# add airbag to all the cars (even though the self.vw already has one)
self.airbag.cars_optional.add(self.vw, self.bmw, self.toyota)
expected_messages.append({
'instance': self.airbag,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
expected_messages.append({
'instance': self.airbag,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# remove airbag from the self.vw (reverse relation with custom
# related_name)
self.airbag.cars_optional.remove(self.vw)
expected_messages.append({
'instance': self.airbag,
'action': 'pre_remove',
'reverse': True,
'model': Car,
'objects': [self.vw],
})
expected_messages.append({
'instance': self.airbag,
'action': 'post_remove',
'reverse': True,
'model': Car,
'objects': [self.vw],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# clear all parts of the self.vw
self.vw.default_parts.clear()
expected_messages.append({
'instance': self.vw,
'action': 'pre_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': self.vw,
'action': 'post_clear',
'reverse': False,
'model': Part,
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# take all the doors off of cars
self.doors.car_set.clear()
expected_messages.append({
'instance': self.doors,
'action': 'pre_clear',
'reverse': True,
'model': Car,
})
expected_messages.append({
'instance': self.doors,
'action': 'post_clear',
'reverse': True,
'model': Car,
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# take all the airbags off of cars (clear reverse relation with custom
# related_name)
self.airbag.cars_optional.clear()
expected_messages.append({
'instance': self.airbag,
'action': 'pre_clear',
'reverse': True,
'model': Car,
})
expected_messages.append({
'instance': self.airbag,
'action': 'post_clear',
'reverse': True,
'model': Car,
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# alternative ways of setting relation:
self.vw.default_parts.create(name='Windows')
p6 = Part.objects.get(name='Windows')
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [p6],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [p6],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# direct assignment clears the set first, then adds
self.vw.default_parts = [self.wheelset,self.doors,self.engine]
expected_messages.append({
'instance': self.vw,
'action': 'pre_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': self.vw,
'action': 'post_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# Check that signals still work when model inheritance is involved
c4 = SportsCar.objects.create(name='Bugatti', price='1000000')
c4b = Car.objects.get(name='Bugatti')
c4.default_parts = [self.doors]
expected_messages.append({
'instance': c4,
'action': 'pre_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': c4,
'action': 'post_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': c4,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors],
})
expected_messages.append({
'instance': c4,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
self.engine.car_set.add(c4)
expected_messages.append({
'instance': self.engine,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [c4b],
})
expected_messages.append({
'instance': self.engine,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [c4b],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_with_self(self):
expected_messages = []
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Person.fans.through
)
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Person.friends.through
)
self.alice.friends = [self.bob, self.chuck]
expected_messages.append({
'instance': self.alice,
'action': 'pre_clear',
'reverse': False,
'model': Person,
})
expected_messages.append({
'instance': self.alice,
'action': 'post_clear',
'reverse': False,
'model': Person,
})
expected_messages.append({
'instance': self.alice,
'action': 'pre_add',
'reverse': False,
'model': Person,
'objects': [self.bob, self.chuck],
})
expected_messages.append({
'instance': self.alice,
'action': 'post_add',
'reverse': False,
'model': Person,
'objects': [self.bob, self.chuck],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
self.alice.fans = [self.daisy]
expected_messages.append({
'instance': self.alice,
'action': 'pre_clear',
'reverse': False,
'model': Person,
})
expected_messages.append({
'instance': self.alice,
'action': 'post_clear',
'reverse': False,
'model': Person,
})
expected_messages.append({
'instance': self.alice,
'action': 'pre_add',
'reverse': False,
'model': Person,
'objects': [self.daisy],
})
expected_messages.append({
'instance': self.alice,
'action': 'post_add',
'reverse': False,
'model': Person,
'objects': [self.daisy],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
self.chuck.idols = [self.alice,self.bob]
expected_messages.append({
'instance': self.chuck,
'action': 'pre_clear',
'reverse': True,
'model': Person,
})
expected_messages.append({
'instance': self.chuck,
'action': 'post_clear',
'reverse': True,
'model': Person,
})
expected_messages.append({
'instance': self.chuck,
'action': 'pre_add',
'reverse': True,
'model': Person,
'objects': [self.alice, self.bob],
})
expected_messages.append({
'instance': self.chuck,
'action': 'post_add',
'reverse': True,
'model': Person,
'objects': [self.alice, self.bob],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
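# Illustrative sketch (not part of the original file): the models these tests
# exercise, reconstructed from the relations used above. Field types and
# lengths are assumptions; the related_name values follow from the accessors
# the tests use (car_set, cars_optional, idols).
#
#     class Part(models.Model):
#         name = models.CharField(max_length=20)
#
#     class Car(models.Model):
#         name = models.CharField(max_length=20)
#         default_parts = models.ManyToManyField(Part)
#         optional_parts = models.ManyToManyField(Part, related_name='cars_optional')
#
#     class SportsCar(Car):
#         price = models.IntegerField()
#
#     class Person(models.Model):
#         name = models.CharField(max_length=20)
#         fans = models.ManyToManyField('self', related_name='idols', symmetrical=False)
#         friends = models.ManyToManyField('self')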
|
|
""" terminal reporting of the full testing process.
This is a good source for looking at the various reporting hooks.
"""
from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \
EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED
import pytest
import py
import sys
import time
import platform
import _pytest._pluggy as pluggy
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group._addoption('-v', '--verbose', action="count",
dest="verbose", default=0, help="increase verbosity."),
group._addoption('-q', '--quiet', action="count",
dest="quiet", default=0, help="decrease verbosity."),
group._addoption('-r',
action="store", dest="reportchars", default='', metavar="chars",
help="show extra test summary info as specified by chars (f)ailed, "
"(E)error, (s)skipped, (x)failed, (X)passed, "
"(p)passed, (P)passed with output, (a)all except pP. "
"The pytest warnings are displayed at all times except when "
"--disable-pytest-warnings is set")
group._addoption('--disable-pytest-warnings', default=False,
dest='disablepytestwarnings', action='store_true',
help='disable warnings summary, overrides -r w flag')
group._addoption('-l', '--showlocals',
action="store_true", dest="showlocals", default=False,
help="show locals in tracebacks (disabled by default).")
group._addoption('--tb', metavar="style",
action="store", dest="tbstyle", default='auto',
choices=['auto', 'long', 'short', 'no', 'line', 'native'],
help="traceback print mode (auto/long/short/line/native/no).")
group._addoption('--fulltrace', '--full-trace',
action="store_true", default=False,
help="don't cut any tracebacks (default is to cut).")
group._addoption('--color', metavar="color",
action="store", dest="color", default='auto',
choices=['yes', 'no', 'auto'],
help="color terminal output (yes/no/auto).")
def pytest_configure(config):
config.option.verbose -= config.option.quiet
reporter = TerminalReporter(config, sys.stdout)
config.pluginmanager.register(reporter, 'terminalreporter')
if config.option.debug or config.option.traceconfig:
def mywriter(tags, args):
msg = " ".join(map(str, args))
reporter.write_line("[traceconfig] " + msg)
config.trace.root.setprocessor("pytest:config", mywriter)
def getreportopt(config):
reportopts = ""
reportchars = config.option.reportchars
if not config.option.disablepytestwarnings and 'w' not in reportchars:
reportchars += 'w'
elif config.option.disablepytestwarnings and 'w' in reportchars:
reportchars = reportchars.replace('w', '')
if reportchars:
for char in reportchars:
if char not in reportopts and char != 'a':
reportopts += char
elif char == 'a':
reportopts = 'fEsxXw'
return reportopts
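# Worked examples of the logic above (derived directly from the code, added
# here for clarity): with the defaults (-r not given, warnings not disabled)
# the 'w' character is appended, so getreportopt() returns "w"; "-r fE"
# yields "fEw"; "-r a" expands to "fEsxXw"; and "--disable-pytest-warnings"
# combined with "-r w" strips the 'w' again, returning "".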
def pytest_report_teststatus(report):
if report.passed:
letter = "."
elif report.skipped:
letter = "s"
elif report.failed:
letter = "F"
if report.when != "call":
letter = "f"
return report.outcome, letter, report.outcome.upper()
class WarningReport:
def __init__(self, code, message, nodeid=None, fslocation=None):
self.code = code
self.message = message
self.nodeid = nodeid
self.fslocation = fslocation
class TerminalReporter:
def __init__(self, config, file=None):
import _pytest.config
self.config = config
self.verbosity = self.config.option.verbose
self.showheader = self.verbosity >= 0
self.showfspath = self.verbosity >= 0
self.showlongtestinfo = self.verbosity > 0
self._numcollected = 0
self.stats = {}
self.startdir = py.path.local()
if file is None:
file = sys.stdout
self._tw = self.writer = _pytest.config.create_terminal_writer(config,
file)
self.currentfspath = None
self.reportchars = getreportopt(config)
self.hasmarkup = self._tw.hasmarkup
self.isatty = file.isatty()
def hasopt(self, char):
char = {'xfailed': 'x', 'skipped': 's'}.get(char, char)
return char in self.reportchars
def write_fspath_result(self, nodeid, res):
fspath = self.config.rootdir.join(nodeid.split("::")[0])
if fspath != self.currentfspath:
self.currentfspath = fspath
fspath = self.startdir.bestrelpath(fspath)
self._tw.line()
self._tw.write(fspath + " ")
self._tw.write(res)
def write_ensure_prefix(self, prefix, extra="", **kwargs):
if self.currentfspath != prefix:
self._tw.line()
self.currentfspath = prefix
self._tw.write(prefix)
if extra:
self._tw.write(extra, **kwargs)
self.currentfspath = -2
def ensure_newline(self):
if self.currentfspath:
self._tw.line()
self.currentfspath = None
def write(self, content, **markup):
self._tw.write(content, **markup)
def write_line(self, line, **markup):
if not py.builtin._istext(line):
line = py.builtin.text(line, errors="replace")
self.ensure_newline()
self._tw.line(line, **markup)
def rewrite(self, line, **markup):
line = str(line)
self._tw.write("\r" + line, **markup)
def write_sep(self, sep, title=None, **markup):
self.ensure_newline()
self._tw.sep(sep, title, **markup)
def section(self, title, sep="=", **kw):
self._tw.sep(sep, title, **kw)
def line(self, msg, **kw):
self._tw.line(msg, **kw)
def pytest_internalerror(self, excrepr):
for line in py.builtin.text(excrepr).split("\n"):
self.write_line("INTERNALERROR> " + line)
return 1
def pytest_logwarning(self, code, fslocation, message, nodeid):
warnings = self.stats.setdefault("warnings", [])
if isinstance(fslocation, tuple):
fslocation = "%s:%d" % fslocation
warning = WarningReport(code=code, fslocation=fslocation,
message=message, nodeid=nodeid)
warnings.append(warning)
def pytest_plugin_registered(self, plugin):
if self.config.option.traceconfig:
msg = "PLUGIN registered: %s" % (plugin,)
# XXX this event may happen during setup/teardown time
# which unfortunately captures our output here
# which garbles our output if we use self.write_line
self.write_line(msg)
def pytest_deselected(self, items):
self.stats.setdefault('deselected', []).extend(items)
def pytest_runtest_logstart(self, nodeid, location):
# ensure that the path is printed before the
# 1st test of a module starts running
if self.showlongtestinfo:
line = self._locationline(nodeid, *location)
self.write_ensure_prefix(line, "")
elif self.showfspath:
fsid = nodeid.split("::")[0]
self.write_fspath_result(fsid, "")
def pytest_runtest_logreport(self, report):
rep = report
res = self.config.hook.pytest_report_teststatus(report=rep)
cat, letter, word = res
self.stats.setdefault(cat, []).append(rep)
self._tests_ran = True
if not letter and not word:
# probably passed setup/teardown
return
if self.verbosity <= 0:
if not hasattr(rep, 'node') and self.showfspath:
self.write_fspath_result(rep.nodeid, letter)
else:
self._tw.write(letter)
else:
if isinstance(word, tuple):
word, markup = word
else:
if rep.passed:
markup = {'green':True}
elif rep.failed:
markup = {'red':True}
elif rep.skipped:
markup = {'yellow':True}
line = self._locationline(rep.nodeid, *rep.location)
if not hasattr(rep, 'node'):
self.write_ensure_prefix(line, word, **markup)
#self._tw.write(word, **markup)
else:
self.ensure_newline()
if hasattr(rep, 'node'):
self._tw.write("[%s] " % rep.node.gateway.id)
self._tw.write(word, **markup)
self._tw.write(" " + line)
self.currentfspath = -2
def pytest_collection(self):
if not self.isatty and self.config.option.verbose >= 1:
self.write("collecting ... ", bold=True)
def pytest_collectreport(self, report):
if report.failed:
self.stats.setdefault("error", []).append(report)
elif report.skipped:
self.stats.setdefault("skipped", []).append(report)
items = [x for x in report.result if isinstance(x, pytest.Item)]
self._numcollected += len(items)
if self.isatty:
#self.write_fspath_result(report.nodeid, 'E')
self.report_collect()
def report_collect(self, final=False):
if self.config.option.verbose < 0:
return
errors = len(self.stats.get('error', []))
skipped = len(self.stats.get('skipped', []))
if final:
line = "collected "
else:
line = "collecting "
line += str(self._numcollected) + " items"
if errors:
line += " / %d errors" % errors
if skipped:
line += " / %d skipped" % skipped
if self.isatty:
if final:
line += " \n"
self.rewrite(line, bold=True)
else:
self.write_line(line)
def pytest_collection_modifyitems(self):
self.report_collect(True)
@pytest.hookimpl(trylast=True)
def pytest_sessionstart(self, session):
self._sessionstarttime = time.time()
if not self.showheader:
return
self.write_sep("=", "test session starts", bold=True)
verinfo = platform.python_version()
msg = "platform %s -- Python %s" % (sys.platform, verinfo)
if hasattr(sys, 'pypy_version_info'):
verinfo = ".".join(map(str, sys.pypy_version_info[:3]))
msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3])
msg += ", pytest-%s, py-%s, pluggy-%s" % (
pytest.__version__, py.__version__, pluggy.__version__)
if self.verbosity > 0 or self.config.option.debug or \
getattr(self.config.option, 'pastebin', None):
msg += " -- " + str(sys.executable)
self.write_line(msg)
lines = self.config.hook.pytest_report_header(
config=self.config, startdir=self.startdir)
lines.reverse()
for line in flatten(lines):
self.write_line(line)
def pytest_report_header(self, config):
inifile = ""
if config.inifile:
inifile = config.rootdir.bestrelpath(config.inifile)
lines = ["rootdir: %s, inifile: %s" %(config.rootdir, inifile)]
plugininfo = config.pluginmanager.list_plugin_distinfo()
if plugininfo:
lines.append(
"plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
return lines
def pytest_collection_finish(self, session):
if self.config.option.collectonly:
self._printcollecteditems(session.items)
if self.stats.get('failed'):
self._tw.sep("!", "collection failures")
for rep in self.stats.get('failed'):
rep.toterminal(self._tw)
return 1
return 0
if not self.showheader:
return
#for i, testarg in enumerate(self.config.args):
# self.write_line("test path %d: %s" %(i+1, testarg))
def _printcollecteditems(self, items):
# to print out items and their parent collectors
# we take care to leave out Instances aka ()
# because later versions are going to get rid of them anyway
if self.config.option.verbose < 0:
if self.config.option.verbose < -1:
counts = {}
for item in items:
name = item.nodeid.split('::', 1)[0]
counts[name] = counts.get(name, 0) + 1
for name, count in sorted(counts.items()):
self._tw.line("%s: %d" % (name, count))
else:
for item in items:
nodeid = item.nodeid
nodeid = nodeid.replace("::()::", "::")
self._tw.line(nodeid)
return
stack = []
indent = ""
for item in items:
needed_collectors = item.listchain()[1:] # strip root node
while stack:
if stack == needed_collectors[:len(stack)]:
break
stack.pop()
for col in needed_collectors[len(stack):]:
stack.append(col)
#if col.name == "()":
# continue
indent = (len(stack) - 1) * " "
self._tw.line("%s%s" % (indent, col))
@pytest.hookimpl(hookwrapper=True)
def pytest_sessionfinish(self, exitstatus):
outcome = yield
outcome.get_result()
self._tw.line("")
summary_exit_codes = (
EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, EXIT_USAGEERROR,
EXIT_NOTESTSCOLLECTED)
if exitstatus in summary_exit_codes:
self.config.hook.pytest_terminal_summary(terminalreporter=self,
exitstatus=exitstatus)
self.summary_errors()
self.summary_failures()
self.summary_warnings()
self.summary_passes()
if exitstatus == EXIT_INTERRUPTED:
self._report_keyboardinterrupt()
del self._keyboardinterrupt_memo
self.summary_deselected()
self.summary_stats()
def pytest_keyboard_interrupt(self, excinfo):
self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
def pytest_unconfigure(self):
if hasattr(self, '_keyboardinterrupt_memo'):
self._report_keyboardinterrupt()
def _report_keyboardinterrupt(self):
excrepr = self._keyboardinterrupt_memo
msg = excrepr.reprcrash.message
self.write_sep("!", msg)
if "KeyboardInterrupt" in msg:
if self.config.option.fulltrace:
excrepr.toterminal(self._tw)
else:
self._tw.line("to show a full traceback on KeyboardInterrupt use --fulltrace", yellow=True)
excrepr.reprcrash.toterminal(self._tw)
def _locationline(self, nodeid, fspath, lineno, domain):
def mkrel(nodeid):
line = self.config.cwd_relative_nodeid(nodeid)
if domain and line.endswith(domain):
line = line[:-len(domain)]
l = domain.split("[")
l[0] = l[0].replace('.', '::') # don't replace '.' in params
line += "[".join(l)
return line
# collect_fspath comes from testid which has a "/"-normalized path
if fspath:
res = mkrel(nodeid).replace("::()", "") # parens-normalization
if nodeid.split("::")[0] != fspath.replace("\\", "/"):
res += " <- " + self.startdir.bestrelpath(fspath)
else:
res = "[location]"
return res + " "
def _getfailureheadline(self, rep):
if hasattr(rep, 'location'):
fspath, lineno, domain = rep.location
return domain
else:
return "test session" # XXX?
def _getcrashline(self, rep):
try:
return str(rep.longrepr.reprcrash)
except AttributeError:
try:
return str(rep.longrepr)[:50]
except AttributeError:
return ""
#
# summaries for sessionfinish
#
def getreports(self, name):
l = []
for x in self.stats.get(name, []):
if not hasattr(x, '_pdbshown'):
l.append(x)
return l
def summary_warnings(self):
if self.hasopt("w"):
warnings = self.stats.get("warnings")
if not warnings:
return
self.write_sep("=", "pytest-warning summary")
for w in warnings:
self._tw.line("W%s %s %s" % (w.code,
w.fslocation, w.message))
def summary_passes(self):
if self.config.option.tbstyle != "no":
if self.hasopt("P"):
reports = self.getreports('passed')
if not reports:
return
self.write_sep("=", "PASSES")
for rep in reports:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg)
self._outrep_summary(rep)
def print_teardown_sections(self, rep):
for secname, content in rep.sections:
if 'teardown' in secname:
self._tw.sep('-', secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_failures(self):
if self.config.option.tbstyle != "no":
reports = self.getreports('failed')
if not reports:
return
self.write_sep("=", "FAILURES")
for rep in reports:
if self.config.option.tbstyle == "line":
line = self._getcrashline(rep)
self.write_line(line)
else:
msg = self._getfailureheadline(rep)
markup = {'red': True, 'bold': True}
self.write_sep("_", msg, **markup)
self._outrep_summary(rep)
for report in self.getreports(''):
if report.nodeid == rep.nodeid and report.when == 'teardown':
self.print_teardown_sections(report)
def summary_errors(self):
if self.config.option.tbstyle != "no":
reports = self.getreports('error')
if not reports:
return
self.write_sep("=", "ERRORS")
for rep in self.stats['error']:
msg = self._getfailureheadline(rep)
if not hasattr(rep, 'when'):
# collect
msg = "ERROR collecting " + msg
elif rep.when == "setup":
msg = "ERROR at setup of " + msg
elif rep.when == "teardown":
msg = "ERROR at teardown of " + msg
self.write_sep("_", msg)
self._outrep_summary(rep)
def _outrep_summary(self, rep):
rep.toterminal(self._tw)
for secname, content in rep.sections:
self._tw.sep("-", secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_stats(self):
session_duration = time.time() - self._sessionstarttime
(line, color) = build_summary_stats_line(self.stats)
msg = "%s in %.2f seconds" % (line, session_duration)
markup = {color: True, 'bold': True}
if self.verbosity >= 0:
self.write_sep("=", msg, **markup)
if self.verbosity == -1:
self.write_line(msg, **markup)
def summary_deselected(self):
if 'deselected' in self.stats:
self.write_sep("=", "%d tests deselected" % (
len(self.stats['deselected'])), bold=True)
def repr_pythonversion(v=None):
if v is None:
v = sys.version_info
try:
return "%s.%s.%s-%s-%s" % v
except (TypeError, ValueError):
return str(v)
def flatten(l):
for x in l:
if isinstance(x, (list, tuple)):
for y in flatten(x):
yield y
else:
yield x
def build_summary_stats_line(stats):
keys = ("failed passed skipped deselected "
"xfailed xpassed warnings error").split()
key_translation = {'warnings': 'pytest-warnings'}
unknown_key_seen = False
for key in stats.keys():
if key not in keys:
if key: # setup/teardown reports have an empty key, ignore them
keys.append(key)
unknown_key_seen = True
parts = []
for key in keys:
val = stats.get(key, None)
if val:
key_name = key_translation.get(key, key)
parts.append("%d %s" % (len(val), key_name))
if parts:
line = ", ".join(parts)
else:
line = "no tests ran"
if 'failed' in stats or 'error' in stats:
color = 'red'
elif 'warnings' in stats or unknown_key_seen:
color = 'yellow'
elif 'passed' in stats:
color = 'green'
else:
color = 'yellow'
return (line, color)
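# Worked example (derived from the function above, added for clarity): for
# stats = {'passed': [r1, r2], 'failed': [r3], 'warnings': [w1]} the keys are
# visited in the fixed order, giving ("1 failed, 2 passed, 1 pytest-warnings",
# "red"); an empty stats dict gives ("no tests ran", "yellow").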
def _plugin_nameversions(plugininfo):
l = []
for plugin, dist in plugininfo:
# gets us name and version!
name = '{dist.project_name}-{dist.version}'.format(dist=dist)
# questionable convenience, but it keeps things short
if name.startswith("pytest-"):
name = name[7:]
# we decided to print python package names
# they can have more than one plugin
if name not in l:
l.append(name)
return l
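# Illustrative sketch (not part of the original module): the reporting hooks
# invoked above can also be implemented from a test suite's conftest.py. A
# minimal example, assuming only the hook signatures used in this module:
#
#     # conftest.py
#     def pytest_report_header(config, startdir):
#         return ["project: example", "startdir: %s" % startdir]
#
#     def pytest_terminal_summary(terminalreporter, exitstatus):
#         terminalreporter.write_sep("-", "exit status: %s" % exitstatus)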
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.videointelligence.v1p1beta1 VideoIntelligenceService API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import grpc
from google.cloud.videointelligence_v1p1beta1.gapic import enums
from google.cloud.videointelligence_v1p1beta1.gapic import video_intelligence_service_client_config
from google.cloud.videointelligence_v1p1beta1.gapic.transports import video_intelligence_service_grpc_transport
from google.cloud.videointelligence_v1p1beta1.proto import video_intelligence_pb2
from google.cloud.videointelligence_v1p1beta1.proto import video_intelligence_pb2_grpc
from google.longrunning import operations_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-cloud-videointelligence', ).version
class VideoIntelligenceServiceClient(object):
"""Service that implements Google Cloud Video Intelligence API."""
SERVICE_ADDRESS = 'videointelligence.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
VideoIntelligenceServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
def __init__(self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None):
"""Constructor.
Args:
transport (Union[~.VideoIntelligenceServiceGrpcTransport,
Callable[[~.Credentials, type], ~.VideoIntelligenceServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
'The `client_config` argument is deprecated.',
PendingDeprecationWarning,
stacklevel=2)
else:
client_config = video_intelligence_service_client_config.config
if channel:
warnings.warn(
'The `channel` argument is deprecated; use '
'`transport` instead.',
PendingDeprecationWarning,
stacklevel=2)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=video_intelligence_service_grpc_transport.
VideoIntelligenceServiceGrpcTransport,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.')
self.transport = transport
else:
self.transport = video_intelligence_service_grpc_transport.VideoIntelligenceServiceGrpcTransport(
address=self.SERVICE_ADDRESS,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION, )
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME], )
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def annotate_video(self,
input_uri=None,
input_content=None,
features=None,
video_context=None,
output_uri=None,
location_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Performs asynchronous video annotation. Progress and results can be
retrieved through the ``google.longrunning.Operations`` interface.
``Operation.metadata`` contains ``AnnotateVideoProgress`` (progress).
``Operation.response`` contains ``AnnotateVideoResponse`` (results).
Example:
>>> from google.cloud import videointelligence_v1p1beta1
>>> from google.cloud.videointelligence_v1p1beta1 import enums
>>>
>>> client = videointelligence_v1p1beta1.VideoIntelligenceServiceClient()
>>>
>>> input_uri = 'gs://demomaker/cat.mp4'
>>> features_element = enums.Feature.LABEL_DETECTION
>>> features = [features_element]
>>>
>>> response = client.annotate_video(input_uri=input_uri, features=features)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
input_uri (str): Input video location. Currently, only `Google Cloud
Storage <https://cloud.google.com/storage/>`__ URIs are supported, which
must be specified in the following format: ``gs://bucket-id/object-id``
(other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For
more information, see `Request
URIs <https://cloud.google.com/storage/docs/reference-uris>`__. A video
URI may include wildcards in ``object-id``, and thus identify multiple
videos. Supported wildcards: '\*' to match 0 or more characters; '?' to
match 1 character. If unset, the input video should be embedded in the
request as ``input_content``. If set, ``input_content`` should be unset.
input_content (bytes): The video data bytes. If unset, the input video(s) should be specified
via ``input_uri``. If set, ``input_uri`` should be unset.
features (list[~google.cloud.videointelligence_v1p1beta1.types.Feature]): Requested video annotation features.
video_context (Union[dict, ~google.cloud.videointelligence_v1p1beta1.types.VideoContext]): Additional video context and/or feature-specific parameters.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.videointelligence_v1p1beta1.types.VideoContext`
output_uri (str): Optional location where the output (in JSON format) should be stored.
Currently, only `Google Cloud
Storage <https://cloud.google.com/storage/>`__ URIs are supported, which
must be specified in the following format: ``gs://bucket-id/object-id``
(other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For
more information, see `Request
URIs <https://cloud.google.com/storage/docs/reference-uris>`__.
location_id (str): Optional cloud region where annotation should take place. Supported
cloud regions: ``us-east1``, ``us-west1``, ``europe-west1``,
``asia-east1``. If no region is specified, a region will be determined
based on video file location.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.videointelligence_v1p1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'annotate_video' not in self._inner_api_calls:
self._inner_api_calls[
'annotate_video'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.annotate_video,
default_retry=self._method_configs['AnnotateVideo'].retry,
default_timeout=self._method_configs['AnnotateVideo'].
timeout,
client_info=self._client_info,
)
request = video_intelligence_pb2.AnnotateVideoRequest(
input_uri=input_uri,
input_content=input_content,
features=features,
video_context=video_context,
output_uri=output_uri,
location_id=location_id,
)
operation = self._inner_api_calls['annotate_video'](
request, retry=retry, timeout=timeout, metadata=metadata)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
video_intelligence_pb2.AnnotateVideoResponse,
metadata_type=video_intelligence_pb2.AnnotateVideoProgress,
)
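# Illustrative sketch (not part of the original module): besides the callback
# style shown in the annotate_video docstring, the returned operation future
# can be waited on directly; the timeout value below is an arbitrary example.
#
#     client = VideoIntelligenceServiceClient()
#     operation = client.annotate_video(
#         input_uri='gs://demomaker/cat.mp4',
#         features=[enums.Feature.LABEL_DETECTION])
#     response = operation.result(timeout=600)  # blocks until the operation completes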
|
|
"""Tests for the Risco alarm control panel device."""
from unittest.mock import MagicMock, PropertyMock, patch
import pytest
from homeassistant.components.alarm_control_panel import DOMAIN as ALARM_DOMAIN
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_CUSTOM_BYPASS,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.components.risco import CannotConnectError, UnauthorizedError
from homeassistant.components.risco.const import DOMAIN
from homeassistant.const import (
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
STATE_UNKNOWN,
)
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.entity_component import async_update_entity
from .util import TEST_CONFIG, TEST_SITE_UUID, setup_risco
from tests.common import MockConfigEntry
FIRST_ENTITY_ID = "alarm_control_panel.risco_test_site_name_partition_0"
SECOND_ENTITY_ID = "alarm_control_panel.risco_test_site_name_partition_1"
CODES_REQUIRED_OPTIONS = {"code_arm_required": True, "code_disarm_required": True}
TEST_RISCO_TO_HA = {
"arm": STATE_ALARM_ARMED_AWAY,
"partial_arm": STATE_ALARM_ARMED_HOME,
"A": STATE_ALARM_ARMED_HOME,
"B": STATE_ALARM_ARMED_HOME,
"C": STATE_ALARM_ARMED_NIGHT,
"D": STATE_ALARM_ARMED_NIGHT,
}
TEST_FULL_RISCO_TO_HA = {
**TEST_RISCO_TO_HA,
"D": STATE_ALARM_ARMED_CUSTOM_BYPASS,
}
TEST_HA_TO_RISCO = {
STATE_ALARM_ARMED_AWAY: "arm",
STATE_ALARM_ARMED_HOME: "partial_arm",
STATE_ALARM_ARMED_NIGHT: "C",
}
TEST_FULL_HA_TO_RISCO = {
**TEST_HA_TO_RISCO,
STATE_ALARM_ARMED_CUSTOM_BYPASS: "D",
}
CUSTOM_MAPPING_OPTIONS = {
"risco_states_to_ha": TEST_RISCO_TO_HA,
"ha_states_to_risco": TEST_HA_TO_RISCO,
}
FULL_CUSTOM_MAPPING = {
"risco_states_to_ha": TEST_FULL_RISCO_TO_HA,
"ha_states_to_risco": TEST_FULL_HA_TO_RISCO,
}
EXPECTED_FEATURES = (
SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_NIGHT
)
def _partition_mock():
return MagicMock(
triggered=False,
arming=False,
armed=False,
disarmed=False,
partially_armed=False,
)
@pytest.fixture
def two_part_alarm():
"""Fixture to mock alarm with two partitions."""
partition_mocks = {0: _partition_mock(), 1: _partition_mock()}
alarm_mock = MagicMock()
with patch.object(
partition_mocks[0], "id", new_callable=PropertyMock(return_value=0)
), patch.object(
partition_mocks[1], "id", new_callable=PropertyMock(return_value=1)
), patch.object(
alarm_mock,
"partitions",
new_callable=PropertyMock(return_value=partition_mocks),
), patch(
"homeassistant.components.risco.RiscoAPI.get_state",
return_value=alarm_mock,
):
yield alarm_mock
async def test_cannot_connect(hass):
"""Test connection error."""
with patch(
"homeassistant.components.risco.RiscoAPI.login",
side_effect=CannotConnectError,
):
config_entry = MockConfigEntry(domain=DOMAIN, data=TEST_CONFIG)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
registry = er.async_get(hass)
assert not registry.async_is_registered(FIRST_ENTITY_ID)
assert not registry.async_is_registered(SECOND_ENTITY_ID)
async def test_unauthorized(hass):
"""Test unauthorized error."""
with patch(
"homeassistant.components.risco.RiscoAPI.login",
side_effect=UnauthorizedError,
):
config_entry = MockConfigEntry(domain=DOMAIN, data=TEST_CONFIG)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
registry = er.async_get(hass)
assert not registry.async_is_registered(FIRST_ENTITY_ID)
assert not registry.async_is_registered(SECOND_ENTITY_ID)
async def test_setup(hass, two_part_alarm):
"""Test entity setup."""
registry = er.async_get(hass)
assert not registry.async_is_registered(FIRST_ENTITY_ID)
assert not registry.async_is_registered(SECOND_ENTITY_ID)
await setup_risco(hass)
assert registry.async_is_registered(FIRST_ENTITY_ID)
assert registry.async_is_registered(SECOND_ENTITY_ID)
registry = dr.async_get(hass)
device = registry.async_get_device({(DOMAIN, TEST_SITE_UUID + "_0")})
assert device is not None
assert device.manufacturer == "Risco"
device = registry.async_get_device({(DOMAIN, TEST_SITE_UUID + "_1")})
assert device is not None
assert device.manufacturer == "Risco"
async def _check_state(hass, alarm, property, state, entity_id, partition_id):
with patch.object(alarm.partitions[partition_id], property, return_value=True):
await async_update_entity(hass, entity_id)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == state
async def test_states(hass, two_part_alarm):
"""Test the various alarm states."""
await setup_risco(hass, [], CUSTOM_MAPPING_OPTIONS)
assert hass.states.get(FIRST_ENTITY_ID).state == STATE_UNKNOWN
for partition_id, entity_id in {0: FIRST_ENTITY_ID, 1: SECOND_ENTITY_ID}.items():
await _check_state(
hass,
two_part_alarm,
"triggered",
STATE_ALARM_TRIGGERED,
entity_id,
partition_id,
)
await _check_state(
hass, two_part_alarm, "arming", STATE_ALARM_ARMING, entity_id, partition_id
)
await _check_state(
hass,
two_part_alarm,
"armed",
STATE_ALARM_ARMED_AWAY,
entity_id,
partition_id,
)
await _check_state(
hass,
two_part_alarm,
"partially_armed",
STATE_ALARM_ARMED_HOME,
entity_id,
partition_id,
)
await _check_state(
hass,
two_part_alarm,
"disarmed",
STATE_ALARM_DISARMED,
entity_id,
partition_id,
)
groups = {"A": False, "B": False, "C": True, "D": False}
with patch.object(
two_part_alarm.partitions[partition_id],
"groups",
new_callable=PropertyMock(return_value=groups),
):
await _check_state(
hass,
two_part_alarm,
"partially_armed",
STATE_ALARM_ARMED_NIGHT,
entity_id,
partition_id,
)
async def _test_service_call(
hass, service, method, entity_id, partition_id, *args, **kwargs
):
with patch(f"homeassistant.components.risco.RiscoAPI.{method}") as set_mock:
await _call_alarm_service(hass, service, entity_id, **kwargs)
set_mock.assert_awaited_once_with(partition_id, *args)
async def _test_no_service_call(
hass, service, method, entity_id, partition_id, **kwargs
):
with patch(f"homeassistant.components.risco.RiscoAPI.{method}") as set_mock:
await _call_alarm_service(hass, service, entity_id, **kwargs)
set_mock.assert_not_awaited()
async def _call_alarm_service(hass, service, entity_id, **kwargs):
data = {"entity_id": entity_id, **kwargs}
await hass.services.async_call(
ALARM_DOMAIN, service, service_data=data, blocking=True
)
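# For reference, a call such as
#     await _call_alarm_service(hass, SERVICE_ALARM_ARM_AWAY, FIRST_ENTITY_ID, code=1234)
# invokes the alarm_control_panel service with the payload
#     {"entity_id": "alarm_control_panel.risco_test_site_name_partition_0", "code": 1234}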
async def test_sets_custom_mapping(hass, two_part_alarm):
"""Test settings the various modes when mapping some states."""
await setup_risco(hass, [], CUSTOM_MAPPING_OPTIONS)
registry = er.async_get(hass)
entity = registry.async_get(FIRST_ENTITY_ID)
assert entity.supported_features == EXPECTED_FEATURES
await _test_service_call(hass, SERVICE_ALARM_DISARM, "disarm", FIRST_ENTITY_ID, 0)
await _test_service_call(hass, SERVICE_ALARM_DISARM, "disarm", SECOND_ENTITY_ID, 1)
await _test_service_call(hass, SERVICE_ALARM_ARM_AWAY, "arm", FIRST_ENTITY_ID, 0)
await _test_service_call(hass, SERVICE_ALARM_ARM_AWAY, "arm", SECOND_ENTITY_ID, 1)
await _test_service_call(
hass, SERVICE_ALARM_ARM_HOME, "partial_arm", FIRST_ENTITY_ID, 0
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_HOME, "partial_arm", SECOND_ENTITY_ID, 1
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_NIGHT, "group_arm", FIRST_ENTITY_ID, 0, "C"
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_NIGHT, "group_arm", SECOND_ENTITY_ID, 1, "C"
)
async def test_sets_full_custom_mapping(hass, two_part_alarm):
"""Test settings the various modes when mapping all states."""
await setup_risco(hass, [], FULL_CUSTOM_MAPPING)
registry = er.async_get(hass)
entity = registry.async_get(FIRST_ENTITY_ID)
assert (
entity.supported_features == EXPECTED_FEATURES | SUPPORT_ALARM_ARM_CUSTOM_BYPASS
)
await _test_service_call(hass, SERVICE_ALARM_DISARM, "disarm", FIRST_ENTITY_ID, 0)
await _test_service_call(hass, SERVICE_ALARM_DISARM, "disarm", SECOND_ENTITY_ID, 1)
await _test_service_call(hass, SERVICE_ALARM_ARM_AWAY, "arm", FIRST_ENTITY_ID, 0)
await _test_service_call(hass, SERVICE_ALARM_ARM_AWAY, "arm", SECOND_ENTITY_ID, 1)
await _test_service_call(
hass, SERVICE_ALARM_ARM_HOME, "partial_arm", FIRST_ENTITY_ID, 0
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_HOME, "partial_arm", SECOND_ENTITY_ID, 1
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_NIGHT, "group_arm", FIRST_ENTITY_ID, 0, "C"
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_NIGHT, "group_arm", SECOND_ENTITY_ID, 1, "C"
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_CUSTOM_BYPASS, "group_arm", FIRST_ENTITY_ID, 0, "D"
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_CUSTOM_BYPASS, "group_arm", SECOND_ENTITY_ID, 1, "D"
)
async def test_sets_with_correct_code(hass, two_part_alarm):
"""Test settings the various modes when code is required."""
await setup_risco(hass, [], {**CUSTOM_MAPPING_OPTIONS, **CODES_REQUIRED_OPTIONS})
code = {"code": 1234}
await _test_service_call(
hass, SERVICE_ALARM_DISARM, "disarm", FIRST_ENTITY_ID, 0, **code
)
await _test_service_call(
hass, SERVICE_ALARM_DISARM, "disarm", SECOND_ENTITY_ID, 1, **code
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_AWAY, "arm", FIRST_ENTITY_ID, 0, **code
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_AWAY, "arm", SECOND_ENTITY_ID, 1, **code
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_HOME, "partial_arm", FIRST_ENTITY_ID, 0, **code
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_HOME, "partial_arm", SECOND_ENTITY_ID, 1, **code
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_NIGHT, "group_arm", FIRST_ENTITY_ID, 0, "C", **code
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_NIGHT, "group_arm", SECOND_ENTITY_ID, 1, "C", **code
)
await _test_no_service_call(
hass, SERVICE_ALARM_ARM_CUSTOM_BYPASS, "partial_arm", FIRST_ENTITY_ID, 0, **code
)
await _test_no_service_call(
hass,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
"partial_arm",
SECOND_ENTITY_ID,
1,
**code,
)
async def test_sets_with_incorrect_code(hass, two_part_alarm):
"""Test settings the various modes when code is required and incorrect."""
await setup_risco(hass, [], {**CUSTOM_MAPPING_OPTIONS, **CODES_REQUIRED_OPTIONS})
code = {"code": 4321}
await _test_no_service_call(
hass, SERVICE_ALARM_DISARM, "disarm", FIRST_ENTITY_ID, 0, **code
)
await _test_no_service_call(
hass, SERVICE_ALARM_DISARM, "disarm", SECOND_ENTITY_ID, 1, **code
)
await _test_no_service_call(
hass, SERVICE_ALARM_ARM_AWAY, "arm", FIRST_ENTITY_ID, 0, **code
)
await _test_no_service_call(
hass, SERVICE_ALARM_ARM_AWAY, "arm", SECOND_ENTITY_ID, 1, **code
)
await _test_no_service_call(
hass, SERVICE_ALARM_ARM_HOME, "partial_arm", FIRST_ENTITY_ID, 0, **code
)
await _test_no_service_call(
hass, SERVICE_ALARM_ARM_HOME, "partial_arm", SECOND_ENTITY_ID, 1, **code
)
await _test_no_service_call(
hass, SERVICE_ALARM_ARM_NIGHT, "group_arm", FIRST_ENTITY_ID, 0, **code
)
await _test_no_service_call(
hass, SERVICE_ALARM_ARM_NIGHT, "group_arm", SECOND_ENTITY_ID, 1, **code
)
await _test_no_service_call(
hass, SERVICE_ALARM_ARM_CUSTOM_BYPASS, "partial_arm", FIRST_ENTITY_ID, 0, **code
)
await _test_no_service_call(
hass,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
"partial_arm",
SECOND_ENTITY_ID,
1,
**code,
)
|
|
"""
DecMeg2014 example code.
Simple prediction of the class labels of the test set by:
- pooling all the training trials of all subjects in one dataset.
- Extracting the MEG data in the first 500ms from when the
stimulus starts.
- Projecting onto per-subject time components obtained with a CP-ALS
tensor decomposition.
- Using a logistic regression classifier.
Copyright Emanuele Olivetti 2014, BSD license, 3 clauses.
"""
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from scipy.io import loadmat
import scipy.signal as sig
from sktensor import dtensor, cp_als
from sklearn.cross_validation import LeavePLabelOut
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import os
def view_filter(b, a):
w, h = sig.freqz(b, a)
plt.plot(w / np.pi, np.abs(h))  # plot against normalized frequency (0..1)
def notch(Wn, bandwidth):
"""
Notch filter to kill line-noise.
"""
f = Wn / 2.0
R = 1.0 - 3.0 * (bandwidth / 2.0)
num = 1.0 - 2.0 * R * np.cos(2 * np.pi * f) + R ** 2.
denom = 2.0 - 2.0 * np.cos(2 * np.pi * f)
K = num / denom
b = np.zeros(3)
a = np.zeros(3)
a[0] = 1.0
a[1] = -2.0 * R * np.cos(2 * np.pi * f)
a[2] = R ** 2.
b[0] = K
b[1] = -2.0 * K * np.cos(2 * np.pi * f)
b[2] = K
return b, a
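# A worked example of the normalisation used by notch() (assuming a 250 Hz
# sampling rate, i.e. a Nyquist frequency of 125 Hz): 50 Hz mains noise maps to
# Wn = 50 / 125 = 0.4 and a 5 Hz bandwidth to 5 / 125 = 0.04, so the filter
# would be applied roughly as
#
#     b, a = notch(0.4, 0.04)
#     cleaned = sig.lfilter(b, a, raw_signal)  # raw_signal is a hypothetical array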
def window(XX, lower_limit=160, tmin=0.0, tmax=0.5, sfreq=250, tmin_original=-.5):
# We throw away all the MEG data outside the first 0.5sec from when
# the visual stimulus starts:
print("Restricting MEG data to the interval [%s, %s] sec." % (tmin, tmax))
XX = XX[:, lower_limit:, :]
# instead of post-stimulus centering
print("Apply desired time window and drop sensors 0 to %i." % lower_limit)
beginning = np.round((tmin - tmin_original) * sfreq).astype(np.int)
end = np.round((tmax - tmin_original) * sfreq).astype(np.int)
XX = XX[:, :, beginning:end].copy()
return XX
def notch_filter(XX):
# Assuming 250Hz == fs, 125Hz == fs/2, 50Hz = 50/125 = .4
# 5 Hz bw = 5/125 = .04
print("Applying notch filter for powerline.")
bw = .04
freq = .4
b, a = notch(freq, bw)
XX = sig.lfilter(b, a, XX)
# Assuming 250Hz == fs, 125Hz == fs/2, 10Hz = 10/125 = .08
# 5 Hz bw = 5/125 = .04
print("Applying filter for alpha wave.")
bw = .04
freq = .08
b, a = notch(freq, bw)
XX = sig.lfilter(b, a, XX)
return XX
def window_baseline(XX, lower_limit=160):
baseline = XX[:, lower_limit:, :125].mean(-1)
XX = window(XX)
print("Baseline.")
XX -= baseline[..., np.newaxis]
return XX
def window_filter(XX):
XX = window(XX)
XX = notch_filter(XX)
return XX
def window_filter_baseline(XX, lower_limit=160):
baseline = XX[:, lower_limit:, :125].mean(-1)
XX = window(XX)
XX = notch_filter(XX)
XX -= baseline[..., np.newaxis]
return XX
def get_outlier_mask(XX):
print("Getting outlier mask.")
mask = (XX ** 2).sum(axis=-1).sum(axis=-1)
mask = mask.argsort()[10:-10]
return mask
def get_tensor_decomposition(XX, n=2):
print("CP-ALS Decomposition.")
T = dtensor(XX)
P, fit, itr, exectimes = cp_als(T, n, init='nvecs')
proj = P.U
return proj
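# For a 3-mode data tensor of shape (trials, sensors, time), cp_als returns a
# Kruskal tensor whose factor list P.U unpacks as
#     trial_proj, sensor_proj, time_proj = get_tensor_decomposition(XX, n)
# with shapes (trials, n), (sensors, n) and (time, n); only the time-mode
# factors are used for the projection further down.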
def load_train_data(exclude_subject=16):
subjects_train = [i for i in range(1, 17) if i != exclude_subject]
print("Loading subjects", subjects_train)
X_train = []
y_train = []
label_count = []
print("Creating the trainset.")
for n, subject in enumerate(subjects_train):
filename = 'data/train_subject%02d.mat' % subject
print("Loading", filename)
data = loadmat(filename, squeeze_me=True)
XX = data['X']
yy = data['y']
XX = window_filter_baseline(XX)
X_train.append(XX)
y_train.append(yy)
label_count += [subject] * len(XX)
X_train = np.vstack(X_train)
y_train = np.concatenate(y_train)
print("Trainset:", X_train.shape)
return X_train, y_train, label_count
def load_val_data(subject=16):
subjects_val = [subject]
print("Loading subjects", subjects_val)
X_val = []
y_val = []
label_count = []
print("Creating the validation set.")
for n, subject in enumerate(subjects_val):
filename = 'data/train_subject%02d.mat' % subject
print("Loading", filename)
data = loadmat(filename, squeeze_me=True)
XX = data['X']
yy = data['y']
XX = window_filter_baseline(XX)
X_val.append(XX)
y_val.append(yy)
label_count += [subject] * len(XX)
X_val = np.vstack(X_val)
y_val = np.concatenate(y_val)
print("Validation set:", X_val.shape)
return X_val, y_val, label_count
def load_test_data():
subjects_test = range(17, 24)
print("Loading subjects", subjects_test)
X_test = []
ids_test = []
label_count = []
print("Creating the testset.")
for n, subject in enumerate(subjects_test):
filename = 'data/test_subject%02d.mat' % subject
print("Loading", filename)
data = loadmat(filename, squeeze_me=True)
XX = data['X']
ids = data['Id']
XX = window_filter_baseline(XX)
X_test.append(XX)
ids_test.append(ids)
label_count += [subject] * len(XX)
X_test = np.vstack(X_test)
ids_test = np.concatenate(ids_test)
print("Testset:", X_test.shape)
return X_test, ids_test, label_count
def get_data(val_index=16):
if val_index > 16:
raise ValueError("There are only 16 training patients!")
saved_data_path = "saved_data_val_%i.npz" % val_index
if not os.path.exists(saved_data_path):
print("Saved, preprocessed data not found in %s" % saved_data_path)
X_train, y_train, label_count_train = load_train_data()
X_test, ids_test, label_count_test = load_test_data()
X_val, y_val, label_count_val = load_val_data()
np.savez(saved_data_path,
X_train=X_train,
y_train=y_train,
X_val=X_val,
y_val=y_val,
X_test=X_test,
ids_test=ids_test,
label_count_train=label_count_train,
label_count_test=label_count_test,
label_count_val=label_count_val)
else:
print("Saved, preprocessed data found in %s" % saved_data_path)
npzfile = np.load(saved_data_path)
X_train = npzfile['X_train']
y_train = npzfile['y_train']
X_val = npzfile['X_val']
y_val = npzfile['y_val']
X_test = npzfile['X_test']
ids_test = npzfile['ids_test']
label_count_train = npzfile['label_count_train']
label_count_test = npzfile['label_count_test']
label_count_val = npzfile['label_count_val']
return (X_train, y_train, label_count_train, X_test, ids_test,
label_count_test, X_val, y_val, label_count_val)
def project_against_timeseries_tensors(X_train, X_test, X_val, label_count_train,
label_count_test, label_count_val):
n_basis = 75
saved_proj = "saved_time_projs_%s.npz" % n_basis
if not os.path.exists(saved_proj):
X_full = np.vstack((X_train, X_test, X_val))
label_count_full = np.concatenate((label_count_train, label_count_test,
label_count_val))
print("Saved time projection file not found in %s" % saved_proj)
print("Creating projections")
lol = LeavePLabelOut(label_count_full, p=1)
proj = []
for n, (train_index, test_index) in enumerate(lol):
print("Getting dictionary for patient %s" % n)
trial_proj, sensor_proj, time_proj = get_tensor_decomposition(
X_full[test_index], n_basis)
proj.append(time_proj)
proj = np.array(proj)
np.savez(saved_proj, proj=proj)
else:
print("Saved projection files found in %s" % saved_proj)
npzfile = np.load(saved_proj)
proj = npzfile['proj']
proj = np.max(proj, axis=-1)
X_train = np.dot(X_train, proj.T)
X_test = np.dot(X_test, proj.T)
X_val = np.dot(X_val, proj.T)
print("Shape of reduced train data %i x %i x %i" % X_train.shape)
print("Shape of reduced test data %i x %i x %i" % X_test.shape)
print("Shape of reduced val data %i x %i x %i" % X_val.shape)
return X_train, X_test, X_val
if __name__ == '__main__':
print("DecMeg2014: https://www.kaggle.com/c/decoding-the-human-brain")
validation_subject = 16
(X_train, y_train, label_count_train,
X_test, ids_test, label_count_test,
X_val, y_val, label_count_val) = get_data(val_index=validation_subject)
pipe = Pipeline([("scaler", StandardScaler()),
("clf", LogisticRegression(C=.1, penalty='l2'))])
X_train, X_test, X_val = project_against_timeseries_tensors(
X_train, X_test, X_val, label_count_train, label_count_test,
label_count_val)
print("Projection complete.")
X_train = X_train.reshape(X_train.shape[0], -1)
X_test = X_test.reshape(X_test.shape[0], -1)
X_val = X_val.reshape(X_val.shape[0], -1)
print("Training.")
pipe.fit(X_train, y_train)
print("Predicting validation subject.")
y_val_pred = pipe.predict(X_val)
acc = accuracy_score(y_val, y_val_pred)
print("Accuracy on validation subject %s" % acc)
print("Predicting test set.")
y_pred = pipe.predict(X_test)
filename_submission = "submission.csv"
print("Creating submission file", filename_submission)
with open(filename_submission, "w") as f:
f.write("Id,Prediction\n")
for i in range(len(y_pred)):
f.write(str(ids_test[i]) + "," + str(y_pred[i]) + "\n")
print("Done.")
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import errno
import logging
import os
import six
import subprocess
import warnings
from future import standard_library
standard_library.install_aliases()
from builtins import str
from collections import OrderedDict
from configparser import ConfigParser
# show Airflow's deprecation warnings
warnings.filterwarnings(
action='default', category=DeprecationWarning, module='airflow')
warnings.filterwarnings(
action='default', category=PendingDeprecationWarning, module='airflow')
class AirflowConfigException(Exception):
pass
try:
from cryptography.fernet import Fernet
except ImportError:
pass
def generate_fernet_key():
try:
FERNET_KEY = Fernet.generate_key().decode()
except NameError:
FERNET_KEY = "cryptography_not_found_storing_passwords_in_plain_text"
return FERNET_KEY
def expand_env_var(env_var):
"""
Expands (potentially nested) env vars by repeatedly applying
`expandvars` and `expanduser` until interpolation stops having
any effect.
"""
if not env_var:
return env_var
while True:
interpolated = os.path.expanduser(os.path.expandvars(str(env_var)))
if interpolated == env_var:
return interpolated
else:
env_var = interpolated
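# For example, with FOO=~/data exported in the shell, expand_env_var('$FOO/raw')
# becomes '/home/<user>/data/raw' on the first pass (expandvars, then expanduser);
# a second pass leaves it unchanged, so that value is returned.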
def run_command(command):
"""
Runs command and returns stdout
"""
process = subprocess.Popen(
command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, stderr = process.communicate()
if process.returncode != 0:
raise AirflowConfigException(
"Cannot execute {}. Error code is: {}. Output: {}, Stderr: {}"
.format(command, process.returncode, output, stderr)
)
return output
DEFAULT_CONFIG = """\
[core]
# The home folder for airflow, default is ~/airflow
airflow_home = {AIRFLOW_HOME}
# The folder where your airflow pipelines live, most likely a
# subfolder in a code repository
# This path must be absolute
dags_folder = {AIRFLOW_HOME}/dags
# The folder where airflow should store its log files
# This path must be absolute
base_log_folder = {AIRFLOW_HOME}/logs
# Airflow can store logs remotely in AWS S3 or Google Cloud Storage. Users
# must supply a remote location URL (starting with either 's3://...' or
# 'gs://...') and an Airflow connection id that provides access to the storage
# location.
remote_base_log_folder =
remote_log_conn_id =
# Use server-side encryption for logs stored in S3
encrypt_s3_logs = False
# DEPRECATED option for remote log storage, use remote_base_log_folder instead!
s3_log_folder =
# The executor class that airflow should use. Choices include
# SequentialExecutor, LocalExecutor, CeleryExecutor
executor = SequentialExecutor
# The SqlAlchemy connection string to the metadata database.
# SqlAlchemy supports many different database engines; more information
# is available on their website
sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/airflow.db
# The SqlAlchemy pool size is the maximum number of database connections
# in the pool.
sql_alchemy_pool_size = 5
# The SqlAlchemy pool recycle is the number of seconds a connection
# can be idle in the pool before it is invalidated. This config does
# not apply to sqlite.
sql_alchemy_pool_recycle = 3600
# The amount of parallelism as a setting to the executor. This defines
# the max number of task instances that should run simultaneously
# on this airflow installation
parallelism = 32
# The number of task instances allowed to run concurrently by the scheduler
dag_concurrency = 16
# Are DAGs paused by default at creation
dags_are_paused_at_creation = True
# When not using pools, tasks are run in the "default pool",
# whose size is guided by this config element
non_pooled_task_slot_count = 128
# The maximum number of active DAG runs per DAG
max_active_runs_per_dag = 16
# Whether to load the examples that ship with Airflow. They are useful for
# getting started, but you probably want to set this to False in a production
# environment
load_examples = True
# Where your Airflow plugins are stored
plugins_folder = {AIRFLOW_HOME}/plugins
# Secret key to save connection passwords in the db
fernet_key = {FERNET_KEY}
# Whether to disable pickling dags
donot_pickle = False
# How long before timing out a python file import while filling the DagBag
dagbag_import_timeout = 30
# What security module to use (for example kerberos):
security =
# Turn unit test mode on (overwrites many configuration options with test
# values at runtime)
unit_test_mode = False
[operators]
# The default owner assigned to each new operator, unless
# provided explicitly or passed via `default_args`
default_owner = Airflow
default_cpus = 1
default_ram = 512
default_disk = 512
default_gpus = 0
[webserver]
# The base url of your website as airflow cannot guess what domain or
# cname you are using. This is used in automated emails that
# airflow sends to point links to the right web server
base_url = http://localhost:8080
# The ip specified when starting the web server
web_server_host = 0.0.0.0
# The port on which to run the web server
web_server_port = 8080
# Number of seconds the gunicorn webserver waits before timing out on a worker
web_server_worker_timeout = 120
# Number of workers to refresh at a time. When set to 0, worker refresh is
# disabled. When nonzero, airflow periodically refreshes webserver workers by
# bringing up new ones and killing old ones.
worker_refresh_batch_size = 1
# Number of seconds to wait before refreshing a batch of workers.
worker_refresh_interval = 30
# Secret key used to run your flask app
secret_key = temporary_key
# Number of workers to run the Gunicorn web server
workers = 4
# The worker class gunicorn should use. Choices include
# sync (default), eventlet, gevent
worker_class = sync
# Log files for the gunicorn webserver. '-' means log to stderr.
access_logfile = -
error_logfile = -
# Expose the configuration file in the web server
expose_config = False
# Set to true to turn on authentication:
# http://pythonhosted.org/airflow/installation.html#web-authentication
authenticate = False
# Filter the list of dags by owner name (requires authentication to be enabled)
filter_by_owner = False
# Filtering mode. Choices include user (default) and ldapgroup.
# Ldap group filtering requires using the ldap backend
#
# Note that the ldap server needs the "memberOf" overlay to be set up
# in order to use the ldapgroup mode.
owner_mode = user
# Default DAG orientation. Valid values are:
# LR (Left->Right), TB (Top->Bottom), RL (Right->Left), BT (Bottom->Top)
dag_orientation = LR
# Puts the webserver in demonstration mode; blurs the names of Operators for
# privacy.
demo_mode = False
[email]
email_backend = airflow.utils.email.send_email_smtp
[smtp]
# If you want airflow to send emails on retries, failure, and you want to use
# the airflow.utils.email.send_email_smtp function, you have to configure an
# smtp server here
smtp_host = localhost
smtp_starttls = True
smtp_ssl = False
smtp_user = airflow
smtp_port = 25
smtp_password = airflow
smtp_mail_from = airflow@airflow.com
[celery]
# This section only applies if you are using the CeleryExecutor in
# [core] section above
# The app name that will be used by celery
celery_app_name = airflow.executors.celery_executor
# The concurrency that will be used when starting workers with the
# "airflow worker" command. This defines the number of task instances that
# a worker will take, so size up your workers based on the resources on
# your worker box and the nature of your tasks
celeryd_concurrency = 16
# When you start an airflow worker, airflow starts a tiny web server
# subprocess to serve the worker's local log files to the airflow main
# web server, which then builds pages and sends them to users. This defines
# the port on which the logs are served. It needs to be unused and
# reachable from the main web server, which connects to the workers.
worker_log_server_port = 8793
# The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
# a sqlalchemy database. Refer to the Celery documentation for more
# information.
broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
# Another key Celery setting
celery_result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
# it `airflow flower`. This defines the IP that Celery Flower runs on
flower_host = 0.0.0.0
# This defines the port that Celery Flower runs on
flower_port = 5555
# Default queue that tasks get assigned to and that workers listen on.
default_queue = default
[scheduler]
# Task instances listen for external kill signal (when you clear tasks
# from the CLI or the UI), this defines the frequency at which they should
# listen (in seconds).
job_heartbeat_sec = 5
# The scheduler constantly tries to trigger new tasks (look at the
# scheduler section in the docs for more information). This defines
# how often the scheduler should run (in seconds).
scheduler_heartbeat_sec = 5
run_duration = 1800
dag_dir_list_interval = 300
print_stats_interval = 30
min_file_process_interval = 180
child_process_log_directory = /tmp/airflow/scheduler/logs
# Statsd (https://github.com/etsy/statsd) integration settings
statsd_on = False
statsd_host = localhost
statsd_port = 8125
statsd_prefix = airflow
# The scheduler can run multiple threads in parallel to schedule dags.
# This defines how many threads will run. However airflow will never
# use more threads than the amount of cpu cores available.
max_threads = 2
authenticate = False
[mesos]
# Mesos master address which MesosExecutor will connect to.
master = localhost:5050
# The framework name which Airflow scheduler will register itself as on mesos
framework_name = Airflow
# Number of cpu cores required for running one task instance using
# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
# command on a mesos slave
task_cpu = 1
# Memory in MB required for running one task instance using
# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
# command on a mesos slave
task_memory = 256
# Enable framework checkpointing for mesos
# See http://mesos.apache.org/documentation/latest/slave-recovery/
checkpoint = False
# Failover timeout in milliseconds.
# When checkpointing is enabled and this option is set, Mesos waits
# until the configured timeout for
# the MesosExecutor framework to re-register after a failover. Mesos
# shuts down running tasks if the
# MesosExecutor framework fails to re-register within this timeframe.
# failover_timeout = 604800
# Enable framework authentication for mesos
# See http://mesos.apache.org/documentation/latest/configuration/
authenticate = False
# Mesos credentials, if authentication is enabled
# default_principal = admin
# default_secret = admin
[kerberos]
ccache = /tmp/airflow_krb5_ccache
# gets augmented with fqdn
principal = airflow
reinit_frequency = 3600
kinit_path = kinit
keytab = airflow.keytab
[github_enterprise]
api_rev = v3
[admin]
# UI to hide sensitive variable fields when set to True
hide_sensitive_variable_fields = True
"""
TEST_CONFIG = """\
[core]
unit_test_mode = True
airflow_home = {AIRFLOW_HOME}
dags_folder = {TEST_DAGS_FOLDER}
base_log_folder = {AIRFLOW_HOME}/logs
executor = SequentialExecutor
sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/unittests.db
load_examples = True
donot_pickle = False
dag_concurrency = 16
dags_are_paused_at_creation = False
fernet_key = {FERNET_KEY}
non_pooled_task_slot_count = 128
[operators]
default_owner = airflow
[webserver]
base_url = http://localhost:8080
web_server_host = 0.0.0.0
web_server_port = 8080
dag_orientation = LR
[email]
email_backend = airflow.utils.email.send_email_smtp
[smtp]
smtp_host = localhost
smtp_user = airflow
smtp_port = 25
smtp_password = airflow
smtp_mail_from = airflow@airflow.com
[celery]
celery_app_name = airflow.executors.celery_executor
celeryd_concurrency = 16
worker_log_server_port = 8793
broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
celery_result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
flower_host = 0.0.0.0
flower_port = 5555
default_queue = default
[scheduler]
job_heartbeat_sec = 1
scheduler_heartbeat_sec = 5
authenticate = true
max_threads = 2
"""
class AirflowConfigParser(ConfigParser):
# These configuration options can be fetched from the stdout of a command
# configured under a "{name}_cmd" key in the same section; the idea is to
# avoid storing passwords in plain-text config files on the box.
as_command_stdout = {
('core', 'sql_alchemy_conn'),
('core', 'fernet_key'),
('celery', 'broker_url'),
('celery', 'celery_result_backend')
}
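# For example, rather than storing the connection string in airflow.cfg, one
# could configure (values here are illustrative):
#
#     [core]
#     sql_alchemy_conn_cmd = cat /run/secrets/airflow_sql_alchemy_conn
#
# and get('core', 'sql_alchemy_conn') would then return the command's stdout.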
def __init__(self, *args, **kwargs):
ConfigParser.__init__(self, *args, **kwargs)
self.read_string(parameterized_config(DEFAULT_CONFIG))
self.is_validated = False
def read_string(self, string, source='<string>'):
"""
Read configuration from a string.
A backwards-compatible version of the ConfigParser.read_string()
method that was introduced in Python 3.
"""
# Python 3 added read_string() method
if six.PY3:
ConfigParser.read_string(self, string, source=source)
# Python 2 requires StringIO buffer
else:
import StringIO
self.readfp(StringIO.StringIO(string))
def _validate(self):
if (
self.get("core", "executor") != 'SequentialExecutor' and
"sqlite" in self.get('core', 'sql_alchemy_conn')):
raise AirflowConfigException(
"error: cannot use sqlite with the {}".format(
self.get('core', 'executor')))
elif (
self.getboolean("webserver", "authenticate") and
self.get("webserver", "owner_mode") not in ['user', 'ldapgroup']
):
raise AirflowConfigException(
"error: owner_mode option should be either "
"'user' or 'ldapgroup' when filtering by owner is set")
elif (
self.getboolean("webserver", "authenticate") and
self.get("webserver", "owner_mode").lower() == 'ldapgroup' and
self.get("core", "auth_backend") != (
'airflow.contrib.auth.backends.ldap_auth')
):
raise AirflowConfigException(
"error: attempt at using ldapgroup "
"filtering without using the Ldap backend")
self.is_validated = True
def _get_env_var_option(self, section, key):
# must have format AIRFLOW__{SECTION}__{KEY} (note double underscore)
env_var = 'AIRFLOW__{S}__{K}'.format(S=section.upper(), K=key.upper())
if env_var in os.environ:
return expand_env_var(os.environ[env_var])
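# e.g. exporting AIRFLOW__CORE__SQL_ALCHEMY_CONN overrides the [core]
# sql_alchemy_conn option from the config file, since get() checks env vars first.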
def _get_cmd_option(self, section, key):
fallback_key = key + '_cmd'
# if this is a valid command key...
if (section, key) in AirflowConfigParser.as_command_stdout:
# if the original key is present, return it no matter what
if self.has_option(section, key):
return ConfigParser.get(self, section, key)
# otherwise, execute the fallback key
elif self.has_option(section, fallback_key):
command = self.get(section, fallback_key)
return run_command(command)
def get(self, section, key, **kwargs):
section = str(section).lower()
key = str(key).lower()
# first check environment variables
option = self._get_env_var_option(section, key)
if option:
return option
# ...then the config file
if self.has_option(section, key):
return expand_env_var(
ConfigParser.get(self, section, key, **kwargs))
# ...then commands
option = self._get_cmd_option(section, key)
if option:
return option
else:
logging.warning("section/key [{section}/{key}] not found "
"in config".format(**locals()))
raise AirflowConfigException(
"section/key [{section}/{key}] not found "
"in config".format(**locals()))
def getboolean(self, section, key):
val = str(self.get(section, key)).lower().strip()
if '#' in val:
val = val.split('#')[0].strip()
if val.lower() in ('t', 'true', '1'):
return True
elif val.lower() in ('f', 'false', '0'):
return False
else:
raise AirflowConfigException(
'The value for configuration option "{}:{}" is not a '
'boolean (received "{}").'.format(section, key, val))
def getint(self, section, key):
return int(self.get(section, key))
def getfloat(self, section, key):
return float(self.get(section, key))
def read(self, filenames):
ConfigParser.read(self, filenames)
self._validate()
def as_dict(self, display_source=False, display_sensitive=False):
"""
Returns the current configuration as an OrderedDict of OrderedDicts.
:param display_source: If False, the option value is returned. If True,
a tuple of (option_value, source) is returned. Source is one of
'airflow config', 'env var' or 'bash cmd'.
:type display_source: bool
:param display_sensitive: If True, the values of options set by env
vars and bash commands will be displayed. If False, those options
are shown as '< hidden >'
:type display_sensitive: bool
"""
cfg = copy.deepcopy(self._sections)
# remove __name__ (affects Python 2 only)
for options in cfg.values():
options.pop('__name__', None)
# add source
if display_source:
for section in cfg:
for k, v in cfg[section].items():
cfg[section][k] = (v, 'airflow config')
# add env vars and overwrite because they have priority
for ev in [ev for ev in os.environ if ev.startswith('AIRFLOW__')]:
try:
_, section, key = ev.split('__')
opt = self._get_env_var_option(section, key)
except ValueError:
opt = None
if opt:
if (
not display_sensitive
and ev != 'AIRFLOW__CORE__UNIT_TEST_MODE'):
opt = '< hidden >'
if display_source:
opt = (opt, 'env var')
cfg.setdefault(section.lower(), OrderedDict()).update(
{key.lower(): opt})
# add bash commands
for (section, key) in AirflowConfigParser.as_command_stdout:
opt = self._get_cmd_option(section, key)
if opt:
if not display_sensitive:
opt = '< hidden >'
if display_source:
opt = (opt, 'bash cmd')
cfg.setdefault(section, OrderedDict()).update({key: opt})
return cfg
def load_test_config(self):
"""
Load the unit test configuration.
Note: this is not reversible.
"""
# override any custom settings with defaults
self.read_string(parameterized_config(DEFAULT_CONFIG))
# then read test config
self.read_string(parameterized_config(TEST_CONFIG))
# then read any "custom" test settings
self.read(TEST_CONFIG_FILE)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise AirflowConfigException('Had trouble creating a directory: ' + path)
# Setting AIRFLOW_HOME and AIRFLOW_CONFIG from environment variables, using
# "~/airflow" and "~/airflow/airflow.cfg" respectively as defaults.
if 'AIRFLOW_HOME' not in os.environ:
AIRFLOW_HOME = expand_env_var('~/airflow')
else:
AIRFLOW_HOME = expand_env_var(os.environ['AIRFLOW_HOME'])
mkdir_p(AIRFLOW_HOME)
if 'AIRFLOW_CONFIG' not in os.environ:
if os.path.isfile(expand_env_var('~/airflow.cfg')):
AIRFLOW_CONFIG = expand_env_var('~/airflow.cfg')
else:
AIRFLOW_CONFIG = AIRFLOW_HOME + '/airflow.cfg'
else:
AIRFLOW_CONFIG = expand_env_var(os.environ['AIRFLOW_CONFIG'])
# Set up dags folder for unit tests
# this directory won't exist if users install via pip
_TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'tests',
'dags')
if os.path.exists(_TEST_DAGS_FOLDER):
TEST_DAGS_FOLDER = _TEST_DAGS_FOLDER
else:
TEST_DAGS_FOLDER = os.path.join(AIRFLOW_HOME, 'dags')
def parameterized_config(template):
"""
Generates a configuration from the provided template + variables defined in
current scope
:param template: a config content templated with {{variables}}
"""
FERNET_KEY = generate_fernet_key()
all_vars = {k: v for d in [globals(), locals()] for k, v in d.items()}
return template.format(**all_vars)
TEST_CONFIG_FILE = AIRFLOW_HOME + '/unittests.cfg'
if not os.path.isfile(TEST_CONFIG_FILE):
logging.info("Creating new airflow config file for unit tests in: " +
TEST_CONFIG_FILE)
with open(TEST_CONFIG_FILE, 'w') as f:
f.write(parameterized_config(TEST_CONFIG))
if not os.path.isfile(AIRFLOW_CONFIG):
# These configuration options are used to generate a default configuration
# when it is missing. The right way to change your configuration is to
# alter your configuration file, not this code.
logging.info("Creating new airflow config file in: " + AIRFLOW_CONFIG)
with open(AIRFLOW_CONFIG, 'w') as f:
f.write(parameterized_config(DEFAULT_CONFIG))
logging.info("Reading the config from " + AIRFLOW_CONFIG)
conf = AirflowConfigParser()
conf.read(AIRFLOW_CONFIG)
def load_test_config():
"""
Load the unit test configuration.
Note: this is not reversible.
"""
conf.load_test_config()
if conf.getboolean('core', 'unit_test_mode'):
load_test_config()
def get(section, key, **kwargs):
return conf.get(section, key, **kwargs)
def getboolean(section, key):
return conf.getboolean(section, key)
def getfloat(section, key):
return conf.getfloat(section, key)
def getint(section, key):
return conf.getint(section, key)
def has_option(section, key):
return conf.has_option(section, key)
def remove_option(section, option):
return conf.remove_option(section, option)
def as_dict(display_source=False, display_sensitive=False):
return conf.as_dict(
display_source=display_source, display_sensitive=display_sensitive)
as_dict.__doc__ = conf.as_dict.__doc__
def set(section, option, value): # noqa
return conf.set(section, option, value)
########################
# convenience method to access config entries
def get_dags_folder():
return os.path.expanduser(get('core', 'DAGS_FOLDER'))
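# --- Illustrative usage of the module-level accessors above ---
# A minimal sketch, assuming this module is importable as airflow.configuration;
# the option names come from DEFAULT_CONFIG and the values depend on the local
# airflow.cfg:
#
#     from airflow import configuration
#     executor = configuration.get('core', 'executor')
#     paused = configuration.getboolean('core', 'dags_are_paused_at_creation')
#     dags_folder = configuration.get_dags_folder()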
|
|
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from django.utils.html import escape
from horizon_lib.workflows import views
from mox import IsA # noqa
from openstack_horizon import api
from openstack_horizon.dashboards.project.networks import tables
from openstack_horizon.dashboards.project.networks import workflows
from openstack_horizon.test import helpers as test
from openstack_horizon.usage import quotas
INDEX_URL = reverse('horizon:project:networks:index')
def form_data_subnet(subnet,
name=None, cidr=None, ip_version=None,
gateway_ip='', enable_dhcp=None,
allocation_pools=None,
dns_nameservers=None,
host_routes=None):
def get_value(value, default):
return default if value is None else value
data = {}
data['subnet_name'] = get_value(name, subnet.name)
data['cidr'] = get_value(cidr, subnet.cidr)
data['ip_version'] = get_value(ip_version, subnet.ip_version)
gateway_ip = subnet.gateway_ip if gateway_ip == '' else gateway_ip
data['gateway_ip'] = gateway_ip or ''
data['no_gateway'] = (gateway_ip is None)
data['enable_dhcp'] = get_value(enable_dhcp, subnet.enable_dhcp)
if data['ip_version'] == 6:
data['ipv6_modes'] = subnet.ipv6_modes
pools = get_value(allocation_pools, subnet.allocation_pools)
data['allocation_pools'] = _str_allocation_pools(pools)
nameservers = get_value(dns_nameservers, subnet.dns_nameservers)
data['dns_nameservers'] = _str_dns_nameservers(nameservers)
routes = get_value(host_routes, subnet.host_routes)
data['host_routes'] = _str_host_routes(routes)
return data
def form_data_no_subnet():
return {'subnet_name': '',
'cidr': '',
'ip_version': 4,
'gateway_ip': '',
'no_gateway': False,
'enable_dhcp': True,
'allocation_pools': '',
'dns_nameservers': '',
'host_routes': ''}
def _str_allocation_pools(allocation_pools):
if isinstance(allocation_pools, str):
return allocation_pools
return '\n'.join(['%s,%s' % (pool['start'], pool['end'])
for pool in allocation_pools])
def _str_dns_nameservers(dns_nameservers):
if isinstance(dns_nameservers, str):
return dns_nameservers
return '\n'.join(dns_nameservers)
def _str_host_routes(host_routes):
if isinstance(host_routes, str):
return host_routes
return '\n'.join(['%s,%s' % (route['destination'], route['nexthop'])
for route in host_routes])
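# These helpers build the newline-separated strings expected by the subnet form
# fields, e.g.
#     _str_allocation_pools([{'start': '10.0.0.2', 'end': '10.0.0.254'}])
# returns '10.0.0.2,10.0.0.254' and
#     _str_host_routes([{'destination': '0.0.0.0/0', 'nexthop': '10.0.0.1'}])
# returns '0.0.0.0/0,10.0.0.1' (one entry per line when there are several).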
class NetworkTests(test.TestCase):
@test.create_stubs({api.neutron: ('network_list',),
quotas: ('tenant_quota_usages',)})
def test_index(self):
quota_data = self.quota_usages.first()
quota_data['networks']['available'] = 5
api.neutron.network_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/networks/index.html')
networks = res.context['networks_table'].data
self.assertItemsEqual(networks, self.networks.list())
@test.create_stubs({api.neutron: ('network_list',),
quotas: ('tenant_quota_usages',)})
def test_index_network_list_exception(self):
quota_data = self.quota_usages.first()
quota_data['networks']['available'] = 5
api.neutron.network_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).MultipleTimes().AndRaise(self.exceptions.neutron)
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/networks/index.html')
self.assertEqual(len(res.context['networks_table'].data), 0)
self.assertMessageCount(res, error=1)
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail(self):
self._test_network_detail()
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail_with_mac_learning(self):
self._test_network_detail(mac_learning=True)
def _test_network_detail(self, mac_learning=False):
network_id = self.networks.first().id
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'project/networks/detail.html')
subnets = res.context['subnets_table'].data
ports = res.context['ports_table'].data
self.assertItemsEqual(subnets, [self.subnets.first()])
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail_network_exception(self):
self._test_network_detail_network_exception()
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail_network_exception_with_mac_learning(self):
self._test_network_detail_network_exception(mac_learning=True)
def _test_network_detail_network_exception(self, mac_learning=False):
network_id = self.networks.first().id
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndRaise(self.exceptions.neutron)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:detail', args=[network_id])
res = self.client.get(url)
redir_url = INDEX_URL
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail_subnet_exception(self):
self._test_network_detail_subnet_exception()
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail_subnet_exception_with_mac_learning(self):
self._test_network_detail_subnet_exception(mac_learning=True)
def _test_network_detail_subnet_exception(self, mac_learning=False):
network_id = self.networks.first().id
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id).\
AndRaise(self.exceptions.neutron)
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id).\
AndReturn([self.ports.first()])
# Called from SubnetTable
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'project/networks/detail.html')
subnets = res.context['subnets_table'].data
ports = res.context['ports_table'].data
self.assertEqual(len(subnets), 0)
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail_port_exception(self):
self._test_network_detail_port_exception()
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail_port_exception_with_mac_learning(self):
self._test_network_detail_port_exception(mac_learning=True)
def _test_network_detail_port_exception(self, mac_learning=False):
network_id = self.networks.first().id
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id).\
AndReturn([self.subnets.first()])
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id).\
AndRaise(self.exceptions.neutron)
# Called from SubnetTable
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'project/networks/detail.html')
subnets = res.context['subnets_table'].data
ports = res.context['ports_table'].data
self.assertItemsEqual(subnets, [self.subnets.first()])
self.assertEqual(len(ports), 0)
@test.create_stubs({api.neutron: ('profile_list',)})
def test_network_create_get(self,
test_with_profile=False):
if test_with_profile:
net_profiles = self.net_profiles.list()
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:create')
res = self.client.get(url)
workflow = res.context['workflow']
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertEqual(workflow.name, workflows.CreateNetwork.name)
expected_objs = ['<CreateNetworkInfo: createnetworkinfoaction>',
'<CreateSubnetInfo: createsubnetinfoaction>',
'<CreateSubnetDetail: createsubnetdetailaction>']
self.assertQuerysetEqual(workflow.steps, expected_objs)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_get_with_profile(self):
self.test_network_create_get(test_with_profile=True)
@test.create_stubs({api.neutron: ('network_create',
'profile_list',)})
def test_network_create_post(self,
test_with_profile=False):
network = self.networks.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndReturn(network)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
# subnet
'with_subnet': False}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_no_subnet())
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_with_profile(self):
self.test_network_create_post(test_with_profile=True)
@test.create_stubs({api.neutron: ('network_create',
'subnet_create',
'profile_list',)})
def test_network_create_post_with_subnet(self,
test_with_profile=False,
test_with_ipv6=True):
network = self.networks.first()
subnet = self.subnets.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
subnet_params = {'network_id': network.id,
'name': subnet.name,
'cidr': subnet.cidr,
'ip_version': subnet.ip_version,
'gateway_ip': subnet.gateway_ip,
'enable_dhcp': subnet.enable_dhcp}
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
if not test_with_ipv6:
subnet.ip_version = 4
subnet_params['ip_version'] = subnet.ip_version
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndReturn(network)
api.neutron.subnet_create(IsA(http.HttpRequest),
**subnet_params).AndReturn(subnet)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_with_subnet_w_profile(self):
self.test_network_create_post_with_subnet(test_with_profile=True)
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_ipv6': False})
def test_create_network_with_ipv6_disabled(self):
self.test_network_create_post_with_subnet(test_with_ipv6=False)
@test.create_stubs({api.neutron: ('network_create',
'profile_list',)})
def test_network_create_post_network_exception(self,
test_with_profile=False):
network = self.networks.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
# subnet
'with_subnet': False}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_no_subnet())
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_nw_exception_w_profile(self):
self.test_network_create_post_network_exception(
test_with_profile=True)
@test.create_stubs({api.neutron: ('network_create',
'profile_list')})
def test_network_create_post_with_subnet_network_exception(
self,
test_with_profile=False,
):
network = self.networks.first()
subnet = self.subnets.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_nw_create_post_w_subnet_nw_exception_w_profile(self):
self.test_network_create_post_with_subnet_network_exception(
test_with_profile=True)
@test.create_stubs({api.neutron: ('network_create',
'network_delete',
'subnet_create',
'profile_list')})
def test_network_create_post_with_subnet_subnet_exception(
self,
test_with_profile=False,
):
network = self.networks.first()
subnet = self.subnets.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndReturn(network)
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp)\
.AndRaise(self.exceptions.neutron)
api.neutron.network_delete(IsA(http.HttpRequest),
network.id)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_nw_create_post_w_subnet_subnet_exception_w_profile(self):
self.test_network_create_post_with_subnet_subnet_exception(
test_with_profile=True)
@test.create_stubs({api.neutron: ('profile_list',)})
def test_network_create_post_with_subnet_nocidr(self,
test_with_profile=False):
network = self.networks.first()
subnet = self.subnets.first()
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, cidr='',
allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertContains(res, escape('Specify "Network Address" or '
'clear "Create Subnet" checkbox.'))
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_nw_create_post_w_subnet_no_cidr_w_profile(self):
self.test_network_create_post_with_subnet_nocidr(
test_with_profile=True)
@test.create_stubs({api.neutron: ('profile_list',)})
def test_network_create_post_with_subnet_cidr_without_mask(
self,
test_with_profile=False,
):
network = self.networks.first()
subnet = self.subnets.first()
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, cidr='10.0.0.0',
allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
expected_msg = "The subnet in the Network Address is too small (/32)."
self.assertContains(res, expected_msg)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_nw_create_post_w_subnet_cidr_without_mask_w_profile(self):
self.test_network_create_post_with_subnet_cidr_without_mask(
test_with_profile=True)
@test.create_stubs({api.neutron: ('profile_list',)})
def test_network_create_post_with_subnet_cidr_inconsistent(
self,
test_with_profile=False,
):
network = self.networks.first()
subnet = self.subnets.first()
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
self.mox.ReplayAll()
# dummy IPv6 address
cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60'
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, cidr=cidr,
allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
expected_msg = 'Network Address and IP version are inconsistent.'
self.assertContains(res, expected_msg)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_with_subnet_cidr_inconsistent_w_profile(self):
self.test_network_create_post_with_subnet_cidr_inconsistent(
test_with_profile=True)
@test.create_stubs({api.neutron: ('profile_list',)})
def test_network_create_post_with_subnet_gw_inconsistent(
self,
test_with_profile=False,
):
network = self.networks.first()
subnet = self.subnets.first()
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
self.mox.ReplayAll()
# dummy IPv6 address
gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, gateway_ip=gateway_ip,
allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_with_subnet_gw_inconsistent_w_profile(self):
self.test_network_create_post_with_subnet_gw_inconsistent(
test_with_profile=True)
@test.create_stubs({api.neutron: ('network_get',)})
def test_network_update_get(self):
network = self.networks.first()
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:update', args=[network.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/networks/update.html')
@test.create_stubs({api.neutron: ('network_get',)})
def test_network_update_get_exception(self):
network = self.networks.first()
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:update', args=[network.id])
res = self.client.get(url)
redir_url = INDEX_URL
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_update',
'network_get',)})
def test_network_update_post(self):
network = self.networks.first()
api.neutron.network_update(IsA(http.HttpRequest), network.id,
name=network.name,
admin_state_up=network.admin_state_up)\
.AndReturn(network)
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
self.mox.ReplayAll()
form_data = {'network_id': network.id,
'name': network.name,
'admin_state': network.admin_state_up,
'tenant_id': network.tenant_id}
url = reverse('horizon:project:networks:update', args=[network.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_update',
'network_get',)})
def test_network_update_post_exception(self):
network = self.networks.first()
api.neutron.network_update(IsA(http.HttpRequest), network.id,
name=network.name,
admin_state_up=network.admin_state_up)\
.AndRaise(self.exceptions.neutron)
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
self.mox.ReplayAll()
form_data = {'network_id': network.id,
'name': network.name,
'admin_state': network.admin_state_up,
'tenant_id': network.tenant_id}
url = reverse('horizon:project:networks:update', args=[network.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_get',
'network_list',
'network_delete')})
def test_delete_network_no_subnet(self):
network = self.networks.first()
network.subnets = []
api.neutron.network_get(IsA(http.HttpRequest),
network.id,
expand_subnet=False)\
.AndReturn(network)
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=network.tenant_id,
shared=False)\
.AndReturn([network])
api.neutron.network_list(IsA(http.HttpRequest),
shared=True)\
.AndReturn([])
api.neutron.network_delete(IsA(http.HttpRequest), network.id)
self.mox.ReplayAll()
form_data = {'action': 'networks__delete__%s' % network.id}
res = self.client.post(INDEX_URL, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_get',
'network_list',
'network_delete',
'subnet_delete')})
def test_delete_network_with_subnet(self):
network = self.networks.first()
network.subnets = [subnet.id for subnet in network.subnets]
subnet_id = network.subnets[0]
api.neutron.network_get(IsA(http.HttpRequest),
network.id,
expand_subnet=False)\
.AndReturn(network)
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=network.tenant_id,
shared=False)\
.AndReturn([network])
api.neutron.network_list(IsA(http.HttpRequest), shared=True)\
.AndReturn([])
api.neutron.subnet_delete(IsA(http.HttpRequest), subnet_id)
api.neutron.network_delete(IsA(http.HttpRequest), network.id)
self.mox.ReplayAll()
form_data = {'action': 'networks__delete__%s' % network.id}
res = self.client.post(INDEX_URL, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_get',
'network_list',
'network_delete',
'subnet_delete')})
def test_delete_network_exception(self):
network = self.networks.first()
network.subnets = [subnet.id for subnet in network.subnets]
subnet_id = network.subnets[0]
api.neutron.network_get(IsA(http.HttpRequest),
network.id,
expand_subnet=False)\
.AndReturn(network)
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=network.tenant_id,
shared=False)\
.AndReturn([network])
api.neutron.network_list(IsA(http.HttpRequest),
shared=True)\
.AndReturn([])
api.neutron.subnet_delete(IsA(http.HttpRequest), subnet_id)
api.neutron.network_delete(IsA(http.HttpRequest), network.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'action': 'networks__delete__%s' % network.id}
res = self.client.post(INDEX_URL, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
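# The subnet and port test classes below follow the same mox record/replay
# pattern as the network tests above: @test.create_stubs declares which
# api.neutron calls are stubbed, expected calls are recorded together with
# their AndReturn()/AndRaise() results, self.mox.ReplayAll() switches mox to
# replay mode, and the recorded expectations are verified when the test case
# tears down.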
class NetworkSubnetTests(test.TestCase):
@test.create_stubs({api.neutron: ('network_get', 'subnet_get',)})
def test_subnet_detail(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:subnets:detail',
args=[subnet.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/networks/subnets/detail.html')
self.assertEqual(res.context['subnet'].id, subnet.id)
@test.create_stubs({api.neutron: ('subnet_get',)})
def test_subnet_detail_exception(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:subnets:detail',
args=[subnet.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_get(self):
network = self.networks.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
self.mox.ReplayAll()
url = reverse('horizon:project:networks:addsubnet',
args=[network.id])
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post_with_additional_attributes(self):
network = self.networks.list()[1]
subnet = self.subnets.list()[1]
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools,
dns_nameservers=subnet.dns_nameservers,
host_routes=subnet.host_routes)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post_with_additional_attributes_no_gateway(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=None,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet, gateway_ip=None)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post_network_exception(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
allocation_pools=[])
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post_subnet_exception(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
allocation_pools=[])
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_cidr_inconsistent(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
self.mox.ReplayAll()
# dummy IPv6 address
cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60'
form_data = form_data_subnet(subnet, cidr=cidr,
allocation_pools=[])
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
expected_msg = 'Network Address and IP version are inconsistent.'
self.assertFormErrors(res, 1, expected_msg)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_gw_inconsistent(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
self.mox.ReplayAll()
# dummy IPv6 address
gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
form_data = form_data_subnet(subnet, gateway_ip=gateway_ip,
allocation_pools=[])
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_pools_start_only(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# Start only allocation_pools
allocation_pools = '10.0.0.2'
form_data = form_data_subnet(subnet,
allocation_pools=allocation_pools)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Start and end addresses must be specified '
'(value=%s)' % allocation_pools)
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_pools_three_entries(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# pool with three entries
allocation_pools = '10.0.0.2,10.0.0.3,10.0.0.4'
form_data = form_data_subnet(subnet,
allocation_pools=allocation_pools)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Start and end addresses must be specified '
'(value=%s)' % allocation_pools)
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_pools_invalid_address(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# end address is not a valid IP address
allocation_pools = '10.0.0.2,invalid_address'
form_data = form_data_subnet(subnet,
allocation_pools=allocation_pools)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'allocation_pools: Invalid IP address '
'(value=%s)' % allocation_pools.split(',')[1])
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_pools_ip_network(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# start address is CIDR
allocation_pools = '10.0.0.2/24,10.0.0.5'
form_data = form_data_subnet(subnet,
allocation_pools=allocation_pools)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'allocation_pools: Invalid IP address '
'(value=%s)' % allocation_pools.split(',')[0])
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_pools_start_larger_than_end(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# start address is larger than end address
allocation_pools = '10.0.0.254,10.0.0.2'
form_data = form_data_subnet(subnet,
allocation_pools=allocation_pools)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Start address is larger than end address '
'(value=%s)' % allocation_pools)
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_nameservers(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# invalid DNS server address
dns_nameservers = ['192.168.0.2', 'invalid_address']
form_data = form_data_subnet(subnet, dns_nameservers=dns_nameservers,
allocation_pools=[])
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'dns_nameservers: Invalid IP address '
'(value=%s)' % dns_nameservers[1])
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_routes_destination_only(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# Start only host_route
host_routes = '192.168.0.0/24'
form_data = form_data_subnet(subnet,
allocation_pools=[],
host_routes=host_routes)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)' % host_routes)
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_routes_three_entries(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# host_route with three entries
host_routes = 'aaaa,bbbb,cccc'
form_data = form_data_subnet(subnet,
allocation_pools=[],
host_routes=host_routes)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)' % host_routes)
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_routes_invalid_destination(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# invalid destination network
host_routes = '172.16.0.0/64,10.0.0.253'
form_data = form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[])
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[0])
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_routes_nexthop_ip_network(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# nexthop is not an IP address
host_routes = '172.16.0.0/24,10.0.0.253/24'
form_data = form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[])
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[1])
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_v6subnet_create_post(self):
network = self.networks.get(name="v6_net1")
subnet = self.subnets.get(name="v6_subnet1")
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(network)
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_v6subnet_create_post_with_slaac_attributes(self):
network = self.networks.get(name="v6_net2")
subnet = self.subnets.get(name="v6_subnet2")
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(network)
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools,
ipv6_address_mode='slaac',
ipv6_ra_mode='slaac')\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
name=subnet.name,
enable_dhcp=subnet.enable_dhcp,
dns_nameservers=[],
host_routes=[])\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_with_gateway_ip(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
gateway_ip = '10.0.0.100'
api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
name=subnet.name,
gateway_ip=gateway_ip,
enable_dhcp=subnet.enable_dhcp,
dns_nameservers=[],
host_routes=[])\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
gateway_ip=gateway_ip,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_no_gateway(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
name=subnet.name,
gateway_ip=None,
enable_dhcp=subnet.enable_dhcp,
dns_nameservers=[],
host_routes=[])\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
gateway_ip=None,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_with_additional_attributes(self):
subnet = self.subnets.list()[1]
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
name=subnet.name,
enable_dhcp=False,
dns_nameservers=subnet.dns_nameservers,
host_routes=subnet.host_routes)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
enable_dhcp=False)
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_gw_inconsistent(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# dummy IPv6 address
gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
form_data = form_data_subnet(subnet, gateway_ip=gateway_ip,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_nameservers(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# invalid DNS server address
dns_nameservers = ['192.168.0.2', 'invalid_address']
form_data = form_data_subnet(subnet, dns_nameservers=dns_nameservers,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'dns_nameservers: Invalid IP address '
'(value=%s)' % dns_nameservers[1])
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_destination_only(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# Start only host_route
host_routes = '192.168.0.0/24'
form_data = form_data_subnet(subnet,
allocation_pools=[],
host_routes=host_routes)
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)' % host_routes)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_three_entries(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# host_route with three entries
host_routes = 'aaaa,bbbb,cccc'
form_data = form_data_subnet(subnet,
allocation_pools=[],
host_routes=host_routes)
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)' % host_routes)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_invalid_destination(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# invalid destination network
host_routes = '172.16.0.0/64,10.0.0.253'
form_data = form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[0])
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_nexthop_ip_network(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# nexthop is not an IP address
host_routes = '172.16.0.0/24,10.0.0.253/24'
form_data = form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[1])
@test.create_stubs({api.neutron: ('subnet_delete',
'subnet_list',
'network_get',
'port_list',
'is_extension_supported',)})
def test_subnet_delete(self):
self._test_subnet_delete()
@test.create_stubs({api.neutron: ('subnet_delete',
'subnet_list',
'network_get',
'port_list',
'is_extension_supported',)})
def test_subnet_delete_with_mac_learning(self):
self._test_subnet_delete(mac_learning=True)
def _test_subnet_delete(self, mac_learning=False):
subnet = self.subnets.first()
network_id = subnet.network_id
api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
# Called from SubnetTable
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
form_data = {'action': 'subnets__delete__%s' % subnet.id}
url = reverse('horizon:project:networks:detail',
args=[network_id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
@test.create_stubs({api.neutron: ('subnet_delete',
'subnet_list',
'network_get',
'port_list',
'is_extension_supported',)})
def test_subnet_delete_exception(self):
self._test_subnet_delete_exception()
@test.create_stubs({api.neutron: ('subnet_delete',
'subnet_list',
'network_get',
'port_list',
'is_extension_supported',)})
def test_subnet_delete_exception_with_mac_learning(self):
self._test_subnet_delete_exception(mac_learning=True)
def _test_subnet_delete_exception(self, mac_learning=False):
subnet = self.subnets.first()
network_id = subnet.network_id
api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)\
.AndRaise(self.exceptions.neutron)
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
# Called from SubnetTable
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
form_data = {'action': 'subnets__delete__%s' % subnet.id}
url = reverse('horizon:project:networks:detail',
args=[network_id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
class NetworkPortTests(test.TestCase):
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',)})
def test_port_detail(self):
self._test_port_detail()
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',)})
def test_port_detail_with_mac_learning(self):
self._test_port_detail(mac_learning=True)
def _test_port_detail(self, mac_learning=False):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(self.ports.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:ports:detail',
args=[port.id]))
self.assertTemplateUsed(res, 'project/networks/ports/detail.html')
self.assertEqual(res.context['port'].id, port.id)
@test.create_stubs({api.neutron: ('port_get',)})
def test_port_detail_exception(self):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:ports:detail',
args=[port.id]))
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',)})
def test_port_update_get(self):
self._test_port_update_get()
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',)})
def test_port_update_get_with_mac_learning(self):
self._test_port_update_get(mac_learning=True)
def _test_port_update_get(self, mac_learning=False):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest),
port.id)\
.AndReturn(port)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:editport',
args=[port.network_id, port.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/networks/ports/update.html')
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',
'port_update')})
def test_port_update_post(self):
self._test_port_update_post()
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',
'port_update')})
def test_port_update_post_with_mac_learning(self):
self._test_port_update_post(mac_learning=True)
def _test_port_update_post(self, mac_learning=False):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(port)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
extension_kwargs = {}
if mac_learning:
extension_kwargs['mac_learning_enabled'] = True
api.neutron.port_update(IsA(http.HttpRequest), port.id,
name=port.name,
admin_state_up=port.admin_state_up,
**extension_kwargs)\
.AndReturn(port)
self.mox.ReplayAll()
form_data = {'network_id': port.network_id,
'port_id': port.id,
'name': port.name,
'admin_state': port.admin_state_up}
if mac_learning:
form_data['mac_state'] = True
url = reverse('horizon:project:networks:editport',
args=[port.network_id, port.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[port.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',
'port_update')})
def test_port_update_post_exception(self):
self._test_port_update_post_exception()
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',
'port_update')})
def test_port_update_post_exception_with_mac_learning(self):
self._test_port_update_post_exception(mac_learning=True)
def _test_port_update_post_exception(self, mac_learning=False):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(port)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
extension_kwargs = {}
if mac_learning:
extension_kwargs['mac_learning_enabled'] = True
api.neutron.port_update(IsA(http.HttpRequest), port.id,
name=port.name,
admin_state_up=port.admin_state_up,
**extension_kwargs)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'network_id': port.network_id,
'port_id': port.id,
'name': port.name,
'admin_state': port.admin_state_up}
if mac_learning:
form_data['mac_state'] = True
url = reverse('horizon:project:networks:editport',
args=[port.network_id, port.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[port.network_id])
self.assertRedirectsNoFollow(res, redir_url)
class NetworkViewTests(test.TestCase):
@test.create_stubs({api.neutron: ('network_list',),
quotas: ('tenant_quota_usages',)})
def test_create_button_disabled_when_quota_exceeded(self):
quota_data = self.quota_usages.first()
quota_data['networks']['available'] = 0
api.neutron.network_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/networks/index.html')
networks = res.context['networks_table'].data
self.assertItemsEqual(networks, self.networks.list())
create_link = tables.CreateNetwork()
url = create_link.get_link_url()
classes = (list(create_link.get_default_classes())
+ list(create_link.classes))
link_name = "%s (%s)" % (unicode(create_link.verbose_name),
"Quota exceeded")
expected_string = "<a href='%s' title='%s' class='%s disabled' "\
"id='networks__action_create'>" \
"<span class='glyphicon glyphicon-plus'></span>%s</a>" \
% (url, link_name, " ".join(classes), link_name)
self.assertContains(res, expected_string, html=True,
msg_prefix="The create button is not disabled")
# Copyright 2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import nnabla as nn
import nnabla.function as _F
import nnabla.functions as F
import numpy as np
from nnabla.function import PythonFunction
from .utils import force_tuple
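# The two helpers below implement the double backward (second-order gradient)
# of batch normalization. With batch statistics, the forward pass computes
#   mu = mean(x), var = mean(x ** 2) - mu ** 2,
#   x_hat = (x - mu) / sqrt(var + eps), y = gamma * x_hat + beta,
# where the means reduce over every axis except `axes`. Given the upstream
# gradients of the first-order gradients (g_dx0, g_db0, g_dg0), each helper
# returns the gradients propagated back to dy, x0, beta and gamma.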
def double_backward_for_batch(g_dx0, g_db0, g_dg0,
dy, x0, b0, g0, rm, rv,
axes, decay_rate, eps):
# Prerequisite
# axes reduced and denominator
axes0 = [a for a in range(x0.ndim)]
axes = [a+x0.ndim*(a < 0) for a in axes]
axes = list(set(axes0) - set(axes))
F_sum = partial(F.sum, axis=axes, keepdims=True)
F_mean = partial(F.mean, axis=axes, keepdims=True)
de = np.prod([x0.shape[a] for a in axes]) # denominator
bm = F_mean(x0) # batch mean
bv = F_mean(x0 ** 2.0) - bm ** 2.0 # batch variance
x0_bm = x0 - bm # x0 - batch mean
    v_eps_r1 = (bv + eps) ** -1.0  # (batch variance + eps) ** (-1)
    v_eps_rsqrt1 = (bv + eps) ** (-1.0 / 2.0)  # (batch variance + eps) ** (-1/2)
    v_eps_rsqrt3 = v_eps_rsqrt1 ** 3.0  # (batch variance + eps) ** (-3/2)
# common factors
dy_x0_bm_sum = F_sum(dy * x0_bm)
dy_sum = F_sum(dy)
g_dx0_x0_bm_sum = F_sum(g_dx0 * x0_bm)
g_dx0_sum = F_sum(g_dx0)
# wrt. x
# from dx
dv = (-1.0 / 2.0) * g0 * v_eps_rsqrt3 * F_sum(dy * x0_bm)
g_dx0_dy_sum = F_sum(g_dx0 * dy)
g1 = (-1.0 / de) * v_eps_rsqrt3 * g_dx0_dy_sum * g0 * x0_bm
g2 = (1.0 / de) * g0 * g_dx0_x0_bm_sum * v_eps_rsqrt3 * (1.0 / de * dy_sum - dy
+ (3.0 / de) * v_eps_r1 * dy_x0_bm_sum * x0_bm)
g2 += (2.0 / de) * dv * (g_dx0 - (1.0 / de) * g_dx0_sum)
g3 = (1.0 / de ** 2.0) * g_dx0_sum * dy_sum * g0 * v_eps_rsqrt3 * x0_bm
g_x0 = g1 + g2 + g3
# from gamma
t1 = (dy - dy_sum / de) * v_eps_rsqrt1
t2 = (- 1.0 / de) * dy_x0_bm_sum * v_eps_rsqrt3 * x0_bm
g_x0 += g_dg0 * (t1 + t2)
# wrt. beta
# zero, do nothing
# wrt. gamma
t1 = dy * v_eps_rsqrt1
t2 = (- 1.0 / de) * dy_x0_bm_sum * v_eps_rsqrt3 * x0_bm
t3 = (- 1.0 / de) * dy_sum * v_eps_rsqrt1
g_g0 = F_sum(g_dx0 * (t1 + t2 + t3))
# wrt. dy
t1 = g_dx0 * g0 * v_eps_rsqrt1
t2 = - (1.0 / de) * g0 * v_eps_rsqrt3 * g_dx0_x0_bm_sum * x0_bm
t3 = - (1.0 / de) * g0 * v_eps_rsqrt1 * g_dx0_sum
x0_hat = x0_bm * v_eps_rsqrt1
g_dy = (t1 + t2 + t3) + g_dg0 * x0_hat + g_db0
return g_dy, g_x0, None, g_g0
def double_backward_for_global(g_dx0, g_db0, g_dg0,
dy, x0, b0, g0, rm, rv,
axes, decay_rate, eps):
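    # Global (inference) statistics case: the running mean/variance rm and rv
    # are constants, so y = gamma * (x - rm) / sqrt(rv + eps) + beta is affine
    # in x and the cross terms of the batch-statistics case vanish; only the
    # terms below remain.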
# Prerequisite
# axes reduced and denominator
axes0 = [a for a in range(x0.ndim)]
axes = list(set(axes0) - set(axes))
    # (running variance + eps) ** (-1/2)
    v_eps_rsqrt1 = (rv + eps) ** (-1.0 / 2.0)
# wrt. x
g_x0 = g_dg0 * dy * v_eps_rsqrt1
# wrt. beta
# zero, do nothing
# wrt. gamma
g_g0 = F.sum(g_dx0 * dy * v_eps_rsqrt1, axes, True)
# no backward wrt. rm and rv
# wrt. dy
g_dy = g_dx0 * g0 * v_eps_rsqrt1 \
+ g_dg0 * (x0 - rm) * v_eps_rsqrt1 + g_db0
return g_dy, g_x0, None, g_g0
class BatchNormalizationBackward(PythonFunction):
def __init__(self, ctx, axes=[], decay_rate=0.9, eps=1e-05, batch_stat=True,
no_scale=False, no_bias=False):
super(BatchNormalizationBackward, self).__init__(ctx)
self._func = _F.BatchNormalization(
ctx, axes, decay_rate, eps, batch_stat, no_scale, no_bias)
self.axes = axes
self.decay_rate = decay_rate
self.eps = eps
self.batch_stat = batch_stat
self.no_scale = no_scale
self.no_bias = no_bias
# Variable indices
self.dy_idx = 0
self.x0_idx = 1
self.b0_idx = None
self.g0_idx = None
self.rm_idx = None
self.rv_idx = None
v_idx = 2
if not no_bias:
self.b0_idx = v_idx
v_idx += 1
if not no_scale:
self.g0_idx = v_idx
v_idx += 1
self.rm_idx = v_idx
self.rv_idx = self.rm_idx + 1
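        # The wrapped forward function takes (x, beta, gamma, rmean, rvar)
        # while this backward function takes (dy, x, beta, gamma, rmean,
        # rvar), so each variable's forward-side index is its index here
        # minus one (hence the `idx - 1` offsets used throughout this class).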
@property
def name(self):
return self.__class__.__name__
@property
def args(self):
return self._func.args
def _create_fwd_inputs_outputs(self, inputs, outputs):
# inputs_fwd: x, (beta, gamma), rmean, rvar
# outputs_fwd: y(, bmean, bvar)
x0 = nn.Variable(inputs[self.x0_idx].shape).apply(need_grad=True)
b0 = nn.Variable(inputs[self.b0_idx].shape).apply(
need_grad=True) if not self.no_bias else None
g0 = nn.Variable(inputs[self.g0_idx].shape).apply(
need_grad=True) if not self.no_scale else None
rm = nn.Variable(inputs[self.rm_idx].shape).apply(need_grad=False)
rv = nn.Variable(inputs[self.rv_idx].shape).apply(need_grad=False)
inputs_fwd = list(
filter(lambda v: v is not None, [x0, b0, g0, rm, rv]))
oshape = inputs[0].shape
outputs_fwd = [nn.Variable(oshape)]
return inputs_fwd, outputs_fwd
def min_inputs(self):
return 4 # dy, x0, (b0, g0), rm, rv
def min_outputs(self):
return self.rm_idx - 1
def grad_depends_output_data(self, i, o):
return False
def grad_depends_input_data(self, i, j):
return True
def setup_impl(self, inputs, outputs):
# inputs: dy, x, beta, gamma, rmean, rvar
# outputs: dx, dbeta, dgamma
inputs_fwd, outputs_fwd = self._create_fwd_inputs_outputs(
inputs, outputs)
self.axes = [a + inputs_fwd[0].ndim*(a < 0) for a in self.axes]
self._func.setup(inputs_fwd, outputs_fwd)
dx_shape = inputs_fwd[self.x0_idx - 1].shape
db_shape = inputs_fwd[self.b0_idx -
1].shape if not self.no_bias else None
dg_shape = inputs_fwd[self.g0_idx -
1].shape if not self.no_scale else None
outputs[self.x0_idx - 1].reset_shape(dx_shape, True)
        if not self.no_bias:
            outputs[self.b0_idx - 1].reset_shape(db_shape, True)
        if not self.no_scale:
            outputs[self.g0_idx - 1].reset_shape(dg_shape, True)
def forward_impl(self, inputs, outputs):
inputs_fwd, outputs_fwd = self._create_fwd_inputs_outputs(
inputs, outputs)
# BN data
x0 = inputs[self.x0_idx].data
b0 = inputs[self.b0_idx].data if not self.no_bias else None
g0 = inputs[self.g0_idx].data if not self.no_scale else None
inputs_fwd[self.x0_idx - 1].data = x0
if not self.no_bias:
inputs_fwd[self.b0_idx - 1].data = b0
if not self.no_scale:
inputs_fwd[self.g0_idx - 1].data = g0
if not self.batch_stat:
rm = inputs[self.rm_idx].data
rv = inputs[self.rv_idx].data
inputs_fwd[self.rm_idx - 1].data = rm
inputs_fwd[self.rv_idx - 1].data = rv
# BN grad
dx0 = outputs[self.x0_idx - 1].data
inputs_fwd[self.x0_idx - 1].grad = dx0
if not self.no_bias:
db0 = outputs[self.b0_idx - 1].data
inputs_fwd[self.b0_idx - 1].grad = db0
if not self.no_scale:
dg0 = outputs[self.g0_idx - 1].data
inputs_fwd[self.g0_idx - 1].grad = dg0
dy = inputs[0].data
outputs_fwd[0].grad = dy
# BN backward
        if self.batch_stat and "cudnn" not in self.ctx.backend:
            self._func.forward(inputs_fwd, outputs_fwd)
        self._func.backward(inputs_fwd, outputs_fwd, [False] * len(inputs_fwd))
def backward_impl(self, inputs, outputs, propagate_down, accum):
g_dx0 = nn.Variable(
outputs[self.x0_idx - 1].shape).apply(data=outputs[0].grad)
g_db0 = nn.Variable(outputs[self.b0_idx - 1].shape).apply(data=outputs[self.b0_idx - 1].grad) \
if not self.no_bias else 0
g_dg0 = nn.Variable(outputs[self.g0_idx - 1].shape).apply(data=outputs[self.g0_idx - 1].grad) \
if not self.no_scale else 0
dy = nn.Variable(inputs[0].shape).apply(
data=inputs[0].data, need_grad=True)
x0 = nn.Variable(inputs[1].shape).apply(
data=inputs[1].data, need_grad=True)
b0 = nn.Variable(inputs[self.b0_idx].shape).apply(
data=inputs[self.b0_idx].data, need_grad=True) if not self.no_bias else 0
g0 = nn.Variable(inputs[self.g0_idx].shape).apply(
data=inputs[self.g0_idx].data, need_grad=True) if not self.no_scale else 1
rm = nn.Variable(inputs[self.rm_idx].shape).apply(
data=inputs[self.rm_idx].data)
rv = nn.Variable(inputs[self.rv_idx].shape).apply(
data=inputs[self.rv_idx].data)
double_backward = double_backward_for_batch \
if self.batch_stat else double_backward_for_global
with nn.auto_forward():
g_dy_, g_x0_, g_b0_, g_g0_ = double_backward(g_dx0, g_db0, g_dg0,
dy, x0, b0, g0, rm, rv,
self.axes, self.decay_rate, self.eps)
g_dy = inputs[0].grad
g_x0 = inputs[1].grad
g_g0 = inputs[self.g0_idx].grad if not self.no_scale else 0
# wrt dy
if propagate_down[0]:
if accum[0]:
g_dy += g_dy_.data
else:
g_dy.copy_from(g_dy_.data)
# wrt x
if propagate_down[1]:
if accum[1]:
g_x0 += g_x0_.data
else:
g_x0.copy_from(g_x0_.data)
# wrt g
if not self.no_scale and propagate_down[self.g0_idx]:
if accum[self.g0_idx]:
g_g0 += g_g0_.data
else:
g_g0.copy_from(g_g0_.data)
def batch_normalization_backward(inputs, axes=(1,), decay_rate=0.9, eps=1e-05,
batch_stat=True, no_scale=False, no_bias=False):
"""
Args:
inputs (list of nn.Variable): Incomming grads/inputs to/of the forward function.
kwargs (dict of arguments): Dictionary of the corresponding function arguments.
Return:
list of Variable: Return the gradients wrt inputs of the corresponding function.
"""
ctx = nn.get_current_context()
axes = [a+inputs[1].ndim*(a < 0) for a in axes]
df = BatchNormalizationBackward(
ctx, axes, decay_rate, eps, batch_stat, no_scale, no_bias)
d_inputs = df(*inputs)
return force_tuple(d_inputs) + (None, None)
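# Minimal usage sketch (a hedged example, not part of this module): these
# backward functions are normally wired up by nnabla's nn.grad machinery
# rather than called directly. Assuming a plain batch-normalized graph,
# second-order gradients exercise the double-backward path above, e.g.:
#
#   import numpy as np
#   import nnabla as nn
#   import nnabla.functions as F
#   import nnabla.parametric_functions as PF
#
#   x = nn.Variable((8, 16, 4, 4), need_grad=True)
#   x.d = np.random.randn(*x.shape)
#   y = PF.batch_normalization(x, batch_stat=True)
#   loss = F.sum(y ** 2)
#   gx = nn.grad([loss], [x])[0]   # first-order gradient as a graph
#   gloss = F.sum(gx ** 2)
#   gloss.forward()
#   gloss.backward()               # second-order gradient w.r.t. x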
def batch_normalization_backward_backward(inputs, axes=(1,), decay_rate=0.9, eps=1e-05,
batch_stat=True, no_scale=False, no_bias=False):
"""
Args:
inputs (list of nn.Variable): Incomming grads/inputs to/of the forward function.
kwargs (dict of arguments): Dictionary of the corresponding function arguments.
Return:
list of Variable: Return the gradients wrt inputs of the corresponding function.
"""
axes = [a+inputs[1].ndim*(a < 0) for a in axes]
# Align variable indices
v_idx = 1
x_idx = 0
if not no_bias:
g_db0_idx = v_idx
v_idx += 1
if not no_scale:
g_dg0_idx = v_idx
v_idx += 1
dy_idx = v_idx
v_idx += 1
x0_idx = v_idx
v_idx += 1
if not no_bias:
b0_idx = v_idx
v_idx += 1
if not no_scale:
g0_idx = v_idx
v_idx += 1
rm_idx = v_idx
rv_idx = rm_idx + 1
# Indexing
g_dx0 = inputs[0]
g_db0 = inputs[g_db0_idx] if not no_bias else 0
g_dg0 = inputs[g_dg0_idx] if not no_scale else 0
dy = inputs[dy_idx]
x0 = inputs[x0_idx]
b0 = inputs[b0_idx] if not no_bias else 0
g0 = inputs[g0_idx] if not no_scale else 1
rm = inputs[rm_idx]
rv = inputs[rv_idx]
double_backward = double_backward_for_batch \
if batch_stat else double_backward_for_global
g_dy, g_x0, g_b0, g_g0 = double_backward(g_dx0, g_db0, g_dg0,
dy, x0, b0, g0, rm, rv,
axes, decay_rate, eps)
    # Align the returned grads with the inputs of the first-order backward:
    # dy, x0, (b0), (g0), rm, rv.
    if not no_bias and not no_scale:
        return g_dy, g_x0, None, g_g0, None, None
    if no_bias and not no_scale:
        return g_dy, g_x0, g_g0, None, None
    if not no_bias and no_scale:
        return g_dy, g_x0, None, None, None
    return g_dy, g_x0, None, None
import collections
import datetime
import mock
import pytz
from babel import dates, Locale
from schema import Schema, And, Use, Or
from django.utils import timezone
from osf.modm_compat import Q
from modularodm.exceptions import NoResultsFound
from nose.tools import * # noqa PEP8 asserts
from framework.auth import Auth
from osf.models import Node, Comment, NotificationDigest, NotificationSubscription, Guid, OSFUser
from website.notifications.tasks import get_users_emails, send_users_email, group_by_node, remove_notifications
from website.notifications import constants
from website.notifications import emails
from website.notifications import utils
from website import mails, settings
from website.project.signals import contributor_removed, node_deleted
from website.util import api_url_for
from website.util import web_url_for
from osf_tests import factories
from tests.base import capture_signals
from tests.base import OsfTestCase, NotificationTestCase
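# Naming convention exercised throughout these tests: per-node
# NotificationSubscription ids are '<node._id>_<event_name>' (for example
# '<node_id>_file_updated'), while per-user global subscriptions use
# '<user._id>_global_<event>' (for example '<user_id>_global_comments').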
class TestNotificationsModels(OsfTestCase):
def setUp(self):
super(TestNotificationsModels, self).setUp()
# Create project with component
self.user = factories.UserFactory()
self.consolidate_auth = Auth(user=self.user)
self.parent = factories.ProjectFactory(creator=self.user)
self.node = factories.NodeFactory(creator=self.user, parent=self.parent)
def test_has_permission_on_children(self):
non_admin_user = factories.UserFactory()
parent = factories.ProjectFactory()
parent.add_contributor(contributor=non_admin_user, permissions=['read'])
parent.save()
node = factories.NodeFactory(parent=parent, category='project')
sub_component = factories.NodeFactory(parent=node)
sub_component.add_contributor(contributor=non_admin_user)
sub_component.save()
sub_component2 = factories.NodeFactory(parent=node)
assert_true(
node.has_permission_on_children(non_admin_user, 'read')
)
def test_check_user_has_permission_excludes_deleted_components(self):
non_admin_user = factories.UserFactory()
parent = factories.ProjectFactory()
parent.add_contributor(contributor=non_admin_user, permissions=['read'])
parent.save()
node = factories.NodeFactory(parent=parent, category='project')
sub_component = factories.NodeFactory(parent=node)
sub_component.add_contributor(contributor=non_admin_user)
sub_component.is_deleted = True
sub_component.save()
sub_component2 = factories.NodeFactory(parent=node)
assert_false(
node.has_permission_on_children(non_admin_user,'read')
)
def test_check_user_does_not_have_permission_on_private_node_child(self):
non_admin_user = factories.UserFactory()
parent = factories.ProjectFactory()
parent.add_contributor(contributor=non_admin_user, permissions=['read'])
parent.save()
node = factories.NodeFactory(parent=parent, category='project')
sub_component = factories.NodeFactory(parent=node)
assert_false(
node.has_permission_on_children(non_admin_user,'read')
)
def test_check_user_child_node_permissions_false_if_no_children(self):
non_admin_user = factories.UserFactory()
parent = factories.ProjectFactory()
parent.add_contributor(contributor=non_admin_user, permissions=['read'])
parent.save()
node = factories.NodeFactory(parent=parent, category='project')
assert_false(
node.has_permission_on_children(non_admin_user,'read')
)
def test_check_admin_has_permissions_on_private_component(self):
parent = factories.ProjectFactory()
node = factories.NodeFactory(parent=parent, category='project')
sub_component = factories.NodeFactory(parent=node)
assert_true(
node.has_permission_on_children(parent.creator,'read')
)
def test_check_user_private_node_child_permissions_excludes_pointers(self):
user = factories.UserFactory()
parent = factories.ProjectFactory()
pointed = factories.ProjectFactory(creator=user)
parent.add_pointer(pointed, Auth(parent.creator))
parent.save()
assert_false(
parent.has_permission_on_children(user,'read')
)
def test_new_project_creator_is_subscribed(self):
user = factories.UserFactory()
factories.ProjectFactory(creator=user)
user_subscriptions = list(utils.get_all_user_subscriptions(user))
event_types = [sub.event_name for sub in user_subscriptions]
assert_equal(len(user_subscriptions), 2) # subscribed to both file_updated and comments
assert_in('file_updated', event_types)
assert_in('comments', event_types)
def test_new_node_creator_is_not_subscribed(self):
user = factories.UserFactory()
factories.NodeFactory(creator=user)
user_subscriptions = list(utils.get_all_user_subscriptions(user))
assert_equal(len(user_subscriptions), 0)
def test_new_project_creator_is_subscribed_with_global_settings(self):
user = factories.UserFactory()
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_comments',
user=user,
event_name='global_comments'
).add_user_to_subscription(user, 'email_digest')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_file_updated',
user=user,
event_name='global_file_updated'
).add_user_to_subscription(user, 'none')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_mentions',
user=user,
event_name='global_mentions'
).add_user_to_subscription(user, 'email_digest')
node = factories.ProjectFactory(creator=user)
user_subscriptions = list(utils.get_all_user_subscriptions(user))
event_types = [sub.event_name for sub in user_subscriptions]
file_updated_subscription = NotificationSubscription.find_one(Q('_id', 'eq', node._id + '_file_updated'))
comments_subscription = NotificationSubscription.find_one(Q('_id', 'eq', node._id + '_comments'))
assert_equal(len(user_subscriptions), 5) # subscribed to both node and user settings
assert_in('file_updated', event_types)
assert_in('comments', event_types)
assert_in('global_file_updated', event_types)
assert_in('global_comments', event_types)
assert_in('global_mentions', event_types)
assert_equal(file_updated_subscription.none.count(), 1)
assert_equal(file_updated_subscription.email_transactional.count(), 0)
assert_equal(comments_subscription.email_digest.count(), 1)
assert_equal(comments_subscription.email_transactional.count(), 0)
def test_new_node_creator_is_not_subscribed_with_global_settings(self):
user = factories.UserFactory()
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_comments',
user=user,
event_name='global_comments'
).add_user_to_subscription(user, 'email_digest')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_file_updated',
user=user,
event_name='global_file_updated'
).add_user_to_subscription(user, 'none')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_comment_replies',
user=user,
event_name='global_comment_replies'
).add_user_to_subscription(user, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_mentions',
user=user,
event_name='global_mentions'
).add_user_to_subscription(user, 'email_transactional')
node = factories.NodeFactory(creator=user)
user_subscriptions = list(utils.get_all_user_subscriptions(user))
event_types = [sub.event_name for sub in user_subscriptions]
assert_equal(len(user_subscriptions), 4) # subscribed to only user settings
assert_in('global_file_updated', event_types)
assert_in('global_comments', event_types)
assert_in('global_comment_replies', event_types)
assert_in('global_mentions', event_types)
def test_subscribe_user_to_global_notifications(self):
user = factories.UserFactory()
utils.subscribe_user_to_global_notifications(user)
subscription_event_names = list(user.notification_subscriptions.values_list('event_name', flat=True))
for event_name in constants.USER_SUBSCRIPTIONS_AVAILABLE:
assert_in(event_name, subscription_event_names)
def test_new_project_creator_is_subscribed_with_default_global_settings(self):
user = factories.UserFactory()
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_comments',
user=user,
event_name='global_comments'
).add_user_to_subscription(user, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_file_updated',
user=user,
event_name='global_file_updated'
).add_user_to_subscription(user, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_comment_replies',
user=user,
event_name='global_comment_replies'
).add_user_to_subscription(user, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_mentions',
user=user,
event_name='global_mentions'
).add_user_to_subscription(user, 'email_transactional')
node = factories.ProjectFactory(creator=user)
user_subscriptions = list(utils.get_all_user_subscriptions(user))
event_types = [sub.event_name for sub in user_subscriptions]
file_updated_subscription = NotificationSubscription.find_one(Q('_id', 'eq', node._id + '_file_updated'))
comments_subscription = NotificationSubscription.find_one(Q('_id', 'eq', node._id + '_comments'))
assert_equal(len(user_subscriptions), 6) # subscribed to both node and user settings
assert_in('file_updated', event_types)
assert_in('comments', event_types)
assert_in('global_file_updated', event_types)
assert_in('global_comments', event_types)
assert_in('global_comment_replies', event_types)
assert_in('global_mentions', event_types)
assert_equal(file_updated_subscription.email_transactional.count(), 1)
assert_equal(comments_subscription.email_transactional.count(), 1)
def test_new_fork_creator_is_subscribed_with_default_global_settings(self):
user = factories.UserFactory()
project = factories.ProjectFactory(creator=user)
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_comments',
user=user,
event_name='global_comments'
).add_user_to_subscription(user, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_file_updated',
user=user,
event_name='global_file_updated'
).add_user_to_subscription(user, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_mentions',
user=user,
event_name='global_mentions'
).add_user_to_subscription(user, 'email_transactional')
node = factories.ForkFactory(project=project)
user_subscriptions = list(utils.get_all_user_subscriptions(user))
event_types = [sub.event_name for sub in user_subscriptions]
node_file_updated_subscription = NotificationSubscription.find_one(Q('_id', 'eq', node._id + '_file_updated'))
node_comments_subscription = NotificationSubscription.find_one(Q('_id', 'eq', node._id + '_comments'))
project_file_updated_subscription = NotificationSubscription.find_one(Q('_id', 'eq', project._id + '_file_updated'))
project_comments_subscription = NotificationSubscription.find_one(Q('_id', 'eq', project._id + '_comments'))
assert_equal(len(user_subscriptions), 7) # subscribed to project, fork, and user settings
assert_in('file_updated', event_types)
assert_in('comments', event_types)
assert_in('global_file_updated', event_types)
assert_in('global_comments', event_types)
assert_in('global_mentions', event_types)
assert_equal(node_file_updated_subscription.email_transactional.count(), 1)
assert_equal(node_comments_subscription.email_transactional.count(), 1)
assert_equal(project_file_updated_subscription.email_transactional.count(), 1)
assert_equal(project_comments_subscription.email_transactional.count(), 1)
def test_new_node_creator_is_not_subscribed_with_default_global_settings(self):
user = factories.UserFactory()
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_comments',
user=user,
event_name='global_comments'
).add_user_to_subscription(user, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_file_updated',
user=user,
event_name='global_file_updated'
).add_user_to_subscription(user, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_comment_replies',
user=user,
event_name='global_comment_replies'
).add_user_to_subscription(user, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=user._id + '_' + 'global_mentions',
user=user,
event_name='global_mentions'
).add_user_to_subscription(user, 'email_transactional')
node = factories.NodeFactory(creator=user)
user_subscriptions = list(utils.get_all_user_subscriptions(user))
event_types = [sub.event_name for sub in user_subscriptions]
assert_equal(len(user_subscriptions), 4) # subscribed to only user settings
assert_in('global_file_updated', event_types)
assert_in('global_comments', event_types)
assert_in('global_comment_replies', event_types)
assert_in('global_mentions', event_types)
def test_contributor_subscribed_when_added_to_project(self):
user = factories.UserFactory()
contributor = factories.UserFactory()
project = factories.ProjectFactory(creator=user)
project.add_contributor(contributor=contributor)
contributor_subscriptions = list(utils.get_all_user_subscriptions(contributor))
event_types = [sub.event_name for sub in contributor_subscriptions]
assert_equal(len(contributor_subscriptions), 2)
assert_in('file_updated', event_types)
assert_in('comments', event_types)
def test_contributor_subscribed_when_added_to_component(self):
user = factories.UserFactory()
contributor = factories.UserFactory()
factories.NotificationSubscriptionFactory(
_id=contributor._id + '_' + 'global_comments',
user=contributor,
event_name='global_comments'
).add_user_to_subscription(contributor, 'email_transactional')
factories.NotificationSubscriptionFactory(
_id=contributor._id + '_' + 'global_file_updated',
user=contributor,
event_name='global_file_updated'
).add_user_to_subscription(contributor, 'email_transactional')
node = factories.NodeFactory(creator=user)
node.add_contributor(contributor=contributor)
contributor_subscriptions = list(utils.get_all_user_subscriptions(contributor))
event_types = [sub.event_name for sub in contributor_subscriptions]
file_updated_subscription = NotificationSubscription.find_one(Q('_id', 'eq', node._id + '_file_updated'))
comments_subscription = NotificationSubscription.find_one(Q('_id', 'eq', node._id + '_comments'))
assert_equal(len(contributor_subscriptions), 4) # subscribed to both node and user settings
assert_in('file_updated', event_types)
assert_in('comments', event_types)
assert_in('global_file_updated', event_types)
assert_in('global_comments', event_types)
assert_equal(file_updated_subscription.email_transactional.count(), 1)
assert_equal(comments_subscription.email_transactional.count(), 1)
def test_unregistered_contributor_not_subscribed_when_added_to_project(self):
user = factories.UserFactory()
unregistered_contributor = factories.UnregUserFactory()
project = factories.ProjectFactory(creator=user)
project.add_contributor(contributor=unregistered_contributor)
contributor_subscriptions = list(utils.get_all_user_subscriptions(unregistered_contributor))
assert_equal(len(contributor_subscriptions), 0)
class TestSubscriptionView(OsfTestCase):
def setUp(self):
super(TestSubscriptionView, self).setUp()
self.node = factories.NodeFactory()
self.user = self.node.creator
def test_create_new_subscription(self):
payload = {
'id': self.node._id,
'event': 'comments',
'notification_type': 'email_transactional'
}
url = api_url_for('configure_subscription')
self.app.post_json(url, payload, auth=self.node.creator.auth)
# check that subscription was created
event_id = self.node._id + '_' + 'comments'
s = NotificationSubscription.find_one(Q('_id', 'eq', event_id))
# check that user was added to notification_type field
assert_equal(payload['id'], s.owner._id)
assert_equal(payload['event'], s.event_name)
assert_in(self.node.creator, getattr(s, payload['notification_type']).all())
# change subscription
new_payload = {
'id': self.node._id,
'event': 'comments',
'notification_type': 'email_digest'
}
url = api_url_for('configure_subscription')
self.app.post_json(url, new_payload, auth=self.node.creator.auth)
s.reload()
assert_false(self.node.creator in getattr(s, payload['notification_type']).all())
assert_in(self.node.creator, getattr(s, new_payload['notification_type']).all())
def test_adopt_parent_subscription_default(self):
payload = {
'id': self.node._id,
'event': 'comments',
'notification_type': 'adopt_parent'
}
url = api_url_for('configure_subscription')
self.app.post_json(url, payload, auth=self.node.creator.auth)
event_id = self.node._id + '_' + 'comments'
# confirm subscription was not created, since the parent still has default settings
s = NotificationSubscription.find(Q('_id', 'eq', event_id)).count()
assert_equal(0, s)
def test_change_subscription_to_adopt_parent_subscription_removes_user(self):
payload = {
'id': self.node._id,
'event': 'comments',
'notification_type': 'email_transactional'
}
url = api_url_for('configure_subscription')
self.app.post_json(url, payload, auth=self.node.creator.auth)
# check that subscription was created
event_id = self.node._id + '_' + 'comments'
s = NotificationSubscription.find_one(Q('_id', 'eq', event_id))
# change subscription to adopt_parent
new_payload = {
'id': self.node._id,
'event': 'comments',
'notification_type': 'adopt_parent'
}
url = api_url_for('configure_subscription')
self.app.post_json(url, new_payload, auth=self.node.creator.auth)
s.reload()
# assert that user is removed from the subscription entirely
for n in constants.NOTIFICATION_TYPES:
assert_false(self.node.creator in getattr(s, n).all())
def test_configure_subscription_adds_node_id_to_notifications_configured(self):
project = factories.ProjectFactory(creator=self.user)
assert_false(project._id in self.user.notifications_configured)
payload = {
'id': project._id,
'event': 'comments',
'notification_type': 'email_digest'
}
url = api_url_for('configure_subscription')
self.app.post_json(url, payload, auth=project.creator.auth)
self.user.reload()
assert_true(project._id in self.user.notifications_configured)
class TestRemoveContributor(OsfTestCase):
def setUp(self):
super(TestRemoveContributor, self).setUp()
self.project = factories.ProjectFactory()
self.contributor = factories.UserFactory()
self.project.add_contributor(contributor=self.contributor, permissions=['read'])
self.project.save()
self.subscription = NotificationSubscription.find_one(
Q('node', 'eq', self.project) &
Q('_id', 'eq', self.project._id + '_comments')
)
self.node = factories.NodeFactory(parent=self.project)
self.node.add_contributor(contributor=self.project.creator, permissions=['read', 'write', 'admin'])
self.node.save()
self.node_subscription = NotificationSubscription.find_one(Q(
'_id', 'eq', self.node._id + '_comments') & Q('node', 'eq', self.node)
)
self.node_subscription.add_user_to_subscription(self.node.creator, 'email_transactional')
def test_removed_non_admin_contributor_is_removed_from_subscriptions(self):
assert_in(self.contributor, self.subscription.email_transactional.all())
self.project.remove_contributor(self.contributor, auth=Auth(self.project.creator))
assert_not_in(self.contributor, self.project.contributors.all())
self.subscription.reload()
assert_not_in(self.contributor, self.subscription.email_transactional.all())
def test_removed_non_parent_admin_contributor_is_removed_from_subscriptions(self):
assert_in(self.node.creator, self.node_subscription.email_transactional.all())
self.node.remove_contributor(self.node.creator, auth=Auth(self.node.creator))
assert_not_in(self.node.creator, self.node.contributors.all())
self.node_subscription.reload()
assert_not_in(self.node.creator, self.node_subscription.email_transactional.all())
def test_removed_contributor_admin_on_parent_not_removed_from_node_subscription(self):
# Admin on parent project is removed as a contributor on a component. Check
# that admin is not removed from component subscriptions, as the admin
# now has read-only access.
assert_in(self.project.creator, self.node_subscription.email_transactional.all())
self.node.remove_contributor(self.project.creator, auth=Auth(self.project.creator))
assert_not_in(self.project.creator, self.node.contributors.all())
assert_in(self.project.creator, self.node_subscription.email_transactional.all())
def test_remove_contributor_signal_called_when_contributor_is_removed(self):
with capture_signals() as mock_signals:
self.project.remove_contributor(self.contributor, auth=Auth(self.project.creator))
assert_equal(mock_signals.signals_sent(), set([contributor_removed]))
class TestRemoveNodeSignal(OsfTestCase):
def test_node_subscriptions_and_backrefs_removed_when_node_is_deleted(self):
project = factories.ProjectFactory()
s = NotificationSubscription.find(Q('email_transactional', 'eq', project.creator))
assert_equal(s.count(), 2)
with capture_signals() as mock_signals:
project.remove_node(auth=Auth(project.creator))
assert_true(project.is_deleted)
assert_equal(mock_signals.signals_sent(), set([node_deleted]))
s = NotificationSubscription.find(Q('email_transactional', 'eq', project.creator))
assert_equal(s.count(), 0)
with assert_raises(NoResultsFound):
NotificationSubscription.find_one(Q('node', 'eq', project))
def list_or_dict(data):
# Generator that yields only the dict or list values contained in a dict or list
if isinstance(data, dict):
for key in data:
if isinstance(data[key], dict) or isinstance(data[key], list):
yield data[key]
elif isinstance(data, list):
for item in data:
if isinstance(item, dict) or isinstance(item, list):
yield item
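# Illustrative sketch (not part of the original test suite): list_or_dict()
# yields only the values of a dict or items of a list that are themselves
# dicts or lists, skipping scalars. For example, assuming a plain dict:
#
#     >>> list(list_or_dict({'a': 1, 'b': {'c': 2}, 'd': [3]}))
#     [{'c': 2}, [3]]        # order follows dict iteration order
#     >>> list(list_or_dict([1, 'x', {'c': 2}]))
#     [{'c': 2}]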
def has(data, sub_data):
# Recursively search for sub_data anywhere inside a nested data structure.
# WARNING: Don't use on huge structures
# :param data: data structure to search
# :param sub_data: the subset being looked for
# :return: True or False
try:
next(item for item in data if item == sub_data)
return True
except StopIteration:
lists_and_dicts = list_or_dict(data)
for item in lists_and_dicts:
if has(item, sub_data):
return True
return False
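# Illustrative sketch (not part of the original test suite): has() answers
# "does sub_data appear anywhere inside data?", recursing into nested lists
# and dicts via list_or_dict(). With made-up data:
#
#     >>> has([{'kind': 'event'}, {'children': [{'kind': 'node'}]}], {'kind': 'node'})
#     True
#     >>> has([{'kind': 'event'}], {'kind': 'heading'})
#     False
#
# The tests below use it to assert that a particular serialized event dict is
# present somewhere in the structure returned by utils.format_data().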
def subscription_schema(project, structure, level=0):
# builds a schema from a list of nodes and events
# :param project: project node used to derive the expected field types
# :param structure: list of nodes (another list) and events
# :return: schema
sub_list = []
for item in list_or_dict(structure):
sub_list.append(subscription_schema(project, item, level=level+1))
sub_list.append(event_schema(level))
node_schema = {
'node': {
'id': Use(type(project._id), error="node_id{}".format(level)),
'title': Use(type(project.title), error="node_title{}".format(level)),
'url': Use(type(project.url), error="node_url{}".format(level))
},
'kind': And(str, Use(lambda s: s in ('node', 'folder'),
error="kind didn't match node or folder {}".format(level))),
'nodeType': Use(lambda s: s in ('project', 'component'), error='nodeType not project or component'),
'category': Use(lambda s: s in settings.NODE_CATEGORY_MAP, error='category not in settings.NODE_CATEGORY_MAP'),
'permissions': {
'view': Use(lambda s: s in (True, False), error='view permissions is not True/False')
},
'children': sub_list
}
if level == 0:
return Schema([node_schema])
return node_schema
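# Illustrative sketch (not part of the original test suite): in the
# `structure` argument the string 'event' stands for one serialized event and
# a nested list stands for a child node, mirroring the node tree. The tests
# below use it roughly like this, assuming a project with one child component:
#
#     structure = ['event', ['event']]   # project event + child node with one event
#     schema = subscription_schema(project, structure)
#     schema.validate(utils.format_data(project.creator, [project]))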
def event_schema(level=None):
return {
'event': {
'title': And(Use(str, error="event_title{} not a string".format(level)),
Use(lambda s: s in constants.NOTIFICATION_TYPES,
error="event_title{} not in list".format(level))),
'description': And(Use(str, error="event_desc{} not a string".format(level)),
Use(lambda s: s in constants.NODE_SUBSCRIPTIONS_AVAILABLE,
error="event_desc{} not in list".format(level))),
'notificationType': And(str, Or('adopt_parent', lambda s: s in constants.NOTIFICATION_TYPES)),
'parent_notification_type': Or(None, 'adopt_parent', lambda s: s in constants.NOTIFICATION_TYPES)
},
'kind': 'event',
'children': And(list, lambda l: len(l) == 0)
}
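# Illustrative sketch (not part of the original test suite): event_schema()
# describes one serialized event as produced by utils.serialize_event; shape
# only, the values below are examples taken from the assertions further down:
#
#     {
#         'event': {
#             'title': 'comments',
#             'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
#             'notificationType': 'email_transactional',
#             'parent_notification_type': None,
#         },
#         'kind': 'event',
#         'children': [],
#     }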
class TestNotificationUtils(OsfTestCase):
def setUp(self):
super(TestNotificationUtils, self).setUp()
self.user = factories.UserFactory()
self.project = factories.ProjectFactory(creator=self.user)
self.project_subscription = NotificationSubscription.find_one(
Q('node', 'eq', self.project) &
Q('_id', 'eq', self.project._id + '_comments') &
Q('event_name', 'eq', 'comments')
)
self.user.notifications_configured[self.project._id] = True
self.user.save()
self.node = factories.NodeFactory(parent=self.project, creator=self.user)
self.node_comments_subscription = factories.NotificationSubscriptionFactory(
_id=self.node._id + '_' + 'comments',
node=self.node,
event_name='comments'
)
self.node_comments_subscription.save()
self.node_comments_subscription.email_transactional.add(self.user)
self.node_comments_subscription.save()
self.node_subscription = list(NotificationSubscription.find(Q('node', 'eq', self.node)))
self.user_subscription = [factories.NotificationSubscriptionFactory(
_id=self.user._id + '_' + 'comment_replies',
user=self.user,
event_name='comment_replies'
),
factories.NotificationSubscriptionFactory(
_id=self.user._id + '_' + 'global_comment',
user=self.user,
event_name='global_comment'
),
factories.NotificationSubscriptionFactory(
_id=self.user._id + '_' + 'global_file_updated',
user=self.user,
event_name='global_file_updated'
)]
for x in self.user_subscription:
x.save()
for x in self.user_subscription:
x.email_transactional.add(self.user)
for x in self.user_subscription:
x.save()
def test_to_subscription_key(self):
key = utils.to_subscription_key('xyz', 'comments')
assert_equal(key, 'xyz_comments')
def test_from_subscription_key(self):
parsed_key = utils.from_subscription_key('xyz_comment_replies')
assert_equal(parsed_key, {
'uid': 'xyz',
'event': 'comment_replies'
})
def test_get_all_user_subscriptions(self):
user_subscriptions = list(utils.get_all_user_subscriptions(self.user))
assert_in(self.project_subscription, user_subscriptions)
assert_in(self.node_comments_subscription, user_subscriptions)
for x in self.user_subscription:
assert_in(x, user_subscriptions)
assert_equal(len(user_subscriptions), 6)
def test_get_all_node_subscriptions_given_user_subscriptions(self):
user_subscriptions = utils.get_all_user_subscriptions(self.user)
node_subscription_ids = [x._id for x in utils.get_all_node_subscriptions(self.user, self.node,
user_subscriptions=user_subscriptions)]
expected_node_subscription_ids = [x._id for x in self.node_subscription]
assert_items_equal(node_subscription_ids, expected_node_subscription_ids)
def test_get_all_node_subscriptions_given_user_and_node(self):
node_subscription_ids = [x._id for x in utils.get_all_node_subscriptions(self.user, self.node)]
expected_node_subscription_ids = [x._id for x in self.node_subscription]
assert_items_equal(node_subscription_ids, expected_node_subscription_ids)
def test_get_configured_project_ids_does_not_return_user_or_node_ids(self):
configured_nodes = utils.get_configured_projects(self.user)
configured_ids = [n._id for n in configured_nodes]
# No duplicates!
assert_equal(len(configured_nodes), 1)
assert_in(self.project._id, configured_ids)
assert_not_in(self.node._id, configured_ids)
assert_not_in(self.user._id, configured_ids)
def test_get_configured_project_ids_excludes_deleted_projects(self):
project = factories.ProjectFactory()
project.is_deleted = True
project.save()
assert_not_in(project, utils.get_configured_projects(self.user))
def test_get_configured_project_ids_excludes_node_with_project_category(self):
node = factories.NodeFactory(parent=self.project, category='project')
assert_not_in(node, utils.get_configured_projects(self.user))
def test_get_configured_project_ids_includes_top_level_private_projects_if_subscriptions_on_node(self):
private_project = factories.ProjectFactory()
node = factories.NodeFactory(parent=private_project)
node_comments_subscription = factories.NotificationSubscriptionFactory(
_id=node._id + '_' + 'comments',
node=node,
event_name='comments'
)
node_comments_subscription.save()
node_comments_subscription.email_transactional.add(node.creator)
node_comments_subscription.save()
node.creator.notifications_configured[node._id] = True
node.creator.save()
configured_project_nodes = utils.get_configured_projects(node.creator)
assert_in(private_project, configured_project_nodes)
def test_get_configured_project_ids_excludes_private_projects_if_no_subscriptions_on_node(self):
user = factories.UserFactory()
private_project = factories.ProjectFactory()
node = factories.NodeFactory(parent=private_project)
node.add_contributor(user)
utils.remove_contributor_from_subscriptions(node, user)
configured_project_nodes = utils.get_configured_projects(user)
assert_not_in(private_project, configured_project_nodes)
def test_get_parent_notification_type(self):
nt = utils.get_parent_notification_type(self.node, 'comments', self.user)
assert_equal(nt, 'email_transactional')
def test_get_parent_notification_type_no_parent_subscriptions(self):
node = factories.NodeFactory()
nt = utils.get_parent_notification_type(node._id, 'comments', self.user)
assert_equal(nt, None)
def test_get_parent_notification_type_no_parent(self):
project = factories.ProjectFactory()
nt = utils.get_parent_notification_type(project._id, 'comments', self.user)
assert_equal(nt, None)
def test_get_parent_notification_type_handles_user_id(self):
nt = utils.get_parent_notification_type(self.user._id, 'comments', self.user)
assert_equal(nt, None)
def test_format_data_project_settings(self):
data = utils.format_data(self.user, [self.project])
parent_event = {
'event': {
'title': 'comments',
'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
'notificationType': 'email_transactional',
'parent_notification_type': None
},
'kind': 'event',
'children': []
}
child_event = {
'event': {
'title': 'comments',
'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
'notificationType': 'email_transactional',
'parent_notification_type': 'email_transactional'
},
'kind': 'event',
'children': []
}
expected_new = [['event'], 'event']
schema = subscription_schema(self.project, expected_new)
assert schema.validate(data)
assert has(data, parent_event)
assert has(data, child_event)
def test_format_data_node_settings(self):
data = utils.format_data(self.user, [self.node])
event = {
'event': {
'title': 'comments',
'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
'notificationType': 'email_transactional',
'parent_notification_type': 'email_transactional'
},
'kind': 'event',
'children': []
}
schema = subscription_schema(self.project, ['event'])
assert schema.validate(data)
assert has(data, event)
def test_format_includes_admin_view_only_component_subscriptions(self):
# Test that private components whose parent project admins are not contributors
# still appear in those admins' notification settings.
node = factories.NodeFactory(parent=self.project)
data = utils.format_data(self.user, [self.project])
event = {
'event': {
'title': 'comments',
'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
'notificationType': 'adopt_parent',
'parent_notification_type': 'email_transactional'
},
'kind': 'event',
'children': [],
}
schema = subscription_schema(self.project, ['event', ['event'], ['event']])
assert schema.validate(data)
assert has(data, event)
def test_format_data_excludes_pointers(self):
project = factories.ProjectFactory()
pointed = factories.ProjectFactory()
project.add_pointer(pointed, Auth(project.creator))
project.creator.notifications_configured[project._id] = True
project.creator.save()
configured_project_nodes = utils.get_configured_projects(project.creator)
data = utils.format_data(project.creator, configured_project_nodes)
event = {
'event': {
'title': 'comments',
'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
'notificationType': 'email_transactional',
'parent_notification_type': None
},
'kind': 'event',
'children': [],
}
schema = subscription_schema(self.project, ['event'])
assert schema.validate(data)
assert has(data, event)
def test_format_data_user_subscriptions_includes_private_parent_if_configured_children(self):
private_project = factories.ProjectFactory()
node = factories.NodeFactory(parent=private_project)
node_comments_subscription = factories.NotificationSubscriptionFactory(
_id=node._id + '_' + 'comments',
node=node,
event_name='comments'
)
node_comments_subscription.save()
node_comments_subscription.email_transactional.add(node.creator)
node_comments_subscription.save()
node.creator.notifications_configured[node._id] = True
node.creator.save()
configured_project_nodes = utils.get_configured_projects(node.creator)
data = utils.format_data(node.creator, configured_project_nodes)
event = {
'event': {
'title': 'comments',
'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
'notificationType': 'email_transactional',
'parent_notification_type': None
},
'kind': 'event',
'children': [],
}
schema = subscription_schema(self.project, ['event', ['event']])
assert schema.validate(data)
assert has(data, event)
def test_format_data_user_subscriptions_if_children_points_to_parent(self):
private_project = factories.ProjectFactory(creator=self.user)
node = factories.NodeFactory(parent=private_project, creator=self.user)
node.add_pointer(private_project, Auth(self.user))
node.save()
node_comments_subscription = factories.NotificationSubscriptionFactory(
_id=node._id + '_' + 'comments',
node=node,
event_name='comments'
)
node_comments_subscription.save()
node_comments_subscription.email_transactional.add(node.creator)
node_comments_subscription.save()
node.creator.notifications_configured[node._id] = True
node.creator.save()
configured_project_nodes = utils.get_configured_projects(node.creator)
data = utils.format_data(node.creator, configured_project_nodes)
event = {
'event': {
'title': 'comments',
'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
'notificationType': 'email_transactional',
'parent_notification_type': None
},
'kind': 'event',
'children': [],
}
schema = subscription_schema(self.project, ['event', ['event']])
assert schema.validate(data)
assert has(data, event)
def test_format_user_subscriptions(self):
data = utils.format_user_subscriptions(self.user)
expected = [
{
'event': {
'title': 'global_file_updated',
'description': constants.USER_SUBSCRIPTIONS_AVAILABLE['global_file_updated'],
'notificationType': 'email_transactional',
'parent_notification_type': None,
},
'kind': 'event',
'children': []
}, {
'event': {
'title': 'global_comment_replies',
'description': constants.USER_SUBSCRIPTIONS_AVAILABLE['global_comment_replies'],
'notificationType': 'email_transactional',
'parent_notification_type': None
},
'kind': 'event',
'children': []
}, {
'event': {
'title': 'global_mentions',
'description': constants.USER_SUBSCRIPTIONS_AVAILABLE['global_mentions'],
'notificationType': 'email_transactional',
'parent_notification_type': None
},
'kind': 'event',
'children': []
}, {
'event': {
'title': 'global_comments',
'description': constants.USER_SUBSCRIPTIONS_AVAILABLE['global_comments'],
'notificationType': 'email_transactional',
'parent_notification_type': None
},
'kind': 'event',
'children': []
},
]
assert_items_equal(data, expected)
def test_get_global_notification_type(self):
notification_type = utils.get_global_notification_type(self.user_subscription[1], self.user)
assert_equal('email_transactional', notification_type)
def test_check_if_all_global_subscriptions_are_none_false(self):
all_global_subscriptions_none = utils.check_if_all_global_subscriptions_are_none(self.user)
assert_false(all_global_subscriptions_none)
def test_check_if_all_global_subscriptions_are_none_true(self):
for x in self.user_subscription:
x.none.add(self.user)
x.email_transactional.remove(self.user)
for x in self.user_subscription:
x.save()
all_global_subscriptions_none = utils.check_if_all_global_subscriptions_are_none(self.user)
assert_true(all_global_subscriptions_none)
def test_format_data_user_settings(self):
data = utils.format_user_and_project_subscriptions(self.user)
expected = [
{
'node': {
'id': self.user._id,
'title': 'Default Notification Settings',
'help': 'These are default settings for new projects you create or are added to. Modifying these settings will not modify settings on existing projects.'
},
'kind': 'heading',
'children': utils.format_user_subscriptions(self.user)
},
{
'node': {
'help': 'These are settings for each of your projects. Modifying these settings will only modify the settings for the selected project.',
'id': '',
'title': 'Project Notifications'
},
'kind': 'heading',
'children': utils.format_data(self.user, utils.get_configured_projects(self.user))
}]
assert_equal(data, expected)
def test_serialize_user_level_event(self):
user_subscriptions = [x for x in utils.get_all_user_subscriptions(self.user)]
user_subscription = None
for subscription in user_subscriptions:
if 'global_comment_replies' in getattr(subscription, 'event_name'):
user_subscription = subscription
data = utils.serialize_event(self.user, event_description='global_comment_replies',
subscription=user_subscription)
expected = {
'event': {
'title': 'global_comment_replies',
'description': constants.USER_SUBSCRIPTIONS_AVAILABLE['global_comment_replies'],
'notificationType': 'email_transactional',
'parent_notification_type': None
},
'kind': 'event',
'children': []
}
assert_equal(data, expected)
def test_serialize_node_level_event(self):
node_subscriptions = [x for x in utils.get_all_node_subscriptions(self.user, self.node)]
data = utils.serialize_event(user=self.user, event_description='comments',
subscription=node_subscriptions[0], node=self.node)
expected = {
'event': {
'title': 'comments',
'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
'notificationType': 'email_transactional',
'parent_notification_type': 'email_transactional'
},
'kind': 'event',
'children': [],
}
assert_equal(data, expected)
def test_serialize_node_level_event_that_adopts_parent_settings(self):
user = factories.UserFactory()
self.project.add_contributor(contributor=user, permissions=['read'])
self.project.save()
self.node.add_contributor(contributor=user, permissions=['read'])
self.node.save()
# set up how it was in original test - remove existing subscriptions
utils.remove_contributor_from_subscriptions(self.node, user)
node_subscriptions = [x for x in utils.get_all_node_subscriptions(user, self.node)]
data = utils.serialize_event(user=user, event_description='comments',
subscription=node_subscriptions, node=self.node)
expected = {
'event': {
'title': 'comments',
'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
'notificationType': 'adopt_parent',
'parent_notification_type': 'email_transactional'
},
'kind': 'event',
'children': [],
}
assert_equal(data, expected)
class TestNotificationsDict(OsfTestCase):
def test_notifications_dict_add_message_returns_proper_format(self):
d = utils.NotificationsDict()
message = {
'message': 'Freddie commented on your project',
'timestamp': timezone.now()
}
message2 = {
'message': 'Mercury commented on your component',
'timestamp': timezone.now()
}
d.add_message(['project'], message)
d.add_message(['project', 'node'], message2)
expected = {
'messages': [],
'children': collections.defaultdict(
utils.NotificationsDict, {
'project': {
'messages': [message],
'children': collections.defaultdict(utils.NotificationsDict, {
'node': {
'messages': [message2],
'children': collections.defaultdict(utils.NotificationsDict, {})
}
})
}
}
)}
assert_equal(d, expected)
class TestCompileSubscriptions(NotificationTestCase):
def setUp(self):
super(TestCompileSubscriptions, self).setUp()
self.user_1 = factories.UserFactory()
self.user_2 = factories.UserFactory()
self.user_3 = factories.UserFactory()
self.user_4 = factories.UserFactory()
# Base project, plus one component shared with 3 users and one shared with 2 users
self.base_project = factories.ProjectFactory(is_public=False, creator=self.user_1)
self.shared_node = factories.NodeFactory(parent=self.base_project, is_public=False, creator=self.user_1)
self.private_node = factories.NodeFactory(parent=self.base_project, is_public=False, creator=self.user_1)
# Adding contributors
for node in [self.base_project, self.shared_node, self.private_node]:
node.add_contributor(self.user_2, permissions='admin')
self.base_project.add_contributor(self.user_3, permissions='write')
self.shared_node.add_contributor(self.user_3, permissions='write')
# Setting basic subscriptions
self.base_sub = factories.NotificationSubscriptionFactory(
_id=self.base_project._id + '_file_updated',
node=self.base_project,
event_name='file_updated'
)
self.base_sub.save()
self.shared_sub = factories.NotificationSubscriptionFactory(
_id=self.shared_node._id + '_file_updated',
node=self.shared_node,
event_name='file_updated'
)
self.shared_sub.save()
self.private_sub = factories.NotificationSubscriptionFactory(
_id=self.private_node._id + '_file_updated',
node=self.private_node,
event_name='file_updated'
)
self.private_sub.save()
def test_no_subscription(self):
node = factories.NodeFactory()
result = emails.compile_subscriptions(node, 'file_updated')
assert_equal({'email_transactional': [], 'none': [], 'email_digest': []}, result)
def test_no_subscribers(self):
node = factories.NodeFactory()
node_sub = factories.NotificationSubscriptionFactory(
_id=node._id + '_file_updated',
node=node,
event_name='file_updated'
)
node_sub.save()
result = emails.compile_subscriptions(node, 'file_updated')
assert_equal({'email_transactional': [], 'none': [], 'email_digest': []}, result)
def test_creator_subbed_parent(self):
# Basic sub check
self.base_sub.email_transactional.add(self.user_1)
self.base_sub.save()
result = emails.compile_subscriptions(self.base_project, 'file_updated')
assert_equal({'email_transactional': [self.user_1._id], 'none': [], 'email_digest': []}, result)
def test_creator_subbed_to_parent_from_child(self):
# checks the parent sub is the one to appear without a child sub
self.base_sub.email_transactional.add(self.user_1)
self.base_sub.save()
result = emails.compile_subscriptions(self.shared_node, 'file_updated')
assert_equal({'email_transactional': [self.user_1._id], 'none': [], 'email_digest': []}, result)
def test_creator_subbed_to_both_from_child(self):
# checks that only one sub is in the list.
self.base_sub.email_transactional.add(self.user_1)
self.base_sub.save()
self.shared_sub.email_transactional.add(self.user_1)
self.shared_sub.save()
result = emails.compile_subscriptions(self.shared_node, 'file_updated')
assert_equal({'email_transactional': [self.user_1._id], 'none': [], 'email_digest': []}, result)
def test_creator_diff_subs_to_both_from_child(self):
# Check that the child node sub overrides the parent node sub
self.base_sub.email_transactional.add(self.user_1)
self.base_sub.save()
self.shared_sub.none.add(self.user_1)
self.shared_sub.save()
result = emails.compile_subscriptions(self.shared_node, 'file_updated')
assert_equal({'email_transactional': [], 'none': [self.user_1._id], 'email_digest': []}, result)
def test_user_wo_permission_on_child_node_not_listed(self):
# Ensure that a user without permission does not get an email about a node they cannot see.
self.base_sub.email_transactional.add(self.user_3)
self.base_sub.save()
result = emails.compile_subscriptions(self.private_node, 'file_updated')
assert_equal({'email_transactional': [], 'none': [], 'email_digest': []}, result)
def test_several_nodes_deep(self):
self.base_sub.email_transactional.add(self.user_1)
self.base_sub.save()
node2 = factories.NodeFactory(parent=self.shared_node)
node3 = factories.NodeFactory(parent=node2)
node4 = factories.NodeFactory(parent=node3)
node5 = factories.NodeFactory(parent=node4)
subs = emails.compile_subscriptions(node5, 'file_updated')
assert_equal(subs, {'email_transactional': [self.user_1._id], 'email_digest': [], 'none': []})
def test_several_nodes_deep_precedence(self):
self.base_sub.email_transactional.add(self.user_1)
self.base_sub.save()
node2 = factories.NodeFactory(parent=self.shared_node)
node3 = factories.NodeFactory(parent=node2)
node4 = factories.NodeFactory(parent=node3)
node4_subscription = factories.NotificationSubscriptionFactory(
_id=node4._id + '_file_updated',
node=node4,
event_name='file_updated'
)
node4_subscription.save()
node4_subscription.email_digest.add(self.user_1)
node4_subscription.save()
node5 = factories.NodeFactory(parent=node4)
subs = emails.compile_subscriptions(node5, 'file_updated')
assert_equal(subs, {'email_transactional': [], 'email_digest': [self.user_1._id], 'none': []})
class TestMoveSubscription(NotificationTestCase):
def setUp(self):
super(TestMoveSubscription, self).setUp()
self.blank = {key: [] for key in constants.NOTIFICATION_TYPES}  # Empty 'users to remove' mapping, for moves where no one needs removing.
self.user_1 = factories.AuthUserFactory()
self.auth = Auth(user=self.user_1)
self.user_2 = factories.AuthUserFactory()
self.user_3 = factories.AuthUserFactory()
self.user_4 = factories.AuthUserFactory()
self.project = factories.ProjectFactory(creator=self.user_1)
self.private_node = factories.NodeFactory(parent=self.project, is_public=False, creator=self.user_1)
self.sub = factories.NotificationSubscriptionFactory(
_id=self.project._id + '_file_updated',
node=self.project,
event_name='file_updated'
)
self.sub.email_transactional.add(self.user_1)
self.sub.save()
self.file_sub = factories.NotificationSubscriptionFactory(
_id=self.project._id + '_xyz42_file_updated',
node=self.project,
event_name='xyz42_file_updated'
)
self.file_sub.save()
def test_separate_users(self):
self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.private_node.save()
subbed, removed = utils.separate_users(
self.private_node, [self.user_2._id, self.user_3._id, self.user_4._id]
)
assert_equal([self.user_2._id, self.user_3._id], subbed)
assert_equal([self.user_4._id], removed)
def test_event_subs_same(self):
self.file_sub.email_transactional.add(self.user_2, self.user_3, self.user_4)
self.file_sub.save()
self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.private_node.save()
results = utils.users_to_remove('xyz42_file_updated', self.project, self.private_node)
assert_equal({'email_transactional': [self.user_4._id], 'email_digest': [], 'none': []}, results)
def test_event_nodes_same(self):
self.file_sub.email_transactional.add(self.user_2, self.user_3, self.user_4)
self.file_sub.save()
self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.private_node.save()
results = utils.users_to_remove('xyz42_file_updated', self.project, self.project)
assert_equal({'email_transactional': [], 'email_digest': [], 'none': []}, results)
def test_move_sub(self):
# Tests old sub is replaced with new sub.
utils.move_subscription(self.blank, 'xyz42_file_updated', self.project, 'abc42_file_updated', self.private_node)
self.file_sub.reload()
assert_equal('abc42_file_updated', self.file_sub.event_name)
assert_equal(self.private_node, self.file_sub.owner)
assert_equal(self.private_node._id + '_abc42_file_updated', self.file_sub._id)
def test_move_sub_with_none(self):
# Attempt to reproduce an error that is seen when moving files
self.project.add_contributor(self.user_2, permissions=['write', 'read'], auth=self.auth)
self.project.save()
self.file_sub.none.add(self.user_2)
self.file_sub.save()
results = utils.users_to_remove('xyz42_file_updated', self.project, self.private_node)
assert_equal({'email_transactional': [], 'email_digest': [], 'none': [self.user_2._id]}, results)
def test_remove_one_user(self):
# One user doesn't have permissions on the node the sub is moved to. Should be listed.
self.file_sub.email_transactional.add(self.user_2, self.user_3, self.user_4)
self.file_sub.save()
self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.private_node.save()
results = utils.users_to_remove('xyz42_file_updated', self.project, self.private_node)
assert_equal({'email_transactional': [self.user_4._id], 'email_digest': [], 'none': []}, results)
def test_remove_one_user_warn_another(self):
# Two users do not have permissions on new node, but one has a project sub. Both should be listed.
self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
self.private_node.save()
self.project.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.project.save()
self.sub.email_digest.add(self.user_3)
self.sub.save()
self.file_sub.email_transactional.add(self.user_2, self.user_4)
results = utils.users_to_remove('xyz42_file_updated', self.project, self.private_node)
utils.move_subscription(results, 'xyz42_file_updated', self.project, 'abc42_file_updated', self.private_node)
assert_equal({'email_transactional': [self.user_4._id], 'email_digest': [self.user_3._id], 'none': []}, results)
assert_true(self.sub.email_digest.filter(id=self.user_3.id).exists()) # Is not removed from the project subscription.
def test_warn_user(self):
# One user with a project sub does not have permission on new node. User should be listed.
self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
self.private_node.save()
self.project.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.project.save()
self.sub.email_digest.add(self.user_3)
self.sub.save()
self.file_sub.email_transactional.add(self.user_2)
results = utils.users_to_remove('xyz42_file_updated', self.project, self.private_node)
utils.move_subscription(results, 'xyz42_file_updated', self.project, 'abc42_file_updated', self.private_node)
assert_equal({'email_transactional': [], 'email_digest': [self.user_3._id], 'none': []}, results)
assert_in(self.user_3, self.sub.email_digest.all()) # Is not removed from the project subscription.
def test_user_node_subbed_and_not_removed(self):
self.project.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.project.save()
self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.private_node.save()
self.sub.email_digest.add(self.user_3)
self.sub.save()
utils.move_subscription(self.blank, 'xyz42_file_updated', self.project, 'abc42_file_updated', self.private_node)
assert_false(self.file_sub.email_digest.filter().exists())
# Regression test for commit ea15186
def test_garrulous_event_name(self):
self.file_sub.email_transactional.add(self.user_2, self.user_3, self.user_4)
self.file_sub.save()
self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
self.private_node.save()
results = utils.users_to_remove('complicated/path_to/some/file/ASDFASDF.txt_file_updated', self.project, self.private_node)
assert_equal({'email_transactional': [], 'email_digest': [], 'none': []}, results)
class TestSendEmails(NotificationTestCase):
def setUp(self):
super(TestSendEmails, self).setUp()
self.user = factories.AuthUserFactory()
self.project = factories.ProjectFactory()
self.project_subscription = factories.NotificationSubscriptionFactory(
_id=self.project._id + '_' + 'comments',
node=self.project,
event_name='comments'
)
self.project_subscription.save()
self.project_subscription.email_transactional.add(self.project.creator)
self.project_subscription.save()
self.node = factories.NodeFactory(parent=self.project)
self.node_subscription = factories.NotificationSubscriptionFactory(
_id=self.node._id + '_comments',
node=self.node,
event_name='comments'
)
self.node_subscription.save()
self.user_subscription = factories.NotificationSubscriptionFactory(
_id=self.user._id + '_' + 'global_comment_replies',
node=self.node,
event_name='global_comment_replies'
)
self.user_subscription.email_transactional.add(self.user)
self.user_subscription.save()
@mock.patch('website.notifications.emails.store_emails')
def test_notify_no_subscription(self, mock_store):
node = factories.ProjectFactory()
user = factories.AuthUserFactory()
emails.notify('comments', user=user, node=node, timestamp=timezone.now())
assert_false(mock_store.called)
@mock.patch('website.notifications.emails.store_emails')
def test_notify_no_subscribers(self, mock_store):
node = factories.NodeFactory()
node_subscription = factories.NotificationSubscriptionFactory(
_id=node._id + '_comments',
node=node,
event_name='comments'
)
node_subscription.save()
emails.notify('comments', user=self.user, node=node, timestamp=timezone.now())
assert_false(mock_store.called)
@mock.patch('website.notifications.emails.store_emails')
def test_notify_sends_with_correct_args(self, mock_store):
time_now = timezone.now()
emails.notify('comments', user=self.user, node=self.node, timestamp=time_now)
assert_true(mock_store.called)
mock_store.assert_called_with([self.project.creator._id], 'email_transactional', 'comments', self.user,
self.node, time_now)
@mock.patch('website.notifications.emails.store_emails')
def test_notify_does_not_send_to_users_subscribed_to_none(self, mock_store):
node = factories.NodeFactory()
user = factories.UserFactory()
node_subscription = factories.NotificationSubscriptionFactory(
_id=node._id + '_comments',
node=node,
event_name='comments'
)
node_subscription.save()
node_subscription.none.add(user)
node_subscription.save()
sent = emails.notify('comments', user=user, node=node, timestamp=timezone.now())
assert_false(mock_store.called)
assert_equal(sent, [])
@mock.patch('website.notifications.emails.store_emails')
def test_notify_mentions_does_not_send_to_mentioned_users_subscribed_to_none(self, mock_store):
node = factories.NodeFactory()
user = factories.UserFactory()
factories.NotificationSubscriptionFactory(
_id=user._id + '_global_mentions',
node=self.node,
event_name='global_mentions'
).add_user_to_subscription(user, 'none')
time_now = timezone.now()
sent = emails.notify_mentions('global_mentions', user=user, node=node, timestamp=time_now, new_mentions=[user._id])
assert_false(mock_store.called)
assert_equal(sent, [])
@mock.patch('website.notifications.emails.store_emails')
def test_notify_mentions_does_send_to_mentioned_users(self, mock_store):
user = factories.UserFactory()
factories.NotificationSubscriptionFactory(
_id=user._id + '_global_mentions',
node=self.node,
event_name='global_mentions'
).add_user_to_subscription(user, 'email_transactional')
node = factories.ProjectFactory(creator=user)
time_now = timezone.now()
emails.notify_mentions('global_mentions', user=user, node=node, timestamp=time_now, new_mentions=[user._id])
assert_true(mock_store.called)
mock_store.assert_called_with([node.creator._id], 'email_transactional', 'mentions', user,
node, time_now, new_mentions=[node.creator._id])
@mock.patch('website.notifications.emails.store_emails')
def test_notify_sends_comment_reply_event_if_comment_is_direct_reply(self, mock_store):
time_now = timezone.now()
emails.notify('comments', user=self.user, node=self.node, timestamp=time_now, target_user=self.project.creator)
mock_store.assert_called_with([self.project.creator._id], 'email_transactional', 'comment_replies',
self.user, self.node, time_now, target_user=self.project.creator)
@mock.patch('website.notifications.emails.store_emails')
def test_notify_sends_comment_reply_when_target_user_is_subscribed_via_user_settings(self, mock_store):
time_now = timezone.now()
emails.notify('global_comment_replies', user=self.project.creator, node=self.node, timestamp=time_now, target_user=self.user)
mock_store.assert_called_with([self.user._id], 'email_transactional', 'comment_replies',
self.project.creator, self.node, time_now, target_user=self.user)
@mock.patch('website.notifications.emails.store_emails')
def test_notify_sends_comment_event_if_comment_reply_is_not_direct_reply(self, mock_store):
user = factories.UserFactory()
time_now = timezone.now()
emails.notify('comments', user=user, node=self.node, timestamp=time_now, target_user=user)
mock_store.assert_called_with([self.project.creator._id], 'email_transactional', 'comments', user,
self.node, time_now, target_user=user)
@mock.patch('website.mails.send_mail')
@mock.patch('website.notifications.emails.store_emails')
def test_notify_does_not_send_comment_if_they_reply_to_their_own_comment(self, mock_store, mock_send_mail):
time_now = timezone.now()
emails.notify('comments', user=self.project.creator, node=self.project, timestamp=time_now,
target_user=self.project.creator)
assert_false(mock_store.called)
assert_false(mock_send_mail.called)
@mock.patch('website.notifications.emails.store_emails')
def test_notify_sends_comment_event_if_comment_reply_is_not_direct_reply_on_component(self, mock_store):
# Test that comment replies on components that are not direct replies to the subscriber use the
# "comments" email template.
user = factories.UserFactory()
time_now = timezone.now()
emails.notify('comments', user, self.node, time_now, target_user=user)
mock_store.assert_called_with([self.project.creator._id], 'email_transactional', 'comments', user,
self.node, time_now, target_user=user)
def test_check_node_node_none(self):
subs = emails.check_node(None, 'comments')
assert_equal(subs, {'email_transactional': [], 'email_digest': [], 'none': []})
def test_check_node_one(self):
subs = emails.check_node(self.project, 'comments')
assert_equal(subs, {'email_transactional': [self.project.creator._id], 'email_digest': [], 'none': []})
@mock.patch('website.project.views.comment.notify')
def test_check_user_comment_reply_subscription_if_email_not_sent_to_target_user(self, mock_notify):
# user subscribed to comment replies
user = factories.UserFactory()
user_subscription = factories.NotificationSubscriptionFactory(
_id=user._id + '_comments',
user=user,
event_name='comment_replies'
)
user_subscription.email_transactional.add(user)
user_subscription.save()
# user is not subscribed to project comment notifications
project = factories.ProjectFactory()
# user comments on project
target = factories.CommentFactory(node=project, user=user)
content = 'hammer to fall'
# reply to user (note: notify is called from Comment.create)
reply = Comment.create(
auth=Auth(project.creator),
user=project.creator,
node=project,
content=content,
target=Guid.load(target._id),
root_target=Guid.load(project._id),
)
assert_true(mock_notify.called)
assert_equal(mock_notify.call_count, 2)
def test_get_settings_url_for_node(self):
url = emails.get_settings_url(self.project._id, self.user)
assert_equal(url, self.project.absolute_url + 'settings/')
def test_get_settings_url_for_user(self):
url = emails.get_settings_url(self.user._id, self.user)
assert_equal(url, web_url_for('user_notifications', _absolute=True))
def test_get_node_lineage(self):
node_lineage = emails.get_node_lineage(self.node)
assert_equal(node_lineage, [self.project._id, self.node._id])
def test_localize_timestamp(self):
timestamp = timezone.now()
self.user.timezone = 'America/New_York'
self.user.locale = 'en_US'
self.user.save()
tz = dates.get_timezone(self.user.timezone)
locale = Locale(self.user.locale)
formatted_date = dates.format_date(timestamp, format='full', locale=locale)
formatted_time = dates.format_time(timestamp, format='short', tzinfo=tz, locale=locale)
formatted_datetime = u'{time} on {date}'.format(time=formatted_time, date=formatted_date)
assert_equal(emails.localize_timestamp(timestamp, self.user), formatted_datetime)
def test_localize_timestamp_empty_timezone(self):
timestamp = timezone.now()
self.user.timezone = ''
self.user.locale = 'en_US'
self.user.save()
tz = dates.get_timezone('Etc/UTC')
locale = Locale(self.user.locale)
formatted_date = dates.format_date(timestamp, format='full', locale=locale)
formatted_time = dates.format_time(timestamp, format='short', tzinfo=tz, locale=locale)
formatted_datetime = u'{time} on {date}'.format(time=formatted_time, date=formatted_date)
assert_equal(emails.localize_timestamp(timestamp, self.user), formatted_datetime)
def test_localize_timestamp_empty_locale(self):
timestamp = timezone.now()
self.user.timezone = 'America/New_York'
self.user.locale = ''
self.user.save()
tz = dates.get_timezone(self.user.timezone)
locale = Locale('en')
formatted_date = dates.format_date(timestamp, format='full', locale=locale)
formatted_time = dates.format_time(timestamp, format='short', tzinfo=tz, locale=locale)
formatted_datetime = u'{time} on {date}'.format(time=formatted_time, date=formatted_date)
assert_equal(emails.localize_timestamp(timestamp, self.user), formatted_datetime)
def test_localize_timestamp_handles_unicode(self):
timestamp = timezone.now()
self.user.timezone = 'Europe/Moscow'
self.user.locale = 'ru_RU'
self.user.save()
tz = dates.get_timezone(self.user.timezone)
locale = Locale(self.user.locale)
formatted_date = dates.format_date(timestamp, format='full', locale=locale)
formatted_time = dates.format_time(timestamp, format='short', tzinfo=tz, locale=locale)
formatted_datetime = u'{time} on {date}'.format(time=formatted_time, date=formatted_date)
assert_equal(emails.localize_timestamp(timestamp, self.user), formatted_datetime)
class TestSendDigest(OsfTestCase):
def setUp(self):
super(TestSendDigest, self).setUp()
self.user_1 = factories.UserFactory()
self.user_2 = factories.UserFactory()
self.project = factories.ProjectFactory()
self.timestamp = timezone.now()
def test_group_notifications_by_user_transactional(self):
send_type = 'email_transactional'
d = factories.NotificationDigestFactory(
user=self.user_1,
send_type=send_type,
timestamp=self.timestamp,
message='Hello',
node_lineage=[self.project._id]
)
d.save()
d2 = factories.NotificationDigestFactory(
user=self.user_2,
send_type=send_type,
timestamp=self.timestamp,
message='Hello',
node_lineage=[self.project._id]
)
d2.save()
d3 = factories.NotificationDigestFactory(
user=self.user_2,
send_type='email_digest',
timestamp=self.timestamp,
message='Hello, but this should not appear (this is a digest)',
node_lineage=[self.project._id]
)
d3.save()
user_groups = get_users_emails(send_type)
expected = [
{
u'user_id': self.user_1._id,
u'info': [{
u'message': u'Hello',
u'node_lineage': [unicode(self.project._id)],
u'_id': d._id
}]
},
{
u'user_id': self.user_2._id,
u'info': [{
u'message': u'Hello',
u'node_lineage': [unicode(self.project._id)],
u'_id': d2._id
}]
}
]
assert_equal(len(user_groups), 2)
assert_equal(user_groups, expected)
digest_ids = [d._id, d2._id, d3._id]
remove_notifications(email_notification_ids=digest_ids)
def test_group_notifications_by_user_digest(self):
send_type = 'email_digest'
d = factories.NotificationDigestFactory(
user=self.user_1,
send_type=send_type,
event='comment_replies',
timestamp=self.timestamp,
message='Hello',
node_lineage=[self.project._id]
)
d.save()
d2 = factories.NotificationDigestFactory(
user=self.user_2,
send_type=send_type,
timestamp=self.timestamp,
message='Hello',
node_lineage=[self.project._id]
)
d2.save()
d3 = factories.NotificationDigestFactory(
user=self.user_2,
send_type='email_transactional',
timestamp=self.timestamp,
message='Hello, but this should not appear (this is transactional)',
node_lineage=[self.project._id]
)
d3.save()
user_groups = get_users_emails(send_type)
expected = [
{
u'user_id': unicode(self.user_1._id),
u'info': [{
u'message': u'Hello',
u'node_lineage': [unicode(self.project._id)],
u'_id': unicode(d._id)
}]
},
{
u'user_id': unicode(self.user_2._id),
u'info': [{
u'message': u'Hello',
u'node_lineage': [unicode(self.project._id)],
u'_id': unicode(d2._id)
}]
}
]
assert_equal(len(user_groups), 2)
assert_equal(user_groups, expected)
digest_ids = [d._id, d2._id, d3._id]
remove_notifications(email_notification_ids=digest_ids)
@mock.patch('website.mails.send_mail')
def test_send_users_email_called_with_correct_args(self, mock_send_mail):
send_type = 'email_transactional'
d = factories.NotificationDigestFactory(
send_type=send_type,
event='comment_replies',
timestamp=timezone.now(),
message='Hello',
node_lineage=[factories.ProjectFactory()._id]
)
d.save()
user_groups = get_users_emails(send_type)
send_users_email(send_type)
assert_true(mock_send_mail.called)
assert_equals(mock_send_mail.call_count, len(user_groups))
last_user_index = len(user_groups) - 1
user = OSFUser.load(user_groups[last_user_index]['user_id'])
email_notification_ids = [message['_id'] for message in user_groups[last_user_index]['info']]
args, kwargs = mock_send_mail.call_args
assert_equal(kwargs['to_addr'], user.username)
assert_equal(kwargs['mimetype'], 'html')
assert_equal(kwargs['mail'], mails.DIGEST)
assert_equal(kwargs['name'], user.fullname)
message = group_by_node(user_groups[last_user_index]['info'])
assert_equal(kwargs['message'], message)
assert_equal(kwargs['callback'], remove_notifications(email_notification_ids=email_notification_ids))
def test_remove_sent_digest_notifications(self):
d = factories.NotificationDigestFactory(
event='comment_replies',
timestamp=timezone.now(),
message='Hello',
node_lineage=[factories.ProjectFactory()._id]
)
digest_id = d._id
remove_notifications(email_notification_ids=[digest_id])
with assert_raises(NoResultsFound):
NotificationDigest.find_one(Q('_id', 'eq', digest_id))
|
|
#!/usr/bin/env python
import subprocess
import threading
import shelve
import os
from numpy import arange, sin, pi, linspace
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
import wx
import wx.lib.filebrowsebutton as filebrowse
import tx
presets = shelve.open(os.path.expanduser("~/.wxtx"))
modes = ["AM", "FM", "WBFM", "LSB", "USB", "STEREO"]
units = ["MHz", "kHz", "Hz"]
sources = ["Soundcard", "File"]
def setup_plot(figure, axes):
axes.clear()
figure.subplots_adjust(left=0.2, bottom=0.2)
figure.set_facecolor('0.1')
axes.spines['bottom'].set_color('0.9')
axes.spines['top'].set_color('0.9')
axes.spines['right'].set_color('0.9')
axes.spines['left'].set_color('0.9')
axes.yaxis.label.set_color('0.9')
axes.xaxis.label.set_color('0.9')
axes.tick_params(axis='x', colors='0.9')
axes.tick_params(axis='y', colors='0.9')
axes.set_xlabel("Frequency (kHz)")
axes.set_ylabel("Magnitude (dB)")
axes.grid(color='0.8')
class CanvasPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.transmitter=None
self.figure = Figure(figsize=(0.5, 0.5))
self.axes = self.figure.add_subplot(111, axisbg='0.1')
self.canvas = FigureCanvas(self, -1, self.figure)
setup_plot(self.figure, self.axes)
self.vsizer = wx.BoxSizer(wx.VERTICAL)
self.sizer_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.tx_sizer = wx.BoxSizer(wx.VERTICAL)
self.rx_sizer = wx.BoxSizer(wx.VERTICAL)
self.settings_sizer = wx.BoxSizer(wx.VERTICAL)
self.preset_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.freq_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.freq_offset_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.gain_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.ppm_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.squelch_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.lpf_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.fm_deviation_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.source_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.destination_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.mode_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.presets = wx.Choice(self, choices=sorted(presets.keys()))
self.save_preset = wx.Button(self, label="Save as...")
self.del_preset = wx.Button(self, label="delete")
self.frequency = wx.TextCtrl(self, value="30")
self.frequency_units = wx.Choice(self, choices=units)
self.rx_frequency_offset = wx.TextCtrl(self, value="0")
self.rx_gain = wx.TextCtrl(self, value="automatic")
self.rx_ppm = wx.TextCtrl(self, value="0")
self.rx_squelch = wx.TextCtrl(self, value="0")
self.lpf = wx.TextCtrl(self, value="3000")
self.fm_deviation = wx.TextCtrl(self, value="5000")
self.fm_deviation.Disable()
self.mode = wx.Choice(self, choices=modes)
self.source = wx.Choice(self, choices=sources)
self.destination = wx.Choice(self, choices=sources)
self.input_file_button = filebrowse.FileBrowseButton(self,
labelText="Input File:")
self.device_button = filebrowse.FileBrowseButton(self,
labelText="Transmitter:", initialValue="/dev/ttyUSB1")
self.input_file_button.Disable()
self.output_file_button = filebrowse.FileBrowseButton(self,
labelText="Output File:", fileMode=wx.SAVE)
self.output_file_button.Disable()
self.tx = wx.ToggleButton(self, label="TX")
self.rx = wx.ToggleButton(self, label="RX")
self.Bind(wx.EVT_TOGGLEBUTTON, self.on_transmit, self.tx)
self.Bind(wx.EVT_TOGGLEBUTTON, self.on_receive, self.rx)
self.Bind(wx.EVT_CHOICE, self.on_mode, self.mode)
self.Bind(wx.EVT_CHOICE, self.on_source, self.source)
self.Bind(wx.EVT_CHOICE, self.on_destination, self.destination)
self.Bind(wx.EVT_CHOICE, self.on_preset, self.presets)
self.Bind(wx.EVT_BUTTON, self.on_save_preset, self.save_preset)
self.Bind(wx.EVT_BUTTON, self.on_del_preset, self.del_preset)
self.vsizer.Add(self.canvas, 1, wx.EXPAND | wx.ALL)
self.preset_sizer.Add(wx.StaticText(self, label="Preset: "), 0, wx.CENTER)
self.preset_sizer.Add(self.presets, 1)
self.preset_sizer.Add(self.save_preset, 0.5)
self.preset_sizer.Add(self.del_preset, 0.5)
self.vsizer.Add(self.preset_sizer, 0, wx.EXPAND | wx.ALL, 5)
self.tx_sizer.Add(wx.StaticText(self, label="Transmitter Settings"), 0, wx.CENTER | wx.ALL, 10)
self.freq_sizer.Add(wx.StaticText(self, label="Frequency: "), 0, wx.CENTER)
self.freq_sizer.Add(self.frequency, 1)
self.freq_sizer.Add(self.frequency_units, 0.5)
self.tx_sizer.Add(self.freq_sizer, 0, wx.EXPAND | wx.ALL, 3)
self.lpf_sizer.Add(wx.StaticText(self, label="Audio Cutoff (Hz): "), 0, wx.CENTER)
self.lpf_sizer.Add(self.lpf, 1)
self.tx_sizer.Add(self.lpf_sizer, 0, wx.EXPAND | wx.ALL, 5)
self.fm_deviation_sizer.Add(wx.StaticText(self, label="FM Deviation (Hz): "), 0, wx.CENTER)
self.fm_deviation_sizer.Add(self.fm_deviation, 1)
self.tx_sizer.Add(self.fm_deviation_sizer, 0, wx.EXPAND | wx.ALL, 5)
self.mode_sizer.Add(wx.StaticText(self, label="Mode:"), 0, wx.CENTRE)
self.mode_sizer.Add(self.mode, 1, wx.EXPAND | wx.ALL)
self.tx_sizer.Add(self.mode_sizer, 0, wx.EXPAND | wx.ALL, 5)
self.source_sizer.Add(wx.StaticText(self, label="Source:"), 0, wx.CENTRE)
self.source_sizer.Add(self.source, 1, wx.EXPAND | wx.ALL)
self.tx_sizer.Add(self.source_sizer, 0, wx.EXPAND | wx.ALL, 5)
self.tx_sizer.Add(self.input_file_button, 0, wx.EXPAND | wx.ALL, 5)
self.tx_sizer.Add(self.device_button, 0, wx.EXPAND | wx.ALL, 5)
self.tx_sizer.Add(self.tx, 0, wx.EXPAND | wx.ALL)
self.sizer_sizer.Add(self.tx_sizer, 1, wx.EXPAND | wx.ALL)
self.sizer_sizer.Add(wx.StaticLine(self, style=wx.LI_VERTICAL), 0, wx.ALL|wx.EXPAND, 5)
self.rx_sizer.Add(wx.StaticText(self, label="Receiver Settings"), 0, wx.CENTRE | wx.ALL, 10)
self.freq_offset_sizer.Add(wx.StaticText(self, label="Frequency Offset (Hz):"), 0, wx.CENTRE)
self.freq_offset_sizer.Add(self.rx_frequency_offset, 1, wx.EXPAND | wx.ALL)
self.rx_sizer.Add(self.freq_offset_sizer, 0, wx.EXPAND | wx.ALL, 5)
self.gain_sizer.Add(wx.StaticText(self, label="Gain (dB):"), 0, wx.CENTRE)
self.gain_sizer.Add(self.rx_gain, 1)
self.rx_sizer.Add(self.gain_sizer, 0, wx.EXPAND | wx.ALL, 5)
self.ppm_sizer.Add(wx.StaticText(self, label="Frequency Correction (ppm):"), 0, wx.CENTRE)
self.ppm_sizer.Add(self.rx_ppm, 1)
self.rx_sizer.Add(self.ppm_sizer, 0, wx.EXPAND | wx.ALL, 5)
self.squelch_sizer.Add(wx.StaticText(self, label="Squelch (dB):"), 0, wx.CENTRE)
self.squelch_sizer.Add(self.rx_squelch, 1)
self.rx_sizer.Add(self.squelch_sizer, 0, wx.EXPAND | wx.ALL, 5)
self.rx_sizer.Add((10,10), 1, wx.EXPAND | wx.ALL)
self.destination_sizer.Add(wx.StaticText(self, label="Destination:"), 0, wx.CENTRE)
self.destination_sizer.Add(self.destination, 1, wx.EXPAND | wx.ALL)
self.rx_sizer.Add(self.destination_sizer, 0, wx.EXPAND | wx.ALL, 5)
self.rx_sizer.Add(self.output_file_button, 0, wx.EXPAND | wx.ALL, 5)
self.rx_sizer.Add(self.rx, 0, wx.EXPAND | wx.ALL)
self.sizer_sizer.Add(self.rx_sizer, 1, wx.EXPAND | wx.ALL)
self.vsizer.Add(self.sizer_sizer, 0, wx.EXPAND, 10)
self.SetSizer(self.vsizer)
self.Fit()
self.line = None
self.t1 = wx.Timer(self)
self.t1.Start(100)
self.Bind(wx.EVT_TIMER, self.on_timer)
self.transmitter = None
self.transmitter_thread = None
self.transmitter_pipe = None
self.receiver_pipe = None
self.sox_pipe = None
def on_timer(self, event):
if self.transmitter is not None:
if not self.transmitter_thread.is_alive():
self.stop_transmit()
return
if hasattr(self.transmitter.modulator, "fft"):
self.update_plot()
def update_plot(self):
fft = self.transmitter.modulator.fft
nyq = self.transmitter.modulator.nyq
f = linspace(-nyq, nyq, len(fft))
if self.line is None:
setup_plot(self.figure, self.axes)
self.axes.set_xlim([-nyq, nyq])
self.axes.set_ylim([-100.0, 100.0])
self.figure.canvas.draw()
self.background = self.figure.canvas.copy_from_bbox(self.axes.bbox)
self.line = self.axes.plot(f, fft, linewidth='1.0', color='green')[0]
else:
self.figure.canvas.restore_region(self.background)
self.line.set_xdata(f)
self.line.set_ydata(fft)
self.axes.draw_artist(self.line)
self.figure.canvas.blit()
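    # update_plot() follows the usual matplotlib blitting pattern: the first draw
    # caches the empty axes as a background, and later frames restore that
    # background, redraw only the spectrum line, and blit it, which keeps the
    # 100 ms GUI timer callback cheap.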
def settings2gui(self, evt=None):
frequency, frequency_units, cutoff, deviation, mode, rx_frequency_offset = self.settings
self.frequency.SetValue(str(frequency))
self.frequency_units.SetSelection(units.index(frequency_units))
self.lpf.SetValue(str(cutoff))
self.fm_deviation.SetValue(str(deviation))
self.mode.SetSelection(modes.index(mode))
self.rx_frequency_offset.SetValue(str(rx_frequency_offset))
if mode.upper() == "AM":
self.fm_deviation.Disable()
elif mode.upper() == "LSB":
self.fm_deviation.Disable()
elif mode.upper() == "USB":
self.fm_deviation.Disable()
elif mode.upper() == "FM":
self.fm_deviation.Enable()
elif mode.upper() == "WBFM":
self.fm_deviation.Enable()
elif mode.upper() == "STEREO":
self.fm_deviation.Enable()
def gui2settings(self, evt=None):
frequency = float(self.frequency.GetValue())
frequency_units = units[self.frequency_units.GetCurrentSelection()]
cutoff = float(self.lpf.GetValue())
deviation = float(self.fm_deviation.GetValue())
mode = modes[self.mode.GetCurrentSelection()]
rx_frequency_offset = float(self.rx_frequency_offset.GetValue())
self.settings = frequency, frequency_units, cutoff, deviation, mode, rx_frequency_offset
def on_preset(self, event):
preset_name = sorted(presets.keys())[self.presets.GetCurrentSelection()]
self.settings = presets[str(preset_name)]
self.settings2gui()
def on_save_preset(self, event):
dlg = wx.TextEntryDialog(
self, 'Create Preset')
self.gui2settings()
if dlg.ShowModal() == wx.ID_OK:
preset_name = dlg.GetValue()
presets[str(preset_name)] = self.settings
presets.sync()
self.presets.SetItems(sorted(presets.keys()))
dlg.Destroy()
def on_del_preset(self, event):
preset_name = sorted(presets.keys())[self.presets.GetCurrentSelection()]
presets.pop(preset_name)
presets.sync()
self.presets.SetItems(sorted(presets.keys()))
def on_source(self, event):
source = sources[self.source.GetCurrentSelection()]
if source.upper() == "FILE":
self.input_file_button.Enable()
elif source.upper() == "SOUNDCARD":
self.input_file_button.Disable()
def on_destination(self, event):
destination = sources[self.destination.GetCurrentSelection()]
if destination.upper() == "FILE":
self.output_file_button.Enable()
elif destination.upper() == "SOUNDCARD":
self.output_file_button.Disable()
def on_mode(self, event):
mode = modes[self.mode.GetCurrentSelection()]
if mode.upper() == "AM":
self.lpf.SetValue("5000")
self.fm_deviation.SetValue("5000")
self.fm_deviation.Disable()
elif mode.upper() == "LSB":
self.lpf.SetValue("3000")
self.fm_deviation.SetValue("5000")
self.fm_deviation.Disable()
elif mode.upper() == "USB":
self.lpf.SetValue("3000")
self.fm_deviation.SetValue("5000")
self.fm_deviation.Disable()
elif mode.upper() == "FM":
self.lpf.SetValue("5000")
self.fm_deviation.SetValue("5000")
self.fm_deviation.Enable()
elif mode.upper() == "WBFM":
self.lpf.SetValue("15000")
self.fm_deviation.SetValue("150000")
self.fm_deviation.Enable()
elif mode.upper() == "STEREO":
self.lpf.SetValue("15000")
self.fm_deviation.SetValue("150000")
self.fm_deviation.Enable()
def on_receive(self, event):
if event.IsChecked():
self.gui2settings()
frequency, frequency_units, cutoff, deviation, mode, rx_frequency_offset = self.settings
destination = sources[self.destination.GetCurrentSelection()]
output_file = self.output_file_button.GetValue()
gain = self.rx_gain.GetValue()
ppm = float(self.rx_ppm.GetValue())
squelch = float(self.rx_squelch.GetValue())
if frequency_units == "MHz":
frequency *= 1e6
elif frequency_units == "kHz":
frequency *= 1e3
self.receive(
float(frequency)+float(rx_frequency_offset),
mode,
destination,
output_file,
gain,
ppm,
squelch
)
else:
if self.receiver_pipe is not None:
self.stop_receive()
def receive(self, frequency, mode, destination, output_file, gain, ppm, squelch):
if mode.upper() == "AM":
fs = 12000
channels = 1
elif mode.upper() == "USB":
fs = 12000
channels = 1
elif mode.upper() == "LSB":
fs = 12000
channels = 1
elif mode.upper() == "FM":
fs = 12000
channels = 1
elif mode.upper() == "WBFM":
fs = 48000
channels = 1
elif mode.upper() == "STEREO":
            mode = "wbfm"  # rtl_fm has no "stereo" mode; stereo broadcasts are demodulated as wideband FM
fs = 48000
channels = 2
self.receiver_pipe = subprocess.Popen(
[
"rtl_fm",
"-M",
mode.lower(),
"-f",
str(frequency),
"-s",
str(fs),
"-g",
str(gain),
"-p",
str(ppm),
"-l",
str(squelch),
"-",
],
stdout=subprocess.PIPE,
)
if destination == "File":
self.sox_pipe = subprocess.Popen(
[
"sox",
"-r",
str(fs),
"--buffer",
"256",
"-t",
"raw",
"-es",
"-b",
"16",
"-c",
"1",
"-V1",
str(output_file),
],
stdin=self.receiver_pipe.stdout,
)
else:
self.sox_pipe = subprocess.Popen(
[
"play",
"-r",
str(fs),
"--buffer",
"256",
"-t",
"raw",
"-es",
"-b",
"16",
"-c",
"1",
"-V1",
"-",
],
stdin=self.receiver_pipe.stdout,
)
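    # receive() shells out to rtl_fm for tuning and demodulation and pipes its raw
    # 16-bit samples either to sox (when recording to a file) or to play (when
    # routing audio to the soundcard); both external tools must be installed and
    # on the PATH.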
def stop_receive(self):
        # terminate the rtl_fm receiver process and the sox/play process consuming its output
if self.receiver_pipe is not None:
self.receiver_pipe.kill()
self.receiver_pipe = None
if self.sox_pipe is not None:
self.sox_pipe.kill()
self.sox_pipe = None
def on_transmit(self, event):
if event.IsChecked():
self.gui2settings()
frequency, frequency_units, cutoff, deviation, mode, rx_frequency_offset = self.settings
if frequency_units == "MHz":
frequency *= 1e6
elif frequency_units == "kHz":
frequency *= 1e3
source = sources[self.source.GetCurrentSelection()]
input_file = self.input_file_button.GetValue()
device = self.device_button.GetValue()
self.transmit(
frequency,
mode,
source,
input_file,
cutoff,
deviation,
device,
)
else:
if self.transmitter is not None:
self.stop_transmit()
def transmit(self,
frequency,
mode,
source,
input_file,
cutoff,
deviation,
device):
try:
if mode.upper() == "AM":
fs = 12000
channels = 1
modulator = tx.AMModulator(
frequency=frequency,
cutoff=cutoff,
sample_rate=12000
)
elif mode.upper() == "USB":
fs = 12000
channels = 1
modulator = tx.SSBModulator(
frequency=frequency,
cutoff=cutoff,
sample_rate=12000
)
elif mode.upper() == "LSB":
fs = 12000
channels = 1
modulator = tx.SSBModulator(
frequency=frequency,
sample_rate=12000,
cutoff=cutoff,
lsb = True
)
elif mode.upper() == "FM":
fs = 12000
channels = 1
modulator = tx.FMModulator(
frequency=frequency,
sample_rate=12000,
cutoff=cutoff,
fm_deviation=deviation,
)
elif mode.upper() == "WBFM":
fs = 48000
channels = 1
modulator = tx.WBFMModulator(
frequency=frequency,
sample_rate=48000,
cutoff=cutoff,
fm_deviation=deviation,
)
elif mode.upper() == "STEREO":
fs = 48000
channels = 2
modulator = tx.StereoModulator(
frequency=frequency,
sample_rate=48000,
cutoff=cutoff,
fm_deviation=deviation,
)
#Use sox to capture/resample input
if source == "File":
self.transmitter_pipe = subprocess.Popen(
[
"sox",
input_file,
"-r",
str(fs),
"-b",
"16",
"-t" ,
"raw",
"--channels",
str(channels),
"-",
],
stdout=subprocess.PIPE,
shell=False,
)
else:
self.transmitter_pipe = subprocess.Popen(
[
"rec",
"--buffer",
"256",
"-r",
str(fs),
"-b",
"16",
"-t",
"raw",
"--channels",
str(channels),
"-",
],
stdout=subprocess.PIPE,
shell=False,
)
self.transmitter = tx.Transmitter(device, modulator)
except Exception as x:
dlg = wx.MessageDialog(self,
'An exception occurred while running transmitter\n'
+ str(x),
'Transmitter Error',
wx.OK | wx.ICON_EXCLAMATION)
dlg.ShowModal()
dlg.Destroy()
self.stop_transmit()
return
#run the transmitter in its own thread
self.transmitter_thread = threading.Thread(
group=None,
target=self.transmitter.transmit,
args=(self.transmitter_pipe.stdout,)
)
self.transmitter_thread.start()
def stop_transmit(self):
        # stop the transmitter, join its worker thread, and kill the sox/rec process feeding it
if self.transmitter is not None:
self.transmitter.stop = True
del(self.transmitter)
self.transmitter = None
if self.transmitter_thread is not None:
self.transmitter_thread.join()
self.transmitter_thread = None
if self.transmitter_pipe is not None:
print "terminating sox"
self.transmitter_pipe.kill()
self.transmitter_pipe = None
self.line = None
self.tx.SetValue(False)
app = wx.PySimpleApp()
fr = wx.Frame(None, size=(500, 600), title='wxtx')
panel = CanvasPanel(fr)
favicon = wx.Icon(os.path.join(os.path.dirname(__file__), 'favicon.ico'), wx.BITMAP_TYPE_ICO, 16, 16)
fr.SetIcon(favicon)
fr.Show()
app.MainLoop()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import eventlet
from heat.common import timeutils
from heat.engine import dependencies
from heat.engine import scheduler
from heat.tests import common
class DummyTask(object):
def __init__(self, num_steps=3):
self.num_steps = num_steps
def __call__(self, *args, **kwargs):
for i in range(1, self.num_steps + 1):
self.do_step(i, *args, **kwargs)
yield
def do_step(self, step_num, *args, **kwargs):
pass
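# DummyTask models a cooperative task as a generator: each call yields once per
# step, so scheduler.TaskRunner can interleave several tasks, and the tests can
# stub do_step() to assert the exact stepping order.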
class PollingTaskGroupTest(common.HeatTestCase):
def setUp(self):
super(PollingTaskGroupTest, self).setUp()
self.addCleanup(self.m.VerifyAll)
def test_group(self):
tasks = [DummyTask() for i in range(3)]
for t in tasks:
self.m.StubOutWithMock(t, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
scheduler.TaskRunner._sleep(0).AndReturn(None)
for t in tasks:
t.do_step(1).AndReturn(None)
for t in tasks:
scheduler.TaskRunner._sleep(1).AndReturn(None)
t.do_step(2).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
t.do_step(3).AndReturn(None)
self.m.ReplayAll()
tg = scheduler.PollingTaskGroup(tasks)
scheduler.TaskRunner(tg)()
def test_kwargs(self):
input_kwargs = {'i': [0, 1, 2],
'i2': [0, 1, 4]}
output_kwargs = scheduler.PollingTaskGroup._kwargs(input_kwargs)
expected_kwargs = [{'i': 0, 'i2': 0},
{'i': 1, 'i2': 1},
{'i': 2, 'i2': 4}]
self.assertEqual(expected_kwargs, list(output_kwargs))
def test_kwargs_short(self):
input_kwargs = {'i': [0, 1, 2],
'i2': [0]}
output_kwargs = scheduler.PollingTaskGroup._kwargs(input_kwargs)
expected_kwargs = [{'i': 0, 'i2': 0}]
self.assertEqual(expected_kwargs, list(output_kwargs))
def test_no_kwargs(self):
output_kwargs = scheduler.PollingTaskGroup._kwargs({})
self.assertEqual([], list(output_kwargs))
def test_args(self):
input_args = ([0, 1, 2],
[0, 1, 4])
output_args = scheduler.PollingTaskGroup._args(input_args)
expected_args = [(0, 0), (1, 1), (2, 4)]
self.assertEqual(expected_args, list(output_args))
def test_args_short(self):
input_args = ([0, 1, 2],
[0])
output_args = scheduler.PollingTaskGroup._args(input_args)
expected_args = [(0, 0)]
self.assertEqual(expected_args, list(output_args))
def test_no_args(self):
output_args = scheduler.PollingTaskGroup._args([])
self.assertEqual([], list(output_args))
@contextlib.contextmanager
def _args_test(self, *arg_lists, **kwarg_lists):
dummy = DummyTask(1)
tg = scheduler.PollingTaskGroup.from_task_with_args(dummy,
*arg_lists,
**kwarg_lists)
self.m.StubOutWithMock(dummy, 'do_step')
yield dummy
self.m.ReplayAll()
scheduler.TaskRunner(tg)(wait_time=None)
def test_with_all_args(self):
with self._args_test([0, 1, 2], [0, 1, 8],
i=[0, 1, 2], i2=[0, 1, 4]) as dummy:
for i in range(3):
dummy.do_step(1, i, i * i * i, i=i, i2=i * i)
def test_with_short_args(self):
with self._args_test([0, 1, 2], [0, 1],
i=[0, 1, 2], i2=[0, 1, 4]) as dummy:
for i in range(2):
dummy.do_step(1, i, i * i, i=i, i2=i * i)
def test_with_short_kwargs(self):
with self._args_test([0, 1, 2], [0, 1, 8],
i=[0, 1], i2=[0, 1, 4]) as dummy:
for i in range(2):
dummy.do_step(1, i, i * i, i=i, i2=i * i)
def test_with_empty_args(self):
with self._args_test([], i=[0, 1, 2], i2=[0, 1, 4]):
pass
def test_with_empty_kwargs(self):
with self._args_test([0, 1, 2], [0, 1, 8], i=[]):
pass
def test_with_no_args(self):
with self._args_test(i=[0, 1, 2], i2=[0, 1, 4]) as dummy:
for i in range(3):
dummy.do_step(1, i=i, i2=i * i)
def test_with_no_kwargs(self):
with self._args_test([0, 1, 2], [0, 1, 4]) as dummy:
for i in range(3):
dummy.do_step(1, i, i * i)
class ExceptionGroupTest(common.HeatTestCase):
def test_contains_exceptions(self):
exception_group = scheduler.ExceptionGroup()
self.assertIsInstance(exception_group.exceptions, list)
def test_can_be_initialized_with_a_list_of_exceptions(self):
ex1 = Exception("ex 1")
ex2 = Exception("ex 2")
exception_group = scheduler.ExceptionGroup([ex1, ex2])
self.assertIn(ex1, exception_group.exceptions)
self.assertIn(ex2, exception_group.exceptions)
def test_can_add_exceptions_after_init(self):
ex = Exception()
exception_group = scheduler.ExceptionGroup()
exception_group.exceptions.append(ex)
self.assertIn(ex, exception_group.exceptions)
def test_str_representation_aggregates_all_exceptions(self):
ex1 = Exception("ex 1")
ex2 = Exception("ex 2")
exception_group = scheduler.ExceptionGroup([ex1, ex2])
self.assertEqual("[u'ex 1', u'ex 2']", str(exception_group))
class DependencyTaskGroupTest(common.HeatTestCase):
def setUp(self):
super(DependencyTaskGroupTest, self).setUp()
self.addCleanup(self.m.VerifyAll)
self.aggregate_exceptions = False
self.error_wait_time = None
self.reverse_order = False
@contextlib.contextmanager
def _dep_test(self, *edges):
dummy = DummyTask(getattr(self, 'steps', 3))
deps = dependencies.Dependencies(edges)
tg = scheduler.DependencyTaskGroup(
deps, dummy, reverse=self.reverse_order,
error_wait_time=self.error_wait_time,
aggregate_exceptions=self.aggregate_exceptions)
self.m.StubOutWithMock(dummy, 'do_step')
yield dummy
self.m.ReplayAll()
scheduler.TaskRunner(tg)(wait_time=None)
def test_no_steps(self):
self.steps = 0
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
with self._dep_test(('second', 'first')):
scheduler.TaskRunner._sleep(None).AndReturn(None)
def test_single_node(self):
with self._dep_test(('only', None)) as dummy:
dummy.do_step(1, 'only').AndReturn(None)
dummy.do_step(2, 'only').AndReturn(None)
dummy.do_step(3, 'only').AndReturn(None)
def test_disjoint(self):
with self._dep_test(('1', None), ('2', None)) as dummy:
dummy.do_step(1, '1').InAnyOrder('1')
dummy.do_step(1, '2').InAnyOrder('1')
dummy.do_step(2, '1').InAnyOrder('2')
dummy.do_step(2, '2').InAnyOrder('2')
dummy.do_step(3, '1').InAnyOrder('3')
dummy.do_step(3, '2').InAnyOrder('3')
def test_single_fwd(self):
with self._dep_test(('second', 'first')) as dummy:
dummy.do_step(1, 'first').AndReturn(None)
dummy.do_step(2, 'first').AndReturn(None)
dummy.do_step(3, 'first').AndReturn(None)
dummy.do_step(1, 'second').AndReturn(None)
dummy.do_step(2, 'second').AndReturn(None)
dummy.do_step(3, 'second').AndReturn(None)
def test_chain_fwd(self):
with self._dep_test(('third', 'second'),
('second', 'first')) as dummy:
dummy.do_step(1, 'first').AndReturn(None)
dummy.do_step(2, 'first').AndReturn(None)
dummy.do_step(3, 'first').AndReturn(None)
dummy.do_step(1, 'second').AndReturn(None)
dummy.do_step(2, 'second').AndReturn(None)
dummy.do_step(3, 'second').AndReturn(None)
dummy.do_step(1, 'third').AndReturn(None)
dummy.do_step(2, 'third').AndReturn(None)
dummy.do_step(3, 'third').AndReturn(None)
def test_diamond_fwd(self):
with self._dep_test(('last', 'mid1'), ('last', 'mid2'),
('mid1', 'first'), ('mid2', 'first')) as dummy:
dummy.do_step(1, 'first').AndReturn(None)
dummy.do_step(2, 'first').AndReturn(None)
dummy.do_step(3, 'first').AndReturn(None)
dummy.do_step(1, 'mid1').InAnyOrder('1')
dummy.do_step(1, 'mid2').InAnyOrder('1')
dummy.do_step(2, 'mid1').InAnyOrder('2')
dummy.do_step(2, 'mid2').InAnyOrder('2')
dummy.do_step(3, 'mid1').InAnyOrder('3')
dummy.do_step(3, 'mid2').InAnyOrder('3')
dummy.do_step(1, 'last').AndReturn(None)
dummy.do_step(2, 'last').AndReturn(None)
dummy.do_step(3, 'last').AndReturn(None)
def test_complex_fwd(self):
with self._dep_test(('last', 'mid1'), ('last', 'mid2'),
('mid1', 'mid3'), ('mid1', 'first'),
('mid3', 'first'), ('mid2', 'first')) as dummy:
dummy.do_step(1, 'first').AndReturn(None)
dummy.do_step(2, 'first').AndReturn(None)
dummy.do_step(3, 'first').AndReturn(None)
dummy.do_step(1, 'mid2').InAnyOrder('1')
dummy.do_step(1, 'mid3').InAnyOrder('1')
dummy.do_step(2, 'mid2').InAnyOrder('2')
dummy.do_step(2, 'mid3').InAnyOrder('2')
dummy.do_step(3, 'mid2').InAnyOrder('3')
dummy.do_step(3, 'mid3').InAnyOrder('3')
dummy.do_step(1, 'mid1').AndReturn(None)
dummy.do_step(2, 'mid1').AndReturn(None)
dummy.do_step(3, 'mid1').AndReturn(None)
dummy.do_step(1, 'last').AndReturn(None)
dummy.do_step(2, 'last').AndReturn(None)
dummy.do_step(3, 'last').AndReturn(None)
def test_many_edges_fwd(self):
with self._dep_test(('last', 'e1'), ('last', 'mid1'), ('last', 'mid2'),
('mid1', 'e2'), ('mid1', 'mid3'),
('mid2', 'mid3'),
('mid3', 'e3')) as dummy:
dummy.do_step(1, 'e1').InAnyOrder('1edges')
dummy.do_step(1, 'e2').InAnyOrder('1edges')
dummy.do_step(1, 'e3').InAnyOrder('1edges')
dummy.do_step(2, 'e1').InAnyOrder('2edges')
dummy.do_step(2, 'e2').InAnyOrder('2edges')
dummy.do_step(2, 'e3').InAnyOrder('2edges')
dummy.do_step(3, 'e1').InAnyOrder('3edges')
dummy.do_step(3, 'e2').InAnyOrder('3edges')
dummy.do_step(3, 'e3').InAnyOrder('3edges')
dummy.do_step(1, 'mid3').AndReturn(None)
dummy.do_step(2, 'mid3').AndReturn(None)
dummy.do_step(3, 'mid3').AndReturn(None)
dummy.do_step(1, 'mid2').InAnyOrder('1mid')
dummy.do_step(1, 'mid1').InAnyOrder('1mid')
dummy.do_step(2, 'mid2').InAnyOrder('2mid')
dummy.do_step(2, 'mid1').InAnyOrder('2mid')
dummy.do_step(3, 'mid2').InAnyOrder('3mid')
dummy.do_step(3, 'mid1').InAnyOrder('3mid')
dummy.do_step(1, 'last').AndReturn(None)
dummy.do_step(2, 'last').AndReturn(None)
dummy.do_step(3, 'last').AndReturn(None)
def test_dbldiamond_fwd(self):
with self._dep_test(('last', 'a1'), ('last', 'a2'),
('a1', 'b1'), ('a2', 'b1'), ('a2', 'b2'),
('b1', 'first'), ('b2', 'first')) as dummy:
dummy.do_step(1, 'first').AndReturn(None)
dummy.do_step(2, 'first').AndReturn(None)
dummy.do_step(3, 'first').AndReturn(None)
dummy.do_step(1, 'b1').InAnyOrder('1b')
dummy.do_step(1, 'b2').InAnyOrder('1b')
dummy.do_step(2, 'b1').InAnyOrder('2b')
dummy.do_step(2, 'b2').InAnyOrder('2b')
dummy.do_step(3, 'b1').InAnyOrder('3b')
dummy.do_step(3, 'b2').InAnyOrder('3b')
dummy.do_step(1, 'a1').InAnyOrder('1a')
dummy.do_step(1, 'a2').InAnyOrder('1a')
dummy.do_step(2, 'a1').InAnyOrder('2a')
dummy.do_step(2, 'a2').InAnyOrder('2a')
dummy.do_step(3, 'a1').InAnyOrder('3a')
dummy.do_step(3, 'a2').InAnyOrder('3a')
dummy.do_step(1, 'last').AndReturn(None)
dummy.do_step(2, 'last').AndReturn(None)
dummy.do_step(3, 'last').AndReturn(None)
def test_circular_deps(self):
d = dependencies.Dependencies([('first', 'second'),
('second', 'third'),
('third', 'first')])
self.assertRaises(dependencies.CircularDependencyException,
scheduler.DependencyTaskGroup, d)
def test_aggregate_exceptions_raises_all_at_the_end(self):
def run_tasks_with_exceptions(e1=None, e2=None):
self.aggregate_exceptions = True
tasks = (('A', None), ('B', None), ('C', None))
with self._dep_test(*tasks) as dummy:
dummy.do_step(1, 'A').InAnyOrder('1')
dummy.do_step(1, 'B').InAnyOrder('1')
dummy.do_step(1, 'C').InAnyOrder('1').AndRaise(e1)
dummy.do_step(2, 'A').InAnyOrder('2')
dummy.do_step(2, 'B').InAnyOrder('2').AndRaise(e2)
dummy.do_step(3, 'A').InAnyOrder('3')
e1 = Exception('e1')
e2 = Exception('e2')
exc = self.assertRaises(scheduler.ExceptionGroup,
run_tasks_with_exceptions, e1, e2)
self.assertEqual(set([e1, e2]), set(exc.exceptions))
def test_aggregate_exceptions_cancels_dependent_tasks_recursively(self):
def run_tasks_with_exceptions(e1=None, e2=None):
self.aggregate_exceptions = True
tasks = (('A', None), ('B', 'A'), ('C', 'B'))
with self._dep_test(*tasks) as dummy:
dummy.do_step(1, 'A').AndRaise(e1)
e1 = Exception('e1')
exc = self.assertRaises(scheduler.ExceptionGroup,
run_tasks_with_exceptions, e1)
self.assertEqual([e1], exc.exceptions)
def test_aggregate_exceptions_cancels_tasks_in_reverse_order(self):
def run_tasks_with_exceptions(e1=None, e2=None):
self.reverse_order = True
self.aggregate_exceptions = True
tasks = (('A', None), ('B', 'A'), ('C', 'B'))
with self._dep_test(*tasks) as dummy:
dummy.do_step(1, 'C').AndRaise(e1)
e1 = Exception('e1')
exc = self.assertRaises(scheduler.ExceptionGroup,
run_tasks_with_exceptions, e1)
self.assertEqual([e1], exc.exceptions)
def test_exception_grace_period(self):
e1 = Exception('e1')
def run_tasks_with_exceptions():
self.error_wait_time = 5
tasks = (('A', None), ('B', None), ('C', 'A'))
with self._dep_test(*tasks) as dummy:
dummy.do_step(1, 'A').InAnyOrder('1')
dummy.do_step(1, 'B').InAnyOrder('1')
dummy.do_step(2, 'A').InAnyOrder('2').AndRaise(e1)
dummy.do_step(2, 'B').InAnyOrder('2')
dummy.do_step(3, 'B')
exc = self.assertRaises(type(e1), run_tasks_with_exceptions)
self.assertEqual(e1, exc)
def test_exception_grace_period_expired(self):
e1 = Exception('e1')
def run_tasks_with_exceptions():
self.steps = 5
self.error_wait_time = 0.05
def sleep():
eventlet.sleep(self.error_wait_time)
tasks = (('A', None), ('B', None), ('C', 'A'))
with self._dep_test(*tasks) as dummy:
dummy.do_step(1, 'A').InAnyOrder('1')
dummy.do_step(1, 'B').InAnyOrder('1')
dummy.do_step(2, 'A').InAnyOrder('2').AndRaise(e1)
dummy.do_step(2, 'B').InAnyOrder('2')
dummy.do_step(3, 'B')
dummy.do_step(4, 'B').WithSideEffects(sleep)
exc = self.assertRaises(type(e1), run_tasks_with_exceptions)
self.assertEqual(e1, exc)
class TaskTest(common.HeatTestCase):
def setUp(self):
super(TaskTest, self).setUp()
scheduler.ENABLE_SLEEP = True
self.addCleanup(self.m.VerifyAll)
def test_run(self):
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
scheduler.TaskRunner._sleep(0).AndReturn(None)
task.do_step(2).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
task.do_step(3).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(task)()
def test_run_wait_time(self):
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
scheduler.TaskRunner._sleep(0).AndReturn(None)
task.do_step(2).AndReturn(None)
scheduler.TaskRunner._sleep(42).AndReturn(None)
task.do_step(3).AndReturn(None)
scheduler.TaskRunner._sleep(42).AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(task)(wait_time=42)
def test_start_run(self):
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
task.do_step(2).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
task.do_step(3).AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start()
runner.run_to_completion()
def test_start_run_wait_time(self):
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
scheduler.TaskRunner._sleep(24).AndReturn(None)
task.do_step(2).AndReturn(None)
scheduler.TaskRunner._sleep(24).AndReturn(None)
task.do_step(3).AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start()
runner.run_to_completion(wait_time=24)
def test_sleep(self):
sleep_time = 42
self.m.StubOutWithMock(eventlet, 'sleep')
eventlet.sleep(0).AndReturn(None)
eventlet.sleep(sleep_time).MultipleTimes().AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(DummyTask())
runner(wait_time=sleep_time)
def test_sleep_zero(self):
self.m.StubOutWithMock(eventlet, 'sleep')
eventlet.sleep(0).MultipleTimes().AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(DummyTask())
runner(wait_time=0)
def test_sleep_none(self):
self.m.StubOutWithMock(eventlet, 'sleep')
self.m.ReplayAll()
runner = scheduler.TaskRunner(DummyTask())
runner(wait_time=None)
def test_args(self):
args = ['foo', 'bar']
kwargs = {'baz': 'quux', 'blarg': 'wibble'}
self.m.StubOutWithMock(DummyTask, '__call__')
task = DummyTask()
task(*args, **kwargs)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task, *args, **kwargs)
runner(wait_time=None)
def test_non_callable(self):
self.assertRaises(AssertionError, scheduler.TaskRunner, object())
def test_stepping(self):
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
task.do_step(2).AndReturn(None)
task.do_step(3).AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start()
self.assertFalse(runner.step())
self.assertTrue(runner)
self.assertFalse(runner.step())
self.assertTrue(runner.step())
self.assertFalse(runner)
def test_start_no_steps(self):
task = DummyTask(0)
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start()
self.assertTrue(runner.done())
self.assertTrue(runner.step())
def test_start_only(self):
task = DummyTask()
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
self.assertFalse(runner.started())
runner.start()
self.assertTrue(runner.started())
def test_double_start(self):
runner = scheduler.TaskRunner(DummyTask())
runner.start()
self.assertRaises(AssertionError, runner.start)
def test_start_cancelled(self):
runner = scheduler.TaskRunner(DummyTask())
runner.cancel()
self.assertRaises(AssertionError, runner.start)
def test_call_double_start(self):
runner = scheduler.TaskRunner(DummyTask())
runner(wait_time=None)
self.assertRaises(AssertionError, runner.start)
def test_start_function(self):
def task():
pass
runner = scheduler.TaskRunner(task)
runner.start()
self.assertTrue(runner.started())
self.assertTrue(runner.done())
self.assertTrue(runner.step())
def test_repeated_done(self):
task = DummyTask(0)
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start()
self.assertTrue(runner.step())
self.assertTrue(runner.step())
def test_timeout(self):
st = timeutils.wallclock()
def task():
while True:
yield
self.m.StubOutWithMock(timeutils, 'wallclock')
timeutils.wallclock().AndReturn(st)
timeutils.wallclock().AndReturn(st + 0.5)
timeutils.wallclock().AndReturn(st + 1.5)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start(timeout=1)
self.assertTrue(runner)
self.assertRaises(scheduler.Timeout, runner.step)
def test_timeout_return(self):
st = timeutils.wallclock()
def task():
while True:
try:
yield
except scheduler.Timeout:
return
self.m.StubOutWithMock(timeutils, 'wallclock')
timeutils.wallclock().AndReturn(st)
timeutils.wallclock().AndReturn(st + 0.5)
timeutils.wallclock().AndReturn(st + 1.5)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start(timeout=1)
self.assertTrue(runner)
self.assertTrue(runner.step())
self.assertFalse(runner)
def test_timeout_swallowed(self):
st = timeutils.wallclock()
def task():
while True:
try:
yield
except scheduler.Timeout:
yield
self.fail('Task still running')
self.m.StubOutWithMock(timeutils, 'wallclock')
timeutils.wallclock().AndReturn(st)
timeutils.wallclock().AndReturn(st + 0.5)
timeutils.wallclock().AndReturn(st + 1.5)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
runner.start(timeout=1)
self.assertTrue(runner)
self.assertTrue(runner.step())
self.assertFalse(runner)
self.assertTrue(runner.step())
def test_cancel_not_started(self):
task = DummyTask(1)
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
self.assertFalse(runner.started())
runner.cancel()
self.assertTrue(runner.done())
def test_cancel_done(self):
task = DummyTask(1)
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
self.assertFalse(runner.started())
runner.start()
self.assertTrue(runner.started())
self.assertTrue(runner.step())
self.assertTrue(runner.done())
runner.cancel()
self.assertTrue(runner.done())
self.assertTrue(runner.step())
def test_cancel(self):
task = DummyTask(3)
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
task.do_step(1).AndReturn(None)
task.do_step(2).AndReturn(None)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
self.assertFalse(runner.started())
runner.start()
self.assertTrue(runner.started())
self.assertFalse(runner.step())
runner.cancel()
self.assertTrue(runner.step())
def test_cancel_grace_period(self):
st = timeutils.wallclock()
task = DummyTask(5)
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
self.m.StubOutWithMock(timeutils, 'wallclock')
task.do_step(1).AndReturn(None)
task.do_step(2).AndReturn(None)
timeutils.wallclock().AndReturn(st)
timeutils.wallclock().AndReturn(st + 0.5)
task.do_step(3).AndReturn(None)
timeutils.wallclock().AndReturn(st + 1.0)
task.do_step(4).AndReturn(None)
timeutils.wallclock().AndReturn(st + 1.5)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
self.assertFalse(runner.started())
runner.start()
self.assertTrue(runner.started())
self.assertFalse(runner.step())
runner.cancel(grace_period=1.0)
self.assertFalse(runner.step())
self.assertFalse(runner.step())
self.assertTrue(runner.step())
def test_cancel_grace_period_before_timeout(self):
st = timeutils.wallclock()
task = DummyTask(5)
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
self.m.StubOutWithMock(timeutils, 'wallclock')
timeutils.wallclock().AndReturn(st)
timeutils.wallclock().AndReturn(st + 0.1)
task.do_step(1).AndReturn(None)
timeutils.wallclock().AndReturn(st + 0.2)
task.do_step(2).AndReturn(None)
timeutils.wallclock().AndReturn(st + 0.2)
timeutils.wallclock().AndReturn(st + 0.5)
task.do_step(3).AndReturn(None)
timeutils.wallclock().AndReturn(st + 1.0)
task.do_step(4).AndReturn(None)
timeutils.wallclock().AndReturn(st + 1.5)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
self.assertFalse(runner.started())
runner.start(timeout=10)
self.assertTrue(runner.started())
self.assertFalse(runner.step())
runner.cancel(grace_period=1.0)
self.assertFalse(runner.step())
self.assertFalse(runner.step())
self.assertTrue(runner.step())
def test_cancel_grace_period_after_timeout(self):
st = timeutils.wallclock()
task = DummyTask(5)
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
self.m.StubOutWithMock(timeutils, 'wallclock')
timeutils.wallclock().AndReturn(st)
timeutils.wallclock().AndReturn(st + 0.1)
task.do_step(1).AndReturn(None)
timeutils.wallclock().AndReturn(st + 0.2)
task.do_step(2).AndReturn(None)
timeutils.wallclock().AndReturn(st + 0.2)
timeutils.wallclock().AndReturn(st + 0.5)
task.do_step(3).AndReturn(None)
timeutils.wallclock().AndReturn(st + 1.0)
task.do_step(4).AndReturn(None)
timeutils.wallclock().AndReturn(st + 1.5)
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
self.assertFalse(runner.started())
runner.start(timeout=1.25)
self.assertTrue(runner.started())
self.assertFalse(runner.step())
runner.cancel(grace_period=3)
self.assertFalse(runner.step())
self.assertFalse(runner.step())
self.assertRaises(scheduler.Timeout, runner.step)
def test_cancel_grace_period_not_started(self):
task = DummyTask(1)
self.m.StubOutWithMock(task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
self.m.ReplayAll()
runner = scheduler.TaskRunner(task)
self.assertFalse(runner.started())
runner.cancel(grace_period=0.5)
self.assertTrue(runner.done())
class TimeoutTest(common.HeatTestCase):
def test_compare(self):
task = scheduler.TaskRunner(DummyTask())
earlier = scheduler.Timeout(task, 10)
eventlet.sleep(0.01)
later = scheduler.Timeout(task, 10)
self.assertTrue(earlier < later)
self.assertTrue(later > earlier)
self.assertEqual(earlier, earlier)
self.assertNotEqual(earlier, later)
class DescriptionTest(common.HeatTestCase):
def setUp(self):
super(DescriptionTest, self).setUp()
self.addCleanup(self.m.VerifyAll)
def test_func(self):
def f():
pass
self.assertEqual('f', scheduler.task_description(f))
def test_lambda(self):
l = lambda: None
self.assertEqual('<lambda>', scheduler.task_description(l))
def test_method(self):
class C(object):
def __str__(self):
return 'C "o"'
def __repr__(self):
return 'o'
def m(self):
pass
self.assertEqual('m from C "o"', scheduler.task_description(C().m))
def test_object(self):
class C(object):
def __str__(self):
return 'C "o"'
def __repr__(self):
return 'o'
def __call__(self):
pass
self.assertEqual('o', scheduler.task_description(C()))
class WrapperTaskTest(common.HeatTestCase):
def setUp(self):
super(WrapperTaskTest, self).setUp()
self.addCleanup(self.m.VerifyAll)
def test_wrap(self):
child_tasks = [DummyTask() for i in range(3)]
@scheduler.wrappertask
def task():
for child_task in child_tasks:
yield child_task()
yield
for child_task in child_tasks:
self.m.StubOutWithMock(child_task, 'do_step')
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
scheduler.TaskRunner._sleep(0).AndReturn(None)
for child_task in child_tasks:
child_task.do_step(1).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
child_task.do_step(2).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
child_task.do_step(3).AndReturn(None)
scheduler.TaskRunner._sleep(1).AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(task)()
def test_child_exception(self):
class MyException(Exception):
pass
def child_task():
yield
raise MyException()
@scheduler.wrappertask
def parent_task():
try:
yield child_task()
except MyException:
raise
else:
self.fail('No exception raised in parent_task')
task = parent_task()
next(task)
self.assertRaises(MyException, next, task)
def test_child_exception_exit(self):
class MyException(Exception):
pass
def child_task():
yield
raise MyException()
@scheduler.wrappertask
def parent_task():
try:
yield child_task()
except MyException:
return
else:
self.fail('No exception raised in parent_task')
task = parent_task()
next(task)
self.assertRaises(StopIteration, next, task)
def test_child_exception_swallow(self):
class MyException(Exception):
pass
def child_task():
yield
raise MyException()
@scheduler.wrappertask
def parent_task():
try:
yield child_task()
except MyException:
yield
else:
self.fail('No exception raised in parent_task')
yield
task = parent_task()
next(task)
next(task)
def test_child_exception_swallow_next(self):
class MyException(Exception):
pass
def child_task():
yield
raise MyException()
dummy = DummyTask()
@scheduler.wrappertask
def parent_task():
try:
yield child_task()
except MyException:
pass
else:
self.fail('No exception raised in parent_task')
yield dummy()
task = parent_task()
next(task)
self.m.StubOutWithMock(dummy, 'do_step')
for i in range(1, dummy.num_steps + 1):
dummy.do_step(i).AndReturn(None)
self.m.ReplayAll()
for i in range(1, dummy.num_steps + 1):
next(task)
self.assertRaises(StopIteration, next, task)
def test_thrown_exception_swallow_next(self):
class MyException(Exception):
pass
dummy = DummyTask()
@scheduler.wrappertask
def child_task():
try:
yield
except MyException:
yield dummy()
else:
self.fail('No exception raised in child_task')
@scheduler.wrappertask
def parent_task():
yield child_task()
task = parent_task()
self.m.StubOutWithMock(dummy, 'do_step')
for i in range(1, dummy.num_steps + 1):
dummy.do_step(i).AndReturn(None)
self.m.ReplayAll()
next(task)
task.throw(MyException)
for i in range(2, dummy.num_steps + 1):
next(task)
self.assertRaises(StopIteration, next, task)
def test_thrown_exception_raise(self):
class MyException(Exception):
pass
dummy = DummyTask()
@scheduler.wrappertask
def child_task():
try:
yield
except MyException:
raise
else:
self.fail('No exception raised in child_task')
@scheduler.wrappertask
def parent_task():
try:
yield child_task()
except MyException:
yield dummy()
task = parent_task()
self.m.StubOutWithMock(dummy, 'do_step')
for i in range(1, dummy.num_steps + 1):
dummy.do_step(i).AndReturn(None)
self.m.ReplayAll()
next(task)
task.throw(MyException)
for i in range(2, dummy.num_steps + 1):
next(task)
self.assertRaises(StopIteration, next, task)
def test_thrown_exception_exit(self):
class MyException(Exception):
pass
dummy = DummyTask()
@scheduler.wrappertask
def child_task():
try:
yield
except MyException:
return
else:
self.fail('No exception raised in child_task')
@scheduler.wrappertask
def parent_task():
yield child_task()
yield dummy()
task = parent_task()
self.m.StubOutWithMock(dummy, 'do_step')
for i in range(1, dummy.num_steps + 1):
dummy.do_step(i).AndReturn(None)
self.m.ReplayAll()
next(task)
task.throw(MyException)
for i in range(2, dummy.num_steps + 1):
next(task)
self.assertRaises(StopIteration, next, task)
def test_parent_exception(self):
class MyException(Exception):
pass
def child_task():
yield
@scheduler.wrappertask
def parent_task():
yield child_task()
raise MyException()
task = parent_task()
next(task)
self.assertRaises(MyException, next, task)
def test_parent_throw(self):
class MyException(Exception):
pass
@scheduler.wrappertask
def parent_task():
try:
yield DummyTask()()
except MyException:
raise
else:
self.fail('No exception raised in parent_task')
task = parent_task()
next(task)
self.assertRaises(MyException, task.throw, MyException())
def test_parent_throw_exit(self):
class MyException(Exception):
pass
@scheduler.wrappertask
def parent_task():
try:
yield DummyTask()()
except MyException:
return
else:
self.fail('No exception raised in parent_task')
task = parent_task()
next(task)
self.assertRaises(StopIteration, task.throw, MyException())
def test_parent_cancel(self):
@scheduler.wrappertask
def parent_task():
try:
yield
except GeneratorExit:
raise
else:
self.fail('parent_task not closed')
task = parent_task()
next(task)
task.close()
def test_parent_cancel_exit(self):
@scheduler.wrappertask
def parent_task():
try:
yield
except GeneratorExit:
return
else:
self.fail('parent_task not closed')
task = parent_task()
next(task)
task.close()
def test_cancel(self):
def child_task():
try:
yield
except GeneratorExit:
raise
else:
self.fail('child_task not closed')
@scheduler.wrappertask
def parent_task():
try:
yield child_task()
except GeneratorExit:
raise
else:
self.fail('parent_task not closed')
task = parent_task()
next(task)
task.close()
def test_cancel_exit(self):
def child_task():
try:
yield
except GeneratorExit:
return
else:
self.fail('child_task not closed')
@scheduler.wrappertask
def parent_task():
try:
yield child_task()
except GeneratorExit:
raise
else:
self.fail('parent_task not closed')
task = parent_task()
next(task)
task.close()
def test_cancel_parent_exit(self):
def child_task():
try:
yield
except GeneratorExit:
return
else:
self.fail('child_task not closed')
@scheduler.wrappertask
def parent_task():
try:
yield child_task()
except GeneratorExit:
return
else:
self.fail('parent_task not closed')
task = parent_task()
next(task)
task.close()
|
|
import warnings, copy
from astropy.time import Time, TimeDelta
from astropy.coordinates import SkyCoord, AltAz, ICRS, FK5, EarthLocation, Longitude
from astropy import units as U
import numpy as NP
import constants as CNST
# Perform some IERS adjustments
from astropy.utils import iers
tnow = Time.now()
try:
print('Checking if some IERS related adjustments are required...')
tnow_ut1 = tnow.ut1
except iers.IERSRangeError as exception:
default_iers_auto_url = 'http://maia.usno.navy.mil/ser7/finals2000A.all'
secondary_iers_auto_url = 'https://datacenter.iers.org/data/9/finals2000A.all'
tertiary_iers_auto_url = 'ftp://cddis.gsfc.nasa.gov/pub/products/iers/finals2000A.all'
try:
# iers.conf.iers_auto_url = default_iers_auto_url
iers.conf.remote_timeout = 120.0
iers.IERS_A.open(iers.IERS_A_URL)
except Exception as err:
if ('url' in str(err).lower()) or (('connection' in str(err).lower())):
print(err)
print('Original source URL for IERS_A: {0} FAILED!'.format(iers.conf.iers_auto_url))
print('Original IERS Configuration:')
print(iers.conf.__class__.__dict__)
print('Modifying the source URL for IERS_A table to {0}'.format(secondary_iers_auto_url))
# iers.IERS_A_URL = 'ftp://cddis.gsfc.nasa.gov/pub/products/iers/finals2000A.all'
iers.conf.auto_download = True
iers.conf.iers_auto_url = secondary_iers_auto_url
# iers.conf.iers_auto_url = 'ftp://cddis.gsfc.nasa.gov/pub/products/iers/finals2000A.all'
try:
print('Now testing {0}'.format(secondary_iers_auto_url))
iers_a = iers.IERS_A.open(secondary_iers_auto_url)
except Exception as newerr:
                if ('url' in str(newerr).lower()) or ('connection' in str(newerr).lower()):
print(newerr)
print('Modified URL also did not work. Computation of LST may be affected or will completely fail.')
# raise newerr
else:
print('Updated source URL {0} worked!'.format(secondary_iers_auto_url))
print('Modified IERS Configuration:')
print(iers.conf.__class__.__dict__)
try:
tnow_ut1 = tnow.ut1
    except iers.IERSRangeError as exception:
print(exception)
warnings.warn('Ephemeris predictions will be unreliable despite a successful download of IERS tables')
################################################################################
def equation_of_equinoxes(jd):
"""
----------------------------------------------------------------------------
Estimate the equation of the equinoxes
Inputs:
jd [scalar or numpy array] Julian date at which nutation is to be
estimated and the equation of equinoxes is returned.
Output:
Equation of the equinoxes (in hours) that should be used to correct the
Greenwich Mean Sidereal Time to obtain the Greenwich Apparent Sidereal Time
Notes: Adopted from https://aa.usno.navy.mil/faq/docs/GAST.php
----------------------------------------------------------------------------
"""
if not isinstance(jd, (int, float, NP.ndarray)):
raise TypeError('Input julian date(s) must be a scalar or numpy array')
d = jd - 2451545.0 # Days since 2000 January 1, 12h UT, Julian date 2451545.0
omega = 125.04 - 0.052954 * d # Longitude of the ascending node of the Moon in degrees
l = 280.47 + 0.98565 * d # Mean Longitude of the Sun in degrees
obliquity = 23.4393 - 0.0000004 * d # in degrees
nutation = -0.000319 * NP.sin(NP.radians(omega)) - 0.000024 * NP.sin(NP.radians(2*l)) # in hours
eqeq = nutation * NP.cos(NP.radians(obliquity)) # Equation of the equinoxes in hours
# t = d / 36525 # number of centuries since 2000 January 1, 12h UT, Julian date 2451545.0
return eqeq
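################################################################################
# Minimal usage sketch for equation_of_equinoxes(). The date and the magnitude
# quoted in the comment are illustrative only; the helper below is never called
# anywhere in this module.
def _example_equation_of_equinoxes():
    jd_j2000 = 2451545.0  # 2000 January 1, 12h UT
    eqeq_hours = equation_of_equinoxes(jd_j2000)
    # The correction amounts to a small fraction of a second of time
    print('Equation of the equinoxes at J2000.0: {0:.6f} s'.format(eqeq_hours * 3600.0))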
################################################################################
def julian_date_from_LAST(last, jd0, longitude, tol=1e-6):
"""
----------------------------------------------------------------------------
Inputs:
last [scalar or numpy array] Local Apparent Sidereal Time (in hours)
jd0 [scalar or numpy array] Julian date at previous midnight. Same
size as input 'last' or numpy broadcastable to that shape
longitude [scalar or numpy array] Longitude of observing site (in hours).
Same size as input 'last' or numpy broadcastable to that shape
tol [scalar] Tolerance for convergence since these calculations
are iteratively solved
Output:
    Julian date(s) as a numpy array corresponding to the input apparent sidereal
time and longitude on given starting Julian dates.
Notes: Adopted from https://aa.usno.navy.mil/faq/docs/GAST.php
----------------------------------------------------------------------------
"""
if not isinstance(jd0, (int, float, NP.ndarray)):
raise TypeError('Input starting julian date(s) must be a scalar or numpy array')
jd0 = NP.asarray(jd0).ravel()
if not isinstance(last, (int, float, NP.ndarray)):
raise TypeError('Input local apparent sidereal time(s) must be a scalar or numpy array')
last = NP.asarray(last).ravel()
if not isinstance(longitude, (int, float, NP.ndarray)):
raise TypeError('Input longitude(s) must be a scalar or numpy array')
longitude = NP.asarray(longitude).ravel()
jd = NP.copy(jd0)
gast = last - longitude
d0 = jd0 - 2451545.0
    gast = gast % 24.0  # wrap into [0, 24) hours; element-wise, so array inputs are handled too
gmst0 = 18.697374558 + 24.06570982441908 * d0 # Accurate to 0.1s per century
    gmst0 = gmst0 % 24.0  # modulo already constrains values to [0, 24) hours
dev = 100 * tol
iternum = 0
hr = None
while (iternum < 1000) and (NP.abs(dev) > tol):
eqeq = equation_of_equinoxes(jd)
gmst = gast - eqeq
        gmst = gmst % 24.0  # keep GMST in [0, 24) hours
newhr = gmst - gmst0
newhr *= CNST.sday
if hr is not None:
dev = (newhr - hr) / hr
hr = NP.copy(newhr)
jd = jd0 + hr/24.0
iternum += 1
return jd
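################################################################################
# Minimal usage sketch for julian_date_from_LAST(). The site longitude and the
# target sidereal time below are arbitrary sample values; the helper only
# illustrates the call signature and is never invoked in this module.
def _example_julian_date_from_LAST():
    jd_midnight = 2458849.5   # 2020-01-01 00:00 UT expressed as a Julian date
    longitude_hours = 7.0     # site longitude east of Greenwich, in hours
    last_hours = 12.0         # local apparent sidereal time to solve for, in hours
    jd = julian_date_from_LAST(last_hours, jd_midnight, longitude_hours)
    print('Julian date at LAST = {0} h: {1}'.format(last_hours, jd))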
################################################################################
def gmst2gps(day, GMST, type='mean', iterations=10, precision=1e-14):
"""
----------------------------------------------------------------------------
gps=gmst2gps(day, GMST, type='mean', iterations=10, precision=1e-14)
returns the GPS time associated with the GMST/GAST on the given day
type can be 'mean' or 'apparent'
uses a root-find, so not super fast
----------------------------------------------------------------------------
"""
    assert type in ['mean', 'apparent']
    gmst = Longitude(GMST, unit='h')
    t = Time(day, scale='utc')
    iteration = 0
    siderealday_to_solarday = 0.99726958
    while iteration < iterations:
        error = t.sidereal_time(type, 'greenwich') - gmst
        if NP.abs(error / gmst).value <= precision:
            return t.gps
        t = t - TimeDelta(error.hour * U.hour) * siderealday_to_solarday
        iteration += 1
    return None
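################################################################################
# Minimal usage sketch for gmst2gps(). Evaluating it requires UT1 (and hence
# IERS tables), so the call is kept in an uncalled helper; the date and the
# sidereal time are arbitrary sample values.
def _example_gmst2gps():
    gps = gmst2gps('2020-01-01', 12.0, type='mean')
    if gps is None:
        print('gmst2gps() did not converge')
    else:
        print('GPS seconds at GMST 12h on 2020-01-01: {0}'.format(gps))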
################################################################################
def hadec2radec(hadec, lst, obstime=None, epoch_RA=2000.0, time_type=None):
"""
----------------------------------------------------------------------------
Convert HA-Dec to RA-Dec with accurate ephemeris
Inputs:
hadec [numpy array] HA and Dec as a Nx2 numpy array. All units in degrees
lst [scalar or numpy array] Local Sidereal time (in degrees). If a
scalar is specified, it will be applied to all the entries of input
hadec. If an array is given it should be numpy array broadcastable
with hadec. That is, shape must be (1,) or (N,).
obstime [scalar, string, or instance of class astropy.time.Time] The time
or epoch which applies to input hadec. It can be a scalar (in JD
or JYear), string (JYear string prefixed with 'J' or ISO or ISOT),
or an instance of class astropy.time.Time. The appropriate format
must be specified in input time_type. If set to None (default), it
will be set equal to epoch_RA.
epoch_RA
[scalar, string, or instance of class astropy.time.Time] The time
or epoch which applies to output radec. It can be a scalar (in JD
or JYear), string (JYear string prefixed with 'J' or ISO or ISOT),
or an instance of class astropy.time.Time. The appropriate format
must be specified in input time_type. It must be in the same format
as the one obstime is specified in. If set to 2000.0 (default), it
is assumed to be in 'jyear' format. If set to None, will be set
equal to 2000.0 in 'jyear' format.
time_type
[string] Specifies the format in which obstime and epoch_RA are
provided. Accepted values are 'jd' (Julian Day), 'jyear' (Julian
year), 'iso' or 'isot'. If set to None (default) and if obstime
and/or epoch_RA is a scalar, the corresponding scalar entries are
assumed to be in Julian Year.
Output:
The output radec as a numpy array of shape (N,2) in units of degrees is
returned at the epoch specified in epoch_RA.
----------------------------------------------------------------------------
"""
if not isinstance(hadec, NP.ndarray):
raise TypeError('Input hadec must be a numpy array')
if hadec.size == 2:
hadec = hadec.reshape(1,-1)
if hadec.ndim != 2:
raise ValueError('Input hadec must be a 2D numpy array of shape(nsrc,2)')
if hadec.shape[1] != 2:
raise ValueError('Input hadec must be a 2D numpy array of shape(nsrc,2)')
if isinstance(lst, (int,float)):
lst = NP.asarray(lst).astype(NP.float).reshape(-1)
if (lst.size != 1) and (lst.size != hadec.shape[0]):
raise ValueError('Input LST must match the shape of input hadec')
if epoch_RA is not None:
if isinstance(epoch_RA, (int,float)):
if (time_type is None) or (time_type.lower() == 'jyear'):
equinox_RA = Time(epoch_RA, scale='utc', format='jyear')
elif time_type.lower() == 'jd':
equinox_RA = Time(epoch_RA, scale='utc', format='jd')
elif isinstance(epoch_RA, str):
if time_type.lower() == 'jyear':
                equinox_RA = Time(epoch_RA, scale='utc', format='jyear_str')
if (time_type.lower() == 'iso') or (time_type.lower() == 'isot'):
equinox_RA = Time(epoch_RA, scale='utc', format=time_type.lower())
elif isinstance(epoch_RA, Time):
equinox_RA = copy.copy(epoch_RA)
else:
raise TypeError('Input epoch_RA is invalid or currently not accepted')
else:
equinox_RA = Time(2000.0, format='jyear', scale='utc')
warnings.warn('No epoch_RA provided. Setting epoch to {0}'.format(equinox_RA.jyear_str))
if obstime is not None:
if isinstance(obstime, (int,float)):
if (time_type is None) or (time_type.lower() == 'jyear'):
equinox_HA = Time(obstime, scale='utc', format='jyear')
elif time_type.lower() == 'jd':
equinox_HA = Time(obstime, scale='utc', format='jd')
elif isinstance(obstime, str):
if time_type.lower() == 'jyear':
equinox_HA = Time(obstime, scale='utc', format='jyear_str')
if (time_type.lower() == 'iso') or (time_type.lower() == 'isot'):
equinox_HA = Time(obstime, scale='utc', format=time_type.lower())
elif isinstance(obstime, Time):
equinox_HA = copy.copy(obstime)
else:
raise TypeError('Input obstime is invalid or currently not accepted')
if obstime is None:
equinox_HA = copy.copy(equinox_RA)
warnings.warn('No obstime provided. Setting obstime to {0}'.format(equinox_HA.jyear_str))
# if (obstime is None) and (epoch_RA is not None):
# equinox_HA = copy.copy(equinox_RA)
# elif (obstime is not None) and (epoch_RA is None):
# equinox_RA = copy.copy(equinox_HA)
# elif (obstime is None) and (epoch_RA is None):
# equinox_HA = None
# equinox_RA = None
radec_obstime = NP.hstack(((lst-hadec[:,0]).reshape(-1,1), hadec[:,1].reshape(-1,1)))
if (equinox_HA is None) and (equinox_RA is None):
return radec_obstime
else:
skycoords = SkyCoord(ra=radec_obstime[:,0]*U.deg, dec=radec_obstime[:,1]*U.deg, frame='fk5', equinox=equinox_HA).transform_to(FK5(equinox=equinox_RA))
radec = NP.hstack((skycoords.ra.deg.reshape(-1,1), skycoords.dec.deg.reshape(-1,1)))
return radec
#################################################################################
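# Illustrative usage sketch (not part of the original module): converting a
# single HA-Dec pair to RA-Dec at J2000 with hadec2radec() above. The HA, Dec,
# LST and observing epoch values are hypothetical placeholders.
def _example_hadec2radec_usage():
    hadec = NP.asarray([[15.0, -30.0]])   # HA, Dec in degrees
    lst = 120.0                           # local sidereal time in degrees
    return hadec2radec(hadec, lst, obstime=2018.5, epoch_RA=2000.0,
                       time_type='jyear')

#################################################################################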
def radec2hadec(radec, lst, obstime=None, epoch_RA=None, time_type=None):
"""
----------------------------------------------------------------------------
Convert RA-Dec to HA-Dec with accurate ephemeris
Inputs:
radec [numpy array] RA and Dec as a Nx2 numpy array. All units in degrees
lst [scalar or numpy array] Local Sidereal time (in degrees). If a
scalar is specified, it will be applied to all the entries of input
radec. If an array is given it should be numpy array broadcastable
with radec. That is, shape must be (1,) or (N,).
obstime [scalar, string, or instance of class astropy.time.Time] The time
or epoch which applies to output hadec. It can be a scalar (in JD
or JYear), string (JYear string prefixed with 'J' or ISO or ISOT),
or an instance of class astropy.time.Time. The appropriate format
must be specified in input time_type. If set to None (default), it
will be set equal to epoch_RA.
epoch_RA
[scalar, string, or instance of class astropy.time.Time] The time
or epoch which applies to input radec. It can be a scalar (in JD
or JYear), string (JYear string prefixed with 'J' or ISO or ISOT),
or an instance of class astropy.time.Time. The appropriate format
must be specified in input time_type. It must be in the same format
                as the one obstime is specified in. If set to 2000.0, it is
                assumed to be in 'jyear' format. If set to None (default), it
                will be set equal to 2000.0 in 'jyear' format.
time_type
[string] Specifies the format in which obstime and epoch_RA are
provided. Accepted values are 'jd' (Julian Day), 'jyear' (Julian
year), 'iso' or 'isot'. If set to None (default) and if obstime
and/or epoch_RA is a scalar, the corresponding scalar entries are
assumed to be in Julian Year.
Output:
The output hadec as a numpy array of shape (N,2) in units of degrees
is returned at the epoch specified in obstime.
----------------------------------------------------------------------------
"""
if not isinstance(radec, NP.ndarray):
raise TypeError('Input radec must be a numpy array')
if radec.size == 2:
radec = radec.reshape(1,-1)
if radec.ndim != 2:
raise ValueError('Input radec must be a 2D numpy array of shape(nsrc,2)')
if radec.shape[1] != 2:
raise ValueError('Input radec must be a 2D numpy array of shape(nsrc,2)')
if isinstance(lst, (int,float)):
lst = NP.asarray(lst).astype(NP.float).reshape(-1)
if (lst.size != 1) and (lst.size != radec.shape[0]):
raise ValueError('Input LST must match the shape of input radec')
if epoch_RA is not None:
if isinstance(epoch_RA, (int,float)):
if (time_type is None) or (time_type.lower() == 'jyear'):
equinox_RA = Time(epoch_RA, scale='utc', format='jyear')
elif time_type.lower() == 'jd':
equinox_RA = Time(epoch_RA, scale='utc', format='jd')
elif isinstance(epoch_RA, str):
if time_type.lower() == 'jyear':
                equinox_RA = Time(epoch_RA, scale='utc', format='jyear_str')
if (time_type.lower() == 'iso') or (time_type.lower() == 'isot'):
equinox_RA = Time(epoch_RA, scale='utc', format=time_type.lower())
elif isinstance(epoch_RA, Time):
equinox_RA = copy.copy(epoch_RA)
else:
raise TypeError('Input epoch_RA is invalid or currently not accepted')
else:
equinox_RA = Time(2000.0, format='jyear', scale='utc')
warnings.warn('No epoch_RA provided. Setting epoch to {0}'.format(equinox_RA.jyear_str))
if obstime is not None:
if isinstance(obstime, (int,float)):
if (time_type is None) or (time_type.lower() == 'jyear'):
equinox_HA = Time(obstime, scale='utc', format='jyear')
elif time_type.lower() == 'jd':
equinox_HA = Time(obstime, scale='utc', format='jd')
elif isinstance(obstime, str):
if time_type.lower() == 'jyear':
equinox_HA = Time(obstime, scale='utc', format='jyear_str')
if (time_type.lower() == 'iso') or (time_type.lower() == 'isot'):
equinox_HA = Time(obstime, scale='utc', format=time_type.lower())
elif isinstance(obstime, Time):
equinox_HA = copy.copy(obstime)
else:
raise TypeError('Input obstime is invalid or currently not accepted')
if obstime is None:
equinox_HA = copy.copy(equinox_RA)
warnings.warn('No obstime provided. Setting obstime to {0}'.format(equinox_HA.jyear_str))
# if (obstime is None) and (epoch_RA is not None):
# equinox_HA = copy.copy(equinox_RA)
# elif (obstime is not None) and (epoch_RA is None):
# equinox_RA = copy.copy(equinox_HA)
# elif (obstime is None) and (epoch_RA is None):
# equinox_HA = None
# equinox_RA = None
# if (equinox_HA is None) and (equinox_RA is None):
# return NP.hstack(((lst - radec[:,0]).reshape(-1,1), radec[:,1].reshape(-1,1)))
# else:
skycoords = SkyCoord(ra=radec[:,0]*U.deg, dec=radec[:,1]*U.deg, equinox=equinox_RA, frame='fk5').transform_to(FK5(equinox=equinox_HA))
hadec = NP.hstack(((lst-skycoords.ra.deg).reshape(-1,1), skycoords.dec.deg.reshape(-1,1)))
return hadec
#################################################################################
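# Illustrative usage sketch (not part of the original module): the inverse
# operation with radec2hadec() above, converting J2000 RA-Dec to HA-Dec at a
# hypothetical observing epoch.
def _example_radec2hadec_usage():
    radec = NP.asarray([[135.0, -30.0]])  # RA, Dec in degrees (J2000)
    lst = 120.0                           # local sidereal time in degrees
    return radec2hadec(radec, lst, obstime=2018.5, epoch_RA=2000.0,
                       time_type='jyear')

#################################################################################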
def altaz2radec(altaz, location, obstime=None, epoch_RA=2000.0, time_type=None):
"""
----------------------------------------------------------------------------
Convert Alt-Az to RA-Dec with accurate ephemeris
Inputs:
altaz [numpy array] Altitude and Azimuth as a Nx2 numpy array. All units
in degrees
location
[instance of class astropy.coordinates.EarthLocation] Location of
the observer provided as an instance of class
astropy.coordinates.EarthLocation
obstime [scalar, string, or instance of class astropy.time.Time] The time
or epoch which applies to input altaz. It can be a scalar (in JD
or JYear), string (JYear string prefixed with 'J' or ISO or ISOT),
or an instance of class astropy.time.Time. The appropriate format
must be specified in input time_type. If set to None (default), it
will be set equal to epoch_RA.
epoch_RA
[scalar, string, or instance of class astropy.time.Time] The time
or epoch which applies to output radec. It can be a scalar (in JD
or JYear), string (JYear string prefixed with 'J' or ISO or ISOT),
or an instance of class astropy.time.Time. The appropriate format
must be specified in input time_type. It must be in the same format
as the one obstime is specified in. If set to 2000.0 (default), it
is assumed to be in 'jyear' format. If set to None, it will be set
equal to default of 2000.0 in 'jyear' format.
time_type
[string] Specifies the format in which obstime and epoch_RA are
provided. Accepted values are 'jd' (Julian Day), 'jyear' (Julian
year), 'iso' or 'isot'. If set to None (default) and if obstime
and/or epoch_RA is a scalar, the corresponding scalar entries are
assumed to be in Julian Year.
Output:
The output radec as a numpy array of shape (N,2) in units of degrees is
returned at the epoch specified in epoch_RA.
----------------------------------------------------------------------------
"""
if isinstance(altaz, NP.ndarray):
if altaz.size == 2:
altaz = altaz.reshape(1,-1)
if altaz.ndim != 2:
raise ValueError('Input altaz must be a numpy array of shape (N,2)')
if altaz.shape[1] != 2:
raise ValueError('Input altaz must be a numpy array of shape (N,2)')
elif not isinstance(altaz, AltAz):
raise TypeError('Input altaz must be a numpy array or an instance of class astropy.coordinates.AltAz')
if not isinstance(location, EarthLocation):
raise TypeError('Input location must be an instance of class astropy.coordinates.EarthLocation')
if epoch_RA is not None:
if isinstance(epoch_RA, (int,float)):
if (time_type is None) or (time_type.lower() == 'jyear'):
equinox_RA = Time(epoch_RA, scale='utc', format='jyear')
elif time_type.lower() == 'jd':
equinox_RA = Time(epoch_RA, scale='utc', format='jd')
elif isinstance(epoch_RA, str):
if time_type.lower() == 'jyear':
                equinox_RA = Time(epoch_RA, scale='utc', format='jyear_str')
elif (time_type.lower() == 'iso') or (time_type.lower() == 'isot'):
equinox_RA = Time(epoch_RA, scale='utc', format=time_type.lower())
elif isinstance(epoch_RA, Time):
equinox_RA = copy.copy(epoch_RA)
else:
raise TypeError('Input epoch_RA is invalid or currently not accepted')
else:
equinox_RA = Time(2000.0, format='jyear', scale='utc')
warnings.warn('No epoch_RA provided. Setting epoch to {0}'.format(equinox_RA.jyear_str))
if obstime is not None:
if isinstance(obstime, (int,float)):
if (time_type is None) or (time_type.lower() == 'jyear'):
equinox_altaz = Time(obstime, scale='utc', format='jyear')
elif time_type.lower() == 'jd':
equinox_altaz = Time(obstime, scale='utc', format='jd')
elif isinstance(obstime, str):
if time_type.lower() == 'jyear':
                equinox_altaz = Time(obstime, scale='utc', format='jyear_str')
if (time_type.lower() == 'iso') or (time_type.lower() == 'isot'):
equinox_altaz = Time(obstime, scale='utc', format=time_type.lower())
elif isinstance(obstime, Time):
equinox_altaz = copy.copy(obstime)
else:
raise TypeError('Input obstime is invalid or currently not accepted')
else:
if isinstance(altaz, AltAz):
equinox_altaz = copy.deepcopy(altaz.obstime)
else:
equinox_altaz = copy.copy(equinox_RA)
warnings.warn('No obstime provided. Setting obstime to {0}'.format(equinox_altaz.jyear_str))
if isinstance(altaz, AltAz):
elaz = copy.deepcopy(altaz)
else:
elaz = AltAz(alt=altaz[:,0]*U.deg, az=altaz[:,1]*U.deg, obstime=equinox_altaz, location=location)
coords_radec = elaz.transform_to(FK5(equinox=equinox_RA))
radec = NP.hstack((coords_radec.ra.deg.reshape(-1,1), coords_radec.dec.deg.reshape(-1,1)))
return radec
#################################################################################
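# Illustrative usage sketch (not part of the original module): calling
# altaz2radec() above for one Alt-Az pair. The site coordinates and observing
# epoch are hypothetical placeholders.
def _example_altaz2radec_usage():
    site = EarthLocation(lat=34.0*U.deg, lon=-107.6*U.deg, height=2124.0*U.m)
    altaz = NP.asarray([[60.0, 180.0]])   # Alt, Az in degrees
    return altaz2radec(altaz, site, obstime=2018.5, epoch_RA=2000.0,
                       time_type='jyear')

#################################################################################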
def radec2altaz(radec, location, obstime=None, epoch_RA=2000.0, time_type=None):
"""
----------------------------------------------------------------------------
Convert RA-Dec to Alt-Az with accurate ephemeris
Inputs:
    radec   [numpy array] RA and Dec as a Nx2 numpy array. All units
in degrees
location
[instance of class astropy.coordinates.EarthLocation] Location of
the observer provided as an instance of class
astropy.coordinates.EarthLocation
obstime [scalar, string, or instance of class astropy.time.Time] The time
or epoch which applies to output altaz. It can be a scalar (in JD
or JYear), string (JYear string prefixed with 'J' or ISO or ISOT),
or an instance of class astropy.time.Time. The appropriate format
must be specified in input time_type. If set to None (default), it
will be set equal to epoch_RA.
epoch_RA
[scalar, string, or instance of class astropy.time.Time] The time
or epoch which applies to input radec. It can be a scalar (in JD
or JYear), string (JYear string prefixed with 'J' or ISO or ISOT),
or an instance of class astropy.time.Time. The appropriate format
must be specified in input time_type. It must be in the same format
as the one obstime is specified in. If set to 2000.0 (default), it
is assumed to be in 'jyear' format. If set to None, it will be set
equal to default of 2000.0 in 'jyear' format.
time_type
[string] Specifies the format in which obstime and epoch_RA are
provided. Accepted values are 'jd' (Julian Day), 'jyear' (Julian
year), 'iso' or 'isot'. If set to None (default) and if obstime
and/or epoch_RA is a scalar, the corresponding scalar entries are
assumed to be in Julian Year.
Output:
The output altaz as a numpy array of shape (N,2) in units of degrees is
    returned at the time specified in obstime.
----------------------------------------------------------------------------
"""
if isinstance(radec, NP.ndarray):
if radec.size == 2:
radec = radec.reshape(1,-1)
if radec.ndim != 2:
raise ValueError('Input radec must be a numpy array of shape (N,2)')
if radec.shape[1] != 2:
raise ValueError('Input radec must be a numpy array of shape (N,2)')
elif not isinstance(radec, SkyCoord):
raise TypeError('Input radec must be a numpy array or an instance of class astropy.coordinates.SkyCoord')
if not isinstance(location, EarthLocation):
raise TypeError('Input location must be an instance of class astropy.coordinates.EarthLocation')
if epoch_RA is not None:
if isinstance(epoch_RA, (int,float)):
if (time_type is None) or (time_type.lower() == 'jyear'):
equinox_RA = Time(epoch_RA, scale='utc', format='jyear')
elif time_type.lower() == 'jd':
equinox_RA = Time(epoch_RA, scale='utc', format='jd')
elif isinstance(epoch_RA, str):
if time_type.lower() == 'jyear':
                equinox_RA = Time(epoch_RA, scale='utc', format='jyear_str')
elif (time_type.lower() == 'iso') or (time_type.lower() == 'isot'):
equinox_RA = Time(epoch_RA, scale='utc', format=time_type.lower())
elif isinstance(epoch_RA, Time):
equinox_RA = copy.copy(epoch_RA)
else:
raise TypeError('Input epoch_RA is invalid or currently not accepted')
else:
equinox_RA = Time(2000.0, format='jyear', scale='utc')
warnings.warn('No epoch_RA provided. Setting epoch to {0}'.format(equinox_RA.jyear_str))
if obstime is not None:
if isinstance(obstime, (int,float)):
if (time_type is None) or (time_type.lower() == 'jyear'):
equinox_altaz = Time(obstime, scale='utc', format='jyear')
elif time_type.lower() == 'jd':
equinox_altaz = Time(obstime, scale='utc', format='jd')
elif isinstance(obstime, str):
if time_type.lower() == 'jyear':
                equinox_altaz = Time(obstime, scale='utc', format='jyear_str')
if (time_type.lower() == 'iso') or (time_type.lower() == 'isot'):
equinox_altaz = Time(obstime, scale='utc', format=time_type.lower())
elif isinstance(obstime, Time):
equinox_altaz = copy.copy(obstime)
else:
raise TypeError('Input obstime is invalid or currently not accepted')
else:
equinox_altaz = copy.copy(equinox_RA)
warnings.warn('No obstime provided. Setting obstime to {0}'.format(equinox_altaz.jyear_str))
if isinstance(radec, SkyCoord):
coords_radec = copy.deepcopy(radec)
else:
coords_radec = SkyCoord(ra=radec[:,0]*U.deg, dec=radec[:,1]*U.deg, equinox=equinox_RA, frame='fk5')
elaz = coords_radec.transform_to(AltAz(obstime=equinox_altaz, location=location))
altaz = NP.hstack((elaz.alt.deg.reshape(-1,1), elaz.az.deg.reshape(-1,1)))
return altaz
#################################################################################
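# Illustrative usage sketch (not part of the original module): the inverse
# transform with radec2altaz() above, using the same hypothetical site as in
# the altaz2radec sketch.
def _example_radec2altaz_usage():
    site = EarthLocation(lat=34.0*U.deg, lon=-107.6*U.deg, height=2124.0*U.m)
    radec = NP.asarray([[135.0, -30.0]])  # RA, Dec in degrees (J2000)
    return radec2altaz(radec, site, obstime=2018.5, epoch_RA=2000.0,
                       time_type='jyear')

#################################################################################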
|
|
"""Views for ecommerce"""
from decimal import Decimal
import logging
from django.conf import settings
from django.contrib.auth import get_user_model
from django.http.response import Http404
from django.shortcuts import get_object_or_404
from django.urls import reverse
from ipware import get_client_ip
from rest_framework import status as statuses
from rest_framework.authentication import SessionAuthentication
from rest_framework.generics import CreateAPIView, GenericAPIView, RetrieveAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.validators import ValidationError
from rest_framework.views import APIView
from applications.constants import AppStates
from applications.models import BootcampApplication
from backends.edxorg import EdxOrgOAuth2
from ecommerce.api import (
complete_successful_order,
create_unfulfilled_order,
generate_cybersource_sa_payload,
get_new_order_by_reference_number,
handle_rejected_order,
serialize_user_bootcamp_run,
serialize_user_bootcamp_runs,
)
from ecommerce.constants import CYBERSOURCE_DECISION_ACCEPT, CYBERSOURCE_DECISION_CANCEL
from ecommerce.exceptions import EcommerceException
from ecommerce.models import Line, Order, Receipt
from ecommerce.permissions import IsSignedByCyberSource
from ecommerce.serializers import (
CheckoutDataSerializer,
PaymentSerializer,
OrderSerializer,
)
from hubspot.task_helpers import sync_hubspot_application_from_order
from klasses.models import BootcampRun
from klasses.permissions import CanReadIfSelf
from main.permissions import UserIsOwnerOrAdminPermission
from main.serializers import serialize_maybe_user
log = logging.getLogger(__name__)
User = get_user_model()
class PaymentView(CreateAPIView):
"""
View for payment API. This creates an Order in our system and provides a dictionary to send to Cybersource.
"""
authentication_classes = (SessionAuthentication,)
permission_classes = (IsAuthenticated,)
serializer_class = PaymentSerializer
def post(self, request, *args, **kwargs):
"""
Create an unfulfilled order and return a response for it.
"""
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
payment_amount = Decimal(serializer.data["payment_amount"])
application_id = serializer.data["application_id"]
application = get_object_or_404(
BootcampApplication, id=application_id, user=self.request.user
)
if application.state != AppStates.AWAITING_PAYMENT.value:
log.error(
"User attempted to pay for application %d with invalid state %s",
application.id,
application.state,
)
raise ValidationError("Invalid application state")
order = create_unfulfilled_order(
application=application, payment_amount=payment_amount
)
# Sync order data with hubspot
sync_hubspot_application_from_order(order)
redirect_url = self.request.build_absolute_uri(reverse("applications"))
user_ip, _ = get_client_ip(request)
return Response(
{
"payload": generate_cybersource_sa_payload(
order, redirect_url, ip_address=user_ip
),
"url": settings.CYBERSOURCE_SECURE_ACCEPTANCE_URL,
}
)
class OrderFulfillmentView(APIView):
"""
View for order fulfillment API. This API is special in that only CyberSource should talk to it.
Instead of authenticating with OAuth or via session this looks at the signature of the message
to verify authenticity.
"""
authentication_classes = ()
permission_classes = (IsSignedByCyberSource,)
def post(self, request, *args, **kwargs): # pylint: disable=unused-argument
"""
Confirmation from CyberSource which fulfills an existing Order.
"""
# First, save this information in a receipt
receipt = Receipt.objects.create(data=request.data)
# Link the order with the receipt if we can parse it
reference_number = request.data["req_reference_number"]
order = get_new_order_by_reference_number(reference_number)
receipt.order = order
receipt.save()
decision = request.data["decision"]
if order.status == Order.FAILED and decision == CYBERSOURCE_DECISION_CANCEL:
# This is a duplicate message, ignore since it's already handled
return Response(status=statuses.HTTP_200_OK)
elif order.status != Order.CREATED:
raise EcommerceException(
"Order {} is expected to have status 'created'".format(order.id)
)
if decision != CYBERSOURCE_DECISION_ACCEPT:
handle_rejected_order(order=order, decision=decision)
else:
# import pdb; pdb.set_trace()
complete_successful_order(order)
# Sync order data with hubspot
sync_hubspot_application_from_order(order)
# The response does not matter to CyberSource
return Response(status=statuses.HTTP_200_OK)
class UserBootcampRunDetail(GenericAPIView):
"""
Class based view for user bootcamp run view.
"""
authentication_classes = (SessionAuthentication,)
permission_classes = (IsAuthenticated, CanReadIfSelf)
lookup_field = "run_key"
lookup_url_kwarg = "run_key"
queryset = BootcampRun.objects.all()
def get(
self, request, username, *args, **kwargs
): # pylint: disable=unused-argument
"""
Returns a serialized bootcamp run and payment for a user
"""
user = get_object_or_404(
User, social_auth__uid=username, social_auth__provider=EdxOrgOAuth2.name
)
bootcamp_run = self.get_object()
return Response(
serialize_user_bootcamp_run(user=user, bootcamp_run=bootcamp_run)
)
class UserBootcampRunStatement(RetrieveAPIView):
"""
View class for a user's bootcamp run payment statement
"""
authentication_classes = (SessionAuthentication,)
permission_classes = (IsAuthenticated,)
lookup_field = "run_key"
lookup_url_kwarg = "run_key"
queryset = BootcampRun.objects.all()
renderer_classes = (TemplateHTMLRenderer,)
def get(self, request, *args, **kwargs):
"""
Fetches a user's bootcamp run payment information and renders their statement
(or raises a 404 if they have no payments for the specified bootcamp run)
"""
bootcamp_run = self.get_object()
if Line.for_user_bootcamp_run(request.user, bootcamp_run).count() == 0:
raise Http404
return Response(
{
"user": serialize_maybe_user(request.user),
"bootcamp_run": serialize_user_bootcamp_run(
user=request.user, bootcamp_run=bootcamp_run
),
},
template_name="bootcamp/statement.html",
)
class UserBootcampRunList(APIView):
"""
Class based view for user bootcamp run list view.
"""
authentication_classes = (SessionAuthentication,)
permission_classes = (IsAuthenticated, CanReadIfSelf)
def get(
self, request, username, *args, **kwargs
): # pylint: disable=unused-argument
"""
Returns serialized bootcamp runs and payments for all runs that a user can pay for.
"""
user = get_object_or_404(
User, social_auth__uid=username, social_auth__provider=EdxOrgOAuth2.name
)
return Response(serialize_user_bootcamp_runs(user=user))
class CheckoutDataView(RetrieveAPIView):
"""
List application ecommerce data for a user, for payable applications
"""
authentication_classes = (SessionAuthentication,)
permission_classes = (IsAuthenticated,)
serializer_class = CheckoutDataSerializer
def get_queryset(self):
"""Filter on valid applications for the user"""
return (
BootcampApplication.objects.filter(
user=self.request.user, state=AppStates.AWAITING_PAYMENT.value
)
.select_related("bootcamp_run")
.prefetch_related(
"bootcamp_run__personal_prices",
"bootcamp_run__installment_set",
"orders",
"orders__line_set",
)
.order_by("id")
)
def get_object(self):
"""Get the application given the query parameter"""
application_id = self.request.query_params.get("application")
return get_object_or_404(self.get_queryset(), id=application_id)
class OrderView(RetrieveAPIView):
"""API view for Orders"""
permission_classes = (IsAuthenticated, UserIsOwnerOrAdminPermission)
serializer_class = OrderSerializer
queryset = Order.objects.all()
owner_field = "user"
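
def _example_payment_request(client, application_id):
    """
    Illustrative sketch only (not part of the original module): the request
    body that PaymentView above expects, exercised through Django's test
    client. The URL path is an assumption for illustration; the real route
    comes from this project's urlconf.
    """
    return client.post(
        "/api/v0/payment/",
        {"payment_amount": "100.00", "application_id": application_id},
    )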
|
|
"""
LICENCE
-------
Copyright 2013 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
Specific backend implementation that leveraged an SQLite3 database to store
features.
"""
import numpy
import os
import os.path as osp
import sqlite3
from . import VCDStoreBackendInterface
from ..VCDStoreElement import VCDStoreElement
from ..errors import *
class SQLiteVCDStoreBackend(VCDStoreBackendInterface):
"""
A database specific implementation of the VCDStore using sqlite3.
For ``None`` fields in VCDStoreElement object, stores a -1 in the database
since there should never be a -1 value for any of the metadata fields
anyway.
"""
# The default path where store files/databases will be recorded. This may be
# changed before the construction of a FrameStore object to change where the
# object will look for possibly existing files/databases.
DEFAULT_DB_NAME = 'SQLiteVCDStore.db'
# Default table name
DEFAULT_TABLE_NAME = 'feature_store'
# Column details (name, sql_type + constraints)
__COL_DEF = (
('descriptor_id', 'TEXT NOT NULL'),
('video_id', 'INTEGER NOT NULL'),
('frame_num', 'INTEGER NOT NULL'),
('timestamp', 'REAL NOT NULL'),
('spacial_x', 'REAL NOT NULL'),
('spacial_y', 'REAL NOT NULL'),
('feature_vec', 'BLOB NOT NULL')
)
# Insert command update.
# This should take:
# - fallback: the conflict mode
# - table_name: the table name to insert into
# - col_list: a string of the comma-separated column names for the table
    #   - placeholder_list: a string of comma-separated placeholder string
    #                       values ('?'). This needs to be exactly as long as
    #                       the column list.
__INSERT__TMPL = ("INSERT OR {fallback} INTO {table_name} "
"({col_list}) VALUES ({placeholder_list});")
# Select cmd template. Returns the feature vector blob for each row
# selected.
# This should take:
# - col_name: The name of the column that we want to select (or multiple
    #                 column names, separated by commas)
# - table_name: the name of the table to select from
# - where_criteria: A string of comma-separated column constraints for the
    #                       select.
__SELECT_TMPL = "SELECT {cols} FROM {table_name}"
def __init__(self, fs_db_path=None, db_root=None):
"""
Sqlite specific FrameStoreBackend implementation. If explicit values are
        not provided for the feature database locations, defaults are used.
:param fs_db_path: The path to the feature store database file. If not
provided, this defaults to "{db_root}/{DEFAULT_DB_NAME}". This may
also be an absolute path, which would cause the db_root to not be
            used. If a relative path is given, it is interpreted relative to the
given ``db_root`` (which defaults to the current working directory).
:type fs_db_path: str
:param db_root: The root directory to create / look for existing
            databases. If not provided, the current working directory is used.
:type db_root: str
"""
# immediate super method does nothing.
super(SQLiteVCDStoreBackend, self).__init__()
###
# Resolve database locations
#
self._db_root = osp.abspath(osp.expanduser(db_root)) if db_root \
else os.getcwd()
self._db_path = \
osp.join(self._db_root, osp.expanduser(fs_db_path)) if fs_db_path \
else osp.join(self._db_root, self.DEFAULT_DB_NAME)
# reassign db_root again as fs_db_path may have been an absolute path
self._db_root = osp.dirname(self._db_path)
# Check that database directory exists. If not, create it.
if not osp.isdir(self._db_root):
os.makedirs(self._db_root)
###
# Initialize database connections.
#
# Also attempt to make the table for each database if it doesn't exist
#
self._db_conn = sqlite3.connect(self._db_path)
self._db_cursor = self._db_conn.cursor()
self._db_cursor.execute(self.__create_table_cmd())
self._db_conn.commit()
def __del__(self):
self._db_cursor.close()
self._db_conn.close()
def __create_table_cmd(self):
""" Construct and return the create table SQL command.
        This uses the __COL_DEF structure to generate the command. Every column
except the last will be treated as primary keys. The last column should
always be the feature vector BLOB column.
:return: The table creation command as a string.
:rtype: str
"""
cmd_template = ("CREATE TABLE IF NOT EXISTS %(table_name)s "
"("
" %(col_defs)s,"
" PRIMARY KEY (%(pk_list)s)"
");")
# construct PK col listing from everything but last column
pk_cols = [n for n, _ in self.__COL_DEF[:-1]]
pk_cols = ', '.join(pk_cols)
# construct the column definition body
col_defs_body = ', '.join(['%s %s' % (n, t) for n, t in self.__COL_DEF])
# flesh out template and return
return cmd_template % {'table_name': self.DEFAULT_TABLE_NAME,
'col_defs': col_defs_body,
'pk_list': pk_cols}
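    # For reference, with the default table and column definitions above the
    # generated statement is equivalent to the following (illustrative,
    # derived from the template; the real command is emitted on one line):
    #
    #   CREATE TABLE IF NOT EXISTS feature_store (
    #       descriptor_id TEXT NOT NULL, video_id INTEGER NOT NULL,
    #       frame_num INTEGER NOT NULL, timestamp REAL NOT NULL,
    #       spacial_x REAL NOT NULL, spacial_y REAL NOT NULL,
    #       feature_vec BLOB NOT NULL,
    #       PRIMARY KEY (descriptor_id, video_id, frame_num, timestamp,
    #                    spacial_x, spacial_y)
    #   );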
def __gen_insert_cmd(self, table_name, col_list, overwrite=False):
"""
Generate a generic insert command for the given column list with the
standard sqlite placeholder characters for use with the sqlite execute
command. We can optionally set the call to overwrite existing values.
:param table_name: The name of the table to insert into.
:type table_name: str
:param col_list: The list of column names to insert into.
:type col_list: list of str or tuple of str
:param overwrite: Allow the command to overwrite an existing value.
:type overwrite: bool
:return: The constructed INSERT command to be used in the execute method
of a database cursor.
:rtype: str
"""
fallback = 'REPLACE' if overwrite else 'ABORT'
# Placeholder list ('?') must be the same length as the column list
ph_list = ', '.join(['?'] * len(col_list))
column_list = ', '.join(col_list)
return self.__INSERT__TMPL.format(fallback=fallback,
table_name=table_name,
col_list=column_list,
placeholder_list=ph_list)
def __gen_select_cmd(self, table_name, select_col_list,
where_col_list=None):
"""
Generate a generic select command for the sqlite execute command.
Optionally include one or more where clause qualification placeholders.
:param table_name: The name of the table to select from.
:type table_name: str
:param select_col_list: The column names for select out in the query.
:type select_col_list: tuple of str or list of str
:param where_col_list: The columns to create placeholders for in the
WHERE clause.
:type where_col_list: tuple of str or list of str
:return: The constructed SELECT command to be used in the execute method
of a database cursor.
:rtype: str
"""
select_cols = ', '.join(select_col_list)
stmt = self.__SELECT_TMPL.format(cols=select_cols,
table_name=table_name)
if where_col_list:
where_criteria = ' WHERE ' + ' AND '.join(['%s IS ?' % c
for c in where_col_list])
stmt += where_criteria
return stmt
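    # For reference, a call such as
    #   self.__gen_select_cmd('feature_store', ['feature_vec'],
    #                         ['descriptor_id', 'video_id'])
    # produces (illustrative, derived from the templates above):
    #   SELECT feature_vec FROM feature_store
    #       WHERE descriptor_id IS ? AND video_id IS ?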
def __generic_store(self, cursor, db, cmd, value_tuple):
"""
:type cursor: sqlite3.Cursor
:type db: sqlite3.Connection
:type cmd: str
:type value_tuple: tuple or list
"""
try:
cursor.executemany(cmd, value_tuple)
db.commit()
except sqlite3.IntegrityError as ex:
self._log.warn("Integrity Error. Rolling back. (error: %s)",
str(ex))
db.rollback()
raise VCDDuplicateFeatureError("Possible duplicate entry for keys: %s"
% str(value_tuple))
def __generic_get(self, cursor, cmd, value_tuple):
""" Returns a tuple of the returned rows.
This will always be at least a tuple of one element.
:type cursor: sqlite3.Cursor
:type cmd: str
:type value_tuple: tuple or list
:rtype: numpy.ndarray
"""
cursor.execute(cmd, value_tuple)
r_list = cursor.fetchall()
return r_list
def store_feature(self, feature_elements, overwrite=False):
# Store one or more VCDStoreElement entries. Unless ``feature_elements``
        # is a VCDStoreElement object, assume it is iterable.
if isinstance(feature_elements, VCDStoreElement):
feature_elements = (feature_elements,)
col_list = [e[0] for e in self.__COL_DEF]
cmd = self.__gen_insert_cmd(self.DEFAULT_TABLE_NAME,
col_list=col_list,
overwrite=overwrite)
# transform feature vector list into a list of buffers for each numpy
# ndarray
rows = list()
for fs in feature_elements:
# same order as __COL_DEF structure
rows.append((fs.descriptor_id,
fs.video_id,
fs.frame_num if fs.frame_num is not None else -1,
fs.timestamp if fs.timestamp is not None else -1,
fs.spacial_x if fs.spacial_x is not None else -1,
fs.spacial_y if fs.spacial_y is not None else -1,
buffer(fs.feat_vec)))
self.__generic_store(self._db_cursor, self._db_conn, cmd, rows)
def get_feature(self, descriptor_id, video_id, frame_num=None,
timestamp=None, spacial_x=None, spacial_y=None):
select_cols = [self.__COL_DEF[-1][0]] # just want the feature vector
# for this get function
# where clause and values will always contain all keys so as to only
# find specifically matching features.
where_cols = [e[0] for e in self.__COL_DEF[:-1]]
values = (descriptor_id, video_id,
                  frame_num if frame_num is not None else -1,
                  timestamp if timestamp is not None else -1,
                  spacial_x if spacial_x is not None else -1,
                  spacial_y if spacial_y is not None else -1)
cmd = self.__gen_select_cmd(self.DEFAULT_TABLE_NAME,
select_cols,
where_cols)
# print "Generated command:", cmd
# print "Input values:", values
# print "Raw return:", self.__generic_get(self._db_cursor, cmd, values)
ret_vals = self.__generic_get(self._db_cursor, cmd, values)
if not ret_vals: # no returned results for query
raise VCDNoFeatureError("No feature for the given query")
np_buffer = ret_vals[0][0]
#: :type: numpy.ndarray
feat_vec = numpy.frombuffer(np_buffer)
return VCDStoreElement(descriptor_id, video_id, feat_vec,
frame_num if frame_num != -1 else None,
                               timestamp if timestamp != -1 else None,
                               spacial_x if spacial_x != -1 else None,
                               spacial_y if spacial_y != -1 else None)
def get_features_by(self, descriptor_id=None, video_id=None, frame_num=None,
timestamp=None, spacial_x=None, spacial_y=None):
select_cols = [e[0] for e in self.__COL_DEF]
# for each key, add a where clause and add to the value list
where_cols = []
values = []
if descriptor_id is not None:
where_cols += [self.__COL_DEF[0][0]]
values += [descriptor_id]
if video_id is not None:
where_cols += [self.__COL_DEF[1][0]]
values += [video_id]
if frame_num is not None:
where_cols += [self.__COL_DEF[2][0]]
values += [frame_num]
if timestamp is not None:
where_cols += [self.__COL_DEF[3][0]]
values += [timestamp]
if spacial_x is not None:
where_cols += [self.__COL_DEF[4][0]]
values += [spacial_x]
if spacial_y is not None:
where_cols += [self.__COL_DEF[5][0]]
values += [spacial_y]
cmd = self.__gen_select_cmd(self.DEFAULT_TABLE_NAME,
select_cols,
where_cols)
raw = self.__generic_get(self._db_cursor, cmd, values)
# formulate return content
ret = []
for t in raw:
r = list(t)
# - Transform any element that's '-1' into a None.
for i, e in enumerate(r):
if e == -1:
r[i] = None
# - Transform feature vector in each returned tuple from a buffer
# object back into a numpy.ndarray (since select cols is set to all
# known columns).
r[6] = numpy.frombuffer(r[6])
# Create feature element knowing that r is in the same order
# as __COL_DEF
e = VCDStoreElement(r[0], r[1], r[6], r[2], r[3], r[4], r[5])
ret += [e]
return tuple(ret)
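
def _example_store_roundtrip(tmp_dir):
    """
    Illustrative sketch only (not part of the original module): storing and
    retrieving one feature vector with the backend above. The VCDStoreElement
    argument order (descriptor_id, video_id, feat_vec, frame_num, timestamp,
    spacial_x, spacial_y) is inferred from the calls made elsewhere in this
    file; the identifiers and values are hypothetical placeholders.
    """
    store = SQLiteVCDStoreBackend(db_root=tmp_dir)
    feat_vec = numpy.arange(8, dtype=float)
    elem = VCDStoreElement('csift', 42, feat_vec, 7, 0.28, 100.0, 200.0)
    store.store_feature(elem)
    return store.get_feature('csift', 42, frame_num=7, timestamp=0.28,
                             spacial_x=100.0, spacial_y=200.0)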
|
|
import re
from django.forms.widgets import TextInput, HiddenInput, MultiWidget, Media
from django.utils.safestring import mark_safe
from django_mongoengine.utils import OrderedDict
# The list of JavaScript files to insert to render any Dictionary widget
MEDIAS = ('jquery-1.8.0.min.js', 'dict.js', 'helper.js')
ADD_FIELD_VERBOSE = 'Add Field'
ADD_DICT_VERBOSE = ' - Add subdictionary'
class Dictionary(MultiWidget):
"""
A widget representing a dictionary field
"""
def __init__(self, schema=None, no_schema=1, max_depth=None,
flags=None, sub_attrs=None, attrs=None, verbose_dict=None,
verbose_field=None):
"""
:param schema: A dictionary representing the future schema of
the Dictionary widget. It is responsible for the
creation of subwidgets.
        :param no_schema: An integer that can take 4 values: 0, 1, 2, 3.
0 means that no schema was passed.
1 means that the schema passed was the default
one. This is the default value.
2 means that the schema passed was given
by a parent widget, and that it actually
                            represents data for rendering.
3 means that the schema was rebuilt after
retrieving form data.
:param max_depth: An integer representing the max depth of
sub-dicts. If passed, the system will
                          prevent saving dictionaries with depths
                          greater than this parameter.
:param flags: A list of flags. Available values :
- 'FORCE_SCHEMA' : would force dictionaries
to keep a certain schema. Only Pair fields
could be added.
:param sub_attrs: A dictionary that contains the classes
for the keys (key.class) and the values
(value.class) of each pair
:param verbose_field: verbose for 'Add field'
:param verbose_dict: verbose for 'Add dict'
"""
self.verbose_field = verbose_field or ADD_FIELD_VERBOSE
self.verbose_dict = verbose_dict or ADD_DICT_VERBOSE
self.no_schema = no_schema
self.max_depth = (max_depth if max_depth and max_depth >= 0 else None)
self.flags = flags or []
self.sub_attrs = sub_attrs or {}
if flags is not None and 'FORCE_SCHEMA' in flags:
self.pair = StaticPair
self.subdict = StaticSubDictionary
else:
self.pair = Pair
self.subdict = SubDictionary
widget_object = []
if isinstance(schema, dict) and self.no_schema > 0:
for key in schema:
if isinstance(schema[key], dict):
widget_object.append(self.subdict(key_value=key, schema=schema[key],
max_depth=max_depth, sub_attrs=self.sub_attrs,
attrs=attrs, verbose_field=self.verbose_field,
verbose_dict=self.verbose_dict))
else:
widget_object.append(self.pair(key_value=key, sub_attrs=self.sub_attrs, attrs=attrs))
else:
            widget_object.append(self.pair(sub_attrs=self.sub_attrs, attrs=attrs))
super(Dictionary, self).__init__(widget_object, attrs)
def decompress(self, value):
if value and isinstance(value, dict):
value = self.dict_sort(value)
value = list(value.items())
# If the schema in place wasn't passed by a parent widget
# we need to rebuild it
if self.no_schema < 2:
self.update_widgets(value, erase=True)
return value
else:
return []
def render(self, name, value, attrs=None):
if not isinstance(value, list):
value = self.decompress(value)
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id')
for i, widget in enumerate(self.widgets):
try:
widget_value = value[i]
except IndexError:
widget_value = None
suffix = widget.suffix
if id_:
final_attrs = dict(final_attrs, id='%s_%s_%s' %
(id_, i, suffix))
output.append(widget.render('%s_%s_%s' % (name, i, suffix),
widget_value,
final_attrs))
return mark_safe(self.format_output(name, output))
def value_from_datadict(self, data, files, name):
"""
Process is:
- erase every widget ;
- create the new ones from the data dictionary
It would take into account every modification on the structure, and
make form repopulation automatic
"""
data_keys = data.keys()
self.widgets = []
html_indexes = []
prefix = 'st' if self.flags is not None and 'FORCE_SCHEMA' in self.flags else ''
for data_key in data_keys:
match = re.match(name + '_(\d+)_%spair_0' % prefix, data_key)
if match is not None:
self.widgets.append(self.pair(sub_attrs=self.sub_attrs, attrs=self.attrs))
html_indexes.append(match.group(1))
else:
match = re.match(name + '_(\d+)_%ssubdict_0' % prefix, data_key)
if match is not None:
self.widgets.append(
self.subdict(sub_attrs=self.sub_attrs,
no_schema=0,
max_depth=self.max_depth,
flags=self.flags,
attrs=self.attrs)
)
html_indexes.append(match.group(1))
return [widget.value_from_datadict(
data, files,
'%s_%s_%s' % (name, html_indexes[i], widget.suffix))
for i, widget in enumerate(self.widgets)]
def format_output(self, name, rendered_widgets):
class_depth = ''
if self.max_depth is not None:
class_depth = 'depth_%s' % self.max_depth
params = {'id': "id_%s" % self.id_for_label(name),
'class_depth': class_depth,
'widgets': ''.join(rendered_widgets),
'add_id': 'add_id_%s' % self.id_for_label(name),
'add_sub_id': 'add_sub_id_%s' % self.id_for_label(name),
'add_field': ADD_FIELD_VERBOSE,
'add_dict': ADD_DICT_VERBOSE
}
if 'FORCE_SCHEMA' not in self.flags:
actions = """
<span id="%(add_id)s" class="add_pair_dictionary">%(add_field)s</span>
<span id="%(add_sub_id)s" class="add_sub_dictionary">
%(add_dict)s
</span>
""" % params
else:
actions = ''
params['actions'] = actions
return """
<ul id="%(id)s" class="dictionary %(class_depth)s">
%(widgets)s
</ul>
%(actions)s
""" % params
def update_widgets(self, keys, erase=False):
# import pdb
# pdb.set_trace()
if erase:
self.widgets = []
for k in keys:
if (isinstance(k[1], dict)):
self.widgets.append(
self.subdict(key_value=k[0], schema=k[1], no_schema=2,
max_depth=self.max_depth, flags=self.flags,
sub_attrs=self.sub_attrs, attrs=self.attrs))
else:
self.widgets.append(self.pair(sub_attrs=self.sub_attrs,
key_value=k[1],
attrs=self.attrs))
def _get_media(self):
"""
Mimic the MultiWidget '_get_media' method, adding other media
"""
if 'FORCE_SCHEMA' in self.flags:
media = Media()
else:
media = Media(js=MEDIAS)
for w in self.widgets:
media = media + w.media
return media
media = property(_get_media)
def dict_sort(self, d):
if isinstance(d, dict):
return OrderedDict([
(k, self.dict_sort(v))
for k, v in sorted(d.items())
])
else:
return d
class Pair(MultiWidget):
"""
A widget representing a key-value pair in a dictionary
"""
#default for a pair
key_type = TextInput
value_type = TextInput
suffix = 'pair'
def __init__(self, sub_attrs, key_value=None, attrs=None, **kwargs):
widgets = [self.key_type()] if callable(self.key_type) else []
if self.value_type in [TextInput, HiddenInput]:
if sub_attrs:
try:
widgets = [self.key_type(attrs=sub_attrs['key']), self.value_type(attrs=sub_attrs['value'])]
except KeyError:
raise KeyError("improper synthax for sub_attrs parameter")
else:
widgets = [self.key_type(), self.value_type()]
elif self.value_type == Dictionary:
if sub_attrs:
try:
widgets = [self.key_type(attrs=sub_attrs['key']), self.value_type(attrs=sub_attrs['value'], **kwargs)]
except KeyError:
raise KeyError("improper synthax for sub_attrs parameter")
else:
widgets = [self.key_type(), self.value_type(**kwargs)]
self.sub_attrs = sub_attrs
#raise error here ?
self.key_value = key_value if key_value is not None else ''
super(Pair, self).__init__(widgets, attrs)
#this method should be overwritten by subclasses
def decompress(self, value):
if value is not None:
return list(value)
else:
return ['', '']
def render(self, name, value, attrs=None):
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
if not isinstance(value, list):
value = self.decompress(value)
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id')
for i, widget in enumerate(self.widgets):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
return mark_safe(self.format_output(output, name))
def value_from_datadict(self, data, files, name):
return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]
def format_output(self, rendered_widgets, name):
return '<li>' + ' : '.join(rendered_widgets) + '<span class="del_pair" id="del_%s"> - Delete</span></li>\n' % name
class SubDictionary(Pair):
"""
A widget representing a key-value pair in a dictionary, where value is a dictionary
"""
key_type = TextInput
value_type = Dictionary
suffix = 'subdict'
def __init__(self, sub_attrs, schema=None, **kwargs):
if schema is None:
schema = {'key': 'value'}
super(SubDictionary, self).__init__(schema=schema,
sub_attrs=sub_attrs, **kwargs)
def decompress(self, value):
if value is not None:
return list(value)
else:
return ['', {}]
def format_output(self, rendered_widgets, name):
params = {
"widgets": ' : '.join(rendered_widgets),
"del_id": "del_%s" % name
}
return """
<li> %(widgets)s <span class="del_dict" id="%(del_id)s"> - Delete</span>
</li>""" % params
class StaticPair(Pair):
"""
A widget representing a key-value pair in a dictionary, where key is just
text (this is only relevant when FORCE_SCHEMA flag is used)
"""
key_type = HiddenInput
value_type = TextInput
suffix = 'stpair'
# def __init__(self, key_value, attrs=None):
# super(StaticPair, self).__init__(key_value=key_value, attrs=attrs)
def decompress(self, value):
value = super(StaticPair, self).decompress(value)
self.key_value = value[0]
return value
def format_output(self, rendered_widgets, name):
params = {
"html_class": self.sub_attrs.get('key', {}).get('class', ''),
"key": self.key_value,
"widgets": ''.join(rendered_widgets)
}
return """
<li><span class="static_key %(html_class)s">%(key)s</span> : %(widgets)s
</li>""" % params
class StaticSubDictionary(SubDictionary):
"""
A widget representing a key-value pair in a dictionary, where key is just
text (this is only relevant when FORCE_SCHEMA flag is used)
"""
key_type = HiddenInput
value_type = Dictionary
suffix = 'stsubdict'
def decompress(self, value):
value = super(StaticSubDictionary, self).decompress(value)
self.key_value = value[0]
return value
def format_output(self, rendered_widgets, name):
params = {
"html_class": self.sub_attrs.get('key', {}).get('class', ''),
"key": self.key_value,
"widgets": ''.join(rendered_widgets)
}
return """
<li><span class="static_key %(html_class)s">%(key)s</span> : %(widgets)s</li>
""" % params
class EmbeddedFieldWidget(MultiWidget):
"""
    A widget that renders each field found in the supplied form.
"""
def __init__(self, fields, attrs=None):
self.fields = fields
super(EmbeddedFieldWidget, self).__init__([f.widget for f in self.fields.values()], attrs)
def decompress(self, value):
"""
        Retrieve each field value or provide the initial values
"""
if value:
            return [value[field] for field in self.fields.keys()]
return [field.field.initial for field in self.fields.values()]
def format_label(self, field, counter):
"""
Format the label for each field
"""
return '<label for="id_formfield_%s" %s>%s:</label>' % (
counter, field.required and 'class="required"', field.label)
def format_help_text(self, field, counter):
"""
Format the help text for the bound field
"""
        if field.help_text:
return '(<em>%s</em>)' % field.help_text
return ''
def format_output(self, rendered_widgets):
"""
        This output will yield all widgets grouped in an unordered list
"""
ret = ['<ul class="formfield">']
for i, field in enumerate(self.fields):
label = self.format_label(self.fields[field], i)
help_text = self.format_help_text(self.fields[field], i)
ret.append('<li>%s %s %s</li>' % (
label, help_text, rendered_widgets[i]))
ret.append('</ul>')
return u''.join(ret)
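
def _example_dictionary_widget():
    """
    Illustrative sketch only (not part of the original module): building a
    Dictionary widget with a fixed two-level schema. The schema keys are
    hypothetical placeholders.
    """
    return Dictionary(schema={'name': '', 'address': {'city': '', 'zip': ''}},
                      max_depth=2)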
|
|
# Copyright 2010 Owen Taylor
#
# This file is part of Reinteract and distributed under the terms
# of the BSD license. See the file COPYING in the Reinteract
# distribution for full details.
#
########################################################################
import cairo
import gtk
import logging
import os
import pango
import pangocairo
from chunks import StatementChunk, CommentChunk, BlankChunk
from cStringIO import StringIO
import doc_format
from statement import Statement, WarningResult, HelpResult, CustomResult
from style import DEFAULT_STYLE
_debug = logging.getLogger("WorksheetPrint").debug
# The two phases of printing
_MEASURE = 0
_RENDER = 1
class _Page(object):
def __init__(self, start_line, end_line):
self.start_line = start_line
self.end_line = end_line
class _PageLayout(object):
    # This object does the actual work of page layout for a worksheet; it keeps track
    # of the state of the page layout, including things like the vertical position on the
# page. During the _MEASURE phase, one _PageLayout object is used for the entire
# worksheet. During rendering, we create a separate one per page. There's some
# confusion with pango.Layout in the name; pango.Layout's are used for individual
# paragraphs of text.
header_rule_spacing = 0.125 * 72. # 1/8th inch
header_rule_thickness = 1 # 1pt
def __init__(self, worksheet, context, phase, page_number=1, page_count=0):
self.worksheet = worksheet
self.context = context
self.font = pango.FontDescription("monospace 12")
self.phase = phase
if self.phase == _MEASURE:
self.pages = []
else: # phase == _RENDER
self.cr = self.context.get_cairo_context()
self.page_number = page_number
self.page_count = page_count
self.y = 0
self.page_start_line = None
# We track comments specially so we can group them with following code
# when breaking pages; these variables track comments we've seen when we
# haven't hit a following statement yet
self.comment_start_line = None
self.comment_start_y = None
def create_pango_layout(self, text, *styles):
layout = self.context.create_pango_layout()
layout.set_text(text)
layout.set_font_description(self.font)
# We need to save some stuff for when we render; just stick extra properties on the
# layout. We may want to move to a setup where we have "print objects" for layouts
# or custom results.
layout._left_margin = 0
layout._right_margin = 0
layout._paragraph_background = None
attrs = pango.AttrList()
for style in styles:
spec = DEFAULT_STYLE.get_spec(style)
spec.add_pango_attributes(attrs, start_index=0, end_index=0x7fffffff)
# We are fudging pixels vs. points here
if spec.left_margin:
layout._left_margin = spec.left_margin
            if spec.right_margin:
layout._right_margin = spec.right_margin
if spec.paragraph_background:
layout._paragraph_background = pango.Color(spec.paragraph_background)
layout._width = self.context.get_width() - layout._left_margin - layout._right_margin
layout.set_width(int(pango.SCALE * layout._width))
layout.set_attributes(attrs)
return layout
def check_for_page_break(self, height):
# No break needed
if self.y + height <= self.context.get_height():
return
# BlankChunks never force page breaks
if isinstance(self.current_chunk, BlankChunk):
return
# We try to group comments with code, but if comments+<current chunk> overflows the page
# we break off the comments and put the current chunk by itself on a new page
if self.comment_start_line is None or self.y - self.comment_start_y + height > self.context.get_height():
start_line = self.current_chunk.start
start_y = self.chunk_start_y
if start_line == self.page_start_line:
# current chunk overflows the page, nothing we can do without more sophisticated
# logic to break up a single chunk when paginating, just let the overflow happen
return
# Remember that we've split off the comments
if isinstance(self.current_chunk, CommentChunk):
self.comment_start_line = start_line
self.comment_start_y = 0
else:
self.comment_start_line = None
self.comment_start_y = None
else:
start_line = self.comment_start_line
start_y = self.comment_start_y
self.comment_start_y = 0
self.pages.append(_Page(self.page_start_line, start_line))
self.y = self.y - start_y
self.chunk_start_y = 0
self.page_start_line = start_line
self.page_number += 1
self.page_count += 1
self.append_header()
def append_pango_layout(self, layout):
_, layout_height = layout.get_size()
layout_height = layout_height / pango.SCALE
if self.phase == _MEASURE:
self.check_for_page_break(layout_height)
else: # phase == _RENDER
if layout._paragraph_background is not None:
self.cr.save()
self.cr.set_source_rgb(layout._paragraph_background.red / 65535.,
layout._paragraph_background.green / 65535.,
layout._paragraph_background.blue / 65535.)
self.cr.rectangle(layout._left_margin,
self.y,
layout._width,
layout_height)
self.cr.fill()
self.cr.restore()
self.cr.move_to(layout._left_margin, self.y)
self.cr.show_layout(layout)
self.y += layout_height
def append_header(self):
if self.worksheet.filename is None:
filename = "Unsaved Worksheet"
else:
filename = os.path.basename(self.worksheet.filename)
left_layout = self.create_pango_layout(filename, 'header')
left_width, left_height = left_layout.get_size()
left_width /= pango.SCALE
left_height /= pango.SCALE
right_layout = self.create_pango_layout("Page %d of %d" % (self.page_number, self.page_count))
right_width, right_height = right_layout.get_size()
right_width /= pango.SCALE
right_height /= pango.SCALE
if self.phase == _RENDER:
self.cr.move_to(0, self.y)
self.cr.show_layout(left_layout)
self.cr.move_to(self.context.get_width() - right_width, self.y)
self.cr.show_layout(right_layout)
self.y += max(left_height, right_height)
self.y += self.header_rule_spacing + self.header_rule_thickness / 2
if self.phase == _RENDER:
self.cr.save()
self.cr.set_line_width(self.header_rule_thickness)
self.cr.move_to(0, self.y)
self.cr.line_to(self.context.get_width(), self.y)
self.cr.stroke()
self.cr.restore()
self.y += self.header_rule_spacing + self.header_rule_thickness / 2
def append_chunk_text(self, chunk):
text = self.worksheet.get_text(start_line=chunk.start, end_line=chunk.end - 1)
if isinstance(chunk, CommentChunk):
layout = self.create_pango_layout(text, 'comment')
else:
layout = self.create_pango_layout(text)
if isinstance(chunk, StatementChunk):
attrs = layout.get_attributes() # makes a copy
index = 0
# The complexity here is because Pango attributes encode positions by byte
# index for UTF-8 encoded text while we store the tokenization in Unicode
# character positions.
for i in xrange(chunk.start, chunk.end):
line = self.worksheet.get_line(i)
offset = 0
for token_type, start_offset, end_offset, _ in chunk.tokenized.get_tokens(i - chunk.start):
start_index = index + len(line[offset:start_offset].encode("UTF-8"))
end_index = start_index + len(line[start_offset:end_offset].encode("UTF-8"))
spec = DEFAULT_STYLE.get_spec(token_type)
if spec is not None:
spec.add_pango_attributes(attrs, start_index, end_index)
index = end_index
offset = end_offset
index += len(line[offset:].encode("UTF-8"))
index += 1 # newline
layout.set_attributes(attrs) # set the copy back
self.append_pango_layout(layout)
def append_chunk_results(self, chunk):
if not isinstance(chunk, StatementChunk):
return
def create_result_layout(text, style=None):
styles = ['result']
if style is not None:
styles.append(style)
if chunk.needs_execute:
styles.append('recompute')
return self.create_pango_layout(text, *styles)
if chunk.error_message:
layout = create_result_layout(chunk.error_message)
self.append_pango_layout(layout)
elif chunk.results is not None:
styles = ['result']
for result in chunk.results:
if isinstance(result, basestring):
layout = create_result_layout(result)
self.append_pango_layout(layout)
elif isinstance(result, WarningResult):
layout = create_result_layout(result.message, 'warning')
self.append_pango_layout(layout)
elif isinstance(result, HelpResult):
si = StringIO()
attrs = pango.AttrList()
index = [0] # array so we can mutate within nested function
def callback(text, bold):
if isinstance(text, unicode):
text = text.encode("UTF-8")
si.write(text)
end_index = index[0] + len(text)
if bold:
attrs.insert(pango.AttrWeight(pango.WEIGHT_BOLD, index[0], end_index))
index[0] = end_index
doc_format.format_docs(result.arg, callback)
layout = create_result_layout(si.getvalue(), 'help')
paragraph_attrs = layout.get_attributes()
paragraph_attrs.splice(attrs, 0, index[0])
layout.set_attributes(paragraph_attrs)
self.append_pango_layout(layout)
elif isinstance(result, CustomResult):
try:
if self.phase == _MEASURE:
height = result.print_result(self.context, render=False)
self.check_for_page_break(height)
else:
try:
self.cr.save()
self.cr.translate(0, self.y)
height = result.print_result(self.context, render=True)
finally:
self.cr.restore()
self.y += height
except NotImplementedError, e:
layout = create_result_layout(unicode(result))
self.append_pango_layout(layout)
def append_chunk(self, chunk):
if self.page_start_line is None:
self.append_header()
self.page_start_line = chunk.start
if isinstance(chunk, CommentChunk) and self.comment_start_line is None:
self.comment_start_line = chunk.start
self.comment_start_y = self.y
self.current_chunk = chunk
self.chunk_start_y = self.y
self.append_chunk_text(chunk)
self.append_chunk_results(chunk)
self.current_chunk = None
self.page_end_line = chunk.end
if isinstance(chunk, StatementChunk):
self.comment_start_line = None
self.comment_start_y = None
def finish(self):
if self.phase == _MEASURE and self.page_start_line is not None:
self.pages.append(_Page(self.page_start_line, self.page_end_line))
class WorksheetPrintOperation(gtk.PrintOperation):
"""
Subclass of gtk.PrintOperation to print a worksheet.
"""
__gsignals__ = {
}
def __init__(self, worksheet):
gtk.PrintOperation.__init__(self)
self.worksheet = worksheet
self.set_unit(gtk.UNIT_POINTS)
def do_begin_print(self, context):
page_layout = _PageLayout(self.worksheet, context, phase=_MEASURE)
for chunk in self.worksheet.iterate_chunks():
page_layout.append_chunk(chunk)
page_layout.finish()
self.__pages = page_layout.pages
self.set_n_pages(len(self.__pages))
def do_draw_page(self, context, page_nr):
page = self.__pages[page_nr]
page_layout = _PageLayout(self.worksheet, context, phase=_RENDER,
page_number=page_nr + 1, page_count=len(self.__pages))
for chunk in self.worksheet.iterate_chunks(page.start_line, page.end_line):
page_layout.append_chunk(chunk)
page_layout.finish()
# gtk.PrintOperation() doesn't work for exporting to PDF on windows, since it's
# still going through the windows printing system, which doesn't have native
# PDF export. But the code for printing expects a gtk.PrintContext, so we provide
# this mostly-compatible class that provides what is needed.
#
class PDFPrintContext(object):
def __init__(self, page_setup, cr):
self.page_setup = page_setup
self.paper_size = page_setup.get_paper_size()
self.pango_fontmap = pangocairo.cairo_font_map_get_default()
self.cr = cr
def get_cairo_context(self):
return self.cr
def get_page_setup(self):
return self.page_setup
def get_width(self):
return self.page_setup.get_page_width(gtk.UNIT_POINTS)
def get_height(self):
return self.page_setup.get_page_height(gtk.UNIT_POINTS)
def get_dpi_x(self):
return 72
def get_dpi_y(self):
return 72
def get_pango_fontmap(self):
return self.pango_fontmap
def create_pango_context(self):
pango_context = self.pango_fontmap.create_context()
options = cairo.FontOptions()
options.set_hint_metrics(cairo.HINT_METRICS_OFF)
pangocairo.context_set_font_options(pango_context, options)
pangocairo.context_set_resolution(pango_context, 72)
return pango_context
def create_pango_layout(self):
context = self.create_pango_context()
layout = pango.Layout(context)
self.cr.update_context(context)
return layout
def export_to_pdf(worksheet, filename, page_setup):
paper_size = page_setup.get_paper_size()
orientation = page_setup.get_orientation()
if (orientation == gtk.PAGE_ORIENTATION_PORTRAIT or
orientation == gtk.PAGE_ORIENTATION_REVERSE_PORTRAIT):
width = paper_size.get_width(gtk.UNIT_POINTS)
height = paper_size.get_height(gtk.UNIT_POINTS)
else:
width = paper_size.get_height(gtk.UNIT_POINTS)
height = paper_size.get_width(gtk.UNIT_POINTS)
surface = cairo.PDFSurface(filename, width, height)
raw_cr = cairo.Context(surface)
cr = pangocairo.CairoContext(raw_cr)
context = PDFPrintContext(page_setup, cr)
########################################
page_layout = _PageLayout(worksheet, context, phase=_MEASURE)
for chunk in worksheet.iterate_chunks():
page_layout.append_chunk(chunk)
page_layout.finish()
pages = page_layout.pages
cr.translate(page_setup.get_left_margin(gtk.UNIT_POINTS),
page_setup.get_top_margin(gtk.UNIT_POINTS))
########################################
for page_nr in xrange(0, len(pages)):
page = pages[page_nr]
page_layout = _PageLayout(worksheet, context, phase=_RENDER,
page_number=page_nr + 1, page_count=len(pages))
for chunk in worksheet.iterate_chunks(page.start_line, page.end_line):
page_layout.append_chunk(chunk)
page_layout.finish()
cr.show_page()
########################################
surface.finish()
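# Illustrative usage sketch (the worksheet object comes from the caller):
#
#   page_setup = gtk.PageSetup()
#   page_setup.set_paper_size(gtk.PaperSize(gtk.PAPER_NAME_A4))
#   export_to_pdf(worksheet, "worksheet.pdf", page_setup)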
|
|
from __future__ import division, absolute_import, print_function
import sys
import warnings
import itertools
import operator
import numpy as np
from numpy.testing.utils import _gen_alignment_data
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_raises,
assert_almost_equal, assert_allclose, assert_array_equal, IS_PYPY,
suppress_warnings
)
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
np.int_, np.uint, np.longlong, np.ulonglong,
np.single, np.double, np.longdouble, np.csingle,
np.cdouble, np.clongdouble]
floating_types = np.floating.__subclasses__()
# This compares scalarmath against ufuncs.
class TestTypes(TestCase):
def test_types(self, level=1):
for atype in types:
a = atype(1)
assert_(a == 1, "error with %r: got %r" % (atype, a))
def test_type_add(self, level=1):
# list of types
for k, atype in enumerate(types):
a_scalar = atype(3)
a_array = np.array([3], dtype=atype)
for l, btype in enumerate(types):
b_scalar = btype(1)
b_array = np.array([1], dtype=btype)
c_scalar = a_scalar + b_scalar
c_array = a_array + b_array
# It was comparing the type numbers, but the new ufunc
# function-finding mechanism finds the lowest function
# to which both inputs can be cast - which produces 'l'
# when you do 'q' + 'b'. The old function finding mechanism
# skipped ahead based on the first argument, but that
# does not produce properly symmetric results...
assert_equal(c_scalar.dtype, c_array.dtype,
"error with types (%d/'%c' + %d/'%c')" %
(k, np.dtype(atype).char, l, np.dtype(btype).char))
def test_type_create(self, level=1):
for k, atype in enumerate(types):
a = np.array([1, 2, 3], atype)
b = atype([1, 2, 3])
assert_equal(a, b)
def test_leak(self):
# test leak of scalar objects
# a leak would show up in valgrind as still-reachable of ~2.6MB
for i in range(200000):
np.add(1, 1)
class TestBaseMath(TestCase):
def test_blocked(self):
# test alignments offsets for simd instructions
# alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
type='binary',
max_size=sz):
exp1 = np.ones_like(inp1)
inp1[...] = np.ones_like(inp1)
inp2[...] = np.zeros_like(inp2)
assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)
np.add(inp1, inp2, out=out)
assert_almost_equal(out, exp1, err_msg=msg)
inp2[...] += np.arange(inp2.size, dtype=dt) + 1
assert_almost_equal(np.square(inp2),
np.multiply(inp2, inp2), err_msg=msg)
# skip true divide for ints
if dt != np.int32 or (sys.version_info.major < 3 and not sys.py3kwarning):
assert_almost_equal(np.reciprocal(inp2),
np.divide(1, inp2), err_msg=msg)
inp1[...] = np.ones_like(inp1)
np.add(inp1, 2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
inp2[...] = np.ones_like(inp2)
np.add(2, inp2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
# i.e. doubles are aligned to 4 bytes on i386
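# The int8 buffers below are trimmed by 4 bytes at each end before taking the
# float64 view, so their data pointers are offset from the original allocation
# and exercise the unaligned code paths.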
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_almost_equal(d + d, d * 2)
np.add(d, d, out=o)
np.add(np.ones_like(d), d, out=o)
np.add(d, np.ones_like(d), out=o)
np.add(np.ones_like(d), d)
np.add(d, np.ones_like(d))
class TestPower(TestCase):
def test_small_types(self):
for t in [np.int8, np.int16, np.float16]:
a = t(3)
b = a ** 4
assert_(b == 81, "error with %r: got %r" % (t, b))
def test_large_types(self):
for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]:
a = t(51)
b = a ** 4
msg = "error with %r: got %r" % (t, b)
if np.issubdtype(t, np.integer):
assert_(b == 6765201, msg)
else:
assert_almost_equal(b, 6765201, err_msg=msg)
def test_integers_to_negative_integer_power(self):
# Note that the combination of uint64 with a signed integer
# has common type np.float64. The other combinations should all
# raise a ValueError for integer ** negative integer.
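# For example, mirroring the assertions below: np.int64(1) ** np.int64(-1)
# raises ValueError, while np.uint64(1) ** np.int64(-1) returns 1.0 as a
# np.float64.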
exp = [np.array(-1, dt)[()] for dt in 'bhilq']
# 1 ** -1 possible special case
base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ']
for i1, i2 in itertools.product(base, exp):
if i1.dtype.name != 'uint64':
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, 1.)
# -1 ** -1 possible special case
base = [np.array(-1, dt)[()] for dt in 'bhilq']
for i1, i2 in itertools.product(base, exp):
if i1.dtype.name != 'uint64':
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, -1.)
# 2 ** -1 perhaps generic
base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ']
for i1, i2 in itertools.product(base, exp):
if i1.dtype.name != 'uint64':
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, .5)
def test_mixed_types(self):
typelist = [np.int8, np.int16, np.float16,
np.float32, np.float64, np.int8,
np.int16, np.int32, np.int64]
for t1 in typelist:
for t2 in typelist:
a = t1(3)
b = t2(2)
result = a**b
msg = ("error with %r and %r:"
"got %r, expected %r") % (t1, t2, result, 9)
if np.issubdtype(np.dtype(result), np.integer):
assert_(result == 9, msg)
else:
assert_almost_equal(result, 9, err_msg=msg)
def test_modular_power(self):
# modular power is not implemented, so ensure it errors
a = 5
b = 4
c = 10
expected = pow(a, b, c)
for t in (np.int32, np.float32, np.complex64):
# note that 3-operand power only dispatches on the first argument
assert_raises(TypeError, operator.pow, t(a), b, c)
assert_raises(TypeError, operator.pow, np.array(t(a)), b, c)
def floordiv_and_mod(x, y):
return (x // y, x % y)
def _signs(dt):
if dt in np.typecodes['UnsignedInteger']:
return (+1,)
else:
return (+1, -1)
class TestModulus(TestCase):
def test_modulus_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
for op in [floordiv_and_mod, divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
a = np.array(sg1*71, dtype=dt1)[()]
b = np.array(sg2*19, dtype=dt2)[()]
div, rem = op(a, b)
assert_equal(div*b + rem, a, err_msg=msg)
if sg2 == -1:
assert_(b < rem <= 0, msg)
else:
assert_(b > rem >= 0, msg)
def test_float_modulus_exact(self):
# test that float results are exact for small integers. This also
# holds for the same integers scaled by powers of two.
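# For example, divmod(np.float32(-3.0), np.float32(2.0)) yields exactly
# (-2.0, 1.0), matching Python's divmod(-3, 2).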
nlst = list(range(-127, 0))
plst = list(range(1, 128))
dividend = nlst + [0] + plst
divisor = nlst + plst
arg = list(itertools.product(dividend, divisor))
tgt = list(divmod(*t) for t in arg)
a, b = np.array(arg, dtype=int).T
# convert exact integer results from Python to float so that
# signed zero can be used; it is checked below.
tgtdiv, tgtrem = np.array(tgt, dtype=float).T
tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
for op in [floordiv_and_mod, divmod]:
for dt in np.typecodes['Float']:
msg = 'op: %s, dtype: %s' % (op.__name__, dt)
fa = a.astype(dt)
fb = b.astype(dt)
# use list comprehension so a_ and b_ are scalars
div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)])
assert_equal(div, tgtdiv, err_msg=msg)
assert_equal(rem, tgtrem, err_msg=msg)
def test_float_modulus_roundoff(self):
# gh-6127
dt = np.typecodes['Float']
for op in [floordiv_and_mod, divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
a = np.array(sg1*78*6e-8, dtype=dt1)[()]
b = np.array(sg2*6e-8, dtype=dt2)[()]
div, rem = op(a, b)
# Equal assertion should hold when fmod is used
assert_equal(div*b + rem, a, err_msg=msg)
if sg2 == -1:
assert_(b < rem <= 0, msg)
else:
assert_(b > rem >= 0, msg)
def test_float_modulus_corner_cases(self):
# Check remainder magnitude.
for dt in np.typecodes['Float']:
b = np.array(1.0, dtype=dt)
a = np.nextafter(np.array(0.0, dtype=dt), -b)
rem = operator.mod(a, b)
assert_(rem <= b, 'dt: %s' % dt)
rem = operator.mod(-a, -b)
assert_(rem >= -b, 'dt: %s' % dt)
# Check nans, inf
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in remainder")
for dt in np.typecodes['Float']:
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
finf = np.array(np.inf, dtype=dt)
fnan = np.array(np.nan, dtype=dt)
rem = operator.mod(fone, fzer)
assert_(np.isnan(rem), 'dt: %s' % dt)
# MSVC 2008 returns NaN here, so disable the check.
#rem = operator.mod(fone, finf)
#assert_(rem == fone, 'dt: %s' % dt)
rem = operator.mod(fone, fnan)
assert_(np.isnan(rem), 'dt: %s' % dt)
rem = operator.mod(finf, fone)
assert_(np.isnan(rem), 'dt: %s' % dt)
class TestComplexDivision(TestCase):
def test_zero_division(self):
with np.errstate(all="ignore"):
for t in [np.complex64, np.complex128]:
a = t(0.0)
b = t(1.0)
assert_(np.isinf(b/a))
b = t(complex(np.inf, np.inf))
assert_(np.isinf(b/a))
b = t(complex(np.inf, np.nan))
assert_(np.isinf(b/a))
b = t(complex(np.nan, np.inf))
assert_(np.isinf(b/a))
b = t(complex(np.nan, np.nan))
assert_(np.isnan(b/a))
b = t(0.)
assert_(np.isnan(b/a))
def test_signed_zeros(self):
with np.errstate(all="ignore"):
for t in [np.complex64, np.complex128]:
# tupled (numerator, denominator, expected)
# for testing as expected == numerator/denominator
data = (
(( 0.0,-1.0), ( 0.0, 1.0), (-1.0,-0.0)),
(( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
(( 0.0,-1.0), (-0.0,-1.0), ( 1.0, 0.0)),
(( 0.0,-1.0), (-0.0, 1.0), (-1.0, 0.0)),
(( 0.0, 1.0), ( 0.0,-1.0), (-1.0, 0.0)),
(( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
((-0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
((-0.0, 1.0), ( 0.0,-1.0), (-1.0,-0.0))
)
for cases in data:
n = cases[0]
d = cases[1]
ex = cases[2]
result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))
# check real and imag parts separately to avoid comparison
# in array context, which does not account for signed zeros
assert_equal(result.real, ex[0])
assert_equal(result.imag, ex[1])
def test_branches(self):
with np.errstate(all="ignore"):
for t in [np.complex64, np.complex128]:
# tupled (numerator, denominator, expected)
# for testing as expected == numerator/denominator
data = list()
# trigger branch: real(fabs(denom)) > imag(fabs(denom))
# followed by else condition as neither are == 0
data.append((( 2.0, 1.0), ( 2.0, 1.0), (1.0, 0.0)))
# trigger branch: real(fabs(denom)) > imag(fabs(denom))
# followed by if condition as both are == 0
# is performed in test_zero_division(), so this is skipped
# trigger else if branch: real(fabs(denom)) < imag(fabs(denom))
data.append((( 1.0, 2.0), ( 1.0, 2.0), (1.0, 0.0)))
for cases in data:
n = cases[0]
d = cases[1]
ex = cases[2]
result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))
# check real and imag parts separately to avoid comparison
# in array context, which does not account for signed zeros
assert_equal(result.real, ex[0])
assert_equal(result.imag, ex[1])
class TestConversion(TestCase):
def test_int_from_long(self):
l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18]
li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]
for T in [None, np.float64, np.int64]:
a = np.array(l, dtype=T)
assert_equal([int(_m) for _m in a], li)
a = np.array(l[:3], dtype=np.uint64)
assert_equal([int(_m) for _m in a], li[:3])
def test_iinfo_long_values(self):
for code in 'bBhH':
res = np.array(np.iinfo(code).max + 1, dtype=code)
tgt = np.iinfo(code).min
assert_(res == tgt)
for code in np.typecodes['AllInteger']:
res = np.array(np.iinfo(code).max, dtype=code)
tgt = np.iinfo(code).max
assert_(res == tgt)
for code in np.typecodes['AllInteger']:
res = np.typeDict[code](np.iinfo(code).max)
tgt = np.iinfo(code).max
assert_(res == tgt)
def test_int_raise_behaviour(self):
def overflow_error_func(dtype):
np.typeDict[dtype](np.iinfo(dtype).max + 1)
for code in 'lLqQ':
assert_raises(OverflowError, overflow_error_func, code)
def test_longdouble_int(self):
# gh-627
x = np.longdouble(np.inf)
assert_raises(OverflowError, x.__int__)
x = np.clongdouble(np.inf)
assert_raises(OverflowError, x.__int__)
def test_numpy_scalar_relational_operators(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
#Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
#unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
#Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
def test_scalar_comparison_to_none(self):
# Scalars should just return False and not give a warning.
# The comparisons are flagged by pep8, ignore that.
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', FutureWarning)
assert_(not np.float32(1) == None)
assert_(not np.str_('test') == None)
# This is dubious (see below):
assert_(not np.datetime64('NaT') == None)
assert_(np.float32(1) != None)
assert_(np.str_('test') != None)
# This is dubious (see below):
assert_(np.datetime64('NaT') != None)
assert_(len(w) == 0)
# For documentation purposes, this is why the datetime is dubious.
# At the time of deprecation this was no behaviour change, but
# it has to be considered when the deprecations are done.
assert_(np.equal(np.datetime64('NaT'), None))
#class TestRepr(TestCase):
# def test_repr(self):
# for t in types:
# val = t(1197346475.0137341)
# val_repr = repr(val)
# val2 = eval(val_repr)
# assert_equal( val, val2 )
class TestRepr(object):
def _test_type_repr(self, t):
finfo = np.finfo(t)
last_fraction_bit_idx = finfo.nexp + finfo.nmant
last_exponent_bit_idx = finfo.nexp
storage_bytes = np.dtype(t).itemsize*8
# could add some more types to the list below
for which in ['small denorm', 'small norm']:
# Values from http://en.wikipedia.org/wiki/IEEE_754
constr = np.array([0x00]*storage_bytes, dtype=np.uint8)
if which == 'small denorm':
byte = last_fraction_bit_idx // 8
bytebit = 7-(last_fraction_bit_idx % 8)
constr[byte] = 1 << bytebit
elif which == 'small norm':
byte = last_exponent_bit_idx // 8
bytebit = 7-(last_exponent_bit_idx % 8)
constr[byte] = 1 << bytebit
else:
raise ValueError('hmm')
val = constr.view(t)[0]
val_repr = repr(val)
val2 = t(eval(val_repr))
if not (val2 == 0 and val < 1e-100):
assert_equal(val, val2)
def test_float_repr(self):
# long double test cannot work, because eval goes through a python
# float
for t in [np.float32, np.float64]:
yield self._test_type_repr, t
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
class TestSizeOf(TestCase):
def test_equal_nbytes(self):
for type in types:
x = type(0)
assert_(sys.getsizeof(x) > x.nbytes)
def test_error(self):
d = np.float32()
assert_raises(TypeError, d.__sizeof__, "a")
class TestMultiply(TestCase):
def test_seq_repeat(self):
# Test that basic sequences get repeated when multiplied with numpy
# integers, and that errors are raised when multiplied with others.
# Some of this behaviour may be controversial and could be open for
# change.
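# For example, [1, 2, 3] * np.int64(2) repeats the list to
# [1, 2, 3, 1, 2, 3], while [1, 2, 3] * np.float64(2.0) raises TypeError.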
for seq_type in (list, tuple):
seq = seq_type([1, 2, 3])
for numpy_type in np.typecodes["AllInteger"]:
i = np.dtype(numpy_type).type(2)
assert_equal(seq * i, seq * int(i))
assert_equal(i * seq, int(i) * seq)
for numpy_type in np.typecodes["All"].replace("V", ""):
if numpy_type in np.typecodes["AllInteger"]:
continue
i = np.dtype(numpy_type).type()
assert_raises(TypeError, operator.mul, seq, i)
assert_raises(TypeError, operator.mul, i, seq)
def test_no_seq_repeat_basic_array_like(self):
# Test that an array-like which does not know how to be multiplied
# does not attempt sequence repeat (raise TypeError).
# See also gh-7428.
class ArrayLike(object):
def __init__(self, arr):
self.arr = arr
def __array__(self):
return self.arr
# Test for simple ArrayLike above and memoryviews (original report)
for arr_like in (ArrayLike(np.ones(3)), memoryview(np.ones(3))):
assert_array_equal(arr_like * np.float32(3.), np.full(3, 3.))
assert_array_equal(np.float32(3.) * arr_like, np.full(3, 3.))
assert_array_equal(arr_like * np.int_(3), np.full(3, 3))
assert_array_equal(np.int_(3) * arr_like, np.full(3, 3))
class TestNegative(TestCase):
def test_exceptions(self):
a = np.ones((), dtype=np.bool_)[()]
assert_raises(TypeError, operator.neg, a)
def test_result(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
for dt in types:
a = np.ones((), dtype=dt)[()]
assert_equal(operator.neg(a) + a, 0)
class TestSubtract(TestCase):
def test_exceptions(self):
a = np.ones((), dtype=np.bool_)[()]
assert_raises(TypeError, operator.sub, a, a)
def test_result(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
for dt in types:
a = np.ones((), dtype=dt)[()]
assert_equal(operator.sub(a, a), 0)
class TestAbs(TestCase):
def _test_abs_func(self, absfunc):
for tp in floating_types:
x = tp(-1.5)
assert_equal(absfunc(x), 1.5)
x = tp(0.0)
res = absfunc(x)
# assert_equal() checks zero signedness
assert_equal(res, 0.0)
x = tp(-0.0)
res = absfunc(x)
assert_equal(res, 0.0)
def test_builtin_abs(self):
self._test_abs_func(abs)
def test_numpy_abs(self):
self._test_abs_func(np.abs)
if __name__ == "__main__":
run_module_suite()
|
|
"""
homeassistant.components.sensor.openweathermap
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
OpenWeatherMap (OWM) service.
Configuration:
To use the OpenWeatherMap sensor you will need to add something like the
following to your configuration.yaml file.
sensor:
platform: openweathermap
api_key: YOUR_APP_KEY
forecast: 0 or 1
monitored_conditions:
- weather
- temperature
- wind_speed
- humidity
- pressure
- clouds
- rain
- snow
Variables:
api_key
*Required
To retrieve this value, log into your account at http://openweathermap.org/
forecast
*Optional
Enables the forecast. The default is to display the current conditions.
monitored_conditions
*Required
Conditions to monitor. See the configuration example above for a
list of all available conditions to monitor.
Details for the API: http://bugs.openweathermap.org/projects/api/wiki
Only metric measurements are supported at the moment.
"""
import logging
from datetime import timedelta
from homeassistant.util import Throttle
from homeassistant.const import (CONF_API_KEY, TEMP_CELCIUS, TEMP_FAHRENHEIT)
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['pyowm==2.2.1']
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPES = {
'weather': ['Condition', ''],
'temperature': ['Temperature', ''],
'wind_speed': ['Wind speed', 'm/s'],
'humidity': ['Humidity', '%'],
'pressure': ['Pressure', 'hPa'],
'clouds': ['Cloud coverage', '%'],
'rain': ['Rain', 'mm'],
'snow': ['Snow', 'mm']
}
# Return cached results if last scan was less than this time ago
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=120)
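# The @Throttle decorator on WeatherData.update() below enforces this: calls
# made within 120 seconds of the previous update return early and the sensors
# keep reading the previously fetched data.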
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Get the OpenWeatherMap sensor. """
if None in (hass.config.latitude, hass.config.longitude):
_LOGGER.error("Latitude or longitude not set in Home Assistant config")
return False
try:
from pyowm import OWM
except ImportError:
_LOGGER.exception(
"Unable to import pyowm. "
"Did you maybe not install the 'PyOWM' package?")
return False
SENSOR_TYPES['temperature'][1] = hass.config.temperature_unit
unit = hass.config.temperature_unit
forecast = config.get('forecast', 0)
owm = OWM(config.get(CONF_API_KEY, None))
if not owm:
_LOGGER.error(
"Connection error "
"Please check your settings for OpenWeatherMap.")
return False
data = WeatherData(owm, forecast, hass.config.latitude,
hass.config.longitude)
dev = []
try:
for variable in config['monitored_conditions']:
if variable not in SENSOR_TYPES:
_LOGGER.error('Sensor type: "%s" does not exist', variable)
else:
dev.append(OpenWeatherMapSensor(data, variable, unit))
except KeyError:
pass
if forecast == 1:
SENSOR_TYPES['forecast'] = ['Forecast', '']
dev.append(OpenWeatherMapSensor(data, 'forecast', unit))
add_devices(dev)
# pylint: disable=too-few-public-methods
class OpenWeatherMapSensor(Entity):
""" Implements an OpenWeatherMap sensor. """
def __init__(self, weather_data, sensor_type, temp_unit):
self.client_name = 'Weather'
self._name = SENSOR_TYPES[sensor_type][0]
self.owa_client = weather_data
self.temp_unit = temp_unit
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self.update()
@property
def name(self):
return '{} {}'.format(self.client_name, self._name)
@property
def state(self):
""" Returns the state of the device. """
return self._state
@property
def unit_of_measurement(self):
""" Unit of measurement of this entity, if any. """
return self._unit_of_measurement
# pylint: disable=too-many-branches
def update(self):
""" Gets the latest data from OWM and updates the states. """
self.owa_client.update()
data = self.owa_client.data
fc_data = self.owa_client.fc_data
if self.type == 'weather':
self._state = data.get_detailed_status()
elif self.type == 'temperature':
if self.temp_unit == TEMP_CELCIUS:
self._state = round(data.get_temperature('celsius')['temp'],
1)
elif self.temp_unit == TEMP_FAHRENHEIT:
self._state = round(data.get_temperature('fahrenheit')['temp'],
1)
else:
self._state = round(data.get_temperature()['temp'], 1)
elif self.type == 'wind_speed':
self._state = data.get_wind()['speed']
elif self.type == 'humidity':
self._state = data.get_humidity()
elif self.type == 'pressure':
self._state = round(data.get_pressure()['press'], 0)
elif self.type == 'clouds':
self._state = data.get_clouds()
elif self.type == 'rain':
if data.get_rain():
self._state = round(data.get_rain()['3h'], 0)
self._unit_of_measurement = 'mm'
else:
self._state = 'not raining'
self._unit_of_measurement = ''
elif self.type == 'snow':
if data.get_snow():
self._state = round(data.get_snow()['3h'], 0)
self._unit_of_measurement = 'mm'
else:
self._state = 'not snowing'
self._unit_of_measurement = ''
elif self.type == 'forecast':
self._state = fc_data.get_weathers()[0].get_status()
class WeatherData(object):
""" Gets the latest data from OpenWeatherMap. """
def __init__(self, owm, forecast, latitude, longitude):
self.owm = owm
self.forecast = forecast
self.latitude = latitude
self.longitude = longitude
self.data = None
self.fc_data = None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
""" Gets the latest data from OpenWeatherMap. """
obs = self.owm.weather_at_coords(self.latitude, self.longitude)
self.data = obs.get_weather()
if self.forecast == 1:
obs = self.owm.three_hours_forecast_at_coords(self.latitude,
self.longitude)
self.fc_data = obs.get_forecast()
|
|
from __future__ import print_function
import errno
import json
import os
import platform
import re
import shlex
import signal
import subprocess
import sys
import tempfile
import textwrap
import time
import traceback
from pynailgun import NailgunConnection, NailgunException
from timing import monotonic_time_nanos
from tracing import Tracing
from subprocutils import check_output, CalledProcessError, which
MAX_BUCKD_RUN_COUNT = 64
BUCKD_CLIENT_TIMEOUT_MILLIS = 60000
GC_MAX_PAUSE_TARGET = 15000
JAVA_MAX_HEAP_SIZE_MB = 1000
# While waiting for the daemon to terminate, print a message at most
# every DAEMON_BUSY_MESSAGE_SECONDS seconds.
DAEMON_BUSY_MESSAGE_SECONDS = 1.0
# Describes a resource used by this driver.
# - name: logical name of the resource
# - executable: whether the resource should/needs execute permissions
# - basename: required basename of the resource
class Resource(object):
def __init__(self, name, executable=False, basename=None):
self.name = name
self.executable = executable
self.basename = name if basename is None else basename
# Resources that get propagated to buck via system properties.
EXPORTED_RESOURCES = [
Resource("testrunner_classes"),
Resource("abi_processor_classes"),
Resource("path_to_asm_jar"),
Resource("logging_config_file"),
Resource("path_to_pathlib_py", basename='pathlib.py'),
Resource("path_to_intellij_py"),
Resource("path_to_pex"),
Resource("path_to_pywatchman"),
Resource("path_to_scandir_py", basename='scandir.py'),
Resource("path_to_sh_binary_template"),
Resource("jacoco_agent_jar"),
Resource("report_generator_jar"),
Resource("path_to_static_content"),
Resource("path_to_pex", executable=True),
Resource("dx"),
Resource("android_agent_path"),
Resource("native_exopackage_fake_path"),
]
class CommandLineArgs:
def __init__(self, cmdline):
self.args = cmdline[1:]
self.buck_options = []
self.command = None
self.command_options = []
for arg in self.args:
if (self.command is not None):
self.command_options.append(arg)
elif (arg[:1]) == "-":
self.buck_options.append(arg)
else:
self.command = arg
# Whether this is a help command that doesn't run a build
# n.b. 'buck --help clean' is *not* currently a help command
# n.b. 'buck --version' *is* a help command
def is_help(self):
return self.command is None or "--help" in self.command_options
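# Illustrative parses:
#   CommandLineArgs(['buck', 'build', '//foo:bar']).command -> 'build'
#   CommandLineArgs(['buck', '--help', 'clean']).is_help()  -> False
#   CommandLineArgs(['buck', '--version']).is_help()        -> True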
class RestartBuck(Exception):
pass
class BuckToolException(Exception):
pass
class BuckTool(object):
def __init__(self, buck_project):
self._command_line = CommandLineArgs(sys.argv)
self._buck_project = buck_project
self._tmp_dir = self._platform_path(buck_project.tmp_dir)
self._pathsep = os.pathsep
if (sys.platform == 'cygwin'):
self._pathsep = ';'
# Check whether the given resource exists.
def _has_resource(self, resource):
raise NotImplementedError()
# Return an on-disk path to the given resource. This may cause
# implementations to unpack the resource at this point.
def _get_resource(self, resource):
raise NotImplementedError()
def _use_buckd(self):
return not os.environ.get('NO_BUCKD') and not self._command_line.is_help()
def _environ_for_buck(self):
env = os.environ.copy()
env['CLASSPATH'] = str(self._get_bootstrap_classpath())
env['BUCK_CLASSPATH'] = str(self._get_java_classpath())
env['BUCK_TTY'] = str(int(sys.stdin.isatty()))
return env
def launch_buck(self, build_id):
with Tracing('BuckRepo.launch_buck'):
if self._command_line.command == "clean" and not self._command_line.is_help():
self.kill_buckd()
buck_version_uid = self._get_buck_version_uid()
use_buckd = self._use_buckd()
if not self._command_line.is_help():
has_watchman = bool(which('watchman'))
if use_buckd and has_watchman:
buckd_run_count = self._buck_project.get_buckd_run_count()
running_version = self._buck_project.get_running_buckd_version()
new_buckd_run_count = buckd_run_count + 1
if (buckd_run_count == MAX_BUCKD_RUN_COUNT or
running_version != buck_version_uid):
self.kill_buckd()
new_buckd_run_count = 0
if new_buckd_run_count == 0 or not self._is_buckd_running():
self.launch_buckd(buck_version_uid=buck_version_uid)
else:
self._buck_project.update_buckd_run_count(new_buckd_run_count)
elif use_buckd and not has_watchman:
print("Not using buckd because watchman isn't installed.",
file=sys.stderr)
elif not use_buckd:
print("Not using buckd because NO_BUCKD is set.",
file=sys.stderr)
env = self._environ_for_buck()
env['BUCK_BUILD_ID'] = build_id
buck_socket_path = self._buck_project.get_buckd_socket_path()
if use_buckd and self._is_buckd_running() and \
os.path.exists(buck_socket_path):
with Tracing('buck', args={'command': sys.argv[1:]}):
exit_code = 2
last_diagnostic_time = 0
while exit_code == 2:
with NailgunConnection('local:.buckd/sock',
cwd=self._buck_project.root) as c:
exit_code = c.send_command(
'com.facebook.buck.cli.Main',
sys.argv[1:],
env=env,
cwd=self._buck_project.root)
if exit_code == 2:
now = time.time()
if now - last_diagnostic_time > DAEMON_BUSY_MESSAGE_SECONDS:
print('Daemon is busy, waiting for it to become free...',
file=sys.stderr)
last_diagnostic_time = now
time.sleep(1)
return exit_code
command = ["buck"]
extra_default_options = [
"-Djava.io.tmpdir={0}".format(self._tmp_dir)
]
command.extend(self._get_java_args(buck_version_uid, extra_default_options))
command.append("com.facebook.buck.cli.bootstrapper.ClassLoaderBootstrapper")
command.append("com.facebook.buck.cli.Main")
command.extend(sys.argv[1:])
return subprocess.call(command,
cwd=self._buck_project.root,
env=env,
executable=which("java"))
def launch_buckd(self, buck_version_uid=None):
with Tracing('BuckRepo.launch_buckd'):
self._setup_watchman_watch()
if buck_version_uid is None:
buck_version_uid = self._get_buck_version_uid()
# Override self._tmp_dir to a long lived directory.
buckd_tmp_dir = self._buck_project.create_buckd_tmp_dir()
ngserver_output_path = os.path.join(buckd_tmp_dir, 'ngserver-out')
'''
Use SoftRefLRUPolicyMSPerMB for immediate GC of javac output.
Set timeout to 60s (longer than the biggest GC pause seen for a 2GB
heap) and GC target to 15s. This means that the GC has to miss its
target by 100% or many 500ms heartbeats must be missed before a client
disconnection occurs. Specify port 0 to allow Nailgun to find an
available port, then parse the port number out of the first log entry.
'''
command = ["buckd"]
extra_default_options = [
"-Dbuck.buckd_launch_time_nanos={0}".format(monotonic_time_nanos()),
"-XX:MaxGCPauseMillis={0}".format(GC_MAX_PAUSE_TARGET),
"-XX:SoftRefLRUPolicyMSPerMB=0",
# Stop Java waking up every 50ms to collect thread
# statistics; doing it once every five seconds is much
# saner for a long-lived daemon.
"-XX:PerfDataSamplingInterval=5000",
# Do not touch most signals
"-Xrs",
# Likewise, waking up once per second just in case
# there's some rebalancing to be done is silly.
"-XX:+UnlockDiagnosticVMOptions",
"-XX:GuaranteedSafepointInterval=5000",
"-Djava.io.tmpdir={0}".format(buckd_tmp_dir),
"-Dcom.martiansoftware.nailgun.NGServer.outputPath={0}".format(
ngserver_output_path),
]
if is_java8_or_9():
extra_default_options.extend([
"-XX:+UseG1GC",
"-XX:MaxHeapFreeRatio=40",
])
command.extend(self._get_java_args(buck_version_uid, extra_default_options))
command.append("com.facebook.buck.cli.bootstrapper.ClassLoaderBootstrapper")
command.append("com.facebook.buck.cli.Main$DaemonBootstrap")
command.append("local:.buckd/sock")
command.append("{0}".format(BUCKD_CLIENT_TIMEOUT_MILLIS))
'''
Change the process group of the child buckd process so that when this
script is interrupted, it does not kill buckd.
'''
def preexec_func():
# Close any open file descriptors to further separate buckd from its
# invoking context (e.g. otherwise we'd hang when running things like
# `ssh localhost buck clean`).
dev_null_fd = os.open("/dev/null", os.O_RDWR)
os.dup2(dev_null_fd, 0)
os.dup2(dev_null_fd, 1)
os.dup2(dev_null_fd, 2)
os.close(dev_null_fd)
buck_socket_path = self._buck_project.get_buckd_socket_path()
# Make sure the Unix domain socket doesn't exist before this call.
try:
os.unlink(buck_socket_path)
except OSError as e:
if e.errno == errno.ENOENT:
# Socket didn't previously exist.
pass
else:
raise e
process = subprocess.Popen(
command,
executable=which("java"),
cwd=self._buck_project.root,
close_fds=True,
preexec_fn=preexec_func,
env=self._environ_for_buck())
self._buck_project.save_buckd_version(buck_version_uid)
self._buck_project.update_buckd_run_count(0)
# Give Java some time to create the listening socket.
for i in range(0, 100):
if not os.path.exists(buck_socket_path):
time.sleep(0.01)
returncode = process.poll()
# If the process hasn't exited yet, everything is working as expected
if returncode is None:
return 0
return returncode
def kill_buckd(self):
with Tracing('BuckRepo.kill_buckd'):
buckd_socket_path = self._buck_project.get_buckd_socket_path()
if os.path.exists(buckd_socket_path):
print("Shutting down nailgun server...", file=sys.stderr)
try:
with NailgunConnection('local:.buckd/sock', cwd=self._buck_project.root) as c:
c.send_command('ng-stop')
except NailgunException as e:
if e.code not in (NailgunException.CONNECT_FAILED,
NailgunException.CONNECTION_BROKEN,
NailgunException.UNEXPECTED_CHUNKTYPE):
raise BuckToolException(
'Unexpected error shutting down nailgun server: ' +
str(e))
self._buck_project.clean_up_buckd()
def _setup_watchman_watch(self):
with Tracing('BuckRepo._setup_watchman_watch'):
if not which('watchman'):
message = textwrap.dedent("""\
Watchman not found, please install when using buckd.
See https://github.com/facebook/watchman for details.""")
if sys.platform == "darwin":
message += "\n(brew install watchman on OS X)"
# Bail if watchman isn't installed as we know java's
# FileSystemWatcher will take too long to process events.
raise BuckToolException(message)
print("Using watchman.", file=sys.stderr)
def _is_buckd_running(self):
with Tracing('BuckRepo._is_buckd_running'):
buckd_socket_path = self._buck_project.get_buckd_socket_path()
if not os.path.exists(buckd_socket_path):
return False
try:
with NailgunConnection(
'local:.buckd/sock',
stdin=None,
stdout=None,
stderr=None,
cwd=self._buck_project.root) as c:
c.send_command('ng-stats')
except NailgunException as e:
if e.code == NailgunException.CONNECT_FAILED:
return False
else:
raise
return True
def _get_buck_version_uid(self):
raise NotImplementedError()
def _get_bootstrap_classpath(self):
raise NotImplementedError()
def _get_java_classpath(self):
raise NotImplementedError()
def _get_extra_java_args(self):
return []
def _get_java_args(self, version_uid, extra_default_options=[]):
java_args = [] if is_java8_or_9() else ["-XX:MaxPermSize=256m"]
java_args.extend([
"-Xmx{0}m".format(JAVA_MAX_HEAP_SIZE_MB),
"-Djava.awt.headless=true",
"-Djava.util.logging.config.class=com.facebook.buck.cli.bootstrapper.LogConfig",
"-Dbuck.test_util_no_tests_dir=true",
"-Dbuck.version_uid={0}".format(version_uid),
"-Dbuck.buckd_dir={0}".format(self._buck_project.buckd_dir),
"-Dorg.eclipse.jetty.util.log.class=org.eclipse.jetty.util.log.JavaUtilLog",
])
for resource in EXPORTED_RESOURCES:
if self._has_resource(resource):
java_args.append(
"-Dbuck.{0}={1}".format(
resource.name, self._get_resource(resource)))
if sys.platform == "darwin":
java_args.append("-Dbuck.enable_objc=true")
java_args.append("-Djava.library.path=" + os.path.dirname(
self._get_resource(
Resource("libjcocoa.dylib"))))
if os.environ.get("BUCK_DEBUG_MODE"):
java_args.append("-agentlib:jdwp=transport=dt_socket,"
"server=y,suspend=y,address=8888")
if os.environ.get("BUCK_DEBUG_SOY"):
java_args.append("-Dbuck.soy.debug=true")
java_args.extend(extra_default_options)
if self._buck_project.buck_javaargs:
java_args.extend(shlex.split(self._buck_project.buck_javaargs))
if self._buck_project.buck_javaargs_local:
java_args.extend(shlex.split(self._buck_project.buck_javaargs_local))
java_args.extend(self._get_extra_java_args())
extra_java_args = os.environ.get("BUCK_EXTRA_JAVA_ARGS")
if extra_java_args:
java_args.extend(shlex.split(extra_java_args))
return java_args
def _platform_path(self, path):
if sys.platform != 'cygwin':
return path
return subprocess.check_output(['cygpath', '-w', path]).strip()
_java8_or_9 = None
def is_java8_or_9():
global _java8_or_9
if _java8_or_9 is not None:
return _java8_or_9
try:
cmd = ['java', '-Xms64m', '-version']
output = check_output(cmd, stderr=subprocess.STDOUT)
version_line = output.strip().splitlines()[0]
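# The regex below accepts version lines such as 'java version "1.8.0_121"'
# or 'openjdk version "9"', but not e.g. 'java version "1.7.0_79"'.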
m = re.compile('(openjdk|java) version "(1\.(8|9)\.|9).*').match(version_line)
_java8_or_9 = bool(m)
return _java8_or_9
except CalledProcessError as e:
print(e.output, file=sys.stderr)
raise e
def install_signal_handlers():
if os.name == 'posix':
signal.signal(
signal.SIGUSR1,
lambda sig, frame: traceback.print_stack(frame))
|
|
import os
import threading
from datetime import timedelta
from flask import make_response, current_app, request, render_template, jsonify
from apiclient import discovery, errors
from functools import update_wrapper
from app import app
from copy import deepcopy
from app import cache
from pybars import Compiler
import datetime
import base64
import re
compiler = Compiler()
fullmessageset = []
parsedmessageset = []
def getcachedthreads():
newcollection = None
cachedmessagesetids = cache.get('cachedmessagesetids')
if cachedmessagesetids:
for emailthreadid in cachedmessagesetids:
cachedthread = cache.get(emailthreadid['id'])
if cachedthread:
parsedmessageset.append(cachedthread)
newcollection = deepcopy(parsedmessageset)
parsedmessageset[:] = []
return newcollection
def rendercollection(newcollection):
basedir = os.path.abspath(os.path.dirname(__file__))
templatedir = os.path.join(basedir, 'static/piemail/www/libs/handlebars/templates/email-list.handlebars')
source = open(templatedir, "r").read().decode('utf-8')
template = compiler.compile(source)
output = template(newcollection)
return output
def getcontext(http_auth=None, retrievebody=None):
service = discovery.build('gmail', 'v1', http=http_auth)
results = service.users().threads().list(userId='me', maxResults=100, fields="threads/id", q="in:inbox").execute()
batch = service.new_batch_http_request(callback=processthreads)
# cache.set('cachedmessagesetids', results['threads'], timeout=300) # Cache for 5 minutes
for thread in results['threads']:
batch.add(service.users().threads().get(userId='me', id=thread['id'], fields="messages/snippet, "
"messages/internalDate, "
"messages/labelIds, "
"messages/threadId, "
"messages/payload/parts, "
"messages/payload/body, "
"messages/payload/headers"))
batch.execute()
for item in fullmessageset:
# t = threading.Thread(target=parse_item, kwargs={"item": item, "retrievebody": retrievebody})
# t.start()
parse_item(item, retrievebody)
newcollection = deepcopy(parsedmessageset)
fullmessageset[:] = []
parsedmessageset[:] = []
return newcollection
def getmessages(http_auth, threadid):
service = discovery.build('gmail', 'v1', http=http_auth)
try:
thread = service.users().threads().get(userId='me', id=threadid).execute()
except errors.HttpError, error:
print 'An error occurred: %s' % error
return jsonify(error)
batch = service.new_batch_http_request(callback=processmessages)
for message in thread['messages']:
batch.add(service.users().messages().get(userId='me', id=message['id']))
batch.execute()
for item in fullmessageset:
# m = threading.Thread(target=parse_item, kwargs={"item": item, "retrievebody": True})
# m.start()
parse_item(item, retrievebody=True)
response = dict()
response['iserror'] = False
response['savedsuccess'] = True
response['currentmessagelist'] = deepcopy(parsedmessageset)
fullmessageset[:] = []
parsedmessageset[:] = []
return response
def processthreads(request_id, response, exception):
if exception is not None:
pass
else:
fullmessageset.append((request_id, response['messages'][-1], len(response['messages'])))
def processmessages(request_id, response, exception):
if exception is not None:
pass
else:
fullmessageset.append((request_id, response))
def parse_item(item, retrievebody=False):
threaditems = dict()
# INBOX, CATEGORY_SOCIAL, CATEGORY_PERSONAL, CATEGORY_PROMOTIONS, CATEGORY_FORUMS, CATEGORY_UPDATES, SENT,
# PURCHASES, TRAVEL, FINANCE, STARRED, UNREAD, INBOX, IMPORTANT
threaditems['labels'] = item[1]['labelIds']
if 'UNREAD' in item[1]['labelIds']:
threaditems['unread'] = True
else:
threaditems['unread'] = False
if 'STARRED' in item[1]['labelIds']:
threaditems['star'] = True
else:
threaditems['star'] = False
if 'CATEGORY_PROMOTIONS' in item[1]['labelIds']:
threaditems['category'] = 'promotions'
if 'CATEGORY_SOCIAL' in item[1]['labelIds']:
threaditems['category'] = 'social'
# if 'CATEGORY_UPDATES' in item[1]['labelIds']:
# threaditems['category'] = 'updates'
# if 'CATEGORY_FORUMS' in item[1]['labelIds']:
# threaditems['category'] = 'forums'
if 'CATEGORY_SOCIAL' not in item[1]['labelIds'] and 'CATEGORY_PROMOTIONS' not in item[1]['labelIds']:
# and 'CATEGORY_UPDATES' not in item[1]['labelIds'] \
# and 'CATEGORY_FORUMS' not in item[1]['labelIds']:
threaditems['category'] = 'primary'
if 'INBOX' in item[1]['labelIds']:
threaditems['inbox'] = True
else:
threaditems['inbox'] = False
if 'INBOX' not in item[1]['labelIds'] and 'SENT' in item[1]['labelIds']:
threaditems['category'] = 'sent'
threaditems['inbox'] = True
threaditems['threadId'] = item[1]['threadId']
if 'id' in item[1] and item[1]['id'] != '':
threaditems['id'] = item[1]['id']
else:
threaditems['id'] = item[1]['threadId']
threaditems['snippet'] = item[1]['snippet'] + "..."
threaditems['timestamp'] = datetime.datetime.fromtimestamp(float(item[1]['internalDate'])/1000.)\
.strftime("%I:%M %p %b %d")
threaditems['start'] = datetime.datetime.fromtimestamp(float(item[1]['internalDate'])/1000.)
threaditems['sender'] = getheaders(item[1], "From")
if threaditems['sender'] == getheaders(item[1], "To"):
threaditems['sender'] = "Me"
threaditems['receiveddate'] = getheaders(item[1], "Date")
threaditems['subject'] = getheaders(item[1], "Subject")
if retrievebody:
threaditems['mailbody'] = getbody(item[1])
threaditems['rawtimestamp'] = item[1]['internalDate']
threaditems['ordinal'] = item[0]
if len(item) > 2: # Threads with messages
threaditems['length'] = item[2]
# cache.set(threaditems['id'], threaditems, timeout=300) # Cache for 5 minutes
parsedmessageset.append(threaditems)
def getheaders(item, key):
for header in item['payload']['headers']:
if header['name'] == key:
return header['value']
def getbody(message):
if 'parts' in message['payload']:
encodedbody = gethtmlpart(message['payload']['parts'])
else:
encodedbody = message['payload']['body']['data']
decodedbody = base64.urlsafe_b64decode(str(encodedbody))
decodedbody = \
re.sub(r'src="cid:([^"]+)"', "src='/static/piemail/www/icons/no_image_available.svg'", decodedbody) # cid hack
return decodedbody
def gethtmlpart(parts):
for part in parts:
if 'parts' not in part:
if part['mimeType'] == 'text/html':
return part['body']['data']
else:
return gethtmlpart(part['parts'])
return ''
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
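# Illustrative usage sketch (the route below is hypothetical, not one of the
# handlers in this module):
#
#   @app.route('/piemail/threads')
#   @crossdomain(origin='*')
#   def list_threads():
#       return rendercollection(getcachedthreads())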
@app.context_processor
def inject_static_url():
local_static_url = app.static_url_path
static_url = 'https://s3.amazonaws.com/netbardus/'
if os.environ.get('HEROKU') is not None:
local_static_url = static_url
if not static_url.endswith('/'):
static_url += '/'
if not local_static_url.endswith('/'):
local_static_url += '/'
return dict(
static_url=static_url,
local_static_url=local_static_url,
host_url=request.url_root
)
@app.errorhandler(404)
def not_found_error(error):
return render_template('404.html', error=error.description), 404
@app.errorhandler(500)
def internal_error(error):
return render_template('500.html', error=error), 500
|
|
import tensorflow as tf
import prettytensor as pt
import numpy as np
import scipy.io as io
import argparse
import models
import sys
import os
import data_loader
from collections import defaultdict
from constants import *
from progressbar import ETA, Bar, Percentage, ProgressBar
from sklearn.metrics import precision_recall_curve, average_precision_score
saver = None
sess = None
np.random.seed(1234)
tf.set_random_seed(0)
parser = argparse.ArgumentParser(description='Epidemic Response System')
parser.add_argument('-wd', '--working_directory', help='directory for storing logs')
parser.add_argument('-sf', '--save_frequency', help='Number of epochs before saving')
parser.add_argument('--model_path', help='Stored model path')
parser.add_argument('mode', choices=('train', 'eval', 'extrapolate', 'etc_user'), help='train or eval')
args = parser.parse_args()
# Training Constants
learning_rate = 1e-4
batch_size = 1
num_timesteps = 25
num_feats = 3
max_epoch = 601
num_extrapolate = 20
dataset_size = 3069
updates_per_epoch = int(np.ceil(float(dataset_size) / float(batch_size)))
if args.working_directory:
working_directory = args.working_directory
else:
working_directory = 'trial/'
if args.save_frequency:
save_frequency = args.save_frequency
else:
save_frequency = 10
if args.model_path:
model_path = args.model_path
else:
model_path = 'trial/checkpoints/model.ckpt-600'
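# get_loss() below is the mean squared error between prediction and ground
# truth, additionally divided by the batch size.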
def get_loss(pred, gt):
return tf.div(tf.reduce_mean(tf.square(tf.sub(gt, pred))),
tf.constant(float(batch_size)))
def train():
with tf.device('/gpu:0'): # run on specific device
input_tensor, pred, gt = models.import_model(num_timesteps,
num_feats,
batch_size)
loss = get_loss(pred, gt)
optimizer = tf.train.AdamOptimizer(learning_rate, epsilon=1.0)
train = optimizer.minimize(loss=loss)
dataset = data_loader.read_datasets(PREPROCESSED_DATA)
saver = tf.train.Saver() # defaults to saving all variables
# logging the loss function
loss_placeholder = tf.placeholder(tf.float32)
tf.scalar_summary('train_loss', loss_placeholder)
merged = tf.merge_all_summaries()
init = tf.initialize_all_variables()
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
writer = tf.train.SummaryWriter(os.path.join(working_directory, 'logs'),
sess.graph_def)
sess.run(init)
for epoch in range(max_epoch):
training_loss = 0.0
widgets = ["epoch #%d|" % epoch, Percentage(), Bar(), ETA()]
pbar = ProgressBar(updates_per_epoch, widgets=widgets)
pbar.start()
for i in range(updates_per_epoch):
pbar.update(i)
input_batch, gt_batch = dataset.next_batch(batch_size)
_, loss_value = sess.run([train, loss],
{input_tensor : input_batch,
gt : [gt_batch]})
training_loss += np.sum(loss_value)
training_loss = training_loss/(updates_per_epoch)
print("Loss %f" % training_loss)
# save model
if epoch % save_frequency == 0:
checkpoints_folder = os.path.join(working_directory, 'checkpoints')
if not os.path.exists(checkpoints_folder):
os.makedirs(checkpoints_folder)
saver.save(sess, os.path.join(checkpoints_folder, 'model.ckpt'),
global_step=epoch)
# save summaries
summary_str = sess.run(merged,
feed_dict={input_tensor : input_batch,
gt : [gt_batch],
loss_placeholder: training_loss})
writer.add_summary(summary_str, global_step=epoch)
writer.close()
def evaluate(print_grid=False):
with tf.device('/gpu:0'): # run on specific device
input_tensor, pred, gt = models.import_model(num_timesteps,
num_feats,
batch_size)
dataset = data_loader.read_datasets(PREPROCESSED_DATA, dataset_type='test')
saver = tf.train.Saver()
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
saver.restore(sess, model_path)
all_pred, all_gt = [], []
for i in range(updates_per_epoch):
input_batch, gt_batch = dataset.next_batch(batch_size)
pred_value = sess.run([pred],
{input_tensor : input_batch,
gt : [gt_batch]})
all_pred.append(pred_value)
all_gt.append(gt_batch)
num_align = 0
rmse = []
for i in range(len(all_pred)):
if all_pred[i] == all_gt[i]: num_align += 1
rmse.append(np.sqrt(np.power((all_pred[i] - all_gt[i]), 2)))
print "Accuracy:", float(num_align)/len(all_pred)
print "Avg. RMSE", np.mean(rmse)
print "Variance RMSE", np.var(rmse)
def init_model(model_path):
global saver
global sess
saver = tf.train.Saver()
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
saver.restore(sess, model_path)
def close_session():
global sess
sess.close()
def extrapolate(history_file, etc_dict=None):
global saver
global sess
# etc_dict is a dict mapping from province to # of new ETCs there
with tf.device('/gpu:0'): # run on specific device
input_tensor, pred, gt = models.import_model(num_timesteps,
num_feats,
batch_size)
# dataset should be [num_provinces x (num_timesteps, num_feats)]
data, provinces = np.load(history_file)
saver = tf.train.Saver()
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
saver.restore(sess, model_path)
all_extrapolated = defaultdict(list)
all_new_values = defaultdict(list)
for province, province_data in zip(provinces, data):
# for one province
# get lat and lon
lat, lon = province_data[0, 1:]
extrapolated = []
new_values = []
old_value = province_data[-1, 0]
for t in range(num_extrapolate):
pred_value = sess.run([pred],
{input_tensor: province_data})[0][0][0]
if pred_value < 0:
pred_value = 0
extrapolated.append(pred_value)
new_value = pred_value
if etc_dict and province in etc_dict:
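# Damp the predicted change by a factor of 1/(number of new ETCs + 3): the
# more new treatment centres a province gets, the smaller the projected
# increase over its previous value.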
new_value = old_value + ((pred_value - old_value) * (1.0/(etc_dict[province] + 3)))
# new_value *= (1 - etc_dict[province] * 0.1)
old_value = pred_value
new_values.append(new_value)
new_sample = np.array([new_value, lat, lon])
new_sample = np.reshape(new_sample, (1, -1))
province_data = province_data[1:, :]
province_data = np.concatenate((province_data, new_sample), axis=0)
# make example with [pred_value, lat, lon]
# remove first element in input batch and add extrapolated
all_extrapolated[province] = extrapolated
all_new_values[province] = new_values
for i, province in enumerate(provinces):
print province
print data[i]
print all_extrapolated[province]
print all_new_values[province]
# np.save('all_extrapolated', all_extrapolated)
return all_extrapolated
if __name__ == "__main__":
if args.mode == 'train':
train()
elif args.mode == 'eval':
evaluate(print_grid=False)
elif args.mode == 'extrapolate':
extrapolate(PREPROCESSED_GUINEA_DATA_EXTRA)
elif args.mode == 'etc_user':
etc_dict = {
"macenta" : 2,
"coyah" : 1,
"kerouane" : 1
}
extrapolate(PREPROCESSED_GUINEA_DATA_EXTRA, etc_dict)
|
|
#!/usr/bin/env python
# Copyright 2015 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import service_configuration_lib
class TestServiceConfigurationLib:
fake_service_configuration = {
'fake_service1': {
'deployed_to': None,
'monitoring': {
'fake_monitoring_key': 'fake_monitoring_value',
},
'deploy': {},
'port': 11111,
'runs_on': [
'fake_hostname3',
'fake_hostname2',
'fake_hostname1',
],
},
'fake_service2': {
'deployed_to': [
'fake_deployed_hostname1',
'fake_deployed_hostname2',
'fake_hostname4',
],
'monitoring': {},
'port': 22222,
'runs_on': [
'fake_hostname2',
'fake_hostname3',
'fake_hostname4',
],
},
'fake_service3': {
'deployed_to': None,
'monitoring': {},
'port': 33333,
'runs_on': [
'fake_hostname3',
'fake_hostname4',
'fake_hostname5',
],
'env_runs_on': {
'fake_env1': ['fake_hostname3'],
'fake_env2': ['fake_hostname4', 'fake_hostname5'],
},
'needs_puppet_help': True,
},
'fake_service4': {
'deployed_to': True,
'runs_on': [],
'needs_puppet_help': True,
},
'fake_service5': {
'deployed_to': [],
'runs_on': [],
'needs_puppet_help': True,
},
}
def test_generate_service_info_should_have_all_keys(self):
"""I'm not entirely sure what this test is testing since I can add a
new value or remove an old value and the test passes without changing
any code. I simplified it to make it less misleading and focus on the
one thing it does do, which is test that the arg service_information is
updated.
"""
fake_service_information = {'fakekey2': 'fakevalue2'}
fake_port = 9999
actual = service_configuration_lib.generate_service_info(
fake_service_information,
port=fake_port,
)
expected = {
# Can't use the fake_service_information because it's an
# un-nested hash at this point
'fakekey2': 'fakevalue2',
'port': fake_port,
}
assert expected == actual
def test_read_monitoring_should_return_empty_when_file_doesnt_exist(self):
expected = {}
fake_monitoring_file = 'fake_monitoring_file'
# TODO: Mock open?
actual = service_configuration_lib.read_monitoring(
fake_monitoring_file,
)
assert expected == actual
def test_read_deploy_should_return_empty_when_file_doesnt_exist(self):
expected = {}
fake_deploy_file = 'fake_deploy_file'
# TODO: Mock open?
actual = service_configuration_lib.read_deploy(
fake_deploy_file,
)
assert expected == actual
def test_read_smartstack_should_return_empty_when_file_doesnt_exist(self):
expected = {}
fake_smartstack_file = 'fake_smartstack_file'
# TODO: Mock open?
actual = service_configuration_lib.read_smartstack(
fake_smartstack_file,
)
assert expected == actual
def test_read_dependencies_return_empty_when_file_doesnt_exist(self):
expected = {}
fake_dependencies_file = 'fake_dependencies_file'
# TODO: Mock open?
        actual = service_configuration_lib.read_dependencies(
fake_dependencies_file,
)
assert expected == actual
def test_services_that_run_on_should_properly_read_configuration(self):
expected = ['fake_service1', 'fake_service2']
fake_hostname = 'fake_hostname2'
fake_service_configuration = self.fake_service_configuration
actual = service_configuration_lib.services_that_run_on(fake_hostname, fake_service_configuration)
assert sorted(expected) == sorted(actual)
def test_services_that_run_on_should_return_an_empty_array_when_the_hostname_isnt_anywhere(self):
expected = []
fake_hostname = 'non_existent_fake_hostname2'
fake_service_configuration = self.fake_service_configuration
actual = service_configuration_lib.services_that_run_on(fake_hostname, fake_service_configuration)
assert sorted(expected) == sorted(actual)
def test_services_deployed_to_should_return_deployed_and_running_services(self):
expected = ['fake_service1', 'fake_service2', 'fake_service3', 'fake_service4']
fake_hostname = 'fake_hostname3'
fake_service_configuration = self.fake_service_configuration
actual = service_configuration_lib.services_deployed_on(fake_hostname, fake_service_configuration)
assert set(expected) == set(actual)
def test_services_needing_puppet_help_on_should_properly_read_configuration(self):
expected = ['fake_service3', 'fake_service4']
fake_hostname = 'fake_hostname4'
fake_service_configuration = self.fake_service_configuration
actual = service_configuration_lib.services_needing_puppet_help_on(fake_hostname, fake_service_configuration)
assert expected == actual
def test_all_nodes_that_run_should_properly_return_the_right_nodes(self):
expected = ['fake_hostname3', 'fake_hostname4', 'fake_hostname5']
fake_service = 'fake_service3'
fake_service_configuration = self.fake_service_configuration
actual = service_configuration_lib.all_nodes_that_run(fake_service, fake_service_configuration)
assert expected == actual
def test_all_nodes_that_receive_removes_duplicates(self):
expected = [
'fake_deployed_hostname1',
'fake_deployed_hostname2',
'fake_hostname2',
'fake_hostname3',
'fake_hostname4',
]
fake_service = 'fake_service2'
fake_service_configuration = self.fake_service_configuration
actual = service_configuration_lib.all_nodes_that_receive(fake_service, fake_service_configuration)
assert expected == actual
def test_all_nodes_that_receive_with_no_deploys_to(self):
expected = ['fake_hostname3', 'fake_hostname4', 'fake_hostname5']
fake_service = 'fake_service3'
fake_service_configuration = self.fake_service_configuration
actual = service_configuration_lib.all_nodes_that_receive(fake_service, fake_service_configuration)
assert expected == actual
def test_all_nodes_that_receive_is_sorted(self):
expected = ['fake_hostname1', 'fake_hostname2', 'fake_hostname3']
fake_service = 'fake_service1'
fake_service_configuration = self.fake_service_configuration
actual = service_configuration_lib.all_nodes_that_receive(fake_service, fake_service_configuration)
assert expected == actual
@mock.patch('os.path.abspath', return_value='nodir')
@mock.patch('os.listdir', return_value=['1', '2', '3'])
@mock.patch('service_configuration_lib.read_service_configuration_from_dir', return_value='hello')
def test_read_services_configuration(self, read_patch, listdir_patch, abs_patch):
expected = {'1': 'hello', '2': 'hello', '3': 'hello'}
actual = service_configuration_lib.read_services_configuration(soa_dir='testdir')
abs_patch.assert_called_once_with('testdir')
listdir_patch.assert_called_once_with('nodir')
read_patch.assert_has_calls(
[mock.call('nodir', '1'), mock.call('nodir', '2'), mock.call('nodir', '3')],
)
assert expected == actual
@mock.patch('os.path.abspath', return_value='nodir')
@mock.patch('os.listdir', return_value=['1', '2', '3'])
def test_list_services(self, listdir_patch, abs_patch):
expected = ['1', '2', '3']
actual = service_configuration_lib.list_services(soa_dir='testdir')
abs_patch.assert_called_once_with('testdir')
listdir_patch.assert_called_once_with('nodir')
assert expected == actual
def test_read_soa_metadata(self, tmpdir):
soa_dir = tmpdir.mkdir('test_read_soa_metadata')
metadata_file = soa_dir.join('.metadata.json')
metadata_file.write('{"hello":"world"}')
actual_metadata = service_configuration_lib.read_soa_metadata(soa_dir=str(soa_dir))
assert actual_metadata == {'hello': 'world'}
def test_read_soa_metadata_dne(self, tmpdir):
soa_dir = tmpdir.mkdir('test_read_soa_metadata_dne')
actual_metadata = service_configuration_lib.read_soa_metadata(soa_dir=str(soa_dir))
assert actual_metadata == {}
@mock.patch('service_configuration_lib.read_service_configuration_from_dir', return_value='bye')
@mock.patch('os.path.abspath', return_value='cafe')
def test_read_service_configuration(self, abs_patch, read_patch):
expected = 'bye'
actual = service_configuration_lib.read_service_configuration('boba', soa_dir='tea')
abs_patch.assert_called_once_with('tea')
read_patch.assert_called_once_with('cafe', 'boba')
assert expected == actual
@mock.patch('os.path.join', return_value='forever_joined')
@mock.patch('service_configuration_lib.read_port', return_value='1111')
@mock.patch('service_configuration_lib.read_monitoring', return_value='no_monitoring')
@mock.patch('service_configuration_lib.read_deploy', return_value='no_deploy')
@mock.patch('service_configuration_lib.read_data', return_value='no_data')
@mock.patch('service_configuration_lib.read_smartstack', return_value={})
@mock.patch('service_configuration_lib.read_service_information', return_value='no_info')
@mock.patch('service_configuration_lib.read_dependencies', return_value='no_dependencies')
@mock.patch('service_configuration_lib.generate_service_info', return_value={'oof': 'ouch'})
def test_read_service_configuration_from_dir(
self,
gen_patch,
deps_patch,
info_patch,
smartstack_patch,
data_patch,
deploy_patch,
monitoring_patch,
port_patch,
join_patch,
):
expected = {'oof': 'ouch'}
actual = service_configuration_lib.read_service_configuration_from_dir('never', 'die')
join_patch.assert_has_calls([
mock.call('never', 'die', 'port'),
mock.call('never', 'die', 'monitoring.yaml'),
mock.call('never', 'die', 'deploy.yaml'),
mock.call('never', 'die', 'data.yaml'),
mock.call('never', 'die', 'smartstack.yaml'),
mock.call('never', 'die', 'service.yaml'),
mock.call('never', 'die', 'dependencies.yaml'),
])
port_patch.assert_called_once_with('forever_joined')
monitoring_patch.assert_called_once_with('forever_joined')
deploy_patch.assert_called_once_with('forever_joined')
data_patch.assert_called_once_with('forever_joined')
smartstack_patch.assert_called_once_with('forever_joined')
info_patch.assert_called_once_with('forever_joined')
deps_patch.assert_called_once_with('forever_joined')
gen_patch.assert_called_once_with(
'no_info', port='1111',
monitoring='no_monitoring',
deploy='no_deploy',
data='no_data',
dependencies='no_dependencies',
smartstack={},
)
assert expected == actual
@mock.patch('os.path.join', return_value='together_forever')
@mock.patch('os.path.abspath', return_value='real_soa_dir')
@mock.patch('service_configuration_lib.read_yaml_file', return_value={'what': 'info'})
def test_read_extra_service_information(self, info_patch, abs_patch, join_patch):
expected = {'what': 'info'}
actual = service_configuration_lib.read_extra_service_information(
'noname',
'noinfo', soa_dir='whatsadir',
)
abs_patch.assert_called_once_with('whatsadir')
join_patch.assert_called_once_with('real_soa_dir', 'noname', 'noinfo.yaml')
info_patch.assert_called_once_with('together_forever', deepcopy=True)
assert expected == actual
@mock.patch('io.open', autospec=True)
@mock.patch('service_configuration_lib.load_yaml', return_value={'data': 'mock'})
def testread_yaml_file_single(self, load_patch, open_patch):
expected = {'data': 'mock'}
filename = 'fake_fname_uno'
actual = service_configuration_lib.read_yaml_file(filename)
open_patch.assert_called_once_with(filename, 'r', encoding='UTF-8')
load_patch.assert_called_once_with(open_patch.return_value.__enter__().read())
assert expected == actual
@mock.patch('io.open', autospec=True)
@mock.patch('service_configuration_lib.load_yaml', return_value={'mmmm': 'tests'})
def testread_yaml_file_with_cache(self, load_patch, open_patch):
expected = {'mmmm': 'tests'}
filename = 'fake_fname_dos'
service_configuration_lib.enable_yaml_cache()
actual = service_configuration_lib.read_yaml_file(filename)
actual_two = service_configuration_lib.read_yaml_file(filename)
open_patch.assert_called_once_with(filename, 'r', encoding='UTF-8')
load_patch.assert_called_once_with(open_patch.return_value.__enter__().read())
assert expected == actual
assert expected == actual_two
# When we cache, we can NOT return a pointer to the original object
# because the caller might mutate it. We need to ensure that
# the returned object is a copy.
assert expected is not actual_two
@mock.patch('io.open', autospec=True)
@mock.patch('service_configuration_lib.load_yaml', return_value={'water': 'slide'})
def testread_yaml_file_no_cache(self, load_patch, open_patch):
expected = {'water': 'slide'}
filename = 'fake_fname_tres'
service_configuration_lib.disable_yaml_cache()
actual = service_configuration_lib.read_yaml_file(filename)
actual_two = service_configuration_lib.read_yaml_file(filename)
open_patch.assert_any_call(filename, 'r', encoding='UTF-8')
assert open_patch.call_count == 2
load_patch.assert_any_call(open_patch.return_value.__enter__().read())
assert load_patch.call_count == 2
assert expected == actual
assert expected == actual_two
def test_env_runs_on(self):
expected = ['fake_hostname3']
actual = service_configuration_lib.all_nodes_that_run_in_env(
'fake_service3',
'fake_env1',
service_configuration=self.fake_service_configuration,
)
assert expected == actual
expected = ['fake_hostname4', 'fake_hostname5']
actual = service_configuration_lib.all_nodes_that_run_in_env(
'fake_service3',
'fake_env2',
service_configuration=self.fake_service_configuration,
)
assert expected == actual
def test_bad_port_get_service_from_port(self):
'Test for bad inputs'
service_name = service_configuration_lib.get_service_from_port(None)
assert service_name is None
service_name = service_configuration_lib.get_service_from_port({})
assert service_name is None
def test_valid_port_get_service_from_port(self):
'Test that if there is a service for that port it returns it'
all_services = {
'Other Service': {
'port': 2352,
},
'Service 23': {
'port': 656,
},
'Test Service': {
'port': 100,
},
'Smart Service': {
'port': 345,
'smartstack': {
'main': {
'proxy_port': 3444,
},
},
},
'Service 36': {
'port': 636,
},
}
found_service_name = service_configuration_lib.get_service_from_port(100, all_services)
assert found_service_name == 'Test Service'
found_service_name = service_configuration_lib.get_service_from_port(3444, all_services)
assert found_service_name == 'Smart Service'
|
|
"""SCons.Platform.posix
Platform-specific initialization for POSIX (Linux, UNIX, etc.) systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import errno
import os
import os.path
import subprocess
import sys
import select
import SCons.Util
from SCons.Platform import TempFileMunge
exitvalmap = {
2 : 127,
13 : 126,
}
def escape(arg):
"escape shell special characters"
slash = '\\'
special = '"$()'
arg = arg.replace(slash, slash+slash)
for c in special:
arg = arg.replace(c, slash+c)
return '"' + arg + '"'
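# A quick illustration (added for clarity, not in the original module):
# escape('echo "$HOME"') returns the string
#     "echo \"\$HOME\""
# i.e. backslashes are doubled first, then ", $, ( and ) are each
# backslash-escaped, and the whole argument is wrapped in double quotes.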
def exec_system(l, env):
stat = os.system(' '.join(l))
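    # os.system() returns a raw wait status: the low byte holds the signal
    # that killed the process (if any) and the high byte the exit code.
    # A signal death is flagged by setting the high bit; a normal exit
    # returns the exit code taken from the high byte.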
if stat & 0xff:
return stat | 0x80
return stat >> 8
def exec_spawnvpe(l, env):
stat = os.spawnvpe(os.P_WAIT, l[0], l, env)
# os.spawnvpe() returns the actual exit code, not the encoding
# returned by os.waitpid() or os.system().
return stat
def exec_fork(l, env):
pid = os.fork()
if not pid:
# Child process.
exitval = 127
try:
os.execvpe(l[0], l, env)
except OSError, e:
exitval = exitvalmap.get(e[0], e[0])
sys.stderr.write("scons: %s: %s\n" % (l[0], e[1]))
os._exit(exitval)
else:
# Parent process.
pid, stat = os.waitpid(pid, 0)
if stat & 0xff:
return stat | 0x80
return stat >> 8
def _get_env_command(sh, escape, cmd, args, env):
s = ' '.join(args)
if env:
l = ['env', '-'] + \
[escape(t[0])+'='+escape(t[1]) for t in env.items()] + \
[sh, '-c', escape(s)]
s = ' '.join(l)
return s
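# A quick illustration (added for clarity, not in the original module):
# with env={'FOO': 'bar'}, sh='/bin/sh' and args=['echo', 'hi'] this helper
# builds
#     env - "FOO"="bar" /bin/sh -c "echo hi"
# so the command runs under a clean environment containing only the
# variables that were passed in.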
def env_spawn(sh, escape, cmd, args, env):
return exec_system([_get_env_command( sh, escape, cmd, args, env)], env)
def spawnvpe_spawn(sh, escape, cmd, args, env):
return exec_spawnvpe([sh, '-c', ' '.join(args)], env)
def fork_spawn(sh, escape, cmd, args, env):
return exec_fork([sh, '-c', ' '.join(args)], env)
def process_cmd_output(cmd_stdout, cmd_stderr, stdout, stderr):
stdout_eof = stderr_eof = 0
while not (stdout_eof and stderr_eof):
try:
(i,o,e) = select.select([cmd_stdout, cmd_stderr], [], [])
if cmd_stdout in i:
str = cmd_stdout.read()
if len(str) == 0:
stdout_eof = 1
elif stdout is not None:
stdout.write(str)
if cmd_stderr in i:
str = cmd_stderr.read()
if len(str) == 0:
#sys.__stderr__.write( "stderr_eof=1\n" )
stderr_eof = 1
else:
#sys.__stderr__.write( "str(stderr) = %s\n" % str )
stderr.write(str)
except select.error, (_errno, _strerror):
if _errno != errno.EINTR:
raise
def exec_popen3(l, env, stdout, stderr):
proc = subprocess.Popen(' '.join(l),
stdout=stdout,
stderr=stderr,
shell=True)
stat = proc.wait()
if stat & 0xff:
return stat | 0x80
return stat >> 8
def exec_piped_fork(l, env, stdout, stderr):
# spawn using fork / exec and providing a pipe for the command's
# stdout / stderr stream
if stdout != stderr:
(rFdOut, wFdOut) = os.pipe()
(rFdErr, wFdErr) = os.pipe()
else:
(rFdOut, wFdOut) = os.pipe()
rFdErr = rFdOut
wFdErr = wFdOut
# do the fork
pid = os.fork()
if not pid:
# Child process
os.close( rFdOut )
if rFdOut != rFdErr:
os.close( rFdErr )
os.dup2( wFdOut, 1 ) # is there some symbolic way to do that ?
os.dup2( wFdErr, 2 )
os.close( wFdOut )
if stdout != stderr:
os.close( wFdErr )
exitval = 127
try:
os.execvpe(l[0], l, env)
except OSError, e:
exitval = exitvalmap.get(e[0], e[0])
stderr.write("scons: %s: %s\n" % (l[0], e[1]))
os._exit(exitval)
else:
# Parent process
pid, stat = os.waitpid(pid, 0)
os.close( wFdOut )
if stdout != stderr:
os.close( wFdErr )
childOut = os.fdopen( rFdOut )
if stdout != stderr:
childErr = os.fdopen( rFdErr )
else:
childErr = childOut
process_cmd_output(childOut, childErr, stdout, stderr)
os.close( rFdOut )
if stdout != stderr:
os.close( rFdErr )
if stat & 0xff:
return stat | 0x80
return stat >> 8
def piped_env_spawn(sh, escape, cmd, args, env, stdout, stderr):
# spawn using Popen3 combined with the env command
# the command name and the command's stdout is written to stdout
# the command's stderr is written to stderr
return exec_popen3([_get_env_command(sh, escape, cmd, args, env)],
env, stdout, stderr)
def piped_fork_spawn(sh, escape, cmd, args, env, stdout, stderr):
# spawn using fork / exec and providing a pipe for the command's
# stdout / stderr stream
return exec_piped_fork([sh, '-c', ' '.join(args)],
env, stdout, stderr)
def generate(env):
# If os.spawnvpe() exists, we use it to spawn commands. Otherwise
# if the env utility exists, we use os.system() to spawn commands,
# finally we fall back on os.fork()/os.exec().
#
    # os.spawnvpe() is preferred because it is the most efficient. But
    # for Python versions without it, os.system() is preferred because it
# is claimed that it works better with threads (i.e. -j) and is more
# efficient than forking Python.
#
# NB: Other people on the scons-users mailing list have claimed that
# os.fork()/os.exec() works better than os.system(). There may just
# not be a default that works best for all users.
if 'spawnvpe' in os.__dict__:
spawn = spawnvpe_spawn
elif env.Detect('env'):
spawn = env_spawn
else:
spawn = fork_spawn
if env.Detect('env'):
pspawn = piped_env_spawn
else:
pspawn = piped_fork_spawn
if 'ENV' not in env:
env['ENV'] = {}
env['ENV']['PATH'] = '/usr/local/bin:/opt/bin:/bin:/usr/bin'
env['OBJPREFIX'] = ''
env['OBJSUFFIX'] = '.o'
env['SHOBJPREFIX'] = '$OBJPREFIX'
env['SHOBJSUFFIX'] = '$OBJSUFFIX'
env['PROGPREFIX'] = ''
env['PROGSUFFIX'] = ''
env['LIBPREFIX'] = 'lib'
env['LIBSUFFIX'] = '.a'
env['SHLIBPREFIX'] = '$LIBPREFIX'
env['SHLIBSUFFIX'] = '.so'
env['LIBPREFIXES'] = [ '$LIBPREFIX' ]
env['LIBSUFFIXES'] = [ '$LIBSUFFIX', '$SHLIBSUFFIX' ]
env['PSPAWN'] = pspawn
env['SPAWN'] = spawn
env['SHELL'] = 'sh'
env['ESCAPE'] = escape
env['TEMPFILE'] = TempFileMunge
env['TEMPFILEPREFIX'] = '@'
    #Based on Linux: ARG_MAX=131072 - 3000 reserved for environment expansion
    #Note: specific platforms might raise or lower this value
env['MAXLINELENGTH'] = 128072
# This platform supports RPATH specifications.
env['__RPATH'] = '$_RPATH'
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
from textwrap import dedent
import mock
from pants.build_graph.target import Target
from pants_test.task_test_base import TaskTestBase
from pants.contrib.node.subsystems.resolvers.node_preinstalled_module_resolver import \
NodePreinstalledModuleResolver
from pants.contrib.node.subsystems.resolvers.npm_resolver import NpmResolver
from pants.contrib.node.targets.node_module import NodeModule
from pants.contrib.node.targets.node_preinstalled_module import NodePreinstalledModule
from pants.contrib.node.targets.node_remote_module import NodeRemoteModule
from pants.contrib.node.tasks.node_paths import NodePaths
from pants.contrib.node.tasks.node_resolve import NodeResolve
class NodeResolveTest(TaskTestBase):
@classmethod
def task_type(cls):
return NodeResolve
def setUp(self):
super(NodeResolveTest, self).setUp()
NodeResolve.register_resolver_for_type(NodePreinstalledModule, NodePreinstalledModuleResolver)
NodeResolve.register_resolver_for_type(NodeModule, NpmResolver)
def tearDown(self):
super(NodeResolveTest, self).tearDown()
NodeResolve._clear_resolvers()
def test_register_resolver_for_type(self):
NodeResolve._clear_resolvers()
self.assertIsNone(NodeResolve._resolver_for_target(NodePreinstalledModule))
self.assertIsNone(NodeResolve._resolver_for_target(NodeModule))
node_preinstalled__module_target = self.make_target(
spec=':empty_fake_node_preinstalled_module_target',
target_type=NodePreinstalledModule)
NodeResolve.register_resolver_for_type(NodePreinstalledModule, NodePreinstalledModuleResolver)
self.assertEqual(NodePreinstalledModuleResolver,
NodeResolve._resolver_for_target(node_preinstalled__module_target))
node_module_target = self.make_target(spec=':empty_fake_node_module_target',
target_type=NodeModule)
NodeResolve.register_resolver_for_type(NodeModule, NpmResolver)
self.assertEqual(NpmResolver,
NodeResolve._resolver_for_target(node_module_target))
def test_product_types(self):
self.assertEqual([NodePaths], NodeResolve.product_types())
def test_noop(self):
task = self.create_task(self.context())
task.execute()
def test_noop_na(self):
target = self.make_target(spec=':not_a_node_target', target_type=Target)
task = self.create_task(self.context(target_roots=[target]))
task.execute()
def test_resolve_simple(self):
typ = self.make_target(spec='3rdparty/node:typ', target_type=NodeRemoteModule, version='0.6.3')
self.create_file('src/node/util/package.json', contents=dedent("""
{
"name": "util",
"version": "0.0.1"
}
"""))
self.create_file('src/node/util/util.js', contents=dedent("""
var typ = require('typ');
console.log("type of boolean is: " + typ.BOOLEAN);
"""))
target = self.make_target(spec='src/node/util',
target_type=NodeModule,
sources=['util.js', 'package.json'],
dependencies=[typ])
context = self.context(target_roots=[target], options={
'npm-resolver': {'install_optional': False}
})
task = self.create_task(context)
task.execute()
node_paths = context.products.get_data(NodePaths)
node_path = node_paths.node_path(target)
self.assertIsNotNone(node_path)
script_path = os.path.join(node_path, 'util.js')
out = task.node_distribution.node_command(args=[script_path]).check_output()
self.assertIn('type of boolean is: boolean', out)
def test_resolve_simple_graph(self):
typ1 = self.make_target(spec='3rdparty/node:typ1',
target_type=NodeRemoteModule,
package_name='typ',
version='0.6.x')
typ2 = self.make_target(spec='3rdparty/node:typ2',
target_type=NodeRemoteModule,
package_name='typ',
version='0.6.1')
self.create_file('src/node/util/package.json', contents=dedent("""
{
"name": "util",
"version": "0.0.1"
}
"""))
self.create_file('src/node/util/typ.js', contents=dedent("""
var typ = require('typ');
module.exports = {
BOOL: typ.BOOLEAN
};
"""))
util = self.make_target(spec='src/node/util',
target_type=NodeModule,
sources=['typ.js', 'package.json'],
dependencies=[typ1])
self.create_file('src/node/leaf/package.json', contents=dedent("""
{
"name": "leaf",
"version": "0.0.1"
}
"""))
self.create_file('src/node/leaf/leaf.js', contents=dedent("""
var typ = require('typ');
var util_typ = require('util/typ');
console.log("type of boolean is: " + typ.BOOLEAN);
console.log("type of bool is: " + util_typ.BOOL);
"""))
leaf = self.make_target(spec='src/node/leaf',
target_type=NodeModule,
sources=['leaf.js', 'package.json'],
dependencies=[util, typ2])
context = self.context(target_roots=[leaf], options={
'npm-resolver': {'install_optional': False}
})
task = self.create_task(context)
task.execute()
node_paths = context.products.get_data(NodePaths)
self.assertIsNotNone(node_paths.node_path(util))
node_path = node_paths.node_path(leaf)
self.assertIsNotNone(node_paths.node_path(leaf))
# Verify the 'typ' package is not duplicated under leaf. The target dependency tree is:
# leaf
# typ2 (0.6.1)
# util
# typ1 (0.6.x)
# If we install leaf normally, NPM will install the typ2 target (typ version 0.6.1) at the top
# level under leaf, and then not install the typ1 target (typ version 0.6.x) under util
# because the dependency is already satisfied.
typ_packages = []
for root, _, files in os.walk(node_path):
for f in files:
if 'package.json' == f:
with open(os.path.join(root, f)) as fp:
package = json.load(fp)
if 'typ' == package['name']:
typ_packages.append(os.path.relpath(os.path.join(root, f), node_path))
self.assertEqual(1, len(typ_packages),
'Expected to find exactly 1 de-duped `typ` package, but found these:'
'\n\t{}'.format('\n\t'.join(sorted(typ_packages))))
script_path = os.path.join(node_path, 'leaf.js')
out = task.node_distribution.node_command(args=[script_path]).check_output()
lines = {line.strip() for line in out.splitlines()}
self.assertIn('type of boolean is: boolean', lines)
self.assertIn('type of bool is: boolean', lines)
def test_resolve_preserves_package_json(self):
self.create_file('src/node/util/package.json', contents=dedent("""
{
"name": "util",
"version": "0.0.1"
}
"""))
util = self.make_target(spec='src/node/util',
target_type=NodeModule,
sources=['package.json'],
dependencies=[])
self.create_file('src/node/scripts_project/package.json', contents=dedent("""
{
"name": "scripts_project",
"version": "1.2.3",
"dependencies": { "A": "file://A" },
"devDependencies": { "B": "file://B" },
"peerDependencies": { "C": "file://C" },
"optionalDependencies": { "D": "file://D" },
"scripts": {
"test": "mocha */dist.js"
}
}
"""))
scripts_project = self.make_target(spec='src/node/scripts_project',
target_type=NodeModule,
sources=['package.json'],
dependencies=[util])
context = self.context(target_roots=[scripts_project], options={
'npm-resolver': {'install_optional': False}
})
task = self.create_task(context)
task.execute()
node_paths = context.products.get_data(NodePaths)
node_path = node_paths.node_path(scripts_project)
self.assertIsNotNone(node_paths.node_path(scripts_project))
package_json_path = os.path.join(node_path, 'package.json')
with open(package_json_path) as fp:
package = json.load(fp)
self.assertEqual('scripts_project', package['name'],
'Expected to find package name of `scripts_project`, but found: {}'
.format(package['name']))
self.assertEqual('1.2.3', package['version'],
'Expected to find package version of `1.2.3`, but found: {}'
.format(package['version']))
self.assertEqual('mocha */dist.js', package['scripts']['test'],
'Expected to find package test script of `mocha */dist.js`, but found: {}'
.format(package['scripts']['test']))
self.assertEqual(node_paths.node_path(util), package['dependencies']['util'])
self.assertNotIn('A', package['dependencies'])
self.assertNotIn('devDependencies', package)
self.assertNotIn('peerDependencies', package)
self.assertNotIn('optionalDependencies', package)
def _test_resolve_optional_install_helper(
self, install_optional, package_manager, expected_params):
self.create_file('src/node/util/package.json', contents=dedent("""
{
"name": "util",
"version": "0.0.1"
}
"""))
self.create_file('src/node/util/util.js', contents=dedent("""
var typ = require('typ');
console.log("type of boolean is: " + typ.BOOLEAN);
"""))
# yarn execution path requires yarn.lock
self.create_file('src/node/util/yarn.lock', contents='')
target = self.make_target(spec='src/node/util',
target_type=NodeModule,
sources=['util.js', 'package.json', 'yarn.lock'],
dependencies=[],
package_manager=package_manager)
context = self.context(target_roots=[target], options={
'npm-resolver': {'install_optional': install_optional}
})
task = self.create_task(context)
package_manager_obj = task.get_package_manager(target=target)
with mock.patch.object(package_manager_obj, 'run_command') as exec_call:
exec_call.return_value.run.return_value.wait.return_value = 0
task.execute()
exec_call.assert_called_once_with(
args=expected_params,
node_paths=None)
def test_resolve_default_no_optional_install_npm(self):
self._test_resolve_optional_install_helper(
install_optional=False,
package_manager='npm',
expected_params=['install', '--no-optional'])
def test_resolve_optional_install_npm(self):
self._test_resolve_optional_install_helper(
install_optional=True,
package_manager='npm',
expected_params=['install'])
def test_resolve_default_no_optional_install_yarn(self):
self._test_resolve_optional_install_helper(
install_optional=False,
package_manager='yarnpkg',
expected_params=['--non-interactive', '--ignore-optional', '--frozen-lockfile'])
def test_resolve_optional_install_yarn(self):
self._test_resolve_optional_install_helper(
install_optional=True,
package_manager='yarnpkg',
expected_params=['--non-interactive', '--frozen-lockfile'])
|
|
#!/usr/bin/env python
#
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
# Forked by: Mike McKerns (December 2013)
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2013-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/pathos/browser/klepto/LICENSE
"""
Utilities for fast persistence of big data, with optional compression.
"""
import traceback
import sys
import os
import zlib
import warnings
import dill # add some dill magic to pickle
import pickle
try:
_basestring = basestring
except NameError:
_basestring = str
try:
from io import BytesIO
except ImportError: # support python2.5
from StringIO import StringIO as BytesIO
if sys.version_info[0] >= 3:
Unpickler = pickle._Unpickler
Pickler = pickle._Pickler
def asbytes(s):
if isinstance(s, bytes):
return s
return s.encode('latin1')
else:
Unpickler = pickle.Unpickler
Pickler = pickle.Pickler
asbytes = str
_MEGA = 2 ** 20
_MAX_LEN = len(hex(2 ** 64))
# To detect file types
_ZFILE_PREFIX = asbytes('ZF')
###############################################################################
# Compressed file with Zlib
def _read_magic(file_handle):
""" Utility to check the magic signature of a file identifying it as a
Zfile
"""
magic = file_handle.read(len(_ZFILE_PREFIX))
# Pickling needs file-handles at the beginning of the file
file_handle.seek(0)
return magic
def read_zfile(file_handle):
"""Read the z-file and return the content as a string
Z-files are raw data compressed with zlib used internally
for persistence. Backward compatibility is not guaranteed. Do not
use for external purposes.
"""
file_handle.seek(0)
assert _read_magic(file_handle) == _ZFILE_PREFIX, \
"File does not have the right magic"
length = file_handle.read(len(_ZFILE_PREFIX) + _MAX_LEN)
length = length[len(_ZFILE_PREFIX):]
length = int(length, 16)
# We use the known length of the data to tell Zlib the size of the
# buffer to allocate.
data = zlib.decompress(file_handle.read(), 15, length)
assert len(data) == length, (
"Incorrect data length while decompressing %s."
"The file could be corrupted." % file_handle)
return data
def write_zfile(file_handle, data, compress=1):
"""Write the data in the given file as a Z-file.
Z-files are raw data compressed with zlib used internally
    for persistence. Backward compatibility is not guaranteed. Do not
use for external purposes.
"""
file_handle.write(_ZFILE_PREFIX)
length = hex(len(data))
if sys.version_info[0] < 3 and type(length) is long:
# We need to remove the trailing 'L' in the hex representation
length = length[:-1]
# Store the length of the data
file_handle.write(asbytes(length.ljust(_MAX_LEN)))
file_handle.write(zlib.compress(asbytes(data), compress))
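# A Z-file written this way therefore consists of the b'ZF' magic prefix,
# the payload length as a hex string left-justified to _MAX_LEN bytes, and
# the zlib-compressed payload itself (comment added for clarity).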
###############################################################################
# Utility objects for persistence.
class NDArrayWrapper(object):
""" An object to be persisted instead of numpy arrays.
The only thing this object does, is to carry the filename in which
the array has been persisted, and the array subclass.
"""
def __init__(self, filename, subclass):
"Store the useful information for later"
self.filename = filename
self.subclass = subclass
def read(self, unpickler):
"Reconstruct the array"
filename = os.path.join(unpickler._dirname, self.filename)
# Load the array from the disk
if unpickler.np.__version__ >= '1.3':
array = unpickler.np.load(filename,
mmap_mode=unpickler.mmap_mode)
else:
# Numpy does not have mmap_mode before 1.3
array = unpickler.np.load(filename)
# Reconstruct subclasses. This does not work with old
# versions of numpy
if (hasattr(array, '__array_prepare__')
and not self.subclass in (unpickler.np.ndarray,
unpickler.np.memmap)):
# We need to reconstruct another subclass
new_array = unpickler.np.core.multiarray._reconstruct(
self.subclass, (0,), 'b')
new_array.__array_prepare__(array)
array = new_array
return array
#def __reduce__(self):
# return None
class ZNDArrayWrapper(NDArrayWrapper):
"""An object to be persisted instead of numpy arrays.
    This object stores the Zfile filename in which
the data array has been persisted, and the meta information to
retrieve it.
The reason that we store the raw buffer data of the array and
the meta information, rather than array representation routine
(tostring) is that it enables us to use completely the strided
model to avoid memory copies (a and a.T store as fast). In
addition saving the heavy information separately can avoid
creating large temporary buffers when unpickling data with
large arrays.
"""
def __init__(self, filename, init_args, state):
"Store the useful information for later"
self.filename = filename
self.state = state
self.init_args = init_args
def read(self, unpickler):
"Reconstruct the array from the meta-information and the z-file"
        # Here we are simply reproducing the unpickling mechanism for numpy
# arrays
filename = os.path.join(unpickler._dirname, self.filename)
array = unpickler.np.core.multiarray._reconstruct(*self.init_args)
data = read_zfile(open(filename, 'rb'))
state = self.state + (data,)
array.__setstate__(state)
return array
###############################################################################
# Pickler classes
class NumpyPickler(Pickler):
    """A pickler to persist big data efficiently.
The main features of this object are:
* persistence of numpy arrays in separate .npy files, for which
I/O is fast.
        * optional compression using Zlib, with special care taken to avoid
temporaries.
"""
def __init__(self, filename, compress=0, cache_size=10):
self._filename = filename
self._filenames = [filename, ]
self.cache_size = cache_size
self.compress = compress
if not self.compress:
self.file = open(filename, 'wb')
else:
self.file = BytesIO()
# Count the number of npy files that we have created:
self._npy_counter = 0
Pickler.__init__(self, self.file,
protocol=dill.DEFAULT_PROTOCOL)
# delayed import of numpy, to avoid tight coupling
try:
import numpy as np
except ImportError:
np = None
self.np = np
def _write_array(self, array, filename):
if not self.compress:
self.np.save(filename, array)
container = NDArrayWrapper(os.path.basename(filename),
type(array))
else:
filename += '.z'
# Efficient compressed storage:
# The meta data is stored in the container, and the core
# numerics in a z-file
_, init_args, state = array.__reduce__()
# the last entry of 'state' is the data itself
zfile = open(filename, 'wb')
write_zfile(zfile, state[-1],
compress=self.compress)
zfile.close()
state = state[:-1]
container = ZNDArrayWrapper(os.path.basename(filename),
init_args, state)
return container, filename
def save(self, obj):
""" Subclass the save method, to save ndarray subclasses in npy
files, rather than pickling them. Of course, this is a
total abuse of the Pickler class.
"""
if self.np is not None and type(obj) in (self.np.ndarray,
self.np.matrix, self.np.memmap):
size = obj.size * obj.itemsize
if self.compress and size < self.cache_size * _MEGA:
# When compressing, as we are not writing directly to the
# disk, it is more efficient to use standard pickling
if type(obj) is self.np.memmap:
# Pickling doesn't work with memmaped arrays
obj = self.np.asarray(obj) #FIXME: really? test this...
return Pickler.save(self, obj)
self._npy_counter += 1
try:
filename = '%s_%02i.npy' % (self._filename,
self._npy_counter)
# This converts the array in a container
obj, filename = self._write_array(obj, filename)
self._filenames.append(filename)
except:
self._npy_counter -= 1
# XXX: We should have a logging mechanism
print('Failed to save %s to .npy file:\n%s' % (
type(obj),
traceback.format_exc()))
return Pickler.save(self, obj)
def close(self):
if self.compress:
zfile = open(self._filename, 'wb')
write_zfile(zfile,
self.file.getvalue(), self.compress)
zfile.close()
class NumpyUnpickler(Unpickler):
"""A subclass of the Unpickler to unpickle our numpy pickles.
"""
dispatch = Unpickler.dispatch.copy()
def __init__(self, filename, file_handle, mmap_mode=None):
self._filename = os.path.basename(filename)
self._dirname = os.path.dirname(filename)
self.mmap_mode = mmap_mode
self.file_handle = self._open_pickle(file_handle)
Unpickler.__init__(self, self.file_handle)
try:
import numpy as np
except ImportError:
np = None
self.np = np
def _open_pickle(self, file_handle):
return file_handle
def load_build(self):
""" This method is called to set the state of a newly created
object.
We capture it to replace our place-holder objects,
NDArrayWrapper, by the array we are interested in. We
replace them directly in the stack of pickler.
"""
Unpickler.load_build(self)
if isinstance(self.stack[-1], NDArrayWrapper):
if self.np is None:
raise ImportError('Trying to unpickle an ndarray, '
"but numpy didn't import correctly")
nd_array_wrapper = self.stack.pop()
array = nd_array_wrapper.read(self)
self.stack.append(array)
# Be careful to register our new method.
if sys.version_info[0] >= 3:
dispatch[pickle.BUILD[0]] = load_build
else:
dispatch[pickle.BUILD] = load_build
class ZipNumpyUnpickler(NumpyUnpickler):
"""A subclass of our Unpickler to unpickle on the fly from
compressed storage."""
def __init__(self, filename, file_handle):
NumpyUnpickler.__init__(self, filename,
file_handle,
mmap_mode=None)
def _open_pickle(self, file_handle):
return BytesIO(read_zfile(file_handle))
###############################################################################
# Utility functions
def dump(value, filename, compress=0, cache_size=100):
    """Fast persistence of an arbitrary Python object into a file, with
dedicated storage for numpy arrays.
Parameters
-----------
value: any Python object
The object to store to disk
filename: string
The name of the file in which it is to be stored
    compress: integer from 0 to 9, optional
Optional compression level for the data. 0 is no compression.
Higher means more compression, but also slower read and
write times. Using a value of 3 is often a good compromise.
See the notes for more details.
cache_size: positive number, optional
Fixes the order of magnitude (in megabytes) of the cache used
for in-memory compression. Note that this is just an order of
magnitude estimate and that for big arrays, the code will go
over this value at dump and at load time.
Returns
-------
filenames: list of strings
The list of file names in which the data is stored. If
compress is false, each array is stored in a different file.
See Also
--------
load : corresponding loader
Notes
-----
Memmapping on load cannot be used for compressed files. Thus
using compression can significantly slow down loading. In
    addition, compressed files take extra memory during
dump and load.
"""
if compress is True:
    # If compress is passed as True, default to compression level 3
compress = 3
if not isinstance(filename, _basestring):
# People keep inverting arguments, and the resulting error is
# incomprehensible
raise ValueError(
'Second argument should be a filename, %s (type %s) was given'
% (filename, type(filename))
)
try:
pickler = NumpyPickler(filename, compress=compress,
cache_size=cache_size)
pickler.dump(value)
pickler.close()
finally:
if 'pickler' in locals() and hasattr(pickler, 'file'):
pickler.file.flush()
pickler.file.close()
return pickler._filenames
def load(filename, mmap_mode=None):
    """Reconstruct a Python object from a file persisted with dump.
Parameters
-----------
filename: string
The name of the file from which to load the object
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, the arrays are memory-mapped from the disk. This
        mode has no effect for compressed files. Note that in this
        case the reconstructed object might no longer match exactly
the originally pickled object.
Returns
-------
result: any Python object
The object stored in the file.
See Also
--------
dump : function to save an object
Notes
-----
This function can load numpy array files saved separately during the
dump. If the mmap_mode argument is given, it is passed to np.load and
arrays are loaded as memmaps. As a consequence, the reconstructed
object might not match the original pickled object. Note that if the
file was saved with compression, the arrays cannot be memmaped.
"""
file_handle = open(filename, 'rb')
# We are careful to open the file handle early and keep it open to
# avoid race-conditions on renames. That said, if data are stored in
# companion files, moving the directory will create a race when
# trying to access the companion files.
if _read_magic(file_handle) == _ZFILE_PREFIX:
if mmap_mode is not None:
warnings.warn('file "%(filename)s" appears to be a zip, '
'ignoring mmap_mode "%(mmap_mode)s" flag passed'
% locals(), Warning, stacklevel=2)
unpickler = ZipNumpyUnpickler(filename, file_handle=file_handle)
else:
unpickler = NumpyUnpickler(filename,
file_handle=file_handle,
mmap_mode=mmap_mode)
try:
obj = unpickler.load()
finally:
if hasattr(unpickler, 'file_handle'):
unpickler.file_handle.close()
return obj
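# --- Illustrative usage sketch (added for clarity, not part of the original
# module). A minimal round trip through the dump()/load() helpers defined
# above; the temporary file name is an assumption for the example only.
if __name__ == '__main__':
    import tempfile
    _demo_dir = tempfile.mkdtemp()
    _demo_file = os.path.join(_demo_dir, 'demo.pkl')
    _payload = {'answer': 42, 'values': list(range(5))}
    # compress=3 stores the pickle as a zlib-compressed Z-file
    _written = dump(_payload, _demo_file, compress=3)
    print('files written: %s' % _written)
    assert load(_demo_file) == _payload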
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime, ForeignKey
from sqlalchemy import Integer, MetaData, String, Table, Text
from nova import log as logging
meta = MetaData()
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
instances = Table('instances', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
services = Table('services', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
networks = Table('networks', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
volumes = Table('volumes', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# New Tables
#
certificates = Table('certificates', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('user_id',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('project_id',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('file_name',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
consoles = Table('consoles', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_name',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('instance_id', Integer()),
Column('password',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('port', Integer(), nullable=True),
Column('pool_id',
Integer(),
ForeignKey('console_pools.id')),
)
console_pools = Table('console_pools', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('address',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('username',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('password',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('console_type',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('public_hostname',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('host',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('compute_host',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
instance_actions = Table('instance_actions', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_id',
Integer(),
ForeignKey('instances.id')),
Column('action',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('error',
Text(length=None, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
iscsi_targets = Table('iscsi_targets', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('target_num', Integer()),
Column('host',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('volume_id',
Integer(),
ForeignKey('volumes.id'),
nullable=True),
)
#
# Tables to alter
#
auth_tokens = Table('auth_tokens', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('token_hash',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
primary_key=True,
nullable=False),
Column('user_id', Integer()),
Column('server_manageent_url',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('storage_url',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('cdn_management_url',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
instances_availability_zone = Column(
'availability_zone',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
instances_locked = Column('locked',
Boolean(create_constraint=True, name=None))
networks_cidr_v6 = Column(
'cidr_v6',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
networks_ra_server = Column(
'ra_server',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
services_availability_zone = Column(
'availability_zone',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
tables = [certificates, console_pools, consoles, instance_actions,
iscsi_targets]
for table in tables:
try:
table.create()
except Exception:
logging.info(repr(table))
logging.exception('Exception while creating table')
meta.drop_all(tables=tables)
raise
auth_tokens.c.user_id.alter(type=String(length=255,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False))
instances.create_column(instances_availability_zone)
instances.create_column(instances_locked)
networks.create_column(networks_cidr_v6)
networks.create_column(networks_ra_server)
services.create_column(services_availability_zone)
|
|
import time
import RPi.GPIO as GPIO
import matplotlib.pyplot as plt
# Use BCM GPIO references
# instead of physical pin numbers
GPIO.setmode(GPIO.BCM)
class MotorControl:
def __init__(self):
#specify params
self.forwardSpeed=30
self.turnSpeed=40
self.maxSpeed=1.7*self.forwardSpeed
self.turnTime=0.25#0.32 1.71
self.wallDist=5
self.errorOld=0
self.errorIntegrated=0
'''
#working PD control!!!
self.P=0.1
self.D=3.5
'''
self.P=0.15
self.I=0.00
self.D=15
#evaluation
self.errorVec=[]
self.urVec=[]
self.totalErr=0
self.count=0
#init all GPIO pins
# vel=forward (A), dir=backward (B)
'''
self.velLeftPin=9 #pin21
self.dirLeftPin=25 #pin22
self.velRightPin=11 #pin23
self.dirRightPin=8 #pin24
'''
self.velRightPin=25 #pin22
self.dirRightPin=9 #pin21
self.velLeftPin=8 #pin24
self.dirLeftPin=11 #pin23
GPIO.setup(self.velLeftPin,GPIO.OUT)
GPIO.setup(self.dirLeftPin,GPIO.OUT)
GPIO.setup(self.velRightPin,GPIO.OUT)
GPIO.setup(self.dirRightPin,GPIO.OUT)
# init PWM pins
self.pwmLeftA=GPIO.PWM(self.velLeftPin, 500) # 500Hz PWM
self.pwmRightA=GPIO.PWM(self.velRightPin, 500)
self.pwmLeftA.start(0)
self.pwmRightA.start(0)
self.pwmLeftB=GPIO.PWM(self.dirLeftPin, 500) # 500Hz PWM
self.pwmRightB=GPIO.PWM(self.dirRightPin, 500)
self.pwmLeftB.start(0)
self.pwmRightB.start(0)
#stop all wheels
def stop(self):
self.pwmLeftA.ChangeDutyCycle(0)
self.pwmRightA.ChangeDutyCycle(0)
self.pwmLeftB.ChangeDutyCycle(0)
self.pwmRightB.ChangeDutyCycle(0)
#make a hard stop on all wheels
def stopHard(self):
self.pwmLeftA.ChangeDutyCycle(100)
self.pwmRightA.ChangeDutyCycle(100)
self.pwmLeftB.ChangeDutyCycle(100)
self.pwmRightB.ChangeDutyCycle(100)
time.sleep(0.5)
self.stop()
'''
def moveForward(self,ul,ur,uf):
self.pwmLeftA.ChangeDutyCycle(self.forwardSpeed)
self.pwmRightA.ChangeDutyCycle(self.forwardSpeed)
if ul<(self.wallDist-0.5):
#turn left wheel more
self.pwmLeftA.ChangeDutyCycle(self.forwardSpeed*1.3)
print "going right!"
elif ul>(self.wallDist+0.5) and ul<50:
#turn right wheel more
self.pwmRightA.ChangeDutyCycle(self.forwardSpeed*1.3)
print "going left!"
def moveForwardControlled(self,ul,ur,uf):
self.pwmLeftA.ChangeDutyCycle(self.forwardSpeed)
self.pwmRightA.ChangeDutyCycle(self.forwardSpeed)
#control loop if distance to wall is not appropriate
if ul<20:
error=ul-self.wallDist
if error<=0:
print ("going right with "+str(1+self.P*-error))
self.pwmLeftA.ChangeDutyCycle(min([self.forwardSpeed*(1+self.P*-error),self.maxSpeed,100]))
else:
print ("going left with "+str(1+self.P*error))
self.pwmRightA.ChangeDutyCycle(min([self.forwardSpeed*(1+self.P*error),self.maxSpeed,100]))
def moveForwardControlledPID(self,ul,ur,uf):
self.pwmLeftA.ChangeDutyCycle(self.forwardSpeed)
self.pwmRightA.ChangeDutyCycle(self.forwardSpeed)
#control loop if distance to wall is not appropriate
if ul<20:
error=ul-self.wallDist
self.errorIntegrated+=error
u=self.P*error+self.D*(error-self.errorOld)+self.I*self.errorIntegrated
if u<=0:
print ("going right with "+str((1-u)))
self.pwmLeftA.ChangeDutyCycle(min([self.forwardSpeed*(1-u),self.maxSpeed,100]))
else:
print ("going left with "+str((1+u)))
self.pwmRightA.ChangeDutyCycle(min([self.forwardSpeed*(1+u),self.maxSpeed,100]))
self.errorOld=error
'''
def moveForwardControlledPIDboth(self,ul,ur,uf):
self.pwmLeftA.ChangeDutyCycle(self.forwardSpeed)
self.pwmRightA.ChangeDutyCycle(self.forwardSpeed+3)
#control loop if distance to left wall is not appropriate
if ul<20 and ul >3:
error=ul-self.wallDist
### Evaluation
self.totalErr+=abs(error)
self.errorVec.append(error)
self.urVec.append(ur)
self.count+=1
###
self.errorIntegrated+=error
u=self.P*error+self.D*(error-self.errorOld)+self.I*self.errorIntegrated
if u<=0:
print ("going right with "+str((1-u)))
self.pwmLeftA.ChangeDutyCycle(min([self.forwardSpeed*(1-u),self.maxSpeed,100]))
else:
print ("going left with "+str((1+u)))
self.pwmRightA.ChangeDutyCycle(min([self.forwardSpeed*(1+u),self.maxSpeed,100]))
self.errorOld=error
time.sleep(0.07)
elif ur<20:
error=ur-self.wallDist
### Evaluation
self.totalErr+=abs(error)
self.errorVec.append(error)
self.count+=1
###
self.errorIntegrated+=error
u=self.P*error+self.D*(error-self.errorOld)+self.I*self.errorIntegrated
if u<=0:
print ("going left with "+str((1-u)))
self.pwmRightA.ChangeDutyCycle(min([self.forwardSpeed*(1-u),self.forwardSpeed*9.4,100]))
else:
print ("going right with "+str((1+u)))
self.pwmLeftA.ChangeDutyCycle(min([self.forwardSpeed*(1+u),self.forwardSpeed*9.4,100]))
self.errorOld=error
time.sleep(0.07)
else:
time.sleep(0.07)
#time.sleep(0.08)#time.sleep(0.05)
self.stop()
time.sleep(0.02)
# move both wheels backward
def moveBack(self):
self.pwmLeftB.ChangeDutyCycle(self.forwardSpeed)
self.pwmRightB.ChangeDutyCycle(self.forwardSpeed)
#move both wheels forward
def moveFront(self):
self.pwmLeftA.ChangeDutyCycle(self.forwardSpeed)
self.pwmRightA.ChangeDutyCycle(self.forwardSpeed+3)
#make a turn to the left
def turnLeft(self):
#stop both wheels
self.stop()
time.sleep(0.3)
#turn right wheel forward
self.pwmRightA.ChangeDutyCycle(self.turnSpeed)
#turn left wheel backward
self.pwmLeftB.ChangeDutyCycle(self.turnSpeed)
#wait
time.sleep(self.turnTime)
# stop both wheels
self.stop()
time.sleep(0.3)
#make a turn to the right
def turnRight(self):
#stop both wheels
self.stop()
time.sleep(0.3)
#turn right wheel backward
self.pwmRightB.ChangeDutyCycle(self.turnSpeed)
#turn left wheel forward
self.pwmLeftA.ChangeDutyCycle(self.turnSpeed)
#wait
time.sleep(self.turnTime)
# stop both wheels
self.stop()
time.sleep(0.3)
#make a 180 degree turn
def turnBack(self):
#stop both wheels
self.stop()
time.sleep(0.3)
#turn right wheel backward
self.pwmRightB.ChangeDutyCycle(self.turnSpeed)
#turn left wheel forward
self.pwmLeftA.ChangeDutyCycle(self.turnSpeed)
#wait long
time.sleep(1.8*self.turnTime)
# stop both wheels
self.stop()
time.sleep(0.3)
# function for plotting the distance of the robot to the wall
# this can be used for evaluation and parameter tuning
def plotError(self,errorVec,urVec):
zeroVec=[]
wallVec=[]
contVec=[]
for i in range(len(errorVec)):
zeroVec.append(0)
wallVec.append(self.wallDist)
contVec.append(i)
plt.plot(contVec,errorVec,contVec,zeroVec)#,contVec,urVec,contVec,wallVec)
plt.ylabel('error')
plt.xlabel('timestep')
plt.show()
def kill(self):
# Reset GPIO settings
self.stop()
GPIO.cleanup()
print ("Total average error is: "+str(self.totalErr/self.count))
#self.plotError(self.errorVec,self.urVec)
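# --- Illustrative usage sketch (added for clarity, not part of the original
# module). Assumes a Raspberry Pi with the motor driver wired as in
# __init__ above; the distance readings below stand in for real ultrasonic
# sensor values.
if __name__ == '__main__':
    motor = MotorControl()
    try:
        for _ in range(20):
            # left / right / front distances in cm (placeholder values)
            motor.moveForwardControlledPIDboth(ul=6, ur=25, uf=50)
        motor.turnLeft()
    finally:
        motor.kill()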
|
|
import os
import sys
from functools import partial
import traceback
import time
def short_repr(s):
v = repr(s)
if len(v) > 20:
return v[:12] + "..." + v[-5:]
return v
class Model:
def __init__(self, env, history):
self.files = {}
self.tasks = []
self.env = env
self.history = history
def apply_command(self, command):
command.apply_to_environment(self.env)
command.apply_to_model(self)
self.history.save_command(command)
def add_task(self, key, runner):
for item in list(self.tasks):
if item[0] == key:
self.tasks.remove(item)
self.tasks.append((key, runner))
def run_tasks(self):
while self.tasks:
runner = self.tasks.pop(0)[1]
try:
runner()
except:
print("Error running task", runner)
traceback.print_exc()
def on_open(self, router):
self.history.clean_commands()
commands = self.history.get_commands()
if not commands:
commands = self.env.init_commands()
for command in commands:
router.send(command)
class Command:
attrs = None
name = None
_id = None
def __init__(self, *, id=None):
self._id = id
@property
def id(self):
# FIXME: I'm not sure timestamps done lazily like this really gives
# the order we want, but... I guess?
if not self._id:
self._id = "c-%s" % time.time()
return self._id
@property
def asJson(self):
attrs = self.attrs or self.__dict__.keys()
attrs = [a for a in attrs if a != "_id"]
data = dict((attr, getattr(self, attr)) for attr in attrs)
data["command"] = self.name or self.__class__.__name__
data["id"] = self.id
return data
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
" ".join(
'%s=%s' % (name, short_repr(value)) for name, value in sorted(self.asJson.items())
if name != "command"),
)
def apply_to_model(self, model):
pass
def apply_to_environment(self, env):
pass
def scan_back(self, prev_commands):
pass
class FileEdit(Command):
def __init__(self, *, filename, content, external_edit=False, id=None):
super().__init__(id=id)
self.filename = filename
self.content = content
self.external_edit = external_edit
def apply_to_environment(self, env):
if self.external_edit:
# Then it is already applied to the environment
return
filename = os.path.abspath(os.path.join(env.path, self.filename))
if not filename.startswith(env.path):
raise Exception("Bad file: {} resolves to {}, not in base {}".format(self.filename, filename, env.path))
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, "w") as fp:
fp.write(self.content)
def apply_to_model(self, model):
if self.filename not in model.files:
model.files[self.filename] = {}
model.files[self.filename]["content"] = self.content
model.add_task(("analyze", self.filename), partial(model.env.analyze, self.filename, self.content))
def scan_back(self, commands):
for prev in reversed(commands):
if isinstance(prev, Execution) and prev.filename == self.filename:
break
elif isinstance(prev, FileEdit) and prev.filename == self.filename:
yield prev
elif isinstance(prev, Analysis) and prev.filename == self.filename:
yield prev
class FileDelete(Command):
def __init__(self, *, filename, external_edit=False, id=None):
super().__init__(id=id)
self.filename = filename
self.external_edit = external_edit
def apply_to_environment(self, env):
if self.external_edit:
return
filename = os.path.abspath(os.path.join(env.path, self.filename))
if not filename.startswith(env.path):
raise Exception("Bad file: {} resolves to {}, not in base {}".format(self.filename, filename, env.path))
if os.path.exists(filename):
os.unlink(filename)
else:
print("Warning: delete of file {} resolves to {}, which does not exists".format(
self.filename, filename))
def apply_to_model(self, model):
if self.filename in model.files:
del model.files[self.filename]
class ExecutionRequest(Command):
def __init__(self, *, filename, content, subexpressions=False, id=None):
super().__init__(id=id)
self.filename = filename
self.content = content
self.subexpressions = subexpressions
def apply_to_model(self, model):
model.add_task(("execute_request", self.filename), partial(model.env.execute, self.filename, self.content, self.subexpressions))
def scan_back(self, commands):
yield self
class Analysis(Command):
def __init__(self, *, filename, content, properties, id=None):
super().__init__(id=id)
self.filename = filename
self.content = content
self.properties = properties
def apply_to_model(self, model):
if self.filename not in model.files:
return
model.files[self.filename]["analysis"] = {
"content": self.content,
"properties": self.properties,
}
class Execution(Command):
def __init__(self, *, filename, content, emitted, defines, start_time, end_time, exec_time, with_subexpressions=False, id=None):
super().__init__(id=id)
self.filename = filename
self.content = content
self.emitted = emitted
self.defines = defines
self.start_time = start_time
self.end_time = end_time
self.exec_time = exec_time
self.with_subexpressions = with_subexpressions
def apply_to_model(self, model):
if self.filename not in model.files:
return
model.files[self.filename]["execution"] = {
"content": self.content,
"emitted": self.emitted,
"defines": self.defines,
}
no_default = ['NO DEFAULT']
def hydrate(data, *, if_invalid=no_default):
me = sys.modules[__name__]
command_name = data["command"]
CommandClass = getattr(me, command_name)
assert issubclass(CommandClass, Command)
assert CommandClass is not Command
del data["command"]
try:
command = CommandClass(**data)
    except TypeError:
        # Fall back to if_invalid when the stored data does not match the
        # command signature; otherwise re-raise the original error.
        if if_invalid is no_default:
            raise
        return if_invalid
return command
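# A minimal round-trip sketch (not part of the original module): build a
# FileEdit, serialize it via asJson and rebuild it with hydrate(). The
# filename and content are made-up example values.
if __name__ == "__main__":
    _edit = FileEdit(filename="notes.txt", content="hello world")
    _payload = dict(_edit.asJson)
    print("serialized:", _payload)
    print("restored:", hydrate(_payload))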
|
|
"""Parses GTFS feeds urls:
https://transit.land/ - Transitland
http://transitfeeds.com/feeds - Openmobilitydata
Crawls all the urls, loads feed zips and extracts to the specified directory."""
import argparse
import concurrent.futures
import io
import json
import logging
import os
import time
import zipfile
import requests
MAX_RETRIES = 3
AVG_SLEEP_TIMEOUT_S = 10
MAX_SLEEP_TIMEOUT_S = 30
URLS_FILE_TRANSITLAND = "feed_urls_transitland.txt"
URLS_FILE_OMD = "feed_urls_openmobilitydata.txt"
THREADS_COUNT = 2
MAX_INDEX_LEN = 4
HEADERS_OMD = {"Accept": "application/json"}
logger = logging.getLogger(__name__)
def get_feeds_links(data):
"""Extracts feed urls from the GTFS json description."""
gtfs_feeds_urls = []
for feed in data:
if feed["feed_format"] != "gtfs" or feed["spec"] != "gtfs":
# Warning about strange format - not gtfs and not real-time gtfs:
if feed["feed_format"] != "gtfs-rt":
logger.warning(f"Skipped feed: feed_format {feed['feed_format']}, spec {feed['spec']}")
continue
if "url" in feed and feed["url"] is not None and feed["url"]:
gtfs_feeds_urls.append(feed["url"])
return gtfs_feeds_urls
def parse_transitland_page(url):
"""Parses page with feeds list, extracts feeds urls and the next page url."""
retries = MAX_RETRIES
while retries > 0:
with requests.get(url) as response:
if response.status_code != 200:
logger.error(f"Failed loading feeds: {response.status_code}")
if response.status_code == 429:
logger.error("Too many requests.")
time.sleep(MAX_SLEEP_TIMEOUT_S)
else:
time.sleep(AVG_SLEEP_TIMEOUT_S)
retries -= 1
continue
data = json.loads(response.text)
if "feeds" in data:
gtfs_feeds_urls = get_feeds_links(data["feeds"])
else:
gtfs_feeds_urls = []
next_page = data["meta"]["next"] if "next" in data["meta"] else ""
return gtfs_feeds_urls, next_page
return [], ""
def extract_to_path(content, out_path):
"""Reads content as zip and extracts it to out_path."""
try:
archive = zipfile.ZipFile(io.BytesIO(content))
archive.extractall(path=out_path)
return True
except zipfile.BadZipfile:
logger.exception("BadZipfile exception.")
return False
def load_gtfs_feed_zip(path, url):
"""Downloads url-located zip and extracts it to path/index."""
retries = MAX_RETRIES
while retries > 0:
try:
with requests.get(url, stream=True) as response:
if response.status_code != 200:
logger.error(f"HTTP code {response.status_code} loading gtfs {url}")
retries -= 1
time.sleep(MAX_SLEEP_TIMEOUT_S)
continue
if not extract_to_path(response.content, path):
retries -= 1
logger.error(f"Could not extract zip: {url}")
continue
return True
except requests.exceptions.RequestException as ex:
logger.error(f"Exception {ex} for url {url}")
retries -= 1
time.sleep(AVG_SLEEP_TIMEOUT_S)
return False
def parse_openmobilitydata_pages(omd_api_key):
url_page = "https://api.transitfeeds.com/v1/getFeeds"
url_with_redirect = "https://api.transitfeeds.com/v1/getLatestFeedVersion"
page = pages_count = 1
urls = []
while page <= pages_count:
params = {
"key": omd_api_key,
"page": page,
"location": "undefined",
"descendants": 1,
"limit": 100,
"type": "gtfs"
}
with requests.get(url_page, params=params, headers=HEADERS_OMD) as response:
if response.status_code != 200:
logger.error(f"Http code {response.status_code} loading feed ids: {url_page}")
return [], ""
data = json.loads(response.text)
if page == 1:
pages_count = data["results"]["numPages"]
logger.info(f"Pages count {pages_count}")
for feed in data["results"]["feeds"]:
params = {
"key": omd_api_key,
"feed": feed["id"]
}
with requests.get(url_with_redirect, params=params, headers=HEADERS_OMD, allow_redirects=True) \
as response_redirect:
if response_redirect.history:
urls.append(response_redirect.url)
else:
logger.error(f"Could not get link to zip with feed {feed['id']} from {url_with_redirect}")
logger.info(f"page {page}")
page += 1
return urls
def write_list_to_file(path, lines):
"""Saves list of lines to path."""
with open(path, "w") as out:
out.write("\n".join(lines))
def crawl_transitland_for_feed_urls(out_path):
"""Crawls transitland feeds API and parses feeds urls from json on each page
Do not try to parallel it because of the Transitland HTTP requests restriction."""
start_page = "https://api.transit.land/api/v1/feeds/?per_page=50"
total_feeds = []
gtfs_feeds_urls, next_page = parse_transitland_page(start_page)
while next_page:
logger.info(f"Loaded {next_page}")
total_feeds += gtfs_feeds_urls
gtfs_feeds_urls, next_page = parse_transitland_page(next_page)
if gtfs_feeds_urls:
total_feeds += gtfs_feeds_urls
write_list_to_file(os.path.join(out_path, URLS_FILE_TRANSITLAND), total_feeds)
def get_filename(file_prefix, index):
index_str = str(index)
index_len = len(index_str)
zeroes_prefix = "" if MAX_INDEX_LEN < index_len else "0" * (MAX_INDEX_LEN - index_len)
return file_prefix + "_" + zeroes_prefix + index_str
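# Worked example of the zero-padded naming above (illustrative values):
# get_filename("tl", 7) -> "tl_0007" and get_filename("omd", 123) -> "omd_0123".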
def load_gtfs_zips_from_urls(path, urls_file, threads_count, file_prefix):
"""Concurrently downloads feeds zips from urls to path."""
    with open(os.path.join(path, urls_file)) as urls_fp:
        urls = [url.strip() for url in urls_fp]
if not urls:
logger.error(f"Empty urls from {path}")
return
logger.info(f"Preparing to load feeds: {len(urls)}")
err_count = 0
with concurrent.futures.ThreadPoolExecutor(max_workers=threads_count) as executor:
future_to_url = {executor.submit(load_gtfs_feed_zip,
os.path.join(path, get_filename(file_prefix, i)), url):
url for i, url in enumerate(urls)}
for j, future in enumerate(concurrent.futures.as_completed(future_to_url), start=1):
url = future_to_url[future]
loaded = future.result()
if not loaded:
err_count += 1
logger.info(f"Handled {j}/{len(urls)} feed. Loaded = {loaded}. {url}")
logger.info(f"Done loading. {err_count}/{len(urls)} errors")
def crawl_openmobilitydata_for_feed_urls(path, omd_api_key):
"""Crawls openmobilitydata feeds API and parses feeds urls from json on each page
Do not try to parallel it because of the OpenMobilityData HTTP requests restriction."""
feed_urls = parse_openmobilitydata_pages(omd_api_key)
logger.info(f"Loaded feed urls {len(feed_urls)}")
write_list_to_file(os.path.join(path, URLS_FILE_OMD), feed_urls)
def main():
"""Downloads urls of feeds from feed aggregators and saves to the file.
Downloads feeds from these urls and saves to the directory."""
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-p", "--path", required=True, help="working directory path")
parser.add_argument("-m", "--mode", required=True,
help="fullrun | load_feed_urls | load_feed_zips")
parser.add_argument("-s", "--source", default="transitland",
help="source of feeds: transitland | openmobilitydata | all")
parser.add_argument("-t", "--threads", type=int, default=THREADS_COUNT,
help="threads count for loading zips")
parser.add_argument("-k", "--omd_api_key", default="",
help="user key for working with openmobilitydata API")
args = parser.parse_args()
logging.basicConfig(filename=os.path.join(args.path, "crawling.log"), filemode="w", level=logging.INFO)
if args.mode in ["fullrun", "load_feed_urls"]:
if args.source in ["all", "transitland"]:
crawl_transitland_for_feed_urls(args.path)
if args.source in ["all", "openmobilitydata"]:
if not args.omd_api_key:
logger.error("No key provided for openmobilitydata. Set omd_api_key argument.")
return
crawl_openmobilitydata_for_feed_urls(args.path, args.omd_api_key)
if args.mode in ["fullrun", "load_feed_zips"]:
if args.source in ["all", "transitland"]:
load_gtfs_zips_from_urls(args.path, URLS_FILE_TRANSITLAND, args.threads, "tl")
if args.source in ["all", "openmobilitydata"]:
load_gtfs_zips_from_urls(args.path, URLS_FILE_OMD, args.threads, "omd")
if __name__ == "__main__":
main()
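# Example invocation (a sketch; the script name, path and API key are placeholders):
#   python crawl_gtfs_feeds.py --path /data/gtfs --mode fullrun --source all \
#       --threads 4 --omd_api_key YOUR_KEY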
|
|
#!/usr/bin/env python
from __future__ import print_function
import re, random, argparse
class Memory(object) :
def __init__(self, size, no_color) :
self.values = [0] * size
self.values[0], self.values[-1] = 128, 128
self.pointers = [0, size-1]
self.loops = []
self.no_color = no_color
def __str__(self) :
s = list(map(str, self.values))
p = self.pointers
if not self.no_color :
if p[0] == p[1] :
s[p[0]] = "\033[93m" + s[p[0]] + "\033[0m"
else :
s[p[0]] = "\033[91m" + s[p[0]] + "\033[0m"
s[p[1]] = "\033[92m" + s[p[1]] + "\033[0m"
else :
s[p[0]] = ">" + s[p[0]]
s[p[1]] = s[p[1]] + "<"
return "[ " + " | ".join(s) + " ]"
def inc(self, c, cpos) :
self.values[self.pointers[c]] += 1
if self.values[self.pointers[c]] > 128 :
self.values[self.pointers[c]] = -127
elif self.values[self.pointers[c]] < -127 :
self.values[self.pointers[c]] = 128
return cpos + 1
def dec(self, c, cpos) :
self.values[self.pointers[c]] -= 1
if self.values[self.pointers[c]] > 128 :
self.values[self.pointers[c]] = -127
elif self.values[self.pointers[c]] < -127 :
self.values[self.pointers[c]] = 128
return cpos + 1
def rshift(self, c, cpos) :
self.pointers[c] += 1 - 2*c
if self.pointers[c] >= len(self.values) :
self.pointers[c] = -len(self.values)
return cpos + 1
def lshift(self, c, cpos) :
self.pointers[c] -= 1 - 2*c
if self.pointers[c] >= len(self.values) :
self.pointers[c] = -len(self.values)
return cpos + 1
def loop(self, c, cpos) :
if self.values[self.pointers[c]] == 0 :
return self.loops[c][1][cpos] + 1
else :
return cpos + 1
def endloop(self, c, cpos) :
if self.values[self.pointers[c]] == 0 :
return cpos + 1
else :
return self.loops[c][0][cpos] + 1
def defer(self, c, cpos) :
return cpos + 1
class Code(object) :
def __init__(self, prog, raw) :
self.code = [ char for char in (prog if raw else open(prog).read()) if char in "+-><[].()*0123456789" ]
self.pos = 0
self.parens = self.matchBraces("(", ")")
self.counters = {}
def __str__(self) :
return str().join(self.code)
def __getitem__(self, index) :
if index < len(self.code) :
return self.code[index]
else :
return "."
def __setitem__(self, index, val) :
self.code[index] = val
def matchBraces(self, opn, cls) :
braces = {}
rev = lambda dt : dict(zip(dt.values(), dt.keys()))
scope = 0
for (i, char) in enumerate(self.code) :
if char == opn :
scope -= 1
braces[i] = scope
elif char == cls :
ia = rev(braces)[scope]
braces[ia] = i
scope += 1
return rev(braces), braces
def get(self, pos) :
t = self[pos]
if t == "(" :
if not pos in self.counters :
m = [ it for it in re.compile(r"\)\*(\d+)").finditer(str(self)) if it.start() == self.parens[1][pos] ][0]
c = int(m.group(1))
self.counters[pos] = [c, m.end()]
if self.counters[pos][0] > 0 :
self.counters[pos][0] -= 1
return self.get(pos+1)
else :
end = self.counters[pos][1]
del(self.counters[pos])
return self.get(end)
elif t == ")" :
return self.get(self.parens[0][pos])
else :
self.pos = pos
return t
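# Sketch of the extended-repetition syntax that Code.get() unrolls (the program
# text is illustrative): in "(+)*3>", the "(" starts a counter read from the
# trailing "*3", so get() yields "+" three times before skipping past ")*3" and
# returning ">".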
def parseArguments() :
parser = argparse.ArgumentParser(description = "Arena program for the BrainFuckedBotsForBattling contest")
parser.add_argument("programs",
help = "BF program files or code (with -r)",
nargs = 2)
parser.add_argument("-n", "--names",
help = "The names of the two competing programs, defaults to file names",
nargs = 2)
parser.add_argument("-s", "--memory-size",
help = "Select the size of the memory tape, defaults to random number in [10, 30]",
type = int,
default = random.randint(10, 30))
parser.add_argument("-t", "--timeout",
help = "The number of cycles to complete before the game is considered a draw",
type = int,
default = 10000)
parser.add_argument("-r", "--raw",
help = " Provide programs directly as source code instead of filenames",
action = "store_true")
parser.add_argument("--no-color",
help = "Disable colored output",
action = "store_true")
args = parser.parse_args()
if not args.names :
args.names = [name.rsplit("/")[-1].rsplit(".", 1)[0] for name in args.programs]
return vars(args)
def finished(mem, c, clear = [[False, False]]) :
timeout = c >= args["timeout"]
win = [clear[0][t] and mem.values[t*-1] == 0 or mem.pointers[t] not in range(len(mem.values)) for t in (1, 0)]
better = [abs(mem.values[t*-1]) > abs(mem.values[t-1]) for t in (0, 1)]
clear[0] = [mem.values[t] == 0 for t in (0, -1)]
if all(win) or timeout and not any(better) :
if not args["no_color"] :
print("\n===== \033[93mDraw\033[0m game =====")
else :
print("\n===== Draw game =====")
return True
for i in (0, 1) :
if win[i] or timeout and better[i] :
if not args["no_color"] :
print("\n===== \033[{}m{}\033[0m won the battle after {} cycles =====".format(91+i, args["names"][i], c))
else :
print("\n===== {} won the battle after {} cycles =====".format(args["names"][i], c))
return True
return False
def main(params) :
# Initialize the memory tape
mem = Memory(params["memory_size"], params["no_color"])
# Interface between code and memory
controller = {
"+": mem.inc, "-": mem.dec,
">": mem.rshift, "<": mem.lshift,
"[": mem.loop, "]": mem.endloop,
".": mem.defer
}
# Get the code of the programs (and convert extended Brainfuck to Brainfuck if necessary)
codes = [ Code(prog, params["raw"]) for prog in params["programs"] ]
# Find matching loops and create dictionaries inside the memory instance
mem.loops = [code.matchBraces("[", "]") for code in codes]
print("===== Starting battle of <{1}> vs <{2}> with memory tape size: {0} =====\n".format(params["memory_size"], *params["names"]))
# Get ready to rumble!
cycle = 0
print(str(mem))
# Loop while none of the finishing conditions is reached
while not finished(mem, cycle) :
for code in codes :
code.instr = code.get(code.pos)
for code in codes[::-2*(codes[1].instr in "[]")+1] :
code.pos = controller[code.instr](codes.index(code), code.pos)
# Increment cycle counter
cycle += 1
print(str(mem))
def tournament(data) :
data["memory_size"] = random.randint(10, 30)
args.update(data)
print.__init__()
main(args)
return print
if __name__ == "__main__" :
args = parseArguments()
try :
main(args)
except Exception :
raise SyntaxError("Invalid syntax")
elif __name__ == "Arena" :
class captureOutput(list) :
def __call__(self, msg) :
self.append(msg)
args = {
"timeout" : 10000,
"raw" : True,
"no_color" : True
}
debug = print
print = captureOutput()
|
|
"""
Testing Recursive feature elimination
"""
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1, make_regression
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
    Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=10, n_jobs=1)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
diff_support = rfe.get_support() == rfe_svc.get_support()
assert_true(sum(diff_support) == len(diff_support))
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
    # All the noisy variables were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
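    # Worked example (illustrative numbers): with n_features=11,
    # n_features_to_select=3 and step=2, formula1 gives
    # 1 + (11 + 2 - 3 - 1) // 2 = 1 + 4 = 5 and formula2 gives
    # 1 + ceil((11 - 3) / 2) = 1 + 4 = 5, so the two agree.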
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to volumes.
"""
import functools
from oslo.config import cfg
from cinder import context
from cinder.db import base
from cinder import exception
from cinder.image import glance
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
import cinder.policy
from cinder import quota
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import units
from cinder import utils
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_types
volume_host_opt = cfg.BoolOpt('snapshot_same_host',
default=True,
help='Create volume from snapshot at the host '
'where snapshot resides')
volume_same_az_opt = cfg.BoolOpt('cloned_volume_same_az',
default=True,
help='Ensure that the new volumes are the '
'same AZ as snapshot or source volume')
CONF = cfg.CONF
CONF.register_opt(volume_host_opt)
CONF.register_opt(volume_same_az_opt)
CONF.import_opt('storage_availability_zone', 'cinder.volume.manager')
LOG = logging.getLogger(__name__)
GB = units.GiB
QUOTAS = quota.QUOTAS
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution
This decorator requires the first 3 args of the wrapped function
to be (self, context, volume)
"""
@functools.wraps(func)
def wrapped(self, context, target_obj, *args, **kwargs):
check_policy(context, func.__name__, target_obj)
return func(self, context, target_obj, *args, **kwargs)
return wrapped
def check_policy(context, action, target_obj=None):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
target.update(target_obj or {})
_action = 'volume:%s' % action
cinder.policy.enforce(context, _action, target)
class API(base.Base):
"""API for interacting with the volume manager."""
def __init__(self, db_driver=None, image_service=None):
self.image_service = (image_service or
glance.get_default_image_service())
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
self.availability_zone_names = ()
super(API, self).__init__(db_driver)
def create(self, context, size, name, description, snapshot=None,
image_id=None, volume_type=None, metadata=None,
availability_zone=None, source_volume=None,
scheduler_hints=None):
exclusive_options = (snapshot, image_id, source_volume)
exclusive_options_set = sum(1 for option in
exclusive_options if option is not None)
if exclusive_options_set > 1:
msg = (_("May specify only one of snapshot, imageRef "
"or source volume"))
raise exception.InvalidInput(reason=msg)
check_policy(context, 'create')
if snapshot is not None:
if snapshot['status'] != "available":
msg = _("status must be available")
raise exception.InvalidSnapshot(reason=msg)
if not size:
size = snapshot['volume_size']
elif size < snapshot['volume_size']:
msg = _("Volume size cannot be lesser than"
" the Snapshot size")
raise exception.InvalidInput(reason=msg)
snapshot_id = snapshot['id']
else:
snapshot_id = None
if source_volume is not None:
if source_volume['status'] == "error":
msg = _("Unable to clone volumes that are in an error state")
raise exception.InvalidSourceVolume(reason=msg)
if not size:
size = source_volume['size']
else:
if size < source_volume['size']:
msg = _("Clones currently must be "
">= original volume size.")
raise exception.InvalidInput(reason=msg)
source_volid = source_volume['id']
else:
source_volid = None
def as_int(s):
try:
return int(s)
except (ValueError, TypeError):
return s
# tolerate size as stringified int
size = as_int(size)
if not isinstance(size, int) or size <= 0:
msg = (_("Volume size '%s' must be an integer and greater than 0")
% size)
raise exception.InvalidInput(reason=msg)
if (image_id and not (source_volume or snapshot)):
# check image existence
image_meta = self.image_service.show(context, image_id)
image_size_in_gb = (int(image_meta['size']) + GB - 1) / GB
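            # Ceiling-division sketch (illustrative size): a 1.5 GiB image of
            # 1610612736 bytes yields (1610612736 + GB - 1) / GB == 2, so the
            # volume must be at least 2 GB.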
#check image size is not larger than volume size.
if image_size_in_gb > size:
msg = _('Size of specified image is larger than volume size.')
raise exception.InvalidInput(reason=msg)
# Check image minDisk requirement is met for the particular volume
if size < image_meta.get('min_disk', 0):
msg = _('Image minDisk size is larger than the volume size.')
raise exception.InvalidInput(reason=msg)
if availability_zone is None:
if snapshot is not None:
availability_zone = snapshot['volume']['availability_zone']
elif source_volume is not None:
availability_zone = source_volume['availability_zone']
else:
availability_zone = CONF.storage_availability_zone
else:
            self._check_availability_zone(availability_zone)
if CONF.cloned_volume_same_az:
if (snapshot and
snapshot['volume']['availability_zone'] !=
availability_zone):
msg = _("Volume must be in the same "
"availability zone as the snapshot")
raise exception.InvalidInput(reason=msg)
elif source_volume and \
source_volume['availability_zone'] != availability_zone:
msg = _("Volume must be in the same "
"availability zone as the source volume")
raise exception.InvalidInput(reason=msg)
if not volume_type and not source_volume:
volume_type = volume_types.get_default_volume_type()
if not volume_type and source_volume:
volume_type_id = source_volume['volume_type_id']
else:
volume_type_id = volume_type.get('id')
try:
reserve_opts = {'volumes': 1, 'gigabytes': size}
QUOTAS.add_volume_type_opts(context, reserve_opts, volume_type_id)
reservations = QUOTAS.reserve(context, **reserve_opts)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
for over in overs:
if 'gigabytes' in over:
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG volume (%(d_consumed)dG of "
"%(d_quota)dG already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
's_size': size,
'd_consumed': _consumed(over),
'd_quota': quotas[over]})
raise exception.VolumeSizeExceedsAvailableQuota()
elif 'volumes' in over:
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"volume (%(d_consumed)d volumes"
"already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
'd_consumed': _consumed(over)})
raise exception.VolumeLimitExceeded(allowed=quotas[over])
self._check_metadata_properties(context, metadata)
options = {'size': size,
'user_id': context.user_id,
'project_id': context.project_id,
'snapshot_id': snapshot_id,
'availability_zone': availability_zone,
'status': "creating",
'attach_status': "detached",
'display_name': name,
'display_description': description,
'volume_type_id': volume_type_id,
'metadata': metadata,
'source_volid': source_volid}
try:
volume = self.db.volume_create(context, options)
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.db.volume_destroy(context, volume['id'])
finally:
QUOTAS.rollback(context, reservations)
request_spec = {'volume_properties': options,
'volume_type': volume_type,
'volume_id': volume['id'],
'snapshot_id': volume['snapshot_id'],
'image_id': image_id,
'source_volid': volume['source_volid']}
if scheduler_hints:
filter_properties = {'scheduler_hints': scheduler_hints}
else:
filter_properties = {}
self._cast_create_volume(context, request_spec, filter_properties)
return volume
def _cast_create_volume(self, context, request_spec, filter_properties):
# NOTE(Rongze Zhu): It is a simple solution for bug 1008866
# If snapshot_id is set, make the call create volume directly to
# the volume host where the snapshot resides instead of passing it
# through the scheduler. So snapshot can be copy to new volume.
source_volid = request_spec['source_volid']
volume_id = request_spec['volume_id']
snapshot_id = request_spec['snapshot_id']
image_id = request_spec['image_id']
if snapshot_id and CONF.snapshot_same_host:
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
source_volume_ref = self.db.volume_get(context,
snapshot_ref['volume_id'])
now = timeutils.utcnow()
values = {'host': source_volume_ref['host'], 'scheduled_at': now}
volume_ref = self.db.volume_update(context, volume_id, values)
# bypass scheduler and send request directly to volume
self.volume_rpcapi.create_volume(
context,
volume_ref,
volume_ref['host'],
request_spec=request_spec,
filter_properties=filter_properties,
allow_reschedule=False,
snapshot_id=snapshot_id,
image_id=image_id)
elif source_volid:
source_volume_ref = self.db.volume_get(context,
source_volid)
now = timeutils.utcnow()
values = {'host': source_volume_ref['host'], 'scheduled_at': now}
volume_ref = self.db.volume_update(context, volume_id, values)
# bypass scheduler and send request directly to volume
self.volume_rpcapi.create_volume(
context,
volume_ref,
volume_ref['host'],
request_spec=request_spec,
filter_properties=filter_properties,
allow_reschedule=False,
snapshot_id=snapshot_id,
image_id=image_id,
source_volid=source_volid)
else:
self.scheduler_rpcapi.create_volume(
context,
CONF.volume_topic,
volume_id,
snapshot_id,
image_id,
request_spec=request_spec,
filter_properties=filter_properties)
    def _check_availability_zone(self, availability_zone):
#NOTE(bcwaldon): This approach to caching fails to handle the case
# that an availability zone is disabled/removed.
if availability_zone in self.availability_zone_names:
return
azs = self.list_availability_zones()
self.availability_zone_names = [az['name'] for az in azs]
if availability_zone not in self.availability_zone_names:
msg = _("Availability zone is invalid")
LOG.warn(msg)
raise exception.InvalidInput(reason=msg)
def list_availability_zones(self):
"""Describe the known availability zones
:retval list of dicts, each with a 'name' and 'available' key
"""
topic = CONF.volume_topic
ctxt = context.get_admin_context()
services = self.db.service_get_all_by_topic(ctxt, topic)
az_data = [(s['availability_zone'], s['disabled']) for s in services]
disabled_map = {}
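        # An AZ is reported as available if at least one of its services is
        # enabled: the tracked flag starts as True and is AND-ed with each
        # service's disabled flag below.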
for (az_name, disabled) in az_data:
tracked_disabled = disabled_map.get(az_name, True)
disabled_map[az_name] = tracked_disabled and disabled
azs = [{'name': name, 'available': not disabled}
for (name, disabled) in disabled_map.items()]
return tuple(azs)
@wrap_check_policy
def delete(self, context, volume, force=False):
if context.is_admin and context.project_id != volume['project_id']:
project_id = volume['project_id']
else:
project_id = context.project_id
volume_id = volume['id']
if not volume['host']:
# NOTE(vish): scheduling failed, so delete it
# Note(zhiteng): update volume quota reservation
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume['volume_type_id'])
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_("Failed to update quota for deleting volume"))
self.db.volume_destroy(context.elevated(), volume_id)
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
return
if not force and volume['status'] not in ["available", "error",
"error_restoring"]:
msg = _("Volume status must be available or error")
raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if volume['attach_status'] == "migrating":
# Volume is migrating, wait until done
msg = _("Volume cannot be deleted while migrating")
raise exception.InvalidVolume(reason=msg)
snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
if len(snapshots):
msg = _("Volume still has %d dependent snapshots") % len(snapshots)
raise exception.InvalidVolume(reason=msg)
now = timeutils.utcnow()
self.db.volume_update(context, volume_id, {'status': 'deleting',
'terminated_at': now})
self.volume_rpcapi.delete_volume(context, volume)
@wrap_check_policy
def update(self, context, volume, fields):
self.db.volume_update(context, volume['id'], fields)
def get(self, context, volume_id):
rv = self.db.volume_get(context, volume_id)
volume = dict(rv.iteritems())
check_policy(context, 'get', volume)
return volume
    def get_all(self, context, marker=None, limit=None, sort_key='created_at',
                sort_dir='desc', filters=None):
        check_policy(context, 'get_all')
        if filters is None:
            filters = {}
try:
if limit is not None:
limit = int(limit)
if limit < 0:
msg = _('limit param must be positive')
raise exception.InvalidInput(reason=msg)
except ValueError:
msg = _('limit param must be an integer')
raise exception.InvalidInput(reason=msg)
if (context.is_admin and 'all_tenants' in filters):
# Need to remove all_tenants to pass the filtering below.
del filters['all_tenants']
volumes = self.db.volume_get_all(context, marker, limit, sort_key,
sort_dir)
else:
volumes = self.db.volume_get_all_by_project(context,
context.project_id,
marker, limit,
sort_key, sort_dir)
# Non-admin shouldn't see temporary target of a volume migration
if not context.is_admin:
filters['no_migration_targets'] = True
if filters:
LOG.debug(_("Searching by: %s") % str(filters))
def _check_metadata_match(volume, searchdict):
volume_metadata = {}
for i in volume.get('volume_metadata'):
volume_metadata[i['key']] = i['value']
for k, v in searchdict.iteritems():
if (k not in volume_metadata.keys() or
volume_metadata[k] != v):
return False
return True
def _check_migration_target(volume, searchdict):
if not volume['status'].startswith('migration_target'):
return True
return False
# search_option to filter_name mapping.
filter_mapping = {'metadata': _check_metadata_match,
'no_migration_targets': _check_migration_target}
result = []
not_found = object()
for volume in volumes:
# go over all filters in the list
for opt, values in filters.iteritems():
try:
filter_func = filter_mapping[opt]
except KeyError:
def filter_func(volume, value):
return volume.get(opt, not_found) == value
if not filter_func(volume, values):
break # volume doesn't match this filter
else: # did not break out loop
result.append(volume) # volume matches all filters
volumes = result
return volumes
def get_snapshot(self, context, snapshot_id):
check_policy(context, 'get_snapshot')
rv = self.db.snapshot_get(context, snapshot_id)
return dict(rv.iteritems())
def get_volume(self, context, volume_id):
check_policy(context, 'get_volume')
rv = self.db.volume_get(context, volume_id)
return dict(rv.iteritems())
def get_all_snapshots(self, context, search_opts=None):
check_policy(context, 'get_all_snapshots')
search_opts = search_opts or {}
if (context.is_admin and 'all_tenants' in search_opts):
# Need to remove all_tenants to pass the filtering below.
del search_opts['all_tenants']
snapshots = self.db.snapshot_get_all(context)
else:
snapshots = self.db.snapshot_get_all_by_project(
context, context.project_id)
if search_opts:
LOG.debug(_("Searching by: %s") % str(search_opts))
results = []
not_found = object()
for snapshot in snapshots:
for opt, value in search_opts.iteritems():
if snapshot.get(opt, not_found) != value:
break
else:
results.append(snapshot)
snapshots = results
return snapshots
@wrap_check_policy
def check_attach(self, context, volume):
# TODO(vish): abstract status checking?
if volume['status'] != "available":
msg = _("status must be available")
raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == "attached":
msg = _("already attached")
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def check_detach(self, context, volume):
# TODO(vish): abstract status checking?
if volume['status'] != "in-use":
msg = _("status must be in-use to detach")
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def reserve_volume(self, context, volume):
#NOTE(jdg): check for Race condition bug 1096983
#explicitly get updated ref and check
volume = self.db.volume_get(context, volume['id'])
if volume['status'] == 'available':
self.update(context, volume, {"status": "attaching"})
else:
msg = _("Volume status must be available to reserve")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def unreserve_volume(self, context, volume):
if volume['status'] == "attaching":
self.update(context, volume, {"status": "available"})
@wrap_check_policy
def begin_detaching(self, context, volume):
self.update(context, volume, {"status": "detaching"})
@wrap_check_policy
def roll_detaching(self, context, volume):
if volume['status'] == "detaching":
self.update(context, volume, {"status": "in-use"})
@wrap_check_policy
def attach(self, context, volume, instance_uuid, host_name, mountpoint):
return self.volume_rpcapi.attach_volume(context,
volume,
instance_uuid,
host_name,
mountpoint)
@wrap_check_policy
def detach(self, context, volume):
return self.volume_rpcapi.detach_volume(context, volume)
@wrap_check_policy
def initialize_connection(self, context, volume, connector):
return self.volume_rpcapi.initialize_connection(context,
volume,
connector)
@wrap_check_policy
def terminate_connection(self, context, volume, connector, force=False):
self.unreserve_volume(context, volume)
return self.volume_rpcapi.terminate_connection(context,
volume,
connector,
force)
@wrap_check_policy
def accept_transfer(self, context, volume):
return self.volume_rpcapi.accept_transfer(context,
volume)
def _create_snapshot(self, context,
volume, name, description,
force=False, metadata=None):
check_policy(context, 'create_snapshot', volume)
if ((not force) and (volume['status'] != "available")):
msg = _("must be available")
raise exception.InvalidVolume(reason=msg)
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': 1}
else:
reserve_opts = {'snapshots': 1, 'gigabytes': volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.get('volume_type_id'))
reservations = QUOTAS.reserve(context, **reserve_opts)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
for over in overs:
if 'gigabytes' in over:
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG snapshot (%(d_consumed)dG of "
"%(d_quota)dG already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
's_size': volume['size'],
'd_consumed': _consumed(over),
'd_quota': quotas[over]})
raise exception.VolumeSizeExceedsAvailableQuota()
elif 'snapshots' in over:
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"snapshot (%(d_consumed)d snapshots "
"already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
'd_consumed': _consumed(over)})
raise exception.SnapshotLimitExceeded(
allowed=quotas[over])
self._check_metadata_properties(context, metadata)
options = {'volume_id': volume['id'],
'user_id': context.user_id,
'project_id': context.project_id,
'status': "creating",
'progress': '0%',
'volume_size': volume['size'],
'display_name': name,
'display_description': description,
'metadata': metadata}
try:
snapshot = self.db.snapshot_create(context, options)
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
                    self.db.snapshot_destroy(context, snapshot['id'])
finally:
QUOTAS.rollback(context, reservations)
self.volume_rpcapi.create_snapshot(context, volume, snapshot)
return snapshot
def create_snapshot(self, context,
volume, name,
description, metadata=None):
return self._create_snapshot(context, volume, name, description,
False, metadata)
def create_snapshot_force(self, context,
volume, name,
description, metadata=None):
return self._create_snapshot(context, volume, name, description,
True, metadata)
@wrap_check_policy
def delete_snapshot(self, context, snapshot, force=False):
if not force and snapshot['status'] not in ["available", "error"]:
msg = _("Volume Snapshot status must be available or error")
raise exception.InvalidSnapshot(reason=msg)
self.db.snapshot_update(context, snapshot['id'],
{'status': 'deleting'})
volume = self.db.volume_get(context, snapshot['volume_id'])
self.volume_rpcapi.delete_snapshot(context, snapshot, volume['host'])
@wrap_check_policy
def update_snapshot(self, context, snapshot, fields):
self.db.snapshot_update(context, snapshot['id'], fields)
@wrap_check_policy
def get_volume_metadata(self, context, volume):
"""Get all metadata associated with a volume."""
rv = self.db.volume_metadata_get(context, volume['id'])
return dict(rv.iteritems())
@wrap_check_policy
def delete_volume_metadata(self, context, volume, key):
"""Delete the given metadata item from a volume."""
self.db.volume_metadata_delete(context, volume['id'], key)
def _check_metadata_properties(self, context, metadata=None):
if not metadata:
metadata = {}
for k, v in metadata.iteritems():
if len(k) == 0:
msg = _("Metadata property key blank")
LOG.warn(msg)
raise exception.InvalidVolumeMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
@wrap_check_policy
def update_volume_metadata(self, context, volume, metadata, delete=False):
"""Updates or creates volume metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
orig_meta = self.get_volume_metadata(context, volume)
if delete:
_metadata = metadata
else:
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(context, _metadata)
self.db.volume_metadata_update(context, volume['id'], _metadata, True)
# TODO(jdg): Implement an RPC call for drivers that may use this info
return _metadata
def get_volume_metadata_value(self, volume, key):
"""Get value of particular metadata key."""
metadata = volume.get('volume_metadata')
if metadata:
for i in volume['volume_metadata']:
if i['key'] == key:
return i['value']
return None
def get_snapshot_metadata(self, context, snapshot):
"""Get all metadata associated with a snapshot."""
rv = self.db.snapshot_metadata_get(context, snapshot['id'])
return dict(rv.iteritems())
def delete_snapshot_metadata(self, context, snapshot, key):
"""Delete the given metadata item from a snapshot."""
self.db.snapshot_metadata_delete(context, snapshot['id'], key)
def update_snapshot_metadata(self, context,
snapshot, metadata,
delete=False):
"""Updates or creates snapshot metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
orig_meta = self.get_snapshot_metadata(context, snapshot)
if delete:
_metadata = metadata
else:
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(context, _metadata)
self.db.snapshot_metadata_update(context,
snapshot['id'],
_metadata,
True)
# TODO(jdg): Implement an RPC call for drivers that may use this info
return _metadata
def get_snapshot_metadata_value(self, snapshot, key):
pass
@wrap_check_policy
def get_volume_image_metadata(self, context, volume):
db_data = self.db.volume_glance_metadata_get(context, volume['id'])
return dict(
(meta_entry.key, meta_entry.value) for meta_entry in db_data
)
def _check_volume_availability(self, context, volume, force):
"""Check if the volume can be used."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume status must be available/in-use.')
raise exception.InvalidVolume(reason=msg)
if not force and 'in-use' == volume['status']:
msg = _('Volume status is in-use.')
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def copy_volume_to_image(self, context, volume, metadata, force):
"""Create a new image from the specified volume."""
self._check_volume_availability(context, volume, force)
recv_metadata = self.image_service.create(context, metadata)
self.update(context, volume, {'status': 'uploading'})
self.volume_rpcapi.copy_volume_to_image(context,
volume,
recv_metadata)
response = {"id": volume['id'],
"updated_at": volume['updated_at'],
"status": 'uploading',
"display_description": volume['display_description'],
"size": volume['size'],
"volume_type": volume['volume_type'],
"image_id": recv_metadata['id'],
"container_format": recv_metadata['container_format'],
"disk_format": recv_metadata['disk_format'],
"image_name": recv_metadata.get('name', None)}
return response
@wrap_check_policy
def extend(self, context, volume, new_size):
if volume['status'] != 'available':
msg = _('Volume status must be available to extend.')
raise exception.InvalidVolume(reason=msg)
size_increase = (int(new_size)) - volume['size']
if size_increase <= 0:
msg = (_("New size for extend must be greater "
"than current size. (current: %(size)s, "
"extended: %(new_size)s)") % {'new_size': new_size,
'size': volume['size']})
raise exception.InvalidInput(reason=msg)
self.update(context, volume, {'status': 'extending'})
self.volume_rpcapi.extend_volume(context, volume, new_size)
def migrate_volume(self, context, volume, host, force_host_copy):
"""Migrate the volume to the specified host."""
# We only handle "available" volumes for now
if volume['status'] != "available":
msg = _("status must be available")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# We only handle volumes without snapshots for now
snaps = self.db.snapshot_get_all_for_volume(context, volume['id'])
if snaps:
msg = _("volume must not have snapshots")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Make sure the host is in the list of available hosts
elevated = context.elevated()
topic = CONF.volume_topic
services = self.db.service_get_all_by_topic(elevated, topic)
found = False
for service in services:
if utils.service_is_up(service) and service['host'] == host:
found = True
if not found:
msg = (_('No available service named %s') % host)
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
# Make sure the destination host is different than the current one
if host == volume['host']:
msg = _('Destination host must be different than current host')
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
self.update(context, volume, {'status': 'migrating'})
# Call the scheduler to ensure that the host exists and that it can
# accept the volume
volume_type = {}
if volume['volume_type_id']:
            volume_type = volume_types.get_volume_type(
                context, volume['volume_type_id'])
request_spec = {'volume_properties': volume,
'volume_type': volume_type,
'volume_id': volume['id']}
self.scheduler_rpcapi.migrate_volume_to_host(context,
CONF.volume_topic,
volume['id'],
host,
force_host_copy,
request_spec)
class HostAPI(base.Base):
    """Sub-set of the Volume Manager API for managing host operations."""
    def __init__(self):
        super(HostAPI, self).__init__()
def set_host_enabled(self, context, host, enabled):
"""Sets the specified host's ability to accept new volumes."""
raise NotImplementedError()
def get_host_uptime(self, context, host):
"""Returns the result of calling "uptime" on the target host."""
raise NotImplementedError()
def host_power_action(self, context, host, action):
raise NotImplementedError()
def set_host_maintenance(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
volume evacuation.
"""
raise NotImplementedError()
|
|
from collections import OrderedDict
from datetime import datetime
from decimal import Decimal
from urllib.request import urlopen
from urllib.error import HTTPError
from xml.sax import handler, make_parser
import xml.etree.ElementTree
import json
import re
import time
from typing import Any, Callable, ClassVar, Dict, List, NoReturn, Optional, Tuple, Type, TypeVar, Union
from overpy import exception
# Ignore flake8 F401 warning for unused vars
from overpy.__about__ import ( # noqa: F401
__author__, __copyright__, __email__, __license__, __summary__, __title__,
__uri__, __version__
)
ElementTypeVar = TypeVar("ElementTypeVar", bound="Element")
XML_PARSER_DOM = 1
XML_PARSER_SAX = 2
# Try to convert some common attributes
# http://wiki.openstreetmap.org/wiki/Elements#Common_attributes
GLOBAL_ATTRIBUTE_MODIFIERS: Dict[str, Callable] = {
"changeset": int,
"timestamp": lambda ts: datetime.strptime(ts, "%Y-%m-%dT%H:%M:%SZ"),
"uid": int,
"version": int,
"visible": lambda v: v.lower() == "true"
}
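# Illustrative effect of the modifiers above (attribute values are made up):
# "changeset": "12345" becomes 12345 (int), "visible": "True" becomes True
# (bool) and "timestamp": "2020-01-01T00:00:00Z" becomes a datetime object.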
def is_valid_type(
element: Union["Area", "Node", "Relation", "Way"],
cls: Type[Union["Area", "Element", "Node", "Relation", "Way"]]) -> bool:
"""
Test if an element is of a given type.
:param element: The element instance to test
:param cls: The element class to test
:return: False or True
"""
return isinstance(element, cls) and element.id is not None
class Overpass:
"""
Class to access the Overpass API
:cvar default_max_retry_count: Global max number of retries (Default: 0)
:cvar default_read_chunk_size: Max size of each chunk read from the server response
:cvar default_retry_timeout: Global time to wait between tries (Default: 1.0s)
:cvar default_url: Default URL of the Overpass server
"""
default_max_retry_count: ClassVar[int] = 0
default_read_chunk_size: ClassVar[int] = 4096
default_retry_timeout: ClassVar[float] = 1.0
default_url: ClassVar[str] = "http://overpass-api.de/api/interpreter"
def __init__(
self,
read_chunk_size: Optional[int] = None,
url: Optional[str] = None,
xml_parser: int = XML_PARSER_SAX,
            max_retry_count: Optional[int] = None,
            retry_timeout: Optional[float] = None):
"""
:param read_chunk_size: Max size of each chunk read from the server response
:param url: Optional URL of the Overpass server. Defaults to http://overpass-api.de/api/interpreter
:param xml_parser: The xml parser to use
:param max_retry_count: Max number of retries (Default: default_max_retry_count)
:param retry_timeout: Time to wait between tries (Default: default_retry_timeout)
"""
self.url = self.default_url
if url is not None:
self.url = url
self._regex_extract_error_msg = re.compile(br"\<p\>(?P<msg>\<strong\s.*?)\</p\>")
self._regex_remove_tag = re.compile(b"<[^>]*?>")
if read_chunk_size is None:
read_chunk_size = self.default_read_chunk_size
self.read_chunk_size = read_chunk_size
if max_retry_count is None:
max_retry_count = self.default_max_retry_count
self.max_retry_count = max_retry_count
if retry_timeout is None:
retry_timeout = self.default_retry_timeout
self.retry_timeout = retry_timeout
self.xml_parser = xml_parser
@staticmethod
def _handle_remark_msg(msg: str) -> NoReturn:
"""
Try to parse the message provided with the remark tag or element.
:param msg: The message
:raises overpy.exception.OverpassRuntimeError: If message starts with 'runtime error:'
:raises overpy.exception.OverpassRuntimeRemark: If message starts with 'runtime remark:'
:raises overpy.exception.OverpassUnknownError: If we are unable to identify the error
"""
msg = msg.strip()
if msg.startswith("runtime error:"):
raise exception.OverpassRuntimeError(msg=msg)
elif msg.startswith("runtime remark:"):
raise exception.OverpassRuntimeRemark(msg=msg)
raise exception.OverpassUnknownError(msg=msg)
def query(self, query: Union[bytes, str]) -> "Result":
"""
Query the Overpass API
:param query: The query string in Overpass QL
:return: The parsed result
"""
if not isinstance(query, bytes):
query = query.encode("utf-8")
retry_num: int = 0
retry_exceptions: List[exception.OverPyException] = []
        do_retry: bool = self.max_retry_count > 0
while retry_num <= self.max_retry_count:
if retry_num > 0:
time.sleep(self.retry_timeout)
retry_num += 1
try:
f = urlopen(self.url, query)
except HTTPError as e:
f = e
response = f.read(self.read_chunk_size)
while True:
data = f.read(self.read_chunk_size)
if len(data) == 0:
break
response = response + data
f.close()
current_exception: exception.OverPyException
if f.code == 200:
content_type = f.getheader("Content-Type")
if content_type == "application/json":
return self.parse_json(response)
if content_type == "application/osm3s+xml":
return self.parse_xml(response)
current_exception = exception.OverpassUnknownContentType(content_type)
if not do_retry:
raise current_exception
retry_exceptions.append(current_exception)
continue
if f.code == 400:
msgs: List[str] = []
for msg_raw in self._regex_extract_error_msg.finditer(response):
msg_clean_bytes = self._regex_remove_tag.sub(b"", msg_raw.group("msg"))
try:
msg = msg_clean_bytes.decode("utf-8")
except UnicodeDecodeError:
msg = repr(msg_clean_bytes)
msgs.append(msg)
current_exception = exception.OverpassBadRequest(
query,
msgs=msgs
)
if not do_retry:
raise current_exception
retry_exceptions.append(current_exception)
continue
if f.code == 429:
current_exception = exception.OverpassTooManyRequests()
if not do_retry:
raise current_exception
retry_exceptions.append(current_exception)
continue
if f.code == 504:
current_exception = exception.OverpassGatewayTimeout()
if not do_retry:
raise current_exception
retry_exceptions.append(current_exception)
continue
current_exception = exception.OverpassUnknownHTTPStatusCode(f.code)
if not do_retry:
raise current_exception
retry_exceptions.append(current_exception)
continue
raise exception.MaxRetriesReached(retry_count=retry_num, exceptions=retry_exceptions)
def parse_json(self, data: Union[bytes, str], encoding: str = "utf-8") -> "Result":
"""
Parse raw response from Overpass service.
:param data: Raw JSON Data
:param encoding: Encoding to decode byte string
:return: Result object
"""
if isinstance(data, bytes):
data = data.decode(encoding)
data_parsed: dict = json.loads(data, parse_float=Decimal)
if "remark" in data_parsed:
self._handle_remark_msg(msg=data_parsed.get("remark"))
return Result.from_json(data_parsed, api=self)
def parse_xml(self, data: Union[bytes, str], encoding: str = "utf-8", parser: Optional[int] = None):
"""
:param data: Raw XML Data
:param encoding: Encoding to decode byte string
:param parser: The XML parser to use
:return: Result object
"""
if parser is None:
parser = self.xml_parser
if isinstance(data, bytes):
data = data.decode(encoding)
m = re.compile("<remark>(?P<msg>[^<>]*)</remark>").search(data)
if m:
self._handle_remark_msg(m.group("msg"))
return Result.from_xml(data, api=self, parser=parser)
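# Usage sketch (illustrative, assumes network access to the default endpoint above):
#
#     api = Overpass(max_retry_count=3, retry_timeout=2.0)
#     result = api.query("[out:json];node(50.745,7.17,50.75,7.18);out body;")
#     print(len(result.nodes))
#
# With max_retry_count > 0 the query() loop above retries failed requests, sleeping
# retry_timeout seconds between attempts, and finally raises MaxRetriesReached with
# the collected exceptions.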
class Result:
"""
Class to handle the result.
"""
def __init__(
self,
elements: Optional[List[Union["Area", "Node", "Relation", "Way"]]] = None,
api: Optional[Overpass] = None):
"""
:param elements: List of elements to initialize the result with
:param api: The API object to load additional resources and elements
"""
if elements is None:
elements = []
self._areas: Dict[int, Union["Area", "Node", "Relation", "Way"]] = OrderedDict(
(element.id, element) for element in elements if is_valid_type(element, Area)
)
self._nodes = OrderedDict(
(element.id, element) for element in elements if is_valid_type(element, Node)
)
self._ways = OrderedDict(
(element.id, element) for element in elements if is_valid_type(element, Way)
)
self._relations = OrderedDict(
(element.id, element) for element in elements if is_valid_type(element, Relation)
)
self._class_collection_map: Dict[Any, Any] = {
Node: self._nodes,
Way: self._ways,
Relation: self._relations,
Area: self._areas
}
self.api = api
def expand(self, other: "Result"):
"""
        Add all elements from another result to the list of elements of this result object.
It is used by the auto resolve feature.
:param other: Expand the result with the elements from this result.
:raises ValueError: If provided parameter is not instance of :class:`overpy.Result`
"""
if not isinstance(other, Result):
raise ValueError("Provided argument has to be instance of overpy:Result()")
other_collection_map: Dict[Type["Element"], List[Union["Area", "Node", "Relation", "Way"]]] = {
Area: other.areas,
Node: other.nodes,
Relation: other.relations,
Way: other.ways
}
for element_type, own_collection in self._class_collection_map.items():
for element in other_collection_map[element_type]:
if is_valid_type(element, element_type) and element.id not in own_collection:
own_collection[element.id] = element
def append(self, element: Union["Area", "Node", "Relation", "Way"]):
"""
Append a new element to the result.
:param element: The element to append
"""
if is_valid_type(element, Element):
self._class_collection_map[element.__class__].setdefault(element.id, element)
def get_elements(
self,
filter_cls: Type[ElementTypeVar],
elem_id: Optional[int] = None) -> List[ElementTypeVar]:
"""
Get a list of elements from the result and filter the element type by a class.
:param filter_cls:
:param elem_id: ID of the object
:return: List of available elements
"""
result: List[ElementTypeVar] = []
if elem_id is not None:
try:
result = [self._class_collection_map[filter_cls][elem_id]]
except KeyError:
result = []
else:
for e in self._class_collection_map[filter_cls].values():
result.append(e)
return result
def get_ids(
self,
filter_cls: Type[Union["Area", "Node", "Relation", "Way"]]) -> List[int]:
"""
Get all Element IDs
:param filter_cls: Only IDs of elements with this type
:return: List of IDs
"""
return list(self._class_collection_map[filter_cls].keys())
def get_node_ids(self) -> List[int]:
return self.get_ids(filter_cls=Node)
def get_way_ids(self) -> List[int]:
return self.get_ids(filter_cls=Way)
def get_relation_ids(self) -> List[int]:
return self.get_ids(filter_cls=Relation)
def get_area_ids(self) -> List[int]:
return self.get_ids(filter_cls=Area)
@classmethod
def from_json(cls, data: dict, api: Optional[Overpass] = None) -> "Result":
"""
Create a new instance and load data from json object.
:param data: JSON data returned by the Overpass API
:param api:
:return: New instance of Result object
"""
result = cls(api=api)
elem_cls: Type[Union["Area", "Node", "Relation", "Way"]]
for elem_cls in [Node, Way, Relation, Area]:
for element in data.get("elements", []):
e_type = element.get("type")
if hasattr(e_type, "lower") and e_type.lower() == elem_cls._type_value:
result.append(elem_cls.from_json(element, result=result))
return result
@classmethod
def from_xml(
cls,
data: Union[str, xml.etree.ElementTree.Element],
api: Optional[Overpass] = None,
parser: Optional[int] = None) -> "Result":
"""
Create a new instance and load data from xml data or object.
.. note::
            If parser is set to None, the function tries to find the best parser.
By default the SAX parser is chosen if a string is provided as data.
The parser is set to DOM if an xml.etree.ElementTree.Element is provided as data value.
:param data: Root element
:param api: The instance to query additional information if required.
        :param parser: Specify the parser to use (DOM or SAX). Default: None = autodetect (SAX for str data, DOM otherwise)
:return: New instance of Result object
"""
if parser is None:
if isinstance(data, str):
parser = XML_PARSER_SAX
else:
parser = XML_PARSER_DOM
result = cls(api=api)
if parser == XML_PARSER_DOM:
import xml.etree.ElementTree as ET
if isinstance(data, str):
root = ET.fromstring(data)
elif isinstance(data, ET.Element):
root = data
else:
raise exception.OverPyException("Unable to detect data type.")
elem_cls: Type[Union["Area", "Node", "Relation", "Way"]]
for elem_cls in [Node, Way, Relation, Area]:
for child in root:
if child.tag.lower() == elem_cls._type_value:
result.append(elem_cls.from_xml(child, result=result))
elif parser == XML_PARSER_SAX:
from io import StringIO
if not isinstance(data, str):
raise ValueError("data must be of type str if using the SAX parser")
source = StringIO(data)
sax_handler = OSMSAXHandler(result)
sax_parser = make_parser()
sax_parser.setContentHandler(sax_handler)
sax_parser.parse(source)
else:
# ToDo: better exception
raise Exception("Unknown XML parser")
return result
def get_area(self, area_id: int, resolve_missing: bool = False) -> "Area":
"""
Get an area by its ID.
:param area_id: The area ID
:param resolve_missing: Query the Overpass API if the area is missing in the result set.
:return: The area
        :raises overpy.exception.DataIncomplete: The requested area is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the area can't be resolved.
"""
areas = self.get_areas(area_id=area_id)
if len(areas) == 0:
if resolve_missing is False:
raise exception.DataIncomplete("Resolve missing area is disabled")
query = ("\n"
"[out:json];\n"
"area({area_id});\n"
"out body;\n"
)
query = query.format(
area_id=area_id
)
tmp_result = self.api.query(query)
self.expand(tmp_result)
areas = self.get_areas(area_id=area_id)
if len(areas) == 0:
raise exception.DataIncomplete("Unable to resolve requested areas")
return areas[0]
def get_areas(self, area_id: Optional[int] = None) -> List["Area"]:
"""
Alias for get_elements() but filter the result by Area
:param area_id: The Id of the area
:return: List of elements
"""
return self.get_elements(Area, elem_id=area_id)
def get_node(self, node_id: int, resolve_missing: bool = False) -> "Node":
"""
Get a node by its ID.
:param node_id: The node ID
:param resolve_missing: Query the Overpass API if the node is missing in the result set.
:return: The node
        :raises overpy.exception.DataIncomplete: The requested node is not available in the result cache.
        :raises overpy.exception.DataIncomplete: If resolve_missing is True and the node can't be resolved.
"""
nodes = self.get_nodes(node_id=node_id)
if len(nodes) == 0:
if not resolve_missing:
raise exception.DataIncomplete("Resolve missing nodes is disabled")
query = ("\n"
"[out:json];\n"
"node({node_id});\n"
"out body;\n"
)
query = query.format(
node_id=node_id
)
tmp_result = self.api.query(query)
self.expand(tmp_result)
nodes = self.get_nodes(node_id=node_id)
if len(nodes) == 0:
raise exception.DataIncomplete("Unable to resolve all nodes")
return nodes[0]
def get_nodes(self, node_id: Optional[int] = None) -> List["Node"]:
"""
Alias for get_elements() but filter the result by Node()
:param node_id: The Id of the node
:type node_id: Integer
:return: List of elements
"""
return self.get_elements(Node, elem_id=node_id)
def get_relation(self, rel_id: int, resolve_missing: bool = False) -> "Relation":
"""
Get a relation by its ID.
:param rel_id: The relation ID
:param resolve_missing: Query the Overpass API if the relation is missing in the result set.
:return: The relation
:raises overpy.exception.DataIncomplete: The requested relation is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the relation can't be resolved.
"""
relations = self.get_relations(rel_id=rel_id)
if len(relations) == 0:
if resolve_missing is False:
raise exception.DataIncomplete("Resolve missing relations is disabled")
query = ("\n"
"[out:json];\n"
"relation({relation_id});\n"
"out body;\n"
)
query = query.format(
relation_id=rel_id
)
tmp_result = self.api.query(query)
self.expand(tmp_result)
relations = self.get_relations(rel_id=rel_id)
if len(relations) == 0:
raise exception.DataIncomplete("Unable to resolve requested reference")
return relations[0]
    def get_relations(self, rel_id: Optional[int] = None) -> List["Relation"]:
"""
Alias for get_elements() but filter the result by Relation
:param rel_id: Id of the relation
:return: List of elements
"""
return self.get_elements(Relation, elem_id=rel_id)
def get_way(self, way_id: int, resolve_missing: bool = False) -> "Way":
"""
Get a way by its ID.
:param way_id: The way ID
:param resolve_missing: Query the Overpass API if the way is missing in the result set.
:return: The way
:raises overpy.exception.DataIncomplete: The requested way is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the way can't be resolved.
"""
ways = self.get_ways(way_id=way_id)
if len(ways) == 0:
if resolve_missing is False:
raise exception.DataIncomplete("Resolve missing way is disabled")
query = ("\n"
"[out:json];\n"
"way({way_id});\n"
"out body;\n"
)
query = query.format(
way_id=way_id
)
tmp_result = self.api.query(query)
self.expand(tmp_result)
ways = self.get_ways(way_id=way_id)
if len(ways) == 0:
raise exception.DataIncomplete("Unable to resolve requested way")
return ways[0]
def get_ways(self, way_id: Optional[int] = None) -> List["Way"]:
"""
Alias for get_elements() but filter the result by Way
:param way_id: The Id of the way
:return: List of elements
"""
return self.get_elements(Way, elem_id=way_id)
area_ids = property(get_area_ids)
areas = property(get_areas)
node_ids = property(get_node_ids)
nodes = property(get_nodes)
relation_ids = property(get_relation_ids)
relations = property(get_relations)
way_ids = property(get_way_ids)
ways = property(get_ways)
class Element:
"""
Base element
"""
_type_value: str
def __init__(self, attributes: Optional[dict] = None, result: Optional[Result] = None, tags: Optional[Dict] = None):
"""
:param attributes: Additional attributes
:param result: The result object this element belongs to
        :param tags: Dict of tags
"""
self._result = result
        self.attributes = attributes if attributes is not None else {}
# ToDo: Add option to modify attribute modifiers
attribute_modifiers: Dict[str, Callable] = dict(GLOBAL_ATTRIBUTE_MODIFIERS.items())
for n, m in attribute_modifiers.items():
if n in self.attributes:
self.attributes[n] = m(self.attributes[n])
self.id: int
self.tags = tags
@classmethod
def get_center_from_json(cls, data: dict) -> Tuple[Decimal, Decimal]:
"""
Get center information from json data
:param data: json data
:return: tuple with two elements: lat and lon
"""
center_lat = None
center_lon = None
center = data.get("center")
        if isinstance(center, dict):
            center_lat = center.get("lat")
            center_lon = center.get("lon")
            if center_lat is None or center_lon is None:
                raise ValueError("Unable to get lat or lon of way center.")
            center_lat = Decimal(center_lat)
            center_lon = Decimal(center_lon)
        return center_lat, center_lon
@classmethod
def get_center_from_xml_dom(cls, sub_child: xml.etree.ElementTree.Element) -> Tuple[Decimal, Decimal]:
        center_lat_str: Optional[str] = sub_child.attrib.get("lat")
        center_lon_str: Optional[str] = sub_child.attrib.get("lon")
if center_lat_str is None or center_lon_str is None:
raise ValueError("Unable to get lat or lon of way center.")
center_lat = Decimal(center_lat_str)
center_lon = Decimal(center_lon_str)
return center_lat, center_lon
@classmethod
def from_json(cls: Type[ElementTypeVar], data: dict, result: Optional[Result] = None) -> ElementTypeVar:
"""
Create new Element() from json data
:param data:
:param result:
:return:
"""
raise NotImplementedError
@classmethod
def from_xml(
cls: Type[ElementTypeVar],
child: xml.etree.ElementTree.Element,
result: Optional[Result] = None) -> ElementTypeVar:
"""
Create new Element() element from XML data
"""
raise NotImplementedError
class Area(Element):
"""
Class to represent an element of type area
"""
_type_value = "area"
def __init__(self, area_id: Optional[int] = None, **kwargs):
"""
:param area_id: Id of the area element
:param kwargs: Additional arguments are passed directly to the parent class
"""
Element.__init__(self, **kwargs)
        #: The id of the area
self.id = area_id
def __repr__(self) -> str:
return f"<overpy.Area id={self.id}>"
@classmethod
def from_json(cls, data: dict, result: Optional[Result] = None) -> "Area":
"""
Create new Area element from JSON data
:param data: Element data from JSON
:param result: The result this element belongs to
        :return: New instance of Area
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
tags = data.get("tags", {})
area_id = data.get("id")
attributes = {}
ignore = ["id", "tags", "type"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
return cls(area_id=area_id, attributes=attributes, tags=tags, result=result)
@classmethod
def from_xml(cls, child: xml.etree.ElementTree.Element, result: Optional[Result] = None) -> "Area":
"""
        Create new Area element from XML data
        :param child: XML node to be parsed
        :param result: The result this element belongs to
        :return: New Area object
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If the ref attribute of the xml node is not provided
:raises ValueError: If a tag doesn't have a name
"""
if child.tag.lower() != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
tags = {}
for sub_child in child:
if sub_child.tag.lower() == "tag":
name = sub_child.attrib.get("k")
if name is None:
raise ValueError("Tag without name/key.")
value = sub_child.attrib.get("v")
tags[name] = value
area_id_str: Optional[str] = child.attrib.get("id")
area_id: Optional[int] = None
if area_id_str is not None:
area_id = int(area_id_str)
attributes = {}
ignore = ["id"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
return cls(area_id=area_id, attributes=attributes, tags=tags, result=result)
class Node(Element):
"""
Class to represent an element of type node
"""
_type_value = "node"
def __init__(
self,
node_id: Optional[int] = None,
lat: Optional[Union[Decimal, float]] = None,
lon: Optional[Union[Decimal, float]] = None,
**kwargs):
"""
:param lat: Latitude
:param lon: Longitude
:param node_id: Id of the node element
:param kwargs: Additional arguments are passed directly to the parent class
"""
Element.__init__(self, **kwargs)
self.id = node_id
self.lat = lat
self.lon = lon
def __repr__(self) -> str:
return f"<overpy.Node id={self.id} lat={self.lat} lon={self.lon}>"
@classmethod
def from_json(cls, data: dict, result: Optional[Result] = None) -> "Node":
"""
Create new Node element from JSON data
:param data: Element data from JSON
:param result: The result this element belongs to
:return: New instance of Node
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
tags = data.get("tags", {})
node_id = data.get("id")
lat = data.get("lat")
lon = data.get("lon")
attributes = {}
ignore = ["type", "id", "lat", "lon", "tags"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
return cls(node_id=node_id, lat=lat, lon=lon, tags=tags, attributes=attributes, result=result)
@classmethod
def from_xml(cls, child: xml.etree.ElementTree.Element, result: Optional[Result] = None) -> "Node":
"""
        Create new Node element from XML data
        :param child: XML node to be parsed
        :param result: The result this element belongs to
        :return: New Node object
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If a tag doesn't have a name
"""
if child.tag.lower() != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
tags = {}
for sub_child in child:
if sub_child.tag.lower() == "tag":
name = sub_child.attrib.get("k")
if name is None:
raise ValueError("Tag without name/key.")
value = sub_child.attrib.get("v")
tags[name] = value
node_id: Optional[int] = None
node_id_str: Optional[str] = child.attrib.get("id")
if node_id_str is not None:
node_id = int(node_id_str)
lat: Optional[Decimal] = None
lat_str: Optional[str] = child.attrib.get("lat")
if lat_str is not None:
lat = Decimal(lat_str)
lon: Optional[Decimal] = None
lon_str: Optional[str] = child.attrib.get("lon")
if lon_str is not None:
lon = Decimal(lon_str)
attributes = {}
ignore = ["id", "lat", "lon"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
return cls(node_id=node_id, lat=lat, lon=lon, tags=tags, attributes=attributes, result=result)
class Way(Element):
"""
Class to represent an element of type way
"""
_type_value = "way"
def __init__(
self,
way_id: Optional[int] = None,
center_lat: Optional[Union[Decimal, float]] = None,
center_lon: Optional[Union[Decimal, float]] = None,
node_ids: Optional[Union[List[int], Tuple[int]]] = None,
**kwargs):
"""
:param node_ids: List of node IDs
:param way_id: Id of the way element
:param kwargs: Additional arguments are passed directly to the parent class
"""
Element.__init__(self, **kwargs)
#: The id of the way
self.id = way_id
#: List of Ids of the associated nodes
self._node_ids = node_ids
#: The lat/lon of the center of the way (optional depending on query)
self.center_lat = center_lat
self.center_lon = center_lon
def __repr__(self):
return f"<overpy.Way id={self.id} nodes={self._node_ids}>"
@property
def nodes(self) -> List[Node]:
"""
List of nodes associated with the way.
"""
return self.get_nodes()
def get_nodes(self, resolve_missing: bool = False) -> List[Node]:
"""
Get the nodes defining the geometry of the way
:param resolve_missing: Try to resolve missing nodes.
:return: List of nodes
:raises overpy.exception.DataIncomplete: At least one referenced node is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and at least one node can't be resolved.
"""
result = []
resolved = False
for node_id in self._node_ids:
try:
node = self._result.get_node(node_id)
except exception.DataIncomplete:
node = None
if node is not None:
result.append(node)
continue
if not resolve_missing:
raise exception.DataIncomplete("Resolve missing nodes is disabled")
# We tried to resolve the data but some nodes are still missing
if resolved:
raise exception.DataIncomplete("Unable to resolve all nodes")
query = ("\n"
"[out:json];\n"
"way({way_id});\n"
"node(w);\n"
"out body;\n"
)
query = query.format(
way_id=self.id
)
tmp_result = self._result.api.query(query)
self._result.expand(tmp_result)
resolved = True
try:
node = self._result.get_node(node_id)
except exception.DataIncomplete:
node = None
if node is None:
raise exception.DataIncomplete("Unable to resolve all nodes")
result.append(node)
return result
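    # Resolution sketch (illustrative): for a way `w` taken from a Result that still has
    # its `api` attached, `w.get_nodes(resolve_missing=True)` issues one extra Overpass
    # query ("way(<id>); node(w); out body;") via the parent Result, expands the Result
    # with the fetched nodes, and only raises DataIncomplete if nodes are still missing
    # after that single resolution attempt.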
@classmethod
def from_json(cls, data: dict, result: Optional[Result] = None) -> "Way":
"""
Create new Way element from JSON data
:param data: Element data from JSON
:param result: The result this element belongs to
:return: New instance of Way
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
tags = data.get("tags", {})
way_id = data.get("id")
node_ids = data.get("nodes")
(center_lat, center_lon) = cls.get_center_from_json(data=data)
attributes = {}
ignore = ["center", "id", "nodes", "tags", "type"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
return cls(
attributes=attributes,
center_lat=center_lat,
center_lon=center_lon,
node_ids=node_ids,
tags=tags,
result=result,
way_id=way_id
)
@classmethod
def from_xml(cls, child: xml.etree.ElementTree.Element, result: Optional[Result] = None) -> "Way":
"""
        Create new Way element from XML data
        :param child: XML node to be parsed
        :param result: The result this element belongs to
        :return: New Way object
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If the ref attribute of the xml node is not provided
:raises ValueError: If a tag doesn't have a name
"""
if child.tag.lower() != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
tags = {}
node_ids = []
center_lat = None
center_lon = None
for sub_child in child:
if sub_child.tag.lower() == "tag":
name = sub_child.attrib.get("k")
if name is None:
raise ValueError("Tag without name/key.")
value = sub_child.attrib.get("v")
tags[name] = value
if sub_child.tag.lower() == "nd":
ref_id_str = sub_child.attrib.get("ref")
if ref_id_str is None:
raise ValueError("Unable to find required ref value.")
ref_id: int = int(ref_id_str)
node_ids.append(ref_id)
if sub_child.tag.lower() == "center":
(center_lat, center_lon) = cls.get_center_from_xml_dom(sub_child=sub_child)
way_id: Optional[int] = None
way_id_str: Optional[str] = child.attrib.get("id")
if way_id_str is not None:
way_id = int(way_id_str)
attributes = {}
ignore = ["id"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
return cls(way_id=way_id, center_lat=center_lat, center_lon=center_lon,
attributes=attributes, node_ids=node_ids, tags=tags, result=result)
class Relation(Element):
"""
Class to represent an element of type relation
"""
_type_value = "relation"
def __init__(
self,
rel_id: Optional[int] = None,
center_lat: Optional[Union[Decimal, float]] = None,
center_lon: Optional[Union[Decimal, float]] = None,
members: Optional[List["RelationMember"]] = None,
**kwargs):
"""
        :param members: List of relation members
        :param rel_id: Id of the relation element
        :param kwargs: Additional arguments are passed directly to the parent class
"""
Element.__init__(self, **kwargs)
self.id = rel_id
self.members = members
        #: The lat/lon of the center of the relation (optional depending on query)
self.center_lat = center_lat
self.center_lon = center_lon
def __repr__(self):
return f"<overpy.Relation id={self.id}>"
@classmethod
def from_json(cls, data: dict, result: Optional[Result] = None) -> "Relation":
"""
Create new Relation element from JSON data
:param data: Element data from JSON
:param result: The result this element belongs to
:return: New instance of Relation
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
tags = data.get("tags", {})
rel_id = data.get("id")
(center_lat, center_lon) = cls.get_center_from_json(data=data)
members = []
supported_members = [RelationNode, RelationWay, RelationRelation]
for member in data.get("members", []):
type_value = member.get("type")
for member_cls in supported_members:
if member_cls._type_value == type_value:
members.append(
member_cls.from_json(
member,
result=result
)
)
attributes = {}
ignore = ["id", "members", "tags", "type"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
return cls(
rel_id=rel_id,
attributes=attributes,
center_lat=center_lat,
center_lon=center_lon,
members=members,
tags=tags,
result=result
)
@classmethod
def from_xml(cls, child: xml.etree.ElementTree.Element, result: Optional[Result] = None) -> "Relation":
"""
        Create new Relation element from XML data
        :param child: XML node to be parsed
        :param result: The result this element belongs to
        :return: New Relation object
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If a tag doesn't have a name
"""
if child.tag.lower() != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
tags = {}
members = []
center_lat = None
center_lon = None
supported_members = [RelationNode, RelationWay, RelationRelation, RelationArea]
for sub_child in child:
if sub_child.tag.lower() == "tag":
name = sub_child.attrib.get("k")
if name is None:
raise ValueError("Tag without name/key.")
value = sub_child.attrib.get("v")
tags[name] = value
if sub_child.tag.lower() == "member":
type_value = sub_child.attrib.get("type")
for member_cls in supported_members:
if member_cls._type_value == type_value:
members.append(
member_cls.from_xml(
sub_child,
result=result
)
)
if sub_child.tag.lower() == "center":
(center_lat, center_lon) = cls.get_center_from_xml_dom(sub_child=sub_child)
rel_id: Optional[int] = None
rel_id_str: Optional[str] = child.attrib.get("id")
if rel_id_str is not None:
rel_id = int(rel_id_str)
attributes = {}
ignore = ["id"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
return cls(
rel_id=rel_id,
attributes=attributes,
center_lat=center_lat,
center_lon=center_lon,
members=members,
tags=tags,
result=result
)
class RelationMember:
"""
Base class to represent a member of a relation.
"""
_type_value: Optional[str] = None
def __init__(
self,
attributes: Optional[dict] = None,
geometry: Optional[List["RelationWayGeometryValue"]] = None,
ref: Optional[int] = None,
role: Optional[str] = None,
result: Optional[Result] = None):
"""
:param ref: Reference Id
:type ref: Integer
:param role: The role of the relation member
:type role: String
:param result:
"""
self.ref = ref
self._result = result
self.role = role
self.attributes = attributes
self.geometry = geometry
@classmethod
def from_json(cls, data: dict, result: Optional[Result] = None) -> "RelationMember":
"""
Create new RelationMember element from JSON data
:param data: Element data from JSON
:param result: The result this element belongs to
:return: New instance of RelationMember
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
ref = data.get("ref")
role = data.get("role")
attributes = {}
ignore = ["geometry", "type", "ref", "role"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
geometry = data.get("geometry")
if isinstance(geometry, list):
geometry_orig = geometry
geometry = []
for v in geometry_orig:
geometry.append(
RelationWayGeometryValue(
lat=v.get("lat"),
lon=v.get("lon")
)
)
else:
geometry = None
return cls(
attributes=attributes,
geometry=geometry,
ref=ref,
role=role,
result=result
)
@classmethod
def from_xml(
cls,
child: xml.etree.ElementTree.Element,
result: Optional[Result] = None) -> "RelationMember":
"""
Create new RelationMember from XML data
:param child: XML node to be parsed
:param result: The result this element belongs to
        :return: New relation member object
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
"""
if child.attrib.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
ref: Optional[int] = None
ref_str: Optional[str] = child.attrib.get("ref")
if ref_str is not None:
ref = int(ref_str)
role: Optional[str] = child.attrib.get("role")
attributes = {}
ignore = ["geometry", "ref", "role", "type"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
geometry = None
for sub_child in child:
if sub_child.tag.lower() == "nd":
if geometry is None:
geometry = []
geometry.append(
RelationWayGeometryValue(
lat=Decimal(sub_child.attrib["lat"]),
lon=Decimal(sub_child.attrib["lon"])
)
)
return cls(
attributes=attributes,
geometry=geometry,
ref=ref,
role=role,
result=result
)
class RelationNode(RelationMember):
_type_value = "node"
def resolve(self, resolve_missing: bool = False) -> Node:
return self._result.get_node(self.ref, resolve_missing=resolve_missing)
def __repr__(self):
return f"<overpy.RelationNode ref={self.ref} role={self.role}>"
class RelationWay(RelationMember):
_type_value = "way"
def resolve(self, resolve_missing: bool = False) -> Way:
return self._result.get_way(self.ref, resolve_missing=resolve_missing)
def __repr__(self):
return f"<overpy.RelationWay ref={self.ref} role={self.role}>"
class RelationWayGeometryValue:
def __init__(self, lat: Union[Decimal, float], lon: Union[Decimal, float]):
self.lat = lat
self.lon = lon
def __repr__(self):
return f"<overpy.RelationWayGeometryValue lat={self.lat} lon={self.lon}>"
class RelationRelation(RelationMember):
_type_value = "relation"
def resolve(self, resolve_missing: bool = False) -> Relation:
return self._result.get_relation(self.ref, resolve_missing=resolve_missing)
def __repr__(self):
return f"<overpy.RelationRelation ref={self.ref} role={self.role}>"
class RelationArea(RelationMember):
_type_value = "area"
def resolve(self, resolve_missing: bool = False) -> Area:
return self._result.get_area(self.ref, resolve_missing=resolve_missing)
def __repr__(self):
return f"<overpy.RelationArea ref={self.ref} role={self.role}>"
class OSMSAXHandler(handler.ContentHandler):
"""
SAX parser for Overpass XML response.
"""
#: Tuple of opening elements to ignore
ignore_start: ClassVar = ('osm', 'meta', 'note', 'bounds', 'remark')
#: Tuple of closing elements to ignore
ignore_end: ClassVar = ('osm', 'meta', 'note', 'bounds', 'remark', 'tag', 'nd', 'center')
def __init__(self, result: Result):
"""
:param result: Append results to this result set.
"""
handler.ContentHandler.__init__(self)
self._result = result
self._curr: Dict[str, Any] = {}
#: Current relation member object
self.cur_relation_member: Optional[RelationMember] = None
def startElement(self, name: str, attrs: dict):
"""
Handle opening elements.
:param name: Name of the element
:param attrs: Attributes of the element
"""
if name in self.ignore_start:
return
try:
handler = getattr(self, '_handle_start_%s' % name)
except AttributeError:
raise KeyError("Unknown element start '%s'" % name)
handler(attrs)
def endElement(self, name: str):
"""
Handle closing elements
:param name: Name of the element
"""
if name in self.ignore_end:
return
try:
handler = getattr(self, '_handle_end_%s' % name)
except AttributeError:
raise KeyError("Unknown element end '%s'" % name)
handler()
def _handle_start_center(self, attrs: dict):
"""
Handle opening center element
:param attrs: Attributes of the element
:type attrs: Dict
"""
center_lat = attrs.get("lat")
center_lon = attrs.get("lon")
if center_lat is None or center_lon is None:
raise ValueError("Unable to get lat or lon of way center.")
self._curr["center_lat"] = Decimal(center_lat)
self._curr["center_lon"] = Decimal(center_lon)
def _handle_start_tag(self, attrs: dict):
"""
Handle opening tag element
:param attrs: Attributes of the element
"""
try:
tag_key = attrs['k']
except KeyError:
raise ValueError("Tag without name/key.")
self._curr['tags'][tag_key] = attrs.get('v')
def _handle_start_node(self, attrs: dict):
"""
Handle opening node element
:param attrs: Attributes of the element
"""
self._curr = {
'attributes': dict(attrs),
'lat': None,
'lon': None,
'node_id': None,
'tags': {}
}
if attrs.get('id', None) is not None:
self._curr['node_id'] = int(attrs['id'])
del self._curr['attributes']['id']
if attrs.get('lat', None) is not None:
self._curr['lat'] = Decimal(attrs['lat'])
del self._curr['attributes']['lat']
if attrs.get('lon', None) is not None:
self._curr['lon'] = Decimal(attrs['lon'])
del self._curr['attributes']['lon']
def _handle_end_node(self):
"""
Handle closing node element
"""
self._result.append(Node(result=self._result, **self._curr))
self._curr = {}
def _handle_start_way(self, attrs: dict):
"""
Handle opening way element
:param attrs: Attributes of the element
"""
self._curr = {
'center_lat': None,
'center_lon': None,
'attributes': dict(attrs),
'node_ids': [],
'tags': {},
'way_id': None
}
if attrs.get('id', None) is not None:
self._curr['way_id'] = int(attrs['id'])
del self._curr['attributes']['id']
def _handle_end_way(self):
"""
Handle closing way element
"""
self._result.append(Way(result=self._result, **self._curr))
self._curr = {}
def _handle_start_area(self, attrs: dict):
"""
Handle opening area element
:param attrs: Attributes of the element
"""
self._curr = {
'attributes': dict(attrs),
'tags': {},
'area_id': None
}
if attrs.get('id', None) is not None:
self._curr['area_id'] = int(attrs['id'])
del self._curr['attributes']['id']
def _handle_end_area(self):
"""
Handle closing area element
"""
self._result.append(Area(result=self._result, **self._curr))
self._curr = {}
def _handle_start_nd(self, attrs: dict):
"""
Handle opening nd element
:param attrs: Attributes of the element
"""
if isinstance(self.cur_relation_member, RelationWay):
if self.cur_relation_member.geometry is None:
self.cur_relation_member.geometry = []
self.cur_relation_member.geometry.append(
RelationWayGeometryValue(
lat=Decimal(attrs["lat"]),
lon=Decimal(attrs["lon"])
)
)
else:
try:
node_ref = attrs['ref']
except KeyError:
raise ValueError("Unable to find required ref value.")
self._curr['node_ids'].append(int(node_ref))
def _handle_start_relation(self, attrs: dict):
"""
Handle opening relation element
:param attrs: Attributes of the element
"""
self._curr = {
'attributes': dict(attrs),
'members': [],
'rel_id': None,
'tags': {}
}
if attrs.get('id', None) is not None:
self._curr['rel_id'] = int(attrs['id'])
del self._curr['attributes']['id']
def _handle_end_relation(self):
"""
Handle closing relation element
"""
self._result.append(Relation(result=self._result, **self._curr))
self._curr = {}
def _handle_start_member(self, attrs: dict):
"""
Handle opening member element
:param attrs: Attributes of the element
"""
params: Dict[str, Any] = {
# ToDo: Parse attributes
'attributes': {},
'ref': None,
'result': self._result,
'role': None
}
if attrs.get('ref', None):
params['ref'] = int(attrs['ref'])
if attrs.get('role', None):
params['role'] = attrs['role']
cls_map = {
"area": RelationArea,
"node": RelationNode,
"relation": RelationRelation,
"way": RelationWay
}
        cls: Optional[Type[RelationMember]] = cls_map.get(attrs["type"])
if cls is None:
raise ValueError("Undefined type for member: '%s'" % attrs['type'])
self.cur_relation_member = cls(**params)
self._curr['members'].append(self.cur_relation_member)
def _handle_end_member(self):
self.cur_relation_member = None
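# Hedged runnable demo (added for illustration; not part of upstream overpy). It only
# runs when this module is executed directly and assumes the default Overpass endpoint
# is reachable; the bounding box below is arbitrary.
if __name__ == "__main__":  # pragma: no cover
    _demo_api = Overpass()
    _demo_result = _demo_api.query(
        "[out:json];node(50.745,7.17,50.75,7.18);out body;"
    )
    print("nodes:", len(_demo_result.nodes),
          "ways:", len(_demo_result.ways),
          "relations:", len(_demo_result.relations))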
|
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 14:26:20 2016
@author: apezeshk
"""
import numpy as np
import lasagne
from lasagne.layers import dnn
import theano.tensor as T
import theano
import scipy.io as sio
import os
def Path_create(file_name): #takes something like p0023_2000123_s324 and returns p0023/2000123/s324
spl_dir=file_name[:].replace('_','/')
return spl_dir
def Build_3dfcn_mask(init_norm, inputParamsNetwork, input_var=None):
dropout = inputParamsNetwork['dropout']
# print(dropout)
    shape_dims = [int(d) for d in inputParamsNetwork['shape'].split(',')]
    network = lasagne.layers.InputLayer(
        shape=(None, 1, shape_dims[0], shape_dims[1], shape_dims[2]),
        input_var=input_var)
if inputParamsNetwork['n_layer'] == 2 :
network = lasagne.layers.dnn.Conv3DDNNLayer(network, num_filters=1, pad='same', filter_size=(1, 1, 1),
stride=(1, 1, 1),
nonlinearity=inputParamsNetwork['nonLinearity'],
W=init_norm,
b=inputParamsNetwork['biasInit'],
flip_filters=False
)
network = lasagne.layers.dnn.MaxPool3DDNNLayer(network, pool_size=(2, 2, 1))
network = lasagne.layers.dnn.Conv3DDNNLayer(network, num_filters=1, pad='same', filter_size=(1, 1, 1),
stride=(1, 1, 1),
nonlinearity=inputParamsNetwork['nonLinearity'],
W=init_norm,
b=inputParamsNetwork['biasInit'],
)
network = lasagne.layers.dnn.MaxPool3DDNNLayer(network, pool_size=(2, 2, 2))
else:
network = lasagne.layers.dnn.Conv3DDNNLayer(network, num_filters=1, pad='same', filter_size=(1, 1, 1),
stride=(1, 1, 1),
nonlinearity=inputParamsNetwork['nonLinearity'],
W=init_norm,
b=inputParamsNetwork['biasInit'],
flip_filters=False
)
network = lasagne.layers.dnn.MaxPool3DDNNLayer(network, pool_size=(2, 2, 1))
network = lasagne.layers.dnn.Conv3DDNNLayer(network, num_filters=1, pad='same', filter_size=(1, 1, 1),
stride=(1, 1, 1),
nonlinearity=inputParamsNetwork['nonLinearity'],
W=init_norm,
b=inputParamsNetwork['biasInit'],
)
network = lasagne.layers.dnn.MaxPool3DDNNLayer(network, pool_size=(2, 2, 2))
network = lasagne.layers.dnn.Conv3DDNNLayer(network, num_filters=1, pad='same', filter_size=(1, 1, 1),
stride=(1, 1, 1),
nonlinearity=inputParamsNetwork['nonLinearity'],
W=init_norm,
b=inputParamsNetwork['biasInit'],
)
#This is the layer that substitutes the fully connected layer according to current patch size & architecture
network = lasagne.layers.dnn.Conv3DDNNLayer(network, num_filters=1, pad=0, filter_size=(9, 9, 4),
stride=(1, 1, 1),
nonlinearity=inputParamsNetwork['nonLinearity'],
W=init_norm,
b=inputParamsNetwork['biasInit'],
)
if 0:
        # And, finally, the 2-unit output layer with dropout on its inputs:
# Use this option to check and make sure the output from FC at previous layer of original CNN and fully convolutional
# counterpart in FCN give the same result
network = lasagne.layers.DenseLayer(
lasagne.layers.dropout(network, p=dropout),
num_units=2,
nonlinearity=lasagne.nonlinearities.softmax)
else:
# Use this option to complete the conversion to FCN (i.e. no more dense layers)
# will do the softmax later, it is giving error if we use it here for some reason
network = lasagne.layers.dnn.Conv3DDNNLayer(network, num_filters=1, pad=0, filter_size=(1, 1, 1),
stride=(1, 1, 1),
nonlinearity=inputParamsNetwork['nonLinearity'],
W=init_norm,
)
return network
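# Shape check (illustrative, based on the 36x36x8 patch size configured further below):
# every conv layer uses pad='same', so only the two pooling layers shrink the volume:
# (36, 36, 8) -> pool (2, 2, 1) -> (18, 18, 8) -> pool (2, 2, 2) -> (9, 9, 4).
# That is why the layer replacing the fully connected layer uses filter_size=(9, 9, 4)
# with pad=0: on a full 36x36x8 patch it collapses the feature map to 1x1x1, while on a
# larger volume it slides like an ordinary convolution, which is what makes the network
# fully convolutional.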
##############################
##############################
# And load them again later on like this:
pathSavedNetwork = '/home/apezeshk/Codes/DeepMed/models/cnn_36368_20160921114711.npz'
pathSavedSamples = '/home/apezeshk/Codes/DeepMed/models/cnn_36368_20160921114711_samples.npz'
currentCaseName = 'p0012_20000101_s3000561.npy'
input_3D_npy = '/diskStation/LIDC/LIDC_NUMPY_3d'
masterFolderLidc = '/raida/apezeshk/lung_dicom_dir'
########################
######Input Params######
inputParamsConfigLocal = {}
inputParamsConfigLocal['input_shape'] = '36, 36, 8'
inputParamsConfigLocal['learning_rate'] = '0.05'
inputParamsConfigLocal['momentum'] = '0.9'
inputParamsConfigLocal['num_epochs'] = '1'
inputParamsConfigLocal['batch_size'] = '1'
inputParamsConfigLocal['data_path'] = '/diskStation/LIDC/36368/'
inputParamsConfigLocal['train_set_size'] = '60000'
inputParamsConfigLocal['test_set_size'] = '500'
inputParamsConfigLocal['positive_set_ratio'] = '0.3'
inputParamsConfigLocal['dropout'] = '0.1'
inputParamsConfigLocal['nonlinearityToUse'] = 'relu'
inputParamsConfigLocal['numberOfLayers'] = 3
inputParamsConfigLocal['augmentationFlag'] = 1
inputParamsConfigLocal['weightInitToUse'] ='He' #weight initialization; either 'normal' or 'He' (for HeNormal)
inputParamsConfigLocal['lrDecayFlag'] = 1 #1 for using learning rate decay, 0 for constant learning rate throughout training
inputParamsConfigLocal['biasInitVal'] = 0.0 #initial value for the biases; for relu a small positive value keeps the inputs to relu positive at the start
inputParamsConfigAll = inputParamsConfigLocal
input_shape = inputParamsConfigAll['input_shape']
learning_rate = inputParamsConfigAll['learning_rate']
momentum = inputParamsConfigAll['momentum']
num_epochs = inputParamsConfigAll['num_epochs']
batch_size = inputParamsConfigAll['batch_size']
data_path = inputParamsConfigAll['data_path']
train_set_size = inputParamsConfigAll['train_set_size']
test_set_size = inputParamsConfigAll['test_set_size']
positive_set_ratio = inputParamsConfigAll['positive_set_ratio']
dropout = inputParamsConfigAll['dropout']
nonlinearityToUse = inputParamsConfigAll['nonlinearityToUse']
augmentationFlag = inputParamsConfigAll['augmentationFlag']
numberOfLayers = inputParamsConfigAll['numberOfLayers']
biasInitVal = inputParamsConfigAll['biasInitVal']
weight_init = lasagne.init.Normal() #we now use He, but since everything is being loaded this is ok!!
biasInit = lasagne.init.Constant(biasInitVal) #for relu use biasInit=1 s.t. inputs to relu are positive in beginning
nonLinearity = lasagne.nonlinearities.linear #use linear since u just want propagation of mask thru model
inputParamsNetwork = dict(n_layer=numberOfLayers, shape=input_shape,dropout=float(dropout), nonLinearity=nonLinearity,
biasInit = biasInit)
dtensor5 = T.TensorType('float32', (False,) * 5)
input_var = dtensor5('inputs')
network_fcn_mask = Build_3dfcn_mask(weight_init, inputParamsNetwork, input_var)
param_values_fcn_default = lasagne.layers.get_all_param_values(network_fcn_mask) #just so to get the fully connected dimension
######Input Params######
########################
#with np.load(pathSavedNetwork) as f:
# param_values_fullnetwork = [f['arr_%d' % i] for i in range(len(f.files))]
W0 = np.ones((1,1,1,1,1)).astype('float32')
b0 = np.zeros((1,)).astype('float32')
W2 = np.ones((1,1,1,1,1)).astype('float32')
b2 = np.zeros((1,)).astype('float32')
if numberOfLayers == 2:
W4 = np.zeros(np.shape(param_values_fcn_default[4])[2:]).astype('float32') #get the filter shape of first fully connected layer in original network
current_filt_shape = W4.shape
W4[int(np.floor(current_filt_shape[0]/2.0)), int(np.floor(current_filt_shape[1]/2.0)), int(np.floor(current_filt_shape[2]/2.0)-1)] = 1
W4[int(np.floor(current_filt_shape[0]/2.0)), int(np.floor(current_filt_shape[1]/2.0)), int(np.floor(current_filt_shape[2]/2.0))] = 1
W4 = W4 * 0.5 #this is so that the output range will not change (since instead of delta fn, 2 entries are equal to 1)
W4 = np.reshape(W4, (1,1,current_filt_shape[0],current_filt_shape[1],current_filt_shape[2])) #make it 5-tuple
b4 = np.zeros((1,)).astype('float32')
W6 = np.ones((1,1,1,1,1)).astype('float32')
b6 = np.zeros((1,)).astype('float32')
param_values_mask = []
param_values_mask.extend([W0, b0, W2, b2, W4, b4, W6, b6])
elif numberOfLayers == 3:
W4 = np.ones((1,1,1,1,1)).astype('float32')
b4 = np.zeros((1,)).astype('float32')
W6 = np.zeros(np.shape(param_values_fcn_default[6])[2:]).astype('float32') #get the filter shape of first fully connected layer in original network
current_filt_shape = W6.shape
# When fully connected layer has even size in z direction, e.g. (9,9,4), we can't have a delta function as filter
# So using a filter with same size, with two 0.5s in it in 2nd and 3rd indices as next best thing!
W6[int(np.floor(current_filt_shape[0]/2.0)), int(np.floor(current_filt_shape[1]/2.0)), int(np.floor(current_filt_shape[2]/2.0)-1)] = 1
W6[int(np.floor(current_filt_shape[0]/2.0)), int(np.floor(current_filt_shape[1]/2.0)), int(np.floor(current_filt_shape[2]/2.0))] = 1
W6 = W6 * 0.5 #this is so that the output range will not change (since instead of delta fn, 2 entries are equal to 1)
W6 = np.reshape(W6, (1,1,current_filt_shape[0],current_filt_shape[1],current_filt_shape[2])) #make it 5-tuple
b6 = np.zeros((1,)).astype('float32')
W8 = np.ones((1,1,1,1,1)).astype('float32')
b8 = np.zeros((1,)).astype('float32')
param_values_mask = []
param_values_mask.extend([W0, b0, W2, b2, W4, b4, W6, b6, W8, b8])
lasagne.layers.set_all_param_values(network_fcn_mask, param_values_mask) #load the model with the weights/biases
mask_prediction = lasagne.layers.get_output(network_fcn_mask, deterministic=True)
val_fn = theano.function([input_var], [mask_prediction]) # ,mode='DebugMode')
################################################################################
######Now load the nodule mask, and shove it into the network
################################################################################
full_volume_path=os.path.join(input_3D_npy, currentCaseName)
full_mask_path = os.path.join(masterFolderLidc, Path_create(os.path.basename(full_volume_path))[:-4])
mat_name = 'uniqueStats_' + os.path.basename(full_volume_path)[:-4] + '.mat'
uniqueStatsData = sio.loadmat(os.path.join(full_mask_path, mat_name))
full_mask = uniqueStatsData['allMaxRadiologistMsk']
full_mask = full_mask.astype('float32')
#TODO: verify the dtype of the nodule mask below; confirm whether it needs to be cast to int16 before the float32 conversion.
chopVolumeFlag = 1
cutPointFlag = 1
z_depth = 8
sub_vol_one = []
full_mask = full_mask.reshape((1, 1, 512, 512, full_mask.shape[2]))
if cutPointFlag == 1:
xCutPoints = [0, 512]
yCutPoints = [0, 512]
tmpFlag = 0
zCutPoints = [0]
zStep = 80
while tmpFlag != 7321: # to make the loop end, set tmpFlag=7321; otherwise hold prev slice number in it
currentZCut = tmpFlag + zStep
if currentZCut > full_mask.shape[4]:
currentZCut = full_mask.shape[4]
zCutPoints.append(currentZCut)
tmpFlag = 7321
else:
tmpFlag = currentZCut - z_depth # this is amount of overlap between consecutive chops in z direction
zCutPoints.append(currentZCut)
zCutPoints.append(tmpFlag)
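# Worked example (illustrative): with zStep = 80, z_depth = 8 and a mask of 150 slices,
# the loop above produces zCutPoints = [0, 80, 72, 150], i.e. the overlapping chops
# [0, 80) and [72, 150) that share z_depth = 8 slices in the z direction.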
z_size=[]
x_size=[]
y_size=[]
first_cube_flag=0
vol_scores_currentVol = np.empty((0, 2))
score_mat=np.zeros(())
vol_labels_currentVol = []
#If the last chop in the z direction is too thin (<= 10 slices), move the end of the previous chop
#and the start of the last chop back by 20 slices, i.e. take 20 slices from one cube and add them to
#the other so the final subvolume is thick enough for the FCN.
if (zCutPoints[-1]-zCutPoints[-2])<=10:
zCutPoints[-3]=zCutPoints[-3]-20
zCutPoints[-2] = zCutPoints[-2] - 20
for i in range(0, len(xCutPoints) // 2):
    for j in range(0, len(yCutPoints) // 2):
        for k in range(0, len(zCutPoints) // 2):
xStart = xCutPoints[2 * i]
xEnd = xCutPoints[2 * i + 1]
yStart = yCutPoints[2 * j]
yEnd = yCutPoints[2 * j + 1]
zStart = zCutPoints[2 * k]
zEnd = zCutPoints[2 * k + 1]
print(xStart, xEnd - 1, yStart, yEnd - 1, zStart, zEnd - 1)
asd = full_mask[0, 0, xStart:xEnd, yStart:yEnd, zStart:zEnd]
asd = asd.reshape((1, 1, asd.shape[0], asd.shape[1], asd.shape[2])) #put subvolume in 5D form for input to FCN
test_pred_full_mask = val_fn(asd)
test_pred_full_mask = test_pred_full_mask[0]
# test_pred_full_mask_softmax0 = np.exp(test_pred_full_mask[0, 0, :, :, :]) / (
# np.exp(test_pred_full_mask[0, 0, :, :, :]) + np.exp(test_pred_full_mask[0, 1, :, :, :]))
# test_pred_full_mask_softmax1 = np.exp(test_pred_full_mask[0, 1, :, :, :]) / (
# np.exp(test_pred_full_mask[0, 0, :, :, :]) + np.exp(test_pred_full_mask[0, 1, :, :, :]))
#tmp_sub_vol=test_pred_full_mask_softmax1
tmp_sub_vol = test_pred_full_mask.squeeze() #go from e.g. (1,1,120,120,25) to (120,120,25)
if xStart==xCutPoints[0] and yStart==yCutPoints[0]:
#NOTE: when u split the volume N times, the difference in size due to 0 padding in last layer
# is repeated N times also! So whereas if u passed the entire volume with first fully connected
# layer (converted to fully convolutional) of size (9,9,4) you would get -4+1=-3 as many slices,
# if you split the volume in 2 and pass each subvolume, you get another round of -3 slices in
# the end!!!
try:#This part adds the sub volumes back to back and overwrites the bad slice with the correct one
                        sub_vol_one=np.concatenate((sub_vol_one[:,:,:-2],tmp_sub_vol[:,:,2:]),axis=2) #concatenation margin is 2 since there is one max pool in Z and the last 2 slices are not correctly convolved
except:
sub_vol_one=tmp_sub_vol
sub_vol_one_bin = (sub_vol_one>0.0).astype('int') #convert to binary; it originally has 0.5 values due to z direction elongation in fully connected layer filter
|
|
from distutils.ccompiler import new_compiler as _new_compiler
from distutils.command.clean import clean, log
from distutils.core import Command
from distutils.dir_util import remove_tree
from distutils.errors import DistutilsExecError
from distutils.msvccompiler import MSVCCompiler
from setuptools import setup, find_packages, Extension, Distribution
from setuptools.command.build_ext import build_ext
from shlex import quote
from subprocess import Popen, PIPE
import argparse
import errno
import os
import platform
import re
import shlex
import sys
try:
    # This depends on _winreg, which is not available on non-Windows platforms.
from distutils.msvc9compiler import MSVCCompiler as MSVC9Compiler
except ImportError:
MSVC9Compiler = None
try:
from distutils._msvccompiler import MSVCCompiler as MSVC14Compiler
except ImportError:
MSVC14Compiler = None
try:
from Cython import __version__ as cython_version
from Cython.Build import cythonize
except ImportError:
cythonize = None
else:
# We depend upon some features in Cython 0.27; reject older ones.
if tuple(map(int, cython_version.split('.'))) < (0, 27):
print("Cython {} is too old for PyAV; ignoring it.".format(cython_version))
cythonize = None
# We will embed this metadata into the package so it can be recalled for debugging.
version = open('VERSION.txt').read().strip()
try:
git_commit, _ = Popen(['git', 'describe', '--tags'], stdout=PIPE, stderr=PIPE).communicate()
except OSError:
git_commit = None
else:
git_commit = git_commit.decode().strip()
_cflag_parser = argparse.ArgumentParser(add_help=False)
_cflag_parser.add_argument('-I', dest='include_dirs', action='append')
_cflag_parser.add_argument('-L', dest='library_dirs', action='append')
_cflag_parser.add_argument('-l', dest='libraries', action='append')
_cflag_parser.add_argument('-D', dest='define_macros', action='append')
_cflag_parser.add_argument('-R', dest='runtime_library_dirs', action='append')
def parse_cflags(raw_cflags):
raw_args = shlex.split(raw_cflags.strip())
args, unknown = _cflag_parser.parse_known_args(raw_args)
config = {k: v or [] for k, v in args.__dict__.items()}
for i, x in enumerate(config['define_macros']):
parts = x.split('=', 1)
        value = parts[1] or None if len(parts) == 2 else None
config['define_macros'][i] = (parts[0], value)
return config, ' '.join(quote(x) for x in unknown)
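# Illustrative example (hypothetical flags): parse_cflags('-I/usr/include/ffmpeg -lavcodec -DDEBUG=1')
# returns ({'include_dirs': ['/usr/include/ffmpeg'], 'library_dirs': [], 'libraries': ['avcodec'],
# 'define_macros': [('DEBUG', '1')], 'runtime_library_dirs': []}, '') -- known flags are split into
# distutils-style keyword arguments and anything unrecognized is handed back as a string.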
def get_library_config(name):
"""Get distutils-compatible extension extras for the given library.
This requires ``pkg-config``.
"""
try:
proc = Popen(['pkg-config', '--cflags', '--libs', name], stdout=PIPE, stderr=PIPE)
except OSError:
print('pkg-config is required for building PyAV')
exit(1)
raw_cflags, err = proc.communicate()
if proc.wait():
return
known, unknown = parse_cflags(raw_cflags.decode('utf8'))
if unknown:
print("pkg-config returned flags we don't understand: {}".format(unknown))
exit(1)
return known
def update_extend(dst, src):
"""Update the `dst` with the `src`, extending values where lists.
    Primarily useful for integrating results from `get_library_config`.
"""
for k, v in src.items():
existing = dst.setdefault(k, [])
for x in v:
if x not in existing:
existing.append(x)
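# Illustrative example: update_extend({'libraries': ['avcodec']}, {'libraries': ['avcodec', 'avutil']})
# mutates the first dict into {'libraries': ['avcodec', 'avutil']}: values are extended in place and
# entries already present are skipped, which keeps repeated pkg-config results from piling up.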
def unique_extend(a, *args):
a[:] = list(set().union(a, *args))
# Obtain the ffmpeg dir from the "--ffmpeg-dir=<dir>" argument
FFMPEG_DIR = None
for i, arg in enumerate(sys.argv):
if arg.startswith('--ffmpeg-dir='):
FFMPEG_DIR = arg.split('=')[1]
break
if FFMPEG_DIR is not None:
# delete the --ffmpeg-dir arg so that distutils does not see it
del sys.argv[i]
if not os.path.isdir(FFMPEG_DIR):
print('The specified ffmpeg directory does not exist')
exit(1)
else:
# Check the environment variable FFMPEG_DIR
FFMPEG_DIR = os.environ.get('FFMPEG_DIR')
if FFMPEG_DIR is not None:
if not os.path.isdir(FFMPEG_DIR):
FFMPEG_DIR = None
if FFMPEG_DIR is not None:
ffmpeg_lib = os.path.join(FFMPEG_DIR, 'lib')
ffmpeg_include = os.path.join(FFMPEG_DIR, 'include')
if os.path.exists(ffmpeg_lib):
ffmpeg_lib = [ffmpeg_lib]
else:
ffmpeg_lib = [FFMPEG_DIR]
if os.path.exists(ffmpeg_include):
ffmpeg_include = [ffmpeg_include]
else:
ffmpeg_include = [FFMPEG_DIR]
else:
ffmpeg_lib = []
ffmpeg_include = []
# The "extras" to be supplied to every one of our modules.
# This is expanded heavily by the `config` command.
extension_extra = {
'include_dirs': ['include'] + ffmpeg_include, # The first are PyAV's includes.
'libraries' : [],
'library_dirs': ffmpeg_lib,
}
# The macros which describe the current PyAV version.
config_macros = {
"PYAV_VERSION": version,
"PYAV_VERSION_STR": '"%s"' % version,
"PYAV_COMMIT_STR": '"%s"' % (git_commit or 'unknown-commit'),
}
def dump_config():
"""Print out all the config information we have so far (for debugging)."""
print('PyAV:', version, git_commit or '(unknown commit)')
print('Python:', sys.version.encode('unicode_escape').decode())
print('platform:', platform.platform())
print('extension_extra:')
for k, vs in extension_extra.items():
print('\t%s: %s' % (k, [x.encode('utf8') for x in vs]))
print('config_macros:')
for x in sorted(config_macros.items()):
print('\t%s=%s' % x)
# Monkey-patch for CCompiler to be silent.
def _CCompiler_spawn_silent(cmd, dry_run=None):
"""Spawn a process, and eat the stdio."""
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
if proc.returncode:
raise DistutilsExecError(err)
def new_compiler(*args, **kwargs):
"""Create a C compiler.
:param bool silent: Eat all stdio? Defaults to ``True``.
    All other arguments are passed to ``distutils.ccompiler.new_compiler``.
"""
make_silent = kwargs.pop('silent', True)
cc = _new_compiler(*args, **kwargs)
# If MSVC10, initialize the compiler here and add /MANIFEST to linker flags.
# See Python issue 4431 (https://bugs.python.org/issue4431)
if is_msvc(cc):
from distutils.msvc9compiler import get_build_version
if get_build_version() == 10:
cc.initialize()
for ldflags in [cc.ldflags_shared, cc.ldflags_shared_debug]:
unique_extend(ldflags, ['/MANIFEST'])
# If MSVC14, do not silence. As msvc14 requires some custom
# steps before the process is spawned, we can't monkey-patch this.
elif get_build_version() == 14:
make_silent = False
# monkey-patch compiler to suppress stdout and stderr.
if make_silent:
cc.spawn = _CCompiler_spawn_silent
return cc
_msvc_classes = tuple(filter(None, (MSVCCompiler, MSVC9Compiler, MSVC14Compiler)))
def is_msvc(cc=None):
cc = _new_compiler() if cc is None else cc
return isinstance(cc, _msvc_classes)
if os.name == 'nt':
if is_msvc():
config_macros['inline'] = '__inline'
    # Since we're shipping a self-contained unit on Windows, we need to mark
    # the package as such. On other systems, let it be universal.
class BinaryDistribution(Distribution):
def is_pure(self):
return False
distclass = BinaryDistribution
else:
# Nothing to see here.
distclass = Distribution
# Monkey-patch Cython to not overwrite embedded signatures.
if cythonize:
from Cython.Compiler.AutoDocTransforms import EmbedSignature
old_embed_signature = EmbedSignature._embed_signature
def new_embed_signature(self, sig, doc):
# Strip any `self` parameters from the front.
sig = re.sub(r'\(self(,\s+)?', '(', sig)
        # If the doc already begins with the same name as the signature, keep the doc.
if sig and doc:
new_name = sig.split('(')[0].strip()
old_name = doc.split('(')[0].strip()
if new_name == old_name:
return doc
if new_name.endswith('.' + old_name):
return doc
return old_embed_signature(self, sig, doc)
EmbedSignature._embed_signature = new_embed_signature
# Construct the modules that we find in the "av" directory.
ext_modules = []
for dirname, dirnames, filenames in os.walk('av'):
for filename in filenames:
        # We are looking for Cython sources.
if filename.startswith('.') or os.path.splitext(filename)[1] != '.pyx':
continue
pyx_path = os.path.join(dirname, filename)
base = os.path.splitext(pyx_path)[0]
# Need to be a little careful because Windows will accept / or \
# (where os.sep will be \ on Windows).
mod_name = base.replace('/', '.').replace(os.sep, '.')
c_path = os.path.join('src', base + '.c')
# We go with the C sources if Cython is not installed, and fail if
# those also don't exist. We can't `cythonize` here though, since the
        # `pyav/config.h` must be generated (by `build_ext`) first.
if not cythonize and not os.path.exists(c_path):
print('Cython is required to build PyAV from raw sources.')
print('Please `pip install Cython`.')
exit(3)
ext_modules.append(Extension(
mod_name,
sources=[c_path if not cythonize else pyx_path],
))
class ConfigCommand(Command):
user_options = [
('no-pkg-config', None,
"do not use pkg-config to configure dependencies"),
('verbose', None,
"dump out configuration"),
('compiler=', 'c',
"specify the compiler type"), ]
boolean_options = ['no-pkg-config']
def initialize_options(self):
self.compiler = None
self.no_pkg_config = None
def finalize_options(self):
self.set_undefined_options('build',
('compiler', 'compiler'),)
self.set_undefined_options('build_ext',
('no_pkg_config', 'no_pkg_config'),)
def run(self):
        # CFLAGS/LDFLAGS from the environment are not always respected by
        # distutils, so parse them here ourselves and leave any arguments we
        # can't interpret in the environment variable.
for name in 'CFLAGS', 'LDFLAGS':
known, unknown = parse_cflags(os.environ.pop(name, ''))
if unknown:
print("Warning: We don't understand some of {} (and will leave it in the envvar): {}".format(name, unknown))
os.environ[name] = unknown
update_extend(extension_extra, known)
if is_msvc(new_compiler(compiler=self.compiler)):
# Assume we have to disable /OPT:REF for MSVC with ffmpeg
config = {
'extra_link_args': ['/OPT:NOREF'],
}
update_extend(extension_extra, config)
# Check if we're using pkg-config or not
if self.no_pkg_config:
# Simply assume we have everything we need!
config = {
'libraries': ['avformat', 'avcodec', 'avdevice', 'avutil', 'avfilter',
'swscale', 'swresample'],
'library_dirs': [],
'include_dirs': []
}
update_extend(extension_extra, config)
for ext in self.distribution.ext_modules:
for key, value in extension_extra.items():
setattr(ext, key, value)
return
# We're using pkg-config:
errors = []
# Get the config for the libraries that we require.
for name in 'libavformat', 'libavcodec', 'libavdevice', 'libavutil', 'libavfilter', 'libswscale', 'libswresample':
config = get_library_config(name)
if config:
update_extend(extension_extra, config)
# We don't need macros for these, since they all must exist.
else:
errors.append('Could not find ' + name + ' with pkg-config.')
if self.verbose:
dump_config()
# Don't continue if we have errors.
# TODO: Warn Ubuntu 12 users that they can't satisfy requirements with the
# default package sources.
if errors:
print('\n'.join(errors))
exit(1)
# Normalize the extras.
extension_extra.update(
dict((k, sorted(set(v))) for k, v in extension_extra.items())
)
# Apply them.
for ext in self.distribution.ext_modules:
for key, value in extension_extra.items():
setattr(ext, key, value)
class CleanCommand(clean):
user_options = clean.user_options + [
('sources', None,
"remove Cython build output (C sources)")]
boolean_options = clean.boolean_options + ['sources']
def initialize_options(self):
clean.initialize_options(self)
self.sources = None
def run(self):
clean.run(self)
if self.sources:
if os.path.exists('src'):
remove_tree('src', dry_run=self.dry_run)
else:
log.info("'%s' does not exist -- can't clean it", 'src')
class CythonizeCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
# Cythonize, if required. We do it individually since we must update
# the existing extension instead of replacing them all.
for i, ext in enumerate(self.distribution.ext_modules):
if any(s.endswith('.pyx') for s in ext.sources):
if is_msvc():
ext.define_macros.append(('inline', '__inline'))
new_ext = cythonize(
ext,
compiler_directives=dict(
c_string_type='str',
c_string_encoding='ascii',
embedsignature=True,
language_level=2,
),
build_dir='src',
include_path=ext.include_dirs,
)[0]
ext.sources = new_ext.sources
class BuildExtCommand(build_ext):
if os.name != 'nt':
user_options = build_ext.user_options + [
('no-pkg-config', None,
"do not use pkg-config to configure dependencies")]
boolean_options = build_ext.boolean_options + ['no-pkg-config']
def initialize_options(self):
build_ext.initialize_options(self)
self.no_pkg_config = None
else:
no_pkg_config = 1
def run(self):
# Propagate build options to config
obj = self.distribution.get_command_obj('config')
obj.compiler = self.compiler
obj.no_pkg_config = self.no_pkg_config
obj.include_dirs = self.include_dirs
obj.libraries = self.libraries
obj.library_dirs = self.library_dirs
self.run_command('config')
# We write a header file containing everything we have discovered by
# inspecting the libraries which exist. This is the main mechanism we
        # use to detect differences between FFmpeg and Libav.
include_dir = os.path.join(self.build_temp, 'include')
pyav_dir = os.path.join(include_dir, 'pyav')
try:
os.makedirs(pyav_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
header_path = os.path.join(pyav_dir, 'config.h')
print('writing', header_path)
with open(header_path, 'w') as fh:
fh.write('#ifndef PYAV_COMPAT_H\n')
fh.write('#define PYAV_COMPAT_H\n')
for k, v in sorted(config_macros.items()):
fh.write('#define %s %s\n' % (k, v))
fh.write('#endif\n')
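        # The generated header then looks roughly like this (values illustrative):
        #   #ifndef PYAV_COMPAT_H
        #   #define PYAV_COMPAT_H
        #   #define PYAV_COMMIT_STR "v8.0.0-0-gabcdef0"
        #   #define PYAV_VERSION 8.0.0
        #   #define PYAV_VERSION_STR "8.0.0"
        #   #endif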
self.include_dirs = self.include_dirs or []
self.include_dirs.append(include_dir)
# Propagate config to cythonize.
for i, ext in enumerate(self.distribution.ext_modules):
unique_extend(ext.include_dirs, self.include_dirs)
unique_extend(ext.library_dirs, self.library_dirs)
unique_extend(ext.libraries, self.libraries)
self.run_command('cythonize')
build_ext.run(self)
setup(
name='av',
version=version,
description="Pythonic bindings for FFmpeg's libraries.",
author="Mike Boers",
author_email="pyav@mikeboers.com",
url="https://github.com/PyAV-Org/PyAV",
packages=find_packages(exclude=['build*', 'examples*', 'scratchpad*', 'tests*']),
zip_safe=False,
ext_modules=ext_modules,
cmdclass={
'build_ext': BuildExtCommand,
'clean': CleanCommand,
'config': ConfigCommand,
'cythonize': CythonizeCommand,
},
test_suite='tests',
entry_points={
'console_scripts': [
'pyav = av.__main__:main',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Cython',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Sound/Audio :: Conversion',
'Topic :: Multimedia :: Video',
'Topic :: Multimedia :: Video :: Conversion',
],
distclass=distclass,
)
|
|
"""
The Spatial Reference class, representing OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print srs
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print srs.proj
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print srs.ellipsoid
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print srs.projected, srs.geographic
False True
>>> srs.import_epsg(32140)
>>> print srs.name
NAD83 / Texas South Central
"""
import re
from types import UnicodeType, TupleType
from ctypes import byref, c_char_p, c_int, c_void_p
# Getting the error checking routine and exceptions
from django.contrib.gis.gdal.error import OGRException, SRSException
from django.contrib.gis.gdal.prototypes.srs import *
#### Spatial Reference class. ####
class SpatialReference(object):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL website,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
# Well-Known Geographical Coordinate System Name
_well_known = {'WGS84':4326, 'WGS72':4322, 'NAD27':4267, 'NAD83':4269}
    _epsg_regex = re.compile(r'^(EPSG:)?(?P<epsg>\d+)$', re.I)
_proj_regex = re.compile(r'^\+proj')
#### Python 'magic' routines ####
def __init__(self, srs_input='', srs_type='wkt'):
"""
Creates a GDAL OSR Spatial Reference object from the given input.
        The input may be a string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
        # Initializing pointer and string buffer.
self._ptr = None
buf = c_char_p('')
if isinstance(srs_input, basestring):
# Encoding to ASCII if unicode passed in.
if isinstance(srs_input, UnicodeType):
srs_input = srs_input.encode('ascii')
epsg_m = self._epsg_regex.match(srs_input)
proj_m = self._proj_regex.match(srs_input)
if epsg_m:
# Is this an EPSG well known name?
srs_type = 'epsg'
srs_input = int(epsg_m.group('epsg'))
elif proj_m:
# Is the string a PROJ.4 string?
srs_type = 'proj'
elif srs_input in self._well_known:
# Is this a short-hand well known name?
srs_type = 'epsg'
srs_input = self._well_known[srs_input]
elif srs_type == 'proj':
pass
else:
# Setting the buffer with WKT, PROJ.4 string, etc.
buf = c_char_p(srs_input)
elif isinstance(srs_input, int):
# EPSG integer code was input.
if srs_type != 'epsg': srs_type = 'epsg'
elif isinstance(srs_input, c_void_p):
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS type "%s"' % srs_type)
if srs_type == 'ogr':
# SRS input is OGR pointer
srs = srs_input
else:
# Creating a new pointer, using the string buffer.
srs = new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self._ptr = srs
# Post-processing if in PROJ.4 or EPSG formats.
if srs_type == 'proj': self.import_proj(srs_input)
elif srs_type == 'epsg': self.import_epsg(srs_input)
def __del__(self):
"Destroys this spatial reference."
if self._ptr: release_srs(self._ptr)
def __getitem__(self, target):
"""
Returns the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
        >>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print srs['GEOGCS']
WGS 84
>>> print srs['DATUM']
WGS_1984
>>> print srs['AUTHORITY']
EPSG
>>> print srs['AUTHORITY', 1] # The authority value
4326
>>> print srs['TOWGS84', 4] # the fourth value in this wkt
0
        >>> print srs['UNIT|AUTHORITY'] # For the units authority, have to use the pipe symbol.
        EPSG
        >>> print srs['UNIT|AUTHORITY', 1] # The authority value for the units
9122
"""
if isinstance(target, TupleType):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"The string representation uses 'pretty' WKT."
return self.pretty_wkt
#### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, str) or not isinstance(index, int):
raise TypeError
return get_attr_value(self._ptr, target, index)
def auth_name(self, target):
"Returns the authority name for the given string target node."
return get_auth_name(self._ptr, target)
def auth_code(self, target):
"Returns the authority code for the given string target node."
return get_auth_code(self._ptr, target)
def clone(self):
"Returns a clone of this SpatialReference object."
return SpatialReference(clone_srs(self._ptr))
def from_esri(self):
"Morphs this SpatialReference from ESRI's format to EPSG."
morph_from_esri(self._ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
identify_epsg(self._ptr)
def to_esri(self):
"Morphs this SpatialReference to ESRI's format."
morph_to_esri(self._ptr)
def validate(self):
"Checks to see if the given spatial reference is valid."
srs_validate(self._ptr)
#### Name & SRID properties ####
@property
def name(self):
"Returns the name of this Spatial Reference."
if self.projected: return self.attr_value('PROJCS')
elif self.geographic: return self.attr_value('GEOGCS')
elif self.local: return self.attr_value('LOCAL_CS')
else: return None
@property
def srid(self):
"Returns the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
#### Unit Properties ####
@property
def linear_name(self):
"Returns the name of the linear units."
units, name = linear_units(self._ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Returns the value of the linear units."
units, name = linear_units(self._ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Returns the name of the angular units."
units, name = angular_units(self._ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Returns the value of the angular units."
units, name = angular_units(self._ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
        Returns a 2-tuple of the units value and the units name,
        automatically determining whether to return the linear or
        angular units.
"""
if self.projected or self.local:
return linear_units(self._ptr, byref(c_char_p()))
elif self.geographic:
return angular_units(self._ptr, byref(c_char_p()))
else:
return (None, None)
#### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@property
def semi_major(self):
"Returns the Semi Major Axis for this Spatial Reference."
return semi_major(self._ptr, byref(c_int()))
@property
def semi_minor(self):
"Returns the Semi Minor Axis for this Spatial Reference."
return semi_minor(self._ptr, byref(c_int()))
@property
def inverse_flattening(self):
"Returns the Inverse Flattening for this Spatial Reference."
return invflattening(self._ptr, byref(c_int()))
#### Boolean Properties ####
@property
def geographic(self):
"""
Returns True if this SpatialReference is geographic
(root node is GEOGCS).
"""
return bool(isgeographic(self._ptr))
@property
def local(self):
"Returns True if this SpatialReference is local (root node is LOCAL_CS)."
return bool(islocal(self._ptr))
@property
def projected(self):
"""
Returns True if this SpatialReference is a projected coordinate system
(root node is PROJCS).
"""
return bool(isprojected(self._ptr))
#### Import Routines #####
def import_wkt(self, wkt):
"Imports the Spatial Reference from OGC WKT (string)"
from_wkt(self._ptr, byref(c_char_p(wkt)))
def import_proj(self, proj):
"Imports the Spatial Reference from a PROJ.4 string."
from_proj(self._ptr, proj)
def import_epsg(self, epsg):
"Imports the Spatial Reference from the EPSG code (an integer)."
from_epsg(self._ptr, epsg)
def import_xml(self, xml):
"Imports the Spatial Reference from an XML string."
from_xml(self._ptr, xml)
#### Export Properties ####
@property
def wkt(self):
"Returns the WKT representation of this Spatial Reference."
return to_wkt(self._ptr, byref(c_char_p()))
@property
def pretty_wkt(self, simplify=0):
"Returns the 'pretty' representation of the WKT."
return to_pretty_wkt(self._ptr, byref(c_char_p()), simplify)
@property
def proj(self):
"Returns the PROJ.4 representation for this Spatial Reference."
return to_proj(self._ptr, byref(c_char_p()))
@property
def proj4(self):
"Alias for proj()."
return self.proj
@property
def xml(self, dialect=''):
"Returns the XML representation of this Spatial Reference."
# FIXME: This leaks memory, have to figure out why.
return to_xml(self._ptr, byref(c_char_p()), dialect)
def to_esri(self):
"Morphs this SpatialReference to ESRI's format."
morph_to_esri(self._ptr)
def from_esri(self):
"Morphs this SpatialReference from ESRI's format to EPSG."
morph_from_esri(self._ptr)
class CoordTransform(object):
"The coordinate system transformation object."
def __init__(self, source, target):
"Initializes on a source and target SpatialReference objects."
self._ptr = None # Initially NULL
if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
raise SRSException('source and target must be of type SpatialReference')
self._ptr = new_ct(source._ptr, target._ptr)
if not self._ptr:
            raise SRSException('could not initialize CoordTransform object')
self._srs1_name = source.name
self._srs2_name = target.name
def __del__(self):
"Deletes this Coordinate Transformation object."
if self._ptr: destroy_ct(self._ptr)
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
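# Illustrative usage sketch (not executed here; requires a working GDAL install):
#   wgs84 = SpatialReference('WGS84')
#   tx_south_central = SpatialReference(32140)
#   ct = CoordTransform(wgs84, tx_south_central)
#   print ct            # Transform from "WGS 84" to "NAD83 / Texas South Central"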
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from distutils.version import LooseVersion
import pprint
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option
from pyspark.pandas.utils import name_like_string
from pyspark.testing.pandasutils import (
have_plotly,
plotly_requirement_message,
PandasOnSparkTestCase,
TestUtils,
)
if have_plotly:
from plotly import express
import plotly.graph_objs as go
@unittest.skipIf(not have_plotly, plotly_requirement_message)
@unittest.skipIf(
LooseVersion(pd.__version__) < "1.0.0",
"pandas<1.0; pandas<1.0 does not support latest plotly and/or 'plotting.backend' option.",
)
class SeriesPlotPlotlyTest(PandasOnSparkTestCase, TestUtils):
@classmethod
def setUpClass(cls):
super().setUpClass()
pd.set_option("plotting.backend", "plotly")
set_option("plotting.backend", "plotly")
set_option("plotting.max_rows", 1000)
set_option("plotting.sample_ratio", None)
@classmethod
def tearDownClass(cls):
pd.reset_option("plotting.backend")
reset_option("plotting.backend")
reset_option("plotting.max_rows")
reset_option("plotting.sample_ratio")
super().tearDownClass()
@property
def pdf1(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50]}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10]
)
@property
def psdf1(self):
return ps.from_pandas(self.pdf1)
@property
def psdf2(self):
return ps.range(1002)
@property
def pdf2(self):
return self.psdf2.to_pandas()
def test_bar_plot(self):
pdf = self.pdf1
psdf = self.psdf1
self.assertEqual(pdf["a"].plot(kind="bar"), psdf["a"].plot(kind="bar"))
self.assertEqual(pdf["a"].plot.bar(), psdf["a"].plot.bar())
def test_line_plot(self):
pdf = self.pdf1
psdf = self.psdf1
self.assertEqual(pdf["a"].plot(kind="line"), psdf["a"].plot(kind="line"))
self.assertEqual(pdf["a"].plot.line(), psdf["a"].plot.line())
def test_barh_plot(self):
pdf = self.pdf1
psdf = self.psdf1
self.assertEqual(pdf["a"].plot(kind="barh"), psdf["a"].plot(kind="barh"))
def test_area_plot(self):
pdf = pd.DataFrame(
{
"sales": [3, 2, 3, 9, 10, 6],
"signups": [5, 5, 6, 12, 14, 13],
"visits": [20, 42, 28, 62, 81, 50],
},
index=pd.date_range(start="2018/01/01", end="2018/07/01", freq="M"),
)
psdf = ps.from_pandas(pdf)
self.assertEqual(pdf["sales"].plot(kind="area"), psdf["sales"].plot(kind="area"))
self.assertEqual(pdf["sales"].plot.area(), psdf["sales"].plot.area())
# just a sanity check for df.col type
self.assertEqual(pdf.sales.plot(kind="area"), psdf.sales.plot(kind="area"))
def test_pie_plot(self):
psdf = self.psdf1
pdf = psdf.to_pandas()
self.assertEqual(
psdf["a"].plot(kind="pie"),
express.pie(pdf, values=pdf.columns[0], names=pdf.index),
)
# TODO: support multi-index columns
# columns = pd.MultiIndex.from_tuples([("x", "y")])
# psdf.columns = columns
# pdf.columns = columns
# self.assertEqual(
# psdf[("x", "y")].plot(kind="pie"),
# express.pie(pdf, values=pdf.iloc[:, 0].to_numpy(), names=pdf.index.to_numpy()),
# )
# TODO: support multi-index
# psdf = ps.DataFrame(
# {
# "a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50],
# "b": [2, 3, 4, 5, 7, 9, 10, 15, 34, 45, 49]
# },
# index=pd.MultiIndex.from_tuples([("x", "y")] * 11),
# )
# pdf = psdf.to_pandas()
# self.assertEqual(
# psdf["a"].plot(kind="pie"), express.pie(pdf, values=pdf.columns[0], names=pdf.index),
# )
def test_hist_plot(self):
def check_hist_plot(psser):
bins = np.array([1.0, 5.9, 10.8, 15.7, 20.6, 25.5, 30.4, 35.3, 40.2, 45.1, 50.0])
data = np.array([5.0, 4.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0])
prev = bins[0]
text_bins = []
for b in bins[1:]:
text_bins.append("[%s, %s)" % (prev, b))
prev = b
text_bins[-1] = text_bins[-1][:-1] + "]"
bins = 0.5 * (bins[:-1] + bins[1:])
name_a = name_like_string(psser.name)
bars = [
go.Bar(
x=bins,
y=data,
name=name_a,
text=text_bins,
hovertemplate=("variable=" + name_a + "<br>value=%{text}<br>count=%{y}"),
),
]
fig = go.Figure(data=bars, layout=go.Layout(barmode="stack"))
fig["layout"]["xaxis"]["title"] = "value"
fig["layout"]["yaxis"]["title"] = "count"
self.assertEqual(
pprint.pformat(psser.plot(kind="hist").to_dict()), pprint.pformat(fig.to_dict())
)
psdf1 = self.psdf1
check_hist_plot(psdf1["a"])
columns = pd.MultiIndex.from_tuples([("x", "y")])
psdf1.columns = columns
check_hist_plot(psdf1[("x", "y")])
    def test_box_plot(self):
        def check_box_plot(psser):
fig = go.Figure()
fig.add_trace(
go.Box(
name=name_like_string(psser.name),
q1=[3],
median=[6],
q3=[9],
mean=[10.0],
lowerfence=[1],
upperfence=[15],
y=[[50]],
boxpoints="suspectedoutliers",
notched=False,
)
)
fig["layout"]["xaxis"]["title"] = name_like_string(psser.name)
fig["layout"]["yaxis"]["title"] = "value"
self.assertEqual(
pprint.pformat(psser.plot(kind="box").to_dict()), pprint.pformat(fig.to_dict())
)
psdf1 = self.psdf1
        check_box_plot(psdf1["a"])
columns = pd.MultiIndex.from_tuples([("x", "y")])
psdf1.columns = columns
        check_box_plot(psdf1[("x", "y")])
    def test_box_plot_arguments(self):
with self.assertRaisesRegex(ValueError, "does not support"):
self.psdf1.a.plot.box(boxpoints="all")
with self.assertRaisesRegex(ValueError, "does not support"):
self.psdf1.a.plot.box(notched=True)
self.psdf1.a.plot.box(hovertext="abc") # other arguments should not throw an exception
def test_kde_plot(self):
psdf = ps.DataFrame({"a": [1, 2, 3, 4, 5]})
pdf = pd.DataFrame(
{
"Density": [0.05709372, 0.07670272, 0.05709372],
"names": ["a", "a", "a"],
"index": [-1.0, 3.0, 7.0],
}
)
actual = psdf.a.plot.kde(bw_method=5, ind=3)
expected = express.line(pdf, x="index", y="Density")
expected["layout"]["xaxis"]["title"] = None
self.assertEqual(pprint.pformat(actual.to_dict()), pprint.pformat(expected.to_dict()))
if __name__ == "__main__":
from pyspark.pandas.tests.plot.test_series_plot_plotly import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
|
"""
Event parser and human readable log generator.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/logbook/
"""
import asyncio
import logging
from datetime import timedelta
from itertools import groupby
import voluptuous as vol
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.components import sun
from homeassistant.components.frontend import register_built_in_panel
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP, EVENT_STATE_CHANGED,
STATE_NOT_HOME, STATE_OFF, STATE_ON, ATTR_HIDDEN, HTTP_BAD_REQUEST,
EVENT_LOGBOOK_ENTRY)
from homeassistant.core import State, split_entity_id, DOMAIN as HA_DOMAIN
DOMAIN = 'logbook'
DEPENDENCIES = ['recorder', 'frontend']
_LOGGER = logging.getLogger(__name__)
CONF_EXCLUDE = 'exclude'
CONF_INCLUDE = 'include'
CONF_ENTITIES = 'entities'
CONF_DOMAINS = 'domains'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
CONF_EXCLUDE: vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]): vol.All(cv.ensure_list,
[cv.string])
}),
CONF_INCLUDE: vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]): vol.All(cv.ensure_list,
[cv.string])
})
}),
}, extra=vol.ALLOW_EXTRA)
GROUP_BY_MINUTES = 15
CONTINUOUS_DOMAINS = ['proximity', 'sensor']
ATTR_NAME = 'name'
ATTR_MESSAGE = 'message'
ATTR_DOMAIN = 'domain'
ATTR_ENTITY_ID = 'entity_id'
LOG_MESSAGE_SCHEMA = vol.Schema({
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_MESSAGE): cv.template,
vol.Optional(ATTR_DOMAIN): cv.slug,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
})
def log_entry(hass, name, message, domain=None, entity_id=None):
"""Add an entry to the logbook."""
hass.add_job(async_log_entry, hass, name, message, domain, entity_id)
def async_log_entry(hass, name, message, domain=None, entity_id=None):
"""Add an entry to the logbook."""
data = {
ATTR_NAME: name,
ATTR_MESSAGE: message
}
if domain is not None:
data[ATTR_DOMAIN] = domain
if entity_id is not None:
data[ATTR_ENTITY_ID] = entity_id
hass.bus.async_fire(EVENT_LOGBOOK_ENTRY, data)
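# Illustrative call from other component code (entity and wording made up):
#   log_entry(hass, 'Kitchen Light', 'was switched on manually',
#             domain='light', entity_id='light.kitchen')
# which fires an EVENT_LOGBOOK_ENTRY event that humanify() later turns into an Entry.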
def setup(hass, config):
"""Listen for download events to download files."""
@callback
def log_message(service):
"""Handle sending notification message service calls."""
message = service.data[ATTR_MESSAGE]
name = service.data[ATTR_NAME]
domain = service.data.get(ATTR_DOMAIN)
entity_id = service.data.get(ATTR_ENTITY_ID)
message.hass = hass
message = message.async_render()
async_log_entry(hass, name, message, domain, entity_id)
hass.http.register_view(LogbookView(config.get(DOMAIN, {})))
register_built_in_panel(
hass, 'logbook', 'Logbook', 'mdi:format-list-bulleted-type')
hass.services.register(
DOMAIN, 'log', log_message, schema=LOG_MESSAGE_SCHEMA)
return True
class LogbookView(HomeAssistantView):
"""Handle logbook view requests."""
url = '/api/logbook'
name = 'api:logbook'
extra_urls = ['/api/logbook/{datetime}']
def __init__(self, config):
"""Initilalize the logbook view."""
self.config = config
@asyncio.coroutine
def get(self, request, datetime=None):
"""Retrieve logbook entries."""
if datetime:
datetime = dt_util.parse_datetime(datetime)
if datetime is None:
return self.json_message('Invalid datetime', HTTP_BAD_REQUEST)
else:
datetime = dt_util.start_of_local_day()
start_day = dt_util.as_utc(datetime)
end_day = start_day + timedelta(days=1)
hass = request.app['hass']
events = yield from hass.loop.run_in_executor(
None, _get_events, hass, start_day, end_day)
events = _exclude_events(events, self.config)
return self.json(humanify(events))
class Entry(object):
"""A human readable version of the log."""
def __init__(self, when=None, name=None, message=None, domain=None,
entity_id=None):
"""Initialize the entry."""
self.when = when
self.name = name
self.message = message
self.domain = domain
self.entity_id = entity_id
def as_dict(self):
"""Convert entry to a dict to be used within JSON."""
return {
'when': self.when,
'name': self.name,
'message': self.message,
'domain': self.domain,
'entity_id': self.entity_id,
}
def humanify(events):
"""Generate a converted list of events into Entry objects.
Will try to group events if possible:
- if 2+ sensor updates in GROUP_BY_MINUTES, show last
- if home assistant stop and start happen in same minute call it restarted
"""
# Group events in batches of GROUP_BY_MINUTES
for _, g_events in groupby(
events,
lambda event: event.time_fired.minute // GROUP_BY_MINUTES):
events_batch = list(g_events)
# Keep track of last sensor states
last_sensor_event = {}
# Group HA start/stop events
# Maps minute of event to 1: stop, 2: stop + start
start_stop_events = {}
# Process events
for event in events_batch:
if event.event_type == EVENT_STATE_CHANGED:
entity_id = event.data.get('entity_id')
if entity_id is None:
continue
if entity_id.startswith(tuple('{}.'.format(
domain) for domain in CONTINUOUS_DOMAINS)):
last_sensor_event[entity_id] = event
elif event.event_type == EVENT_HOMEASSISTANT_STOP:
if event.time_fired.minute in start_stop_events:
continue
start_stop_events[event.time_fired.minute] = 1
elif event.event_type == EVENT_HOMEASSISTANT_START:
if event.time_fired.minute not in start_stop_events:
continue
start_stop_events[event.time_fired.minute] = 2
# Yield entries
for event in events_batch:
if event.event_type == EVENT_STATE_CHANGED:
to_state = State.from_dict(event.data.get('new_state'))
                # If last_changed != last_updated, only attributes have changed;
                # we do not report on that yet. Also filter auto groups.
if not to_state or \
to_state.last_changed != to_state.last_updated or \
to_state.domain == 'group' and \
to_state.attributes.get('auto', False):
continue
domain = to_state.domain
# Skip all but the last sensor state
if domain in CONTINUOUS_DOMAINS and \
event != last_sensor_event[to_state.entity_id]:
continue
# Don't show continuous sensor value changes in the logbook
if domain in CONTINUOUS_DOMAINS and \
to_state.attributes.get('unit_of_measurement'):
continue
yield Entry(
event.time_fired,
name=to_state.name,
message=_entry_message_from_state(domain, to_state),
domain=domain,
entity_id=to_state.entity_id)
elif event.event_type == EVENT_HOMEASSISTANT_START:
if start_stop_events.get(event.time_fired.minute) == 2:
continue
yield Entry(
event.time_fired, "Home Assistant", "started",
domain=HA_DOMAIN)
elif event.event_type == EVENT_HOMEASSISTANT_STOP:
if start_stop_events.get(event.time_fired.minute) == 2:
action = "restarted"
else:
action = "stopped"
yield Entry(
event.time_fired, "Home Assistant", action,
domain=HA_DOMAIN)
elif event.event_type == EVENT_LOGBOOK_ENTRY:
domain = event.data.get(ATTR_DOMAIN)
entity_id = event.data.get(ATTR_ENTITY_ID)
if domain is None and entity_id is not None:
try:
domain = split_entity_id(str(entity_id))[0]
except IndexError:
pass
yield Entry(
event.time_fired, event.data.get(ATTR_NAME),
event.data.get(ATTR_MESSAGE), domain,
entity_id)
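# Illustrative grouping behaviour (event values made up): three state changes for a
# sensor without a unit_of_measurement inside the same 15 minute bucket yield a single
# Entry for the last change, while an EVENT_HOMEASSISTANT_STOP followed by an
# EVENT_HOMEASSISTANT_START in the same minute collapses into one "restarted" Entry.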
def _get_events(hass, start_day, end_day):
"""Get events for a period of time."""
from homeassistant.components.recorder.models import Events
from homeassistant.components.recorder.util import (
execute, session_scope)
with session_scope(hass=hass) as session:
query = session.query(Events).order_by(
Events.time_fired).filter(
(Events.time_fired > start_day) &
(Events.time_fired < end_day))
return execute(query)
def _exclude_events(events, config):
"""Get lists of excluded entities and platforms."""
excluded_entities = []
excluded_domains = []
included_entities = []
included_domains = []
exclude = config.get(CONF_EXCLUDE)
if exclude:
excluded_entities = exclude[CONF_ENTITIES]
excluded_domains = exclude[CONF_DOMAINS]
include = config.get(CONF_INCLUDE)
if include:
included_entities = include[CONF_ENTITIES]
included_domains = include[CONF_DOMAINS]
filtered_events = []
for event in events:
domain, entity_id = None, None
if event.event_type == EVENT_STATE_CHANGED:
to_state = State.from_dict(event.data.get('new_state'))
# Do not report on new entities
if event.data.get('old_state') is None:
continue
# Do not report on entity removal
if not to_state:
continue
# exclude entities which are customized hidden
hidden = to_state.attributes.get(ATTR_HIDDEN, False)
if hidden:
continue
domain = to_state.domain
entity_id = to_state.entity_id
elif event.event_type == EVENT_LOGBOOK_ENTRY:
domain = event.data.get(ATTR_DOMAIN)
entity_id = event.data.get(ATTR_ENTITY_ID)
if domain or entity_id:
# filter if only excluded is configured for this domain
if excluded_domains and domain in excluded_domains and \
not included_domains:
if (included_entities and entity_id not in included_entities) \
or not included_entities:
continue
# filter if only included is configured for this domain
elif not excluded_domains and included_domains and \
domain not in included_domains:
if (included_entities and entity_id not in included_entities) \
or not included_entities:
continue
# filter if included and excluded is configured for this domain
elif excluded_domains and included_domains and \
(domain not in included_domains or
domain in excluded_domains):
if (included_entities and entity_id not in included_entities) \
or not included_entities or domain in excluded_domains:
continue
# filter if only included is configured for this entity
elif not excluded_domains and not included_domains and \
included_entities and entity_id not in included_entities:
continue
# check if logbook entry is excluded for this entity
if entity_id in excluded_entities:
continue
filtered_events.append(event)
return filtered_events
# pylint: disable=too-many-return-statements
def _entry_message_from_state(domain, state):
"""Convert a state to a message for the logbook."""
# We pass domain in so we don't have to split entity_id again
if domain == 'device_tracker':
if state.state == STATE_NOT_HOME:
return 'is away'
else:
return 'is at {}'.format(state.state)
elif domain == 'sun':
if state.state == sun.STATE_ABOVE_HORIZON:
return 'has risen'
else:
return 'has set'
elif state.state == STATE_ON:
# Future: combine groups and its entity entries ?
return "turned on"
elif state.state == STATE_OFF:
return "turned off"
return "changed to {}".format(state.state)
|
|
import re
import warnings
from xml.dom.minidom import parseString, Node
from django.conf import settings, UserSettingsHolder
from django.core import mail
from django.test.signals import template_rendered, setting_changed
from django.template import Template, loader, TemplateDoesNotExist
from django.template.loaders import cached
from django.utils.translation import deactivate
from django.utils.functional import wraps
from django.utils import six
__all__ = (
'Approximate', 'ContextList', 'get_runner', 'override_settings',
'setup_test_environment', 'teardown_test_environment',
)
RESTORE_LOADERS_ATTR = '_original_template_source_loaders'
class Approximate(object):
def __init__(self, val, places=7):
self.val = val
self.places = places
def __repr__(self):
return repr(self.val)
def __eq__(self, other):
if self.val == other:
return True
return round(abs(self.val - other), self.places) == 0
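# Illustrative: Approximate(1 / 3.0, places=2) == 0.33 is True because the difference
# rounds to zero at two decimal places, while the default places=7 would make it False.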
class ContextList(list):
"""A wrapper that provides direct key access to context items contained
in a list of context objects.
"""
def __getitem__(self, key):
if isinstance(key, six.string_types):
for subcontext in self:
if key in subcontext:
return subcontext[key]
raise KeyError(key)
else:
return super(ContextList, self).__getitem__(key)
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
return True
def instrumented_test_render(self, context):
"""
An instrumented Template render method, providing a signal
that can be intercepted by the test system Client
"""
template_rendered.send(sender=self, template=self, context=context)
return self.nodelist.render(context)
def setup_test_environment():
"""Perform any global pre-test setup. This involves:
- Installing the instrumented test renderer
        - Setting the email backend to the locmem email backend.
- Setting the active locale to match the LANGUAGE_CODE setting.
"""
Template.original_render = Template._render
Template._render = instrumented_test_render
mail.original_email_backend = settings.EMAIL_BACKEND
settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
mail.outbox = []
deactivate()
def teardown_test_environment():
"""Perform any global post-test teardown. This involves:
- Restoring the original test renderer
- Restoring the email sending functions
"""
Template._render = Template.original_render
del Template.original_render
settings.EMAIL_BACKEND = mail.original_email_backend
del mail.original_email_backend
del mail.outbox
def get_warnings_state():
"""
Returns an object containing the state of the warnings module
"""
# There is no public interface for doing this, but this implementation of
# get_warnings_state and restore_warnings_state appears to work on Python
# 2.4 to 2.7.
return warnings.filters[:]
def restore_warnings_state(state):
"""
Restores the state of the warnings module when passed an object that was
returned by get_warnings_state()
"""
warnings.filters = state[:]
def get_runner(settings, test_runner_class=None):
if not test_runner_class:
test_runner_class = settings.TEST_RUNNER
test_path = test_runner_class.split('.')
# Allow for Python 2.5 relative paths
if len(test_path) > 1:
test_module_name = '.'.join(test_path[:-1])
else:
test_module_name = '.'
test_module = __import__(test_module_name, {}, {}, test_path[-1])
test_runner = getattr(test_module, test_path[-1])
return test_runner
def setup_test_template_loader(templates_dict, use_cached_loader=False):
"""
Changes Django to only find templates from within a dictionary (where each
key is the template name and each value is the corresponding template
content to return).
    Use :meth:`restore_template_loaders` to restore the original loaders.
"""
if hasattr(loader, RESTORE_LOADERS_ATTR):
raise Exception("loader.%s already exists" % RESTORE_LOADERS_ATTR)
def test_template_loader(template_name, template_dirs=None):
"A custom template loader that loads templates from a dictionary."
try:
return (templates_dict[template_name], "test:%s" % template_name)
except KeyError:
raise TemplateDoesNotExist(template_name)
if use_cached_loader:
template_loader = cached.Loader(('test_template_loader',))
template_loader._cached_loaders = (test_template_loader,)
else:
template_loader = test_template_loader
setattr(loader, RESTORE_LOADERS_ATTR, loader.template_source_loaders)
loader.template_source_loaders = (template_loader,)
return template_loader
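# Illustrative usage (template name and content are made up):
#   setup_test_template_loader({'greeting.html': 'Hello {{ name }}'})
#   t = loader.get_template('greeting.html')   # served from the dictionary above
#   ...
#   restore_template_loaders()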
def restore_template_loaders():
"""
Restores the original template loaders after
:meth:`setup_test_template_loader` has been run.
"""
loader.template_source_loaders = getattr(loader, RESTORE_LOADERS_ATTR)
delattr(loader, RESTORE_LOADERS_ATTR)
class override_settings(object):
"""
    Acts as either a decorator or a context manager. If it's a decorator, it
    takes a function and returns a wrapped function. If it's a context manager,
    it's used with the ``with`` statement. In either case, entering/exiting
    are called before and after, respectively, the function/block is executed.
"""
def __init__(self, **kwargs):
self.options = kwargs
self.wrapped = settings._wrapped
def __enter__(self):
self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def __call__(self, test_func):
from django.test import SimpleTestCase
if isinstance(test_func, type):
if not issubclass(test_func, SimpleTestCase):
raise Exception(
"Only subclasses of Django SimpleTestCase can be decorated "
"with override_settings")
original_pre_setup = test_func._pre_setup
original_post_teardown = test_func._post_teardown
def _pre_setup(innerself):
self.enable()
original_pre_setup(innerself)
def _post_teardown(innerself):
original_post_teardown(innerself)
self.disable()
test_func._pre_setup = _pre_setup
test_func._post_teardown = _post_teardown
return test_func
else:
@wraps(test_func)
def inner(*args, **kwargs):
with self:
return test_func(*args, **kwargs)
return inner
def enable(self):
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
settings._wrapped = override
for key, new_value in self.options.items():
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value)
def disable(self):
settings._wrapped = self.wrapped
for key in self.options:
new_value = getattr(settings, key, None)
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value)
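# Illustrative usage (the setting names are only examples):
#
#   @override_settings(LANGUAGE_CODE='en-us')
#   class MyTests(SimpleTestCase):
#       def test_debug_off(self):
#           with override_settings(DEBUG=False):
#               self.assertFalse(settings.DEBUG)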
def compare_xml(want, got):
"""Tries to do a 'xml-comparison' of want and got. Plain string
comparison doesn't always work because, for example, attribute
ordering should not be important. Comment nodes are not considered in the
comparison.
Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
"""
_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
def norm_whitespace(v):
return _norm_whitespace_re.sub(' ', v)
def child_text(element):
return ''.join([c.data for c in element.childNodes
if c.nodeType == Node.TEXT_NODE])
def children(element):
return [c for c in element.childNodes
if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
for want, got in zip(want_children, got_children):
if not check_element(want, got):
return False
return True
def first_node(document):
for node in document.childNodes:
if node.nodeType != Node.COMMENT_NODE:
return node
want, got = strip_quotes(want, got)
want = want.replace('\\n','\n')
got = got.replace('\\n','\n')
# If the string is not a complete xml document, we may need to add a
    # root element. This allows us to compare fragments, like "<foo/><bar/>"
if not want.startswith('<?xml'):
wrapper = '<root>%s</root>'
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
want_root = first_node(parseString(want))
got_root = first_node(parseString(got))
return check_element(want_root, got_root)
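# Illustrative: attribute order is irrelevant, so
#   compare_xml('<a x="1" y="2"/>', '<a y="2" x="1"/>')   returns True
#   compare_xml('<a x="1"/>', '<a x="2"/>')               returns False
# and fragments such as '<foo/><bar/>' are compared by wrapping them in a root element.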
def strip_quotes(want, got):
"""
Strip quotes of doctests output values:
>>> strip_quotes("'foo'")
"foo"
>>> strip_quotes('"foo"')
"foo"
"""
def is_quoted_string(s):
s = s.strip()
return (len(s) >= 2
and s[0] == s[-1]
and s[0] in ('"', "'"))
def is_quoted_unicode(s):
s = s.strip()
return (len(s) >= 3
and s[0] == 'u'
and s[1] == s[-1]
and s[1] in ('"', "'"))
if is_quoted_string(want) and is_quoted_string(got):
want = want.strip()[1:-1]
got = got.strip()[1:-1]
elif is_quoted_unicode(want) and is_quoted_unicode(got):
want = want.strip()[2:-1]
got = got.strip()[2:-1]
return want, got
def str_prefix(s):
return s % {'_': '' if six.PY3 else 'u'}
|
|
import datetime
import time
from twisted.trial import unittest
from twisted.internet import defer, task
from twisted.python.failure import Failure
from zope.interface import implements
from mock import patch
from txtorcon import Circuit
from txtorcon import build_timeout_circuit
from txtorcon import Stream
from txtorcon import TorControlProtocol
from txtorcon import TorState
from txtorcon import Router
from txtorcon.router import hexIdFromHash
from txtorcon.interface import IRouterContainer
from txtorcon.interface import ICircuitListener
from txtorcon.interface import ICircuitContainer
from txtorcon.interface import CircuitListenerMixin
from txtorcon.interface import ITorControlProtocol
from mock import Mock
class FakeTorController(object):
implements(IRouterContainer, ICircuitListener, ICircuitContainer, ITorControlProtocol)
post_bootstrap = defer.Deferred()
queue_command = Mock()
def __init__(self):
self.routers = {}
self.circuits = {}
self.extend = []
self.failed = []
def router_from_id(self, i):
return self.routers[i[:41]]
def circuit_new(self, circuit):
self.circuits[circuit.id] = circuit
def circuit_extend(self, circuit, router):
self.extend.append((circuit, router))
def circuit_launched(self, circuit):
pass
def circuit_built(self, circuit):
pass
def circuit_closed(self, circuit, **kw):
if circuit.id in self.circuits:
del self.circuits[circuit.id]
def circuit_failed(self, circuit, **kw):
self.failed.append((circuit, kw))
if circuit.id in self.circuits:
del self.circuits[circuit.id]
def find_circuit(self, circid):
return self.circuits[circid]
def close_circuit(self, circid):
del self.circuits[circid]
return defer.succeed('')
class FakeLocation:
def __init__(self):
self.countrycode = 'NA'
class FakeRouter:
def __init__(self, hsh, nm):
self.name = nm
self.id_hash = hsh
self.id_hex = hexIdFromHash(self.id_hash)
self.location = FakeLocation()
examples = ['CIRC 365 LAUNCHED PURPOSE=GENERAL',
'CIRC 365 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris PURPOSE=GENERAL',
'CIRC 365 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus PURPOSE=GENERAL',
'CIRC 365 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL',
'CIRC 365 BUILT $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL',
'CIRC 365 CLOSED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL REASON=FINISHED',
'CIRC 365 FAILED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL REASON=TIMEOUT']
class CircuitTests(unittest.TestCase):
def test_age(self):
"""
make sure age does something sensible at least once.
"""
tor = FakeTorController()
circuit = Circuit(tor)
now = datetime.datetime.now()
update = '1 LAUNCHED PURPOSE=GENERAL TIME_CREATED=%s' % now.strftime('%Y-%m-%dT%H:%M:%S')
circuit.update(update.split())
diff = circuit.age(now=now)
self.assertEquals(diff, 0)
self.assertTrue(circuit.time_created is not None)
@patch('txtorcon.circuit.datetime')
def test_age_default(self, fake_datetime):
"""
age() w/ defaults works properly
"""
from datetime import datetime
now = datetime.fromtimestamp(60.0)
fake_datetime.return_value = now
fake_datetime.utcnow = Mock(return_value=now)
tor = FakeTorController()
circuit = Circuit(tor)
circuit._time_created = datetime.fromtimestamp(0.0)
self.assertEquals(circuit.age(), 60)
self.assertTrue(circuit.time_created is not None)
def test_no_age_yet(self):
"""
make sure age doesn't explode if there's no TIME_CREATED flag.
"""
tor = FakeTorController()
circuit = Circuit(tor)
now = datetime.datetime.now()
circuit.update('1 LAUNCHED PURPOSE=GENERAL'.split())
self.assertTrue(circuit.time_created is None)
diff = circuit.age(now=now)
self.assertEquals(diff, None)
def test_listener_mixin(self):
listener = CircuitListenerMixin()
from zope.interface.verify import verifyObject
self.assertTrue(verifyObject(ICircuitListener, listener))
# call all the methods with None for each arg. This is mostly
# just to gratuitously increase test coverage, but also
# serves to ensure these methods don't just blow up
for (methodname, desc) in ICircuitListener.namesAndDescriptions():
method = getattr(listener, methodname)
args = [None] * len(desc.positional)
method(*args)
def test_unlisten(self):
tor = FakeTorController()
tor.routers['$E11D2B2269CC25E67CA6C9FB5843497539A74FD0'] = FakeRouter(
'$E11D2B2269CC25E67CA6C9FB5843497539A74FD0', 'a'
)
circuit = Circuit(tor)
circuit.listen(tor)
circuit.listen(tor)
circuit.update('1 LAUNCHED PURPOSE=GENERAL'.split())
circuit.unlisten(tor)
circuit.update('1 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris PURPOSE=GENERAL'.split())
self.assertEqual(len(tor.circuits), 1)
self.assertTrue(1 in tor.circuits)
self.assertEqual(len(tor.extend), 0)
self.assertEqual(1, len(circuit.path))
self.assertEqual(0, len(circuit.listeners))
def test_path_update(self):
cp = TorControlProtocol()
state = TorState(cp, False)
circuit = Circuit(state)
circuit.update('1 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris PURPOSE=GENERAL'.split())
self.assertEqual(1, len(circuit.path))
self.assertEqual(
'$E11D2B2269CC25E67CA6C9FB5843497539A74FD0',
circuit.path[0].id_hex
)
self.assertEqual('eris', circuit.path[0].name)
def test_wrong_update(self):
tor = FakeTorController()
circuit = Circuit(tor)
circuit.listen(tor)
circuit.update('1 LAUNCHED PURPOSE=GENERAL'.split())
self.assertRaises(
Exception,
circuit.update,
'2 LAUNCHED PURPOSE=GENERAL'.split()
)
def test_closed_remaining_streams(self):
tor = FakeTorController()
circuit = Circuit(tor)
circuit.listen(tor)
circuit.update('1 LAUNCHED PURPOSE=GENERAL'.split())
stream = Stream(tor)
stream.update("1 NEW 0 94.23.164.42.$43ED8310EB968746970896E8835C2F1991E50B69.exit:9001 SOURCE_ADDR=(Tor_internal):0 PURPOSE=DIR_FETCH".split())
circuit.streams.append(stream)
self.assertEqual(len(circuit.streams), 1)
circuit.update('1 CLOSED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL REASON=FINISHED'.split())
circuit.update('1 FAILED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL REASON=TIMEOUT'.split())
errs = self.flushLoggedErrors()
self.assertEqual(len(errs), 2)
def test_updates(self):
tor = FakeTorController()
circuit = Circuit(tor)
circuit.listen(tor)
tor.routers['$E11D2B2269CC25E67CA6C9FB5843497539A74FD0'] = FakeRouter(
'$E11D2B2269CC25E67CA6C9FB5843497539A74FD0', 'a'
)
tor.routers['$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5'] = FakeRouter(
'$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5', 'b'
)
tor.routers['$253DFF1838A2B7782BE7735F74E50090D46CA1BC'] = FakeRouter(
'$253DFF1838A2B7782BE7735F74E50090D46CA1BC', 'c'
)
for ex in examples[:-1]:
circuit.update(ex.split()[1:])
self.assertEqual(circuit.state, ex.split()[2])
self.assertEqual(circuit.purpose, 'GENERAL')
if '$' in ex:
self.assertEqual(
len(circuit.path),
len(ex.split()[3].split(','))
)
for (r, p) in zip(ex.split()[3].split(','), circuit.path):
d = r.split('=')[0]
self.assertEqual(d, p.id_hash)
def test_extend_messages(self):
tor = FakeTorController()
a = FakeRouter('$E11D2B2269CC25E67CA6C9FB5843497539A74FD0', 'a')
b = FakeRouter('$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5', 'b')
c = FakeRouter('$253DFF1838A2B7782BE7735F74E50090D46CA1BC', 'c')
tor.routers['$E11D2B2269CC25E67CA6C9FB5843497539A74FD0'] = a
tor.routers['$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5'] = b
tor.routers['$253DFF1838A2B7782BE7735F74E50090D46CA1BC'] = c
circuit = Circuit(tor)
circuit.listen(tor)
circuit.update('365 LAUNCHED PURPOSE=GENERAL'.split())
self.assertEqual(tor.extend, [])
circuit.update('365 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris PURPOSE=GENERAL'.split())
self.assertEqual(len(tor.extend), 1)
self.assertEqual(tor.extend[0], (circuit, a))
circuit.update('365 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus PURPOSE=GENERAL'.split())
self.assertEqual(len(tor.extend), 2)
self.assertEqual(tor.extend[0], (circuit, a))
self.assertEqual(tor.extend[1], (circuit, b))
circuit.update('365 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL'.split())
self.assertEqual(len(tor.extend), 3)
self.assertEqual(tor.extend[0], (circuit, a))
self.assertEqual(tor.extend[1], (circuit, b))
self.assertEqual(tor.extend[2], (circuit, c))
def test_extends_no_path(self):
'''
without connectivity, it seems you get EXTENDS messages with no
path update.
'''
tor = FakeTorController()
circuit = Circuit(tor)
circuit.listen(tor)
circuit.update('753 EXTENDED BUILD_FLAGS=IS_INTERNAL,NEED_CAPACITY,NEED_UPTIME PURPOSE=MEASURE_TIMEOUT TIME_CREATED=2012-07-30T18:23:18.956704'.split())
self.assertEqual(tor.extend, [])
self.assertEqual(circuit.path, [])
self.assertTrue('IS_INTERNAL' in circuit.build_flags)
self.assertTrue('NEED_CAPACITY' in circuit.build_flags)
self.assertTrue('NEED_UPTIME' in circuit.build_flags)
def test_str(self):
tor = FakeTorController()
circuit = Circuit(tor)
circuit.id = 1
str(circuit)
router = Router(tor)
circuit.path.append(router)
str(circuit)
def test_failed_reason(self):
tor = FakeTorController()
circuit = Circuit(tor)
circuit.listen(tor)
circuit.update('1 FAILED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris PURPOSE=GENERAL REASON=TIMEOUT'.split())
self.assertEqual(len(tor.failed), 1)
circ, kw = tor.failed[0]
self.assertEqual(circ, circuit)
self.assertTrue('PURPOSE' in kw)
self.assertTrue('REASON' in kw)
self.assertEqual(kw['PURPOSE'], 'GENERAL')
self.assertEqual(kw['REASON'], 'TIMEOUT')
def test_close_circuit(self):
tor = FakeTorController()
a = FakeRouter('$E11D2B2269CC25E67CA6C9FB5843497539A74FD0', 'a')
b = FakeRouter('$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5', 'b')
c = FakeRouter('$253DFF1838A2B7782BE7735F74E50090D46CA1BC', 'c')
tor.routers['$E11D2B2269CC25E67CA6C9FB5843497539A74FD0'] = a
tor.routers['$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5'] = b
tor.routers['$253DFF1838A2B7782BE7735F74E50090D46CA1BC'] = c
circuit = Circuit(tor)
circuit.listen(tor)
circuit.update('123 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL'.split())
self.assertEqual(3, len(circuit.path))
d = circuit.close()
# we already pretended that Tor answered "OK" to the
# CLOSECIRCUIT call (see close_circuit() in FakeTorController
# above) however the circuit isn't "really" closed yet...
self.assertTrue(not d.called)
# not very unit-test-y? we probably shouldn't delve into internals, I
# suppose...
self.assertTrue(circuit._closing_deferred is not None)
# simulate that Tor has really closed the circuit for us
# this should cause our Deferred to callback
circuit.update('123 CLOSED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL REASON=FINISHED'.split())
# confirm that our circuit callback has been triggered already
self.assertRaises(
defer.AlreadyCalledError,
d.callback,
"should have been called already"
)
return d
def test_is_built(self):
tor = FakeTorController()
a = FakeRouter('$E11D2B2269CC25E67CA6C9FB5843497539A74FD0', 'a')
b = FakeRouter('$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5', 'b')
c = FakeRouter('$253DFF1838A2B7782BE7735F74E50090D46CA1BC', 'c')
tor.routers['$E11D2B2269CC25E67CA6C9FB5843497539A74FD0'] = a
tor.routers['$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5'] = b
tor.routers['$253DFF1838A2B7782BE7735F74E50090D46CA1BC'] = c
circuit = Circuit(tor)
circuit.listen(tor)
circuit.update('123 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL'.split())
built0 = circuit.is_built
built1 = circuit.when_built()
self.assertTrue(built0 is not built1)
self.assertFalse(built0.called)
self.assertFalse(built1.called)
circuit.update('123 BUILT $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL'.split())
# create callback when we're already in BUILT; should be
# callback'd already
built2 = circuit.when_built()
self.assertTrue(built2 is not built1)
self.assertTrue(built2 is not built0)
self.assertTrue(built0.called)
self.assertTrue(built1.called)
self.assertTrue(built2.called)
self.assertTrue(built0.result == circuit)
self.assertTrue(built1.result == circuit)
self.assertTrue(built2.result == circuit)
def test_is_built_errback(self):
tor = FakeTorController()
a = FakeRouter('$E11D2B2269CC25E67CA6C9FB5843497539A74FD0', 'a')
tor.routers['$E11D2B2269CC25E67CA6C9FB5843497539A74FD0'] = a
state = TorState(tor)
circuit = Circuit(tor)
circuit.listen(tor)
circuit.update('123 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris PURPOSE=GENERAL'.split())
state.circuit_new(circuit)
d = circuit.when_built()
state.circuit_closed(circuit)
self.assertTrue(d.called)
self.assertTrue(isinstance(d.result, Failure))
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Integration tests for ssh module.
"""
from __future__ import print_function
import os
import random
import socket
import subprocess
from helpers import unittest
import target_test
from luigi.contrib.ssh import RemoteContext, RemoteTarget
working_ssh_host = 'localhost'
# set this to a working ssh host string (e.g. "localhost") to activate integration tests
# The following tests require a working ssh server at `working_ssh_host`
# that the test runner can ssh into using password-less authentication
# since `nc` has different syntax on different platforms
# we use a short python command to start
# a 'hello'-server on the remote machine
HELLO_SERVER_CMD = """
import socket, sys
listener = socket.socket()
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(('localhost', 2134))
listener.listen(1)
sys.stdout.write('ready')
sys.stdout.flush()
conn = listener.accept()[0]
conn.sendall(b'hello')
"""
try:
x = subprocess.check_output(
"ssh %s -S none -o BatchMode=yes 'echo 1'" % working_ssh_host,
shell=True
)
if x != b'1\n':
raise unittest.SkipTest('Not able to connect to ssh server')
except Exception:
raise unittest.SkipTest('Not able to connect to ssh server')
class TestRemoteContext(unittest.TestCase):
def setUp(self):
self.context = RemoteContext(working_ssh_host)
def tearDown(self):
try:
self.remote_server_handle.terminate()
except Exception:
pass
def test_check_output(self):
""" Test check_output ssh
Assumes the running user can ssh to working_ssh_host
"""
output = self.context.check_output(["echo", "-n", "luigi"])
self.assertEqual(output, b"luigi")
def test_tunnel(self):
print("Setting up remote listener...")
self.remote_server_handle = self.context.Popen([
"python", "-c", '"{0}"'.format(HELLO_SERVER_CMD)
], stdout=subprocess.PIPE)
print("Setting up tunnel")
with self.context.tunnel(2135, 2134):
print("Tunnel up!")
# hack to make sure the listener process is up
# and running before we write to it
server_output = self.remote_server_handle.stdout.read(5)
self.assertEqual(server_output, b"ready")
print("Connecting to server via tunnel")
s = socket.socket()
s.connect(("localhost", 2135))
print("Receiving...",)
response = s.recv(5)
self.assertEqual(response, b"hello")
print("Closing connection")
s.close()
print("Waiting for listener...")
output, _ = self.remote_server_handle.communicate()
self.assertEqual(self.remote_server_handle.returncode, 0)
print("Closing tunnel")
class TestRemoteTarget(unittest.TestCase):
""" These tests assume RemoteContext working
in order for setUp and tearDown to work
"""
def setUp(self):
self.ctx = RemoteContext(working_ssh_host)
self.filepath = "/tmp/luigi_remote_test.dat"
self.target = RemoteTarget(
self.filepath,
working_ssh_host,
)
self.ctx.check_output(["rm", "-rf", self.filepath])
self.ctx.check_output(["echo -n 'hello' >", self.filepath])
def tearDown(self):
self.ctx.check_output(["rm", "-rf", self.filepath])
def test_exists(self):
self.assertTrue(self.target.exists())
no_file = RemoteTarget(
"/tmp/_file_that_doesnt_exist_",
working_ssh_host,
)
self.assertFalse(no_file.exists())
def test_remove(self):
self.target.remove()
self.assertRaises(
subprocess.CalledProcessError,
self.ctx.check_output,
["cat", self.filepath]
)
def test_open(self):
f = self.target.open('r')
file_content = f.read()
f.close()
self.assertEqual(file_content, "hello")
def test_context_manager(self):
with self.target.open('r') as f:
file_content = f.read()
self.assertEqual(file_content, "hello")
class TestRemoteTargetAtomicity(unittest.TestCase, target_test.FileSystemTargetTestMixin):
path = '/tmp/luigi_remote_atomic_test.txt'
ctx = RemoteContext(working_ssh_host)
def create_target(self, format=None):
return RemoteTarget(self.path, working_ssh_host, format=format)
def _exists(self, path):
try:
self.ctx.check_output(["test", "-e", path])
except subprocess.CalledProcessError as e:
if e.returncode == 1:
return False
else:
raise
return True
def assertCleanUp(self, tp):
self.assertFalse(self._exists(tp))
def setUp(self):
self.ctx.check_output(["rm", "-rf", self.path])
self.local_file = '/tmp/local_luigi_remote_atomic_test.txt'
if os.path.exists(self.local_file):
os.remove(self.local_file)
def tearDown(self):
self.ctx.check_output(["rm", "-rf", self.path])
if os.path.exists(self.local_file):
os.remove(self.local_file)
def test_put(self):
f = open(self.local_file, 'w')
f.write('hello')
f.close()
t = RemoteTarget(self.path, working_ssh_host)
t.put(self.local_file)
self.assertTrue(self._exists(self.path))
def test_get(self):
self.ctx.check_output(["echo -n 'hello' >", self.path])
t = RemoteTarget(self.path, working_ssh_host)
t.get(self.local_file)
f = open(self.local_file, 'r')
file_content = f.read()
self.assertEqual(file_content, 'hello')
class TestRemoteTargetCreateDirectories(TestRemoteTargetAtomicity):
path = '/tmp/%s/xyz/luigi_remote_atomic_test.txt' % random.randint(0, 999999999)
class TestRemoteTargetRelative(TestRemoteTargetAtomicity):
path = 'luigi_remote_atomic_test.txt'
|
|
#!/usr/bin/env python
"""
# =============================================================================
Copyright Government of Canada 2015-2017
Written by: Eric Marinier, Public Health Agency of Canada,
National Microbiology Laboratory
Funded by the National Microbiology Laboratory and the Genome Canada / Alberta
Innovates Bio Solutions project "Listeria Detection and Surveillance
using Next Generation Genomics"
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
# =============================================================================
"""
"""
# =============================================================================
This file contains the JobManagerParallel class. JobManagerParallel is
responsible for managing the creation and execution of Python multiprocessing
parallel jobs. This class is designed for Neptune execution on a single CPU or
node.
# =============================================================================
"""
import multiprocessing
import subprocess
import JobManager
import CountKMers
import AggregateKMers
import ExtractSignatures
import FilterSignatures
import ConsolidateSignatures
import Database
# DEFAULTS
PROCESSES_DEFAULT = 8
"""
# =============================================================================
JOB MANAGER
# =============================================================================
"""
class JobManagerParallel(JobManager.JobManager):
"""
# =========================================================================
CONSTRUCTOR
-----------
INPUT
-----
[FILE LOCATION] [outputDirectoryLocation]
The directory location to write Neptune output.
[FILE LOCATION] [logDirectoryLocation]
The directory location to write output logs and error logs.
[INT >= 1] [parallel]
The number of worker processes to create.
# =========================================================================
"""
def __init__(
self, outputDirectoryLocation, logDirectoryLocation,
parallel=PROCESSES_DEFAULT):
self.pool = multiprocessing.Pool(processes=parallel)
# JobManager Parent Constructor
JobManager.JobManager.__init__(
self, outputDirectoryLocation, logDirectoryLocation)
"""
# =========================================================================
RUN JOBS
--------
PURPOSE
-------
Runs all the Neptune jobs provided to the function. The jobs are
synchronized and execution problems are reported if possible.
INPUT
-----
[JOB LIST] [jobs]
The jobs to run in parallel. As the parallel jobs are automatically
initiated when they are created, this process will monitor the jobs and
not return until they are complete.
This function must be implemented because it extends the JobManager
class.
RETURN
------
None
POST
----
The function will return when all jobs have completed.
# =========================================================================
"""
def runJobs(self, jobs):
print "Submitted " + str(len(jobs)) + " jobs."
self.synchronize(jobs)
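# Illustrative usage sketch (not part of the original module; the file paths
# and parameter values below are hypothetical): a manager is built with the
# output and log directories, jobs are created with the createXxxJob(...)
# helpers, and runJobs(...) blocks until they have all finished.
#
#   manager = JobManagerParallel("/tmp/neptune/out", "/tmp/neptune/logs", parallel=4)
#   countJob = manager.createCountJob("genome.fasta", "genome.kmers", k=25, organization=3)
#   manager.runJobs([countJob])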
"""
# =========================================================================
SYNCHRONIZE
-----------
PURPOSE
-------
Synchronizes all the passed jobs. This function
will return when all jobs have completed. This function does not check
the return codes.
INPUT
-----
[JOB ITERATOR] [jobs]
The multiprocessing result objects returned when the jobs were created.
RETURN
------
None
POST
----
The function will return when all jobs have completed.
# =========================================================================
"""
def synchronize(self, jobs):
for job in jobs:
job.get() # get() over wait() to propagate exceptions upwards
"""
# =========================================================================
CREATE COUNT JOB
----------------
PURPOSE
-------
Creates a CountKMers job.
INPUT
-----
[FILE LOCATION] [inputLocation]
The location of the input file.
[FILE LOCATION] [outputLocation]
The location of the output file.
[1 <= INT] [k]
The size of the k-mers.
[0 <= INT] [organization]
The degree of k-mer organization.
RETURN
------
[JOB ID] [job]
A CountKMers job ID that may be passed to RunJobs(...).
# =========================================================================
"""
def createCountJob(
self, inputLocation, outputLocation, k, organization):
parameters = {}
parameters[CountKMers.INPUT] = inputLocation
parameters[CountKMers.OUTPUT] = outputLocation
parameters[CountKMers.KMER] = k
parameters[CountKMers.ORGANIZATION] = organization
job = self.pool.apply_async(
submit, args=(CountKMers.parse, [parameters], ))
return job
"""
# =========================================================================
CREATE AGGREGATE JOB
--------------------
PURPOSE
-------
Creates an AggregateKMers job.
INPUT
-----
[STRING ITERATOR] [inclusionLocations]
An iterable object of all inclusion file locations.
[STRING ITERATOR] [exclusionLocations]
An iterable object of all exclusion file locations.
[FILE LOCATION] [outputLocation]
The output file location.
[STRING -- OPTIONAL] [tag]
The organization tag; used to generate appropriate file names from the
inclusion and exclusion iterators.
This [tag] relates to the following functions:
Utility.getAggregationTags(...)
CountKMers.count(...)
CountKMers.writeMultipleFiles(...)
Neptune.aggregateMultipleFiles(...)
RETURN
------
[JOB ID] [job]
An AggregateKMers job ID that may be passed to RunJobs(...).
# =========================================================================
"""
def createAggregateJob(
self, inclusionLocations, exclusionLocations,
outputLocation, tag):
parameters = {}
inclusion = []
exclusion = []
# INCLUSION
if tag:
inclusion += (item + "." + tag for item in inclusionLocations)
else:
inclusion += inclusionLocations
parameters[AggregateKMers.INCLUSION] = inclusion
# EXCLUSION
if tag:
exclusion += (item + "." + tag for item in exclusionLocations)
else:
exclusion += exclusionLocations
parameters[AggregateKMers.EXCLUSION] = exclusion
# OUTPUT
parameters[AggregateKMers.OUTPUT] = outputLocation
# DELETE
parameters[AggregateKMers.DELETE] = True
job = self.pool.apply_async(
submit, args=(AggregateKMers.parse, [parameters], ))
return job
"""
# =========================================================================
CREATE EXTRACT JOB
------------------
PURPOSE
-------
Creates an ExtractSignatures job.
INPUT
-----
[FILE LOCATION] [referenceLocation]
The location of the reference to extract candidates.
[1 <= INT -- OPTIONAL] [referenceSize]
The size of the reference.
[0 <= FLOAT <= 1 -- OPTIONAL] [rate]
The SNV rate.
[1 <= INT -- OPTIONAL] [inclusion]
The number of inclusion genome files.
[0 <= INT -- OPTIONAL] [inhits]
The minimum number of inclusion k-mer hits.
[1 <= INT -- OPTIONAL] [exclusion]
The number of exclusion genome files.
[0 <= INT -- OPTIONAL] [exhits]
The maximum number of exclusion k-mer hits.
[1 <= INT -- OPTIONAL] [gap]
The maximum inclusion k-mer gap size.
[1 <= INT -- OPTIONAL] [size]
The minimum size of any candidate.
[0 <= FLOAT <= 1 -- OPTIONAL] [GC]
The GC-content of the environment.
[0 < FLOAT < 1 -- OPTIONAL] [confidence]
The statistical confidence.
[FILE LOCATION] [aggregateLocation]
The location of the aggregation file.
[FILE LOCATION] [outputLocation]
The location of the output file.
RETURN
------
[JOB ID] [job]
An ExtractSignatures job ID that may be passed to RunJobs(...).
# =========================================================================
"""
def createExtractJob(
self, referenceLocation, referenceSize, rate, inclusion, inhits,
exclusion, exhits, gap, size, GC, confidence, aggregateLocation,
outputLocation):
parameters = {}
# REFERENCE
parameters[ExtractSignatures.REFERENCE] = referenceLocation
# REFERENCE SIZE
parameters[ExtractSignatures.REFERENCE_SIZE] = referenceSize \
if referenceSize else None
# RATE
parameters[ExtractSignatures.RATE] = rate \
if rate else None
# INCLUSION
parameters[ExtractSignatures.INCLUSION] = inclusion \
if inclusion else None
# INHITS
parameters[ExtractSignatures.INHITS] = inhits \
if inhits else None
# EXCLUSION
parameters[ExtractSignatures.EXCLUSION] = exclusion \
if exclusion else None
# EXHITS
parameters[ExtractSignatures.EXHITS] = exhits \
if exhits else None
# GAP
parameters[ExtractSignatures.GAP] = gap \
if gap else None
# SIZE
parameters[ExtractSignatures.SIZE] = size \
if size else None
# GC
parameters[ExtractSignatures.GC_CONTENT] = GC \
if GC else None
# CONFIDENCE
parameters[ExtractSignatures.CONFIDENCE] = confidence \
if confidence else None
# AGGREGATED KMERS
parameters[ExtractSignatures.KMERS] = aggregateLocation
# OUTPUT
parameters[ExtractSignatures.OUTPUT] = outputLocation
job = self.pool.apply_async(
submit, args=(ExtractSignatures.parse, [parameters], ))
return job
"""
# =========================================================================
CREATE DATABASE JOB
-------------------
PURPOSE
-------
Creates a BuildDatabase job.
INPUT
-----
[(FILE LOCATION) ITERATOR] [inputLocations]
The input locations of the entries (FASTA) in the database.
[FILE LOCATION] [outputLocation]
The output location of the database.
RETURN
------
[JOB] [job]
A BuildDatabase job that may be passed to RunJobs(...).
# =========================================================================
"""
def createDatabaseJob(
self, inputLocations, aggregatedLocation, outputLocation):
aggregatedFile = open(aggregatedLocation, 'w')
ID = 0
for inputLocation in inputLocations:
inputFile = open(inputLocation, 'r')
for line in inputFile:
if line[0] is ">":
aggregatedFile.write(">" + str(ID) + "\n")
else:
aggregatedFile.write(line)
inputFile.close()
ID += 1
aggregatedFile.close()
parameters = [aggregatedLocation, outputLocation]
# NOTE: parameters is already a list
job = self.pool.apply_async(
submit, args=(Database.createDatabaseJob, parameters, ))
return job
"""
# =========================================================================
CREATE FILTER JOB
-----------------
PURPOSE
-------
Creates a FilterSignatures job.
INPUT
-----
[FILE LOCATION] [inclusionDatabaseLocation]
The location of the inclusion database to compare signatures against.
[FILE LOCATION] [exclusionDatabaseLocation]
The location of the exclusion database to compare signatures against.
[(FILE LOCATION) LIST] [inclusion]
The list of inclusion files.
[(FILE LOCATION) LIST] [exclusion]
The list of exclusion files.
[FILE LOCATION] [inputLocation]
The candidate signatures to filter.
[FILE LOCATION] [filteredOutputLocation]
The filtered output location.
[FILE LOCATION] [sortedOutputLocation]
The sorted output location.
[0 <= FLOAT <= 1] [filterLength]
The maximum percent length of an exclusion hit with a candidate.
[0 <= FLOAT <= 1] [filterPercent]
The maximum percent identity of an exclusion hit with a candidate.
[4 <= INT] [seedSize]
The seed size used in alignments.
RETURN
------
[JOB ID] [job]
A FilterSignatures job ID that may be passed to RunJobs(...).
# =========================================================================
"""
def createFilterJob(
self, inclusionDatabaseLocation, exclusionDatabaseLocation,
inclusion, exclusion, inputLocation, filteredOutputLocation,
sortedOutputLocation, filterLength, filterPercent, seedSize):
parameters = {}
# INCLUSION DATABASE
parameters[FilterSignatures.INCLUSION_DATABASE] = \
inclusionDatabaseLocation
# EXCLUSION DATABASE
parameters[FilterSignatures.EXCLUSION_DATABASE] = \
exclusionDatabaseLocation
# INCLUSION
parameters[FilterSignatures.INCLUSION] = inclusion \
if inclusion else None
# EXCLUSION
parameters[FilterSignatures.EXCLUSION] = exclusion \
if exclusion else None
# INPUT
parameters[FilterSignatures.INPUT] = inputLocation
# FILTERED OUTPUT
parameters[FilterSignatures.FILTERED_OUTPUT] = filteredOutputLocation
# SORTED OUTPUT
parameters[FilterSignatures.SORTED_OUTPUT] = sortedOutputLocation
# FILTER LENGTH
parameters[FilterSignatures.FILTER_LENGTH] = filterLength \
if filterLength else None
# FILTER PERCENT
parameters[FilterSignatures.FILTER_PERCENT] = filterPercent \
if filterPercent else None
# SEED SIZE
parameters[FilterSignatures.SEED_SIZE] = seedSize \
if seedSize else None
job = self.pool.apply_async(
submit, args=(FilterSignatures.parse, [parameters], ))
return job
"""
# =========================================================================
CREATE CONSOLIDATE JOB
----------------------
PURPOSE
-------
Creates a ConsolidateSignatures job.
INPUT
-----
[(FILE LOCATION) LIST] [signatureLocations]
A list of Neptune signature file locations corresponding to files to
consolidate.
[4 <= INT] [seedSize]
The seed size used in alignments.
[(FILE DIRECTORY) LOCATION] [outputDirectoryLocation]
The directory to write the output files.
RETURN
------
[JOB] [job]
A ConsolidateSignatures job that may be passed to RunJobs(...).
# =========================================================================
"""
def createConsolidateJob(
self, signatureLocations, seedSize, outputDirectoryLocation):
parameters = {}
# SIGNATURE LOCATIONS
parameters[ConsolidateSignatures.SIGNATURES] = signatureLocations \
if signatureLocations else None
# SEED SIZE
parameters[ConsolidateSignatures.SEED_SIZE] = seedSize \
if seedSize else None
# OUTPUT DIRECTORY LOCATION
parameters[ConsolidateSignatures.OUTPUT] = outputDirectoryLocation \
if outputDirectoryLocation else None
job = self.pool.apply_async(
submit, args=(ConsolidateSignatures.parse, [parameters], ))
return job
"""
# =============================================================================
SUBMIT
------
PURPOSE
-------
This submit function wraps all function calls in a try-except block that
explicitly catches any CalledProcessError that is thrown. This function should be
called directly by all pool.apply_async(...) calls, instead of having
pool.apply_async(...) directly call the intended function.
The purpose of this function is to work around an issue in Python 2.7 with
CalledProcessError exceptions failing to propagate upwards to
the calling process. They instead report a misleading error and hang the
program in a state that is hard to terminate (ctrl-C did not work). We work
around this problem by catching CalledProcessError and throwing a new generic
Exception in its place. This exception will terminate the execution of the
entire program so long as JobManagerParallel's synchronize() class method is
using .get() to wait for jobs to finish and not .wait().
ERROR: https://bugs.python.org/issue9400
NOTE: This solution might only be required in Python 2.7 and not in Python 3.
INPUT
-----
[FUNCTION] [function]
The function to be executed.
[LIST ARGUMENTS] [arguments]
The arguments that will be passed to [function]. This must be prepared as
a list because the function will unwrap the list to pass the arguments.
This means that single items must be passed as a list.
Example:
parameters = "filename.txt"
submit(function, [parameters])
POST
----
The [function] will be run with [parameters] within a try-except block. When a
CalledProcessError is raised, this function will catch the error and throw a
generic Exception in its place, which will help terminate the execution of the
software immediately.
# =============================================================================
"""
def submit(function, arguments):
# NOTE: This may only be required in Python 2.7.
try:
function(*arguments)
except subprocess.CalledProcessError as cpe:
raise Exception(str(cpe))
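# Minimal sketch of the workaround described above (the helper function
# `flaky` is hypothetical): a CalledProcessError raised inside the worker is
# re-raised by submit() as a plain Exception, which then surfaces in the
# parent process when .get() is called on the job result.
#
#   def flaky(path):
#       subprocess.check_call(["ls", path])  # may raise CalledProcessError
#
#   pool = multiprocessing.Pool(processes=1)
#   job = pool.apply_async(submit, args=(flaky, ["/no/such/path"]))
#   job.get()  # raises the re-wrapped Exception instead of hanging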
|
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A module for monitoring backends."""
import time
import threading
import types
from IPython.display import display # pylint: disable=import-error
from IPython.core.magic import line_magic, Magics, magics_class # pylint: disable=import-error
from IPython.core import magic_arguments # pylint: disable=import-error
import matplotlib.pyplot as plt # pylint: disable=import-error
import ipywidgets as widgets # pylint: disable=import-error
from qiskit.tools.monitor.backend_overview import get_unique_backends
from qiskit.visualization.gate_map import plot_gate_map
@magics_class
class BackendOverview(Magics):
"""A class of status magic functions.
"""
@line_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-i',
'--interval',
type=float,
default=60,
help='Interval for status check.'
)
def qiskit_backend_overview(self, line='', cell=None):
"""A Jupyter magic function to monitor backends.
"""
del cell # Unused
args = magic_arguments.parse_argstring(
self.qiskit_backend_overview, line)
unique_hardware_backends = get_unique_backends()
_value = "<h2 style ='color:#ffffff; background-color:#000000;"
_value += "padding-top: 1%; padding-bottom: 1%;padding-left: 1%;"
_value += "margin-top: 0px'>Backend Overview</h2>"
backend_title = widgets.HTML(value=_value,
layout=widgets.Layout(margin='0px 0px 0px 0px'))
build_back_widgets = [backend_widget(b)
for b in unique_hardware_backends]
_backends = []
# Sort backends by operational or not
oper_ord_backends = []
for n, back in enumerate(unique_hardware_backends):
if back.status().operational:
oper_ord_backends = [build_back_widgets[n]] + oper_ord_backends
_backends = [back] + _backends
else:
oper_ord_backends = oper_ord_backends + [build_back_widgets[n]]
_backends = _backends + [back]
qubit_label = widgets.Label(value='Num. Qubits')
pend_label = widgets.Label(value='Pending Jobs')
least_label = widgets.Label(value='Least Busy')
oper_label = widgets.Label(
value='Operational', layout=widgets.Layout(margin='5px 0px 0px 0px'))
t12_label = widgets.Label(
value='Avg. T1 / T2', layout=widgets.Layout(margin='10px 0px 0px 0px'))
cx_label = widgets.Label(
value='Avg. CX Err.', layout=widgets.Layout(margin='10px 0px 0px 0px'))
meas_label = widgets.Label(
value='Avg. Meas. Err.', layout=widgets.Layout(margin='10px 0px 0px 0px'))
labels_widget = widgets.VBox([qubit_label, pend_label, oper_label,
least_label, t12_label, cx_label, meas_label],
layout=widgets.Layout(margin='295px 0px 0px 0px',
min_width='100px'))
backend_grid = GridBox_with_thread(children=oper_ord_backends,
layout=widgets.Layout(
grid_template_columns='250px ' *
len(unique_hardware_backends),
grid_template_rows='auto',
grid_gap='0px 25px'))
backend_grid._backends = _backends # pylint: disable=attribute-defined-outside-init
backend_grid._update = types.MethodType( # pylint: disable=attribute-defined-outside-init
update_backend_info, backend_grid)
backend_grid._thread = threading.Thread( # pylint: disable=attribute-defined-outside-init
target=backend_grid._update, args=(args.interval,))
backend_grid._thread.start()
back_box = widgets.HBox([labels_widget, backend_grid])
back_monitor = widgets.VBox([backend_title, back_box])
display(back_monitor)
class GridBox_with_thread(widgets.GridBox): # pylint: disable=invalid-name
"""A GridBox that will close an attached thread
"""
def __del__(self):
"""Object disposal"""
if hasattr(self, '_thread'):
try:
self._thread.do_run = False
self._thread.join()
except Exception: # pylint: disable=broad-except
pass
self.close()
def backend_widget(backend):
"""Creates a backend widget.
"""
config = backend.configuration().to_dict()
props = backend.properties().to_dict()
name = widgets.HTML(value="<h4>{name}</h4>".format(name=backend.name()),
layout=widgets.Layout())
n_qubits = config['n_qubits']
qubit_count = widgets.HTML(value="<h5><b>{qubits}</b></h5>".format(qubits=n_qubits),
layout=widgets.Layout(justify_content='center'))
cmap = widgets.Output(layout=widgets.Layout(min_width='250px', max_width='250px',
max_height='250px',
min_height='250px',
justify_content='center',
align_items='center',
margin='0px 0px 0px 0px'))
with cmap:
_cmap_fig = plot_gate_map(backend,
plot_directed=False,
label_qubits=False)
if _cmap_fig is not None:
display(_cmap_fig)
# Prevents plot from showing up twice.
plt.close(_cmap_fig)
pending = generate_jobs_pending_widget()
is_oper = widgets.HTML(value="<h5></h5>",
layout=widgets.Layout(justify_content='center'))
least_busy = widgets.HTML(value="<h5></h5>",
layout=widgets.Layout(justify_content='center'))
t1_units = props['qubits'][0][0]['unit']
avg_t1 = round(sum([q[0]['value'] for q in props['qubits']])/n_qubits, 1)
avg_t2 = round(sum([q[1]['value'] for q in props['qubits']])/n_qubits, 1)
t12_widget = widgets.HTML(value="<h5>{t1} / {t2} {units}</h5>".format(t1=avg_t1,
t2=avg_t2,
units=t1_units),
layout=widgets.Layout())
sum_cx_err = 0
num_cx = 0
for gate in props['gates']:
if gate['gate'] == 'cx':
for param in gate['parameters']:
if param['name'] == 'gate_error':
# Value == 1.0 means gate effectively off
if param['value'] != 1.0:
sum_cx_err += param['value']
num_cx += 1
avg_cx_err = round(sum_cx_err/(num_cx), 4)
cx_widget = widgets.HTML(value="<h5>{cx_err}</h5>".format(cx_err=avg_cx_err),
layout=widgets.Layout())
avg_meas_err = 0
for qub in props['qubits']:
for item in qub:
if item['name'] == 'readout_error':
avg_meas_err += item['value']
avg_meas_err = round(avg_meas_err/n_qubits, 4)
meas_widget = widgets.HTML(value="<h5>{meas_err}</h5>".format(meas_err=avg_meas_err),
layout=widgets.Layout())
out = widgets.VBox([name, cmap, qubit_count, pending, is_oper, least_busy,
t12_widget, cx_widget, meas_widget],
layout=widgets.Layout(display='inline-flex',
flex_flow='column',
align_items='center'))
out._is_alive = True
return out
def update_backend_info(self, interval=60):
"""Updates the monitor info
Called from another thread.
"""
my_thread = threading.currentThread()
current_interval = 0
started = False
all_dead = False
stati = [None]*len(self._backends)
while getattr(my_thread, "do_run", True) and not all_dead:
if current_interval == interval or started is False:
for ind, back in enumerate(self._backends):
_value = self.children[ind].children[2].value
_head = _value.split('<b>')[0]
try:
_status = back.status()
stati[ind] = _status
except Exception: # pylint: disable=broad-except
self.children[ind].children[2].value = _value.replace(
_head, "<h5 style='color:#ff5c49'>")
self.children[ind]._is_alive = False
else:
self.children[ind]._is_alive = True
self.children[ind].children[2].value = _value.replace(
_head, "<h5>")
idx = list(range(len(self._backends)))
pending = [s.pending_jobs for s in stati]
_, least_idx = zip(*sorted(zip(pending, idx)))
# Make sure least pending is operational
for ind in least_idx:
if stati[ind].operational:
least_pending_idx = ind
break
for var in idx:
if var == least_pending_idx:
self.children[var].children[5].value = "<h5 style='color:#34bc6e'>True</h5>"
else:
self.children[var].children[5].value = "<h5 style='color:#dc267f'>False</h5>"
self.children[var].children[3].children[1].value = pending[var]
self.children[var].children[3].children[1].max = max(
self.children[var].children[3].children[1].max, pending[var]+10)
if stati[var].operational:
self.children[var].children[4].value = "<h5 style='color:#34bc6e'>True</h5>"
else:
self.children[var].children[4].value = "<h5 style='color:#dc267f'>False</h5>"
started = True
current_interval = 0
time.sleep(1)
all_dead = not any([wid._is_alive for wid in self.children])
current_interval += 1
def generate_jobs_pending_widget():
"""Generates a jobs_pending progress bar widget.
"""
pbar = widgets.IntProgress(
value=0,
min=0,
max=50,
description='',
orientation='horizontal', layout=widgets.Layout(max_width='180px'))
pbar.style.bar_color = '#71cddd'
pbar_current = widgets.Label(
value=str(pbar.value), layout=widgets.Layout(min_width='auto'))
pbar_max = widgets.Label(
value=str(pbar.max), layout=widgets.Layout(min_width='auto'))
def _on_max_change(change):
pbar_max.value = str(change['new'])
def _on_val_change(change):
pbar_current.value = str(change['new'])
pbar.observe(_on_max_change, names='max')
pbar.observe(_on_val_change, names='value')
jobs_widget = widgets.HBox([pbar_current, pbar, pbar_max],
layout=widgets.Layout(max_width='250px',
min_width='250px',
justify_content='center'))
return jobs_widget
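# Usage sketch (assumes these magics are registered with the running
# IPython/Jupyter session, e.g. through qiskit's jupyter tooling). In a
# notebook cell:
#
#   %qiskit_backend_overview -i 30
#
# renders the overview and refreshes the backend status roughly every
# 30 seconds, per the --interval argument defined above.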
|
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python source file include mrpc pipeline functions and necessary utils."""
from typing import List
import tensorflow as tf
import tensorflow_data_validation as tfdv
import tensorflow_hub as hub
import tensorflow_transform as tft
from tfx import v1 as tfx
from tfx.components.transform import stats_options_util
from tfx.examples.bert.utils.bert_models import build_and_compile_bert_classifier
from tfx.examples.bert.utils.bert_tokenizer_utils import BertPreprocessor
from tfx_bsl.public import tfxio
from google.protobuf import text_format
_BERT_LINK = 'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/2'
_BERT_VOCAB = 'bert_vocab'
_INPUT_WORD_IDS = 'input_word_ids'
_INPUT_MASK = 'input_mask'
_SEGMENT_IDS = 'segment_ids'
_EPOCHS = 1
_EVAL_BATCH_SIZE = 32
_FEATURE_KEY_A = 'sentence1'
_FEATURE_KEY_B = 'sentence2'
_LABEL_KEY = 'label'
_MAX_LEN = 128
_TRAIN_BATCH_SIZE = 32
def _tokenize(sequence_a, sequence_b):
"""Tokenize the two sentences and insert appropriate tokens."""
processor = BertPreprocessor(_BERT_LINK)
vocab = processor.get_vocab_name()
# Annotate asset provides the mapping between the name (_BERT_VOCAB) and the
# path within the StatsOptions object passed to TFDV (
# https://github.com/tensorflow/data-validation/blob/master/
# tensorflow_data_validation/statistics/stats_options.py).
# This vocab can then be used to compute NLP statistics (see the description
# of the stats_options_updater_fn below).
tft.annotate_asset(_BERT_VOCAB, vocab.decode())
return processor.tokenize_sentence_pair(
tf.reshape(sequence_a, [-1]), tf.reshape(sequence_b, [-1]), _MAX_LEN)
def preprocessing_fn(inputs):
"""tf.transform's callback function for preprocessing inputs.
Args:
inputs: map from feature keys to raw not-yet-transformed features.
Returns:
Map from string feature key to transformed feature Tensors.
"""
input_word_ids, input_mask, segment_ids = _tokenize(inputs[_FEATURE_KEY_A],
inputs[_FEATURE_KEY_B])
return {
_LABEL_KEY: inputs[_LABEL_KEY],
_INPUT_WORD_IDS: input_word_ids,
_INPUT_MASK: input_mask,
_SEGMENT_IDS: segment_ids
}
def stats_options_updater_fn(
stats_type: stats_options_util.StatsType,
stats_options: tfdv.StatsOptions) -> tfdv.StatsOptions:
"""Update transform stats.
This function is called by the Transform component before it computes
pre-transform or post-transform statistics. It takes as input a stats_type,
which indicates whether this call is intended for pre-transform or
post-transform statistics. It also takes as argument the StatsOptions that
are to be (optionally) modified before being passed onto TFDV.
Args:
stats_type: The type of statistics that are to be computed (pre-transform or
post-transform).
stats_options: The configuration to pass to TFDV for computing the desired
statistics.
Returns:
An updated StatsOptions object.
"""
if stats_type == stats_options_util.StatsType.POST_TRANSFORM:
for f in stats_options.schema.feature:
if f.name == _INPUT_WORD_IDS:
# Here we extend the schema for the input_word_ids feature to enable
# NLP statistics to be computed. We pass the vocabulary (_BERT_VOCAB)
# that was used in tokenizing this feature, key tokens of interest
# (e.g. "[CLS]", "[PAD]", "[SEP]", "[UNK]") and key thresholds to
# validate. For more information on the field descriptions, see here:
# https://github.com/tensorflow/metadata/blob/master/
# tensorflow_metadata/proto/v0/schema.proto
text_format.Parse(
"""
vocabulary: "{vocab}"
coverage: {{
min_coverage: 1.0
min_avg_token_length: 3.0
excluded_string_tokens: ["[CLS]", "[PAD]", "[SEP]"]
oov_string_tokens: ["[UNK]"]
}}
token_constraints {{
string_value: "[CLS]"
min_per_sequence: 1
max_per_sequence: 1
min_fraction_of_sequences: 1
max_fraction_of_sequences: 1
}}
token_constraints {{
string_value: "[PAD]"
min_per_sequence: 0
max_per_sequence: {max_pad_per_seq}
min_fraction_of_sequences: 0
max_fraction_of_sequences: 1
}}
token_constraints {{
string_value: "[SEP]"
min_per_sequence: 2
max_per_sequence: 2
min_fraction_of_sequences: 1
max_fraction_of_sequences: 1
}}
token_constraints {{
string_value: "[UNK]"
min_per_sequence: 0
max_per_sequence: {max_unk_per_seq}
min_fraction_of_sequences: 0
max_fraction_of_sequences: 1
}}
""".format(
vocab=_BERT_VOCAB,
max_pad_per_seq=_MAX_LEN - 3, # [CLS], 2x[SEP], Token
max_unk_per_seq=_MAX_LEN - 4 # [CLS], 2x[SEP]
),
f.natural_language_domain)
return stats_options
def _input_fn(file_pattern: List[str],
data_accessor: tfx.components.DataAccessor,
tf_transform_output: tft.TFTransformOutput,
batch_size: int = 200) -> tf.data.Dataset:
"""Generates features and label for tuning/training.
Args:
file_pattern: List of paths or patterns of input tfrecord files.
data_accessor: DataAccessor for converting input to RecordBatch.
tf_transform_output: A TFTransformOutput.
batch_size: representing the number of consecutive elements of returned
dataset to combine in a single batch
Returns:
A dataset that contains (features, indices) tuple where features is a
dictionary of Tensors, and indices is a single Tensor of label indices.
"""
dataset = data_accessor.tf_dataset_factory(
file_pattern,
tfxio.TensorFlowDatasetOptions(
batch_size=batch_size, label_key=_LABEL_KEY),
tf_transform_output.transformed_metadata.schema)
dataset = dataset.repeat()
return dataset.prefetch(tf.data.AUTOTUNE)
def _get_serve_tf_examples_fn(model, tf_transform_output):
"""Returns a function that parses a serialized tf.Example."""
model.tft_layer = tf_transform_output.transform_features_layer()
@tf.function
def serve_tf_examples_fn(serialized_tf_examples):
"""Returns the output to be used in the serving signature."""
feature_spec = tf_transform_output.raw_feature_spec()
feature_spec.pop(_LABEL_KEY)
parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
transformed_features = model.tft_layer(parsed_features)
return model(transformed_features)
return serve_tf_examples_fn
# TFX Trainer will call this function.
def run_fn(fn_args: tfx.components.FnArgs):
"""Train the model based on given args.
Args:
fn_args: Holds args used to train the model as name/value pairs.
"""
tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
train_dataset = _input_fn(
fn_args.train_files,
fn_args.data_accessor,
tf_transform_output,
batch_size=_TRAIN_BATCH_SIZE)
eval_dataset = _input_fn(
fn_args.eval_files,
fn_args.data_accessor,
tf_transform_output,
batch_size=_EVAL_BATCH_SIZE)
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
bert_layer = hub.KerasLayer(_BERT_LINK, trainable=True)
model = build_and_compile_bert_classifier(bert_layer, _MAX_LEN, 2, 2e-5)
model.fit(
train_dataset,
epochs=_EPOCHS,
steps_per_epoch=fn_args.train_steps,
validation_data=eval_dataset,
validation_steps=fn_args.eval_steps)
signatures = {
'serving_default':
_get_serve_tf_examples_fn(model,
tf_transform_output).get_concrete_function(
tf.TensorSpec(
shape=[None],
dtype=tf.string,
name='examples')),
}
model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
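# Rough wiring sketch (not part of this module): Transform and Trainer pick up
# preprocessing_fn, stats_options_updater_fn and run_fn from this file when it
# is supplied as module_file. The example_gen and schema_gen components are
# assumed to exist upstream, and exact arguments may differ between TFX
# versions.
#
#   transform = tfx.components.Transform(
#       examples=example_gen.outputs['examples'],
#       schema=schema_gen.outputs['schema'],
#       module_file=module_file)
#   trainer = tfx.components.Trainer(
#       module_file=module_file,
#       examples=transform.outputs['transformed_examples'],
#       transform_graph=transform.outputs['transform_graph'],
#       schema=schema_gen.outputs['schema'],
#       train_args=tfx.proto.TrainArgs(num_steps=1000),
#       eval_args=tfx.proto.EvalArgs(num_steps=100))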
|
|
#
# Funcparserlib -- A parser library based on parser combinators
# by Andrey Vlasovskikh et al
#
__all__ = [
'a', 'tok', 'many', 'fwd', 'eof', 'maybe', 'skip', 'oneplus',
'name_parser_vars', 'SyntaxError', 'ParserError', 'memoize',
]
from warnings import warn
import logging
from funcparserlib.lexer import *
from funcparserlib.util import SyntaxError
# ----------------
log = logging.getLogger('funcparserlib')
if not hasattr(logging, 'statistics'):
logging.statistics = {}
stats = logging.statistics.setdefault('funcparserlib',
{'memoize': {}})
# ----------------
class ParserError(SyntaxError):
"""User-visible parsing error."""
pass
class GrammarError(Exception):
"""Raised when the grammar definition itself contains errors."""
pass
class _NoParseError(Exception):
"""Internal no-parse exception for backtracking."""
def __init__(self, msg='', state=None):
self.msg = msg
self.state = state
def __str__(self):
return self.msg
# ----------------
class Parser(object):
"""Base class for various parsers.
It defines some operators for parser composition and the `parse()` function
as its external interface.
"""
def parse(self, tokens):
"""Apply the parser to the tokens and produce the parsing result.
It provides a way to invoke a parser hiding details related to the
parser state. Also it makes error messages more readable by specifying
the position of the rightmost token that has been reached.
"""
p = left_recursive(self)
if p:
raise GrammarError("Parser '%s' does not halt, remove left "
"recursion from your grammar" %
ebnf_rule(p))
p = non_halting_many(self)
if p:
raise GrammarError("Parser '%s' does not halt, because it "
"contains maybe() or many() inside many()" %
ebnf_rule(p))
for q, opts in non_ll_1_parts(self):
warn('The grammar has a non-LL(1) part that '
'may slow down parsing:\n\n %s\n\n'
'Several alternatives here may start '
'with the same token, '
'possible starting tokens are:\n\n %s\n\n'
'In order to get linear parsing time add memoize() '
'to the biggest common subtree of the '
'alternatives or transform your grammar to LL(1).' %
(ebnf_rule(q), ', '.join(str(x) for x in opts)),
stacklevel=2)
_clear_caches(self)
try:
tree, _ = self(tokens, _State())
log.debug('stats: %r' % stats)
return tree
except _NoParseError as e:
max = e.state.max
tok = tokens[max] if max < len(tokens) else 'eof'
raise ParserError('%s: %s' % (e.msg, tok),
getattr(tok, 'pos', None),
max)
def __call__(self, tokens, s):
raise GrammarError('an abstract parser cannot be called')
def __add__(self, other):
"""Return a sequential composition of parsers.
The resulting parser merges the parsed sequence into a single `_Tuple`
unless the user explicitly prevents it. See also `skip()` and `>>`
combinators.
"""
return _Seq(self, other)
def __or__(self, other):
"""Return a choice composition of two parsers."""
return _Alt(self, other)
def __rshift__(self, f):
"""Return an interpreting parser.
Given a function from `b` to `c`, transforms a parser of `b` into a
parser of `c`. It is useful for transforming a parser value into another
value for making it a part of a parse tree or an AST.
"""
return _Map(self, f)
def __repr__(self):
return getattr(self, 'name', self.ebnf())
def named(self, name):
"""Specify the name of the parser."""
self.name = name
return self
def ebnf(self):
"""Get the EBNF grammar expression for the parser."""
raise GrammarError('no EBNF expression for an abstract parser')
# ----------------
class _Map(Parser):
"""Interpreting parser."""
def __init__(self, p, f):
self.p = p
self.f = f
def __call__(self, tokens, s):
v, s2 = self.p(tokens, s)
return self.f(v), s2
def ebnf(self):
return str(self.p)
# ----------------
class _Seq(Parser):
"""Sequential composition of parsers."""
def __init__(self, p1, p2):
if isinstance(p1, _Seq):
self.ps = p1.ps + [p2]
else:
self.ps = [p1, p2]
def __call__(self, tokens, s):
def magic(v1, v2):
vs = [v for v in [v1, v2] if not isinstance(v, _Ignored)]
length = len(vs)
if length == 1:
return vs[0]
elif length == 2:
if isinstance(vs[0], _Tuple):
return _Tuple(v1 + (v2,))
else:
return _Tuple(vs)
else:
return _Ignored(())
p, ps = self.ps[0], self.ps[1:]
res, s = p(tokens, s)
for p in ps:
v, s = p(tokens, s)
res = magic(res, v)
return res, s
def ebnf(self):
return ', '.join(ebnf_brackets(str(x)) for x in self.ps)
# ----------------
class _Alt(Parser):
"""Choice composition of parsers."""
def __init__(self, p1, p2):
if isinstance(p1, _Alt):
self.ps = p1.ps + [p2]
else:
self.ps = [p1, p2]
self.toks = None
def __call__(self, tokens, s):
if self.toks is None:
self.toks = []
try:
tss = [first(p) for p in self.ps]
if all(_MEMOIZE not in ts and
len(ts) == len(set(ts)) for ts in tss):
self.toks = [(t, p)
for p, ts in zip(self.ps, tss)
for t in ts]
except GrammarError:
pass
# If there is only 1 possible token for each of the alternatives, then
# optimize the parser lookup
if self.toks:
try:
t = tokens[s.pos]
except IndexError:
raise _NoParseError('no tokens left in the stream', s)
for tok, p in self.toks:
if t == tok:
return p(tokens, s)
for tok, p in self.toks:
if tok == _EPSYLON:
return p(tokens, s)
raise _NoParseError('got unexpected token', s)
else:
e = _NoParseError('no error', s)
for p in self.ps:
try:
return p(tokens, s)
except _NoParseError as npe:
e = npe
s = _State(s.pos, e.state.max)
continue
raise e
def ebnf(self):
return ' | '.join(ebnf_brackets(str(x)) for x in self.ps)
# ----------------
class _Fwd(Parser):
"""Undefined parser that can be used as a forward declaration.
You will be able to `define()` it when all the parsers it depends on are
available.
"""
def __init__(self):
self.p = None
def define(self, p):
self.p = p
def __call__(self, tokens, s):
if self.p:
return self.p(tokens, s)
else:
raise NotImplementedError('you must define() a fwd')
def uniq(self):
return 'id{}'.format(id(self))
def __repr__(self):
if self.p:
return self.p.__repr__()
else:
return '#<fwd {}>'.format(getattr(self, 'name', self.uniq()))
def ebnf(self):
return str(self.p)
# ----------------
class _Eof(Parser):
"""Throws an exception if any tokens are left in the input unparsed."""
def __call__(self, tokens, s):
if s.pos >= len(tokens):
return None, s
else:
raise _NoParseError('eof not found', s)
def ebnf(self):
return '? eof ?'
# ----------------
class _Many(Parser):
"""Repeated application of a parser.
A parser that infinitely applies the parser `p` to the input sequence of
tokens while it successfully parses them. It returns a list of parsed
values.
"""
def __init__(self, p):
self.p = p
def __call__(self, tokens, s):
# Iterative implementation preventing the stack overflow
res = []
try:
while True:
v, s = self.p(tokens, s)
if not isinstance(v, _Ignored):
res.append(v)
except _NoParseError as e:
return res, _State(s.pos, e.state.max)
def ebnf(self):
return '{ %s }' % self.p
# ----------------
class _Pure(Parser):
"""Pure parser that returns its result without looking at the input."""
def __init__(self, x):
self.x = x
def __call__(self, tokens, s):
return self.x, s
def ebnf(self):
return '? pure(%s) ?' % (self.x,)
# ----------------
class _Tok(Parser):
"""Parses a token equal to the specified token."""
def __init__(self, token):
self.tok = token
def __call__(self, tokens, s):
try:
t = tokens[s.pos]
except IndexError:
raise _NoParseError('expected {}'.format(self.tok), s)
if t == self.tok:
pos = s.pos + 1
s2 = _State(pos, max(pos, s.max))
return t, s2
else:
raise _NoParseError('got unexpected token {}'.format(t), s)
def ebnf(self):
try:
return self.tok.ebnf()
except AttributeError:
return '?%s?' % self.tok
# ----------------
def tok(type, value=None):
return _Tok(Token(type, value))
def a(value, case=True):
return _Tok(Token(None, value, case=case))
# ----------------
class _Memoize(Parser):
def __init__(self, p):
self.p = p
self.cache = {}
self.stats = stats['memoize'][self] = {'hits': 0, 'misses': 0}
def __getattr__(self, name):
return getattr(self.p, name)
def __call__(self, tokens, s):
cache = self.cache
try:
res = cache[s.pos]
self.stats['hits'] += 1
except KeyError:
res = self.p(tokens, s)
cache[s.pos] = res
self.stats['misses'] += 1
return res
def ebnf(self):
return str(self.p)
# ----------------
def _clear_caches(p):
for x in all_parsers(p):
if isinstance(x, _Memoize):
x.cache = {}
# ----------------
class _State(object):
"""Parsing state that is maintained mainly for error reporting.
It consists of the current position `pos` in the sequence being parsed and
the position `max` of the rightmost token that has been consumed while
parsing.
"""
def __init__(self, pos=0, max=0):
self.pos = pos
self.max = max
def __str__(self):
return str((self.pos, self.max))
def __repr__(self):
return '_State(%r, %r)' % (self.pos, self.max)
class _Tuple(tuple): pass
class _Ignored(object):
def __init__(self, value):
self.value = value
def __repr__(self):
return '#<ignored>'
# ----------------
def maybe(p):
"""Return a parser that retuns `None` if parsing fails."""
q = p | pure(None)
q.ebnf = lambda: '[ %s ]' % p
return q
# ----------------
def skip(p):
"""Return a parser such that its results are ignored by the combinator `+`.
It is useful for throwing away elements of concrete syntax (e.g. ",", ";").
"""
return p >> _Ignored
# ----------------
def oneplus(p):
"""Return a parser that applies the parser `p` one or more times."""
return p + many(p) >> (lambda x: [x[:-1]]+x[-1])
# ----------------
def name_parser_vars(vars):
"""Name parsers after their variables.
Named parsers are nice for debugging and error reporting.
The typical usage is to define all the parsers of the grammar in the same
scope and run `name_parser_vars(locals())` to name them all instead of calling
`Parser.named()` manually for each parser.
"""
for k, v in list(vars.items()):
if isinstance(v, Parser):
v.named(k)
# ----------------
def non_halting(p):
"""Return a non-halting part of parser `p` or `None`."""
return left_recursive(p) or non_halting_many(p)
# ----------------
def takewhile_included(pred, seq):
last = False
for x in seq:
if last:
return
elif pred(x):
yield x
else:
last = True
yield x
# ----------------
def left_recursive(p, fwds=[], seqs=[]):
"""Return a left-recursive part of parser `p` or `None`."""
def any_(xs):
for x in xs:
if x:
return x
return None
if isinstance(p, (_Map, _Many, _Memoize)):
return left_recursive(p.p, fwds, seqs)
elif isinstance(p, _Fwd):
if p in fwds:
return p
else:
return left_recursive(p.p, [p] + fwds, seqs)
elif isinstance(p, _Seq):
if p in seqs:
return None
else:
left = list(takewhile_included(lambda x: not makes_progress(x),
p.ps))
right = p.ps[len(left):]
return (any_(left_recursive(x, fwds, seqs) for x in left) or
any_(left_recursive(x, [], [p] + seqs) for x in right))
elif isinstance(p, _Alt):
return any_(left_recursive(x, fwds, seqs) for x in p.ps)
else:
return None
# ----------------
def non_halting_many(p):
"""Return a non-halting `many()` part of parser `p` or `None`."""
rs = [x for x in all_parsers(p) if isinstance(x, _Many) and
not makes_progress(x.p)]
return rs[0] if len(rs) > 0 else None
# ----------------
def makes_progress(p, fwds=[]):
"""Assert that the parser must consume some tokens in order to succeed."""
if isinstance(p, (_Map, _Memoize)):
return makes_progress(p.p, fwds)
elif isinstance(p, _Fwd):
if p in fwds:
return False
else:
return makes_progress(p.p, [p] + fwds)
elif isinstance(p, _Seq):
return any(makes_progress(x, fwds) for x in p.ps)
elif isinstance(p, _Alt):
return all(makes_progress(x, fwds) for x in p.ps)
elif isinstance(p, (_Eof, _Tok)):
return True
else:
return False
# ----------------
def ebnf_grammar(p):
"""The EBNF grammar for the parser `p` as the top-level symbol."""
def ebnf_rules(p, ps):
if p in ps:
return [], ps
ps = [p] + ps
if isinstance(p, (_Map, _Fwd, _Many, _Memoize)):
rs, ps = ebnf_rules(p.p, ps)
elif isinstance(p, (_Seq, _Alt)):
rs = []
for x in reversed(p.ps):
new_rs, ps = ebnf_rules(x, ps)
rs.extend(new_rs)
else:
rs = []
if hasattr(p, 'name'):
rs.append(ebnf_rule(p))
return rs, ps
rs, ps = ebnf_rules(p, [])
return '\n'.join(reversed(rs))
# ----------------
def ebnf_rule(p):
return '%s = %s;' % (getattr(p, 'name', 'id%d' % id(p)),
p.ebnf())
# ----------------
def ebnf_brackets(s):
return (s if ' ' not in s or
any(s.startswith(x) for x in '{[(?')
else '(%s)' % s)
# ----------------
def non_ll_1_parts(p):
assert not non_halting(p)
ps = dict((x, [t for t in first(x)
if t != _MEMOIZE])
for x in all_parsers(p)
if isinstance(x, _Alt))
return [(k, v) for k, v in list(ps.items())
if len(v) != len(set(v))]
# ----------------
def all_parsers(p):
def rec(p, fwds=[]):
if isinstance(p, (_Seq, _Alt)):
return sum([rec(x, fwds) for x in p.ps], [p])
elif isinstance(p, (_Many, _Map, _Memoize)):
return [p] + rec(p.p, fwds)
elif isinstance(p, _Fwd):
if p in fwds:
return []
else:
return [p] + rec(p.p, [p] + fwds)
else:
return [p]
return list(set(rec(p)))
# ----------------
def _symbol(s):
return 'symbol', s
_EPSYLON = _symbol('epsylon')
_MEMOIZE = _symbol('memoize')
# ----------------
def first(p):
if isinstance(p, _Tok):
return [p.tok]
elif isinstance(p, _Seq):
res = []
last_epsylon = False
for x in p.ps:
toks = first(x)
res.extend(t for t in toks if t != _EPSYLON)
last_epsylon = _EPSYLON in toks
if not last_epsylon:
break
if last_epsylon:
res.append(_EPSYLON)
return res
elif isinstance(p, _Alt):
return sum([first(x) for x in p.ps], [])
elif isinstance(p, (_Map, _Fwd)):
return first(p.p)
elif isinstance(p, _Many):
return first(p.p) + [_EPSYLON]
elif isinstance(p, _Pure):
return [_EPSYLON]
elif isinstance(p, _Eof):
return []
elif isinstance(p, _Memoize):
return [_MEMOIZE]
else:
raise GrammarError('cannot analyse parser %s' % ebnf_rule(p))
# ----------------
# Aliases for exporting
eof = _Eof()
many = _Many
pure = _Pure
fwd = _Fwd
memoize = _Memoize
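# Tiny grammar sketch (illustrative only; it assumes a funcparserlib lexer
# that emits tokens of type 'number' and the literal '+'): `+` sequences
# parsers, `|` chooses between them, `>>` maps the parsed value, and
# skip()/many()/eof shape the result as described in the docstrings above.
#
#   number = tok('number') >> (lambda t: int(t.value))
#   expr = number + many(skip(a('+')) + number) >> (lambda x: [x[0]] + x[1])
#   parser = expr + skip(eof)
#   name_parser_vars(locals())
#   values = parser.parse(tokens)   # e.g. [1, 2, 3] for "1 + 2 + 3"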
|
|
# eng.py
# Copyright (c) 2013-2019 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111,C0411,C0413,E0611,R0914,W0105,W0611,W0631
# Standard library imports
import collections
import copy
import math
import textwrap
import decimal
from decimal import Decimal
import sys
if sys.hexversion < 0x03000000: # pragma: no cover
from itertools import izip_longest as zip_longest
else: # pragma: no cover
from itertools import zip_longest
# Intra-package imports
import pexdoc.pcontracts
import pexdoc.exh
from pexdoc.ptypes import non_negative_integer
from tests.support.ptypes import (
engineering_notation_number,
engineering_notation_suffix,
)
###
# Global variables
###
_POWER_TO_SUFFIX_DICT = dict(
(exp, prf) for exp, prf in zip(range(-24, 27, 3), "yzafpnum kMGTPEZY")
)
_SUFFIX_TO_POWER_DICT = dict(
(value, key) for key, value in _POWER_TO_SUFFIX_DICT.items()
)
_SUFFIX_POWER_DICT = dict(
(key, float(10 ** value)) for key, value in _SUFFIX_TO_POWER_DICT.items()
)
EngPower = collections.namedtuple("EngPower", ["suffix", "exp"])
NumComp = collections.namedtuple("NumComp", ["mant", "exp"])
###
# Functions
###
def _to_eng_tuple(number):
"""
Return mantissa exponent tuple from a number in engineering notation.
:param number: Number
:type number: integer or float
:rtype: tuple
"""
# pylint: disable=W0141
# Helper function: split integer and fractional part of mantissa
# + ljust ensures that integer part in engineering notation has
# at most 3 digits (say if number given is 1E4)
# + rstrip ensures that there is no empty fractional part
split = lambda x, p: (x.ljust(3 + neg, "0")[:p], x[p:].rstrip("0"))
# Convert number to scientific notation, a "constant" format
mant, exp = to_scientific_tuple(number)
mant, neg = mant.replace(".", ""), mant.startswith("-")
# New values
new_mant = ".".join(filter(None, split(mant, 1 + (exp % 3) + neg)))
new_exp = int(3 * math.floor(exp / 3))
return NumComp(new_mant, new_exp)
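# Illustrative expected values for the helper above (hedged; they follow the
# engineering-notation convention of a 1-3 digit integer part):
#   _to_eng_tuple(1E4)          -> NumComp(mant='10', exp=3)
#   _to_eng_tuple(0.0000013556) -> NumComp(mant='1.3556', exp=-6)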
@pexdoc.pcontracts.contract(
number="int|float", frac_length="non_negative_integer", rjust=bool
)
def peng(number, frac_length, rjust=True):
r"""
Convert a number to engineering notation.
The absolute value of the number (if it is not exactly zero) is bounded to
the interval [1E-24, 1E+24)
:param number: Number to convert
:type number: integer or float
:param frac_length: Number of digits of fractional part
:type frac_length: :ref:`NonNegativeInteger`
:param rjust: Flag that indicates whether the number is
right-justified (True) or not (False)
:type rjust: boolean
:rtype: string
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.eng.functions.peng
:raises:
* RuntimeError (Argument \`frac_length\` is not valid)
* RuntimeError (Argument \`number\` is not valid)
* RuntimeError (Argument \`rjust\` is not valid)
.. [[[end]]]
The supported engineering suffixes are:
+----------+-------+--------+
| Exponent | Name | Suffix |
+==========+=======+========+
| 1E-24 | yocto | y |
+----------+-------+--------+
| 1E-21 | zepto | z |
+----------+-------+--------+
| 1E-18 | atto | a |
+----------+-------+--------+
| 1E-15 | femto | f |
+----------+-------+--------+
| 1E-12 | pico | p |
+----------+-------+--------+
| 1E-9 | nano | n |
+----------+-------+--------+
| 1E-6 | micro | u |
+----------+-------+--------+
| 1E-3 | milli | m |
+----------+-------+--------+
| 1E+0 | | |
+----------+-------+--------+
| 1E+3 | kilo | k |
+----------+-------+--------+
| 1E+6 | mega | M |
+----------+-------+--------+
| 1E+9 | giga | G |
+----------+-------+--------+
| 1E+12 | tera | T |
+----------+-------+--------+
| 1E+15 | peta | P |
+----------+-------+--------+
| 1E+18 | exa | E |
+----------+-------+--------+
| 1E+21 | zetta | Z |
+----------+-------+--------+
| 1E+24 | yotta | Y |
+----------+-------+--------+
For example:
>>> import putil.eng
>>> putil.eng.peng(1235.6789E3, 3, False)
'1.236M'
"""
# The decimal module has a to_eng_string() function, but it does not seem
# to work well in all cases. For example:
# >>> decimal.Decimal('34.5712233E8').to_eng_string()
# '3.45712233E+9'
# >>> decimal.Decimal('34.57122334E8').to_eng_string()
# '3457122334'
# so the conversion is implemented manually below instead of relying on it
#
# Return formatted zero if number is zero, easier to not deal with this
# special case through the rest of the algorithm
if number == 0:
number = "0.{zrs}".format(zrs="0" * frac_length) if frac_length else "0"
# Engineering notation numbers can have a sign, a 3-digit integer part,
# a period, and a fractional part of length frac_length, so the
# length of the number to the left of, and including, the period is 5
return "{0} ".format(number.rjust(5 + frac_length)) if rjust else number
# Low-bound number
sign = +1 if number >= 0 else -1
ssign = "-" if sign == -1 else ""
anumber = abs(number)
if anumber < 1e-24:
anumber = 1e-24
number = sign * 1e-24
# Round fractional part if requested frac_length is less than length
# of fractional part. Rounding method is to add a '5' at the decimal
# position just after the end of frac_length digits
exp = 3.0 * math.floor(math.floor(math.log10(anumber)) / 3.0)
mant = number / 10 ** exp
# Because exponent is a float, mantissa is a float and its string
# representation always includes a period
smant = str(mant)
ppos = smant.find(".")
if len(smant) - ppos - 1 > frac_length:
mant += sign * 5 * 10 ** (-frac_length - 1)
if abs(mant) >= 1000:
exp += 3
mant = mant / 1e3
smant = str(mant)
ppos = smant.find(".")
# Make fractional part have frac_length digits
bfrac_length = bool(frac_length)
flength = ppos - (not bfrac_length) + frac_length + 1
new_mant = smant[:flength].ljust(flength, "0")
# Upper-bound number
if exp > 24:
new_mant, exp = (
"{sign}999.{frac}".format(sign=ssign, frac="9" * frac_length),
24,
)
# Right-justify number, engineering notation numbers can have a sign,
# a 3-digit integer part and a period, and a fractional part of length
# frac_length, so the length of the number to the left of the
# period is 4
new_mant = new_mant.rjust(rjust * (4 + bfrac_length + frac_length))
# Format number
num = "{mant}{suffix}".format(
mant=new_mant, suffix=_POWER_TO_SUFFIX_DICT[exp] if exp else " " * bool(rjust)
)
return num
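# Additional illustrative calls (hedged; they follow the bounding and rounding
# rules described in the docstring above):
#   peng(0, 3, False)         -> '0.000'
#   peng(1E40, 3, False)      -> '999.999Y'  (clipped to the 1E+24 upper bound)
#   peng(-0.25E-27, 3, False) -> '-1.000y'   (clipped to the 1E-24 lower bound)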
@pexdoc.pcontracts.contract(snum="engineering_notation_number")
def peng_float(snum):
r"""
Return floating point equivalent of a number in engineering notation.
:param snum: Number
:type snum: :ref:`EngineeringNotationNumber`
:rtype: string
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.eng.functions.peng_float
:raises: RuntimeError (Argument \`snum\` is not valid)
.. [[[end]]]
For example:
>>> import putil.eng
>>> putil.eng.peng_float(putil.eng.peng(1235.6789E3, 3, False))
1236000.0
"""
# This can be coded as peng_mant(snum)*(peng_power(snum)[1]), but the
# "function unrolling" is about 4x faster
snum = snum.rstrip()
power = _SUFFIX_POWER_DICT[" " if snum[-1].isdigit() else snum[-1]]
return float(snum if snum[-1].isdigit() else snum[:-1]) * power
@pexdoc.pcontracts.contract(snum="engineering_notation_number")
def peng_frac(snum):
r"""
Return the fractional part of a number represented in engineering notation.
:param snum: Number
:type snum: :ref:`EngineeringNotationNumber`
:rtype: integer
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.eng.functions.peng_frac
:raises: RuntimeError (Argument \`snum\` is not valid)
.. [[[end]]]
For example:
>>> import putil.eng
>>> putil.eng.peng_frac(putil.eng.peng(1235.6789E3, 3, False))
236
"""
snum = snum.rstrip()
pindex = snum.find(".")
if pindex == -1:
return 0
return int(snum[pindex + 1 :] if snum[-1].isdigit() else snum[pindex + 1 : -1])
@pexdoc.pcontracts.contract(snum="engineering_notation_number")
def peng_int(snum):
r"""
Return the integer part of a number represented in engineering notation.
:param snum: Number
:type snum: :ref:`EngineeringNotationNumber`
:rtype: integer
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.eng.functions.peng_int
:raises: RuntimeError (Argument \`snum\` is not valid)
.. [[[end]]]
For example:
>>> import putil.eng
>>> putil.eng.peng_int(putil.eng.peng(1235.6789E3, 3, False))
1
"""
return int(peng_mant(snum))
@pexdoc.pcontracts.contract(snum="engineering_notation_number")
def peng_mant(snum):
r"""
Return the mantissa of a number represented in engineering notation.
:param snum: Number
:type snum: :ref:`EngineeringNotationNumber`
:rtype: float
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.eng.functions.peng_mant
:raises: RuntimeError (Argument \`snum\` is not valid)
.. [[[end]]]
For example:
>>> import putil.eng
>>> putil.eng.peng_mant(putil.eng.peng(1235.6789E3, 3, False))
1.236
"""
snum = snum.rstrip()
return float(snum if snum[-1].isdigit() else snum[:-1])
@pexdoc.pcontracts.contract(snum="engineering_notation_number")
def peng_power(snum):
r"""
Return the engineering suffix and its floating point equivalent for a number in engineering notation.
:py:func:`putil.eng.peng` lists the correspondence between suffix and
floating point exponent.
:param snum: Number
:type snum: :ref:`EngineeringNotationNumber`
:rtype: named tuple in which the first item is the engineering suffix and
the second item is the floating point equivalent of the suffix
when the number is represented in engineering notation.
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.eng.functions.peng_power
:raises: RuntimeError (Argument \`snum\` is not valid)
.. [[[end]]]
For example:
>>> import putil.eng
>>> putil.eng.peng_power(putil.eng.peng(1235.6789E3, 3, False))
EngPower(suffix='M', exp=1000000.0)
"""
suffix = " " if snum[-1].isdigit() else snum[-1]
return EngPower(suffix, _SUFFIX_POWER_DICT[suffix])
@pexdoc.pcontracts.contract(snum="engineering_notation_number")
def peng_suffix(snum):
r"""
Return the suffix of a number represented in engineering notation.
:param snum: Number
:type snum: :ref:`EngineeringNotationNumber`
:rtype: string
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.eng.functions.peng_suffix
:raises: RuntimeError (Argument \`snum\` is not valid)
.. [[[end]]]
For example:
>>> import putil.eng
>>> putil.eng.peng_suffix(putil.eng.peng(1235.6789E3, 3, False))
'M'
"""
snum = snum.rstrip()
return " " if snum[-1].isdigit() else snum[-1]
@pexdoc.pcontracts.contract(suffix="engineering_notation_suffix", offset=int)
def peng_suffix_math(suffix, offset):
r"""
Return the engineering suffix obtained by applying an offset (in number of suffixes) to a starting suffix.
:param suffix: Engineering suffix
:type suffix: :ref:`EngineeringNotationSuffix`
:param offset: Engineering suffix offset
:type offset: integer
:rtype: string
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.eng.functions.peng_suffix_math
:raises:
* RuntimeError (Argument \`offset\` is not valid)
* RuntimeError (Argument \`suffix\` is not valid)
* ValueError (Argument \`offset\` is not valid)
.. [[[end]]]
For example:
>>> import putil.eng
>>> putil.eng.peng_suffix_math('u', 6)
'T'
"""
# pylint: disable=W0212
eobj = pexdoc.exh.addex(ValueError, "Argument `offset` is not valid")
try:
return _POWER_TO_SUFFIX_DICT[_SUFFIX_TO_POWER_DICT[suffix] + 3 * offset]
except KeyError:
eobj(True)
def to_scientific_string(number, frac_length=None, exp_length=None, sign_always=False):
"""
Convert number or number string to a number string in scientific notation.
Full precision is maintained if
the number is represented as a string
:param number: Number to convert
:type number: number or string
:param frac_length: Number of digits of fractional part, None indicates
that the fractional part of the number should not be
limited
:type frac_length: integer or None
:param exp_length: Number of digits of the exponent; the actual length of
the exponent takes precedence if it is longer
:type exp_length: integer or None
:param sign_always: Flag that indicates whether the sign always
precedes the number for both non-negative and negative
numbers (True) or only for negative numbers (False)
:type sign_always: boolean
:rtype: string
For example:
>>> import putil.eng
>>> putil.eng.to_scientific_string(333)
'3.33E+2'
>>> putil.eng.to_scientific_string(0.00101)
'1.01E-3'
>>> putil.eng.to_scientific_string(99.999, 1, 2, True)
'+1.0E+02'
"""
exp_length = 0 if not exp_length else exp_length
mant, exp = to_scientific_tuple(number)
fmant = float(mant)
if (not frac_length) or (fmant == int(fmant)):
return "{sign}{mant}{period}{zeros}E{exp_sign}{exp}".format(
sign="+" if sign_always and (fmant >= 0) else "",
mant=mant,
period="." if frac_length else "",
zeros="0" * frac_length if frac_length else "",
exp_sign="-" if exp < 0 else "+",
exp=str(abs(exp)).rjust(exp_length, "0"),
)
rounded_mant = round(fmant, frac_length)
# Handle the corner case where the rounded mantissa is exactly 10 (e.g. 9.9999
# rounded to one fractional digit); fold the extra factor of ten into the exponent
if abs(rounded_mant) == 10:
rounded_mant = fmant = -1.0 if number < 0 else 1.0
frac_length = 1
exp = exp + 1
zeros = 2 + (1 if (fmant < 0) else 0) + frac_length - len(str(rounded_mant))
return "{sign}{mant}{zeros}E{exp_sign}{exp}".format(
sign="+" if sign_always and (fmant >= 0) else "",
mant=rounded_mant,
zeros="0" * zeros,
exp_sign="-" if exp < 0 else "+",
exp=str(abs(exp)).rjust(exp_length, "0"),
)
def to_scientific_tuple(number):
"""
Return mantissa and exponent of a number in scientific notation.
Full precision is maintained if the number is
represented as a string
:param number: Number
:type number: integer, float or string
:rtype: named tuple in which the first item is the mantissa (*string*)
and the second item is the exponent (*integer*) of the number
when expressed in scientific notation
For example:
>>> import putil.eng
>>> putil.eng.to_scientific_tuple('135.56E-8')
NumComp(mant='1.3556', exp=-6)
>>> putil.eng.to_scientific_tuple(0.0000013556)
NumComp(mant='1.3556', exp=-6)
"""
# pylint: disable=W0632
convert = not isinstance(number, str)
# Detect zero and return, simplifies subsequent algorithm
if (convert and (number == 0)) or (
(not convert) and (not number.strip("0").strip("."))
):
return ("0", 0)
# Break down number into its components, use Decimal type to
# preserve resolution:
# sign : 0 -> +, 1 -> -
# digits: tuple with digits of number
# exp : exponent that gives null fractional part
sign, digits, exp = Decimal(str(number) if convert else number).as_tuple()
mant = (
"{sign}{itg}{frac}".format(
sign="-" if sign else "",
itg=digits[0],
frac=(
".{frac}".format(frac="".join([str(num) for num in digits[1:]]))
if len(digits) > 1
else ""
),
)
.rstrip("0")
.rstrip(".")
)
exp += len(digits) - 1
return NumComp(mant, exp)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnGatewaysOperations(object):
"""VpnGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VpnGateway"
"""Retrieves the details of a virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.VpnGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
gateway_name, # type: str
vpn_gateway_parameters, # type: "_models.VpnGateway"
**kwargs # type: Any
):
# type: (...) -> "_models.VpnGateway"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_gateway_parameters, 'VpnGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
gateway_name, # type: str
vpn_gateway_parameters, # type: "_models.VpnGateway"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VpnGateway"]
"""Creates a virtual wan vpn gateway if it doesn't exist else updates the existing gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param vpn_gateway_parameters: Parameters supplied to create or Update a virtual wan vpn
gateway.
:type vpn_gateway_parameters: ~azure.mgmt.network.v2019_11_01.models.VpnGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_11_01.models.VpnGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
vpn_gateway_parameters=vpn_gateway_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
gateway_name, # type: str
vpn_gateway_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.VpnGateway"
"""Updates virtual wan vpn gateway tags.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param vpn_gateway_parameters: Parameters supplied to update a virtual wan vpn gateway tags.
:type vpn_gateway_parameters: ~azure.mgmt.network.v2019_11_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.VpnGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_gateway_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
def _reset_initial(
self,
resource_group_name, # type: str
gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.VpnGateway"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VpnGateway"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self._reset_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_reset_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/reset'} # type: ignore
def begin_reset(
self,
resource_group_name, # type: str
gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VpnGateway"]
"""Resets the primary of the vpn gateway in the specified resource group.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_11_01.models.VpnGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._reset_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/reset'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVpnGatewaysResult"]
"""Lists all the VpnGateways in a resource group.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_11_01.models.ListVpnGatewaysResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnGatewaysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVpnGatewaysResult"]
"""Lists all the VpnGateways in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_11_01.models.ListVpnGatewaysResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnGatewaysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnGateways'} # type: ignore
|
|
# coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class StringParameterDefinition(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'_class': 'str',
'default_parameter_value': 'StringParameterValue',
'description': 'str',
'name': 'str',
'type': 'str'
}
attribute_map = {
'_class': '_class',
'default_parameter_value': 'defaultParameterValue',
'description': 'description',
'name': 'name',
'type': 'type'
}
def __init__(self, _class=None, default_parameter_value=None, description=None, name=None, type=None, local_vars_configuration=None): # noqa: E501
"""StringParameterDefinition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self.__class = None
self._default_parameter_value = None
self._description = None
self._name = None
self._type = None
self.discriminator = None
if _class is not None:
self._class = _class
if default_parameter_value is not None:
self.default_parameter_value = default_parameter_value
if description is not None:
self.description = description
if name is not None:
self.name = name
if type is not None:
self.type = type
@property
def _class(self):
"""Gets the _class of this StringParameterDefinition. # noqa: E501
:return: The _class of this StringParameterDefinition. # noqa: E501
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class):
"""Sets the _class of this StringParameterDefinition.
:param _class: The _class of this StringParameterDefinition. # noqa: E501
:type _class: str
"""
self.__class = _class
@property
def default_parameter_value(self):
"""Gets the default_parameter_value of this StringParameterDefinition. # noqa: E501
:return: The default_parameter_value of this StringParameterDefinition. # noqa: E501
:rtype: StringParameterValue
"""
return self._default_parameter_value
@default_parameter_value.setter
def default_parameter_value(self, default_parameter_value):
"""Sets the default_parameter_value of this StringParameterDefinition.
:param default_parameter_value: The default_parameter_value of this StringParameterDefinition. # noqa: E501
:type default_parameter_value: StringParameterValue
"""
self._default_parameter_value = default_parameter_value
@property
def description(self):
"""Gets the description of this StringParameterDefinition. # noqa: E501
:return: The description of this StringParameterDefinition. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this StringParameterDefinition.
:param description: The description of this StringParameterDefinition. # noqa: E501
:type description: str
"""
self._description = description
@property
def name(self):
"""Gets the name of this StringParameterDefinition. # noqa: E501
:return: The name of this StringParameterDefinition. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this StringParameterDefinition.
:param name: The name of this StringParameterDefinition. # noqa: E501
:type name: str
"""
self._name = name
@property
def type(self):
"""Gets the type of this StringParameterDefinition. # noqa: E501
:return: The type of this StringParameterDefinition. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this StringParameterDefinition.
:param type: The type of this StringParameterDefinition. # noqa: E501
:type type: str
"""
self._type = type
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StringParameterDefinition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, StringParameterDefinition):
return True
return self.to_dict() != other.to_dict()
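# Illustrative usage (a hedged sketch, not part of the generated model):
#
#   definition = StringParameterDefinition(name="BRANCH", description="Git branch to build")
#   definition.to_dict()                # keys use python attribute names, e.g. 'default_parameter_value'
#   definition.to_dict(serialize=True)  # keys use the JSON names from attribute_map, e.g. 'defaultParameterValue'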
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django import template
from django.template.defaultfilters import title
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext as _
from horizon import api
from horizon import tables
from horizon.templatetags import sizeformat
LOG = logging.getLogger(__name__)
ACTIVE_STATES = ("ACTIVE",)
POWER_STATES = {
0: "NO STATE",
1: "RUNNING",
2: "BLOCKED",
3: "PAUSED",
4: "SHUTDOWN",
5: "SHUTOFF",
6: "CRASHED",
7: "SUSPENDED",
8: "FAILED",
9: "BUILDING",
}
PAUSE = 0
UNPAUSE = 1
SUSPEND = 0
RESUME = 1
class TerminateInstance(tables.BatchAction):
name = "terminate"
action_present = _("Terminate")
action_past = _("Terminated")
data_type_singular = _("Instance")
data_type_plural = _("Instances")
classes = ('danger',)
def action(self, request, obj_id):
api.server_delete(request, obj_id)
class RebootInstance(tables.BatchAction):
name = "reboot"
action_present = _("Reboot")
action_past = _("Rebooted")
data_type_singular = _("Instance")
data_type_plural = _("Instances")
classes = ('danger',)
def allowed(self, request, instance=None):
return instance.status in ACTIVE_STATES
def action(self, request, obj_id):
api.server_reboot(request, obj_id)
class TogglePause(tables.BatchAction):
name = "pause"
action_present = (_("Pause"), _("Unpause"))
action_past = (_("Paused"), _("Unpaused"))
data_type_singular = _("Instance")
data_type_plural = _("Instances")
def allowed(self, request, instance=None):
self.paused = False
if not instance:
return self.paused
self.paused = instance.status == "PAUSED"
if self.paused:
self.current_present_action = UNPAUSE
return instance.status in ACTIVE_STATES or self.paused
def action(self, request, obj_id):
if self.paused:
api.server_unpause(request, obj_id)
self.current_past_action = UNPAUSE
else:
api.server_pause(request, obj_id)
self.current_past_action = PAUSE
class ToggleSuspend(tables.BatchAction):
name = "suspend"
action_present = (_("Suspend"), _("Resume"))
action_past = (_("Suspended"), _("Resumed"))
data_type_singular = _("Instance")
data_type_plural = _("Instances")
def allowed(self, request, instance=None):
self.suspended = False
if not instance:
return self.suspended
self.suspended = instance.status == "SUSPENDED"
if self.suspended:
self.current_present_action = RESUME
return instance.status in ACTIVE_STATES or self.suspended
def action(self, request, obj_id):
if self.suspended:
api.server_resume(request, obj_id)
self.current_past_action = RESUME
else:
api.server_suspend(request, obj_id)
self.current_past_action = SUSPEND
class LaunchLink(tables.LinkAction):
name = "launch"
verbose_name = _("Launch Instance")
url = "horizon:nova:images_and_snapshots:index"
attrs = {"class": "btn small"}
class EditInstance(tables.LinkAction):
name = "edit"
verbose_name = _("Edit Instance")
url = "horizon:nova:instances_and_volumes:instances:update"
attrs = {"class": "ajax-modal"}
class SnapshotLink(tables.LinkAction):
name = "snapshot"
verbose_name = _("Snapshot")
url = "horizon:nova:images_and_snapshots:snapshots:create"
attrs = {"class": "ajax-modal"}
def allowed(self, request, instance=None):
return instance.status in ACTIVE_STATES
class ConsoleLink(tables.LinkAction):
name = "console"
verbose_name = _("VNC Console")
url = "horizon:nova:instances_and_volumes:instances:vnc"
def allowed(self, request, instance=None):
return instance.status in ACTIVE_STATES
class LogLink(tables.LinkAction):
name = "log"
verbose_name = _("View Log")
url = "horizon:nova:instances_and_volumes:instances:console"
def allowed(self, request, instance=None):
return instance.status in ACTIVE_STATES
class UpdateRow(tables.UpdateAction):
def get_data(self, request, instance_id):
instance = api.server_get(request, instance_id)
flavors = api.flavor_list(request)
keyed_flavors = [(str(flavor.id), flavor) for flavor in flavors]
instance.full_flavor = SortedDict(keyed_flavors)[instance.flavor["id"]]
return instance
def get_ips(instance):
template_name = 'nova/instances_and_volumes/instances/_instance_ips.html'
context = {"instance": instance}
return template.loader.render_to_string(template_name, context)
def get_size(instance):
if hasattr(instance, "full_flavor"):
size_string = _("%(RAM)s RAM | %(VCPU)s VCPU | %(disk)s Disk")
vals = {'RAM': sizeformat.mbformat(instance.full_flavor.ram),
'VCPU': instance.full_flavor.vcpus,
'disk': sizeformat.diskgbformat(instance.full_flavor.disk)}
return size_string % vals
return _("Not available")
def get_power_state(instance):
return POWER_STATES.get(getattr(instance, "OS-EXT-STS:power_state", 0), '')
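# For example (illustrative only): an instance whose "OS-EXT-STS:power_state"
# attribute is 1 is displayed as "RUNNING"; instances lacking the attribute fall
# back to 0 and are shown as "NO STATE".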
class InstancesTable(tables.DataTable):
TASK_STATUS_CHOICES = (
(None, True),
("none", True)
)
name = tables.Column("name", link="horizon:nova:instances_and_volumes:" \
"instances:detail")
ip = tables.Column(get_ips, verbose_name=_("IP Address"))
size = tables.Column(get_size, verbose_name=_("Size"))
status = tables.Column("status", filters=(title,))
task = tables.Column("OS-EXT-STS:task_state",
verbose_name=_("Task"),
filters=(title,),
status=True,
status_choices=TASK_STATUS_CHOICES)
state = tables.Column(get_power_state,
filters=(title,),
verbose_name=_("Power State"))
class Meta:
name = "instances"
verbose_name = _("Instances")
status_column = "task"
table_actions = (LaunchLink, TerminateInstance)
row_actions = (EditInstance, ConsoleLink, LogLink, SnapshotLink,
TogglePause, ToggleSuspend, RebootInstance,
TerminateInstance, UpdateRow)
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import errno
import os
import uuid
from swift import gettext_ as _
from time import ctime, time
from random import choice, random
from struct import unpack_from
from eventlet import sleep, Timeout
import swift.common.db
from swift.common.db import DatabaseConnectionError
from swift.container.backend import ContainerBroker
from swift.container.sync_store import ContainerSyncStore
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.internal_client import (
delete_object, put_object, head_object,
InternalClient, UnexpectedResponse)
from swift.common.exceptions import ClientException
from swift.common.ring import Ring
from swift.common.ring.utils import is_local_device
from swift.common.utils import (
clean_content_type, config_true_value,
FileLikeIter, get_logger, hash_path, quote, urlparse, validate_sync_to,
whataremyips, Timestamp, decode_timestamps)
from swift.common.daemon import Daemon
from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND
from swift.common.wsgi import ConfigString
# The default internal client config body is to support upgrades without
# requiring deployment of the new /etc/swift/internal-client.conf
ic_conf_body = """
[DEFAULT]
# swift_dir = /etc/swift
# user = swift
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
#
# comma separated list of functions to call to setup custom log handlers.
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
#
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host =
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =
[pipeline:main]
pipeline = catch_errors proxy-logging cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
# See proxy-server.conf-sample for options
[filter:cache]
use = egg:swift#memcache
# See proxy-server.conf-sample for options
[filter:proxy-logging]
use = egg:swift#proxy_logging
[filter:catch_errors]
use = egg:swift#catch_errors
# See proxy-server.conf-sample for options
""".lstrip()
class ContainerSync(Daemon):
"""
Daemon to sync syncable containers.
This is done by scanning the local devices for container databases and
checking for x-container-sync-to and x-container-sync-key metadata values.
If they exist, newer rows since the last sync will trigger PUTs or DELETEs
to the other container.
The actual syncing is slightly more complicated: it makes use of the three
(or number-of-replicas) primary nodes for a container so that they do not all
attempt the exact same work, yet no work is missed if one of the nodes happens
to be down.
Two sync points are kept per container database. All rows between the two
sync points trigger updates. Any rows newer than both sync points cause
updates depending on the node's position for the container (primary nodes
do one third, etc. depending on the replica count of course). After a sync
run, the first sync point is set to the newest ROWID known and the second
sync point is set to newest ROWID for which all updates have been sent.
An example may help. Assume replica count is 3 and perfectly matching
ROWIDs starting at 1.
First sync run, database has 6 rows:
* SyncPoint1 starts as -1.
* SyncPoint2 starts as -1.
* No rows between points, so no "all updates" rows.
* Six rows newer than SyncPoint1, so a third of the rows are sent
by node 1, another third by node 2, remaining third by node 3.
* SyncPoint1 is set as 6 (the newest ROWID known).
* SyncPoint2 is left as -1 since no "all updates" rows were synced.
Next sync run, database has 12 rows:
* SyncPoint1 starts as 6.
* SyncPoint2 starts as -1.
* The rows between -1 and 6 all trigger updates (most of which
should short-circuit on the remote end as having already been
done).
* Six more rows newer than SyncPoint1, so a third of the rows are
sent by node 1, another third by node 2, remaining third by node
3.
* SyncPoint1 is set as 12 (the newest ROWID known).
* SyncPoint2 is set as 6 (the newest "all updates" ROWID).
In this way, under normal circumstances each node sends its share of
updates each run and just sends a batch of older updates to ensure nothing
was missed.
:param conf: The dict of configuration values from the [container-sync]
section of the container-server.conf
:param container_ring: If None, the <swift_dir>/container.ring.gz will be
loaded. This is overridden by unit tests.
"""
def __init__(self, conf, container_ring=None, logger=None):
#: The dict of configuration values from the [container-sync] section
#: of the container-server.conf.
self.conf = conf
#: Logger to use for container-sync log lines.
self.logger = logger or get_logger(conf, log_route='container-sync')
#: Path to the local device mount points.
self.devices = conf.get('devices', '/srv/node')
#: Indicates whether mount points should be verified as actual mount
#: points (normally true, false for tests and SAIO).
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
#: Minimum time between full scans. This is to keep the daemon from
#: running wild on near empty systems.
self.interval = int(conf.get('interval', 300))
#: Maximum amount of time to spend syncing a container before moving on
#: to the next one. If a container sync hasn't finished in this time,
#: it'll just be resumed next scan.
self.container_time = int(conf.get('container_time', 60))
#: ContainerSyncCluster instance for validating sync-to values.
self.realms_conf = ContainerSyncRealms(
os.path.join(
conf.get('swift_dir', '/etc/swift'),
'container-sync-realms.conf'),
self.logger)
#: The list of hosts we're allowed to send syncs to. This can be
#: overridden by data in self.realms_conf
self.allowed_sync_hosts = [
h.strip()
for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
if h.strip()]
self.http_proxies = [
a.strip()
for a in conf.get('sync_proxy', '').split(',')
if a.strip()]
#: ContainerSyncStore instance for iterating over synced containers
self.sync_store = ContainerSyncStore(self.devices,
self.logger,
self.mount_check)
#: Number of containers with sync turned on that were successfully
#: synced.
self.container_syncs = 0
#: Number of successful DELETEs triggered.
self.container_deletes = 0
#: Number of successful PUTs triggered.
self.container_puts = 0
#: Number of containers whose sync has been turned off, but
#: are not yet cleared from the sync store.
self.container_skips = 0
#: Number of containers that had a failure of some type.
self.container_failures = 0
#: Per-container stats, collected for the container currently being synced:
#: puts - the number of puts that were done for the container
#: deletes - the number of deletes that were done for the container
#: bytes - the total number of bytes transferred for the container
self.container_stats = collections.defaultdict(int)
self.container_stats.clear()
#: Time of last stats report.
self.reported = time()
self.swift_dir = conf.get('swift_dir', '/etc/swift')
#: swift.common.ring.Ring for locating containers.
self.container_ring = container_ring or Ring(self.swift_dir,
ring_name='container')
bind_ip = conf.get('bind_ip', '0.0.0.0')
self._myips = whataremyips(bind_ip)
self._myport = int(conf.get('bind_port', 6201))
swift.common.db.DB_PREALLOCATION = \
config_true_value(conf.get('db_preallocation', 'f'))
self.conn_timeout = float(conf.get('conn_timeout', 5))
request_tries = int(conf.get('request_tries') or 3)
internal_client_conf_path = conf.get('internal_client_conf_path')
if not internal_client_conf_path:
self.logger.warning(
_('Configuration option internal_client_conf_path not '
'defined. Using default configuration. See '
'internal-client.conf-sample for options'))
internal_client_conf = ConfigString(ic_conf_body)
else:
internal_client_conf = internal_client_conf_path
try:
self.swift = InternalClient(
internal_client_conf, 'Swift Container Sync', request_tries)
except IOError as err:
if err.errno != errno.ENOENT:
raise
raise SystemExit(
_('Unable to load internal client from config: '
'%(conf)r (%(error)s)')
% {'conf': internal_client_conf_path, 'error': err})
def run_forever(self, *args, **kwargs):
"""
Runs container sync scans until stopped.
"""
sleep(random() * self.interval)
while True:
begin = time()
for path in self.sync_store.synced_containers_generator():
self.container_stats.clear()
self.container_sync(path)
if time() - self.reported >= 3600: # once an hour
self.report()
elapsed = time() - begin
if elapsed < self.interval:
sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""
Runs a single container sync scan.
"""
self.logger.info(_('Begin container sync "once" mode'))
begin = time()
for path in self.sync_store.synced_containers_generator():
self.container_sync(path)
if time() - self.reported >= 3600: # once an hour
self.report()
self.report()
elapsed = time() - begin
self.logger.info(
_('Container sync "once" mode completed: %.02fs'), elapsed)
def report(self):
"""
Writes a report of the stats to the logger and resets the stats for the
next report.
"""
self.logger.info(
_('Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s '
'puts], %(skip)s skipped, %(fail)s failed'),
{'time': ctime(self.reported),
'sync': self.container_syncs,
'delete': self.container_deletes,
'put': self.container_puts,
'skip': self.container_skips,
'fail': self.container_failures})
self.reported = time()
self.container_syncs = 0
self.container_deletes = 0
self.container_puts = 0
self.container_skips = 0
self.container_failures = 0
def container_report(self, start, end, sync_point1, sync_point2, info,
max_row):
self.logger.info(_('Container sync report: %(container)s, '
'time window start: %(start)s, '
'time window end: %(end)s, '
'puts: %(puts)s, '
'posts: %(posts)s, '
'deletes: %(deletes)s, '
'bytes: %(bytes)s, '
'sync_point1: %(point1)s, '
'sync_point2: %(point2)s, '
'total_rows: %(total)s'),
{'container': '%s/%s' % (info['account'],
info['container']),
'start': start,
'end': end,
'puts': self.container_stats['puts'],
'posts': 0,
'deletes': self.container_stats['deletes'],
'bytes': self.container_stats['bytes'],
'point1': sync_point1,
'point2': sync_point2,
'total': max_row})
def container_sync(self, path):
"""
Checks the given path for a container database, determines if syncing
is turned on for that database and, if so, sends any updates to the
other container.
:param path: the path to a container db
"""
broker = None
try:
broker = ContainerBroker(path)
# The path we pass to the ContainerBroker is a real path of
# a container DB. If we get here, however, it means that this
# path is linked from the sync_containers dir. In rare cases
# of races or process failures the link can be stale and
# the get_info below will raise a "DB doesn't exist" exception.
# In this case we remove the stale link and raise an error
# since in most cases the db should be there.
try:
info = broker.get_info()
except DatabaseConnectionError as db_err:
if str(db_err).endswith("DB doesn't exist"):
self.sync_store.remove_synced_container(broker)
raise
x, nodes = self.container_ring.get_nodes(info['account'],
info['container'])
for ordinal, node in enumerate(nodes):
if is_local_device(self._myips, self._myport,
node['ip'], node['port']):
break
else:
return
if not broker.is_deleted():
sync_to = None
user_key = None
sync_point1 = info['x_container_sync_point1']
sync_point2 = info['x_container_sync_point2']
for key, (value, timestamp) in broker.metadata.items():
if key.lower() == 'x-container-sync-to':
sync_to = value
elif key.lower() == 'x-container-sync-key':
user_key = value
if not sync_to or not user_key:
self.container_skips += 1
self.logger.increment('skips')
return
err, sync_to, realm, realm_key = validate_sync_to(
sync_to, self.allowed_sync_hosts, self.realms_conf)
if err:
self.logger.info(
_('ERROR %(db_file)s: %(validate_sync_to_err)s'),
{'db_file': str(broker),
'validate_sync_to_err': err})
self.container_failures += 1
self.logger.increment('failures')
return
start_at = time()
stop_at = start_at + self.container_time
next_sync_point = None
sync_stage_time = start_at
try:
while time() < stop_at and sync_point2 < sync_point1:
rows = broker.get_items_since(sync_point2, 1)
if not rows:
break
row = rows[0]
if row['ROWID'] > sync_point1:
break
# This node will only initially sync out one third
# of the objects (if 3 replicas, 1/4 if 4, etc.)
# and will skip problematic rows as needed in case of
# faults.
# This section will attempt to sync previously skipped
# rows in case the previous attempts by any of the
# nodes didn't succeed.
if not self.container_sync_row(
row, sync_to, user_key, broker, info, realm,
realm_key):
if not next_sync_point:
next_sync_point = sync_point2
sync_point2 = row['ROWID']
broker.set_x_container_sync_points(None, sync_point2)
if next_sync_point:
broker.set_x_container_sync_points(None,
next_sync_point)
else:
next_sync_point = sync_point2
sync_stage_time = time()
while sync_stage_time < stop_at:
rows = broker.get_items_since(sync_point1, 1)
if not rows:
break
row = rows[0]
key = hash_path(info['account'], info['container'],
row['name'], raw_digest=True)
# This node will only initially sync out one third of
# the objects (if 3 replicas, 1/4 if 4, etc.).
# It'll come back around to the section above
# and attempt to sync previously skipped rows in case
# the other nodes didn't succeed or in case it failed
# to do so the first time.
if unpack_from('>I', key)[0] % \
len(nodes) == ordinal:
self.container_sync_row(
row, sync_to, user_key, broker, info, realm,
realm_key)
sync_point1 = row['ROWID']
broker.set_x_container_sync_points(sync_point1, None)
sync_stage_time = time()
self.container_syncs += 1
self.logger.increment('syncs')
finally:
self.container_report(start_at, sync_stage_time,
sync_point1,
next_sync_point,
info, broker.get_max_row())
except (Exception, Timeout):
self.container_failures += 1
self.logger.increment('failures')
self.logger.exception(_('ERROR Syncing %s'),
broker if broker else path)
def _update_sync_to_headers(self, name, sync_to, user_key,
realm, realm_key, method, headers):
"""
Updates container sync headers
:param name: The name of the object
:param sync_to: The URL to the remote container.
:param user_key: The X-Container-Sync-Key to use when sending requests
to the other container.
:param realm: The realm from self.realms_conf, if there is one.
If None, fallback to using the older allowed_sync_hosts
way of syncing.
:param realm_key: The realm key from self.realms_conf, if there
is one. If None, fallback to using the older
allowed_sync_hosts way of syncing.
:param method: HTTP method to create sig with
:param headers: headers to update with container sync headers
"""
if realm and realm_key:
nonce = uuid.uuid4().hex
path = urlparse(sync_to).path + '/' + quote(name)
sig = self.realms_conf.get_sig(method, path,
headers.get('x-timestamp', 0),
nonce, realm_key,
user_key)
headers['x-container-sync-auth'] = '%s %s %s' % (realm,
nonce,
sig)
else:
headers['x-container-sync-key'] = user_key
def _object_in_remote_container(self, name, sync_to, user_key,
realm, realm_key, timestamp):
"""
Performs head object on remote to eliminate extra remote put and
local get object calls
:param name: The name of the object in the updated row in the local
database triggering the sync update.
:param sync_to: The URL to the remote container.
:param user_key: The X-Container-Sync-Key to use when sending requests
to the other container.
:param realm: The realm from self.realms_conf, if there is one.
If None, fallback to using the older allowed_sync_hosts
way of syncing.
:param realm_key: The realm key from self.realms_conf, if there
is one. If None, fallback to using the older
allowed_sync_hosts way of syncing.
:param timestamp: last modified date of local object
:returns: True if object already exists in remote
"""
headers = {'x-timestamp': timestamp.internal}
self._update_sync_to_headers(name, sync_to, user_key, realm,
realm_key, 'HEAD', headers)
try:
metadata, _ = head_object(sync_to, name=name,
headers=headers,
proxy=self.select_http_proxy(),
logger=self.logger,
retries=0)
remote_ts = Timestamp(metadata.get('x-timestamp', 0))
self.logger.debug("remote obj timestamp %s local obj %s" %
(timestamp.internal, remote_ts.internal))
if timestamp <= remote_ts:
return True
# Object in remote should be updated
return False
except ClientException as http_err:
# Object not in remote
if http_err.http_status == 404:
return False
raise http_err
def container_sync_row(self, row, sync_to, user_key, broker, info,
realm, realm_key):
"""
Sends the update the row indicates to the sync_to container.
Update can be either delete or put.
:param row: The updated row in the local database triggering the sync
update.
:param sync_to: The URL to the remote container.
:param user_key: The X-Container-Sync-Key to use when sending requests
to the other container.
:param broker: The local container database broker.
:param info: The get_info result from the local container database
broker.
:param realm: The realm from self.realms_conf, if there is one.
If None, fallback to using the older allowed_sync_hosts
way of syncing.
:param realm_key: The realm key from self.realms_conf, if there
is one. If None, fallback to using the older
allowed_sync_hosts way of syncing.
:returns: True on success
"""
try:
start_time = time()
# extract last modified time from the created_at value
ts_data, ts_ctype, ts_meta = decode_timestamps(
row['created_at'])
if row['deleted']:
# when sync'ing a deleted object, use ts_data - this is the
# timestamp of the source tombstone
try:
headers = {'x-timestamp': ts_data.internal}
self._update_sync_to_headers(row['name'], sync_to,
user_key, realm, realm_key,
'DELETE', headers)
delete_object(sync_to, name=row['name'], headers=headers,
proxy=self.select_http_proxy(),
logger=self.logger,
timeout=self.conn_timeout)
except ClientException as err:
if err.http_status != HTTP_NOT_FOUND:
raise
self.container_deletes += 1
self.container_stats['deletes'] += 1
self.logger.increment('deletes')
self.logger.timing_since('deletes.timing', start_time)
else:
# when sync'ing a live object, use ts_meta - this is the time
# at which the source object was last modified by a PUT or POST
if self._object_in_remote_container(row['name'],
sync_to, user_key, realm,
realm_key, ts_meta):
return True
exc = None
# look up for the newest one
headers_out = {'X-Newest': True,
'X-Backend-Storage-Policy-Index':
str(info['storage_policy_index'])}
try:
source_obj_status, headers, body = \
self.swift.get_object(info['account'],
info['container'], row['name'],
headers=headers_out,
acceptable_statuses=(2, 4))
except (Exception, UnexpectedResponse, Timeout) as err:
headers = {}
body = None
exc = err
timestamp = Timestamp(headers.get('x-timestamp', 0))
if timestamp < ts_meta:
if exc:
raise exc
raise Exception(
_('Unknown exception trying to GET: '
'%(account)r %(container)r %(object)r'),
{'account': info['account'],
'container': info['container'],
'object': row['name']})
for key in ('date', 'last-modified'):
if key in headers:
del headers[key]
if 'etag' in headers:
headers['etag'] = headers['etag'].strip('"')
if 'content-type' in headers:
headers['content-type'] = clean_content_type(
headers['content-type'])
self._update_sync_to_headers(row['name'], sync_to, user_key,
realm, realm_key, 'PUT', headers)
put_object(sync_to, name=row['name'], headers=headers,
contents=FileLikeIter(body),
proxy=self.select_http_proxy(), logger=self.logger,
timeout=self.conn_timeout)
self.container_puts += 1
self.container_stats['puts'] += 1
self.container_stats['bytes'] += row['size']
self.logger.increment('puts')
self.logger.timing_since('puts.timing', start_time)
except ClientException as err:
if err.http_status == HTTP_UNAUTHORIZED:
self.logger.info(
_('Unauth %(sync_from)r => %(sync_to)r'),
{'sync_from': '%s/%s' %
(quote(info['account']), quote(info['container'])),
'sync_to': sync_to})
elif err.http_status == HTTP_NOT_FOUND:
self.logger.info(
_('Not found %(sync_from)r => %(sync_to)r \
- object %(obj_name)r'),
{'sync_from': '%s/%s' %
(quote(info['account']), quote(info['container'])),
'sync_to': sync_to, 'obj_name': row['name']})
else:
self.logger.exception(
_('ERROR Syncing %(db_file)s %(row)s'),
{'db_file': str(broker), 'row': row})
self.container_failures += 1
self.logger.increment('failures')
return False
except (Exception, Timeout) as err:
self.logger.exception(
_('ERROR Syncing %(db_file)s %(row)s'),
{'db_file': str(broker), 'row': row})
self.container_failures += 1
self.logger.increment('failures')
return False
return True
def select_http_proxy(self):
return choice(self.http_proxies) if self.http_proxies else None
|
|
# coding: utf-8
# ### Could not find kernel error?
#
# If you ever see a "could not find a kernel for this notebook" error message, it will offer you a pull down menu for you to pick a fitting kernel.
#
# Remember, `kernels` are the notebook's way to find the correct interpreter for the code you write into notebook cells. And these days this can be `R`, `Julia`, `Python` and several other things (Find the available kernels list [here](https://github.com/ipython/ipython/wiki/IPython-kernels-for-other-languages)).
# ### Review on your own time: A few "last" things about types
# We did some examples last class which illustrated the difference between integers and floats.
# Let's do one more using the conversion between Fahrenheit and Celsius as a test case.
# Recall the conversion formula:
# $T_C = \frac{5}{9} \left(T_F - 32 \right)$
# In[ ]:
tempF = 212.0
tempC = (5 / 9) * (tempF - 32.0)
tempC
# #### Q. What will be printed?
# Depending on Python version!
# #### Q. What went wrong?
# Nothing in Python 3, yay! ;) (In Python 2, 5 / 9 was integer division and gave 0.)
# ### You can force variables to be certain types
# In[ ]:
x = 45
type(x) # Gives (returns) the type of variable
# In[ ]:
x = float(x)
print(type(x))
x
# #### Q. What will this produce?
# In[ ]:
x = 26.9
int(x)
# ### Review continued: One "last" note on modules
# In[ ]:
from math import *
import math
print(sqrt(2))
math.sqrt(2)
# It's using the exact same library twice; you just told Python two different ways to get to it.
# And there's even a way to prove it: With the `id()` function:
# In[ ]:
id(sqrt)
# In[ ]:
id(math.sqrt)
# As you can see it's the same memory address (but this number is not necessarily the same on your computer), meaning the Python interpreter uses the exact same object twice, you just gave it 2 different names.
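# Another way to confirm this (illustrative): the "is" operator compares object identity directly.
# In[ ]:
sqrt is math.sqrt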
# Another syntax is available to import modules:
# In[ ]:
# Import math module and give it a new name
import math as m # Note the use of "as", a reserved word
m.sqrt(2)
# or specific functions within a module:
# In[ ]:
# Import sqrt from math and give it a new name
from math import sqrt as sq
from math import pi as PIE
sq(2)
# Recap of importing styles (a runnable version of each follows below):
# from module import function
# from module import *
# import module
# import module as mod
# from module import function as func
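# Here is the recap as a runnable cell, all using the math module (illustrative):
# In[ ]:
from math import sqrt            # from module import function
from math import *               # from module import *
import math                      # import module
import math as m                 # import module as mod
from math import sqrt as sq      # from module import function as func
print(sqrt(2), math.sqrt(2), m.sqrt(2), sq(2))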
# # Today: Loops & Lists
# The point of loops is to compactly code repetitive tasks.
# For example, computing the gravitational force for multiple planetary masses.
#
# Loops are an essential programming tool (this is why we program!).
# Python supports two types of loops:
#
# 1. while loops
# 2. for loops
# ### While Loops (Section 2.1.2 in the book)
# #### Basic While Loop
#
# <Talk about how Python knows what's in the loop>
# In[ ]:
x = 0 # Initialize the variable x to 0
while(x != 3): # While (as long as) x is not equal to 3
print("The value of x is", x) # Print this to the screen
x += 1 # Increment x by 1 (add 1 to x)
# REPEAT!!!
# In[ ]:
print(x)
# What is the value of x?
# #### Without a while loop
# In[ ]:
x = 0 # Initialize the variable x to 0
print("The value of x is", x) # Print this to the screen
x += 1 # Increment x by 1 (add 1 to x)
print("The value of x is", x) # Print this to the screen
x += 1 # Increment x by 1 (add 1 to x)
print("The value of x is", x) # Print this to the screen
x += 1 # Increment x by 1 (add 1 to x)
# Recall the Gravitational Force Equation
# $$F(r) = G \frac{m_1 m_2}{r^2}$$
# In[ ]:
print('# Table of Gravitational Forces for Multiple Planet Masses\n')
# Initialize variables - use meters and kilograms for units
G = 6.67e-11 # Gravitational constant
mass_earth = 5.97e24 # Earth mass
mass_person = 70 # Person mass
radius_earth = 6.37e6 # Earth radius
# Begin calculation
mass1 = mass_earth
# Print a header
print('# mass1/mass_earth Force')
# The loop ends when the conditional mass1 <= (10.0 * mass_earth) is no longer true
while(mass1 <= (10.0 * mass_earth)): # Note the colon!
force = G * mass1 * mass_person / radius_earth**2 # All lines in the loop must be indented by
# the same amount (iPython does it automatically)
# print(str(mass1 / mass_earth) + " " + str(force))
print("{mass_ratio}\t{force:7.2f}".format(mass_ratio=mass1 / mass_earth,
force=force))
mass1 = mass1 + mass_earth # Increment by Earth's mass
# No indent! This line is executed after the loop is done
print('# Done')
# #### Q. What will this loop do ("trace" it)?
# The increment could have been done in shorthand
# In[ ]:
# Note that I have to reset mass1 here!!
mass1 = mass_earth
print('# mass1/mass_earth Force')
while(mass1 <= (10.0 * mass_earth)):
force = G * mass1 * mass_person / radius_earth**2
print("{:18.1f} {:7.2f}".format(mass1 / mass_earth, force))
# mass1 = mass1 + mass_earth
mass1 += mass_earth # Shorthand version of the line above.
'# Done'
# #### Q. What about this one? Can you predict any problems it may cause?
# Example 1
# x = 0
# while(True):
#     x = x + 1
#
# Example 2
# x = 0
# while(x >= -1):
#     x = x + 1
#
# NEVER, EVER DO THIS!! (well, not EXACTLY like this...)
# ### Infinite loops
# If you create a while loop and the conditional never becomes false, you have just made yourself an infinite loop!
# If you accidentally make an infinite loop in iPython notebook, go to "Kernel" then "Interrupt" in the toolbar above, then go to "Kernel" then "Restart".
# In[ ]:
# How to prevent an infinite loop
maxCount = 10 # A number that is more than your loop should ever do
count = 0 # The current number your loop is on
# Adding "and < maxCount" to the end of your conditional prevents infinite loops
while(True and count < maxCount):
print("Loop count: " + str(count))
count += 1 # Increment your current loop count
# #### Q. How does this work?
# Remember the basic structure of a while loop:
#
#     while <conditional statement>:
#         <commands indented by 1 tab (usually 3 or 4 spaces)>
#         <more commands>
#         <more commands>
#         <...>
#     <eventually exit loop and return to no indent>
#
# *The <conditional statement> must evaluate to True or False.*
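# A minimal concrete instance of that structure (illustrative):
# In[ ]:
count = 0
while(count < 3):                 # <conditional statement>
    print("count is", count)      # <commands indented by the same amount>
    count += 1                    # eventually makes the conditional False
print("Back to no indent; the loop is finished.")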
# ### INTERLUDE: Boolean (logic) expressions (Section 2.1.3)
# Boolean expressions are conditional statements. There are only
# two possible values: True or False
#
# I've capitalized True and False because these are reserved words in Python.
#
# x == y # Is x equal to y? (remember, a single = symbol is used to assign values; see the quick check right after this list)
#
# x != y # Is x not equal to y?
#
# x >= y # Is x greater than or equal to y?
#
# x <= y # Is x less than or equal to y?
#
# x < y # Is x less than y?
#
# x > y # Is x greater than y?
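# As noted above, a single = assigns a value while == compares. A quick check (illustrative):
# In[ ]:
x = 5          # assignment: store 5 in x
y = 10         # assignment: store 10 in y
print(x == y)  # comparison: is x equal to y? -> False
print(x != y)  # -> True
print(x < y)   # -> True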
# #### Q. What is the value of this?
# In[ ]:
5 <= 10
# #### Q. What is the value of this?
# In[ ]:
5 >= 10
# The reserved word "not" can be inserted in front of boolean expressions to change the value
# to its opposite.
# In[ ]:
not 5 >= 10
# #### Q. What is the value of this?
# See how readable Python is?
# Boolean expressions can be combined with "and", "or" and "not" to form compound conditional expressions.
# In[ ]:
5 <= 10 and 5 >= 10
# #### Q. How about this?
# In[ ]:
5 <= 10 or 5 >= 10
# ### Back to while loops
# While loops are good to use when you don't know exactly how many times you need your loop to run.
# They are very useful when asking the user for input.
# #### Example - User Input
# In[ ]:
import random
minNumber = 1
maxNumber = 10
# Get a random number between 1 and 10
randomNumber = random.randint(minNumber, maxNumber)
userGuess = -1
while(userGuess != randomNumber):
userPrompt = "Guess a number between " + str(minNumber) + " and " + str(maxNumber) + ": "
userGuess = input(userPrompt) # Prompt the user
userGuess = int(userGuess)
print("You have guessed the correct number! " + str(userGuess))
# #### Q. What happens if you enter a letter instead of a number?
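# If you enter a letter, int() raises a ValueError and the program stops with an error.
# One way to guard against that (a sketch using try/except, which we have not covered yet):
# In[ ]:
userGuess = input("Guess a number between 1 and 10: ")
try:
    userGuess = int(userGuess)
except ValueError:
    print("That was not a number, please try again!")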
# ### Lists (Section 2.2)
# Lists are sequences of objects (which can be of different types) in a given order.
# To define a list of mass ratios with ten elements
# **(and indices running from 0 to 9):**
# Referring to our previous gravitational force example:
# In[ ]:
massRatio = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
massRatio
# We can access an element of the list by supplying its
# index in SQUARE BRACKETS (not parentheses or braces):
# In[ ]:
massRatio[3]
# #### Q. What will this print?
# In[ ]:
type(massRatio[3])
# Lesson learned: Python is zero-index based
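# Since indexing starts at zero, the first element is massRatio[0] and the last of the
# ten elements is massRatio[9]; massRatio[10] would raise an IndexError (illustrative).
# In[ ]:
massRatio[0], massRatio[9]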
# ### Modifying lists
# We can append an element to the end of a list
# using the append "method":
# In[ ]:
massRatio.append(11.0)
massRatio
# Note the syntax "object.method(argument)".
# Append acts like a function, but it is reached through an object.
# That object (which we created) is a list called massRatio.
# We can insert a new element at a specific location too:
# In[ ]:
# This inserts 4.5 into index 4 of the list:
massRatio.insert(4, 4.5)
massRatio
# We can delete an element:
# In[ ]:
del massRatio[4]
# #### Q. What will the next line produce?
# In[ ]:
massRatio
# ### List operations
# In[ ]:
# We can find out its length with len(object)
len(massRatio)
# Python uses [] to access elements and () to perform a function on an object.
# Lists can be added:
# In[ ]:
massRatio = massRatio + [12.0, 13.0, 14.0]
massRatio
# which is equivalent to using the method "extend":
# In[ ]:
massRatio.extend([15.0, 16.0, 17.0])
print("Extend", massRatio)
massRatio.append([18.0, 19.0, 20.0])
print("Append", massRatio)
print(massRatio[17][1])
The "index" function returns the index of the first appearance of a value
# #### Q. What will this produce?
# In[ ]:
massRatio.index(12.0)
# In[ ]:
# And, this fails
massRatio.index(20.0)
The "in" keyword:
# In[ ]:
# We can check if there is an element in a list. The result of the check
# is boolean: True or False.
14.0 in massRatio
# In[ ]:
99.0 in massRatio
# In[ ]:
massRatio
# Negative indices:
# In[ ]:
# Negative indices start counting from the right (the end) of a list:
massRatio[-4]
# #### Q. What will this give us?
# ### Creating lists with while loops
# We can create lists using a while loop.
# Again, this is useful when you don't know how many elements
# are going to be put in the list.
# In[ ]:
# Initializations first
massRatio = [] # Creates an empty list
massRatioValue = 1.0 # For the conditional
massRatioMax = 5.0 # Also for the conditional
userInput = "BIG NOPE"
# And the while loop
while(userInput != "N" and massRatioValue <= massRatioMax): # Remember the colon!
# Remember to indent!
massRatio.append(massRatioValue)
massRatioValue += 1.0
userInput = input("Add another mass ratio value? ")
userInput = userInput.upper()
print("Finished creating the list massRatio!")
# #### Q. What is massRatio?
# In[ ]:
massRatio
# In[ ]:
|
|
# -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404, redirect
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core import serializers
from django.utils.translation import ugettext_lazy as _
from django.template.response import TemplateResponse
from portal.models import MediaItem, Comment, Channel, Collection, Submittal, MediaFile
from portal.forms import MediaItemForm, CommentForm, getThumbnails, ThumbnailForm, SubmittalForm
from portal.media_formats import MEDIA_FORMATS
from portal.templatetags.custom_filters import seconds_to_hms
from taggit.models import Tag
import lambdaproject.settings as settings
import djangotasks
import os
import re
from operator import attrgetter
import itertools
def index(request):
''' This view is the front page of OwnTube. It gets the most recent available media items
and collections and forwards them to the template. We use Django's Paginator for pagination. '''
if request.user.is_authenticated():
queryset = itertools.chain(MediaItem.objects.filter(encodingDone=True).order_by('-date','-modified'),Collection.objects.all().order_by('-created'))
else:
queryset = itertools.chain(MediaItem.objects.filter(encodingDone=True, published=True).order_by('-date','-modified'),Collection.objects.all().order_by('-created'))
queryset_sorted = sorted(queryset, key=attrgetter('date', 'created'), reverse=True)
paginator = Paginator(queryset_sorted,16)
channel_list = Channel.objects.all()
page = request.GET.get('page')
rss_list = []
for file_type in MEDIA_FORMATS:
rss_list.append((MEDIA_FORMATS[file_type].format_key,MEDIA_FORMATS[file_type].mediatype,"/feeds/latest/"+file_type))
rss_list.append(('torrent','torrent','/feeds/latest/torrent'))
try:
mediaitems = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
mediaitems = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
mediaitems = paginator.page(paginator.num_pages)
return TemplateResponse(request, 'portal/index.html', {'latest_mediaitems_list': mediaitems, 'channel_list': channel_list, 'rss_list': rss_list})
def channel_list(request,slug):
''' This is the view for a channel's item list; it works almost like the index view. '''
channel = get_object_or_404(Channel, slug=slug)
if request.user.is_authenticated():
queryset = itertools.chain(MediaItem.objects.filter(encodingDone=True, channel__slug=slug).order_by('-date','-modified'),Collection.objects.filter(channel__slug=slug).order_by('-created'))
else:
queryset = itertools.chain(MediaItem.objects.filter(encodingDone=True, published=True, channel__slug=slug).order_by('-date','-modified'),Collection.objects.filter(channel__slug=slug).order_by('-created'))
queryset_sorted = sorted(queryset, key=attrgetter('date', 'created'), reverse=True)
paginator = Paginator(queryset_sorted,15)
channel_list = Channel.objects.all()
page = request.GET.get('page')
rss_list = []
for file_type in MEDIA_FORMATS:
rss_list.append((MEDIA_FORMATS[file_type].format_key,MEDIA_FORMATS[file_type].mediatype,"/feeds/"+channel.slug+"/"+file_type))
rss_list.append(('torrent','torrent','/feeds/'+channel.slug+'/torrent'))
try:
mediaitems = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
mediaitems = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
mediaitems = paginator.page(paginator.num_pages)
return TemplateResponse(request, 'portal/channel.html', {'mediaitems_list': mediaitems, 'channel': channel, 'channel_list': channel_list, 'rss_list': rss_list})
@login_required
def get_duration(request, slug):
mediaitem = get_object_or_404(MediaItem, slug=slug)
if mediaitem.get_and_save_duration():
duration_feedback = seconds_to_hms(mediaitem.duration)
else:
duration_feedback = "Error"
return HttpResponse(duration_feedback)
def detail(request, slug):
''' Handles the detail view of a media item (the player, so to speak) and the comments (this should become nicer with AJAX at some point). '''
mediaitem = get_object_or_404(MediaItem, slug=slug)
if request.user.is_authenticated():
comment_list = Comment.objects.filter(item=mediaitem).order_by('-created')
else:
comment_list = Comment.objects.filter(item=mediaitem,moderated=True).order_by('-created')
if request.method == 'POST':
comment = Comment(item=mediaitem,ip=request.META["REMOTE_ADDR"])
form = CommentForm(request.POST, instance=comment)
if form.is_valid():
comment = form.save(commit=False)
comment.save()
message = _(u"Your comment will be moderated")
comment.send_notification_mail()
return TemplateResponse(request, 'portal/items/detail.html', {'comment_list': comment_list, 'mediaitem': mediaitem, 'comment_form': CommentForm(), 'message': message})
else:
return TemplateResponse(request, 'portal/items/detail.html', {'comment_list': comment_list, 'mediaitem': mediaitem, 'comment_form': form})
else:
form = CommentForm()
return TemplateResponse(request, 'portal/items/detail.html', {'mediaitem': mediaitem, 'comment_list': comment_list, 'comment_form': form})
def iframe(request, slug):
''' Returns an iframe for an item so that media items can be shared easily '''
mediaitem = get_object_or_404(MediaItem, slug=slug)
return TemplateResponse(request, 'portal/items/iframe.html', {'mediaitem': mediaitem})
def tag(request, tag):
''' Gets all media items for a specified tag'''
if request.user.is_authenticated():
mediaitemslist = MediaItem.objects.filter(encodingDone=True, tags__slug__in=[tag]).order_by('-date')
else:
mediaitemslist = MediaItem.objects.filter(encodingDone=True, published=True, tags__slug__in=[tag]).order_by('-date')
tag_name = get_object_or_404(Tag, slug=tag)
return TemplateResponse(request, 'portal/items/list.html', {'mediaitems_list': mediaitemslist, 'tag': tag_name})
def collection(request, slug):
''' Gets all media items for a collection '''
collection = get_object_or_404(Collection, slug=slug)
rss_list = []
for file_type in MEDIA_FORMATS:
rss_list.append((MEDIA_FORMATS[file_type].format_key,MEDIA_FORMATS[file_type].mediatype,"/feeds/collection/"+collection.slug+"/"+file_type))
if request.user.is_authenticated():
mediaitemslist = collection.items.filter(encodingDone=True)
else:
mediaitemslist = collection.items.filter(encodingDone=True, published=True)
return TemplateResponse(request, 'portal/collection.html', {'mediaitems_list': mediaitemslist, 'collection': collection, 'rss_list': rss_list })
def search(request):
''' The search view for handling the search using Django's "Q" class (see _normalize_query and _get_query)'''
query_string = ''
found_entries = None
if ('q' in request.GET) and request.GET['q'].strip():
query_string = request.GET['q']
entry_query = _get_query(query_string, ['title', 'description', 'tags__name'])
if request.user.is_authenticated():
found_entries = MediaItem.objects.filter(entry_query).order_by('-date')
else:
found_entries = MediaItem.objects.filter(entry_query, published=True).order_by('-date')
return TemplateResponse(request, 'portal/search_results.html', { 'query_string': query_string, 'mediaitems_list': found_entries})
def search_json(request):
''' The search view for handling the search using Django's "Q" class (see _normalize_query and _get_query), returning JSON'''
query_string = ''
found_entries = None
if ('q' in request.GET) and request.GET['q'].strip():
query_string = request.GET['q']
entry_query = _get_query(query_string, ['title', 'description','tags__name'])
found_entries = MediaItem.objects.filter(entry_query).order_by('-date')
data = serializers.serialize('json', found_entries)
return HttpResponse(data, content_type = 'application/javascript; charset=utf8')
def tag_json(request, tag):
mediaitemslist = MediaItem.objects.filter(encodingDone=True, published=True, tags__name__in=[tag]).order_by('-date')
data = serializers.serialize('json', mediaitemslist)
return HttpResponse(data, content_type = 'application/javascript; charset=utf8')
@login_required
def submittal(request, subm_id):
submittal = get_object_or_404(Submittal, pk = subm_id)
if request.method == 'POST':
form = SubmittalForm(request.POST)
if form.is_valid():
mediaitem = form.save()
mediaitem.user = request.user
mediaitem.save()
form.create_mediafiles(mediaitem)
mediaitem.get_and_save_duration()
return redirect(index)
else:
return TemplateResponse(request, 'portal/submittal.html', {'submittal_form': form, 'submittal': submittal})
else:
form = SubmittalForm(initial={
'title': submittal.media_title,
'description': submittal.media_description,
'channel': submittal.media_channel,
'license': submittal.media_license,
'linkURL': submittal.media_linkURL,
'torrentURL': submittal.media_torrentURL,
'media_mp3URL': submittal.media_mp3URL,
'media_oggURL': submittal.media_oggURL,
'media_opusURL': submittal.media_opusURL,
'videoThumbURL': submittal.media_videoThumbURL,
'audioThumbURL': submittal.media_audioThumbURL,
'published': submittal.media_published,
'tags': ", ".join(str(x) for x in submittal.media_tags.all()),
'torrentDone': submittal.media_torrentDone,
'encodingDone': True,
})
return TemplateResponse(request, 'portal/submittal.html', {'submittal_form': form, 'submittal': submittal})
@login_required
def upload_thumbnail(request):
if request.method == 'POST':
form = ThumbnailForm(request.POST, request.FILES or None)
if form.is_valid():
if (request.FILES['file'].content_type == 'image/png' or request.FILES['file'].content_type == 'image/jpeg') and not form.data['title'] == '':
_handle_uploaded_thumbnail(request.FILES['file'], form.data['title'])
message = _("The upload of %s was successful") % (form.data['title'])
form = ThumbnailForm()
return TemplateResponse(request, 'portal/thumbnail.html', {'thumbnail_form': ThumbnailForm(), 'thumbs_list':_get_thumbnails_list, 'message': message})
else:
error = _("Please upload an image file")
return TemplateResponse(request, 'portal/thumbnail.html', {'thumbnail_form': form, 'thumbs_list':_get_thumbnails_list, 'error': error})
else:
return TemplateResponse(request, 'portal/thumbnail.html', {'thumbnail_form': form, 'thumbs_list':_get_thumbnails_list})
else:
return TemplateResponse(request, 'portal/thumbnail.html', {'thumbnail_form': ThumbnailForm(), 'thumbs_list':_get_thumbnails_list})
def _handle_uploaded_thumbnail(f, filename):
suffix = '.png' if (f.content_type == 'image/png') else '.jpg'
suffix = '' if (filename.endswith(suffix)) else suffix
destination = open(settings.THUMBNAILS_DIR + filename + suffix, 'wb+')
for chunk in f.chunks():
destination.write(chunk)
destination.close()
@login_required
def submit(request):
''' The view for uploading items. Only authenticated users can upload media items!
We use django tasks to create a new task for encoding these items. If we use
bittorrent to distribute our files, we also use django tasks to create the .torrent
files (this can take a few minutes for very large files). '''
if request.method == 'POST':
form = MediaItemForm(request.POST, request.FILES or None)
if form.is_valid():
media_item = form.save()
if form.cleaned_data['thumbURL']:
media_item.audioThumbURL = form.cleaned_data['thumbURL']
media_item.videoThumbURL = form.cleaned_data['thumbURL']
media_item.user = request.user
media_item.save()
media_item.get_and_save_duration()
outputdir = settings.ENCODING_OUTPUT_DIR + media_item.slug
if not os.path.exists(outputdir):
os.makedirs(outputdir)
cover_task = djangotasks.task_for_object(media_item.get_and_save_cover)
djangotasks.run_task(cover_task)
for target_format in form.cleaned_data['fileFormats']:
media_format = MEDIA_FORMATS[target_format]
url = settings.ENCODED_BASE_URL + media_item.slug + '/' + media_item.slug + media_format.extension
media_file = MediaFile.objects.create(title=media_item.title + " " + media_format.text,
url=url, file_format=media_format.format_key,
media_item=media_item, mediatype=media_format.mediatype)
encoding_task = djangotasks.task_for_object(media_file.encode_media)
djangotasks.run_task(encoding_task)
if settings.USE_BITTORRENT:
torrent_task = djangotasks.task_for_object(media_item.create_bittorrent)
djangotasks.run_task(torrent_task)
return redirect(index)
return TemplateResponse(request, 'portal/submit.html', {'submit_form': form})
else:
form = MediaItemForm()
return TemplateResponse(request, 'portal/submit.html', {'submit_form': form})
@login_required
def status(request):
tasks_mediaitem = djangotasks.models.Task.objects.filter(model="portal.mediaitem").exclude(status="successful")
tasks_mediafile = djangotasks.models.Task.objects.filter(model="portal.mediafile").exclude(status="successful")
mediaitem_ids = set(map((lambda mediaitem: mediaitem.object_id), tasks_mediaitem))
for mediafile in tasks_mediafile:
try:
mediaitem_ids.add(MediaFile.objects.get(pk=mediafile.object_id).media_item.pk)
except MediaFile.DoesNotExist:
pass
mediaitems = MediaItem.objects.filter(pk__in=mediaitem_ids)
return TemplateResponse(request, 'portal/status.html', {'mediaitems': mediaitems})
def _normalize_query(query_string,
findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
normspace=re.compile(r'\s{2,}').sub):
''' Splits the query string into individual keywords, getting rid of unnecessary spaces
and grouping quoted words together.
Example:
>>> _normalize_query(' some random words "with quotes " and spaces')
['some', 'random', 'words', 'with quotes', 'and', 'spaces']
'''
return [normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string)]
def _get_query(query_string, search_fields):
''' Returns a query, that is a combination of Q objects. That combination
aims to search keywords within a model by testing the given search fields.
'''
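# Illustrative example (not executed): _get_query('some "exact phrase"', ['title', 'description'])
# builds (Q(title__icontains='some') | Q(description__icontains='some')) &
# (Q(title__icontains='exact phrase') | Q(description__icontains='exact phrase')).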
query = None # Query to search for every search term
terms = _normalize_query(query_string)
for term in terms:
or_query = None # Query to search for a given term in each field
for field_name in search_fields:
q = Q(**{"%s__icontains" % field_name: term})
if or_query is None:
or_query = q
else:
or_query = or_query | q
if query is None:
query = or_query
else:
query = query & or_query
return query
def _get_thumbnails_list():
thumbnails_list = getThumbnails(settings.THUMBNAILS_DIR)
del thumbnails_list[0]
return thumbnails_list
|
|
"""The tests for the Input select component."""
# pylint: disable=protected-access
from unittest.mock import patch
import pytest
from homeassistant.components.input_select import (
ATTR_OPTION,
ATTR_OPTIONS,
CONF_INITIAL,
DOMAIN,
SERVICE_SELECT_FIRST,
SERVICE_SELECT_LAST,
SERVICE_SELECT_NEXT,
SERVICE_SELECT_OPTION,
SERVICE_SELECT_PREVIOUS,
SERVICE_SET_OPTIONS,
)
from homeassistant.const import (
ATTR_EDITABLE,
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_NAME,
SERVICE_RELOAD,
)
from homeassistant.core import Context, State
from homeassistant.exceptions import Unauthorized
from homeassistant.helpers import entity_registry as er
from homeassistant.loader import bind_hass
from homeassistant.setup import async_setup_component
from tests.common import mock_restore_cache
@pytest.fixture
def storage_setup(hass, hass_storage):
"""Storage setup."""
async def _storage(items=None, config=None):
if items is None:
hass_storage[DOMAIN] = {
"key": DOMAIN,
"version": 1,
"data": {
"items": [
{
"id": "from_storage",
"name": "from storage",
"options": ["storage option 1", "storage option 2"],
}
]
},
}
else:
hass_storage[DOMAIN] = {
"key": DOMAIN,
"version": 1,
"data": {"items": items},
}
if config is None:
config = {DOMAIN: {}}
return await async_setup_component(hass, DOMAIN, config)
return _storage
@bind_hass
def select_option(hass, entity_id, option):
"""Set value of input_select.
This is a legacy helper method. Do not use it for new tests.
"""
hass.async_create_task(
hass.services.async_call(
DOMAIN,
SERVICE_SELECT_OPTION,
{ATTR_ENTITY_ID: entity_id, ATTR_OPTION: option},
)
)
@bind_hass
def select_next(hass, entity_id):
"""Set next value of input_select.
This is a legacy helper method. Do not use it for new tests.
"""
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_SELECT_NEXT, {ATTR_ENTITY_ID: entity_id}
)
)
@bind_hass
def select_previous(hass, entity_id):
"""Set previous value of input_select.
This is a legacy helper method. Do not use it for new tests.
"""
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_SELECT_PREVIOUS, {ATTR_ENTITY_ID: entity_id}
)
)
@bind_hass
def select_first(hass, entity_id):
"""Set first value of input_select.
This is a legacy helper method. Do not use it for new tests.
"""
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_SELECT_FIRST, {ATTR_ENTITY_ID: entity_id}
)
)
@bind_hass
def select_last(hass, entity_id):
"""Set last value of input_select.
This is a legacy helper method. Do not use it for new tests.
"""
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_SELECT_LAST, {ATTR_ENTITY_ID: entity_id}
)
)
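# For new tests, prefer calling the service directly and awaiting it, e.g.
# (an illustrative sketch mirroring calls later in this file):
#
#     await hass.services.async_call(
#         DOMAIN,
#         SERVICE_SELECT_OPTION,
#         {ATTR_ENTITY_ID: "input_select.test_1", ATTR_OPTION: "some option"},
#         blocking=True,
#     )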
async def test_config(hass):
"""Test config."""
invalid_configs = [
None,
{},
{"name with space": None},
# {'bad_options': {'options': None}},
{"bad_initial": {"options": [1, 2], "initial": 3}},
]
for cfg in invalid_configs:
assert not await async_setup_component(hass, DOMAIN, {DOMAIN: cfg})
async def test_select_option(hass):
"""Test select_option methods."""
assert await async_setup_component(
hass,
DOMAIN,
{DOMAIN: {"test_1": {"options": ["some option", "another option"]}}},
)
entity_id = "input_select.test_1"
state = hass.states.get(entity_id)
assert state.state == "some option"
select_option(hass, entity_id, "another option")
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == "another option"
select_option(hass, entity_id, "non existing option")
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == "another option"
async def test_select_next(hass):
"""Test select_next methods."""
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_1": {
"options": ["first option", "middle option", "last option"],
"initial": "middle option",
}
}
},
)
entity_id = "input_select.test_1"
state = hass.states.get(entity_id)
assert state.state == "middle option"
select_next(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == "last option"
select_next(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == "first option"
async def test_select_previous(hass):
"""Test select_previous methods."""
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_1": {
"options": ["first option", "middle option", "last option"],
"initial": "middle option",
}
}
},
)
entity_id = "input_select.test_1"
state = hass.states.get(entity_id)
assert state.state == "middle option"
select_previous(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == "first option"
select_previous(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == "last option"
async def test_select_first_last(hass):
"""Test select_first and _last methods."""
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_1": {
"options": ["first option", "middle option", "last option"],
"initial": "middle option",
}
}
},
)
entity_id = "input_select.test_1"
state = hass.states.get(entity_id)
assert state.state == "middle option"
select_first(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == "first option"
select_last(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == "last option"
async def test_config_options(hass):
"""Test configuration options."""
count_start = len(hass.states.async_entity_ids())
test_2_options = ["Good Option", "Better Option", "Best Option"]
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_1": {"options": [1, 2]},
"test_2": {
"name": "Hello World",
"icon": "mdi:work",
"options": test_2_options,
"initial": "Better Option",
},
}
},
)
assert count_start + 2 == len(hass.states.async_entity_ids())
state_1 = hass.states.get("input_select.test_1")
state_2 = hass.states.get("input_select.test_2")
assert state_1 is not None
assert state_2 is not None
assert state_1.state == "1"
assert state_1.attributes.get(ATTR_OPTIONS) == ["1", "2"]
assert ATTR_ICON not in state_1.attributes
assert state_2.state == "Better Option"
assert state_2.attributes.get(ATTR_OPTIONS) == test_2_options
assert state_2.attributes.get(ATTR_FRIENDLY_NAME) == "Hello World"
assert state_2.attributes.get(ATTR_ICON) == "mdi:work"
async def test_set_options_service(hass):
"""Test set_options service."""
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_1": {
"options": ["first option", "middle option", "last option"],
"initial": "middle option",
}
}
},
)
entity_id = "input_select.test_1"
state = hass.states.get(entity_id)
assert state.state == "middle option"
data = {ATTR_OPTIONS: ["test1", "test2"], "entity_id": entity_id}
await hass.services.async_call(DOMAIN, SERVICE_SET_OPTIONS, data)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == "test1"
select_option(hass, entity_id, "first option")
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == "test1"
select_option(hass, entity_id, "test2")
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == "test2"
async def test_restore_state(hass):
"""Ensure states are restored on startup."""
mock_restore_cache(
hass,
(
State("input_select.s1", "last option"),
State("input_select.s2", "bad option"),
),
)
options = {"options": ["first option", "middle option", "last option"]}
await async_setup_component(hass, DOMAIN, {DOMAIN: {"s1": options, "s2": options}})
state = hass.states.get("input_select.s1")
assert state
assert state.state == "last option"
state = hass.states.get("input_select.s2")
assert state
assert state.state == "first option"
async def test_initial_state_overrules_restore_state(hass):
"""Ensure states are restored on startup."""
mock_restore_cache(
hass,
(
State("input_select.s1", "last option"),
State("input_select.s2", "bad option"),
),
)
options = {
"options": ["first option", "middle option", "last option"],
"initial": "middle option",
}
await async_setup_component(hass, DOMAIN, {DOMAIN: {"s1": options, "s2": options}})
state = hass.states.get("input_select.s1")
assert state
assert state.state == "middle option"
state = hass.states.get("input_select.s2")
assert state
assert state.state == "middle option"
async def test_input_select_context(hass, hass_admin_user):
"""Test that input_select context works."""
assert await async_setup_component(
hass,
"input_select",
{
"input_select": {
"s1": {"options": ["first option", "middle option", "last option"]}
}
},
)
state = hass.states.get("input_select.s1")
assert state is not None
await hass.services.async_call(
"input_select",
"select_next",
{"entity_id": state.entity_id},
True,
Context(user_id=hass_admin_user.id),
)
state2 = hass.states.get("input_select.s1")
assert state2 is not None
assert state.state != state2.state
assert state2.context.user_id == hass_admin_user.id
async def test_reload(hass, hass_admin_user, hass_read_only_user):
"""Test reload service."""
count_start = len(hass.states.async_entity_ids())
ent_reg = er.async_get(hass)
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_1": {
"options": ["first option", "middle option", "last option"],
"initial": "middle option",
},
"test_2": {
"options": ["an option", "not an option"],
"initial": "an option",
},
}
},
)
assert count_start + 2 == len(hass.states.async_entity_ids())
state_1 = hass.states.get("input_select.test_1")
state_2 = hass.states.get("input_select.test_2")
state_3 = hass.states.get("input_select.test_3")
assert state_1 is not None
assert state_2 is not None
assert state_3 is None
assert state_1.state == "middle option"
assert state_2.state == "an option"
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "test_1") is not None
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "test_2") is not None
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "test_3") is None
with patch(
"homeassistant.config.load_yaml_config_file",
autospec=True,
return_value={
DOMAIN: {
"test_2": {
"options": ["an option", "reloaded option"],
"initial": "reloaded option",
},
"test_3": {
"options": ["new option", "newer option"],
"initial": "newer option",
},
}
},
):
with pytest.raises(Unauthorized):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
blocking=True,
context=Context(user_id=hass_read_only_user.id),
)
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
blocking=True,
context=Context(user_id=hass_admin_user.id),
)
await hass.async_block_till_done()
assert count_start + 2 == len(hass.states.async_entity_ids())
state_1 = hass.states.get("input_select.test_1")
state_2 = hass.states.get("input_select.test_2")
state_3 = hass.states.get("input_select.test_3")
assert state_1 is None
assert state_2 is not None
assert state_3 is not None
assert state_2.state == "an option"
assert state_3.state == "newer option"
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "test_1") is None
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "test_2") is not None
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "test_3") is not None
async def test_load_from_storage(hass, storage_setup):
"""Test set up from storage."""
assert await storage_setup()
state = hass.states.get(f"{DOMAIN}.from_storage")
assert state.state == "storage option 1"
assert state.attributes.get(ATTR_FRIENDLY_NAME) == "from storage"
assert state.attributes.get(ATTR_EDITABLE)
async def test_editable_state_attribute(hass, storage_setup):
"""Test editable attribute."""
assert await storage_setup(
config={DOMAIN: {"from_yaml": {"options": ["yaml option", "other option"]}}}
)
state = hass.states.get(f"{DOMAIN}.from_storage")
assert state.state == "storage option 1"
assert state.attributes.get(ATTR_FRIENDLY_NAME) == "from storage"
assert state.attributes.get(ATTR_EDITABLE)
state = hass.states.get(f"{DOMAIN}.from_yaml")
assert state.state == "yaml option"
assert not state.attributes.get(ATTR_EDITABLE)
async def test_ws_list(hass, hass_ws_client, storage_setup):
"""Test listing via WS."""
assert await storage_setup(
config={DOMAIN: {"from_yaml": {"options": ["yaml option"]}}}
)
client = await hass_ws_client(hass)
await client.send_json({"id": 6, "type": f"{DOMAIN}/list"})
resp = await client.receive_json()
assert resp["success"]
storage_ent = "from_storage"
yaml_ent = "from_yaml"
result = {item["id"]: item for item in resp["result"]}
assert len(result) == 1
assert storage_ent in result
assert yaml_ent not in result
assert result[storage_ent][ATTR_NAME] == "from storage"
async def test_ws_delete(hass, hass_ws_client, storage_setup):
"""Test WS delete cleans up entity registry."""
assert await storage_setup()
input_id = "from_storage"
input_entity_id = f"{DOMAIN}.{input_id}"
ent_reg = er.async_get(hass)
state = hass.states.get(input_entity_id)
assert state is not None
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, input_id) is not None
client = await hass_ws_client(hass)
await client.send_json(
{"id": 6, "type": f"{DOMAIN}/delete", f"{DOMAIN}_id": f"{input_id}"}
)
resp = await client.receive_json()
assert resp["success"]
state = hass.states.get(input_entity_id)
assert state is None
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, input_id) is None
async def test_update(hass, hass_ws_client, storage_setup):
"""Test updating min/max updates the state."""
items = [
{
"id": "from_storage",
"name": "from storage",
"options": ["yaml update 1", "yaml update 2"],
}
]
assert await storage_setup(items)
input_id = "from_storage"
input_entity_id = f"{DOMAIN}.{input_id}"
ent_reg = er.async_get(hass)
state = hass.states.get(input_entity_id)
assert state.attributes[ATTR_OPTIONS] == ["yaml update 1", "yaml update 2"]
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, input_id) is not None
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 6,
"type": f"{DOMAIN}/update",
f"{DOMAIN}_id": f"{input_id}",
"options": ["new option", "newer option"],
CONF_INITIAL: "newer option",
}
)
resp = await client.receive_json()
assert resp["success"]
state = hass.states.get(input_entity_id)
assert state.attributes[ATTR_OPTIONS] == ["new option", "newer option"]
await client.send_json(
{
"id": 7,
"type": f"{DOMAIN}/update",
f"{DOMAIN}_id": f"{input_id}",
"options": ["new option", "no newer option"],
}
)
resp = await client.receive_json()
assert not resp["success"]
async def test_ws_create(hass, hass_ws_client, storage_setup):
"""Test create WS."""
assert await storage_setup(items=[])
input_id = "new_input"
input_entity_id = f"{DOMAIN}.{input_id}"
ent_reg = er.async_get(hass)
state = hass.states.get(input_entity_id)
assert state is None
assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, input_id) is None
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 6,
"type": f"{DOMAIN}/create",
"name": "New Input",
"options": ["new option", "even newer option"],
"initial": "even newer option",
}
)
resp = await client.receive_json()
assert resp["success"]
state = hass.states.get(input_entity_id)
assert state.state == "even newer option"
async def test_setup_no_config(hass, hass_admin_user):
"""Test component setup with no config."""
count_start = len(hass.states.async_entity_ids())
assert await async_setup_component(hass, DOMAIN, {})
with patch(
"homeassistant.config.load_yaml_config_file", autospec=True, return_value={}
):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
blocking=True,
context=Context(user_id=hass_admin_user.id),
)
await hass.async_block_till_done()
assert count_start == len(hass.states.async_entity_ids())
|
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import json
import shutil
import tempfile
import unittest
from telemetry.core import util
from telemetry.internal.browser import browser_finder
from telemetry.testing import options_for_unittests
from core import perf_benchmark
class PerfBenchmarkTest(unittest.TestCase):
def setUp(self):
self._output_dir = tempfile.mkdtemp()
self._chrome_root = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self._output_dir, ignore_errors=True)
shutil.rmtree(self._chrome_root, ignore_errors=True)
def _PopulateGenFiles(self, output_dir=None):
root = output_dir if output_dir is not None else self._output_dir
gen_path = os.path.join(root, 'gen', 'components', 'subresource_filter',
'tools')
os.makedirs(gen_path)
# Just make an empty ruleset file.
open(os.path.join(gen_path, 'GeneratedRulesetData'), 'w').close()
placeholder_json = {
'subresource_filter' : {
'ruleset_version' : {
'content': '1000',
'format': 100,
'checksum': 0
}
}
}
with open(os.path.join(gen_path, 'default_local_state.json'), 'w') as f:
json.dump(placeholder_json, f)
def _ExpectAdTaggingProfileFiles(self, browser_options, expect_present):
files_to_copy = browser_options.profile_files_to_copy
local_state_to_copy = [
(s, d) for (s, d) in files_to_copy if d == 'Local State']
ruleset_data_to_copy = [
(s, d) for (s, d) in files_to_copy if d.endswith('Ruleset Data')]
num_expected_matches = 1 if expect_present else 0
self.assertEqual(num_expected_matches, len(local_state_to_copy))
self.assertEqual(num_expected_matches, len(ruleset_data_to_copy))
def testVariationArgs(self):
benchmark = perf_benchmark.PerfBenchmark()
options = options_for_unittests.GetCopy()
options.chrome_root = self._output_dir
if not options.browser_type:
options.browser_type = "any"
possible_browser = browser_finder.FindBrowser(options)
if possible_browser is None:
return
target_os = perf_benchmark.PerfBenchmark.FixupTargetOS(
possible_browser.target_os)
self.assertIsNotNone(target_os)
testing_config = json.dumps({
"OtherPlatformStudy": [{
"platforms": ["fake_platform"],
"experiments": [{
"name": "OtherPlatformFeature",
"enable_features": ["NonExistentFeature"]
}]
}],
"TestStudy": [{
"platforms": [target_os],
"experiments": [{
"name": "TestFeature",
"params": { "param1" : "value1" },
"enable_features": ["Feature1", "Feature2"],
"disable_features": ["Feature3", "Feature4"]}]}]})
variations_dir = os.path.join(self._output_dir, "testing", "variations")
os.makedirs(variations_dir)
fieldtrial_path = os.path.join(
variations_dir, "fieldtrial_testing_config.json")
with open(fieldtrial_path, "w") as f:
f.write(testing_config)
benchmark.CustomizeOptions(options)
# For non-Android, we expect to just pass the "--enable-field-trial-config"
# flag. For Android, due to binary size constraints, the flag cannot be
# used. We instead expect generated browser args from the testing config
# file. See the FIELDTRIAL_TESTING_ENABLED buildflag definition in
# components/variations/service/BUILD.gn for more details.
if not perf_benchmark.PerfBenchmark.IsAndroid(possible_browser):
expected_args = ['--enable-field-trial-config']
else:
expected_args = [
"--enable-features=Feature1<TestStudy,Feature2<TestStudy",
"--disable-features=Feature3<TestStudy,Feature4<TestStudy",
"--force-fieldtrials=TestStudy/TestFeature",
"--force-fieldtrial-params=TestStudy.TestFeature:param1/value1"
]
for arg in expected_args:
self.assertIn(arg, options.browser_options.extra_browser_args)
# Test 'reference' type, which has no variation params applied by default.
benchmark = perf_benchmark.PerfBenchmark()
options = options_for_unittests.GetCopy()
options.chrome_root = self._output_dir
options.browser_options.browser_type = 'reference'
benchmark.CustomizeOptions(options)
for arg in expected_args:
self.assertNotIn(arg, options.browser_options.extra_browser_args)
# Test compatibility mode, which has no variation params applied by default.
benchmark = perf_benchmark.PerfBenchmark()
options = options_for_unittests.GetCopy()
options.chrome_root = self._output_dir
options.browser_options.compatibility_mode = ['no-field-trials']
benchmark.CustomizeOptions(options)
for arg in expected_args:
self.assertNotIn(arg, options.browser_options.extra_browser_args)
def testNoAdTaggingRuleset(self):
# This test (badly) assumes that util.GetBuildDirectories() will always
# return a list of multiple directories, with Debug ordered before Release.
# This is not the case if CHROMIUM_OUTPUT_DIR is set or a build.ninja file
# exists in the current working directory - in those cases, only a single
# directory is returned. So, abort early if we only get back one directory.
num_dirs = 0
for _ in util.GetBuildDirectories(self._chrome_root):
num_dirs += 1
if num_dirs < 2:
return
benchmark = perf_benchmark.PerfBenchmark()
options = options_for_unittests.GetCopy()
# Set the chrome root to avoid using a ruleset from an existing "Release"
# out dir.
options.chrome_root = self._output_dir
benchmark.CustomizeOptions(options)
self._ExpectAdTaggingProfileFiles(options.browser_options, False)
def testAdTaggingRulesetReference(self):
self._PopulateGenFiles()
benchmark = perf_benchmark.PerfBenchmark()
options = options_for_unittests.GetCopy()
options.browser_options.browser_type = 'reference'
# Careful, do not parse the command line flag for 'chromium-output-dir', as
# that sets the global os environment variable CHROMIUM_OUTPUT_DIR,
# affecting other tests. See http://crbug.com/843994.
options.chromium_output_dir = self._output_dir
benchmark.CustomizeOptions(options)
self._ExpectAdTaggingProfileFiles(options.browser_options, False)
def testAdTaggingRuleset(self):
self._PopulateGenFiles()
benchmark = perf_benchmark.PerfBenchmark()
options = options_for_unittests.GetCopy()
# Careful, do not parse the command line flag for 'chromium-output-dir', as
# that sets the global os environment variable CHROMIUM_OUTPUT_DIR,
# affecting other tests. See http://crbug.com/843994.
options.chromium_output_dir = self._output_dir
benchmark.CustomizeOptions(options)
self._ExpectAdTaggingProfileFiles(options.browser_options, True)
def testAdTaggingRulesetNoExplicitOutDir(self):
self._PopulateGenFiles(os.path.join(self._chrome_root, 'out', 'Release'))
benchmark = perf_benchmark.PerfBenchmark()
options = options_for_unittests.GetCopy()
options.chrome_root = self._chrome_root
options.browser_options.browser_type = "release"
benchmark.CustomizeOptions(options)
self._ExpectAdTaggingProfileFiles(options.browser_options, True)
def testAdTaggingRulesetNoExplicitOutDirAndroidChromium(self):
self._PopulateGenFiles(os.path.join(self._chrome_root, 'out', 'Default'))
benchmark = perf_benchmark.PerfBenchmark()
options = options_for_unittests.GetCopy()
options.chrome_root = self._chrome_root
# android-chromium is special cased to search for anything.
options.browser_options.browser_type = "android-chromium"
benchmark.CustomizeOptions(options)
self._ExpectAdTaggingProfileFiles(options.browser_options, True)
def testAdTaggingRulesetOutputDirNotFound(self):
# Same as the above test but use Debug instead of Release. This should
# cause the benchmark to fail to find the ruleset because we only check
# directories matching the browser_type.
self._PopulateGenFiles(os.path.join(self._chrome_root, 'out', 'Debug'))
# This test (badly) assumes that util.GetBuildDirectories() will always
# return a list of multiple directories, with Debug ordered before Release.
# This is not the case if CHROMIUM_OUTPUT_DIR is set or a build.ninja file
# exists in the current working directory - in those cases, only a single
# directory is returned. So, abort early if we only get back one directory.
num_dirs = 0
for _ in util.GetBuildDirectories(self._chrome_root):
num_dirs += 1
if num_dirs < 2:
return
benchmark = perf_benchmark.PerfBenchmark()
options = options_for_unittests.GetCopy()
options.chrome_root = self._chrome_root
options.browser_options.browser_type = "release"
benchmark.CustomizeOptions(options)
self._ExpectAdTaggingProfileFiles(options.browser_options, False)
def testAdTaggingRulesetInvalidJson(self):
self._PopulateGenFiles()
json_path = os.path.join(
self._output_dir, 'gen', 'components', 'subresource_filter', 'tools',
'default_local_state.json')
self.assertTrue(os.path.exists(json_path))
with open(json_path, 'w') as f:
f.write('{some invalid : json, 19')
benchmark = perf_benchmark.PerfBenchmark()
options = options_for_unittests.GetCopy()
options.chromium_output_dir = self._output_dir
# Should fail due to invalid JSON.
with self.assertRaises(ValueError):
benchmark.CustomizeOptions(options)
|
|
# -*- coding: utf-8 -*-
"""
Managing Gateway Groups and interactions with multiple channels.
(c) 2008-2014, Holger Krekel and others
"""
import atexit
import sys
from functools import partial
from threading import Lock
from . import gateway_bootstrap
from . import gateway_io
from .gateway_base import get_execmodel
from .gateway_base import reraise
from .gateway_base import trace
from .xspec import XSpec
NO_ENDMARKER_WANTED = object()
class Group(object):
"""Gateway Groups."""
defaultspec = "popen"
def __init__(self, xspecs=(), execmodel="thread"):
"""initialize group and make gateways as specified.
execmodel can be 'thread' or 'eventlet'.
"""
self._gateways = []
self._autoidcounter = 0
self._autoidlock = Lock()
self._gateways_to_join = []
# We use the same execmodel for all of the Gateway objects
# we spawn on our side. Whether different groups should be
# allowed to use different execmodels is an open question.
# Note that the "other side" execmodel may differ and is typically
# specified by the spec passed to makegateway.
self.set_execmodel(execmodel)
for xspec in xspecs:
self.makegateway(xspec)
atexit.register(self._cleanup_atexit)
@property
def execmodel(self):
return self._execmodel
@property
def remote_execmodel(self):
return self._remote_execmodel
def set_execmodel(self, execmodel, remote_execmodel=None):
"""Set the execution model for local and remote site.
execmodel can be one of "thread" or "eventlet" (XXX gevent).
It determines the execution model for any newly created gateway.
If remote_execmodel is not specified it takes on the value
of execmodel.
NOTE: Execution models can only be set before any gateway is created.
"""
if self._gateways:
raise ValueError(
"can not set execution models if " "gateways have been created already"
)
if remote_execmodel is None:
remote_execmodel = execmodel
self._execmodel = get_execmodel(execmodel)
self._remote_execmodel = get_execmodel(remote_execmodel)
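# Hedged usage sketch (not part of the original module): execution models
# must be chosen before the first gateway exists, for example:
#   group = Group()
#   group.set_execmodel("eventlet")  # or: group.set_execmodel("thread", remote_execmodel="eventlet")
#   gw = group.makegateway("popen")  # new gateways pick up the chosen models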
def __repr__(self):
idgateways = [gw.id for gw in self]
return "<Group %r>" % idgateways
def __getitem__(self, key):
if isinstance(key, int):
return self._gateways[key]
for gw in self._gateways:
if gw == key or gw.id == key:
return gw
raise KeyError(key)
def __contains__(self, key):
try:
self[key]
return True
except KeyError:
return False
def __len__(self):
return len(self._gateways)
def __iter__(self):
return iter(list(self._gateways))
def makegateway(self, spec=None):
"""create and configure a gateway to a Python interpreter.
The ``spec`` string encodes the target gateway type
and configuration information. The general format is::
key1=value1//key2=value2//...
If you leave out the ``=value`` part a True value is assumed.
Valid types: ``popen``, ``ssh=hostname``, ``socket=host:port``.
Valid configuration::
id=<string> specifies the gateway id
python=<path> specifies which python interpreter to execute
execmodel=model 'thread', 'eventlet', 'gevent' model for execution
chdir=<path> specifies to which directory to change
nice=<value> specifies process priority of new process
env:NAME=value specifies a remote environment variable setting.
If no spec is given, self.defaultspec is used.
"""
if not spec:
spec = self.defaultspec
if not isinstance(spec, XSpec):
spec = XSpec(spec)
self.allocate_id(spec)
if spec.execmodel is None:
spec.execmodel = self.remote_execmodel.backend
if spec.via:
assert not spec.socket
master = self[spec.via]
proxy_channel = master.remote_exec(gateway_io)
proxy_channel.send(vars(spec))
proxy_io_master = gateway_io.ProxyIO(proxy_channel, self.execmodel)
gw = gateway_bootstrap.bootstrap(proxy_io_master, spec)
elif spec.popen or spec.ssh or spec.vagrant_ssh:
io = gateway_io.create_io(spec, execmodel=self.execmodel)
gw = gateway_bootstrap.bootstrap(io, spec)
elif spec.socket:
from . import gateway_socket
io = gateway_socket.create_io(spec, self, execmodel=self.execmodel)
gw = gateway_bootstrap.bootstrap(io, spec)
else:
raise ValueError("no gateway type found for {!r}".format(spec._spec))
gw.spec = spec
self._register(gw)
if spec.chdir or spec.nice or spec.env:
channel = gw.remote_exec(
"""
import os
path, nice, env = channel.receive()
if path:
if not os.path.exists(path):
os.mkdir(path)
os.chdir(path)
if nice and hasattr(os, 'nice'):
os.nice(nice)
if env:
for name, value in env.items():
os.environ[name] = value
"""
)
nice = spec.nice and int(spec.nice) or 0
channel.send((spec.chdir, nice, spec.env))
channel.waitclose()
return gw
def allocate_id(self, spec):
"""(re-entrant) allocate id for the given xspec object."""
if spec.id is None:
with self._autoidlock:
id = "gw" + str(self._autoidcounter)
self._autoidcounter += 1
if id in self:
raise ValueError("already have gateway with id {!r}".format(id))
spec.id = id
def _register(self, gateway):
assert not hasattr(gateway, "_group")
assert gateway.id
assert gateway.id not in self
self._gateways.append(gateway)
gateway._group = self
def _unregister(self, gateway):
self._gateways.remove(gateway)
self._gateways_to_join.append(gateway)
def _cleanup_atexit(self):
trace("=== atexit cleanup {!r} ===".format(self))
self.terminate(timeout=1.0)
def terminate(self, timeout=None):
"""trigger exit of member gateways and wait for termination
of member gateways and associated subprocesses. After waiting
timeout seconds try to kill local sub processes of popen-
and ssh-gateways. Timeout defaults to None meaning
open-ended waiting and no kill attempts.
"""
while self:
vias = {}
for gw in self:
if gw.spec.via:
vias[gw.spec.via] = True
for gw in self:
if gw.id not in vias:
gw.exit()
def join_wait(gw):
gw.join()
gw._io.wait()
def kill(gw):
trace("Gateways did not come down after timeout: %r" % gw)
gw._io.kill()
safe_terminate(
self.execmodel,
timeout,
[
(partial(join_wait, gw), partial(kill, gw))
for gw in self._gateways_to_join
],
)
self._gateways_to_join[:] = []
def remote_exec(self, source, **kwargs):
"""remote_exec source on all member gateways and return
MultiChannel connecting to all sub processes.
"""
channels = []
for gw in self:
channels.append(gw.remote_exec(source, **kwargs))
return MultiChannel(channels)
class MultiChannel:
def __init__(self, channels):
self._channels = channels
def __len__(self):
return len(self._channels)
def __iter__(self):
return iter(self._channels)
def __getitem__(self, key):
return self._channels[key]
def __contains__(self, chan):
return chan in self._channels
def send_each(self, item):
for ch in self._channels:
ch.send(item)
def receive_each(self, withchannel=False):
assert not hasattr(self, "_queue")
l = []
for ch in self._channels:
obj = ch.receive()
if withchannel:
l.append((ch, obj))
else:
l.append(obj)
return l
def make_receive_queue(self, endmarker=NO_ENDMARKER_WANTED):
try:
return self._queue
except AttributeError:
self._queue = None
for ch in self._channels:
if self._queue is None:
self._queue = ch.gateway.execmodel.queue.Queue()
def putreceived(obj, channel=ch):
self._queue.put((channel, obj))
if endmarker is NO_ENDMARKER_WANTED:
ch.setcallback(putreceived)
else:
ch.setcallback(putreceived, endmarker=endmarker)
return self._queue
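# Hedged usage sketch (the remote source string is illustrative):
#   mch = group.remote_exec("channel.send(42)")
#   queue = mch.make_receive_queue(endmarker=None)
#   channel, item = queue.get(timeout=5)  # items arrive as (channel, obj) pairs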
def waitclose(self):
first = None
for ch in self._channels:
try:
ch.waitclose()
except ch.RemoteError:
if first is None:
first = sys.exc_info()
if first:
reraise(*first)
def safe_terminate(execmodel, timeout, list_of_paired_functions):
workerpool = execmodel.WorkerPool()
def termkill(termfunc, killfunc):
termreply = workerpool.spawn(termfunc)
try:
termreply.get(timeout=timeout)
except IOError:
killfunc()
replylist = []
for termfunc, killfunc in list_of_paired_functions:
reply = workerpool.spawn(termkill, termfunc, killfunc)
replylist.append(reply)
for reply in replylist:
reply.get()
workerpool.waitall(timeout=timeout)
default_group = Group()
makegateway = default_group.makegateway
set_execmodel = default_group.set_execmodel
|
|
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.cross_validation import train_test_split
from sklearn.metrics import f1_score
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.gridspec as gridspec
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import TSNE
#import sklearn.f1_score
#Load data
df = pd.read_csv("./input/creditcard.csv")
df.head()
# Describe the data
df.describe()
# Check for missing values. There are none, which makes things a little easier.
df.isnull().sum()
# Add a feature flagging amounts above the maximum observed fraud amount
# Most transactions are small amounts, less than $100. Fraudulent transactions have a maximum value far less than normal transactions, $2,125.87 vs $25,691.16.
df['Amount_max_fraud'] = 1
df.loc[df.Amount <= 2125.87, 'Amount_max_fraud'] = 0
# Select anonymized features:
v_features = df.ix[:,1:29].columns
#Drop all of the features that have very similar distributions between the two types of transactions.
df = df.drop(['V28','V27','V26','V25','V24','V23','V22','V20','V15','V13','V8'], axis =1)
#Based on exploratory plots of each feature (not included in this script), these features are created to identify values where fraudulent transactions are more common.
df['V1_'] = df.V1.map(lambda x: 1 if x < -3 else 0)
df['V2_'] = df.V2.map(lambda x: 1 if x > 2.5 else 0)
df['V3_'] = df.V3.map(lambda x: 1 if x < -4 else 0)
df['V4_'] = df.V4.map(lambda x: 1 if x > 2.5 else 0)
df['V5_'] = df.V5.map(lambda x: 1 if x < -4.5 else 0)
df['V6_'] = df.V6.map(lambda x: 1 if x < -2.5 else 0)
df['V7_'] = df.V7.map(lambda x: 1 if x < -3 else 0)
df['V9_'] = df.V9.map(lambda x: 1 if x < -2 else 0)
df['V10_'] = df.V10.map(lambda x: 1 if x < -2.5 else 0)
df['V11_'] = df.V11.map(lambda x: 1 if x > 2 else 0)
df['V12_'] = df.V12.map(lambda x: 1 if x < -2 else 0)
df['V14_'] = df.V14.map(lambda x: 1 if x < -2.5 else 0)
df['V16_'] = df.V16.map(lambda x: 1 if x < -2 else 0)
df['V17_'] = df.V17.map(lambda x: 1 if x < -2 else 0)
df['V18_'] = df.V18.map(lambda x: 1 if x < -2 else 0)
df['V19_'] = df.V19.map(lambda x: 1 if x > 1.5 else 0)
df['V21_'] = df.V21.map(lambda x: 1 if x > 0.6 else 0)
#Create a new feature for normal (non-fraudulent) transactions.
df.loc[df.Class == 0, 'Normal'] = 1
df.loc[df.Class == 1, 'Normal'] = 0
#Rename 'Class' to 'Fraud'.
df = df.rename(columns={'Class': 'Fraud'})
#492 fraudulent transactions, 284,315 normal transactions.
#0.172% of transactions were fraud.
print(df.Normal.value_counts())
print()
print(df.Fraud.value_counts())
pd.set_option("display.max_columns",101)
pd.set_option('display.max_rows', 5)
df.head()
#Create dataframes of only Fraud and Normal transactions.
Fraud = df[df.Fraud == 1]
Normal = df[df.Normal == 1]
# Set X_train equal to 80% of the fraudulent transactions.
X_train = Fraud.sample(frac=0.8)
count_Frauds = len(X_train)
# Add 80% of the normal transactions to X_train.
X_train = pd.concat([X_train, Normal.sample(frac = 0.8)], axis = 0)
# X_test contains all the transactions not in X_train.
X_test = df.loc[~df.index.isin(X_train.index)]
#Shuffle the dataframes so that the training is done in a random order.
X_train = shuffle(X_train)
X_test = shuffle(X_test)
#Add our target features to y_train and y_test.
y_train = X_train.Fraud
y_train = pd.concat([y_train, X_train.Normal], axis=1)
y_test = X_test.Fraud
y_test = pd.concat([y_test, X_test.Normal], axis=1)
#Drop target features from X_train and X_test.
X_train = X_train.drop(['Fraud','Normal'], axis = 1)
X_test = X_test.drop(['Fraud','Normal'], axis = 1)
#Check to ensure all of the training/testing dataframes are of the correct length
print(len(X_train))
print(len(y_train))
print(len(X_test))
print(len(y_test))
'''
Due to the class imbalance in the data, ratio acts as a weighting factor for the model.
Dividing the number of training transactions by the number of fraudulent ones gives a value that,
multiplied by the fraud count, recovers the total number of transactions (approximately the number
of normal transactions, since frauds are rare).
Simply put: # of fraud * ratio ≈ # of normal
'''
ratio = len(X_train)/count_Frauds
y_train.Fraud *= ratio
y_test.Fraud *= ratio
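# Worked example (illustrative figures, not computed from this run): with roughly
# 227,846 training rows of which about 394 are fraudulent,
# ratio = 227846 / 394 ≈ 578, so each fraud target is scaled up to ~578 while
# normal targets stay at 1, roughly balancing the two classes.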
#Names of all of the features in X_train.
features = X_train.columns.values
#Transform each feature in features so that it has a mean of 0 and standard deviation of 1;
#this helps with training the neural network.
for feature in features:
mean, std = df[feature].mean(), df[feature].std()
X_train.loc[:, feature] = (X_train[feature] - mean) / std
X_test.loc[:, feature] = (X_test[feature] - mean) / std
print("X Test:")
X_test.head()
print(X_test)
print("Y Test:")
y_test.head()
print(y_test)
# Split the testing data into validation and testing sets
split = int(len(y_test)/2)
inputX = X_train.as_matrix()
inputY = y_train.as_matrix()
inputX_valid = X_test.as_matrix()[:split]
inputY_valid = y_test.as_matrix()[:split]
inputX_test = X_test.as_matrix()[split:]
inputY_test = y_test.as_matrix()[split:]
# Number of input nodes.
input_nodes = 37
# Multiplier maintains a fixed ratio of nodes between each layer.
multiplier = 1.5
# Number of nodes in each hidden layer
hidden_nodes1 = 18
hidden_nodes2 = round(hidden_nodes1 * multiplier)
hidden_nodes3 = round(hidden_nodes2 * multiplier)
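# With the values above the hidden layers have 18, 27 and 40-41 nodes
# (18 * 1.5 = 27; round(27 * 1.5) = round(40.5), which is 40 under Python 3's
# banker's rounding and 41 under Python 2).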
# Percent of nodes to keep during dropout.
pkeep = tf.placeholder(tf.float32, [], 'pkeep')
# input
x = tf.placeholder(tf.float32, [None, input_nodes], 'inputdata')
# layer 1
W1 = tf.Variable(tf.truncated_normal([input_nodes, hidden_nodes1], stddev = 0.15))
b1 = tf.Variable(tf.zeros([hidden_nodes1]))
y1 = tf.nn.sigmoid(tf.matmul(x, W1) + b1)
# layer 2
W2 = tf.Variable(tf.truncated_normal([hidden_nodes1, hidden_nodes2], stddev = 0.15))
b2 = tf.Variable(tf.zeros([hidden_nodes2]))
y2 = tf.nn.sigmoid(tf.matmul(y1, W2) + b2)
# layer 3
W3 = tf.Variable(tf.truncated_normal([hidden_nodes2, hidden_nodes3], stddev = 0.15))
b3 = tf.Variable(tf.zeros([hidden_nodes3]))
y3 = tf.nn.sigmoid(tf.matmul(y2, W3) + b3)
y3 = tf.nn.dropout(y3, pkeep)
# layer 4
W4 = tf.Variable(tf.truncated_normal([hidden_nodes3, 2], stddev = 0.15))
b4 = tf.Variable(tf.zeros([2]))
y4 = tf.nn.softmax(tf.matmul(y3, W4) + b4)
# output
y = y4
y_ = tf.placeholder(tf.float32, [None, 2], 'normal_fraud_features')
# Parameters
training_epochs = 2000
training_dropout = 0.9
display_step = 1 # 10
n_samples = y_train.shape[0]
batch_size = 2048
learning_rate = 0.005
target_accuracy = 0.995 # should be 0.999
# Cost function: Cross Entropy
cost = -tf.reduce_sum(y_ * tf.log(y))
# We will optimize our model via AdamOptimizer
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Correct prediction if the most likely value (Fraud or Normal) from softmax equals the target value.
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
accuracy_summary = [] # Record accuracy values for plot
cost_summary = [] # Record cost values for plot
valid_accuracy_summary = []
valid_cost_summary = []
stop_early = 0 # To keep track of the number of epochs before early stopping
# Save the best weights so that they can be used to make the final predictions
checkpoint = "./output/checkpoint/model-checkpoint.ckpt"
saver = tf.train.Saver(max_to_keep=1)
# Initialize variables and tensorflow session
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(training_epochs):
for batch in range(int(n_samples/batch_size)):
batch_x = inputX[batch*batch_size : (1+batch)*batch_size]
batch_y = inputY[batch*batch_size : (1+batch)*batch_size]
sess.run([optimizer], feed_dict={x: batch_x,
y_: batch_y,
pkeep: training_dropout})
# Display logs after every display_step epochs
if (epoch) % display_step == 0:
train_accuracy, newCost = sess.run([accuracy, cost], feed_dict={x: inputX,
y_: inputY,
pkeep: training_dropout})
valid_accuracy, valid_newCost = sess.run([accuracy, cost], feed_dict={x: inputX_valid,
y_: inputY_valid,
pkeep: 1})
print ("Epoch:", epoch,
"Acc =", "{:.5f}".format(train_accuracy),
"Cost =", "{:.5f}".format(newCost),
"Valid_Acc =", "{:.5f}".format(valid_accuracy),
"Valid_Cost = ", "{:.5f}".format(valid_newCost))
# Save the weights if these conditions are met.
if epoch > 0 and valid_accuracy > max(valid_accuracy_summary) and valid_accuracy > target_accuracy:
print("Model being saved in path: %s" % checkpoint)
save_path = saver.save(sess, checkpoint)
print("Model saved in path: %s " % save_path)
# Record the results of the model
accuracy_summary.append(train_accuracy)
cost_summary.append(newCost)
valid_accuracy_summary.append(valid_accuracy)
valid_cost_summary.append(valid_newCost)
# If validation accuracy does not improve for 20 consecutive logs (after epoch 200), stop the training.
if valid_accuracy < max(valid_accuracy_summary) and epoch > 200:
stop_early += 1
if stop_early == 20:
break
else:
stop_early = 0
print()
print("Optimization Finished!")
print()
with tf.Session() as sess:
# Load the best weights and show its results
saver.restore(sess, checkpoint)
training_accuracy = sess.run(accuracy, feed_dict={x: inputX, y_: inputY, pkeep: training_dropout})
validation_accuracy = sess.run(accuracy, feed_dict={x: inputX_valid, y_: inputY_valid, pkeep: 1})
print("Results using the best Valid_Acc:")
print()
print("Training Accuracy =", training_accuracy)
print("Validation Accuracy =", validation_accuracy)
# Plot the accuracy and cost summaries
#f, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(10,4))
#ax1.plot(accuracy_summary) # blue
#ax1.plot(valid_accuracy_summary) # green
#ax1.set_title('Accuracy')
#ax2.plot(cost_summary)
#ax2.plot(valid_cost_summary)
#ax2.set_title('Cost')
#plt.xlabel('Epochs (x10)')
#plt.show()
# Find the predicted values, then use them to build a confusion matrix
predicted = tf.argmax(y, 1)
with tf.Session() as sess:
# Load the best weights
saver.restore(sess, checkpoint)
testing_predictions, testing_accuracy = sess.run([predicted, accuracy],
feed_dict={x: inputX_test, y_:inputY_test, pkeep: 1})
print("F1-Score =", f1_score(inputY_test[:,1], testing_predictions))
print("Testing Accuracy =", testing_accuracy)
print()
# c = confusion_matrix(inputY_test[:,1], testing_predictions)
# show_confusion_matrix(c, ['Fraud', 'Normal'])
# Save the model:
predicted = tf.argmax(y, 1)
with tf.Session() as sess:
# Reload the checkpoint and save it out as a saved model
saver.restore(sess, checkpoint)
# init = tf.global_variables_initializer()
# sess.run(init)
inputs_dict = {
"x": x,
"a": y_
}
outputs_dict = {
"output": y
}
tf.saved_model.simple_save(
sess, './output/saved-model', inputs_dict, outputs_dict
)
print('Tensor to fetch as prediction: ', y.name)
# testing_predictions, testing_accuracy = sess.run([predicted],
# feed_dict={x: inputX_test, y_:inputY_test, pkeep: 1})
#
print("Testing Accuracy =", testing_accuracy)
print()
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime as date
import os.path
import mock
from rally.cmd.commands import task
from rally import consts
from rally import exceptions
from tests.unit import fakes
from tests.unit import test
class TaskCommandsTestCase(test.TestCase):
def setUp(self):
super(TaskCommandsTestCase, self).setUp()
self.task = task.TaskCommands()
@mock.patch("rally.cmd.commands.task.open", create=True)
def test_load_task(self, mock_open):
input_task = "{'ab': {{test}}}"
input_args = "{'test': 2}"
# NOTE(boris-42): Such order of files is because we are reading
# file with args before file with template.
mock_open.side_effect = [
mock.mock_open(read_data="{'test': 1}").return_value,
mock.mock_open(read_data=input_task).return_value
]
result = self.task._load_task("in_task", task_args_file="in_args_path")
self.assertEqual(result, {"ab": 1})
mock_open.side_effect = [
mock.mock_open(read_data=input_task).return_value
]
result = self.task._load_task("in_task", task_args=input_args)
self.assertEqual(result, {"ab": 2})
mock_open.side_effect = [
mock.mock_open(read_data="{'test': 1}").return_value,
mock.mock_open(read_data=input_task).return_value
]
result = self.task._load_task("in_task", task_args=input_args,
task_args_file="any_file")
self.assertEqual(result, {"ab": 2})
@mock.patch("rally.cmd.commands.task.open", create=True)
def test_load_task_wrong_task_args_file(self, mock_open):
mock_open.side_effect = [
mock.mock_open(read_data="{'test': {}").return_value
]
self.assertRaises(task.FailedToLoadTask,
self.task._load_task,
"in_task", task_args_file="in_args_path")
@mock.patch("rally.cmd.commands.task.open", create=True)
def test_load_task_wrong_task_args_file_exception(self, mock_open):
mock_open.side_effect = IOError
self.assertRaises(IOError, self.task._load_task,
"in_task", task_args_file="in_args_path")
def test_load_task_wrong_input_task_args(self):
self.assertRaises(task.FailedToLoadTask,
self.task._load_task, "in_task", "{'test': {}")
self.assertRaises(task.FailedToLoadTask,
self.task._load_task, "in_task", "[]")
@mock.patch("rally.cmd.commands.task.open", create=True)
def test_load_task_task_render_raise_exc(self, mock_open):
mock_open.side_effect = [
mock.mock_open(read_data="{'test': {{t}}}").return_value
]
self.assertRaises(task.FailedToLoadTask,
self.task._load_task, "in_task")
@mock.patch("rally.cmd.commands.task.open", create=True)
def test_load_task_task_not_in_yaml(self, mock_open):
mock_open.side_effect = [
mock.mock_open(read_data="{'test': {}").return_value
]
self.assertRaises(task.FailedToLoadTask,
self.task._load_task, "in_task")
@mock.patch("rally.cmd.commands.task.TaskCommands.detailed")
@mock.patch("rally.cmd.commands.task.TaskCommands._load_task",
return_value={"some": "json"})
@mock.patch("rally.api.Task.create")
@mock.patch("rally.cmd.commands.task.api.Task.start")
def test_start(self, mock_api, mock_create_task, mock_load,
mock_task_detailed):
mock_create_task.return_value = (
dict(uuid="c1a9bbe-1ead-4740-92b5-0feecf421634",
created_at="2014-01-14 09:14:45.395822",
status="init", tag=None))
deployment_id = "e0617de9-77d1-4875-9b49-9d5789e29f20"
task_path = "path_to_config.json"
self.task.start(task_path, deployment_id)
mock_api.assert_called_once_with(deployment_id, {"some": "json"},
task=mock_create_task.return_value,
abort_on_sla_failure=False)
mock_load.assert_called_once_with(task_path, None, None)
@mock.patch("rally.cmd.commands.task.TaskCommands._load_task",
side_effect=task.FailedToLoadTask)
def test_start_with_task_args(self, mock_load):
task_path = mock.MagicMock()
task_args = mock.MagicMock()
task_args_file = mock.MagicMock()
self.task.start(task_path, deployment="any", task_args=task_args,
task_args_file=task_args_file)
mock_load.assert_called_once_with(task_path, task_args, task_args_file)
@mock.patch("rally.cmd.commands.task.envutils.get_global")
def test_start_no_deployment_id(self, mock_default):
mock_default.side_effect = exceptions.InvalidArgumentsException
self.assertRaises(exceptions.InvalidArgumentsException,
self.task.start, "path_to_config.json", None)
@mock.patch("rally.cmd.commands.task.TaskCommands._load_task")
@mock.patch("rally.cmd.commands.task.api")
def test_start_invalid_task(self, mock_api, mock_load):
mock_api.Task.start.side_effect = exceptions.InvalidConfigException
result = self.task.start("task_path", "deployment", tag="tag")
self.assertEqual(1, result)
mock_api.Task.create.assert_called_once_with("deployment", "tag")
mock_api.Task.start.assert_called_once_with(
"deployment", mock_load.return_value,
task=mock_api.Task.create.return_value, abort_on_sla_failure=False)
@mock.patch("rally.cmd.commands.task.api")
def test_abort(self, mock_api):
test_uuid = "17860c43-2274-498d-8669-448eff7b073f"
mock_api.Task.abort = mock.MagicMock()
self.task.abort(test_uuid)
task.api.Task.abort.assert_called_once_with(test_uuid)
@mock.patch("rally.cmd.commands.task.envutils.get_global")
def test_abort_no_task_id(self, mock_default):
mock_default.side_effect = exceptions.InvalidArgumentsException
self.assertRaises(exceptions.InvalidArgumentsException,
self.task.abort, None)
def test_status(self):
test_uuid = "a3e7cefb-bec2-4802-89f6-410cc31f71af"
value = {"task_id": "task", "status": "status"}
with mock.patch("rally.cmd.commands.task.db") as mock_db:
mock_db.task_get = mock.MagicMock(return_value=value)
self.task.status(test_uuid)
mock_db.task_get.assert_called_once_with(test_uuid)
@mock.patch("rally.cmd.commands.task.envutils.get_global")
def test_status_no_task_id(self, mock_default):
mock_default.side_effect = exceptions.InvalidArgumentsException
self.assertRaises(exceptions.InvalidArgumentsException,
self.task.status, None)
@mock.patch("rally.cmd.commands.task.db")
def test_detailed(self, mock_db):
test_uuid = "c0d874d4-7195-4fd5-8688-abe82bfad36f"
value = {
"id": "task",
"uuid": test_uuid,
"status": "status",
"results": [
{
"key": {
"name": "fake_name",
"pos": "fake_pos",
"kw": "fake_kw"
},
"data": {
"load_duration": 1.0,
"full_duration": 2.0,
"raw": [
{
"duration": 0.9,
"idle_duration": 0.5,
"scenario_output": {
"data": {
"a": 3
},
"errors": "some"
},
"atomic_actions": {
"a": 0.6,
"b": 0.7
},
"error": ["type", "message", "traceback"]
},
{
"duration": 0.5,
"idle_duration": 0.2,
"scenario_output": {
"data": {
"a": 1
},
"errors": "some"
},
"atomic_actions": {
"a": 0.2,
"b": 0.4
},
"error": None
},
{
"duration": 0.6,
"idle_duration": 0.4,
"scenario_output": {
"data": {
"a": 2
},
"errors": None
},
"atomic_actions": {
"a": 0.3,
"b": 0.5
},
"error": None
}
]
}
}
]
}
mock_db.task_get_detailed = mock.MagicMock(return_value=value)
self.task.detailed(test_uuid)
mock_db.task_get_detailed.assert_called_once_with(test_uuid)
self.task.detailed(test_uuid, iterations_data=True)
@mock.patch("rally.cmd.commands.task.db")
@mock.patch("rally.cmd.commands.task.logging")
def test_detailed_task_failed(self, mock_logging, mock_db):
value = {
"id": "task",
"uuid": "task_uuid",
"status": consts.TaskStatus.FAILED,
"results": [],
"verification_log": "['1', '2', '3']"
}
mock_db.task_get_detailed = mock.MagicMock(return_value=value)
mock_logging.is_debug.return_value = False
self.task.detailed("task_uuid")
mock_logging.is_debug.return_value = True
self.task.detailed("task_uuid")
@mock.patch("rally.cmd.commands.task.envutils.get_global")
def test_detailed_no_task_id(self, mock_default):
mock_default.side_effect = exceptions.InvalidArgumentsException
self.assertRaises(exceptions.InvalidArgumentsException,
self.task.detailed, None)
@mock.patch("rally.cmd.commands.task.db")
def test_detailed_wrong_id(self, mock_db):
test_uuid = "eb290c30-38d8-4c8f-bbcc-fc8f74b004ae"
mock_db.task_get_detailed = mock.MagicMock(return_value=None)
self.task.detailed(test_uuid)
mock_db.task_get_detailed.assert_called_once_with(test_uuid)
@mock.patch("json.dumps")
@mock.patch("rally.cmd.commands.task.objects.Task.get")
def test_results(self, mock_get, mock_json):
task_id = "foo_task_id"
data = [
{"key": "foo_key", "data": {"raw": "foo_raw", "sla": [],
"load_duration": "lo_duration",
"full_duration": "fu_duration"}}
]
result = map(lambda x: {"key": x["key"],
"result": x["data"]["raw"],
"load_duration": x["data"]["load_duration"],
"full_duration": x["data"]["full_duration"],
"sla": x["data"]["sla"]}, data)
mock_results = mock.Mock(return_value=data)
mock_get.return_value = mock.Mock(get_results=mock_results)
self.task.results(task_id)
self.assertEqual(1, mock_json.call_count)
self.assertEqual(1, len(mock_json.call_args[0]))
self.assertSequenceEqual(result, mock_json.call_args[0][0])
self.assertEqual({"sort_keys": True, "indent": 4},
mock_json.call_args[1])
mock_get.assert_called_once_with(task_id)
@mock.patch("rally.cmd.commands.task.sys.stdout")
@mock.patch("rally.cmd.commands.task.objects.Task.get")
def test_results_no_data(self, mock_get, mock_stdout):
task_id = "foo_task_id"
mock_results = mock.Mock(return_value=[])
mock_get.return_value = mock.Mock(get_results=mock_results)
result = self.task.results(task_id)
mock_get.assert_called_once_with(task_id)
self.assertEqual(1, result)
expected_out = ("The task %s is still running, results will become"
" available when it is finished." % task_id)
mock_stdout.write.assert_has_calls([mock.call(expected_out)])
@mock.patch("rally.cmd.commands.task.jsonschema.validate",
return_value=None)
@mock.patch("rally.cmd.commands.task.os.path.realpath",
side_effect=lambda p: "realpath_%s" % p)
@mock.patch("rally.cmd.commands.task.open",
side_effect=mock.mock_open(), create=True)
@mock.patch("rally.cmd.commands.task.plot")
@mock.patch("rally.cmd.commands.task.webbrowser")
@mock.patch("rally.cmd.commands.task.objects.Task.get")
def test_report_one_uuid(self, mock_get, mock_web, mock_plot, mock_open,
mock_os, mock_validate):
task_id = "eb290c30-38d8-4c8f-bbcc-fc8f74b004ae"
data = [
{"key": {"name": "test", "pos": 0},
"data": {"raw": "foo_raw", "sla": "foo_sla",
"load_duration": 0.1,
"full_duration": 1.2}},
{"key": {"name": "test", "pos": 0},
"data": {"raw": "bar_raw", "sla": "bar_sla",
"load_duration": 2.1,
"full_duration": 2.2}}]
results = [{"key": x["key"],
"result": x["data"]["raw"],
"sla": x["data"]["sla"],
"load_duration": x["data"]["load_duration"],
"full_duration": x["data"]["full_duration"]}
for x in data]
mock_results = mock.Mock(return_value=data)
mock_get.return_value = mock.Mock(get_results=mock_results)
mock_plot.plot.return_value = "html_report"
def reset_mocks():
for m in mock_get, mock_web, mock_plot, mock_open:
m.reset_mock()
self.task.report(tasks=task_id, out="/tmp/%s.html" % task_id)
mock_open.assert_called_once_with("/tmp/%s.html" % task_id, "w+")
mock_plot.plot.assert_called_once_with(results)
mock_open.side_effect().write.assert_called_once_with("html_report")
mock_get.assert_called_once_with(task_id)
reset_mocks()
self.task.report(task_id, out="spam.html", open_it=True)
mock_web.open_new_tab.assert_called_once_with(
"file://realpath_spam.html")
@mock.patch("rally.cmd.commands.task.jsonschema.validate",
return_value=None)
@mock.patch("rally.cmd.commands.task.os.path.realpath",
side_effect=lambda p: "realpath_%s" % p)
@mock.patch("rally.cmd.commands.task.open",
side_effect=mock.mock_open(), create=True)
@mock.patch("rally.cmd.commands.task.plot")
@mock.patch("rally.cmd.commands.task.webbrowser")
@mock.patch("rally.cmd.commands.task.objects.Task.get")
def test_report_bunch_uuids(self, mock_get, mock_web, mock_plot, mock_open,
mock_os, mock_validate):
tasks = ["eb290c30-38d8-4c8f-bbcc-fc8f74b004ae",
"eb290c30-38d8-4c8f-bbcc-fc8f74b004af"]
data = [
{"key": {"name": "test", "pos": 0},
"data": {"raw": "foo_raw", "sla": "foo_sla",
"load_duration": 0.1,
"full_duration": 1.2}},
{"key": {"name": "test", "pos": 0},
"data": {"raw": "bar_raw", "sla": "bar_sla",
"load_duration": 2.1,
"full_duration": 2.2}}]
results = []
for task_uuid in tasks:
results.extend(
map(lambda x: {"key": x["key"],
"result": x["data"]["raw"],
"sla": x["data"]["sla"],
"load_duration": x["data"]["load_duration"],
"full_duration": x["data"]["full_duration"]},
data))
mock_results = mock.Mock(return_value=data)
mock_get.return_value = mock.Mock(get_results=mock_results)
mock_plot.plot.return_value = "html_report"
def reset_mocks():
for m in mock_get, mock_web, mock_plot, mock_open:
m.reset_mock()
self.task.report(tasks=tasks, out="/tmp/1_test.html")
mock_open.assert_called_once_with("/tmp/1_test.html", "w+")
mock_plot.plot.assert_called_once_with(results)
mock_open.side_effect().write.assert_called_once_with("html_report")
expected_get_calls = [mock.call(task) for task in tasks]
mock_get.assert_has_calls(expected_get_calls, any_order=True)
@mock.patch("rally.cmd.commands.task.json.load")
@mock.patch("rally.cmd.commands.task.os.path.exists", return_value=True)
@mock.patch("rally.cmd.commands.task.jsonschema.validate",
return_value=None)
@mock.patch("rally.cmd.commands.task.os.path.realpath",
side_effect=lambda p: "realpath_%s" % p)
@mock.patch("rally.cmd.commands.task.open", create=True)
@mock.patch("rally.cmd.commands.task.plot")
def test_report_one_file(self, mock_plot, mock_open, mock_os,
mock_validate, mock_path_exists, mock_json_load):
task_file = "/tmp/some_file.json"
data = [
{"key": {"name": "test", "pos": 0},
"data": {"raw": "foo_raw", "sla": "foo_sla",
"load_duration": 0.1,
"full_duration": 1.2}},
{"key": {"name": "test", "pos": 1},
"data": {"raw": "bar_raw", "sla": "bar_sla",
"load_duration": 2.1,
"full_duration": 2.2}}]
results = [{"key": x["key"],
"result": x["data"]["raw"],
"sla": x["data"]["sla"],
"load_duration": x["data"]["load_duration"],
"full_duration": x["data"]["full_duration"]}
for x in data]
mock_plot.plot.return_value = "html_report"
mock_open.side_effect = mock.mock_open(read_data=results)
mock_json_load.return_value = results
def reset_mocks():
for m in mock_plot, mock_open, mock_json_load, mock_validate:
m.reset_mock()
self.task.report(tasks=task_file, out="/tmp/1_test.html")
expected_open_calls = [mock.call(task_file, "r"),
mock.call("/tmp/1_test.html", "w+")]
mock_open.assert_has_calls(expected_open_calls, any_order=True)
mock_plot.plot.assert_called_once_with(results)
mock_open.side_effect().write.assert_called_once_with("html_report")
@mock.patch("rally.cmd.commands.task.os.path.exists", return_value=True)
@mock.patch("rally.cmd.commands.task.json.load")
@mock.patch("rally.cmd.commands.task.open", create=True)
def test_report_exceptions(self, mock_open, mock_json_load,
mock_path_exists):
results = [
{"key": {"name": "test", "pos": 0},
"data": {"raw": "foo_raw", "sla": "foo_sla",
"load_duration": 0.1,
"full_duration": 1.2}}]
mock_open.side_effect = mock.mock_open(read_data=results)
mock_json_load.return_value = results
ret = self.task.report(tasks="/tmp/task.json",
out="/tmp/tmp.hsml")
self.assertEqual(ret, 1)
for m in mock_open, mock_json_load:
m.reset_mock()
mock_path_exists.return_value = False
ret = self.task.report(tasks="/tmp/task.json",
out="/tmp/tmp.hsml")
self.assertEqual(ret, 1)
@mock.patch("rally.cmd.commands.task.cliutils.print_list")
@mock.patch("rally.cmd.commands.task.envutils.get_global",
return_value="123456789")
@mock.patch("rally.cmd.commands.task.objects.Task.list",
return_value=[fakes.FakeTask(uuid="a",
created_at=date.datetime.now(),
updated_at=date.datetime.now(),
status="c",
tag="d",
deployment_name="some_name")])
def test_list(self, mock_objects_list, mock_default, mock_print_list):
self.task.list(status="running")
mock_objects_list.assert_called_once_with(
deployment=mock_default.return_value,
status=consts.TaskStatus.RUNNING)
headers = ["uuid", "deployment_name", "created_at", "duration",
"status", "tag"]
mock_print_list.assert_called_once_with(
mock_objects_list.return_value, headers,
sortby_index=headers.index("created_at"))
@mock.patch("rally.cmd.commands.task.cliutils.print_list")
@mock.patch("rally.cmd.commands.task.envutils.get_global",
return_value="123456789")
@mock.patch("rally.cmd.commands.task.objects.Task.list",
return_value=[fakes.FakeTask(uuid="a",
created_at=date.datetime.now(),
updated_at=date.datetime.now(),
status="c",
tag="d",
deployment_name="some_name")])
def test_list_uuids_only(self, mock_objects_list, mock_default,
mock_print_list):
self.task.list(status="running", uuids_only=True)
mock_objects_list.assert_called_once_with(
deployment=mock_default.return_value,
status=consts.TaskStatus.RUNNING)
mock_print_list.assert_called_once_with(
mock_objects_list.return_value, ["uuid"],
print_header=False, print_border=False)
def test_list_wrong_status(self):
self.assertEqual(1, self.task.list(deployment="fake",
status="wrong non existing status"))
@mock.patch("rally.cmd.commands.task.objects.Task.list", return_value=[])
def test_list_no_results(self, mock_list):
self.assertIsNone(
self.task.list(deployment="fake", all_deployments=True))
mock_list.assert_called_once_with()
mock_list.reset_mock()
self.assertIsNone(
self.task.list(deployment="d", status=consts.TaskStatus.RUNNING)
)
mock_list.assert_called_once_with(deployment="d",
status=consts.TaskStatus.RUNNING)
def test_delete(self):
task_uuid = "8dcb9c5e-d60b-4022-8975-b5987c7833f7"
force = False
with mock.patch("rally.cmd.commands.task.api") as mock_api:
mock_api.Task.delete = mock.Mock()
self.task.delete(task_uuid, force=force)
mock_api.Task.delete.assert_called_once_with(task_uuid,
force=force)
@mock.patch("rally.cmd.commands.task.api")
def test_delete_multiple_uuid(self, mock_api):
task_uuids = ["4bf35b06-5916-484f-9547-12dce94902b7",
"52cad69d-d3e4-47e1-b445-dec9c5858fe8",
"6a3cb11c-ac75-41e7-8ae7-935732bfb48f",
"018af931-0e5a-40d5-9d6f-b13f4a3a09fc"]
force = False
self.task.delete(task_uuids, force=force)
self.assertTrue(mock_api.Task.delete.call_count == len(task_uuids))
expected_calls = [mock.call(task_uuid, force=force) for task_uuid
in task_uuids]
self.assertTrue(mock_api.Task.delete.mock_calls == expected_calls)
@mock.patch("rally.cmd.commands.task.cliutils.print_list")
@mock.patch("rally.cmd.commands.task.objects.Task.get")
def test_sla_check(self, mock_task_get, mock_print_list):
data = [{"key": {"name": "fake_name",
"pos": "fake_pos",
"kw": "fake_kw"},
"data": {"scenario_duration": 42.0,
"raw": [],
"sla": [{"benchmark": "KeystoneBasic.create_user",
"criterion": "max_seconds_per_iteration",
"pos": 0,
"success": False,
"detail": "Max foo, actually bar"}]}}]
mock_task_get().get_results.return_value = copy.deepcopy(data)
result = self.task.sla_check(task_id="fake_task_id")
self.assertEqual(1, result)
mock_task_get.assert_called_with("fake_task_id")
data[0]["data"]["sla"][0]["success"] = True
mock_task_get().get_results.return_value = data
result = self.task.sla_check(task_id="fake_task_id", tojson=True)
self.assertEqual(0, result)
@mock.patch("rally.cmd.commands.task.open",
mock.mock_open(read_data="{\"some\": \"json\"}"),
create=True)
@mock.patch("rally.api.Task.validate")
def test_validate(self, mock_validate):
self.task.validate("path_to_config.json", "fake_id")
mock_validate.assert_called_once_with("fake_id", {"some": "json"})
@mock.patch("rally.cmd.commands.task.TaskCommands._load_task",
side_effect=task.FailedToLoadTask)
def test_validate_failed_to_load_task(self, mock_load):
args = mock.MagicMock()
args_file = mock.MagicMock()
result = self.task.validate("path_to_task", "fake_id",
task_args=args, task_args_file=args_file)
self.assertEqual(1, result)
mock_load.assert_called_once_with("path_to_task", args, args_file)
@mock.patch("rally.cmd.commands.task.TaskCommands._load_task")
@mock.patch("rally.api.Task.validate")
def test_validate_invalid(self, mock_task_validate, mock_load):
mock_task_validate.side_effect = exceptions.InvalidTaskException
result = self.task.validate("path_to_task", "deployment")
self.assertEqual(1, result)
mock_task_validate.assert_called_once_with("deployment",
mock_load.return_value)
@mock.patch("rally.common.fileutils._rewrite_env_file")
@mock.patch("rally.cmd.commands.task.db.task_get", return_value=True)
def test_use(self, mock_task, mock_file):
task_id = "80422553-5774-44bd-98ac-38bd8c7a0feb"
self.task.use(task_id)
mock_file.assert_called_once_with(
os.path.expanduser("~/.rally/globals"),
["RALLY_TASK=%s\n" % task_id])
@mock.patch("rally.cmd.commands.task.db.task_get")
def test_use_not_found(self, mock_task):
task_id = "ddc3f8ba-082a-496d-b18f-72cdf5c10a14"
mock_task.side_effect = exceptions.TaskNotFound(uuid=task_id)
self.assertRaises(exceptions.TaskNotFound, self.task.use, task_id)
|
|
"""
Functions for building tensorflow computational graph models. RNN models,
and tensorflow loss functions will be added to this module.
"""
import tensorflow as tf
import math
from np_ops import fan_scale
def batch_normalize(tensor_in, epsilon=1e-5, decay=0.999):
"""
Batch Normalization:
`Batch Normalization Accelerating Deep Network Training by Reducing Internal Covariate Shift`_
An exponential moving average of means and variances is calculated to estimate sample mean
and sample variance for evaluation. For testing, pair the placeholder is_training
with [0] in feed_dict. For training, pair the placeholder is_training
with [1] in feed_dict. Example:
Let **train = 1** for training and **train = 0** for evaluation
.. code-block:: python
bn_deciders = {decider:[train] for decider in tf.get_collection('bn_deciders')}
feed_dict.update(bn_deciders)
During training the running statistics are updated, and batch statistics are used for normalization.
During testing the running statistics are not updated, and running statistics are used for normalization.
:param tensor_in: Input Tensor.
:param epsilon: A float number to avoid being divided by 0.
:param decay: For exponential decay estimate of running mean and variance.
:return: Tensor normalized to approximately zero mean and unit variance according to the chosen (batch or running) statistics.
"""
is_training = tf.placeholder(tf.int32, shape=[None]) # [1] or [0], Using a placeholder to decide which
# statistics to use for normalization allows
# either the running stats or the batch stats to
# be used without rebuilding the graph.
tf.add_to_collection('bn_deciders', is_training)
pop_mean = tf.Variable(tf.zeros([tensor_in.get_shape()[-1]]), trainable=False)
pop_var = tf.Variable(tf.ones([tensor_in.get_shape()[-1]]), trainable=False)
# calculate batch mean/var and running mean/var
batch_mean, batch_variance = tf.nn.moments(tensor_in, [0])
# The running mean/variance is updated when is_training == 1.
running_mean = tf.assign(pop_mean,
pop_mean * (decay + (1.0 - decay)*(1.0 - tf.to_float(is_training))) +
batch_mean * (1.0 - decay) * tf.to_float(is_training))
running_var = tf.assign(pop_var,
pop_var * (decay + (1.0 - decay)*(1.0 - tf.to_float(is_training))) +
batch_variance * (1.0 - decay) * tf.to_float(is_training))
# Choose statistic
mean = tf.nn.embedding_lookup(tf.pack([running_mean, batch_mean]), is_training)
variance = tf.nn.embedding_lookup(tf.pack([running_var, batch_variance]), is_training)
shape = tensor_in.get_shape().as_list()
gamma = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[shape[1]], name='gamma'))
beta = tf.Variable(tf.constant(1.0, dtype=tf.float32, shape=[shape[1]], name='beta'))
# Batch Norm Transform
inv = tf.rsqrt(epsilon + variance)
tensor_in = beta * (tensor_in - mean) * inv + gamma
return tensor_in
def dropout(tensor_in, prob):
"""
Adds dropout node.
`Dropout A Simple Way to Prevent Neural Networks from Overfitting`_
:param tensor_in: Input tensor.
:param prob: The percent of units to keep.
:return: Tensor of the same shape of *tensor_in*.
"""
if isinstance(prob, float):
keep_prob = tf.placeholder(tf.float32)
tf.add_to_collection('dropout_prob', (keep_prob, prob))
else:
# prob is assumed to already be a tensor or placeholder holding the keep probability
keep_prob = prob
return tf.nn.dropout(tensor_in, keep_prob)
def dnn(x, layers=[100, 408], act=tf.nn.relu, scale_range=1.0, bn=False, keep_prob=None, name='nnet'):
"""
An arbitrarily deep neural network. Output has non-linear activation.
:param x: Input to the network.
:param layers: List of sizes of network layers.
:param act: Activation function to produce hidden layers of neural network.
:param scale_range: Scaling factor for initial range of weights (set to 1/sqrt(fan_in) for tanh, sqrt(2/fan_in) for relu).
:param bn: Whether to use batch normalization.
:param keep_prob: The percent of nodes to keep in dropout layers.
:param name: For naming and variable scope.
:return: (tf.Tensor) Output of neural net. The final hidden layer's activation (and dropout,
if keep_prob is given) has already been applied.
"""
for ind, hidden_size in enumerate(layers):
with tf.variable_scope('layer_%s' % ind):
fan_in = x.get_shape().as_list()[1]
W = tf.Variable(fan_scale(scale_range, act, x)*tf.truncated_normal([fan_in, hidden_size],
mean=0.0, stddev=1.0,
dtype=tf.float32, seed=None, name='W'))
tf.add_to_collection(name + '_weights', W)
b = tf.Variable(tf.zeros([hidden_size]))
tf.add_to_collection(name + '_bias', b)
x = tf.matmul(x,W) + b
if bn:
x = batch_normalize(x)
x = act(x, name='h' + str(ind)) # The hidden layer
tf.add_to_collection(name + '_activation', x)
if keep_prob:
x = dropout(x, keep_prob)
return x
def join_multivariate_inputs(feature_spec, specs, embedding_ratio, max_embedding, min_embedding):
"""
Makes placeholders for all input data, performs a lookup on an embedding matrix for each categorical feature,
and concatenates the resulting real-valued vectors from individual features into a single vector for each data point in the batch.
:param feature_spec: A dict {categorical: [c1, c2, ..., cp], continuous: [f1, f2, ..., fk]}
which lists which features to use as categorical and continuous inputs to the model.
c1, ..., cp, f1, ...,fk should match a key in specs.
:param specs: A python dict containing information about which indices in the incoming data point correspond to which features.
Entries for continuous features list the indices for the feature, while entries for categorical features
contain a dictionary- {'index': i, 'num_classes': c}, where i and c are the index into the datapoint, and number of distinct
categories for the category in question.
:param embedding_ratio: Determines size of embedding vectors for each categorical feature: num_classes*embedding_ratio (within limits below)
:param max_embedding: A limit on how large an embedding vector can be.
:param min_embedding: A limit on how small an embedding vector can be.
:return: A tuple (x, placeholderdict):
(tensor with shape [None, Sum_of_lengths_of_all_continuous_feature_vecs_and_embedding_vecs],
dict of tf placeholders to pair with data)
"""
placeholderdict, embeddings, continuous_features, targets = {}, {}, {}, {}
# Make placeholders for all input data and select embeddings for categorical data
for dataname in feature_spec['categorical']:
embedding_size = math.ceil(embedding_ratio * specs[dataname]['num_classes'])
embedding_size = int(max(min(max_embedding, embedding_size), min_embedding))
with tf.variable_scope(dataname):
placeholderdict[dataname] = tf.placeholder(tf.int32, [None])
embedding_matrix = tf.Variable(1e-5*tf.truncated_normal((specs[dataname]['num_classes'], embedding_size), dtype=tf.float32))
embeddings[dataname] = tf.nn.embedding_lookup(embedding_matrix, placeholderdict[dataname])
for dataname in feature_spec['continuous']:
placeholderdict[dataname] = tf.placeholder(tf.float32, [None, len(specs[dataname])])
continuous_features[dataname] = placeholderdict[dataname]
# concatenate all features
return tf.concat(1, continuous_features.values() + embeddings.values(), name='features'), placeholderdict
# ============================================================
# ================ LOSS FUNCTIONS ============================
# ============================================================
def softmax_dist_loss(truth, h, dimension, scale_range=1.0):
"""
This function paired with a tensorflow optimizer is multinomial logistic regression.
It is designed for categorical predictions.
:param truth: A tensorflow vector tensor of integer class labels.
:param h: A placeholder if doing simple multinomial logistic regression, or the output of some neural network.
:param scale_range: For scaling the weight matrices (by default weights are initialized to 1/sqrt(fan_in) for
tanh activation and sqrt(2/fan_in) for relu activation).
:return: (Tensor[MB X 1]) Cross-entropy of true distribution vs. predicted distribution.
"""
fan_in = h.get_shape().as_list()[1]
U = tf.Variable(fan_scale(scale_range, tf.tanh, h) * tf.truncated_normal([fan_in, dimension],
dtype=tf.float32,
name='W'))
b = tf.Variable(tf.zeros([dimension]))
y = tf.nn.softmax(tf.matmul(h, U) + b)
loss_column = -tf.log(tf.diag_part(tf.nn.embedding_lookup(tf.transpose(y), truth)))
loss_column = tf.reshape(loss_column, [-1, 1])
return loss_column
def eyed_mvn_loss(truth, h, scale_range=1.0):
"""
This function takes the output of a neural network after its last activation, performs an affine transform,
and returns the squared error between this result and the target.
:param truth: A tensor of target vectors.
:param h: The output of a neural network post activation.
:param scale_range: For scaling the weight matrices (by default weights are initialized to 1/sqrt(fan_in) for
tanh activation and sqrt(2/fan_in) for relu activation).
:return: (tf.Tensor[MB X D], None) squared_error, None
"""
fan_in = h.get_shape().as_list()[1]
dim = truth.get_shape().as_list()[1]
U = tf.Variable(fan_scale(scale_range, tf.tanh, h) * tf.truncated_normal([fan_in, dim],
dtype=tf.float32, name='U'))
b = tf.Variable(tf.zeros([dim]))
y = tf.matmul(h, U) + b
loss_columns = tf.square(y-truth)
return loss_columns, None
def diag_mvn_loss(truth, h, scale_range=1.0, variance_floor=0.1):
"""
    Takes the output of a neural network after its last activation and performs an affine transform.
    It returns the Mahalanobis distances between the targets and the result of the affine transformation, according
to a parametrized Normal distribution with diagonal covariance. The log of the determinant of the parametrized
covariance matrix is meant to be minimized to avoid a trivial optimization.
:param truth: (tf.Tensor) The targets for this minibatch.
    :param h: (tf.Tensor) The output of the dnn.
              (Here the output of the dnn, h, is assumed to have the same dimension as truth.)
    :param scale_range: For scaling the weight matrices (by default weights are initialized to 1/sqrt(fan_in) for
                        tanh activation and sqrt(2/fan_in) for relu activation).
    :param variance_floor: (float, positive) Lower bound on the predicted variances, to ensure the model doesn't find a trivial optimization.
:return: (tf.Tensor[MB X D], tf.Tensor[MB X 1]) Loss matrix, log_of_determinants of covariance matrices.
"""
fan_in = h.get_shape().as_list()[1]
dim = truth.get_shape().as_list()[1]
U = tf.Variable(
fan_scale(scale_range, tf.tanh, h) * tf.truncated_normal([fan_in, 2 * dim],
dtype=tf.float32,
name='U'))
b = tf.Variable(tf.zeros([2 * dim]))
y = tf.matmul(h, U) + b
mu, var = tf.split(1, 2, y) # split y into two even sized matrices, each with half the columns
    var = tf.maximum(tf.exp(var),  # exponentiate to keep the variance positive, then apply the floor
                     tf.constant(variance_floor, shape=[dim], dtype=tf.float32))
logdet = tf.reduce_sum(tf.log(var), 1) # MB x 1
loss_columns = tf.square(truth - mu) / var # MB x D
return loss_columns, tf.reshape(logdet, [-1, 1])
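# Sketch (an assumption, not part of the original module): the two tensors
# returned by diag_mvn_loss are typically combined into a per-example
# negative log-likelihood (up to constants and the 1/2 factor) by summing
# the scaled squared errors and adding the log-determinant term.
def _example_diag_mvn_objective(truth, h):
    """Illustrative only: scalar training objective from diag_mvn_loss."""
    loss_columns, logdet = diag_mvn_loss(truth, h)
    per_example = tf.reduce_sum(loss_columns, 1, keep_dims=True) + logdet  # MB x 1
    return tf.reduce_mean(per_example)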
def full_mvn_loss(truth, h, scale_range=1.0):
"""
    Takes the output of a neural network after its last activation and performs an affine transform.
    It returns the Mahalanobis distances between the targets and the result of the affine transformation, according
to a parametrized Normal distribution. The log of the determinant of the parametrized
covariance matrix is meant to be minimized to avoid a trivial optimization.
:param truth: Actual datapoints to compare against learned distribution
:param h: output of neural network (after last non-linear transform)
    :param scale_range: For scaling the weight matrices (by default weights are initialized to 1/sqrt(fan_in) for
                        tanh activation and sqrt(2/fan_in) for relu activation).
:return: (tf.Tensor[MB X D], tf.Tensor[MB X 1]) Loss matrix, log_of_determinants of covariance matrices.
"""
fan_in = h.get_shape().as_list()[1]
    dimension = truth.get_shape().as_list()[1]
U = tf.Variable(
fan_scale(scale_range, tf.tanh, h) * tf.truncated_normal([fan_in, dimension + dimension**2],
dtype=tf.float32,
name='U'))
    b = tf.Variable(tf.zeros([dimension + dimension**2]))  # matches the width of U's output
y = tf.matmul(h, U) + b
mu = tf.slice(y, [0, 0], [-1, dimension]) # is MB x dimension
var = tf.slice(y, [0, dimension], [-1, -1]) # is MB x dimension^2
var = tf.reshape(var, [-1, dimension, dimension]) # make it a MB x D x D tensor (var is a superset of the lower triangular part of a Cholesky decomp)
    z = tf.batch_matrix_triangular_solve(var, tf.expand_dims(truth - mu, -1),
                                          lower=True, adjoint=False)  # MB x D x 1
    z = tf.squeeze(z, [2])  # back to MB x D
    inner_prods = tf.reduce_sum(tf.square(z), 1)  # row-wise inner products of z, one value per datapoint
logdet = tf.reduce_sum(tf.log(tf.square(tf.batch_matrix_diag_part(var))), 1) # diag_part converts MB x D x D to MB x D, square and log preserve, then sum makes MB x 1
    loss_column = tf.reshape(inner_prods, [-1, 1])  # MB x 1 ... hard to track individual features' contributions due to correlations
return loss_column, tf.reshape(logdet, [-1, 1])
def multivariate_loss(h, loss_spec, placeholder_dict):
"""
Computes a multivariate loss according to loss_spec.
:param h: Final hidden layer of dnn or rnn. (Post-activation)
:param loss_spec: A tuple of 3-tuples of the form (input_name, loss_function, dimension) where
input_name is the same as a target in datadict,
loss_function takes two parameters, a target and prediction,
and dimension is the dimension of the target.
:param placeholder_dict: A dictionary to store placeholder tensors for target values.
:return loss_matrix: (MB X concatenated_feature_size Tensor) Contains loss for all contributors for each data point.
"""
log_det_list, log_det_names, loss_list, loss_names = [], [], [], []
print("loss distributions:")
for i, (input_name, loss_func, dimension) in enumerate(loss_spec):
print("%s\t%s\t%s\t%s" %(i, input_name, loss_func, dimension))
with tf.variable_scope(input_name):
# this input will be a (classification or regression) target - need to define a placeholder for it
if loss_func == softmax_dist_loss:
x = tf.placeholder(tf.int32, [None])
else:
x = tf.placeholder(tf.float32, [None, dimension])
placeholder_dict["target_%s" % input_name] = x
# predict this input from the current hidden state
if loss_func == softmax_dist_loss: # discrete
component_wise_point_loss = loss_func(x, h, dimension)# MB X 1
else: # continuous
component_wise_point_loss, logdet = loss_func(x, h)# MB X DIM_MULTIVARIATE, MB X 1
if logdet is not None:
log_det_list.append(logdet)
loss_list.append(component_wise_point_loss)
loss_list.extend(log_det_list)
loss_matrix = tf.concat(1, loss_list) # is MB x (total num contributors)
return loss_matrix
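# Sketch (an assumption, not part of the original module): one plausible
# loss_spec for multivariate_loss, mixing a categorical target with two
# continuous ones. The feature names and dimensions are hypothetical.
#
#     loss_spec = (('day_of_week', softmax_dist_loss, 7),
#                  ('counts', diag_mvn_loss, 10),
#                  ('ratios', eyed_mvn_loss, 4))
#     loss_matrix = multivariate_loss(h, loss_spec, placeholder_dict)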
|
|
#!/usr/bin/env python3
"""
Created on Tue Oct 22 02:37:52 2019.
@author: mtageld
"""
import numpy as np
from PIL import Image
from histomicstk.annotations_and_masks.annotation_and_mask_utils import \
get_image_from_htk_response
from histomicstk.annotations_and_masks.masks_to_annotations_handler import (
get_annotation_documents_from_contours, get_contours_from_mask)
from histomicstk.features.compute_intensity_features import \
compute_intensity_features
from histomicstk.preprocessing.color_conversion import (lab_mean_std,
rgb_to_hsi, rgb_to_lab)
from histomicstk.preprocessing.color_deconvolution import (
_reorder_stains, color_deconvolution_routine,
rgb_separate_stains_macenko_pca)
from histomicstk.preprocessing.color_normalization import (
deconvolution_based_normalization, reinhard)
from histomicstk.saliency.tissue_detection import (_get_largest_regions,
get_slide_thumbnail,
get_tissue_mask,
threshold_multichannel)
from histomicstk.utils.general_utils import Base_HTK_Class
Image.MAX_IMAGE_PIXELS = None
class CDT_single_tissue_piece:
"""Detect various regions in a single tissue piece (internal)."""
def __init__(self, cdt, tissue_mask, monitorPrefix=""):
"""Detect whitespace, saliency, etc in one tissue piece (Internal).
Arguments
----------
cdt : object
Cellularity_detector_thresholding instance
tissue_mask : np array
(m x n) mask of the tissue piece at cdt.MAG magnification
monitorPrefix : str
Text to prepend to printed statements
"""
self.cdt = cdt
self.tissue_mask = 0 + tissue_mask
self.monitorPrefix = monitorPrefix
def run(self):
"""Get cellularity and optionally visualize on DSA."""
self.restrict_mask_to_single_tissue_piece()
self.cdt._print2("%s: set_tissue_rgb()" % self.monitorPrefix)
self.set_tissue_rgb()
self.cdt._print2("%s: initialize_labeled_mask()" % self.monitorPrefix)
self.initialize_labeled_mask()
self.cdt._print2(
"%s: assign_components_by_thresholding()" % self.monitorPrefix)
self.assign_components_by_thresholding()
self.cdt._print2(
"%s: color_normalize_unspecified_components()"
% self.monitorPrefix)
self.color_normalize_unspecified_components()
self.cdt._print2(
"%s: find_potentially_cellular_regions()" % self.monitorPrefix)
self.find_potentially_cellular_regions()
self.cdt._print2(
"%s: find_top_cellular_regions()" % self.monitorPrefix)
self.find_top_cellular_regions()
if self.cdt.visualize:
self.cdt._print2("%s: visualize_results()" % self.monitorPrefix)
self.visualize_results()
def restrict_mask_to_single_tissue_piece(self):
"""Only keep relevant part of slide mask."""
# find coordinates at scan magnification
tloc = np.argwhere(self.tissue_mask)
F = self.cdt.slide_info['F_tissue']
self.ymin, self.xmin = [int(j) for j in np.min(tloc, axis=0) * F]
self.ymax, self.xmax = [int(j) for j in np.max(tloc, axis=0) * F]
self.tissue_mask = self.tissue_mask[
int(self.ymin / F): int(self.ymax / F),
int(self.xmin / F): int(self.xmax / F)]
def set_tissue_rgb(self):
"""Load RGB from server for single tissue piece."""
# load RGB for this tissue piece at saliency magnification
getStr = "/item/%s/tiles/region?left=%d&right=%d&top=%d&bottom=%d&encoding=PNG" % (
self.cdt.slide_id, self.xmin, self.xmax, self.ymin, self.ymax
) + "&magnification=%d" % self.cdt.MAG
resp = self.cdt.gc.get(getStr, jsonResp=False)
self.tissue_rgb = get_image_from_htk_response(resp)
def initialize_labeled_mask(self):
"""Initialize labeled components mask."""
from skimage.transform import resize
# resize tissue mask to target mag
self.labeled = resize(
self.tissue_mask, output_shape=self.tissue_rgb.shape[:2],
order=0, preserve_range=True, anti_aliasing=False)
self.labeled[self.labeled > 0] = self.cdt.GTcodes.loc[
'not_specified', 'GT_code']
def assign_components_by_thresholding(self):
"""Get components by thresholding in HSI and LAB spaces."""
# get HSI and LAB images
self.cdt._print2(
"%s: -- get HSI and LAB images ..." % self.monitorPrefix)
tissue_hsi = rgb_to_hsi(self.tissue_rgb)
tissue_lab = rgb_to_lab(self.tissue_rgb)
# extract components using HSI/LAB thresholds
hsi_components = self.cdt.hsi_thresholds.keys()
lab_components = self.cdt.lab_thresholds.keys()
for component in self.cdt.ordered_components:
self.cdt._print2("%s: -- thresholding %s ..." % (
self.monitorPrefix, component))
if component in hsi_components:
lab, _ = threshold_multichannel(
tissue_hsi,
channels=['hue', 'saturation', 'intensity'],
thresholds=self.cdt.hsi_thresholds[component],
just_threshold=False,
get_tissue_mask_kwargs=self.cdt.get_tissue_mask_kwargs2)
elif component in lab_components:
lab, _ = threshold_multichannel(
tissue_lab,
channels=['l', 'a', 'b'],
thresholds=self.cdt.lab_thresholds[component],
just_threshold=True,
get_tissue_mask_kwargs=self.cdt.get_tissue_mask_kwargs2)
else:
raise ValueError("Unknown component name.")
lab[self.labeled == 0] = 0 # restrict to tissue mask
self.labeled[lab > 0] = self.cdt.GTcodes.loc[component, 'GT_code']
# This deals with holes in tissue
self.labeled[self.labeled == 0] = self.cdt.GTcodes.loc[
'outside_tissue', 'GT_code']
def color_normalize_unspecified_components(self):
"""Color normalize "true" tissue components."""
if self.cdt.color_normalization_method == 'reinhard':
self.cdt._print2(
"%s: -- reinhard normalization ..." % self.monitorPrefix)
self.tissue_rgb = reinhard(
self.tissue_rgb,
target_mu=self.cdt.target_stats_reinhard['mu'],
target_sigma=self.cdt.target_stats_reinhard['sigma'],
mask_out=self.labeled != self.cdt.GTcodes
.loc["not_specified", "GT_code"])
elif self.cdt.color_normalization_method == 'macenko_pca':
self.cdt._print2(
"%s: -- macenko normalization ..." % self.monitorPrefix)
self.tissue_rgb = deconvolution_based_normalization(
self.tissue_rgb, W_target=self.cdt.target_W_macenko,
mask_out=self.labeled != self.cdt.GTcodes
.loc["not_specified", "GT_code"],
stain_unmixing_routine_params=self.
cdt.stain_unmixing_routine_params)
else:
self.cdt._print2("%s: -- No normalization!" % self.monitorPrefix)
def find_potentially_cellular_regions(self):
"""Find regions that are potentially cellular."""
from scipy import ndimage
from skimage.filters import gaussian
mask_out = self.labeled != self.cdt.GTcodes.loc[
"not_specified", "GT_code"]
        # deconvolve to get the hematoxylin channel (cellular areas)
        # the returned hematoxylin channel shows MINIMA at cellular areas, so we invert
self.tissue_htx, _, _ = color_deconvolution_routine(
self.tissue_rgb, mask_out=mask_out,
**self.cdt.stain_unmixing_routine_params)
self.tissue_htx = 255 - self.tissue_htx[..., 0]
        # get cellular regions by thresholding the HTX stain channel
self.maybe_cellular, _ = get_tissue_mask(
self.tissue_htx.copy(), deconvolve_first=False,
n_thresholding_steps=1, sigma=self.cdt.cellular_step1_sigma,
min_size=self.cdt.cellular_step1_min_size)
# Second, low-pass filter to dilate and smooth a bit
self.maybe_cellular = gaussian(
0 + (self.maybe_cellular > 0), sigma=self.cdt.cellular_step2_sigma,
output=None, mode='nearest', preserve_range=True)
# find connected components
self.maybe_cellular, _ = ndimage.label(self.maybe_cellular)
# restrict cellular regions to not-otherwise-specified
self.maybe_cellular[mask_out] = 0
# assign to mask
self.labeled[self.maybe_cellular > 0] = self.cdt.GTcodes.loc[
'maybe_cellular', 'GT_code']
def find_top_cellular_regions(self):
"""Keep largest and most cellular regions."""
        # keep only the largest n regions
top_cellular_mask = _get_largest_regions(
self.maybe_cellular, top_n=self.cdt.cellular_largest_n)
top_cellular = self.maybe_cellular.copy()
top_cellular[top_cellular_mask == 0] = 0
# get intensity features of hematoxylin channel for each region
intensity_feats = compute_intensity_features(
im_label=top_cellular, im_intensity=self.tissue_htx,
feature_list=['Intensity.Mean'])
unique = np.unique(top_cellular[top_cellular > 0])
intensity_feats.index = unique
# get top n brightest regions from the largest areas
intensity_feats.sort_values("Intensity.Mean", axis=0, inplace=True)
discard = np.array(intensity_feats.index[:-self.cdt.cellular_top_n])
discard = np.in1d(top_cellular, discard).reshape(top_cellular.shape)
top_cellular[discard] = 0
# integrate into labeled mask
self.labeled[top_cellular > 0] = self.cdt.GTcodes.loc[
'top_cellular', 'GT_code']
def visualize_results(self):
"""Visualize results in DSA."""
# get contours
contours_df = get_contours_from_mask(
MASK=self.labeled, GTCodes_df=self.cdt.GTcodes.copy(),
groups_to_get=self.cdt.groups_to_visualize,
get_roi_contour=self.cdt.get_roi_contour, roi_group='roi',
background_group='not_specified',
discard_nonenclosed_background=True,
MIN_SIZE=15, MAX_SIZE=None,
verbose=self.cdt.verbose == 3,
monitorPrefix=self.monitorPrefix + ": -- contours")
# get annotation docs
annprops = {
'F': self.cdt.slide_info['magnification'] / self.cdt.MAG,
'X_OFFSET': self.xmin,
'Y_OFFSET': self.ymin,
'opacity': self.cdt.opacity,
'lineWidth': self.cdt.lineWidth,
}
annotation_docs = get_annotation_documents_from_contours(
contours_df.copy(), separate_docs_by_group=True,
docnamePrefix=self.cdt.docnameprefix,
annprops=annprops, verbose=self.cdt.verbose == 3,
monitorPrefix=self.monitorPrefix + ": -- annotation docs")
# post annotations to slide
for doc in annotation_docs:
_ = self.cdt.gc.post(
"/annotation?itemId=" + self.cdt.slide_id, json=doc)
class Cellularity_detector_thresholding(Base_HTK_Class):
"""Detect cellular regions in a slide using thresholding.
This uses a thresholding and stain unmixing based pipeline
to detect highly-cellular regions in a slide. The run()
method of the CDT_single_tissue_piece() class has the key
steps of the pipeline. In summary, here are the steps
involved...
1. Detect tissue from background using the RGB slide
thumbnail. Each "tissue piece" is analysed independently
    from here onwards. The tissue_detection module is used
for this step. A high sensitivity, low specificity setting
is used here.
2. Fetch the RGB image of tissue at target magnification. A
low magnification (default is 3.0) is used and is sufficient.
3. The image is converted to HSI and LAB spaces. Thresholding
is performed to detect various non-salient components that
    often throw off the color normalization and deconvolution
algorithms. Thresholding includes both minimum and maximum
values. The user can set whichever thresholds of components
they would like. The development of this workflow was focused
on breast cancer so the thresholded components by default
    are white space (or adipose tissue), dark blue/green blotches
(sharpie, inking at margin, etc), and blood. Whitespace
is obtained by thresholding the saturation and intensity,
while other components are obtained by thresholding LAB.
4. Now that we know where "actual" tissue is, we do a MASKED
color normalization to a prespecified standard. The masking
ensures the normalization routine is not thrown off by non-
tissue components.
5. Perform masked stain unmixing/deconvolution to obtain the
hematoxylin stain channel.
6. Smooth and threshold the hematoxylin channel. Then
perform connected component analysis to find contiguous
potentially-cellular regions.
7. Keep the n largest potentially-cellular regions. Then
from those large regions, keep the m brightest regions
(using hematoxylin channel brightness) as the final
salient/cellular regions.
"""
def __init__(self, gc, slide_id, GTcodes, **kwargs):
"""Init Cellularity_Detector_Superpixels object.
Arguments:
-----------
gc : object
girder client object
slide_id : str
girder ID of slide
GTcodes : pandas Dataframe
the ground truth codes and information dataframe.
WARNING: Modified inside this method so pass a copy.
This is a dataframe that is indexed by the annotation group name
and has the following columns...
group: str
group name of annotation, eg. mostly_tumor
overlay_order: int
how early to place the annotation in the
                mask. Larger values mean this annotation group is overlaid
last and overwrites whatever overlaps it.
GT_code: int
desired ground truth code (in the mask).
                Pixels of this value belong to the corresponding group (class)
is_roi: bool
whether this group encodes an ROI
is_background_class: bool
whether this group is the default fill value inside the ROI.
For example, you may decide that any pixel inside the ROI
is considered stroma.
color: str
rgb format. eg. rgb(255,0,0)
The following indexes must be present...
outside_tissue, not_specified, maybe_cellular, top_cellular
verbose : int
0 - Do not print to screen
1 - Print only key messages
2 - Print everything to screen
3 - print everything including from inner functions
monitorPrefix : str
text to prepend to printed statements
logging_savepath : str or None
where to save run logs
suppress_warnings : bool
whether to suppress warnings
MAG : float
magnification at which to detect cellularity
color_normalization_method : str
Must be in ['reinhard', 'macenko_pca', 'none']
target_W_macenko : np array
3 by 3 stain matrix for macenko normalization
obtained using rgb_separate_stains_macenko_pca()
and reordered such that hematoxylin and eosin are
the first and second channels, respectively.
target_stats_reinhard : dict
                must contain the keys mu and sigma. Mean and sigma
of target image in LAB space for reinhard normalization.
get_tissue_mask_kwargs : dict
kwargs for the get_tissue_mask() method. This is used
to detect tissue from the slide thumbnail.
keep_components : list
                list of strings. Names of components to detect by
                HSI/LAB thresholding. These must be present in the index
of the GTcodes dataframe
get_tissue_mask_kwargs2 : dict
kwargs for get_tissue_mask() used for iterative smoothing
and thresholding the component masks after initial
thresholding using the user-defined HSI/LAB thresholds.
hsi_thresholds : dict
each entry is a dict containing the keys hue, saturation
and intensity. Each of these is in turn also a dict
containing the keys min and max. See default value below
for an example.
lab_thresholds : dict
each entry is a dict containing the keys l, a, and b.
Each of these is in turn also a dict containing the keys
min and max. See default value below for an example.
stain_unmixing_routine_params : dict
kwargs passed as the stain_unmixing_routine_params
argument to the deconvolution_based_normalization method
cellular_step1_sigma : float
sigma of gaussian smoothing for first cellularity step
cellular_step1_min_size : int
minimum contiguous size for first cellularity step
cellular_step2_sigma : float
sigma of gaussian smoothing for second cellularity step
cellular_largest_n : int
                Number of large contiguous cellular regions to keep
cellular_top_n : int
Number of final "top" cellular regions to keep
visualize : bool
whether to visualize results in DSA
opacity : float
opacity of superpixel polygons when posted to DSA.
0 (no opacity) is more efficient to render.
lineWidth : float
width of line when displaying region boundaries.
docnameprefix : str
prefix to add to annotation document name
groups_to_visualize : list
which groups to visualize
get_roi_contour : bool
whether to get the contour of the roi
"""
default_attr = {
# The following are already assigned defaults by Base_HTK_Class
# 'verbose': 1,
# 'monitorPrefix': "",
# 'logging_savepath': None,
# 'suppress_warnings': False,
'MAG': 3.0,
# Must be in ['reinhard', 'macenko_pca', 'none']
'color_normalization_method': 'macenko_pca',
            # TCGA-A2-A3XS-DX1_xmin21421_ymin37486_.png (Amgad et al, 2019)
            # is used as the target image for reinhard & macenko normalization.
            # For macenko, the target stain matrix below was obtained using
            # rgb_separate_stains_macenko_pca() and reordered so that the
            # columns are in the order: Hematoxylin, Eosin, Null
'target_W_macenko': np.array([
[0.5807549, 0.08314027, 0.08213795],
[0.71681094, 0.90081588, 0.41999816],
[0.38588316, 0.42616716, -0.90380025]
]),
            # TCGA-A2-A3XS-DX1_xmin21421_ymin37486_.png (Amgad et al, 2019)
# Reinhard color norm. standard
'target_stats_reinhard': {
'mu': np.array([8.74108109, -0.12440419, 0.0444982]),
'sigma': np.array([0.6135447, 0.10989545, 0.0286032]),
},
# kwargs for getting masks for all tissue pieces (thumbnail)
'get_tissue_mask_kwargs': {
'deconvolve_first': True, 'n_thresholding_steps': 1,
'sigma': 1.5, 'min_size': 500,
},
# components to extract by HSI thresholding
'keep_components': ['blue_sharpie', 'blood', 'whitespace', ],
# kwargs for getting components masks
'get_tissue_mask_kwargs2': {
'deconvolve_first': False, 'n_thresholding_steps': 1,
'sigma': 5.0, 'min_size': 50,
},
# min/max thresholds for HSI and LAB
'hsi_thresholds': {
'whitespace': {
'hue': {'min': 0, 'max': 1.0},
'saturation': {'min': 0, 'max': 0.2},
'intensity': {'min': 220, 'max': 255},
},
},
'lab_thresholds': {
'blue_sharpie': {
'l': {'min': -1000, 'max': 1000},
'a': {'min': -1000, 'max': 1000},
'b': {'min': -1000, 'max': -0.02},
},
'blood': {
'l': {'min': -1000, 'max': 1000},
'a': {'min': 0.02, 'max': 1000},
'b': {'min': -1000, 'max': 1000},
},
},
            # for stain unmixing to deconvolve and/or color normalize
'stain_unmixing_routine_params': {
'stains': ['hematoxylin', 'eosin'],
'stain_unmixing_method': 'macenko_pca',
},
# params for getting cellular regions
'cellular_step1_sigma': 0.,
'cellular_step1_min_size': 100,
'cellular_step2_sigma': 1.5,
'cellular_largest_n': 5,
'cellular_top_n': 2,
# visualization params
'visualize': True,
'opacity': 0,
'lineWidth': 3.0,
'docnameprefix': 'cdt',
'groups_to_visualize': None, # everything
'get_roi_contour': True,
}
default_attr.update(kwargs)
super().__init__(
default_attr=default_attr)
self.color_normalization_method = \
self.color_normalization_method.lower()
assert self.color_normalization_method in [
'reinhard', 'macenko_pca', 'none']
# set attribs
self.gc = gc
self.slide_id = slide_id
self.GTcodes = GTcodes
self.fix_GTcodes()
def fix_GTcodes(self):
"""Fix self.GTcodes (important!)."""
# validate
self.GTcodes.index = self.GTcodes.loc[:, "group"]
necessary_indexes = self.keep_components + [
'outside_tissue', 'not_specified',
'maybe_cellular', 'top_cellular']
assert all(j in list(self.GTcodes.index) for j in necessary_indexes)
        # Make sure the first things laid out are the "background" components
min_val = np.min(self.GTcodes.loc[:, 'overlay_order'])
self.GTcodes.loc['outside_tissue', 'overlay_order'] = min_val - 2
self.GTcodes.loc['not_specified', 'overlay_order'] = min_val - 1
# reorder in overlay order (important)
self.GTcodes.sort_values('overlay_order', axis=0, inplace=True)
self.ordered_components = list(self.GTcodes.loc[:, "group"])
# only keep relevant components (for HSI/LAB thresholding)
for c in self.ordered_components[:]:
if c not in self.keep_components:
self.ordered_components.remove(c)
def run(self):
"""Run full pipeline to detect cellular regions."""
# get mask, each unique value is a single tissue piece
self._print1(
"%s: set_slide_info_and_get_tissue_mask()" % self.monitorPrefix)
labeled = self.set_slide_info_and_get_tissue_mask()
# Go through tissue pieces and do run sequence
unique_tvals = list(set(np.unique(labeled)) - {0, })
tissue_pieces = [None for _ in range(len(unique_tvals))]
for idx, tval in enumerate(unique_tvals):
monitorPrefix = "%s: Tissue piece %d of %d" % (
self.monitorPrefix, idx+1, len(unique_tvals))
self._print1(monitorPrefix)
tissue_pieces[idx] = CDT_single_tissue_piece(
self, tissue_mask=labeled == tval, monitorPrefix=monitorPrefix)
tissue_pieces[idx].run()
# delete unnecessary attributes
del (
tissue_pieces[idx].tissue_rgb, # too much space
tissue_pieces[idx].tissue_mask, # already part of labeled
tissue_pieces[idx].maybe_cellular, # already part of labeled
tissue_pieces[idx].tissue_htx, # unnecessary
)
return tissue_pieces
def set_color_normalization_target(
self, ref_image_path, color_normalization_method='macenko_pca'):
"""Set color normalization values to use from target image.
Arguments
ref_image_path, str
> path to target (reference) image
color_normalization_method, str
> color normalization method to use. Currently, only
> reinhard and macenko_pca are accepted.
"""
from imageio import imread
# read input image
ref_im = np.array(imread(ref_image_path, pilmode='RGB'))
# assign target values
color_normalization_method = color_normalization_method.lower()
if color_normalization_method == 'reinhard':
mu, sigma = lab_mean_std(ref_im)
self.target_stats_reinhard['mu'] = mu
self.target_stats_reinhard['sigma'] = sigma
elif color_normalization_method == 'macenko_pca':
self.target_W_macenko = _reorder_stains(
rgb_separate_stains_macenko_pca(ref_im, I_0=None),
stains=['hematoxylin', 'eosin'])
else:
raise ValueError(
"Unknown color_normalization_method: %s" %
(color_normalization_method))
self.color_normalization_method = color_normalization_method
def set_slide_info_and_get_tissue_mask(self):
"""Set self.slide_info dict and self.labeled tissue mask."""
        # This is a persistent dict to store information about the slide
self.slide_info = self.gc.get('item/%s/tiles' % self.slide_id)
# get tissue mask
thumbnail_rgb = get_slide_thumbnail(self.gc, self.slide_id)
# get labeled tissue mask -- each unique value is one tissue piece
labeled, _ = get_tissue_mask(
thumbnail_rgb, **self.get_tissue_mask_kwargs)
if len(np.unique(labeled)) < 2:
raise ValueError("No tissue detected!")
# Find size relative to WSI
self.slide_info['F_tissue'] = self.slide_info[
'sizeX'] / labeled.shape[1]
return labeled
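# Usage sketch (illustrative assumption, not part of the original module):
# wiring the detector to a girder client. The API URL, key, slide id and
# GTcodes csv path below are hypothetical placeholders.
#
#     import girder_client
#     import pandas as pd
#     gc = girder_client.GirderClient(apiUrl='https://<server>/api/v1')
#     gc.authenticate(apiKey='<api key>')
#     gtcodes = pd.read_csv('<path to GTcodes csv>')  # has a 'group' column
#     cdt = Cellularity_detector_thresholding(
#         gc, slide_id='<slide id>', GTcodes=gtcodes.copy(), MAG=3.0,
#         visualize=False)
#     tissue_pieces = cdt.run()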
|
|
# -*- coding: ascii -*-
#
# Copyright 2006-2012
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
===============================
WSGI Tackling Framework (WTF)
===============================
WSGI Tackling Framework (WTF).
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
__license__ = "Apache License, Version 2.0"
__version__ = ('0.8.24', False, 4270)
class Error(Exception):
""" Base exception for this package """
pass
class WtfWarning(Warning):
""" Base warning for this package """
@classmethod
def emit(cls, message, stacklevel=1):
""" Emit a warning of this very category """
import warnings as _warnings
_warnings.warn(message, cls, max(1, stacklevel) + 1)
def _extendquotes(envkey=None):
""" Extend _urllib.quote and _urllib.quote_plus
:Parameters:
    - `envkey`: The environment key to lookup. If this key is set and ``1``
      the quote functions won't be replaced and this function is a no-op.
If unset or ``None``, no lookup is made.
:Types:
- `envkey`: ``str``
"""
import os
if envkey is not None and os.environ.get(envkey) == '1':
return
import urllib
from wtf import webutil
urllib.quote = webutil.quote
urllib.quote_plus = webutil.quote_plus
urllib.unquote = webutil.unquote
urllib.unquote_plus = webutil.unquote_plus
def _fixcp1252(envkey=None):
"""
Fixup cp1252 codec in order to use it as a real superset of latin-1
:Parameters:
- `envkey`: The environment key to lookup. If this key is set and ``1``
the charset definition won't be fixed and this function is a no-op.
If unset or ``None``, no lookup is made.
:Types:
- `envkey`: ``str``
"""
import os
if envkey is not None and os.environ.get(envkey) == '1':
return
import codecs
from encodings import cp1252
try:
dmap = cp1252.decoding_map # pylint: disable = E1101
except AttributeError:
dtable = list(cp1252.decoding_table)
codepoint = 0
try:
while True:
codepoint = dtable.index(u'\ufffe', codepoint)
dtable[codepoint] = unichr(codepoint)
except ValueError:
# no more undefined points there
pass
dtable = u''.join(dtable)
cp1252.decoding_table = dtable
cp1252.encoding_table = codecs.charmap_build(dtable)
else:
# Python 2.4
for key, value in dmap.iteritems():
if value is None:
dmap[key] = key
cp1252.encoding_map = codecs.make_encoding_map(dmap)
def _register_defaults(envkey=None):
"""
    Register default implementations
:Parameters:
- `envkey`: The environment key to lookup. If this environment variable
      is set and ``1`` nothing will be registered and this function
is a no-op. If unset or ``None``, no lookup is made.
:Types:
- `envkey`: ``str``
"""
import os
if envkey is not None and os.environ.get(envkey) == '1':
return
from wtf.opi import register, daemon
register('daemon', daemon.DaemonOPI)
from wtf.opi.worker import register, threaded, single
register('threaded', threaded.ThreadedWorker)
register('single', single.SingleWorker)
from wtf.impl import register, scgi, http
register('scgi', scgi.SCGIServer)
register('http', http.HTTPServer)
def c_override(envkey=None):
"""
Factory for creating a module factory
:Parameters:
- `envkey`: Name of the environment variable which has to be "1" in
order to disable the C override.
:Types:
- `envkey`: ``str``
:return: Module factory function
:rtype: ``callable``
"""
import os
enabled = envkey is None or os.environ.get(envkey) != '1'
if enabled:
def module_factory(modname):
"""
Module factory
:Parameters:
- `modname`: dotted module name relative to the wtf package
:Types:
- `modname`: ``str``
:return: The imported module or ``None`` on import disabled or
error
:rtype: ``module``
"""
try:
mod = __import__(
'wtf.%s' % modname, globals(), locals(), ['*']
)
except ImportError:
mod = None
return mod
else:
def module_factory(modname):
"""
Module factory
:Parameters:
- `modname`: dotted module name relative to the wtf package
:Types:
- `modname`: ``str``
:return: The imported module or ``None`` on import disabled or
error
:rtype: ``module``
"""
# pylint: disable = W0613
return None
module_factory.enabled = enabled # pylint: disable = W0612
return module_factory
c_override = c_override('WTF_NO_C_OVERRIDE')
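# Usage sketch (illustrative assumption, not part of the original module):
# after the reassignment above, ``c_override`` is the module factory itself,
# so an optional C accelerator can be tried with a pure-python fallback.
# The module name below is hypothetical.
#
#     _c = c_override('impl._c_helpers')
#     if _c is not None:
#         pass  # use the C implementation
#     else:
#         pass  # fall back to the pure-python code path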
_fixcp1252('WTF_NO_CP1252_OVERRIDE')
_extendquotes('WTF_NO_QUOTE_OVERRIDE')
_register_defaults('WTF_NO_REGISTER_DEFAULT')
from wtf import util as _util
#: Version of the package
#:
#: :Type: `wtf.util.Version`
version = _util.Version(*__version__)
__all__ = _util.find_public(globals())
del _util
|
|
import os
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import numpy as np
from spglib import (get_ir_reciprocal_mesh,
get_stabilized_reciprocal_mesh,
get_symmetry_dataset)
from vasp import read_vasp
data_dir = os.path.dirname(os.path.abspath(__file__))
result_ir_rec_mesh = (""" 0 0 0 0
1 1 0 0
2 2 0 0
1 -1 0 0
1 0 1 0
5 1 1 0
6 2 1 0
5 -1 1 0
2 0 2 0
6 1 2 0
10 2 2 0
6 -1 2 0
1 0 -1 0
5 1 -1 0
6 2 -1 0
5 -1 -1 0
1 0 0 1
5 1 0 1
6 2 0 1
5 -1 0 1
5 0 1 1
21 1 1 1
22 2 1 1
21 -1 1 1
6 0 2 1
22 1 2 1
26 2 2 1
22 -1 2 1
5 0 -1 1
21 1 -1 1
22 2 -1 1
21 -1 -1 1
2 0 0 2
6 1 0 2
10 2 0 2
6 -1 0 2
6 0 1 2
22 1 1 2
26 2 1 2
22 -1 1 2
10 0 2 2
26 1 2 2
42 2 2 2
26 -1 2 2
6 0 -1 2
22 1 -1 2
26 2 -1 2
22 -1 -1 2
1 0 0 -1
5 1 0 -1
6 2 0 -1
5 -1 0 -1
5 0 1 -1
21 1 1 -1
22 2 1 -1
21 -1 1 -1
6 0 2 -1
22 1 2 -1
26 2 2 -1
22 -1 2 -1
5 0 -1 -1
21 1 -1 -1
22 2 -1 -1
21 -1 -1 -1""", """ 0 0 0 0
1 1 0 0
2 2 0 0
1 -1 0 0
1 0 1 0
5 1 1 0
5 2 1 0
1 -1 1 0
2 0 2 0
5 1 2 0
2 2 2 0
5 -1 2 0
1 0 -1 0
1 1 -1 0
5 2 -1 0
5 -1 -1 0
16 0 0 1
17 1 0 1
18 2 0 1
17 -1 0 1
17 0 1 1
21 1 1 1
21 2 1 1
17 -1 1 1
18 0 2 1
21 1 2 1
18 2 2 1
21 -1 2 1
17 0 -1 1
17 1 -1 1
21 2 -1 1
21 -1 -1 1""")
result_ir_rec_mesh_distortion = (""" 0 0 0 0
1 1 0 0
1 -1 0 0
3 0 1 0
4 1 1 0
4 -1 1 0
6 0 2 0
7 1 2 0
7 -1 2 0
3 0 -1 0
4 1 -1 0
4 -1 -1 0
3 0 0 1
4 1 0 1
4 -1 0 1
15 0 1 1
16 1 1 1
16 -1 1 1
18 0 2 1
19 1 2 1
19 -1 2 1
15 0 -1 1
16 1 -1 1
16 -1 -1 1
6 0 0 2
7 1 0 2
7 -1 0 2
18 0 1 2
19 1 1 2
19 -1 1 2
30 0 2 2
31 1 2 2
31 -1 2 2
18 0 -1 2
19 1 -1 2
19 -1 -1 2
3 0 0 -1
4 1 0 -1
4 -1 0 -1
15 0 1 -1
16 1 1 -1
16 -1 1 -1
18 0 2 -1
19 1 2 -1
19 -1 2 -1
15 0 -1 -1
16 1 -1 -1
16 -1 -1 -1""", """ 0 0 0 0
1 1 0 0
1 -1 0 0
3 0 1 0
4 1 1 0
5 -1 1 0
6 0 2 0
7 1 2 0
8 -1 2 0
6 0 -2 0
8 1 -2 0
7 -1 -2 0
3 0 -1 0
5 1 -1 0
4 -1 -1 0""", """ 0 0 0 0
1 1 0 0
1 -1 0 0
3 0 1 0
4 1 1 0
4 -1 1 0
3 0 2 0
4 1 2 0
4 -1 2 0
0 0 -1 0
1 1 -1 0
1 -1 -1 0
12 0 0 1
13 1 0 1
13 -1 0 1
15 0 1 1
16 1 1 1
16 -1 1 1
15 0 2 1
16 1 2 1
16 -1 2 1
12 0 -1 1
13 1 -1 1
13 -1 -1 1
24 0 0 2
25 1 0 2
25 -1 0 2
27 0 1 2
28 1 1 2
28 -1 1 2
27 0 2 2
28 1 2 2
28 -1 2 2
24 0 -1 2
25 1 -1 2
25 -1 -1 2
12 0 0 -1
13 1 0 -1
13 -1 0 -1
15 0 1 -1
16 1 1 -1
16 -1 1 -1
15 0 2 -1
16 1 2 -1
16 -1 2 -1
12 0 -1 -1
13 1 -1 -1
13 -1 -1 -1""", """ 0 0 0 0
1 1 0 0
2 -1 0 0
3 0 1 0
4 1 1 0
5 -1 1 0
6 0 2 0
7 1 2 0
7 -1 2 0
3 0 -2 0
5 1 -2 0
4 -1 -2 0
0 0 -1 0
2 1 -1 0
1 -1 -1 0""")
class TestReciprocalMesh(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_get_ir_reciprocal_mesh(self):
file_and_mesh = (
[os.path.join(data_dir, "data", "cubic", "POSCAR-217"), [4, 4, 4]],
[os.path.join(data_dir, "data", "hexagonal", "POSCAR-182"),
[4, 4, 2]])
i = 0
for fname, mesh in file_and_mesh:
cell = read_vasp(fname)
ir_rec_mesh = get_ir_reciprocal_mesh(mesh, cell)
(mapping_table, grid_address) = ir_rec_mesh
# for gp, ga in zip(mapping_table, grid_address):
# print("%4d %3d %3d %3d" % (gp, ga[0], ga[1], ga[2]))
# print("")
data = np.loadtxt(StringIO(result_ir_rec_mesh[i]), dtype='intc')
np.testing.assert_equal(data[:, 0], mapping_table)
np.testing.assert_equal(data[:, 1:4], grid_address)
i += 1
def test_get_stabilized_reciprocal_mesh(self):
file_and_mesh = (
[os.path.join(data_dir, "data", "cubic", "POSCAR-217"), [4, 4, 4]],
[os.path.join(data_dir, "data", "hexagonal", "POSCAR-182"),
[4, 4, 2]])
i = 0
for fname, mesh in file_and_mesh:
cell = read_vasp(fname)
rotations = get_symmetry_dataset(cell)['rotations']
ir_rec_mesh = get_stabilized_reciprocal_mesh(mesh, rotations)
(mapping_table, grid_address) = ir_rec_mesh
data = np.loadtxt(StringIO(result_ir_rec_mesh[i]), dtype='intc')
np.testing.assert_equal(data[:, 0], mapping_table)
np.testing.assert_equal(data[:, 1:4], grid_address)
i += 1
def test_get_ir_reciprocal_mesh_distortion(self):
file_and_mesh = (
[os.path.join(data_dir, "data", "cubic", "POSCAR-217"), [3, 4, 4]],
[os.path.join(data_dir, "data", "hexagonal", "POSCAR-182"),
[3, 5, 1]])
i = 0
for is_shift in ([0, 0, 0], [0, 1, 0]):
for fname, mesh in file_and_mesh:
cell = read_vasp(fname)
ir_rec_mesh = get_ir_reciprocal_mesh(mesh, cell,
is_shift=is_shift)
(mapping_table, grid_address) = ir_rec_mesh
# for gp, ga in zip(mapping_table, grid_address):
# print("%4d %3d %3d %3d" % (gp, ga[0], ga[1], ga[2]))
# print("")
data = np.loadtxt(StringIO(result_ir_rec_mesh_distortion[i]),
dtype='intc')
np.testing.assert_equal(data[:, 0], mapping_table)
np.testing.assert_equal(data[:, 1:4], grid_address)
i += 1
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestReciprocalMesh)
unittest.TextTestRunner(verbosity=2).run(suite)
# unittest.main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Michael Still and Canonical Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import cStringIO
import hashlib
import json
import os
import time
from oslo.config import cfg
from nova.compute import vm_states
from nova import conductor
from nova import db
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import test
from nova import utils
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as virtutils
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
class ImageCacheManagerTestCase(test.TestCase):
def setUp(self):
super(ImageCacheManagerTestCase, self).setUp()
self.stock_instance_names = set(['instance-00000001',
'instance-00000002',
'instance-00000003',
'banana-42-hamster'])
def test_read_stored_checksum_missing(self):
self.stubs.Set(os.path, 'exists', lambda x: False)
csum = imagecache.read_stored_checksum('/tmp/foo', timestamped=False)
self.assertEquals(csum, None)
def test_read_stored_checksum(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'))
csum_input = '{"sha1": "fdghkfhkgjjksfdgjksjkghsdf"}\n'
fname = os.path.join(tmpdir, 'aaa')
info_fname = imagecache.get_info_filename(fname)
f = open(info_fname, 'w')
f.write(csum_input)
f.close()
csum_output = imagecache.read_stored_checksum(fname,
timestamped=False)
self.assertEquals(csum_input.rstrip(),
'{"sha1": "%s"}' % csum_output)
def test_read_stored_checksum_legacy_essex(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'))
fname = os.path.join(tmpdir, 'aaa')
old_fname = fname + '.sha1'
f = open(old_fname, 'w')
f.write('fdghkfhkgjjksfdgjksjkghsdf')
f.close()
csum_output = imagecache.read_stored_checksum(fname,
timestamped=False)
self.assertEquals(csum_output, 'fdghkfhkgjjksfdgjksjkghsdf')
self.assertFalse(os.path.exists(old_fname))
info_fname = imagecache.get_info_filename(fname)
self.assertTrue(os.path.exists(info_fname))
def test_list_base_images(self):
listing = ['00000001',
'ephemeral_0_20_None',
'17d1b00b81642842e514494a78e804e9a511637c_5368709120.info',
'00000004']
images = ['e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
'e97222e91fc4241f49a7f520d1dcf446751129b3',
'17d1b00b81642842e514494a78e804e9a511637c',
'17d1b00b81642842e514494a78e804e9a511637c_5368709120',
'17d1b00b81642842e514494a78e804e9a511637c_10737418240']
listing.extend(images)
self.stubs.Set(os, 'listdir', lambda x: listing)
self.stubs.Set(os.path, 'isfile', lambda x: True)
base_dir = '/var/lib/nova/instances/_base'
self.flags(instances_path='/var/lib/nova/instances')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._list_base_images(base_dir)
sanitized = []
for ent in image_cache_manager.unexplained_images:
sanitized.append(ent.replace(base_dir + '/', ''))
        sanitized.sort()
        images.sort()
        self.assertEquals(sanitized, images)
expected = os.path.join(base_dir,
'e97222e91fc4241f49a7f520d1dcf446751129b3')
self.assertTrue(expected in image_cache_manager.unexplained_images)
expected = os.path.join(base_dir,
'17d1b00b81642842e514494a78e804e9a511637c_'
'10737418240')
self.assertTrue(expected in image_cache_manager.unexplained_images)
unexpected = os.path.join(base_dir, '00000004')
self.assertFalse(unexpected in image_cache_manager.unexplained_images)
for ent in image_cache_manager.unexplained_images:
self.assertTrue(ent.startswith(base_dir))
self.assertEquals(len(image_cache_manager.originals), 2)
expected = os.path.join(base_dir,
'17d1b00b81642842e514494a78e804e9a511637c')
self.assertTrue(expected in image_cache_manager.originals)
unexpected = os.path.join(base_dir,
'17d1b00b81642842e514494a78e804e9a511637c_'
'10737418240')
self.assertFalse(unexpected in image_cache_manager.originals)
def test_list_running_instances(self):
all_instances = [{'image_ref': '1',
'host': CONF.host,
'name': 'inst-1',
'uuid': '123',
'vm_state': '',
'task_state': ''},
{'image_ref': '2',
'host': CONF.host,
'name': 'inst-2',
'uuid': '456',
'vm_state': '',
'task_state': ''},
{'image_ref': '2',
'kernel_id': '21',
'ramdisk_id': '22',
'host': 'remotehost',
'name': 'inst-3',
'uuid': '789',
'vm_state': '',
'task_state': ''}]
image_cache_manager = imagecache.ImageCacheManager()
# The argument here should be a context, but it's mocked out
image_cache_manager._list_running_instances(None, all_instances)
self.assertEqual(len(image_cache_manager.used_images), 4)
self.assertTrue(image_cache_manager.used_images['1'] ==
(1, 0, ['inst-1']))
self.assertTrue(image_cache_manager.used_images['2'] ==
(1, 1, ['inst-2', 'inst-3']))
self.assertTrue(image_cache_manager.used_images['21'] ==
(0, 1, ['inst-3']))
self.assertTrue(image_cache_manager.used_images['22'] ==
(0, 1, ['inst-3']))
self.assertTrue('inst-1' in image_cache_manager.instance_names)
self.assertTrue('123' in image_cache_manager.instance_names)
self.assertEqual(len(image_cache_manager.image_popularity), 4)
self.assertEqual(image_cache_manager.image_popularity['1'], 1)
self.assertEqual(image_cache_manager.image_popularity['2'], 2)
self.assertEqual(image_cache_manager.image_popularity['21'], 1)
self.assertEqual(image_cache_manager.image_popularity['22'], 1)
def test_list_resizing_instances(self):
all_instances = [{'image_ref': '1',
'host': CONF.host,
'name': 'inst-1',
'uuid': '123',
'vm_state': vm_states.RESIZED,
'task_state': None}]
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._list_running_instances(None, all_instances)
self.assertEqual(len(image_cache_manager.used_images), 1)
self.assertTrue(image_cache_manager.used_images['1'] ==
(1, 0, ['inst-1']))
self.assertTrue(image_cache_manager.instance_names ==
set(['inst-1', '123', 'inst-1_resize', '123_resize']))
self.assertEqual(len(image_cache_manager.image_popularity), 1)
self.assertEqual(image_cache_manager.image_popularity['1'], 1)
def test_list_backing_images_small(self):
self.stubs.Set(os, 'listdir',
lambda x: ['_base', 'instance-00000001',
'instance-00000002', 'instance-00000003'])
self.stubs.Set(os.path, 'exists',
lambda x: x.find('instance-') != -1)
self.stubs.Set(virtutils, 'get_disk_backing_file',
lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
found = os.path.join(CONF.instances_path, CONF.base_dir_name,
'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [found]
image_cache_manager.instance_names = self.stock_instance_names
inuse_images = image_cache_manager._list_backing_images()
self.assertEquals(inuse_images, [found])
self.assertEquals(len(image_cache_manager.unexplained_images), 0)
def test_list_backing_images_resized(self):
self.stubs.Set(os, 'listdir',
lambda x: ['_base', 'instance-00000001',
'instance-00000002', 'instance-00000003'])
self.stubs.Set(os.path, 'exists',
lambda x: x.find('instance-') != -1)
self.stubs.Set(virtutils, 'get_disk_backing_file',
lambda x: ('e97222e91fc4241f49a7f520d1dcf446751129b3_'
'10737418240'))
found = os.path.join(CONF.instances_path, CONF.base_dir_name,
'e97222e91fc4241f49a7f520d1dcf446751129b3_'
'10737418240')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [found]
image_cache_manager.instance_names = self.stock_instance_names
inuse_images = image_cache_manager._list_backing_images()
self.assertEquals(inuse_images, [found])
self.assertEquals(len(image_cache_manager.unexplained_images), 0)
def test_list_backing_images_instancename(self):
self.stubs.Set(os, 'listdir',
lambda x: ['_base', 'banana-42-hamster'])
self.stubs.Set(os.path, 'exists',
lambda x: x.find('banana-42-hamster') != -1)
self.stubs.Set(virtutils, 'get_disk_backing_file',
lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
found = os.path.join(CONF.instances_path, CONF.base_dir_name,
'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [found]
image_cache_manager.instance_names = self.stock_instance_names
inuse_images = image_cache_manager._list_backing_images()
self.assertEquals(inuse_images, [found])
self.assertEquals(len(image_cache_manager.unexplained_images), 0)
def test_find_base_file_nothing(self):
self.stubs.Set(os.path, 'exists', lambda x: False)
base_dir = '/var/lib/nova/instances/_base'
fingerprint = '549867354867'
image_cache_manager = imagecache.ImageCacheManager()
res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
self.assertEqual(0, len(res))
def test_find_base_file_small(self):
fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
self.stubs.Set(os.path, 'exists',
lambda x: x.endswith('%s_sm' % fingerprint))
base_dir = '/var/lib/nova/instances/_base'
image_cache_manager = imagecache.ImageCacheManager()
res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
base_file = os.path.join(base_dir, fingerprint + '_sm')
self.assertTrue(res == [(base_file, True, False)])
def test_find_base_file_resized(self):
fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
listing = ['00000001',
'ephemeral_0_20_None',
'968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
'00000004']
self.stubs.Set(os, 'listdir', lambda x: listing)
self.stubs.Set(os.path, 'exists',
lambda x: x.endswith('%s_10737418240' % fingerprint))
self.stubs.Set(os.path, 'isfile', lambda x: True)
base_dir = '/var/lib/nova/instances/_base'
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._list_base_images(base_dir)
res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
base_file = os.path.join(base_dir, fingerprint + '_10737418240')
self.assertTrue(res == [(base_file, False, True)])
def test_find_base_file_all(self):
fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
listing = ['00000001',
'ephemeral_0_20_None',
'968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_sm',
'968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
'00000004']
self.stubs.Set(os, 'listdir', lambda x: listing)
self.stubs.Set(os.path, 'exists', lambda x: True)
self.stubs.Set(os.path, 'isfile', lambda x: True)
base_dir = '/var/lib/nova/instances/_base'
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._list_base_images(base_dir)
res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
base_file1 = os.path.join(base_dir, fingerprint)
base_file2 = os.path.join(base_dir, fingerprint + '_sm')
base_file3 = os.path.join(base_dir, fingerprint + '_10737418240')
self.assertTrue(res == [(base_file1, False, False),
(base_file2, True, False),
(base_file3, False, True)])
@contextlib.contextmanager
def _intercept_log_messages(self):
try:
mylog = logging.getLogger('nova')
stream = cStringIO.StringIO()
handler = logging.logging.StreamHandler(stream)
handler.setFormatter(logging.ContextFormatter())
mylog.logger.addHandler(handler)
yield stream
finally:
mylog.logger.removeHandler(handler)
def _make_checksum(self, tmpdir):
testdata = ('OpenStack Software delivers a massively scalable cloud '
'operating system.')
fname = os.path.join(tmpdir, 'aaa')
info_fname = imagecache.get_info_filename(fname)
with open(fname, 'w') as f:
f.write(testdata)
return fname, info_fname, testdata
def test_verify_checksum(self):
img = {'container_format': 'ami', 'id': '42'}
self.flags(checksum_base_images=True)
with self._intercept_log_messages() as stream:
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'))
fname, info_fname, testdata = self._make_checksum(tmpdir)
# Checksum is valid
f = open(info_fname, 'w')
csum = hashlib.sha1()
csum.update(testdata)
f.write('{"sha1": "%s"}\n' % csum.hexdigest())
f.close()
image_cache_manager = imagecache.ImageCacheManager()
res = image_cache_manager._verify_checksum(img, fname)
self.assertTrue(res)
def test_verify_checksum_disabled(self):
img = {'container_format': 'ami', 'id': '42'}
self.flags(checksum_base_images=False)
with self._intercept_log_messages() as stream:
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'))
fname, info_fname, testdata = self._make_checksum(tmpdir)
# Checksum is valid
f = open(info_fname, 'w')
csum = hashlib.sha1()
csum.update(testdata)
f.write('{"sha1": "%s"}\n' % csum.hexdigest())
f.close()
image_cache_manager = imagecache.ImageCacheManager()
res = image_cache_manager._verify_checksum(img, fname)
self.assertTrue(res is None)
def test_verify_checksum_invalid_json(self):
img = {'container_format': 'ami', 'id': '42'}
self.flags(checksum_base_images=True)
with self._intercept_log_messages() as stream:
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'))
fname, info_fname, testdata = self._make_checksum(tmpdir)
# Checksum is invalid, and not json
f = open(info_fname, 'w')
f.write('banana')
f.close()
image_cache_manager = imagecache.ImageCacheManager()
res = image_cache_manager._verify_checksum(
img, fname, create_if_missing=False)
self.assertFalse(res)
log = stream.getvalue()
# NOTE(mikal): this is a skip not a fail because the file is
# present, but is not in valid json format and therefore is
# skipped.
self.assertNotEqual(log.find('image verification skipped'), -1)
def test_verify_checksum_invalid_repaired(self):
img = {'container_format': 'ami', 'id': '42'}
self.flags(checksum_base_images=True)
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'))
fname, info_fname, testdata = self._make_checksum(tmpdir)
# Checksum is invalid, and not json
f = open(info_fname, 'w')
f.write('banana')
f.close()
image_cache_manager = imagecache.ImageCacheManager()
res = image_cache_manager._verify_checksum(
img, fname, create_if_missing=True)
self.assertTrue(res is None)
def test_verify_checksum_invalid(self):
img = {'container_format': 'ami', 'id': '42'}
self.flags(checksum_base_images=True)
with self._intercept_log_messages() as stream:
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'))
fname, info_fname, testdata = self._make_checksum(tmpdir)
# Checksum is invalid, but is in valid json
f = open(info_fname, 'w')
f.write('{"sha1": "banana"}')
f.close()
image_cache_manager = imagecache.ImageCacheManager()
res = image_cache_manager._verify_checksum(img, fname)
self.assertFalse(res)
log = stream.getvalue()
self.assertNotEqual(log.find('image verification failed'), -1)
def test_verify_checksum_file_missing(self):
self.flags(checksum_base_images=True)
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'))
fname, info_fname, testdata = self._make_checksum(tmpdir)
# Checksum file missing
image_cache_manager = imagecache.ImageCacheManager()
res = image_cache_manager._verify_checksum('aaa', fname)
self.assertEquals(res, None)
# Checksum requests for a file with no checksum now have the
# side effect of creating the checksum
self.assertTrue(os.path.exists(info_fname))
@contextlib.contextmanager
def _make_base_file(self, checksum=True):
"""Make a base file for testing."""
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'))
fname = os.path.join(tmpdir, 'aaa')
base_file = open(fname, 'w')
base_file.write('data')
base_file.close()
base_file = open(fname, 'r')
if checksum:
imagecache.write_stored_checksum(fname)
base_file.close()
yield fname
def test_remove_base_file(self):
with self._make_base_file() as fname:
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._remove_base_file(fname)
info_fname = imagecache.get_info_filename(fname)
# Files are initially too new to delete
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(info_fname))
# Old files get cleaned up though
os.utime(fname, (-1, time.time() - 3601))
image_cache_manager._remove_base_file(fname)
self.assertFalse(os.path.exists(fname))
self.assertFalse(os.path.exists(info_fname))
def test_remove_base_file_original(self):
with self._make_base_file() as fname:
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.originals = [fname]
image_cache_manager._remove_base_file(fname)
info_fname = imagecache.get_info_filename(fname)
# Files are initially too new to delete
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(info_fname))
# This file should stay longer than a resized image
os.utime(fname, (-1, time.time() - 3601))
image_cache_manager._remove_base_file(fname)
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(info_fname))
# Originals don't stay forever though
os.utime(fname, (-1, time.time() - 3600 * 25))
image_cache_manager._remove_base_file(fname)
self.assertFalse(os.path.exists(fname))
self.assertFalse(os.path.exists(info_fname))
def test_remove_base_file_dne(self):
# This test is solely to execute the "does not exist" code path. We
# don't expect the method being tested to do anything in this case.
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'))
fname = os.path.join(tmpdir, 'aaa')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._remove_base_file(fname)
def test_remove_base_file_oserror(self):
with self._intercept_log_messages() as stream:
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'))
fname = os.path.join(tmpdir, 'aaa')
os.mkdir(fname)
os.utime(fname, (-1, time.time() - 3601))
                # This will raise an OSError because the path is a directory, not a file
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._remove_base_file(fname)
self.assertTrue(os.path.exists(fname))
self.assertNotEqual(stream.getvalue().find('Failed to remove'),
-1)
def test_handle_base_image_unused(self):
img = '123'
with self._make_base_file() as fname:
os.utime(fname, (-1, time.time() - 3601))
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager._handle_base_image(img, fname)
self.assertEquals(image_cache_manager.unexplained_images, [])
self.assertEquals(image_cache_manager.removable_base_files,
[fname])
self.assertEquals(image_cache_manager.corrupt_base_files, [])
def test_handle_base_image_used(self):
self.stubs.Set(virtutils, 'chown', lambda x, y: None)
img = '123'
with self._make_base_file() as fname:
os.utime(fname, (-1, time.time() - 3601))
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
image_cache_manager._handle_base_image(img, fname)
self.assertEquals(image_cache_manager.unexplained_images, [])
self.assertEquals(image_cache_manager.removable_base_files, [])
self.assertEquals(image_cache_manager.corrupt_base_files, [])
def test_handle_base_image_used_remotely(self):
self.stubs.Set(virtutils, 'chown', lambda x, y: None)
img = '123'
with self._make_base_file() as fname:
os.utime(fname, (-1, time.time() - 3601))
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager.used_images = {'123': (0, 1, ['banana-42'])}
image_cache_manager._handle_base_image(img, fname)
self.assertEquals(image_cache_manager.unexplained_images, [])
self.assertEquals(image_cache_manager.removable_base_files, [])
self.assertEquals(image_cache_manager.corrupt_base_files, [])

    def test_handle_base_image_absent(self):
        img = '123'

        with self._intercept_log_messages() as stream:
            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
            image_cache_manager._handle_base_image(img, None)

            self.assertEquals(image_cache_manager.unexplained_images, [])
            self.assertEquals(image_cache_manager.removable_base_files, [])
            self.assertEquals(image_cache_manager.corrupt_base_files, [])
            self.assertNotEqual(stream.getvalue().find('an absent base file'),
                                -1)

    def test_handle_base_image_used_missing(self):
        img = '123'

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            self.flags(image_info_filename_pattern=('$instances_path/'
                                                    '%(image)s.info'))

            fname = os.path.join(tmpdir, 'aaa')

            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.unexplained_images = [fname]
            image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
            image_cache_manager._handle_base_image(img, fname)

            self.assertEquals(image_cache_manager.unexplained_images, [])
            self.assertEquals(image_cache_manager.removable_base_files, [])
            self.assertEquals(image_cache_manager.corrupt_base_files, [])

    def test_handle_base_image_checksum_fails(self):
        self.flags(checksum_base_images=True)
        self.stubs.Set(virtutils, 'chown', lambda x, y: None)

        img = '123'

        with self._make_base_file() as fname:
            with open(fname, 'w') as f:
                f.write('banana')
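
            # Record a sha1 in the info file that cannot match the
            # 'banana' contents written above, so checksum verification
            # must fail and the file must be flagged as corrupt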
            d = {'sha1': '21323454'}
            with open('%s.info' % fname, 'w') as f:
                f.write(json.dumps(d))

            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.unexplained_images = [fname]
            image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
            image_cache_manager._handle_base_image(img, fname)

            self.assertEquals(image_cache_manager.unexplained_images, [])
            self.assertEquals(image_cache_manager.removable_base_files, [])
            self.assertEquals(image_cache_manager.corrupt_base_files,
                              [fname])

    def test_verify_base_images(self):
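        # sha1 hexdigests of the image ids '1', '21', '22' and '42'
        # (i.e. hashlib.sha1('1').hexdigest() and so on); '1', '21' and
        # '22' are referenced by the fake instances below, while '42' is
        # not referenced by anything and should end up removable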
        hashed_1 = '356a192b7913b04c54574d18c28d46e6395428ab'
        hashed_21 = '472b07b9fcf2c2451e8781e944bf5f77cd8457c8'
        hashed_22 = '12c6fc06c99a462375eeb3f43dfd832b08ca9e17'
        hashed_42 = '92cfceb39d57d914ed8b14d0e37643de0797ae56'

        self.flags(instances_path='/instance_path')
        self.flags(base_dir_name='_base')
        self.flags(remove_unused_base_images=True)
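
        # Fake contents of the _base directory: old-style numbered
        # entries, an ephemeral disk base, '_sm' small images, hashed base
        # images, and resized variants whose suffix is the size in bytes
        # (5368709120 is 5G, 10737418240 is 10G)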
        base_file_list = ['00000001',
                          'ephemeral_0_20_None',
                          'e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
                          'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
                          hashed_42,
                          hashed_1,
                          hashed_21,
                          hashed_22,
                          '%s_5368709120' % hashed_1,
                          '%s_10737418240' % hashed_1,
                          '00000004']

        def fq_path(path):
            return os.path.join('/instance_path/_base/', path)

        # Fake base directory existence
        orig_exists = os.path.exists

        def exists(path):
            # The python coverage tool got angry with my overly broad mocks
            if not path.startswith('/instance_path'):
                return orig_exists(path)

            if path in ['/instance_path',
                        '/instance_path/_base',
                        '/instance_path/instance-1/disk',
                        '/instance_path/instance-2/disk',
                        '/instance_path/instance-3/disk',
                        '/instance_path/_base/%s.info' % hashed_42]:
                return True

            for p in base_file_list:
                if path == fq_path(p):
                    return True
                if path == fq_path(p) + '.info':
                    return False

            if path in ['/instance_path/_base/%s_sm' % i for i in [hashed_1,
                                                                   hashed_21,
                                                                   hashed_22,
                                                                   hashed_42]]:
                return False

            self.fail('Unexpected path existence check: %s' % path)

        self.stubs.Set(os.path, 'exists', lambda x: exists(x))
        self.stubs.Set(virtutils, 'chown', lambda x, y: None)

        # We need to stub utime as well
        orig_utime = os.utime
        self.stubs.Set(os, 'utime', lambda x, y: None)

        # Fake up some instances in the instances directory
        orig_listdir = os.listdir

        def listdir(path):
            # The python coverage tool got angry with my overly broad mocks
            if not path.startswith('/instance_path'):
                return orig_listdir(path)

            if path == '/instance_path':
                return ['instance-1', 'instance-2', 'instance-3', '_base']

            if path == '/instance_path/_base':
                return base_file_list

            self.fail('Unexpected directory listed: %s' % path)

        self.stubs.Set(os, 'listdir', lambda x: listdir(x))

        # Fake isfile for these faked images in _base
        orig_isfile = os.path.isfile

        def isfile(path):
            # The python coverage tool got angry with my overly broad mocks
            if not path.startswith('/instance_path'):
                return orig_isfile(path)

            for p in base_file_list:
                if path == fq_path(p):
                    return True

            self.fail('Unexpected isfile call: %s' % path)

        self.stubs.Set(os.path, 'isfile', lambda x: isfile(x))

        # Fake the database call which lists running instances
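        # Note that instance-2 also boots from kernel '21' and ramdisk
        # '22', which is why hashed_21 and hashed_22 are expected to be
        # active in the assertions below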
        all_instances = [{'image_ref': '1',
                          'host': CONF.host,
                          'name': 'instance-1',
                          'uuid': '123',
                          'vm_state': '',
                          'task_state': ''},
                         {'image_ref': '1',
                          'kernel_id': '21',
                          'ramdisk_id': '22',
                          'host': CONF.host,
                          'name': 'instance-2',
                          'uuid': '456',
                          'vm_state': '',
                          'task_state': ''}]

        image_cache_manager = imagecache.ImageCacheManager()

        # Fake the utils call which finds the backing image
        def get_disk_backing_file(path):
            if path in ['/instance_path/instance-1/disk',
                        '/instance_path/instance-2/disk']:
                return fq_path('%s_5368709120' % hashed_1)
            self.fail('Unexpected backing file lookup: %s' % path)

        self.stubs.Set(virtutils, 'get_disk_backing_file',
                       lambda x: get_disk_backing_file(x))

        # Fake out verifying checksums, as that is tested elsewhere
        self.stubs.Set(image_cache_manager, '_verify_checksum',
                       lambda x, y: y == hashed_42)

        # Fake getmtime as well
        orig_getmtime = os.path.getmtime

        def getmtime(path):
            if not path.startswith('/instance_path'):
                return orig_getmtime(path)

            return 1000000

        self.stubs.Set(os.path, 'getmtime', lambda x: getmtime(x))

        # Make sure we don't accidentally remove a real file
        orig_remove = os.remove

        def remove(path):
            if not path.startswith('/instance_path'):
                return orig_remove(path)

            # Don't try to remove fake files
            return

        self.stubs.Set(os, 'remove', lambda x: remove(x))

        # And finally we can make the call we're actually testing...
        # The argument here should be a context, but it is mocked out
        image_cache_manager.verify_base_images(None, all_instances)

        # Verify that the base files were sorted into the expected
        # active, removable and corrupt sets
        active = [fq_path(hashed_1), fq_path('%s_5368709120' % hashed_1),
                  fq_path(hashed_21), fq_path(hashed_22)]
        self.assertEquals(image_cache_manager.active_base_files, active)

        for rem in [fq_path('e97222e91fc4241f49a7f520d1dcf446751129b3_sm'),
                    fq_path('e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm'),
                    fq_path(hashed_42),
                    fq_path('%s_10737418240' % hashed_1)]:
            self.assertTrue(rem in image_cache_manager.removable_base_files)

        # Ensure there are no "corrupt" images as well. Note this must be
        # assertEquals: assertTrue(len(...), 0) treats the 0 as a failure
        # message and passes vacuously.
        self.assertEquals(len(image_cache_manager.corrupt_base_files), 0)

    def test_verify_base_images_no_base(self):
        self.flags(instances_path='/tmp/no/such/dir/name/please')
        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager.verify_base_images(None, [])

    def test_is_valid_info_file(self):
        hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'

        self.flags(instances_path='/tmp/no/such/dir/name/please')
        self.flags(image_info_filename_pattern=('$instances_path/_base/'
                                                '%(image)s.info'))
        base_filename = os.path.join(CONF.instances_path, '_base', hashed)

        is_valid_info_file = imagecache.is_valid_info_file
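
        # Only paths matching the configured info filename pattern count
        # as info files; base images themselves and checksum side files
        # ('.sha1') do not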
        self.assertFalse(is_valid_info_file('banana'))
        self.assertFalse(is_valid_info_file(
            os.path.join(CONF.instances_path, '_base', '00000001')))
        self.assertFalse(is_valid_info_file(base_filename))
        self.assertFalse(is_valid_info_file(base_filename + '.sha1'))
        self.assertTrue(is_valid_info_file(base_filename + '.info'))

    def test_configured_checksum_path(self):
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            self.flags(image_info_filename_pattern=('$instances_path/'
                                                    '%(image)s.info'))
            self.flags(remove_unused_base_images=True)

            # Ensure there is a base directory
            os.mkdir(os.path.join(tmpdir, '_base'))

            # Fake the database call which lists running instances
            all_instances = [{'image_ref': '1',
                              'host': CONF.host,
                              'name': 'instance-1',
                              'uuid': '123',
                              'vm_state': '',
                              'task_state': ''},
                             {'image_ref': '1',
                              'host': CONF.host,
                              'name': 'instance-2',
                              'uuid': '456',
                              'vm_state': '',
                              'task_state': ''}]

            def touch(filename):
                f = open(filename, 'w')
                f.write('Touched')
                f.close()

            old = time.time() - (25 * 3600)
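            # 25 hours ago -- old enough that an unused base file would
            # have been removed, so the assertions below show that an
            # in-use image and its info file are preserved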
            hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'
            base_filename = os.path.join(tmpdir, hashed)
            touch(base_filename)
            touch(base_filename + '.info')
            os.utime(base_filename + '.info', (old, old))

            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.verify_base_images(None, all_instances)

            self.assertTrue(os.path.exists(base_filename))
            self.assertTrue(os.path.exists(base_filename + '.info'))

    def test_compute_manager(self):
        was = {'called': False}
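        # A mutable dict lets the fake below record that it ran; Python 2
        # closures cannot rebind an outer name (there is no nonlocal)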

        def fake_get_all_by_filters(context, *args, **kwargs):
            was['called'] = True
            return [{'image_ref': '1',
                     'host': CONF.host,
                     'name': 'instance-1',
                     'uuid': '123',
                     'vm_state': '',
                     'task_state': ''},
                    {'image_ref': '1',
                     'host': CONF.host,
                     'name': 'instance-2',
                     'uuid': '456',
                     'vm_state': '',
                     'task_state': ''}]

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)

            self.stubs.Set(db, 'instance_get_all_by_filters',
                           fake_get_all_by_filters)
            compute = importutils.import_object(CONF.compute_manager)
            self.flags(use_local=True, group='conductor')
            compute.conductor_api = conductor.API()
            compute._run_image_cache_manager_pass(None)
            self.assertTrue(was['called'])